Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
Staging: octeon-ethernet: Fix race freeing transmit buffers.
Staging: octeon-ethernet: Convert to use net_device_ops.
MIPS: Cavium: Add CPU hotplugging code.
MIPS: SMP: Allow suspend and hibernation if CPU hotplug is available
MIPS: Add arch generic CPU hotplug
DMA: txx9dmac: use dma_unmap_single if DMA_COMPL_{SRC,DEST}_UNMAP_SINGLE set
MIPS: Sibyte: Fix build error if CONFIG_SERIAL_SB1250_DUART is undefined.
MIPS: MIPSsim: Fix build error if MSC01E_INT_BASE is undefined.
MIPS: Hibernation: Remove SMP TLB and cacheflushing code.
MIPS: Build fix - include <linux/smp.h> into all smp_processor_id() users.
MIPS: bug.h Build fix - include <linux/compiler.h>.

+996 -492
+14 -3
arch/mips/Kconfig
··· 601 601 select SYS_SUPPORTS_64BIT_KERNEL 602 602 select SYS_SUPPORTS_BIG_ENDIAN 603 603 select SYS_SUPPORTS_HIGHMEM 604 + select SYS_SUPPORTS_HOTPLUG_CPU 604 605 select SYS_HAS_CPU_CAVIUM_OCTEON 605 606 help 606 607 The Octeon simulator is software performance model of the Cavium ··· 616 615 select SYS_SUPPORTS_64BIT_KERNEL 617 616 select SYS_SUPPORTS_BIG_ENDIAN 618 617 select SYS_SUPPORTS_HIGHMEM 618 + select SYS_SUPPORTS_HOTPLUG_CPU 619 619 select SYS_HAS_EARLY_PRINTK 620 620 select SYS_HAS_CPU_CAVIUM_OCTEON 621 621 select SWAP_IO_SPACE ··· 786 784 bool 787 785 788 786 config HOTPLUG_CPU 787 + bool "Support for hot-pluggable CPUs" 788 + depends on SMP && HOTPLUG && SYS_SUPPORTS_HOTPLUG_CPU 789 + help 790 + Say Y here to allow turning CPUs off and on. CPUs can be 791 + controlled through /sys/devices/system/cpu. 792 + (Note: power management support will enable this option 793 + automatically on SMP systems. ) 794 + Say N if you want to disable CPU hotplug. 795 + 796 + config SYS_SUPPORTS_HOTPLUG_CPU 789 797 bool 790 - default n 791 798 792 799 config I8259 793 800 bool ··· 2147 2136 2148 2137 config ARCH_HIBERNATION_POSSIBLE 2149 2138 def_bool y 2150 - depends on !SMP 2139 + depends on SYS_SUPPORTS_HOTPLUG_CPU 2151 2140 2152 2141 config ARCH_SUSPEND_POSSIBLE 2153 2142 def_bool y 2154 - depends on !SMP 2143 + depends on SYS_SUPPORTS_HOTPLUG_CPU 2155 2144 2156 2145 source "kernel/power/Kconfig" 2157 2146
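[Editor's illustration, not part of this merge] The new HOTPLUG_CPU help text above points at /sys/devices/system/cpu as the control interface. A minimal userspace sketch in C of what that amounts to, assuming the target CPU (cpu1 here, chosen only as an example) actually supports hotplug; note cpu0 cannot be offlined, as octeon_cpu_disable() below refuses it:

    #include <stdio.h>

    int main(void)
    {
            /* Example sysfs path; cpu1 is assumed to be hotpluggable. */
            FILE *f = fopen("/sys/devices/system/cpu/cpu1/online", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* Writing "0" takes the CPU offline, "1" brings it back. */
            if (fputs("0", f) == EOF)
                    perror("fputs");
            fclose(f);
            return 0;
    }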
+60 -1
arch/mips/cavium-octeon/octeon-irq.c
··· 7 7 */ 8 8 #include <linux/irq.h> 9 9 #include <linux/interrupt.h> 10 - #include <linux/hardirq.h> 10 + #include <linux/smp.h> 11 11 12 12 #include <asm/octeon/octeon.h> 13 13 #include <asm/octeon/cvmx-pexp-defs.h> ··· 501 501 } 502 502 } 503 503 } 504 + 505 + #ifdef CONFIG_HOTPLUG_CPU 506 + static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu) 507 + { 508 + unsigned int isset; 509 + #ifdef CONFIG_SMP 510 + int coreid = cpu_logical_map(cpu); 511 + #else 512 + int coreid = cvmx_get_core_num(); 513 + #endif 514 + int bit = (irq < OCTEON_IRQ_WDOG0) ? 515 + irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0; 516 + if (irq < 64) { 517 + isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) & 518 + (1ull << bit)) >> bit; 519 + } else { 520 + isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) & 521 + (1ull << bit)) >> bit; 522 + } 523 + return isset; 524 + } 525 + 526 + void fixup_irqs(void) 527 + { 528 + int irq; 529 + 530 + for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) 531 + octeon_irq_core_disable_local(irq); 532 + 533 + for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) { 534 + if (is_irq_enabled_on_cpu(irq, smp_processor_id())) { 535 + /* ciu irq migrates to next cpu */ 536 + octeon_irq_chip_ciu0.disable(irq); 537 + octeon_irq_ciu0_set_affinity(irq, &cpu_online_map); 538 + } 539 + } 540 + 541 + #if 0 542 + for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++) 543 + octeon_irq_mailbox_mask(irq); 544 + #endif 545 + for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) { 546 + if (is_irq_enabled_on_cpu(irq, smp_processor_id())) { 547 + /* ciu irq migrates to next cpu */ 548 + octeon_irq_chip_ciu0.disable(irq); 549 + octeon_irq_ciu0_set_affinity(irq, &cpu_online_map); 550 + } 551 + } 552 + 553 + for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) { 554 + if (is_irq_enabled_on_cpu(irq, smp_processor_id())) { 555 + /* ciu irq migrates to next cpu */ 556 + octeon_irq_chip_ciu1.disable(irq); 557 + octeon_irq_ciu1_set_affinity(irq, &cpu_online_map); 558 + } 559 + } 560 + } 561 + 562 + #endif /* CONFIG_HOTPLUG_CPU */
+70
arch/mips/cavium-octeon/octeon_boot.h
··· 1 + /* 2 + * (C) Copyright 2004, 2005 Cavium Networks 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License as 6 + * published by the Free Software Foundation; either version 2 of 7 + * the License, or (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 17 + * MA 02111-1307 USA 18 + */ 19 + 20 + #ifndef __OCTEON_BOOT_H__ 21 + #define __OCTEON_BOOT_H__ 22 + 23 + #include <linux/types.h> 24 + 25 + struct boot_init_vector { 26 + uint32_t stack_addr; 27 + uint32_t code_addr; 28 + uint32_t app_start_func_addr; 29 + uint32_t k0_val; 30 + uint32_t flags; 31 + uint32_t boot_info_addr; 32 + uint32_t pad; 33 + uint32_t pad2; 34 + }; 35 + 36 + /* similar to bootloader's linux_app_boot_info but without global data */ 37 + struct linux_app_boot_info { 38 + uint32_t labi_signature; 39 + uint32_t start_core0_addr; 40 + uint32_t avail_coremask; 41 + uint32_t pci_console_active; 42 + uint32_t icache_prefetch_disable; 43 + uint32_t InitTLBStart_addr; 44 + uint32_t start_app_addr; 45 + uint32_t cur_exception_base; 46 + uint32_t no_mark_private_data; 47 + uint32_t compact_flash_common_base_addr; 48 + uint32_t compact_flash_attribute_base_addr; 49 + uint32_t led_display_base_addr; 50 + }; 51 + 52 + /* If not to copy a lot of bootloader's structures 53 + here is only offset of requested member */ 54 + #define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c 55 + 56 + /* hardcoded in bootloader */ 57 + #define LABI_ADDR_IN_BOOTLOADER 0x700 58 + 59 + #define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot" 60 + 61 + #define LABI_SIGNATURE 0xAABBCCDD 62 + 63 + /* from uboot-headers/octeon_mem_map.h */ 64 + #define EXCEPTION_BASE_INCR (4 * 1024) 65 + /* Increment size for exception base addresses (4k minimum) */ 66 + #define EXCEPTION_BASE_BASE 0 67 + #define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800) 68 + #define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE) 69 + 70 + #endif /* __OCTEON_BOOT_H__ */
+1
arch/mips/cavium-octeon/setup.c
··· 13 13 #include <linux/io.h> 14 14 #include <linux/irq.h> 15 15 #include <linux/serial.h> 16 + #include <linux/smp.h> 16 17 #include <linux/types.h> 17 18 #include <linux/string.h> /* for memset */ 18 19 #include <linux/tty.h>
+233 -1
arch/mips/cavium-octeon/smp.c
··· 5 5 * 6 6 * Copyright (C) 2004-2008 Cavium Networks 7 7 */ 8 + #include <linux/cpu.h> 8 9 #include <linux/init.h> 9 10 #include <linux/delay.h> 10 11 #include <linux/smp.h> ··· 20 19 21 20 #include <asm/octeon/octeon.h> 22 21 22 + #include "octeon_boot.h" 23 + 23 24 volatile unsigned long octeon_processor_boot = 0xff; 24 25 volatile unsigned long octeon_processor_sp; 25 26 volatile unsigned long octeon_processor_gp; 27 + 28 + #ifdef CONFIG_HOTPLUG_CPU 29 + static unsigned int InitTLBStart_addr; 30 + #endif 26 31 27 32 static irqreturn_t mailbox_interrupt(int irq, void *dev_id) 28 33 { ··· 74 67 } 75 68 76 69 /** 77 - * Detect available CPUs, populate phys_cpu_present_map 70 + * Detect available CPUs, populate cpu_possible_map 78 71 */ 72 + static void octeon_smp_hotplug_setup(void) 73 + { 74 + #ifdef CONFIG_HOTPLUG_CPU 75 + uint32_t labi_signature; 76 + 77 + labi_signature = 78 + cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 79 + LABI_ADDR_IN_BOOTLOADER + 80 + offsetof(struct linux_app_boot_info, 81 + labi_signature))); 82 + if (labi_signature != LABI_SIGNATURE) 83 + pr_err("The bootloader version on this board is incorrect\n"); 84 + InitTLBStart_addr = 85 + cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 86 + LABI_ADDR_IN_BOOTLOADER + 87 + offsetof(struct linux_app_boot_info, 88 + InitTLBStart_addr))); 89 + #endif 90 + } 91 + 79 92 static void octeon_smp_setup(void) 80 93 { 81 94 const int coreid = cvmx_get_core_num(); ··· 118 91 cpus++; 119 92 } 120 93 } 94 + cpu_present_map = cpu_possible_map; 95 + 96 + octeon_smp_hotplug_setup(); 121 97 } 122 98 123 99 /** ··· 158 128 const int coreid = cvmx_get_core_num(); 159 129 union cvmx_ciu_intx_sum0 interrupt_enable; 160 130 131 + #ifdef CONFIG_HOTPLUG_CPU 132 + unsigned int cur_exception_base; 133 + 134 + cur_exception_base = cvmx_read64_uint32( 135 + CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 136 + LABI_ADDR_IN_BOOTLOADER + 137 + offsetof(struct linux_app_boot_info, 138 + cur_exception_base))); 139 + /* cur_exception_base is incremented in bootloader after setting */ 140 + write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR)); 141 + #endif 161 142 octeon_check_cpu_bist(); 162 143 octeon_init_cvmcount(); 163 144 /* ··· 240 199 #endif 241 200 } 242 201 202 + #ifdef CONFIG_HOTPLUG_CPU 203 + 204 + /* State of each CPU. 
*/ 205 + DEFINE_PER_CPU(int, cpu_state); 206 + 207 + extern void fixup_irqs(void); 208 + 209 + static DEFINE_SPINLOCK(smp_reserve_lock); 210 + 211 + static int octeon_cpu_disable(void) 212 + { 213 + unsigned int cpu = smp_processor_id(); 214 + 215 + if (cpu == 0) 216 + return -EBUSY; 217 + 218 + spin_lock(&smp_reserve_lock); 219 + 220 + cpu_clear(cpu, cpu_online_map); 221 + cpu_clear(cpu, cpu_callin_map); 222 + local_irq_disable(); 223 + fixup_irqs(); 224 + local_irq_enable(); 225 + 226 + flush_cache_all(); 227 + local_flush_tlb_all(); 228 + 229 + spin_unlock(&smp_reserve_lock); 230 + 231 + return 0; 232 + } 233 + 234 + static void octeon_cpu_die(unsigned int cpu) 235 + { 236 + int coreid = cpu_logical_map(cpu); 237 + uint32_t avail_coremask; 238 + struct cvmx_bootmem_named_block_desc *block_desc; 239 + 240 + #ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG 241 + /* Disable the watchdog */ 242 + cvmx_ciu_wdogx_t ciu_wdog; 243 + ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu)); 244 + ciu_wdog.s.mode = 0; 245 + cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64); 246 + #endif 247 + 248 + while (per_cpu(cpu_state, cpu) != CPU_DEAD) 249 + cpu_relax(); 250 + 251 + /* 252 + * This is a bit complicated strategics of getting/settig available 253 + * cores mask, copied from bootloader 254 + */ 255 + /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */ 256 + block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME); 257 + 258 + if (!block_desc) { 259 + avail_coremask = 260 + cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 261 + LABI_ADDR_IN_BOOTLOADER + 262 + offsetof 263 + (struct linux_app_boot_info, 264 + avail_coremask))); 265 + } else { /* alternative, already initialized */ 266 + avail_coremask = 267 + cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 268 + block_desc->base_addr + 269 + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK)); 270 + } 271 + 272 + avail_coremask |= 1 << coreid; 273 + 274 + /* Setting avail_coremask for bootoct binary */ 275 + if (!block_desc) { 276 + cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 277 + LABI_ADDR_IN_BOOTLOADER + 278 + offsetof(struct linux_app_boot_info, 279 + avail_coremask)), 280 + avail_coremask); 281 + } else { 282 + cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 283 + block_desc->base_addr + 284 + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK), 285 + avail_coremask); 286 + } 287 + 288 + pr_info("Reset core %d. 
Available Coremask = %x \n", coreid, 289 + avail_coremask); 290 + cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid); 291 + cvmx_write_csr(CVMX_CIU_PP_RST, 0); 292 + } 293 + 294 + void play_dead(void) 295 + { 296 + int coreid = cvmx_get_core_num(); 297 + 298 + idle_task_exit(); 299 + octeon_processor_boot = 0xff; 300 + per_cpu(cpu_state, coreid) = CPU_DEAD; 301 + 302 + while (1) /* core will be reset here */ 303 + ; 304 + } 305 + 306 + extern void kernel_entry(unsigned long arg1, ...); 307 + 308 + static void start_after_reset(void) 309 + { 310 + kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */ 311 + } 312 + 313 + int octeon_update_boot_vector(unsigned int cpu) 314 + { 315 + 316 + int coreid = cpu_logical_map(cpu); 317 + unsigned int avail_coremask; 318 + struct cvmx_bootmem_named_block_desc *block_desc; 319 + struct boot_init_vector *boot_vect = 320 + (struct boot_init_vector *) cvmx_phys_to_ptr(0x0 + 321 + BOOTLOADER_BOOT_VECTOR); 322 + 323 + block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME); 324 + 325 + if (!block_desc) { 326 + avail_coremask = 327 + cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 328 + LABI_ADDR_IN_BOOTLOADER + 329 + offsetof(struct linux_app_boot_info, 330 + avail_coremask))); 331 + } else { /* alternative, already initialized */ 332 + avail_coremask = 333 + cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 334 + block_desc->base_addr + 335 + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK)); 336 + } 337 + 338 + if (!(avail_coremask & (1 << coreid))) { 339 + /* core not available, assume, that catched by simple-executive */ 340 + cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid); 341 + cvmx_write_csr(CVMX_CIU_PP_RST, 0); 342 + } 343 + 344 + boot_vect[coreid].app_start_func_addr = 345 + (uint32_t) (unsigned long) start_after_reset; 346 + boot_vect[coreid].code_addr = InitTLBStart_addr; 347 + 348 + CVMX_SYNC; 349 + 350 + cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask); 351 + 352 + return 0; 353 + } 354 + 355 + static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb, 356 + unsigned long action, void *hcpu) 357 + { 358 + unsigned int cpu = (unsigned long)hcpu; 359 + 360 + switch (action) { 361 + case CPU_UP_PREPARE: 362 + octeon_update_boot_vector(cpu); 363 + break; 364 + case CPU_ONLINE: 365 + pr_info("Cpu %d online\n", cpu); 366 + break; 367 + case CPU_DEAD: 368 + break; 369 + } 370 + 371 + return NOTIFY_OK; 372 + } 373 + 374 + static struct notifier_block __cpuinitdata octeon_cpu_notifier = { 375 + .notifier_call = octeon_cpu_callback, 376 + }; 377 + 378 + static int __cpuinit register_cavium_notifier(void) 379 + { 380 + register_hotcpu_notifier(&octeon_cpu_notifier); 381 + 382 + return 0; 383 + } 384 + 385 + late_initcall(register_cavium_notifier); 386 + 387 + #endif /* CONFIG_HOTPLUG_CPU */ 388 + 243 389 struct plat_smp_ops octeon_smp_ops = { 244 390 .send_ipi_single = octeon_send_ipi_single, 245 391 .send_ipi_mask = octeon_send_ipi_mask, ··· 436 208 .boot_secondary = octeon_boot_secondary, 437 209 .smp_setup = octeon_smp_setup, 438 210 .prepare_cpus = octeon_prepare_cpus, 211 + #ifdef CONFIG_HOTPLUG_CPU 212 + .cpu_disable = octeon_cpu_disable, 213 + .cpu_die = octeon_cpu_die, 214 + #endif 439 215 };
+1
arch/mips/include/asm/bug.h
··· 1 1 #ifndef __ASM_BUG_H 2 2 #define __ASM_BUG_H 3 3 4 + #include <linux/compiler.h> 4 5 #include <asm/sgidefs.h> 5 6 6 7 #ifdef CONFIG_BUG
+1
arch/mips/include/asm/bugs.h
··· 11 11 12 12 #include <linux/bug.h> 13 13 #include <linux/delay.h> 14 + #include <linux/smp.h> 14 15 15 16 #include <asm/cpu.h> 16 17 #include <asm/cpu-info.h>
+1
arch/mips/include/asm/irq.h
··· 10 10 #define _ASM_IRQ_H 11 11 12 12 #include <linux/linkage.h> 13 + #include <linux/smp.h> 13 14 14 15 #include <asm/mipsmtregs.h> 15 16
+1
arch/mips/include/asm/mmu_context.h
··· 13 13 14 14 #include <linux/errno.h> 15 15 #include <linux/sched.h> 16 + #include <linux/smp.h> 16 17 #include <linux/slab.h> 17 18 #include <asm/cacheflush.h> 18 19 #include <asm/tlbflush.h>
+4
arch/mips/include/asm/smp-ops.h
··· 26 26 void (*boot_secondary)(int cpu, struct task_struct *idle); 27 27 void (*smp_setup)(void); 28 28 void (*prepare_cpus)(unsigned int max_cpus); 29 + #ifdef CONFIG_HOTPLUG_CPU 30 + int (*cpu_disable)(void); 31 + void (*cpu_die)(unsigned int cpu); 32 + #endif 29 33 }; 30 34 31 35 extern void register_smp_ops(struct plat_smp_ops *ops);
+20
arch/mips/include/asm/smp.h
··· 13 13 14 14 #include <linux/bitops.h> 15 15 #include <linux/linkage.h> 16 + #include <linux/smp.h> 16 17 #include <linux/threads.h> 17 18 #include <linux/cpumask.h> 18 19 ··· 41 40 /* Octeon - Tell another core to flush its icache */ 42 41 #define SMP_ICACHE_FLUSH 0x4 43 42 43 + extern volatile cpumask_t cpu_callin_map; 44 44 45 45 extern void asmlinkage smp_bootstrap(void); 46 46 ··· 56 54 57 55 mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); 58 56 } 57 + 58 + #ifdef CONFIG_HOTPLUG_CPU 59 + static inline int __cpu_disable(void) 60 + { 61 + extern struct plat_smp_ops *mp_ops; /* private */ 62 + 63 + return mp_ops->cpu_disable(); 64 + } 65 + 66 + static inline void __cpu_die(unsigned int cpu) 67 + { 68 + extern struct plat_smp_ops *mp_ops; /* private */ 69 + 70 + mp_ops->cpu_die(cpu); 71 + } 72 + 73 + extern void play_dead(void); 74 + #endif 59 75 60 76 extern asmlinkage void smp_call_function_interrupt(void); 61 77
+1
arch/mips/include/asm/sn/addrs.h
··· 11 11 12 12 13 13 #ifndef __ASSEMBLY__ 14 + #include <linux/smp.h> 14 15 #include <linux/types.h> 15 16 #endif /* !__ASSEMBLY__ */ 16 17
+1
arch/mips/jazz/irq.c
··· 10 10 #include <linux/init.h> 11 11 #include <linux/interrupt.h> 12 12 #include <linux/kernel.h> 13 + #include <linux/smp.h> 13 14 #include <linux/spinlock.h> 14 15 15 16 #include <asm/irq_cpu.h>
+1
arch/mips/kernel/cevt-bcm1480.c
··· 18 18 #include <linux/clockchips.h> 19 19 #include <linux/interrupt.h> 20 20 #include <linux/percpu.h> 21 + #include <linux/smp.h> 21 22 22 23 #include <asm/addrspace.h> 23 24 #include <asm/io.h>
+1
arch/mips/kernel/cevt-r4k.c
··· 9 9 #include <linux/clockchips.h> 10 10 #include <linux/interrupt.h> 11 11 #include <linux/percpu.h> 12 + #include <linux/smp.h> 12 13 13 14 #include <asm/smtc_ipi.h> 14 15 #include <asm/time.h>
+1
arch/mips/kernel/cevt-sb1250.c
··· 18 18 #include <linux/clockchips.h> 19 19 #include <linux/interrupt.h> 20 20 #include <linux/percpu.h> 21 + #include <linux/smp.h> 21 22 22 23 #include <asm/addrspace.h> 23 24 #include <asm/io.h>
+1
arch/mips/kernel/cevt-smtc.c
··· 10 10 #include <linux/clockchips.h> 11 11 #include <linux/interrupt.h> 12 12 #include <linux/percpu.h> 13 + #include <linux/smp.h> 13 14 14 15 #include <asm/smtc_ipi.h> 15 16 #include <asm/time.h>
+1
arch/mips/kernel/cpu-probe.c
··· 14 14 #include <linux/init.h> 15 15 #include <linux/kernel.h> 16 16 #include <linux/ptrace.h> 17 + #include <linux/smp.h> 17 18 #include <linux/stddef.h> 18 19 19 20 #include <asm/bugs.h>
+1
arch/mips/kernel/i8253.c
··· 7 7 #include <linux/interrupt.h> 8 8 #include <linux/jiffies.h> 9 9 #include <linux/module.h> 10 + #include <linux/smp.h> 10 11 #include <linux/spinlock.h> 11 12 12 13 #include <asm/delay.h>
+1
arch/mips/kernel/irq-gic.c
··· 2 2 3 3 #include <linux/bitmap.h> 4 4 #include <linux/init.h> 5 + #include <linux/smp.h> 5 6 6 7 #include <asm/io.h> 7 8 #include <asm/gic.h>
+1
arch/mips/kernel/kgdb.c
··· 26 26 #include <linux/kgdb.h> 27 27 #include <linux/kdebug.h> 28 28 #include <linux/sched.h> 29 + #include <linux/smp.h> 29 30 #include <asm/inst.h> 30 31 #include <asm/fpu.h> 31 32 #include <asm/cacheflush.h>
+12 -1
arch/mips/kernel/process.c
··· 50 50 */ 51 51 void __noreturn cpu_idle(void) 52 52 { 53 + int cpu; 54 + 55 + /* CPU is going idle. */ 56 + cpu = smp_processor_id(); 57 + 53 58 /* endless idle loop with no priority at all */ 54 59 while (1) { 55 60 tick_nohz_stop_sched_tick(1); 56 - while (!need_resched()) { 61 + while (!need_resched() && cpu_online(cpu)) { 57 62 #ifdef CONFIG_MIPS_MT_SMTC 58 63 extern void smtc_idle_loop_hook(void); 59 64 ··· 67 62 if (cpu_wait) 68 63 (*cpu_wait)(); 69 64 } 65 + #ifdef CONFIG_HOTPLUG_CPU 66 + if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && 67 + (system_state == SYSTEM_RUNNING || 68 + system_state == SYSTEM_BOOTING)) 69 + play_dead(); 70 + #endif 70 71 tick_nohz_restart_sched_tick(); 71 72 preempt_enable_no_resched(); 72 73 schedule();
+1
arch/mips/kernel/smp-cmp.c
··· 20 20 21 21 #include <linux/kernel.h> 22 22 #include <linux/sched.h> 23 + #include <linux/smp.h> 23 24 #include <linux/cpumask.h> 24 25 #include <linux/interrupt.h> 25 26 #include <linux/compiler.h>
+16
arch/mips/kernel/smp-up.c
··· 55 55 { 56 56 } 57 57 58 + #ifdef CONFIG_HOTPLUG_CPU 59 + static int up_cpu_disable(void) 60 + { 61 + return -ENOSYS; 62 + } 63 + 64 + static void up_cpu_die(unsigned int cpu) 65 + { 66 + BUG(); 67 + } 68 + #endif 69 + 58 70 struct plat_smp_ops up_smp_ops = { 59 71 .send_ipi_single = up_send_ipi_single, 60 72 .send_ipi_mask = up_send_ipi_mask, ··· 76 64 .boot_secondary = up_boot_secondary, 77 65 .smp_setup = up_smp_setup, 78 66 .prepare_cpus = up_prepare_cpus, 67 + #ifdef CONFIG_HOTPLUG_CPU 68 + .cpu_disable = up_cpu_disable, 69 + .cpu_die = up_cpu_die, 70 + #endif 79 71 };
+14 -4
arch/mips/kernel/smp.c
··· 22 22 #include <linux/delay.h> 23 23 #include <linux/init.h> 24 24 #include <linux/interrupt.h> 25 + #include <linux/smp.h> 25 26 #include <linux/spinlock.h> 26 27 #include <linux/threads.h> 27 28 #include <linux/module.h> ··· 45 44 #include <asm/mipsmtregs.h> 46 45 #endif /* CONFIG_MIPS_MT_SMTC */ 47 46 48 - static volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 47 + volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 49 48 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 50 49 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 51 50 ··· 201 200 * and keep control until "cpu_online(cpu)" is set. Note: cpu is 202 201 * physical, not logical. 203 202 */ 203 + static struct task_struct *cpu_idle_thread[NR_CPUS]; 204 + 204 205 int __cpuinit __cpu_up(unsigned int cpu) 205 206 { 206 207 struct task_struct *idle; ··· 212 209 * The following code is purely to make sure 213 210 * Linux can schedule processes on this slave. 214 211 */ 215 - idle = fork_idle(cpu); 216 - if (IS_ERR(idle)) 217 - panic(KERN_ERR "Fork failed for CPU %d", cpu); 212 + if (!cpu_idle_thread[cpu]) { 213 + idle = fork_idle(cpu); 214 + cpu_idle_thread[cpu] = idle; 215 + 216 + if (IS_ERR(idle)) 217 + panic(KERN_ERR "Fork failed for CPU %d", cpu); 218 + } else { 219 + idle = cpu_idle_thread[cpu]; 220 + init_idle(idle, cpu); 221 + } 218 222 219 223 mp_ops->boot_secondary(cpu, idle); 220 224
+1
arch/mips/kernel/smtc.c
··· 20 20 #include <linux/clockchips.h> 21 21 #include <linux/kernel.h> 22 22 #include <linux/sched.h> 23 + #include <linux/smp.h> 23 24 #include <linux/cpumask.h> 24 25 #include <linux/interrupt.h> 25 26 #include <linux/kernel_stat.h>
+4 -1
arch/mips/kernel/topology.c
··· 17 17 #endif /* CONFIG_NUMA */ 18 18 19 19 for_each_present_cpu(i) { 20 - ret = register_cpu(&per_cpu(cpu_devices, i), i); 20 + struct cpu *c = &per_cpu(cpu_devices, i); 21 + 22 + c->hotpluggable = 1; 23 + ret = register_cpu(c, i); 21 24 if (ret) 22 25 printk(KERN_WARNING "topology_init: register_cpu %d " 23 26 "failed (%d)\n", i, ret);
+1
arch/mips/mipssim/sim_time.c
··· 91 91 mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; 92 92 } else { 93 93 #endif 94 + { 94 95 if (cpu_has_vint) 95 96 set_vi_handler(cp0_compare_irq, mips_timer_dispatch); 96 97 mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+1
arch/mips/mm/c-octeon.c
··· 8 8 #include <linux/init.h> 9 9 #include <linux/kernel.h> 10 10 #include <linux/sched.h> 11 + #include <linux/smp.h> 11 12 #include <linux/mm.h> 12 13 #include <linux/bitops.h> 13 14 #include <linux/cpu.h>
+1
arch/mips/mm/c-r3k.c
··· 12 12 #include <linux/init.h> 13 13 #include <linux/kernel.h> 14 14 #include <linux/sched.h> 15 + #include <linux/smp.h> 15 16 #include <linux/mm.h> 16 17 17 18 #include <asm/page.h>
+1
arch/mips/mm/c-r4k.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/linkage.h> 15 15 #include <linux/sched.h> 16 + #include <linux/smp.h> 16 17 #include <linux/mm.h> 17 18 #include <linux/module.h> 18 19 #include <linux/bitops.h>
+1
arch/mips/mm/c-tx39.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/kernel.h> 13 13 #include <linux/sched.h> 14 + #include <linux/smp.h> 14 15 #include <linux/mm.h> 15 16 16 17 #include <asm/cacheops.h>
+1
arch/mips/mm/highmem.c
··· 1 1 #include <linux/module.h> 2 2 #include <linux/highmem.h> 3 + #include <linux/smp.h> 3 4 #include <asm/fixmap.h> 4 5 #include <asm/tlbflush.h> 5 6
+1
arch/mips/mm/init.c
··· 13 13 #include <linux/module.h> 14 14 #include <linux/signal.h> 15 15 #include <linux/sched.h> 16 + #include <linux/smp.h> 16 17 #include <linux/kernel.h> 17 18 #include <linux/errno.h> 18 19 #include <linux/string.h>
+1
arch/mips/mm/page.c
··· 10 10 #include <linux/init.h> 11 11 #include <linux/kernel.h> 12 12 #include <linux/sched.h> 13 + #include <linux/smp.h> 13 14 #include <linux/mm.h> 14 15 #include <linux/module.h> 15 16 #include <linux/proc_fs.h>
+1
arch/mips/mm/tlb-r3k.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/kernel.h> 15 15 #include <linux/sched.h> 16 + #include <linux/smp.h> 16 17 #include <linux/mm.h> 17 18 18 19 #include <asm/page.h>
+1
arch/mips/mm/tlb-r4k.c
··· 10 10 */ 11 11 #include <linux/init.h> 12 12 #include <linux/sched.h> 13 + #include <linux/smp.h> 13 14 #include <linux/mm.h> 14 15 #include <linux/hugetlb.h> 15 16
+1
arch/mips/mm/tlb-r8k.c
··· 10 10 */ 11 11 #include <linux/init.h> 12 12 #include <linux/sched.h> 13 + #include <linux/smp.h> 13 14 #include <linux/mm.h> 14 15 15 16 #include <asm/cpu.h>
+1
arch/mips/mm/tlbex.c
··· 23 23 #include <linux/bug.h> 24 24 #include <linux/kernel.h> 25 25 #include <linux/types.h> 26 + #include <linux/smp.h> 26 27 #include <linux/string.h> 27 28 #include <linux/init.h> 28 29
+1
arch/mips/mti-malta/malta-int.c
··· 24 24 #include <linux/init.h> 25 25 #include <linux/irq.h> 26 26 #include <linux/sched.h> 27 + #include <linux/smp.h> 27 28 #include <linux/slab.h> 28 29 #include <linux/interrupt.h> 29 30 #include <linux/io.h>
+1
arch/mips/pci/pci-ip27.c
··· 10 10 #include <linux/init.h> 11 11 #include <linux/kernel.h> 12 12 #include <linux/pci.h> 13 + #include <linux/smp.h> 13 14 #include <asm/sn/arch.h> 14 15 #include <asm/pci/bridge.h> 15 16 #include <asm/paccess.h>
+1
arch/mips/pmc-sierra/yosemite/smp.c
··· 1 1 #include <linux/linkage.h> 2 2 #include <linux/sched.h> 3 + #include <linux/smp.h> 3 4 4 5 #include <asm/pmon.h> 5 6 #include <asm/titan_dep.h>
-9
arch/mips/power/hibernate.S
··· 43 43 bne t1, t3, 1b 44 44 PTR_L t0, PBE_NEXT(t0) 45 45 bnez t0, 0b 46 - /* flush caches to make sure context is in memory */ 47 - PTR_L t0, __flush_cache_all 48 - jalr t0 49 - /* flush tlb entries */ 50 - #ifdef CONFIG_SMP 51 - jal flush_tlb_all 52 - #else 53 - jal local_flush_tlb_all 54 - #endif 55 46 PTR_LA t0, saved_regs 56 47 PTR_L ra, PT_R31(t0) 57 48 PTR_L sp, PT_R29(t0)
+1
arch/mips/sgi-ip27/ip27-init.c
··· 9 9 #include <linux/kernel.h> 10 10 #include <linux/init.h> 11 11 #include <linux/sched.h> 12 + #include <linux/smp.h> 12 13 #include <linux/mm.h> 13 14 #include <linux/module.h> 14 15 #include <linux/cpumask.h>
+1
arch/mips/sgi-ip27/ip27-irq.c
··· 18 18 #include <linux/ioport.h> 19 19 #include <linux/timex.h> 20 20 #include <linux/slab.h> 21 + #include <linux/smp.h> 21 22 #include <linux/random.h> 22 23 #include <linux/kernel.h> 23 24 #include <linux/kernel_stat.h>
+1
arch/mips/sgi-ip27/ip27-timer.c
··· 10 10 #include <linux/interrupt.h> 11 11 #include <linux/kernel_stat.h> 12 12 #include <linux/param.h> 13 + #include <linux/smp.h> 13 14 #include <linux/time.h> 14 15 #include <linux/timex.h> 15 16 #include <linux/mm.h>
+1
arch/mips/sgi-ip27/ip27-xtalk.c
··· 9 9 10 10 #include <linux/init.h> 11 11 #include <linux/kernel.h> 12 + #include <linux/smp.h> 12 13 #include <asm/sn/types.h> 13 14 #include <asm/sn/klconfig.h> 14 15 #include <asm/sn/hub.h>
+1
arch/mips/sibyte/bcm1480/irq.c
··· 19 19 #include <linux/init.h> 20 20 #include <linux/linkage.h> 21 21 #include <linux/interrupt.h> 22 + #include <linux/smp.h> 22 23 #include <linux/spinlock.h> 23 24 #include <linux/mm.h> 24 25 #include <linux/slab.h>
+4 -3
arch/mips/sibyte/common/cfe_console.c
··· 51 51 setleds("u0cn"); 52 52 } else if (!strcmp(consdev, "uart1")) { 53 53 setleds("u1cn"); 54 + } else 54 55 #endif 55 56 #ifdef CONFIG_VGA_CONSOLE 56 - } else if (!strcmp(consdev, "pcconsole0")) { 57 - setleds("pccn"); 58 - #endif 57 + if (!strcmp(consdev, "pcconsole0")) { 58 + setleds("pccn"); 59 59 } else 60 + #endif 60 61 return -ENODEV; 61 62 } 62 63 return 0;
+1
arch/mips/sni/time.c
··· 1 1 #include <linux/types.h> 2 2 #include <linux/interrupt.h> 3 + #include <linux/smp.h> 3 4 #include <linux/time.h> 4 5 #include <linux/clockchips.h> 5 6
+12 -8
drivers/dma/txx9dmac.c
··· 432 432 list_splice_init(&txd->tx_list, &dc->free_list); 433 433 list_move(&desc->desc_node, &dc->free_list); 434 434 435 - /* 436 - * We use dma_unmap_page() regardless of how the buffers were 437 - * mapped before they were submitted... 438 - */ 439 435 if (!ds) { 440 436 dma_addr_t dmaaddr; 441 437 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 442 438 dmaaddr = is_dmac64(dc) ? 443 439 desc->hwdesc.DAR : desc->hwdesc32.DAR; 444 - dma_unmap_page(chan2parent(&dc->chan), dmaaddr, 445 - desc->len, DMA_FROM_DEVICE); 440 + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 441 + dma_unmap_single(chan2parent(&dc->chan), 442 + dmaaddr, desc->len, DMA_FROM_DEVICE); 443 + else 444 + dma_unmap_page(chan2parent(&dc->chan), 445 + dmaaddr, desc->len, DMA_FROM_DEVICE); 446 446 } 447 447 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 448 448 dmaaddr = is_dmac64(dc) ? 449 449 desc->hwdesc.SAR : desc->hwdesc32.SAR; 450 - dma_unmap_page(chan2parent(&dc->chan), dmaaddr, 451 - desc->len, DMA_TO_DEVICE); 450 + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 451 + dma_unmap_single(chan2parent(&dc->chan), 452 + dmaaddr, desc->len, DMA_TO_DEVICE); 453 + else 454 + dma_unmap_page(chan2parent(&dc->chan), 455 + dmaaddr, desc->len, DMA_TO_DEVICE); 452 456 } 453 457 } 454 458
-1
drivers/staging/octeon/Makefile
··· 12 12 obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o 13 13 14 14 octeon-ethernet-objs := ethernet.o 15 - octeon-ethernet-objs += ethernet-common.o 16 15 octeon-ethernet-objs += ethernet-mdio.o 17 16 octeon-ethernet-objs += ethernet-mem.o 18 17 octeon-ethernet-objs += ethernet-proc.o
-328
drivers/staging/octeon/ethernet-common.c
··· 1 - /********************************************************************** 2 - * Author: Cavium Networks 3 - * 4 - * Contact: support@caviumnetworks.com 5 - * This file is part of the OCTEON SDK 6 - * 7 - * Copyright (c) 2003-2007 Cavium Networks 8 - * 9 - * This file is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License, Version 2, as 11 - * published by the Free Software Foundation. 12 - * 13 - * This file is distributed in the hope that it will be useful, but 14 - * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 - * NONINFRINGEMENT. See the GNU General Public License for more 17 - * details. 18 - * 19 - * You should have received a copy of the GNU General Public License 20 - * along with this file; if not, write to the Free Software 21 - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 - * or visit http://www.gnu.org/licenses/. 23 - * 24 - * This file may also be available under a different license from Cavium. 25 - * Contact Cavium Networks for more information 26 - **********************************************************************/ 27 - #include <linux/kernel.h> 28 - #include <linux/mii.h> 29 - #include <net/dst.h> 30 - 31 - #include <asm/atomic.h> 32 - #include <asm/octeon/octeon.h> 33 - 34 - #include "ethernet-defines.h" 35 - #include "ethernet-tx.h" 36 - #include "ethernet-mdio.h" 37 - #include "ethernet-util.h" 38 - #include "octeon-ethernet.h" 39 - #include "ethernet-common.h" 40 - 41 - #include "cvmx-pip.h" 42 - #include "cvmx-pko.h" 43 - #include "cvmx-fau.h" 44 - #include "cvmx-helper.h" 45 - 46 - #include "cvmx-gmxx-defs.h" 47 - 48 - /** 49 - * Get the low level ethernet statistics 50 - * 51 - * @dev: Device to get the statistics from 52 - * Returns Pointer to the statistics 53 - */ 54 - static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) 55 - { 56 - cvmx_pip_port_status_t rx_status; 57 - cvmx_pko_port_status_t tx_status; 58 - struct octeon_ethernet *priv = netdev_priv(dev); 59 - 60 - if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { 61 - if (octeon_is_simulation()) { 62 - /* The simulator doesn't support statistics */ 63 - memset(&rx_status, 0, sizeof(rx_status)); 64 - memset(&tx_status, 0, sizeof(tx_status)); 65 - } else { 66 - cvmx_pip_get_port_status(priv->port, 1, &rx_status); 67 - cvmx_pko_get_port_status(priv->port, 1, &tx_status); 68 - } 69 - 70 - priv->stats.rx_packets += rx_status.inb_packets; 71 - priv->stats.tx_packets += tx_status.packets; 72 - priv->stats.rx_bytes += rx_status.inb_octets; 73 - priv->stats.tx_bytes += tx_status.octets; 74 - priv->stats.multicast += rx_status.multicast_packets; 75 - priv->stats.rx_crc_errors += rx_status.inb_errors; 76 - priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; 77 - 78 - /* 79 - * The drop counter must be incremented atomically 80 - * since the RX tasklet also increments it. 81 - */ 82 - #ifdef CONFIG_64BIT 83 - atomic64_add(rx_status.dropped_packets, 84 - (atomic64_t *)&priv->stats.rx_dropped); 85 - #else 86 - atomic_add(rx_status.dropped_packets, 87 - (atomic_t *)&priv->stats.rx_dropped); 88 - #endif 89 - } 90 - 91 - return &priv->stats; 92 - } 93 - 94 - /** 95 - * Set the multicast list. Currently unimplemented. 
96 - * 97 - * @dev: Device to work on 98 - */ 99 - static void cvm_oct_common_set_multicast_list(struct net_device *dev) 100 - { 101 - union cvmx_gmxx_prtx_cfg gmx_cfg; 102 - struct octeon_ethernet *priv = netdev_priv(dev); 103 - int interface = INTERFACE(priv->port); 104 - int index = INDEX(priv->port); 105 - 106 - if ((interface < 2) 107 - && (cvmx_helper_interface_get_mode(interface) != 108 - CVMX_HELPER_INTERFACE_MODE_SPI)) { 109 - union cvmx_gmxx_rxx_adr_ctl control; 110 - control.u64 = 0; 111 - control.s.bcst = 1; /* Allow broadcast MAC addresses */ 112 - 113 - if (dev->mc_list || (dev->flags & IFF_ALLMULTI) || 114 - (dev->flags & IFF_PROMISC)) 115 - /* Force accept multicast packets */ 116 - control.s.mcst = 2; 117 - else 118 - /* Force reject multicat packets */ 119 - control.s.mcst = 1; 120 - 121 - if (dev->flags & IFF_PROMISC) 122 - /* 123 - * Reject matches if promisc. Since CAM is 124 - * shut off, should accept everything. 125 - */ 126 - control.s.cam_mode = 0; 127 - else 128 - /* Filter packets based on the CAM */ 129 - control.s.cam_mode = 1; 130 - 131 - gmx_cfg.u64 = 132 - cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); 133 - cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), 134 - gmx_cfg.u64 & ~1ull); 135 - 136 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 137 - control.u64); 138 - if (dev->flags & IFF_PROMISC) 139 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN 140 - (index, interface), 0); 141 - else 142 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN 143 - (index, interface), 1); 144 - 145 - cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), 146 - gmx_cfg.u64); 147 - } 148 - } 149 - 150 - /** 151 - * Set the hardware MAC address for a device 152 - * 153 - * @dev: Device to change the MAC address for 154 - * @addr: Address structure to change it too. MAC address is addr + 2. 155 - * Returns Zero on success 156 - */ 157 - static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) 158 - { 159 - struct octeon_ethernet *priv = netdev_priv(dev); 160 - union cvmx_gmxx_prtx_cfg gmx_cfg; 161 - int interface = INTERFACE(priv->port); 162 - int index = INDEX(priv->port); 163 - 164 - memcpy(dev->dev_addr, addr + 2, 6); 165 - 166 - if ((interface < 2) 167 - && (cvmx_helper_interface_get_mode(interface) != 168 - CVMX_HELPER_INTERFACE_MODE_SPI)) { 169 - int i; 170 - uint8_t *ptr = addr; 171 - uint64_t mac = 0; 172 - for (i = 0; i < 6; i++) 173 - mac = (mac << 8) | (uint64_t) (ptr[i + 2]); 174 - 175 - gmx_cfg.u64 = 176 - cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); 177 - cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), 178 - gmx_cfg.u64 & ~1ull); 179 - 180 - cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac); 181 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 182 - ptr[2]); 183 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 184 - ptr[3]); 185 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 186 - ptr[4]); 187 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 188 - ptr[5]); 189 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 190 - ptr[6]); 191 - cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 192 - ptr[7]); 193 - cvm_oct_common_set_multicast_list(dev); 194 - cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), 195 - gmx_cfg.u64); 196 - } 197 - return 0; 198 - } 199 - 200 - /** 201 - * Change the link MTU. 
Unimplemented 202 - * 203 - * @dev: Device to change 204 - * @new_mtu: The new MTU 205 - * 206 - * Returns Zero on success 207 - */ 208 - static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) 209 - { 210 - struct octeon_ethernet *priv = netdev_priv(dev); 211 - int interface = INTERFACE(priv->port); 212 - int index = INDEX(priv->port); 213 - #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 214 - int vlan_bytes = 4; 215 - #else 216 - int vlan_bytes = 0; 217 - #endif 218 - 219 - /* 220 - * Limit the MTU to make sure the ethernet packets are between 221 - * 64 bytes and 65535 bytes. 222 - */ 223 - if ((new_mtu + 14 + 4 + vlan_bytes < 64) 224 - || (new_mtu + 14 + 4 + vlan_bytes > 65392)) { 225 - pr_err("MTU must be between %d and %d.\n", 226 - 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes); 227 - return -EINVAL; 228 - } 229 - dev->mtu = new_mtu; 230 - 231 - if ((interface < 2) 232 - && (cvmx_helper_interface_get_mode(interface) != 233 - CVMX_HELPER_INTERFACE_MODE_SPI)) { 234 - /* Add ethernet header and FCS, and VLAN if configured. */ 235 - int max_packet = new_mtu + 14 + 4 + vlan_bytes; 236 - 237 - if (OCTEON_IS_MODEL(OCTEON_CN3XXX) 238 - || OCTEON_IS_MODEL(OCTEON_CN58XX)) { 239 - /* Signal errors on packets larger than the MTU */ 240 - cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), 241 - max_packet); 242 - } else { 243 - /* 244 - * Set the hardware to truncate packets larger 245 - * than the MTU and smaller the 64 bytes. 246 - */ 247 - union cvmx_pip_frm_len_chkx frm_len_chk; 248 - frm_len_chk.u64 = 0; 249 - frm_len_chk.s.minlen = 64; 250 - frm_len_chk.s.maxlen = max_packet; 251 - cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface), 252 - frm_len_chk.u64); 253 - } 254 - /* 255 - * Set the hardware to truncate packets larger than 256 - * the MTU. The jabber register must be set to a 257 - * multiple of 8 bytes, so round up. 258 - */ 259 - cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface), 260 - (max_packet + 7) & ~7u); 261 - } 262 - return 0; 263 - } 264 - 265 - /** 266 - * Per network device initialization 267 - * 268 - * @dev: Device to initialize 269 - * Returns Zero on success 270 - */ 271 - int cvm_oct_common_init(struct net_device *dev) 272 - { 273 - static int count; 274 - char mac[8] = { 0x00, 0x00, 275 - octeon_bootinfo->mac_addr_base[0], 276 - octeon_bootinfo->mac_addr_base[1], 277 - octeon_bootinfo->mac_addr_base[2], 278 - octeon_bootinfo->mac_addr_base[3], 279 - octeon_bootinfo->mac_addr_base[4], 280 - octeon_bootinfo->mac_addr_base[5] + count 281 - }; 282 - struct octeon_ethernet *priv = netdev_priv(dev); 283 - 284 - /* 285 - * Force the interface to use the POW send if always_use_pow 286 - * was specified or it is in the pow send list. 
287 - */ 288 - if ((pow_send_group != -1) 289 - && (always_use_pow || strstr(pow_send_list, dev->name))) 290 - priv->queue = -1; 291 - 292 - if (priv->queue != -1) { 293 - dev->hard_start_xmit = cvm_oct_xmit; 294 - if (USE_HW_TCPUDP_CHECKSUM) 295 - dev->features |= NETIF_F_IP_CSUM; 296 - } else 297 - dev->hard_start_xmit = cvm_oct_xmit_pow; 298 - count++; 299 - 300 - dev->get_stats = cvm_oct_common_get_stats; 301 - dev->set_mac_address = cvm_oct_common_set_mac_address; 302 - dev->set_multicast_list = cvm_oct_common_set_multicast_list; 303 - dev->change_mtu = cvm_oct_common_change_mtu; 304 - dev->do_ioctl = cvm_oct_ioctl; 305 - /* We do our own locking, Linux doesn't need to */ 306 - dev->features |= NETIF_F_LLTX; 307 - SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); 308 - #ifdef CONFIG_NET_POLL_CONTROLLER 309 - dev->poll_controller = cvm_oct_poll_controller; 310 - #endif 311 - 312 - cvm_oct_mdio_setup_device(dev); 313 - dev->set_mac_address(dev, mac); 314 - dev->change_mtu(dev, dev->mtu); 315 - 316 - /* 317 - * Zero out stats for port so we won't mistakenly show 318 - * counters from the bootloader. 319 - */ 320 - memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats)); 321 - 322 - return 0; 323 - } 324 - 325 - void cvm_oct_common_uninit(struct net_device *dev) 326 - { 327 - /* Currently nothing to do */ 328 - }
-29
drivers/staging/octeon/ethernet-common.h
··· 1 - /********************************************************************* 2 - * Author: Cavium Networks 3 - * 4 - * Contact: support@caviumnetworks.com 5 - * This file is part of the OCTEON SDK 6 - * 7 - * Copyright (c) 2003-2007 Cavium Networks 8 - * 9 - * This file is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License, Version 2, as 11 - * published by the Free Software Foundation. 12 - * 13 - * This file is distributed in the hope that it will be useful, but 14 - * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 - * NONINFRINGEMENT. See the GNU General Public License for more 17 - * details. 18 - * 19 - * You should have received a copy of the GNU General Public License 20 - * along with this file; if not, write to the Free Software 21 - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 - * or visit http://www.gnu.org/licenses/. 23 - * 24 - * This file may also be available under a different license from Cavium. 25 - * Contact Cavium Networks for more information 26 - *********************************************************************/ 27 - 28 - int cvm_oct_common_init(struct net_device *dev); 29 - void cvm_oct_common_uninit(struct net_device *dev);
+2
drivers/staging/octeon/ethernet-defines.h
··· 117 117 118 118 /* Maximum number of packets to process per interrupt. */ 119 119 #define MAX_RX_PACKETS 120 120 + /* Maximum number of SKBs to try to free per xmit packet. */ 121 + #define MAX_SKB_TO_FREE 10 120 122 #define MAX_OUT_QUEUE_DEPTH 1000 121 123 122 124 #ifndef CONFIG_SMP
+3 -6
drivers/staging/octeon/ethernet-rgmii.c
··· 33 33 34 34 #include "ethernet-defines.h" 35 35 #include "octeon-ethernet.h" 36 - #include "ethernet-common.h" 37 36 #include "ethernet-util.h" 38 37 39 38 #include "cvmx-helper.h" ··· 264 265 return return_status; 265 266 } 266 267 267 - static int cvm_oct_rgmii_open(struct net_device *dev) 268 + int cvm_oct_rgmii_open(struct net_device *dev) 268 269 { 269 270 union cvmx_gmxx_prtx_cfg gmx_cfg; 270 271 struct octeon_ethernet *priv = netdev_priv(dev); ··· 285 286 return 0; 286 287 } 287 288 288 - static int cvm_oct_rgmii_stop(struct net_device *dev) 289 + int cvm_oct_rgmii_stop(struct net_device *dev) 289 290 { 290 291 union cvmx_gmxx_prtx_cfg gmx_cfg; 291 292 struct octeon_ethernet *priv = netdev_priv(dev); ··· 304 305 int r; 305 306 306 307 cvm_oct_common_init(dev); 307 - dev->open = cvm_oct_rgmii_open; 308 - dev->stop = cvm_oct_rgmii_stop; 309 - dev->stop(dev); 308 + dev->netdev_ops->ndo_stop(dev); 310 309 311 310 /* 312 311 * Due to GMX errata in CN3XXX series chips, it is necessary
+3 -6
drivers/staging/octeon/ethernet-sgmii.c
··· 34 34 #include "ethernet-defines.h" 35 35 #include "octeon-ethernet.h" 36 36 #include "ethernet-util.h" 37 - #include "ethernet-common.h" 38 37 39 38 #include "cvmx-helper.h" 40 39 41 40 #include "cvmx-gmxx-defs.h" 42 41 43 - static int cvm_oct_sgmii_open(struct net_device *dev) 42 + int cvm_oct_sgmii_open(struct net_device *dev) 44 43 { 45 44 union cvmx_gmxx_prtx_cfg gmx_cfg; 46 45 struct octeon_ethernet *priv = netdev_priv(dev); ··· 60 61 return 0; 61 62 } 62 63 63 - static int cvm_oct_sgmii_stop(struct net_device *dev) 64 + int cvm_oct_sgmii_stop(struct net_device *dev) 64 65 { 65 66 union cvmx_gmxx_prtx_cfg gmx_cfg; 66 67 struct octeon_ethernet *priv = netdev_priv(dev); ··· 112 113 { 113 114 struct octeon_ethernet *priv = netdev_priv(dev); 114 115 cvm_oct_common_init(dev); 115 - dev->open = cvm_oct_sgmii_open; 116 - dev->stop = cvm_oct_sgmii_stop; 117 - dev->stop(dev); 116 + dev->netdev_ops->ndo_stop(dev); 118 117 if (!octeon_is_simulation()) 119 118 priv->poll = cvm_oct_sgmii_poll; 120 119
-1
drivers/staging/octeon/ethernet-spi.c
··· 33 33 34 34 #include "ethernet-defines.h" 35 35 #include "octeon-ethernet.h" 36 - #include "ethernet-common.h" 37 36 #include "ethernet-util.h" 38 37 39 38 #include "cvmx-spi.h"
+36 -26
drivers/staging/octeon/ethernet-tx.c
··· 47 47 48 48 #include "ethernet-defines.h" 49 49 #include "octeon-ethernet.h" 50 + #include "ethernet-tx.h" 50 51 #include "ethernet-util.h" 51 52 52 53 #include "cvmx-wqe.h" ··· 83 82 uint64_t old_scratch2; 84 83 int dropped; 85 84 int qos; 85 + int queue_it_up; 86 86 struct octeon_ethernet *priv = netdev_priv(dev); 87 - int32_t in_use; 87 + int32_t skb_to_free; 88 + int32_t undo; 88 89 int32_t buffers_to_free; 89 90 #if REUSE_SKBUFFS_WITHOUT_FREE 90 91 unsigned char *fpa_head; ··· 123 120 old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); 124 121 125 122 /* 126 - * Assume we're going to be able t osend this 127 - * packet. Fetch and increment the number of pending 128 - * packets for output. 123 + * Fetch and increment the number of packets to be 124 + * freed. 129 125 */ 130 126 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8, 131 127 FAU_NUM_PACKET_BUFFERS_TO_FREE, 132 128 0); 133 129 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, 134 - priv->fau + qos * 4, 1); 130 + priv->fau + qos * 4, 131 + MAX_SKB_TO_FREE); 135 132 } 136 133 137 134 /* ··· 256 253 257 254 /* 258 255 * The skbuff will be reused without ever being freed. We must 259 - * cleanup a bunch of Linux stuff. 256 + * cleanup a bunch of core things. 260 257 */ 261 - dst_release(skb->dst); 262 - skb->dst = NULL; 258 + dst_release(skb_dst(skb)); 259 + skb_dst_set(skb, NULL); 263 260 #ifdef CONFIG_XFRM 264 261 secpath_put(skb->sp); 265 262 skb->sp = NULL; ··· 289 286 if (USE_ASYNC_IOBDMA) { 290 287 /* Get the number of skbuffs in use by the hardware */ 291 288 CVMX_SYNCIOBDMA; 292 - in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH); 289 + skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); 293 290 buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); 294 291 } else { 295 292 /* Get the number of skbuffs in use by the hardware */ 296 - in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1); 293 + skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 294 + MAX_SKB_TO_FREE); 297 295 buffers_to_free = 298 296 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 299 297 } 298 + 299 + /* 300 + * We try to claim MAX_SKB_TO_FREE buffers. If there were not 301 + * that many available, we have to un-claim (undo) any that 302 + * were in excess. If skb_to_free is positive we will free 303 + * that many buffers. 304 + */ 305 + undo = skb_to_free > 0 ? 306 + MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; 307 + if (undo > 0) 308 + cvmx_fau_atomic_add32(priv->fau+qos*4, -undo); 309 + skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? 
310 + MAX_SKB_TO_FREE : -skb_to_free; 300 311 301 312 /* 302 313 * If we're sending faster than the receive can free them then ··· 347 330 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); 348 331 } 349 332 333 + queue_it_up = 0; 350 334 if (unlikely(dropped)) { 351 335 dev_kfree_skb_any(skb); 352 - cvmx_fau_atomic_add32(priv->fau + qos * 4, -1); 353 336 priv->stats.tx_dropped++; 354 337 } else { 355 338 if (USE_SKBUFFS_IN_HW) { 356 339 /* Put this packet on the queue to be freed later */ 357 340 if (pko_command.s.dontfree) 358 - skb_queue_tail(&priv->tx_free_list[qos], skb); 359 - else { 341 + queue_it_up = 1; 342 + else 360 343 cvmx_fau_atomic_add32 361 344 (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); 362 - cvmx_fau_atomic_add32(priv->fau + qos * 4, -1); 363 - } 364 345 } else { 365 346 /* Put this packet on the queue to be freed later */ 366 - skb_queue_tail(&priv->tx_free_list[qos], skb); 347 + queue_it_up = 1; 367 348 } 368 349 } 369 350 370 - /* Free skbuffs not in use by the hardware, possibly two at a time */ 371 - if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) { 351 + if (queue_it_up) { 372 352 spin_lock(&priv->tx_free_list[qos].lock); 373 - /* 374 - * Check again now that we have the lock. It might 375 - * have changed. 376 - */ 377 - if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) 378 - dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); 379 - if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) 380 - dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); 353 + __skb_queue_tail(&priv->tx_free_list[qos], skb); 354 + cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0); 381 355 spin_unlock(&priv->tx_free_list[qos].lock); 356 + } else { 357 + cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); 382 358 } 383 359 384 360 return 0;
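[Editor's illustration, not part of this merge] The claim/undo arithmetic introduced above (MAX_SKB_TO_FREE buffers claimed up front, the excess returned) can be shown in isolation. A small sketch in plain C, assuming the per-queue counter is negative by the number of skbs pending free and that 'pending' is the value sampled at claim time; the helper name is invented for illustration and nothing here uses the Octeon FAU API:

    #include <stdio.h>

    #define MAX_SKB_TO_FREE 10

    /*
     * Illustration only: 'pending' is the counter value sampled when
     * MAX_SKB_TO_FREE buffers are claimed (negative means that many skbs
     * are waiting to be freed). Compute how much of the claim must be
     * undone and how many skbs may really be freed, mirroring the logic
     * in cvm_oct_xmit().
     */
    static void claim_skbs(int pending, int *undo, int *to_free)
    {
            *undo = pending > 0 ? MAX_SKB_TO_FREE : pending + MAX_SKB_TO_FREE;
            /* the caller only gives the claim back when *undo > 0 */
            *to_free = -pending > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -pending;
    }

    int main(void)
    {
            int undo, to_free;

            claim_skbs(-3, &undo, &to_free);  /* 3 pending: undo 7, free 3 */
            printf("undo=%d to_free=%d\n", undo, to_free);
            claim_skbs(-15, &undo, &to_free); /* 15 pending: undo -5 (not applied), free 10 */
            printf("undo=%d to_free=%d\n", undo, to_free);
            return 0;
    }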
+25
drivers/staging/octeon/ethernet-tx.h
··· 30 30 int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 31 31 int do_free, int qos); 32 32 void cvm_oct_tx_shutdown(struct net_device *dev); 33 + 34 + /** 35 + * Free dead transmit skbs. 36 + * 37 + * @priv: The driver data 38 + * @skb_to_free: The number of SKBs to free (free none if negative). 39 + * @qos: The queue to free from. 40 + * @take_lock: If true, acquire the skb list lock. 41 + */ 42 + static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv, 43 + int skb_to_free, 44 + int qos, int take_lock) 45 + { 46 + /* Free skbuffs not in use by the hardware. */ 47 + if (skb_to_free > 0) { 48 + if (take_lock) 49 + spin_lock(&priv->tx_free_list[qos].lock); 50 + while (skb_to_free > 0) { 51 + dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); 52 + skb_to_free--; 53 + } 54 + if (take_lock) 55 + spin_unlock(&priv->tx_free_list[qos].lock); 56 + } 57 + }
+3 -6
drivers/staging/octeon/ethernet-xaui.c
··· 33 33 34 34 #include "ethernet-defines.h" 35 35 #include "octeon-ethernet.h" 36 - #include "ethernet-common.h" 37 36 #include "ethernet-util.h" 38 37 39 38 #include "cvmx-helper.h" 40 39 41 40 #include "cvmx-gmxx-defs.h" 42 41 43 - static int cvm_oct_xaui_open(struct net_device *dev) 42 + int cvm_oct_xaui_open(struct net_device *dev) 44 43 { 45 44 union cvmx_gmxx_prtx_cfg gmx_cfg; 46 45 struct octeon_ethernet *priv = netdev_priv(dev); ··· 59 60 return 0; 60 61 } 61 62 62 - static int cvm_oct_xaui_stop(struct net_device *dev) 63 + int cvm_oct_xaui_stop(struct net_device *dev) 63 64 { 64 65 union cvmx_gmxx_prtx_cfg gmx_cfg; 65 66 struct octeon_ethernet *priv = netdev_priv(dev); ··· 111 112 { 112 113 struct octeon_ethernet *priv = netdev_priv(dev); 113 114 cvm_oct_common_init(dev); 114 - dev->open = cvm_oct_xaui_open; 115 - dev->stop = cvm_oct_xaui_stop; 116 - dev->stop(dev); 115 + dev->netdev_ops->ndo_stop(dev); 117 116 if (!octeon_is_simulation()) 118 117 priv->poll = cvm_oct_xaui_poll; 119 118
+412 -58
drivers/staging/octeon/ethernet.c
··· 37 37 #include <asm/octeon/octeon.h> 38 38 39 39 #include "ethernet-defines.h" 40 + #include "octeon-ethernet.h" 40 41 #include "ethernet-mem.h" 41 42 #include "ethernet-rx.h" 42 43 #include "ethernet-tx.h" 44 + #include "ethernet-mdio.h" 43 45 #include "ethernet-util.h" 44 46 #include "ethernet-proc.h" 45 - #include "ethernet-common.h" 46 - #include "octeon-ethernet.h" 47 + 47 48 48 49 #include "cvmx-pip.h" 49 50 #include "cvmx-pko.h" ··· 52 51 #include "cvmx-ipd.h" 53 52 #include "cvmx-helper.h" 54 53 54 + #include "cvmx-gmxx-defs.h" 55 55 #include "cvmx-smix-defs.h" 56 56 57 57 #if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \ ··· 131 129 */ 132 130 static void cvm_do_timer(unsigned long arg) 133 131 { 132 + int32_t skb_to_free, undo; 133 + int queues_per_port; 134 + int qos; 135 + struct octeon_ethernet *priv; 134 136 static int port; 135 - if (port < CVMX_PIP_NUM_INPUT_PORTS) { 136 - if (cvm_oct_device[port]) { 137 - int queues_per_port; 138 - int qos; 139 - struct octeon_ethernet *priv = 140 - netdev_priv(cvm_oct_device[port]); 141 - if (priv->poll) { 142 - /* skip polling if we don't get the lock */ 143 - if (!down_trylock(&mdio_sem)) { 144 - priv->poll(cvm_oct_device[port]); 145 - up(&mdio_sem); 146 - } 147 - } 148 137 149 - queues_per_port = cvmx_pko_get_num_queues(port); 150 - /* Drain any pending packets in the free list */ 151 - for (qos = 0; qos < queues_per_port; qos++) { 152 - if (skb_queue_len(&priv->tx_free_list[qos])) { 153 - spin_lock(&priv->tx_free_list[qos]. 154 - lock); 155 - while (skb_queue_len 156 - (&priv->tx_free_list[qos]) > 157 - cvmx_fau_fetch_and_add32(priv-> 158 - fau + 159 - qos * 4, 160 - 0)) 161 - dev_kfree_skb(__skb_dequeue 162 - (&priv-> 163 - tx_free_list 164 - [qos])); 165 - spin_unlock(&priv->tx_free_list[qos]. 166 - lock); 167 - } 168 - } 169 - cvm_oct_device[port]->get_stats(cvm_oct_device[port]); 170 - } 171 - port++; 172 - /* Poll the next port in a 50th of a second. 173 - This spreads the polling of ports out a little bit */ 174 - mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); 175 - } else { 138 + if (port >= CVMX_PIP_NUM_INPUT_PORTS) { 139 + /* 140 + * All ports have been polled. Start the next 141 + * iteration through the ports in one second. 142 + */ 176 143 port = 0; 177 - /* All ports have been polled. Start the next iteration through 178 - the ports in one second */ 179 144 mod_timer(&cvm_oct_poll_timer, jiffies + HZ); 145 + return; 180 146 } 147 + if (!cvm_oct_device[port]) 148 + goto out; 149 + 150 + priv = netdev_priv(cvm_oct_device[port]); 151 + if (priv->poll) { 152 + /* skip polling if we don't get the lock */ 153 + if (!down_trylock(&mdio_sem)) { 154 + priv->poll(cvm_oct_device[port]); 155 + up(&mdio_sem); 156 + } 157 + } 158 + 159 + queues_per_port = cvmx_pko_get_num_queues(port); 160 + /* Drain any pending packets in the free list */ 161 + for (qos = 0; qos < queues_per_port; qos++) { 162 + if (skb_queue_len(&priv->tx_free_list[qos]) == 0) 163 + continue; 164 + skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 165 + MAX_SKB_TO_FREE); 166 + undo = skb_to_free > 0 ? 167 + MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; 168 + if (undo > 0) 169 + cvmx_fau_atomic_add32(priv->fau+qos*4, -undo); 170 + skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? 171 + MAX_SKB_TO_FREE : -skb_to_free; 172 + cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1); 173 + } 174 + cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]); 175 + 176 + out: 177 + port++; 178 + /* Poll the next port in a 50th of a second. 
179 + This spreads the polling of ports out a little bit */ 180 + mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); 181 181 } 182 182 183 183 /** ··· 250 246 EXPORT_SYMBOL(cvm_oct_free_work); 251 247 252 248 /** 249 + * Get the low level ethernet statistics 250 + * 251 + * @dev: Device to get the statistics from 252 + * Returns Pointer to the statistics 253 + */ 254 + static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) 255 + { 256 + cvmx_pip_port_status_t rx_status; 257 + cvmx_pko_port_status_t tx_status; 258 + struct octeon_ethernet *priv = netdev_priv(dev); 259 + 260 + if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { 261 + if (octeon_is_simulation()) { 262 + /* The simulator doesn't support statistics */ 263 + memset(&rx_status, 0, sizeof(rx_status)); 264 + memset(&tx_status, 0, sizeof(tx_status)); 265 + } else { 266 + cvmx_pip_get_port_status(priv->port, 1, &rx_status); 267 + cvmx_pko_get_port_status(priv->port, 1, &tx_status); 268 + } 269 + 270 + priv->stats.rx_packets += rx_status.inb_packets; 271 + priv->stats.tx_packets += tx_status.packets; 272 + priv->stats.rx_bytes += rx_status.inb_octets; 273 + priv->stats.tx_bytes += tx_status.octets; 274 + priv->stats.multicast += rx_status.multicast_packets; 275 + priv->stats.rx_crc_errors += rx_status.inb_errors; 276 + priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; 277 + 278 + /* 279 + * The drop counter must be incremented atomically 280 + * since the RX tasklet also increments it. 281 + */ 282 + #ifdef CONFIG_64BIT 283 + atomic64_add(rx_status.dropped_packets, 284 + (atomic64_t *)&priv->stats.rx_dropped); 285 + #else 286 + atomic_add(rx_status.dropped_packets, 287 + (atomic_t *)&priv->stats.rx_dropped); 288 + #endif 289 + } 290 + 291 + return &priv->stats; 292 + } 293 + 294 + /** 295 + * Change the link MTU. Unimplemented 296 + * 297 + * @dev: Device to change 298 + * @new_mtu: The new MTU 299 + * 300 + * Returns Zero on success 301 + */ 302 + static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) 303 + { 304 + struct octeon_ethernet *priv = netdev_priv(dev); 305 + int interface = INTERFACE(priv->port); 306 + int index = INDEX(priv->port); 307 + #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 308 + int vlan_bytes = 4; 309 + #else 310 + int vlan_bytes = 0; 311 + #endif 312 + 313 + /* 314 + * Limit the MTU to make sure the ethernet packets are between 315 + * 64 bytes and 65535 bytes. 316 + */ 317 + if ((new_mtu + 14 + 4 + vlan_bytes < 64) 318 + || (new_mtu + 14 + 4 + vlan_bytes > 65392)) { 319 + pr_err("MTU must be between %d and %d.\n", 320 + 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes); 321 + return -EINVAL; 322 + } 323 + dev->mtu = new_mtu; 324 + 325 + if ((interface < 2) 326 + && (cvmx_helper_interface_get_mode(interface) != 327 + CVMX_HELPER_INTERFACE_MODE_SPI)) { 328 + /* Add ethernet header and FCS, and VLAN if configured. */ 329 + int max_packet = new_mtu + 14 + 4 + vlan_bytes; 330 + 331 + if (OCTEON_IS_MODEL(OCTEON_CN3XXX) 332 + || OCTEON_IS_MODEL(OCTEON_CN58XX)) { 333 + /* Signal errors on packets larger than the MTU */ 334 + cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), 335 + max_packet); 336 + } else { 337 + /* 338 + * Set the hardware to truncate packets larger 339 + * than the MTU and smaller the 64 bytes. 
340 + */
341 + union cvmx_pip_frm_len_chkx frm_len_chk;
342 + frm_len_chk.u64 = 0;
343 + frm_len_chk.s.minlen = 64;
344 + frm_len_chk.s.maxlen = max_packet;
345 + cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
346 + frm_len_chk.u64);
347 + }
348 + /*
349 + * Set the hardware to truncate packets larger than
350 + * the MTU. The jabber register must be set to a
351 + * multiple of 8 bytes, so round up.
352 + */
353 + cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
354 + (max_packet + 7) & ~7u);
355 + }
356 + return 0;
357 + }
358 +
359 + /**
360 + * Set the multicast list. Currently unimplemented.
361 + *
362 + * @dev: Device to work on
363 + */
364 + static void cvm_oct_common_set_multicast_list(struct net_device *dev)
365 + {
366 + union cvmx_gmxx_prtx_cfg gmx_cfg;
367 + struct octeon_ethernet *priv = netdev_priv(dev);
368 + int interface = INTERFACE(priv->port);
369 + int index = INDEX(priv->port);
370 +
371 + if ((interface < 2)
372 + && (cvmx_helper_interface_get_mode(interface) !=
373 + CVMX_HELPER_INTERFACE_MODE_SPI)) {
374 + union cvmx_gmxx_rxx_adr_ctl control;
375 + control.u64 = 0;
376 + control.s.bcst = 1; /* Allow broadcast MAC addresses */
377 +
378 + if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
379 + (dev->flags & IFF_PROMISC))
380 + /* Force accept multicast packets */
381 + control.s.mcst = 2;
382 + else
383 + /* Force reject multicast packets */
384 + control.s.mcst = 1;
385 +
386 + if (dev->flags & IFF_PROMISC)
387 + /*
388 + * Reject matches if promisc. Since CAM is
389 + * shut off, should accept everything.
390 + */
391 + control.s.cam_mode = 0;
392 + else
393 + /* Filter packets based on the CAM */
394 + control.s.cam_mode = 1;
395 +
396 + gmx_cfg.u64 =
397 + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
398 + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
399 + gmx_cfg.u64 & ~1ull);
400 +
401 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
402 + control.u64);
403 + if (dev->flags & IFF_PROMISC)
404 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
405 + (index, interface), 0);
406 + else
407 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
408 + (index, interface), 1);
409 +
410 + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
411 + gmx_cfg.u64);
412 + }
413 + }
414 +
415 + /**
416 + * Set the hardware MAC address for a device
417 + *
418 + * @dev: Device to change the MAC address for
419 + * @addr: Address structure to change it to. MAC address is addr + 2.
420 + * Returns Zero on success
421 + */
422 + static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
423 + {
424 + struct octeon_ethernet *priv = netdev_priv(dev);
425 + union cvmx_gmxx_prtx_cfg gmx_cfg;
426 + int interface = INTERFACE(priv->port);
427 + int index = INDEX(priv->port);
428 +
429 + memcpy(dev->dev_addr, addr + 2, 6);
430 +
431 + if ((interface < 2)
432 + && (cvmx_helper_interface_get_mode(interface) !=
433 + CVMX_HELPER_INTERFACE_MODE_SPI)) {
434 + int i;
435 + uint8_t *ptr = addr;
436 + uint64_t mac = 0;
437 + for (i = 0; i < 6; i++)
438 + mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
439 +
440 + gmx_cfg.u64 =
441 + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
442 + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
443 + gmx_cfg.u64 & ~1ull);
444 +
445 + cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
446 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
447 + ptr[2]);
448 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
449 + ptr[3]);
450 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
451 + ptr[4]);
452 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
453 + ptr[5]);
454 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
455 + ptr[6]);
456 + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
457 + ptr[7]);
458 + cvm_oct_common_set_multicast_list(dev);
459 + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
460 + gmx_cfg.u64);
461 + }
462 + return 0;
463 + }
464 +
465 + /**
466 + * Per network device initialization
467 + *
468 + * @dev: Device to initialize
469 + * Returns Zero on success
470 + */
471 + int cvm_oct_common_init(struct net_device *dev)
472 + {
473 + static int count;
474 + char mac[8] = { 0x00, 0x00,
475 + octeon_bootinfo->mac_addr_base[0],
476 + octeon_bootinfo->mac_addr_base[1],
477 + octeon_bootinfo->mac_addr_base[2],
478 + octeon_bootinfo->mac_addr_base[3],
479 + octeon_bootinfo->mac_addr_base[4],
480 + octeon_bootinfo->mac_addr_base[5] + count
481 + };
482 + struct octeon_ethernet *priv = netdev_priv(dev);
483 +
484 + /*
485 + * Force the interface to use the POW send if always_use_pow
486 + * was specified or it is in the pow send list.
487 + */
488 + if ((pow_send_group != -1)
489 + && (always_use_pow || strstr(pow_send_list, dev->name)))
490 + priv->queue = -1;
491 +
492 + if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
493 + dev->features |= NETIF_F_IP_CSUM;
494 +
495 + count++;
496 +
497 + /* We do our own locking, Linux doesn't need to */
498 + dev->features |= NETIF_F_LLTX;
499 + SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
500 +
501 + cvm_oct_mdio_setup_device(dev);
502 + dev->netdev_ops->ndo_set_mac_address(dev, mac);
503 + dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
504 +
505 + /*
506 + * Zero out stats for port so we won't mistakenly show
507 + * counters from the bootloader.
508 + */
509 + memset(dev->netdev_ops->ndo_get_stats(dev), 0,
510 + sizeof(struct net_device_stats));
511 +
512 + return 0;
513 + }
514 +
515 + void cvm_oct_common_uninit(struct net_device *dev)
516 + {
517 + /* Currently nothing to do */
518 + }
519 +
520 + static const struct net_device_ops cvm_oct_npi_netdev_ops = {
521 + .ndo_init = cvm_oct_common_init,
522 + .ndo_uninit = cvm_oct_common_uninit,
523 + .ndo_start_xmit = cvm_oct_xmit,
524 + .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
525 + .ndo_set_mac_address = cvm_oct_common_set_mac_address,
526 + .ndo_do_ioctl = cvm_oct_ioctl,
527 + .ndo_change_mtu = cvm_oct_common_change_mtu,
528 + .ndo_get_stats = cvm_oct_common_get_stats,
529 + #ifdef CONFIG_NET_POLL_CONTROLLER
530 + .ndo_poll_controller = cvm_oct_poll_controller,
531 + #endif
532 + };
533 + static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
534 + .ndo_init = cvm_oct_xaui_init,
535 + .ndo_uninit = cvm_oct_xaui_uninit,
536 + .ndo_open = cvm_oct_xaui_open,
537 + .ndo_stop = cvm_oct_xaui_stop,
538 + .ndo_start_xmit = cvm_oct_xmit,
539 + .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
540 + .ndo_set_mac_address = cvm_oct_common_set_mac_address,
541 + .ndo_do_ioctl = cvm_oct_ioctl,
542 + .ndo_change_mtu = cvm_oct_common_change_mtu,
543 + .ndo_get_stats = cvm_oct_common_get_stats,
544 + #ifdef CONFIG_NET_POLL_CONTROLLER
545 + .ndo_poll_controller = cvm_oct_poll_controller,
546 + #endif
547 + };
548 + static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
549 + .ndo_init = cvm_oct_sgmii_init,
550 + .ndo_uninit = cvm_oct_sgmii_uninit,
551 + .ndo_open = cvm_oct_sgmii_open,
552 + .ndo_stop = cvm_oct_sgmii_stop,
553 + .ndo_start_xmit = cvm_oct_xmit,
554 + .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
555 + .ndo_set_mac_address = cvm_oct_common_set_mac_address,
556 + .ndo_do_ioctl = cvm_oct_ioctl,
557 + .ndo_change_mtu = cvm_oct_common_change_mtu,
558 + .ndo_get_stats = cvm_oct_common_get_stats,
559 + #ifdef CONFIG_NET_POLL_CONTROLLER
560 + .ndo_poll_controller = cvm_oct_poll_controller,
561 + #endif
562 + };
563 + static const struct net_device_ops cvm_oct_spi_netdev_ops = {
564 + .ndo_init = cvm_oct_spi_init,
565 + .ndo_uninit = cvm_oct_spi_uninit,
566 + .ndo_start_xmit = cvm_oct_xmit,
567 + .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
568 + .ndo_set_mac_address = cvm_oct_common_set_mac_address,
569 + .ndo_do_ioctl = cvm_oct_ioctl,
570 + .ndo_change_mtu = cvm_oct_common_change_mtu,
571 + .ndo_get_stats = cvm_oct_common_get_stats,
572 + #ifdef CONFIG_NET_POLL_CONTROLLER
573 + .ndo_poll_controller = cvm_oct_poll_controller,
574 + #endif
575 + };
576 + static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
577 + .ndo_init = cvm_oct_rgmii_init,
578 + .ndo_uninit = cvm_oct_rgmii_uninit,
579 + .ndo_open = cvm_oct_rgmii_open,
580 + .ndo_stop = cvm_oct_rgmii_stop,
581 + .ndo_start_xmit = cvm_oct_xmit,
582 + .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
583 + .ndo_set_mac_address = cvm_oct_common_set_mac_address,
584 + .ndo_do_ioctl = cvm_oct_ioctl,
585 + .ndo_change_mtu = cvm_oct_common_change_mtu,
586 + .ndo_get_stats = cvm_oct_common_get_stats,
587 + #ifdef CONFIG_NET_POLL_CONTROLLER
588 + .ndo_poll_controller = cvm_oct_poll_controller,
589 + #endif
590 + };
591 + static const struct net_device_ops cvm_oct_pow_netdev_ops = {
592 + .ndo_init = cvm_oct_common_init,
593 + .ndo_start_xmit = cvm_oct_xmit_pow,
594 + .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
595 + .ndo_set_mac_address = cvm_oct_common_set_mac_address,
596 + .ndo_do_ioctl = cvm_oct_ioctl,
597 + .ndo_change_mtu = cvm_oct_common_change_mtu,
598 + .ndo_get_stats = cvm_oct_common_get_stats,
599 + #ifdef CONFIG_NET_POLL_CONTROLLER
600 + .ndo_poll_controller = cvm_oct_poll_controller,
601 + #endif
602 + };
603 +
604 + /**
253 605 * Module/ driver initialization. Creates the linux network
254 606 * devices.
255 607 *
···
663 303 struct octeon_ethernet *priv = netdev_priv(dev);
664 304 memset(priv, 0, sizeof(struct octeon_ethernet));
665 305
666 - dev->init = cvm_oct_common_init;
306 + dev->netdev_ops = &cvm_oct_pow_netdev_ops;
667 307 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
668 308 priv->port = CVMX_PIP_NUM_INPUT_PORTS;
669 309 priv->queue = -1;
···
732 372 break;
733 373
734 374 case CVMX_HELPER_INTERFACE_MODE_NPI:
735 - dev->init = cvm_oct_common_init;
736 - dev->uninit = cvm_oct_common_uninit;
375 + dev->netdev_ops = &cvm_oct_npi_netdev_ops;
737 376 strcpy(dev->name, "npi%d");
738 377 break;
739 378
740 379 case CVMX_HELPER_INTERFACE_MODE_XAUI:
741 - dev->init = cvm_oct_xaui_init;
742 - dev->uninit = cvm_oct_xaui_uninit;
380 + dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
743 381 strcpy(dev->name, "xaui%d");
744 382 break;
745 383
746 384 case CVMX_HELPER_INTERFACE_MODE_LOOP:
747 - dev->init = cvm_oct_common_init;
748 - dev->uninit = cvm_oct_common_uninit;
385 + dev->netdev_ops = &cvm_oct_npi_netdev_ops;
749 386 strcpy(dev->name, "loop%d");
750 387 break;
751 388
752 389 case CVMX_HELPER_INTERFACE_MODE_SGMII:
753 - dev->init = cvm_oct_sgmii_init;
754 - dev->uninit = cvm_oct_sgmii_uninit;
390 + dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
755 391 strcpy(dev->name, "eth%d");
756 392 break;
757 393
758 394 case CVMX_HELPER_INTERFACE_MODE_SPI:
759 - dev->init = cvm_oct_spi_init;
760 - dev->uninit = cvm_oct_spi_uninit;
395 + dev->netdev_ops = &cvm_oct_spi_netdev_ops;
761 396 strcpy(dev->name, "spi%d");
762 397 break;
763 398
764 399 case CVMX_HELPER_INTERFACE_MODE_RGMII:
765 400 case CVMX_HELPER_INTERFACE_MODE_GMII:
766 - dev->init = cvm_oct_rgmii_init;
767 - dev->uninit = cvm_oct_rgmii_uninit;
401 + dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
768 402 strcpy(dev->name, "eth%d");
769 403 break;
770 404 }
771 405
772 - if (!dev->init) {
406 + if (!dev->netdev_ops) {
773 407 kfree(dev);
774 408 } else if (register_netdev(dev) < 0) {
775 409 pr_err("Failed to register ethernet device "
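The net effect of the ethernet.c changes above is that the old per-device dev->init/dev->uninit (and implicit get_stats) hooks are replaced by const struct net_device_ops tables, one per interface type, assigned to dev->netdev_ops before register_netdev(). For reference, a minimal, self-contained sketch of the same registration pattern follows. It is not part of this commit; the demo_* names are hypothetical, and only the ops-table wiring mirrors what the driver now does:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static struct net_device *demo_dev;

/* Drop every packet handed to us; a real driver would hand it to hardware. */
static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int demo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int demo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/*
 * A single const ops table, analogous to the per-interface-type tables
 * (npi, xaui, sgmii, spi, rgmii, pow) defined in the diff above.
 */
static const struct net_device_ops demo_netdev_ops = {
	.ndo_open	= demo_open,
	.ndo_stop	= demo_stop,
	.ndo_start_xmit	= demo_start_xmit,
};

static int __init demo_init(void)
{
	demo_dev = alloc_etherdev(0);
	if (!demo_dev)
		return -ENOMEM;
	random_ether_addr(demo_dev->dev_addr);
	demo_dev->netdev_ops = &demo_netdev_ops;	/* instead of dev->init etc. */
	if (register_netdev(demo_dev)) {
		free_netdev(demo_dev);
		return -ENODEV;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_netdev(demo_dev);
	free_netdev(demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the ops table is shared and const, per-device state lives behind netdev_priv() rather than in per-device function pointers, which is why the switch statement above can simply pick one of the static tables instead of assigning dev->init/dev->uninit case by case.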
+11
drivers/staging/octeon/octeon-ethernet.h
··· 111 111
112 112 extern int cvm_oct_rgmii_init(struct net_device *dev);
113 113 extern void cvm_oct_rgmii_uninit(struct net_device *dev);
114 + extern int cvm_oct_rgmii_open(struct net_device *dev);
115 + extern int cvm_oct_rgmii_stop(struct net_device *dev);
116 +
114 117 extern int cvm_oct_sgmii_init(struct net_device *dev);
115 118 extern void cvm_oct_sgmii_uninit(struct net_device *dev);
119 + extern int cvm_oct_sgmii_open(struct net_device *dev);
120 + extern int cvm_oct_sgmii_stop(struct net_device *dev);
121 +
116 122 extern int cvm_oct_spi_init(struct net_device *dev);
117 123 extern void cvm_oct_spi_uninit(struct net_device *dev);
118 124 extern int cvm_oct_xaui_init(struct net_device *dev);
119 125 extern void cvm_oct_xaui_uninit(struct net_device *dev);
126 + extern int cvm_oct_xaui_open(struct net_device *dev);
127 + extern int cvm_oct_xaui_stop(struct net_device *dev);
128 +
129 + extern int cvm_oct_common_init(struct net_device *dev);
130 + extern void cvm_oct_common_uninit(struct net_device *dev);
120 131
121 132 extern int always_use_pow;
122 133 extern int pow_send_group;
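As a closing aside on the MTU handling added in cvm_oct_common_change_mtu(): the check reduces to a bound on the on-wire frame size (MTU plus the 14-byte Ethernet header and 4-byte FCS, plus 4 more bytes when VLAN support is configured), and the GMX jabber register is programmed with that size rounded up to the next multiple of 8. The following standalone sketch exercises just that arithmetic; it is not driver code, and the sample MTU values are arbitrary:

#include <stdio.h>

#define ETH_OVERHEAD (14 + 4)	/* Ethernet header + FCS, as in the diff */

/* Round up to a multiple of 8, mirroring (max_packet + 7) & ~7u. */
static unsigned int jabber(unsigned int max_packet)
{
	return (max_packet + 7) & ~7u;
}

int main(void)
{
	int vlan_bytes = 0;	/* 4 when CONFIG_VLAN_8021Q is enabled */
	int mtus[] = { 46, 1500, 9000, 65374 };
	int i;

	printf("valid MTU range: %d..%d\n",
	       64 - ETH_OVERHEAD - vlan_bytes,
	       65392 - ETH_OVERHEAD - vlan_bytes);
	for (i = 0; i < (int)(sizeof(mtus) / sizeof(mtus[0])); i++) {
		int max_packet = mtus[i] + ETH_OVERHEAD + vlan_bytes;
		printf("mtu %5d -> max_packet %5d, jabber %5u\n",
		       mtus[i], max_packet, jabber(max_packet));
	}
	return 0;
}

With vlan_bytes set to 0 this prints a valid MTU range of 46..65374, matching the bounds reported by the pr_err() in the diff.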