Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mips-next-3.9' of git://git.linux-mips.org/pub/scm/john/linux-john into mips-for-linux-next

+5451 -1968
+47
Documentation/devicetree/bindings/mips/cpu_irq.txt
··· 1 + MIPS CPU interrupt controller 2 + 3 + On MIPS the mips_cpu_intc_init() helper can be used to initialize the 8 CPU 4 + IRQs from a devicetree file and create a irq_domain for IRQ controller. 5 + 6 + With the irq_domain in place we can describe how the 8 IRQs are wired to the 7 + platforms internal interrupt controller cascade. 8 + 9 + Below is an example of a platform describing the cascade inside the devicetree 10 + and the code used to load it inside arch_init_irq(). 11 + 12 + Required properties: 13 + - compatible : Should be "mti,cpu-interrupt-controller" 14 + 15 + Example devicetree: 16 + cpu-irq: cpu-irq@0 { 17 + #address-cells = <0>; 18 + 19 + interrupt-controller; 20 + #interrupt-cells = <1>; 21 + 22 + compatible = "mti,cpu-interrupt-controller"; 23 + }; 24 + 25 + intc: intc@200 { 26 + compatible = "ralink,rt2880-intc"; 27 + reg = <0x200 0x100>; 28 + 29 + interrupt-controller; 30 + #interrupt-cells = <1>; 31 + 32 + interrupt-parent = <&cpu-irq>; 33 + interrupts = <2>; 34 + }; 35 + 36 + 37 + Example platform irq.c: 38 + static struct of_device_id __initdata of_irq_ids[] = { 39 + { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init }, 40 + { .compatible = "ralink,rt2880-intc", .data = intc_of_init }, 41 + {}, 42 + }; 43 + 44 + void __init arch_init_irq(void) 45 + { 46 + of_irq_init(of_irq_ids); 47 + }
+16
Documentation/devicetree/bindings/serial/lantiq_asc.txt
··· 1 + Lantiq SoC ASC serial controller 2 + 3 + Required properties: 4 + - compatible : Should be "lantiq,asc" 5 + - reg : Address and length of the register set for the device 6 + - interrupts: the 3 (tx rx err) interrupt numbers. The interrupt specifier 7 + depends on the interrupt-parent interrupt controller. 8 + 9 + Example: 10 + 11 + asc1: serial@E100C00 { 12 + compatible = "lantiq,asc"; 13 + reg = <0xE100C00 0x400>; 14 + interrupt-parent = <&icu0>; 15 + interrupts = <112 113 114>; 16 + };
+1 -1
Documentation/kernel-parameters.txt
··· 2438 2438 real-time workloads. It can also improve energy 2439 2439 efficiency for asymmetric multiprocessors. 2440 2440 2441 - rcu_nocbs_poll [KNL,BOOT] 2441 + rcu_nocb_poll [KNL,BOOT] 2442 2442 Rather than requiring that offloaded CPUs 2443 2443 (specified by rcu_nocbs= above) explicitly 2444 2444 awaken the corresponding "rcuoN" kthreads,
+1 -1
Documentation/x86/boot.txt
··· 57 57 Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover 58 58 protocol entry point. 59 59 60 - Protocol 2.12: (Kernel 3.9) Added the xloadflags field and extension fields 60 + Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields 61 61 to struct boot_params for for loading bzImage and ramdisk 62 62 above 4G in 64bit. 63 63
+1 -1
MAINTAINERS
··· 1489 1489 M: Haavard Skinnemoen <hskinnemoen@gmail.com> 1490 1490 M: Hans-Christian Egtvedt <egtvedt@samfundet.no> 1491 1491 W: http://www.atmel.com/products/AVR32/ 1492 - W: http://avr32linux.org/ 1492 + W: http://mirror.egtvedt.no/avr32linux.org/ 1493 1493 W: http://avrfreaks.net/ 1494 1494 S: Maintained 1495 1495 F: arch/avr32/
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 8 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Unicycling Gorilla 6 6 7 7 # *DOCUMENTATION*
+23 -2
arch/arm/common/gic.c
··· 351 351 irq_set_chained_handler(irq, gic_handle_cascade_irq); 352 352 } 353 353 354 + static u8 gic_get_cpumask(struct gic_chip_data *gic) 355 + { 356 + void __iomem *base = gic_data_dist_base(gic); 357 + u32 mask, i; 358 + 359 + for (i = mask = 0; i < 32; i += 4) { 360 + mask = readl_relaxed(base + GIC_DIST_TARGET + i); 361 + mask |= mask >> 16; 362 + mask |= mask >> 8; 363 + if (mask) 364 + break; 365 + } 366 + 367 + if (!mask) 368 + pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); 369 + 370 + return mask; 371 + } 372 + 354 373 static void __init gic_dist_init(struct gic_chip_data *gic) 355 374 { 356 375 unsigned int i; ··· 388 369 /* 389 370 * Set all global interrupts to this CPU only. 390 371 */ 391 - cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); 372 + cpumask = gic_get_cpumask(gic); 373 + cpumask |= cpumask << 8; 374 + cpumask |= cpumask << 16; 392 375 for (i = 32; i < gic_irqs; i += 4) 393 376 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); 394 377 ··· 421 400 * Get what the GIC says our CPU mask is. 422 401 */ 423 402 BUG_ON(cpu >= NR_GIC_CPU_IF); 424 - cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); 403 + cpu_mask = gic_get_cpumask(gic); 425 404 gic_cpu_map[cpu] = cpu_mask; 426 405 427 406 /*
+1 -1
arch/arm/include/asm/memory.h
··· 37 37 */ 38 38 #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) 39 39 #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) 40 - #define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) 40 + #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) 41 41 42 42 /* 43 43 * The maximum size of a 26-bit user space task.
+1 -1
arch/arm/mach-exynos/Kconfig
··· 414 414 select CPU_EXYNOS4210 415 415 select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD 416 416 select PINCTRL 417 - select PINCTRL_EXYNOS4 417 + select PINCTRL_EXYNOS 418 418 select USE_OF 419 419 help 420 420 Machine support for Samsung Exynos4 machine with device tree enabled.
+1 -1
arch/arm/mach-realview/include/mach/irqs-eb.h
··· 115 115 /* 116 116 * Only define NR_IRQS if less than NR_IRQS_EB 117 117 */ 118 - #define NR_IRQS_EB (IRQ_EB_GIC_START + 96) 118 + #define NR_IRQS_EB (IRQ_EB_GIC_START + 128) 119 119 120 120 #if defined(CONFIG_MACH_REALVIEW_EB) \ 121 121 && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
+1 -1
arch/arm/mm/dma-mapping.c
··· 640 640 641 641 if (is_coherent || nommu()) 642 642 addr = __alloc_simple_buffer(dev, size, gfp, &page); 643 - else if (gfp & GFP_ATOMIC) 643 + else if (!(gfp & __GFP_WAIT)) 644 644 addr = __alloc_from_pool(size, &page); 645 645 else if (!IS_ENABLED(CONFIG_CMA)) 646 646 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+10
arch/avr32/include/asm/dma-mapping.h
··· 336 336 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 337 337 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 338 338 339 + /* drivers/base/dma-mapping.c */ 340 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 341 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 342 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 343 + void *cpu_addr, dma_addr_t dma_addr, 344 + size_t size); 345 + 346 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 347 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 348 + 339 349 #endif /* __ASM_AVR32_DMA_MAPPING_H */
+10
arch/blackfin/include/asm/dma-mapping.h
··· 154 154 _dma_sync((dma_addr_t)vaddr, size, dir); 155 155 } 156 156 157 + /* drivers/base/dma-mapping.c */ 158 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 159 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 160 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 161 + void *cpu_addr, dma_addr_t dma_addr, 162 + size_t size); 163 + 164 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 165 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 166 + 157 167 #endif /* _BLACKFIN_DMA_MAPPING_H */
+15
arch/c6x/include/asm/dma-mapping.h
··· 89 89 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) 90 90 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) 91 91 92 + /* Not supported for now */ 93 + static inline int dma_mmap_coherent(struct device *dev, 94 + struct vm_area_struct *vma, void *cpu_addr, 95 + dma_addr_t dma_addr, size_t size) 96 + { 97 + return -EINVAL; 98 + } 99 + 100 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 101 + void *cpu_addr, dma_addr_t dma_addr, 102 + size_t size) 103 + { 104 + return -EINVAL; 105 + } 106 + 92 107 #endif /* _ASM_C6X_DMA_MAPPING_H */
+10
arch/cris/include/asm/dma-mapping.h
··· 158 158 { 159 159 } 160 160 161 + /* drivers/base/dma-mapping.c */ 162 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 163 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 164 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 165 + void *cpu_addr, dma_addr_t dma_addr, 166 + size_t size); 167 + 168 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 169 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 170 + 161 171 162 172 #endif
+15
arch/frv/include/asm/dma-mapping.h
··· 132 132 flush_write_buffers(); 133 133 } 134 134 135 + /* Not supported for now */ 136 + static inline int dma_mmap_coherent(struct device *dev, 137 + struct vm_area_struct *vma, void *cpu_addr, 138 + dma_addr_t dma_addr, size_t size) 139 + { 140 + return -EINVAL; 141 + } 142 + 143 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 144 + void *cpu_addr, dma_addr_t dma_addr, 145 + size_t size) 146 + { 147 + return -EINVAL; 148 + } 149 + 135 150 #endif /* _ASM_DMA_MAPPING_H */
+10
arch/m68k/include/asm/dma-mapping.h
··· 115 115 #include <asm-generic/dma-mapping-broken.h> 116 116 #endif 117 117 118 + /* drivers/base/dma-mapping.c */ 119 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 120 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 121 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 122 + void *cpu_addr, dma_addr_t dma_addr, 123 + size_t size); 124 + 125 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 126 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 127 + 118 128 #endif /* _M68K_DMA_MAPPING_H */
+1
arch/mips/Kbuild.platforms
··· 21 21 platforms += pmcs-msp71xx 22 22 platforms += pnx833x 23 23 platforms += powertv 24 + platforms += ralink 24 25 platforms += rb532 25 26 platforms += sgi-ip22 26 27 platforms += sgi-ip27
+23
arch/mips/Kconfig
··· 107 107 config BCM47XX 108 108 bool "Broadcom BCM47XX based boards" 109 109 select ARCH_WANT_OPTIONAL_GPIOLIB 110 + select BOOT_RAW 110 111 select CEVT_R4K 111 112 select CSRC_R4K 112 113 select DMA_NONCOHERENT 113 114 select FW_CFE 114 115 select HW_HAS_PCI 115 116 select IRQ_CPU 117 + select NO_EXCEPT_FILL 116 118 select SYS_SUPPORTS_32BIT_KERNEL 117 119 select SYS_SUPPORTS_LITTLE_ENDIAN 118 120 select SYS_HAS_EARLY_PRINTK ··· 296 294 select BOOT_RAW 297 295 select CEVT_R4K 298 296 select CSRC_R4K 297 + select CSRC_GIC 299 298 select DMA_NONCOHERENT 300 299 select GENERIC_ISA_DMA 301 300 select HAVE_PCSPKR_PLATFORM ··· 427 424 select USB_OHCI_LITTLE_ENDIAN 428 425 help 429 426 This enables support for the Cisco PowerTV Platform. 427 + 428 + config RALINK 429 + bool "Ralink based machines" 430 + select CEVT_R4K 431 + select CSRC_R4K 432 + select BOOT_RAW 433 + select DMA_NONCOHERENT 434 + select IRQ_CPU 435 + select USE_OF 436 + select SYS_HAS_CPU_MIPS32_R1 437 + select SYS_HAS_CPU_MIPS32_R2 438 + select SYS_SUPPORTS_32BIT_KERNEL 439 + select SYS_SUPPORTS_LITTLE_ENDIAN 440 + select SYS_HAS_EARLY_PRINTK 441 + select HAVE_MACH_CLKDEV 442 + select CLKDEV_LOOKUP 430 443 431 444 config SGI_IP22 432 445 bool "SGI IP22 (Indy/Indigo2)" ··· 856 837 source "arch/mips/lasat/Kconfig" 857 838 source "arch/mips/pmcs-msp71xx/Kconfig" 858 839 source "arch/mips/powertv/Kconfig" 840 + source "arch/mips/ralink/Kconfig" 859 841 source "arch/mips/sgi-ip27/Kconfig" 860 842 source "arch/mips/sibyte/Kconfig" 861 843 source "arch/mips/txx9/Kconfig" ··· 935 915 bool 936 916 937 917 config CSRC_R4K 918 + bool 919 + 920 + config CSRC_GIC 938 921 bool 939 922 940 923 config CSRC_SB1250
+19 -1
arch/mips/ath79/Kconfig
··· 14 14 Say 'Y' here if you want your kernel to support the 15 15 Atheros AP121 reference board. 16 16 17 + config ATH79_MACH_AP136 18 + bool "Atheros AP136 reference board" 19 + select SOC_QCA955X 20 + select ATH79_DEV_GPIO_BUTTONS 21 + select ATH79_DEV_LEDS_GPIO 22 + select ATH79_DEV_SPI 23 + select ATH79_DEV_USB 24 + select ATH79_DEV_WMAC 25 + help 26 + Say 'Y' here if you want your kernel to support the 27 + Atheros AP136 reference board. 28 + 17 29 config ATH79_MACH_AP81 18 30 bool "Atheros AP81 reference board" 19 31 select SOC_AR913X ··· 100 88 select PCI_AR724X if PCI 101 89 def_bool n 102 90 91 + config SOC_QCA955X 92 + select USB_ARCH_HAS_EHCI 93 + select HW_HAS_PCI 94 + select PCI_AR724X if PCI 95 + def_bool n 96 + 103 97 config PCI_AR724X 104 98 def_bool n 105 99 ··· 122 104 def_bool n 123 105 124 106 config ATH79_DEV_WMAC 125 - depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X) 107 + depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X || SOC_QCA955X) 126 108 def_bool n 127 109 128 110 endif
+1
arch/mips/ath79/Makefile
··· 27 27 # Machines 28 28 # 29 29 obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o 30 + obj-$(CONFIG_ATH79_MACH_AP136) += mach-ap136.o 30 31 obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o 31 32 obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o 32 33 obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
+78
arch/mips/ath79/clock.c
··· 295 295 iounmap(dpll_base); 296 296 } 297 297 298 + static void __init qca955x_clocks_init(void) 299 + { 300 + u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv; 301 + u32 cpu_pll, ddr_pll; 302 + u32 bootstrap; 303 + 304 + bootstrap = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP); 305 + if (bootstrap & QCA955X_BOOTSTRAP_REF_CLK_40) 306 + ath79_ref_clk.rate = 40 * 1000 * 1000; 307 + else 308 + ath79_ref_clk.rate = 25 * 1000 * 1000; 309 + 310 + pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG); 311 + out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & 312 + QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK; 313 + ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) & 314 + QCA955X_PLL_CPU_CONFIG_REFDIV_MASK; 315 + nint = (pll >> QCA955X_PLL_CPU_CONFIG_NINT_SHIFT) & 316 + QCA955X_PLL_CPU_CONFIG_NINT_MASK; 317 + frac = (pll >> QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT) & 318 + QCA955X_PLL_CPU_CONFIG_NFRAC_MASK; 319 + 320 + cpu_pll = nint * ath79_ref_clk.rate / ref_div; 321 + cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6)); 322 + cpu_pll /= (1 << out_div); 323 + 324 + pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG); 325 + out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & 326 + QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK; 327 + ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) & 328 + QCA955X_PLL_DDR_CONFIG_REFDIV_MASK; 329 + nint = (pll >> QCA955X_PLL_DDR_CONFIG_NINT_SHIFT) & 330 + QCA955X_PLL_DDR_CONFIG_NINT_MASK; 331 + frac = (pll >> QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT) & 332 + QCA955X_PLL_DDR_CONFIG_NFRAC_MASK; 333 + 334 + ddr_pll = nint * ath79_ref_clk.rate / ref_div; 335 + ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10)); 336 + ddr_pll /= (1 << out_div); 337 + 338 + clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG); 339 + 340 + postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) & 341 + QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK; 342 + 343 + if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS) 344 + ath79_cpu_clk.rate = ath79_ref_clk.rate; 
345 + else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL) 346 + ath79_cpu_clk.rate = ddr_pll / (postdiv + 1); 347 + else 348 + ath79_cpu_clk.rate = cpu_pll / (postdiv + 1); 349 + 350 + postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) & 351 + QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK; 352 + 353 + if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS) 354 + ath79_ddr_clk.rate = ath79_ref_clk.rate; 355 + else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL) 356 + ath79_ddr_clk.rate = cpu_pll / (postdiv + 1); 357 + else 358 + ath79_ddr_clk.rate = ddr_pll / (postdiv + 1); 359 + 360 + postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) & 361 + QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK; 362 + 363 + if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS) 364 + ath79_ahb_clk.rate = ath79_ref_clk.rate; 365 + else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL) 366 + ath79_ahb_clk.rate = ddr_pll / (postdiv + 1); 367 + else 368 + ath79_ahb_clk.rate = cpu_pll / (postdiv + 1); 369 + 370 + ath79_wdt_clk.rate = ath79_ref_clk.rate; 371 + ath79_uart_clk.rate = ath79_ref_clk.rate; 372 + } 373 + 298 374 void __init ath79_clocks_init(void) 299 375 { 300 376 if (soc_is_ar71xx()) ··· 383 307 ar933x_clocks_init(); 384 308 else if (soc_is_ar934x()) 385 309 ar934x_clocks_init(); 310 + else if (soc_is_qca955x()) 311 + qca955x_clocks_init(); 386 312 else 387 313 BUG(); 388 314
+4
arch/mips/ath79/common.c
··· 72 72 reg = AR933X_RESET_REG_RESET_MODULE; 73 73 else if (soc_is_ar934x()) 74 74 reg = AR934X_RESET_REG_RESET_MODULE; 75 + else if (soc_is_qca955x()) 76 + reg = QCA955X_RESET_REG_RESET_MODULE; 75 77 else 76 78 BUG(); 77 79 ··· 100 98 reg = AR933X_RESET_REG_RESET_MODULE; 101 99 else if (soc_is_ar934x()) 102 100 reg = AR934X_RESET_REG_RESET_MODULE; 101 + else if (soc_is_qca955x()) 102 + reg = QCA955X_RESET_REG_RESET_MODULE; 103 103 else 104 104 BUG(); 105 105
+5 -4
arch/mips/ath79/dev-common.c
··· 36 36 static struct plat_serial8250_port ath79_uart_data[] = { 37 37 { 38 38 .mapbase = AR71XX_UART_BASE, 39 - .irq = ATH79_MISC_IRQ_UART, 39 + .irq = ATH79_MISC_IRQ(3), 40 40 .flags = AR71XX_UART_FLAGS, 41 41 .iotype = UPIO_MEM32, 42 42 .regshift = 2, ··· 62 62 .flags = IORESOURCE_MEM, 63 63 }, 64 64 { 65 - .start = ATH79_MISC_IRQ_UART, 66 - .end = ATH79_MISC_IRQ_UART, 65 + .start = ATH79_MISC_IRQ(3), 66 + .end = ATH79_MISC_IRQ(3), 67 67 .flags = IORESOURCE_IRQ, 68 68 }, 69 69 }; ··· 90 90 if (soc_is_ar71xx() || 91 91 soc_is_ar724x() || 92 92 soc_is_ar913x() || 93 - soc_is_ar934x()) { 93 + soc_is_ar934x() || 94 + soc_is_qca955x()) { 94 95 ath79_uart_data[0].uartclk = clk_get_rate(clk); 95 96 platform_device_register(&ath79_uart_device); 96 97 } else if (soc_is_ar933x()) {
+66 -60
arch/mips/ath79/dev-usb.c
··· 25 25 #include "common.h" 26 26 #include "dev-usb.h" 27 27 28 - static struct resource ath79_ohci_resources[2]; 29 - 30 - static u64 ath79_ohci_dmamask = DMA_BIT_MASK(32); 28 + static u64 ath79_usb_dmamask = DMA_BIT_MASK(32); 31 29 32 30 static struct usb_ohci_pdata ath79_ohci_pdata = { 33 31 }; 34 - 35 - static struct platform_device ath79_ohci_device = { 36 - .name = "ohci-platform", 37 - .id = -1, 38 - .resource = ath79_ohci_resources, 39 - .num_resources = ARRAY_SIZE(ath79_ohci_resources), 40 - .dev = { 41 - .dma_mask = &ath79_ohci_dmamask, 42 - .coherent_dma_mask = DMA_BIT_MASK(32), 43 - .platform_data = &ath79_ohci_pdata, 44 - }, 45 - }; 46 - 47 - static struct resource ath79_ehci_resources[2]; 48 - 49 - static u64 ath79_ehci_dmamask = DMA_BIT_MASK(32); 50 32 51 33 static struct usb_ehci_pdata ath79_ehci_pdata_v1 = { 52 34 .has_synopsys_hc_bug = 1, ··· 39 57 .has_tt = 1, 40 58 }; 41 59 42 - static struct platform_device ath79_ehci_device = { 43 - .name = "ehci-platform", 44 - .id = -1, 45 - .resource = ath79_ehci_resources, 46 - .num_resources = ARRAY_SIZE(ath79_ehci_resources), 47 - .dev = { 48 - .dma_mask = &ath79_ehci_dmamask, 49 - .coherent_dma_mask = DMA_BIT_MASK(32), 50 - }, 51 - }; 52 - 53 - static void __init ath79_usb_init_resource(struct resource res[2], 54 - unsigned long base, 55 - unsigned long size, 56 - int irq) 60 + static void __init ath79_usb_register(const char *name, int id, 61 + unsigned long base, unsigned long size, 62 + int irq, const void *data, 63 + size_t data_size) 57 64 { 65 + struct resource res[2]; 66 + struct platform_device *pdev; 67 + 68 + memset(res, 0, sizeof(res)); 69 + 58 70 res[0].flags = IORESOURCE_MEM; 59 71 res[0].start = base; 60 72 res[0].end = base + size - 1; ··· 56 80 res[1].flags = IORESOURCE_IRQ; 57 81 res[1].start = irq; 58 82 res[1].end = irq; 83 + 84 + pdev = platform_device_register_resndata(NULL, name, id, 85 + res, ARRAY_SIZE(res), 86 + data, data_size); 87 + 88 + if (IS_ERR(pdev)) { 89 + 
pr_err("ath79: unable to register USB at %08lx, err=%d\n", 90 + base, (int) PTR_ERR(pdev)); 91 + return; 92 + } 93 + 94 + pdev->dev.dma_mask = &ath79_usb_dmamask; 95 + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 59 96 } 60 97 61 98 #define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \ ··· 95 106 96 107 mdelay(900); 97 108 98 - ath79_usb_init_resource(ath79_ohci_resources, AR71XX_OHCI_BASE, 99 - AR71XX_OHCI_SIZE, ATH79_MISC_IRQ_OHCI); 100 - platform_device_register(&ath79_ohci_device); 109 + ath79_usb_register("ohci-platform", -1, 110 + AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE, 111 + ATH79_MISC_IRQ(6), 112 + &ath79_ohci_pdata, sizeof(ath79_ohci_pdata)); 101 113 102 - ath79_usb_init_resource(ath79_ehci_resources, AR71XX_EHCI_BASE, 103 - AR71XX_EHCI_SIZE, ATH79_CPU_IRQ_USB); 104 - ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v1; 105 - platform_device_register(&ath79_ehci_device); 114 + ath79_usb_register("ehci-platform", -1, 115 + AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE, 116 + ATH79_CPU_IRQ(3), 117 + &ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1)); 106 118 } 107 119 108 120 static void __init ar7240_usb_setup(void) ··· 125 135 126 136 iounmap(usb_ctrl_base); 127 137 128 - ath79_usb_init_resource(ath79_ohci_resources, AR7240_OHCI_BASE, 129 - AR7240_OHCI_SIZE, ATH79_CPU_IRQ_USB); 130 - platform_device_register(&ath79_ohci_device); 138 + ath79_usb_register("ohci-platform", -1, 139 + AR7240_OHCI_BASE, AR7240_OHCI_SIZE, 140 + ATH79_CPU_IRQ(3), 141 + &ath79_ohci_pdata, sizeof(ath79_ohci_pdata)); 131 142 } 132 143 133 144 static void __init ar724x_usb_setup(void) ··· 142 151 ath79_device_reset_clear(AR724X_RESET_USB_PHY); 143 152 mdelay(10); 144 153 145 - ath79_usb_init_resource(ath79_ehci_resources, AR724X_EHCI_BASE, 146 - AR724X_EHCI_SIZE, ATH79_CPU_IRQ_USB); 147 - ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2; 148 - platform_device_register(&ath79_ehci_device); 154 + ath79_usb_register("ehci-platform", -1, 155 + AR724X_EHCI_BASE, AR724X_EHCI_SIZE, 
156 + ATH79_CPU_IRQ(3), 157 + &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); 149 158 } 150 159 151 160 static void __init ar913x_usb_setup(void) ··· 159 168 ath79_device_reset_clear(AR913X_RESET_USB_PHY); 160 169 mdelay(10); 161 170 162 - ath79_usb_init_resource(ath79_ehci_resources, AR913X_EHCI_BASE, 163 - AR913X_EHCI_SIZE, ATH79_CPU_IRQ_USB); 164 - ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2; 165 - platform_device_register(&ath79_ehci_device); 171 + ath79_usb_register("ehci-platform", -1, 172 + AR913X_EHCI_BASE, AR913X_EHCI_SIZE, 173 + ATH79_CPU_IRQ(3), 174 + &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); 166 175 } 167 176 168 177 static void __init ar933x_usb_setup(void) ··· 176 185 ath79_device_reset_clear(AR933X_RESET_USB_PHY); 177 186 mdelay(10); 178 187 179 - ath79_usb_init_resource(ath79_ehci_resources, AR933X_EHCI_BASE, 180 - AR933X_EHCI_SIZE, ATH79_CPU_IRQ_USB); 181 - ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2; 182 - platform_device_register(&ath79_ehci_device); 188 + ath79_usb_register("ehci-platform", -1, 189 + AR933X_EHCI_BASE, AR933X_EHCI_SIZE, 190 + ATH79_CPU_IRQ(3), 191 + &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); 183 192 } 184 193 185 194 static void __init ar934x_usb_setup(void) ··· 202 211 ath79_device_reset_clear(AR934X_RESET_USB_HOST); 203 212 udelay(1000); 204 213 205 - ath79_usb_init_resource(ath79_ehci_resources, AR934X_EHCI_BASE, 206 - AR934X_EHCI_SIZE, ATH79_CPU_IRQ_USB); 207 - ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2; 208 - platform_device_register(&ath79_ehci_device); 214 + ath79_usb_register("ehci-platform", -1, 215 + AR934X_EHCI_BASE, AR934X_EHCI_SIZE, 216 + ATH79_CPU_IRQ(3), 217 + &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); 218 + } 219 + 220 + static void __init qca955x_usb_setup(void) 221 + { 222 + ath79_usb_register("ehci-platform", 0, 223 + QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE, 224 + ATH79_IP3_IRQ(0), 225 + &ath79_ehci_pdata_v2, 
sizeof(ath79_ehci_pdata_v2)); 226 + 227 + ath79_usb_register("ehci-platform", 1, 228 + QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE, 229 + ATH79_IP3_IRQ(1), 230 + &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); 209 231 } 210 232 211 233 void __init ath79_register_usb(void) ··· 235 231 ar933x_usb_setup(); 236 232 else if (soc_is_ar934x()) 237 233 ar934x_usb_setup(); 234 + else if (soc_is_qca955x()) 235 + qca955x_usb_setup(); 238 236 else 239 237 BUG(); 240 238 }
+25 -5
arch/mips/ath79/dev-wmac.c
··· 55 55 56 56 ath79_wmac_resources[0].start = AR913X_WMAC_BASE; 57 57 ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1; 58 - ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2; 59 - ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2; 58 + ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2); 59 + ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2); 60 60 } 61 61 62 62 ··· 83 83 84 84 ath79_wmac_resources[0].start = AR933X_WMAC_BASE; 85 85 ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1; 86 - ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2; 87 - ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2; 86 + ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2); 87 + ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2); 88 88 89 89 t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); 90 90 if (t & AR933X_BOOTSTRAP_REF_CLK_40) ··· 107 107 ath79_wmac_resources[0].start = AR934X_WMAC_BASE; 108 108 ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1; 109 109 ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1); 110 - ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1); 110 + ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1); 111 111 112 112 t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); 113 113 if (t & AR934X_BOOTSTRAP_REF_CLK_40) 114 + ath79_wmac_data.is_clk_25mhz = false; 115 + else 116 + ath79_wmac_data.is_clk_25mhz = true; 117 + } 118 + 119 + static void qca955x_wmac_setup(void) 120 + { 121 + u32 t; 122 + 123 + ath79_wmac_device.name = "qca955x_wmac"; 124 + 125 + ath79_wmac_resources[0].start = QCA955X_WMAC_BASE; 126 + ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1; 127 + ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1); 128 + ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1); 129 + 130 + t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP); 131 + if (t & QCA955X_BOOTSTRAP_REF_CLK_40) 114 132 ath79_wmac_data.is_clk_25mhz = false; 115 133 else 116 134 ath79_wmac_data.is_clk_25mhz = true; ··· 142 124 ar933x_wmac_setup(); 143 125 else 
if (soc_is_ar934x()) 144 126 ar934x_wmac_setup(); 127 + else if (soc_is_qca955x()) 128 + qca955x_wmac_setup(); 145 129 else 146 130 BUG(); 147 131
+2
arch/mips/ath79/early_printk.c
··· 74 74 case REV_ID_MAJOR_AR9341: 75 75 case REV_ID_MAJOR_AR9342: 76 76 case REV_ID_MAJOR_AR9344: 77 + case REV_ID_MAJOR_QCA9556: 78 + case REV_ID_MAJOR_QCA9558: 77 79 _prom_putchar = prom_putchar_ar71xx; 78 80 break; 79 81
+28 -30
arch/mips/ath79/gpio.c
··· 137 137 .base = 0, 138 138 }; 139 139 140 - void ath79_gpio_function_enable(u32 mask) 140 + static void __iomem *ath79_gpio_get_function_reg(void) 141 141 { 142 - void __iomem *base = ath79_gpio_base; 143 - unsigned long flags; 142 + u32 reg = 0; 144 143 145 - spin_lock_irqsave(&ath79_gpio_lock, flags); 144 + if (soc_is_ar71xx() || 145 + soc_is_ar724x() || 146 + soc_is_ar913x() || 147 + soc_is_ar933x()) 148 + reg = AR71XX_GPIO_REG_FUNC; 149 + else if (soc_is_ar934x()) 150 + reg = AR934X_GPIO_REG_FUNC; 151 + else 152 + BUG(); 146 153 147 - __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) | mask, 148 - base + AR71XX_GPIO_REG_FUNC); 149 - /* flush write */ 150 - __raw_readl(base + AR71XX_GPIO_REG_FUNC); 151 - 152 - spin_unlock_irqrestore(&ath79_gpio_lock, flags); 153 - } 154 - 155 - void ath79_gpio_function_disable(u32 mask) 156 - { 157 - void __iomem *base = ath79_gpio_base; 158 - unsigned long flags; 159 - 160 - spin_lock_irqsave(&ath79_gpio_lock, flags); 161 - 162 - __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~mask, 163 - base + AR71XX_GPIO_REG_FUNC); 164 - /* flush write */ 165 - __raw_readl(base + AR71XX_GPIO_REG_FUNC); 166 - 167 - spin_unlock_irqrestore(&ath79_gpio_lock, flags); 154 + return ath79_gpio_base + reg; 168 155 } 169 156 170 157 void ath79_gpio_function_setup(u32 set, u32 clear) 171 158 { 172 - void __iomem *base = ath79_gpio_base; 159 + void __iomem *reg = ath79_gpio_get_function_reg(); 173 160 unsigned long flags; 174 161 175 162 spin_lock_irqsave(&ath79_gpio_lock, flags); 176 163 177 - __raw_writel((__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~clear) | set, 178 - base + AR71XX_GPIO_REG_FUNC); 164 + __raw_writel((__raw_readl(reg) & ~clear) | set, reg); 179 165 /* flush write */ 180 - __raw_readl(base + AR71XX_GPIO_REG_FUNC); 166 + __raw_readl(reg); 181 167 182 168 spin_unlock_irqrestore(&ath79_gpio_lock, flags); 169 + } 170 + 171 + void ath79_gpio_function_enable(u32 mask) 172 + { 173 + ath79_gpio_function_setup(mask, 0); 174 + } 
175 + 176 + void ath79_gpio_function_disable(u32 mask) 177 + { 178 + ath79_gpio_function_setup(0, mask); 183 179 } 184 180 185 181 void __init ath79_gpio_init(void) ··· 194 198 ath79_gpio_count = AR933X_GPIO_COUNT; 195 199 else if (soc_is_ar934x()) 196 200 ath79_gpio_count = AR934X_GPIO_COUNT; 201 + else if (soc_is_qca955x()) 202 + ath79_gpio_count = QCA955X_GPIO_COUNT; 197 203 else 198 204 BUG(); 199 205 200 206 ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE); 201 207 ath79_gpio_chip.ngpio = ath79_gpio_count; 202 - if (soc_is_ar934x()) { 208 + if (soc_is_ar934x() || soc_is_qca955x()) { 203 209 ath79_gpio_chip.direction_input = ar934x_gpio_direction_input; 204 210 ath79_gpio_chip.direction_output = ar934x_gpio_direction_output; 205 211 }
+129 -60
arch/mips/ath79/irq.c
··· 35 35 pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) & 36 36 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE); 37 37 38 - if (pending & MISC_INT_UART) 39 - generic_handle_irq(ATH79_MISC_IRQ_UART); 40 - 41 - else if (pending & MISC_INT_DMA) 42 - generic_handle_irq(ATH79_MISC_IRQ_DMA); 43 - 44 - else if (pending & MISC_INT_PERFC) 45 - generic_handle_irq(ATH79_MISC_IRQ_PERFC); 46 - 47 - else if (pending & MISC_INT_TIMER) 48 - generic_handle_irq(ATH79_MISC_IRQ_TIMER); 49 - 50 - else if (pending & MISC_INT_TIMER2) 51 - generic_handle_irq(ATH79_MISC_IRQ_TIMER2); 52 - 53 - else if (pending & MISC_INT_TIMER3) 54 - generic_handle_irq(ATH79_MISC_IRQ_TIMER3); 55 - 56 - else if (pending & MISC_INT_TIMER4) 57 - generic_handle_irq(ATH79_MISC_IRQ_TIMER4); 58 - 59 - else if (pending & MISC_INT_OHCI) 60 - generic_handle_irq(ATH79_MISC_IRQ_OHCI); 61 - 62 - else if (pending & MISC_INT_ERROR) 63 - generic_handle_irq(ATH79_MISC_IRQ_ERROR); 64 - 65 - else if (pending & MISC_INT_GPIO) 66 - generic_handle_irq(ATH79_MISC_IRQ_GPIO); 67 - 68 - else if (pending & MISC_INT_WDOG) 69 - generic_handle_irq(ATH79_MISC_IRQ_WDOG); 70 - 71 - else if (pending & MISC_INT_ETHSW) 72 - generic_handle_irq(ATH79_MISC_IRQ_ETHSW); 73 - 74 - else 38 + if (!pending) { 75 39 spurious_interrupt(); 40 + return; 41 + } 42 + 43 + while (pending) { 44 + int bit = __ffs(pending); 45 + 46 + generic_handle_irq(ATH79_MISC_IRQ(bit)); 47 + pending &= ~BIT(bit); 48 + } 76 49 } 77 50 78 51 static void ar71xx_misc_irq_unmask(struct irq_data *d) ··· 103 130 104 131 if (soc_is_ar71xx() || soc_is_ar913x()) 105 132 ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; 106 - else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x()) 133 + else if (soc_is_ar724x() || 134 + soc_is_ar933x() || 135 + soc_is_ar934x() || 136 + soc_is_qca955x()) 107 137 ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; 108 138 else 109 139 BUG(); ··· 117 141 handle_level_irq); 118 142 } 119 143 120 - 
irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler); 144 + irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler); 121 145 } 122 146 123 147 static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) ··· 150 174 irq_set_chip_and_handler(i, &dummy_irq_chip, 151 175 handle_level_irq); 152 176 153 - irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch); 177 + irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); 178 + } 179 + 180 + static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc) 181 + { 182 + u32 status; 183 + 184 + disable_irq_nosync(irq); 185 + 186 + status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS); 187 + status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL; 188 + 189 + if (status == 0) { 190 + spurious_interrupt(); 191 + goto enable; 192 + } 193 + 194 + if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) { 195 + /* TODO: flush DDR? */ 196 + generic_handle_irq(ATH79_IP2_IRQ(0)); 197 + } 198 + 199 + if (status & QCA955X_EXT_INT_WMAC_ALL) { 200 + /* TODO: flush DDR? */ 201 + generic_handle_irq(ATH79_IP2_IRQ(1)); 202 + } 203 + 204 + enable: 205 + enable_irq(irq); 206 + } 207 + 208 + static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc) 209 + { 210 + u32 status; 211 + 212 + disable_irq_nosync(irq); 213 + 214 + status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS); 215 + status &= QCA955X_EXT_INT_PCIE_RC2_ALL | 216 + QCA955X_EXT_INT_USB1 | 217 + QCA955X_EXT_INT_USB2; 218 + 219 + if (status == 0) { 220 + spurious_interrupt(); 221 + goto enable; 222 + } 223 + 224 + if (status & QCA955X_EXT_INT_USB1) { 225 + /* TODO: flush DDR? */ 226 + generic_handle_irq(ATH79_IP3_IRQ(0)); 227 + } 228 + 229 + if (status & QCA955X_EXT_INT_USB2) { 230 + /* TODO: flush DDR? */ 231 + generic_handle_irq(ATH79_IP3_IRQ(1)); 232 + } 233 + 234 + if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) { 235 + /* TODO: flush DDR? 
*/ 236 + generic_handle_irq(ATH79_IP3_IRQ(2)); 237 + } 238 + 239 + enable: 240 + enable_irq(irq); 241 + } 242 + 243 + static void qca955x_irq_init(void) 244 + { 245 + int i; 246 + 247 + for (i = ATH79_IP2_IRQ_BASE; 248 + i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) 249 + irq_set_chip_and_handler(i, &dummy_irq_chip, 250 + handle_level_irq); 251 + 252 + irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch); 253 + 254 + for (i = ATH79_IP3_IRQ_BASE; 255 + i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++) 256 + irq_set_chip_and_handler(i, &dummy_irq_chip, 257 + handle_level_irq); 258 + 259 + irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch); 154 260 } 155 261 156 262 asmlinkage void plat_irq_dispatch(void) ··· 242 184 pending = read_c0_status() & read_c0_cause() & ST0_IM; 243 185 244 186 if (pending & STATUSF_IP7) 245 - do_IRQ(ATH79_CPU_IRQ_TIMER); 187 + do_IRQ(ATH79_CPU_IRQ(7)); 246 188 247 189 else if (pending & STATUSF_IP2) 248 190 ath79_ip2_handler(); 249 191 250 192 else if (pending & STATUSF_IP4) 251 - do_IRQ(ATH79_CPU_IRQ_GE0); 193 + do_IRQ(ATH79_CPU_IRQ(4)); 252 194 253 195 else if (pending & STATUSF_IP5) 254 - do_IRQ(ATH79_CPU_IRQ_GE1); 196 + do_IRQ(ATH79_CPU_IRQ(5)); 255 197 256 198 else if (pending & STATUSF_IP3) 257 199 ath79_ip3_handler(); 258 200 259 201 else if (pending & STATUSF_IP6) 260 - do_IRQ(ATH79_CPU_IRQ_MISC); 202 + do_IRQ(ATH79_CPU_IRQ(6)); 261 203 262 204 else 263 205 spurious_interrupt(); ··· 270 212 * Issue a flush in the handlers to ensure that the driver sees 271 213 * the update. 
272 214 */ 215 + 216 + static void ath79_default_ip2_handler(void) 217 + { 218 + do_IRQ(ATH79_CPU_IRQ(2)); 219 + } 220 + 221 + static void ath79_default_ip3_handler(void) 222 + { 223 + do_IRQ(ATH79_CPU_IRQ(3)); 224 + } 225 + 273 226 static void ar71xx_ip2_handler(void) 274 227 { 275 228 ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI); 276 - do_IRQ(ATH79_CPU_IRQ_IP2); 229 + do_IRQ(ATH79_CPU_IRQ(2)); 277 230 } 278 231 279 232 static void ar724x_ip2_handler(void) 280 233 { 281 234 ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE); 282 - do_IRQ(ATH79_CPU_IRQ_IP2); 235 + do_IRQ(ATH79_CPU_IRQ(2)); 283 236 } 284 237 285 238 static void ar913x_ip2_handler(void) 286 239 { 287 240 ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC); 288 - do_IRQ(ATH79_CPU_IRQ_IP2); 241 + do_IRQ(ATH79_CPU_IRQ(2)); 289 242 } 290 243 291 244 static void ar933x_ip2_handler(void) 292 245 { 293 246 ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC); 294 - do_IRQ(ATH79_CPU_IRQ_IP2); 295 - } 296 - 297 - static void ar934x_ip2_handler(void) 298 - { 299 - do_IRQ(ATH79_CPU_IRQ_IP2); 247 + do_IRQ(ATH79_CPU_IRQ(2)); 300 248 } 301 249 302 250 static void ar71xx_ip3_handler(void) 303 251 { 304 252 ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB); 305 - do_IRQ(ATH79_CPU_IRQ_USB); 253 + do_IRQ(ATH79_CPU_IRQ(3)); 306 254 } 307 255 308 256 static void ar724x_ip3_handler(void) 309 257 { 310 258 ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB); 311 - do_IRQ(ATH79_CPU_IRQ_USB); 259 + do_IRQ(ATH79_CPU_IRQ(3)); 312 260 } 313 261 314 262 static void ar913x_ip3_handler(void) 315 263 { 316 264 ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB); 317 - do_IRQ(ATH79_CPU_IRQ_USB); 265 + do_IRQ(ATH79_CPU_IRQ(3)); 318 266 } 319 267 320 268 static void ar933x_ip3_handler(void) 321 269 { 322 270 ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB); 323 - do_IRQ(ATH79_CPU_IRQ_USB); 271 + do_IRQ(ATH79_CPU_IRQ(3)); 324 272 } 325 273 326 274 static void ar934x_ip3_handler(void) 327 275 { 328 276 ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB); 329 - 
do_IRQ(ATH79_CPU_IRQ_USB); 277 + do_IRQ(ATH79_CPU_IRQ(3)); 330 278 } 331 279 332 280 void __init arch_init_irq(void) ··· 350 286 ath79_ip2_handler = ar933x_ip2_handler; 351 287 ath79_ip3_handler = ar933x_ip3_handler; 352 288 } else if (soc_is_ar934x()) { 353 - ath79_ip2_handler = ar934x_ip2_handler; 289 + ath79_ip2_handler = ath79_default_ip2_handler; 354 290 ath79_ip3_handler = ar934x_ip3_handler; 291 + } else if (soc_is_qca955x()) { 292 + ath79_ip2_handler = ath79_default_ip2_handler; 293 + ath79_ip3_handler = ath79_default_ip3_handler; 355 294 } else { 356 295 BUG(); 357 296 } 358 297 359 - cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC; 298 + cp0_perfcount_irq = ATH79_MISC_IRQ(5); 360 299 mips_cpu_irq_init(); 361 300 ath79_misc_irq_init(); 362 301 363 302 if (soc_is_ar934x()) 364 303 ar934x_ip2_irq_init(); 304 + else if (soc_is_qca955x()) 305 + qca955x_irq_init(); 365 306 }
+156
arch/mips/ath79/mach-ap136.c
··· 1 + /* 2 + * Qualcomm Atheros AP136 reference board support 3 + * 4 + * Copyright (c) 2012 Qualcomm Atheros 5 + * Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org> 6 + * 7 + * Permission to use, copy, modify, and/or distribute this software for any 8 + * purpose with or without fee is hereby granted, provided that the above 9 + * copyright notice and this permission notice appear in all copies. 10 + * 11 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 + * 19 + */ 20 + 21 + #include <linux/pci.h> 22 + #include <linux/ath9k_platform.h> 23 + 24 + #include "machtypes.h" 25 + #include "dev-gpio-buttons.h" 26 + #include "dev-leds-gpio.h" 27 + #include "dev-spi.h" 28 + #include "dev-usb.h" 29 + #include "dev-wmac.h" 30 + #include "pci.h" 31 + 32 + #define AP136_GPIO_LED_STATUS_RED 14 33 + #define AP136_GPIO_LED_STATUS_GREEN 19 34 + #define AP136_GPIO_LED_USB 4 35 + #define AP136_GPIO_LED_WLAN_2G 13 36 + #define AP136_GPIO_LED_WLAN_5G 12 37 + #define AP136_GPIO_LED_WPS_RED 15 38 + #define AP136_GPIO_LED_WPS_GREEN 20 39 + 40 + #define AP136_GPIO_BTN_WPS 16 41 + #define AP136_GPIO_BTN_RFKILL 21 42 + 43 + #define AP136_KEYS_POLL_INTERVAL 20 /* msecs */ 44 + #define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL) 45 + 46 + #define AP136_WMAC_CALDATA_OFFSET 0x1000 47 + #define AP136_PCIE_CALDATA_OFFSET 0x5000 48 + 49 + static struct gpio_led ap136_leds_gpio[] __initdata = { 50 + { 51 + .name = "qca:green:status", 52 + .gpio = AP136_GPIO_LED_STATUS_GREEN, 53 + .active_low = 1, 
54 + }, 55 + { 56 + .name = "qca:red:status", 57 + .gpio = AP136_GPIO_LED_STATUS_RED, 58 + .active_low = 1, 59 + }, 60 + { 61 + .name = "qca:green:wps", 62 + .gpio = AP136_GPIO_LED_WPS_GREEN, 63 + .active_low = 1, 64 + }, 65 + { 66 + .name = "qca:red:wps", 67 + .gpio = AP136_GPIO_LED_WPS_RED, 68 + .active_low = 1, 69 + }, 70 + { 71 + .name = "qca:red:wlan-2g", 72 + .gpio = AP136_GPIO_LED_WLAN_2G, 73 + .active_low = 1, 74 + }, 75 + { 76 + .name = "qca:red:usb", 77 + .gpio = AP136_GPIO_LED_USB, 78 + .active_low = 1, 79 + } 80 + }; 81 + 82 + static struct gpio_keys_button ap136_gpio_keys[] __initdata = { 83 + { 84 + .desc = "WPS button", 85 + .type = EV_KEY, 86 + .code = KEY_WPS_BUTTON, 87 + .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, 88 + .gpio = AP136_GPIO_BTN_WPS, 89 + .active_low = 1, 90 + }, 91 + { 92 + .desc = "RFKILL button", 93 + .type = EV_KEY, 94 + .code = KEY_RFKILL, 95 + .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, 96 + .gpio = AP136_GPIO_BTN_RFKILL, 97 + .active_low = 1, 98 + }, 99 + }; 100 + 101 + static struct spi_board_info ap136_spi_info[] = { 102 + { 103 + .bus_num = 0, 104 + .chip_select = 0, 105 + .max_speed_hz = 25000000, 106 + .modalias = "mx25l6405d", 107 + } 108 + }; 109 + 110 + static struct ath79_spi_platform_data ap136_spi_data = { 111 + .bus_num = 0, 112 + .num_chipselect = 1, 113 + }; 114 + 115 + #ifdef CONFIG_PCI 116 + static struct ath9k_platform_data ap136_ath9k_data; 117 + 118 + static int ap136_pci_plat_dev_init(struct pci_dev *dev) 119 + { 120 + if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0) 121 + dev->dev.platform_data = &ap136_ath9k_data; 122 + 123 + return 0; 124 + } 125 + 126 + static void __init ap136_pci_init(u8 *eeprom) 127 + { 128 + memcpy(ap136_ath9k_data.eeprom_data, eeprom, 129 + sizeof(ap136_ath9k_data.eeprom_data)); 130 + 131 + ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init); 132 + ath79_register_pci(); 133 + } 134 + #else 135 + static inline void ap136_pci_init(void) {} 136 + #endif /* 
CONFIG_PCI */ 137 + 138 + static void __init ap136_setup(void) 139 + { 140 + u8 *art = (u8 *) KSEG1ADDR(0x1fff0000); 141 + 142 + ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio), 143 + ap136_leds_gpio); 144 + ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL, 145 + ARRAY_SIZE(ap136_gpio_keys), 146 + ap136_gpio_keys); 147 + ath79_register_spi(&ap136_spi_data, ap136_spi_info, 148 + ARRAY_SIZE(ap136_spi_info)); 149 + ath79_register_usb(); 150 + ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET); 151 + ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET); 152 + } 153 + 154 + MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010", 155 + "Atheros AP136-010 reference board", 156 + ap136_setup);
+1
arch/mips/ath79/machtypes.h
··· 17 17 enum ath79_mach_type { 18 18 ATH79_MACH_GENERIC = 0, 19 19 ATH79_MACH_AP121, /* Atheros AP121 reference board */ 20 + ATH79_MACH_AP136_010, /* Atheros AP136-010 reference board */ 20 21 ATH79_MACH_AP81, /* Atheros AP81 reference board */ 21 22 ATH79_MACH_DB120, /* Atheros DB120 reference board */ 22 23 ATH79_MACH_PB44, /* Atheros PB44 reference board */
+154 -11
arch/mips/ath79/pci.c
··· 14 14 15 15 #include <linux/init.h> 16 16 #include <linux/pci.h> 17 + #include <linux/resource.h> 18 + #include <linux/platform_device.h> 17 19 #include <asm/mach-ath79/ar71xx_regs.h> 18 20 #include <asm/mach-ath79/ath79.h> 19 21 #include <asm/mach-ath79/irq.h> 20 - #include <asm/mach-ath79/pci.h> 21 22 #include "pci.h" 22 23 23 24 static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev); ··· 49 48 } 50 49 }; 51 50 51 + static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = { 52 + { 53 + .bus = 0, 54 + .slot = 0, 55 + .pin = 1, 56 + .irq = ATH79_PCI_IRQ(0), 57 + }, 58 + { 59 + .bus = 1, 60 + .slot = 0, 61 + .pin = 1, 62 + .irq = ATH79_PCI_IRQ(1), 63 + }, 64 + }; 65 + 52 66 int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) 53 67 { 54 68 int irq = -1; ··· 79 63 soc_is_ar9344()) { 80 64 ath79_pci_irq_map = ar724x_pci_irq_map; 81 65 ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map); 66 + } else if (soc_is_qca955x()) { 67 + ath79_pci_irq_map = qca955x_pci_irq_map; 68 + ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map); 82 69 } else { 83 70 pr_crit("pci %s: invalid irq map\n", 84 71 pci_name((struct pci_dev *) dev)); ··· 93 74 const struct ath79_pci_irq *entry; 94 75 95 76 entry = &ath79_pci_irq_map[i]; 96 - if (entry->slot == slot && entry->pin == pin) { 77 + if (entry->bus == dev->bus->number && 78 + entry->slot == slot && 79 + entry->pin == pin) { 97 80 irq = entry->irq; 98 81 break; 99 82 } ··· 131 110 ath79_pci_plat_dev_init = func; 132 111 } 133 112 113 + static struct platform_device * 114 + ath79_register_pci_ar71xx(void) 115 + { 116 + struct platform_device *pdev; 117 + struct resource res[4]; 118 + 119 + memset(res, 0, sizeof(res)); 120 + 121 + res[0].name = "cfg_base"; 122 + res[0].flags = IORESOURCE_MEM; 123 + res[0].start = AR71XX_PCI_CFG_BASE; 124 + res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1; 125 + 126 + res[1].flags = IORESOURCE_IRQ; 127 + res[1].start = ATH79_CPU_IRQ(2); 128 + 
res[1].end = ATH79_CPU_IRQ(2); 129 + 130 + res[2].name = "io_base"; 131 + res[2].flags = IORESOURCE_IO; 132 + res[2].start = 0; 133 + res[2].end = 0; 134 + 135 + res[3].name = "mem_base"; 136 + res[3].flags = IORESOURCE_MEM; 137 + res[3].start = AR71XX_PCI_MEM_BASE; 138 + res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1; 139 + 140 + pdev = platform_device_register_simple("ar71xx-pci", -1, 141 + res, ARRAY_SIZE(res)); 142 + return pdev; 143 + } 144 + 145 + static struct platform_device * 146 + ath79_register_pci_ar724x(int id, 147 + unsigned long cfg_base, 148 + unsigned long ctrl_base, 149 + unsigned long crp_base, 150 + unsigned long mem_base, 151 + unsigned long mem_size, 152 + unsigned long io_base, 153 + int irq) 154 + { 155 + struct platform_device *pdev; 156 + struct resource res[6]; 157 + 158 + memset(res, 0, sizeof(res)); 159 + 160 + res[0].name = "cfg_base"; 161 + res[0].flags = IORESOURCE_MEM; 162 + res[0].start = cfg_base; 163 + res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1; 164 + 165 + res[1].name = "ctrl_base"; 166 + res[1].flags = IORESOURCE_MEM; 167 + res[1].start = ctrl_base; 168 + res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1; 169 + 170 + res[2].flags = IORESOURCE_IRQ; 171 + res[2].start = irq; 172 + res[2].end = irq; 173 + 174 + res[3].name = "mem_base"; 175 + res[3].flags = IORESOURCE_MEM; 176 + res[3].start = mem_base; 177 + res[3].end = mem_base + mem_size - 1; 178 + 179 + res[4].name = "io_base"; 180 + res[4].flags = IORESOURCE_IO; 181 + res[4].start = io_base; 182 + res[4].end = io_base; 183 + 184 + res[5].name = "crp_base"; 185 + res[5].flags = IORESOURCE_MEM; 186 + res[5].start = crp_base; 187 + res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1; 188 + 189 + pdev = platform_device_register_simple("ar724x-pci", id, 190 + res, ARRAY_SIZE(res)); 191 + return pdev; 192 + } 193 + 134 194 int __init ath79_register_pci(void) 135 195 { 136 - if (soc_is_ar71xx()) 137 - return ar71xx_pcibios_init(); 196 + struct platform_device *pdev = 
NULL; 138 197 139 - if (soc_is_ar724x()) 140 - return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2); 141 - 142 - if (soc_is_ar9342() || soc_is_ar9344()) { 198 + if (soc_is_ar71xx()) { 199 + pdev = ath79_register_pci_ar71xx(); 200 + } else if (soc_is_ar724x()) { 201 + pdev = ath79_register_pci_ar724x(-1, 202 + AR724X_PCI_CFG_BASE, 203 + AR724X_PCI_CTRL_BASE, 204 + AR724X_PCI_CRP_BASE, 205 + AR724X_PCI_MEM_BASE, 206 + AR724X_PCI_MEM_SIZE, 207 + 0, 208 + ATH79_CPU_IRQ(2)); 209 + } else if (soc_is_ar9342() || 210 + soc_is_ar9344()) { 143 211 u32 bootstrap; 144 212 145 213 bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); 146 - if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC) 147 - return ar724x_pcibios_init(ATH79_IP2_IRQ(0)); 214 + if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0) 215 + return -ENODEV; 216 + 217 + pdev = ath79_register_pci_ar724x(-1, 218 + AR724X_PCI_CFG_BASE, 219 + AR724X_PCI_CTRL_BASE, 220 + AR724X_PCI_CRP_BASE, 221 + AR724X_PCI_MEM_BASE, 222 + AR724X_PCI_MEM_SIZE, 223 + 0, 224 + ATH79_IP2_IRQ(0)); 225 + } else if (soc_is_qca9558()) { 226 + pdev = ath79_register_pci_ar724x(0, 227 + QCA955X_PCI_CFG_BASE0, 228 + QCA955X_PCI_CTRL_BASE0, 229 + QCA955X_PCI_CRP_BASE0, 230 + QCA955X_PCI_MEM_BASE0, 231 + QCA955X_PCI_MEM_SIZE, 232 + 0, 233 + ATH79_IP2_IRQ(0)); 234 + 235 + pdev = ath79_register_pci_ar724x(1, 236 + QCA955X_PCI_CFG_BASE1, 237 + QCA955X_PCI_CTRL_BASE1, 238 + QCA955X_PCI_CRP_BASE1, 239 + QCA955X_PCI_MEM_BASE1, 240 + QCA955X_PCI_MEM_SIZE, 241 + 1, 242 + ATH79_IP3_IRQ(2)); 243 + } else { 244 + /* No PCI support */ 245 + return -ENODEV; 148 246 } 149 247 150 - return -ENODEV; 248 + if (!pdev) 249 + pr_err("unable to register PCI controller device\n"); 250 + 251 + return pdev ? 0 : -ENODEV; 151 252 }
+1
arch/mips/ath79/pci.h
··· 14 14 #define _ATH79_PCI_H 15 15 16 16 struct ath79_pci_irq { 17 + int bus; 17 18 u8 slot; 18 19 u8 pin; 19 20 int irq;
+17 -1
arch/mips/ath79/setup.c
··· 164 164 rev = id & AR934X_REV_ID_REVISION_MASK; 165 165 break; 166 166 167 + case REV_ID_MAJOR_QCA9556: 168 + ath79_soc = ATH79_SOC_QCA9556; 169 + chip = "9556"; 170 + rev = id & QCA955X_REV_ID_REVISION_MASK; 171 + break; 172 + 173 + case REV_ID_MAJOR_QCA9558: 174 + ath79_soc = ATH79_SOC_QCA9558; 175 + chip = "9558"; 176 + rev = id & QCA955X_REV_ID_REVISION_MASK; 177 + break; 178 + 167 179 default: 168 180 panic("ath79: unknown SoC, id:0x%08x", id); 169 181 } 170 182 171 183 ath79_soc_rev = rev; 172 184 173 - sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev); 185 + if (soc_is_qca955x()) 186 + sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u", 187 + chip, rev); 188 + else 189 + sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev); 174 190 pr_info("SoC: %s\n", ath79_sys_type); 175 191 } 176 192
+119 -42
arch/mips/bcm47xx/nvram.c
··· 3 3 * 4 4 * Copyright (C) 2005 Broadcom Corporation 5 5 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> 6 - * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de> 6 + * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de> 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify it 9 9 * under the terms of the GNU General Public License as published by the ··· 18 18 #include <linux/kernel.h> 19 19 #include <linux/string.h> 20 20 #include <asm/addrspace.h> 21 - #include <asm/mach-bcm47xx/nvram.h> 21 + #include <bcm47xx_nvram.h> 22 22 #include <asm/mach-bcm47xx/bcm47xx.h> 23 23 24 24 static char nvram_buf[NVRAM_SPACE]; 25 25 26 - /* Probe for NVRAM header */ 27 - static void early_nvram_init(void) 26 + static u32 find_nvram_size(u32 end) 28 27 { 29 - #ifdef CONFIG_BCM47XX_SSB 30 - struct ssb_mipscore *mcore_ssb; 31 - #endif 32 - #ifdef CONFIG_BCM47XX_BCMA 33 - struct bcma_drv_cc *bcma_cc; 34 - #endif 35 28 struct nvram_header *header; 29 + u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000}; 36 30 int i; 37 - u32 base = 0; 38 - u32 lim = 0; 39 - u32 off; 40 - u32 *src, *dst; 41 31 42 - switch (bcm47xx_bus_type) { 43 - #ifdef CONFIG_BCM47XX_SSB 44 - case BCM47XX_BUS_TYPE_SSB: 45 - mcore_ssb = &bcm47xx_bus.ssb.mipscore; 46 - base = mcore_ssb->pflash.window; 47 - lim = mcore_ssb->pflash.window_size; 48 - break; 49 - #endif 50 - #ifdef CONFIG_BCM47XX_BCMA 51 - case BCM47XX_BUS_TYPE_BCMA: 52 - bcma_cc = &bcm47xx_bus.bcma.bus.drv_cc; 53 - base = bcma_cc->pflash.window; 54 - lim = bcma_cc->pflash.window_size; 55 - break; 56 - #endif 32 + for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { 33 + header = (struct nvram_header *)KSEG1ADDR(end - nvram_sizes[i]); 34 + if (header->magic == NVRAM_HEADER) 35 + return nvram_sizes[i]; 57 36 } 58 37 38 + return 0; 39 + } 40 + 41 + /* Probe for NVRAM header */ 42 + static int nvram_find_and_copy(u32 base, u32 lim) 43 + { 44 + struct nvram_header *header; 45 + int i; 46 + u32 off; 47 + u32 *src, *dst; 48 + 
u32 size; 49 + 50 + /* TODO: when nvram is on nand flash check for bad blocks first. */ 59 51 off = FLASH_MIN; 60 52 while (off <= lim) { 61 53 /* Windowed flash access */ 62 - header = (struct nvram_header *) 63 - KSEG1ADDR(base + off - NVRAM_SPACE); 64 - if (header->magic == NVRAM_HEADER) 54 + size = find_nvram_size(base + off); 55 + if (size) { 56 + header = (struct nvram_header *)KSEG1ADDR(base + off - 57 + size); 65 58 goto found; 59 + } 66 60 off <<= 1; 67 61 } 68 62 69 63 /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ 70 64 header = (struct nvram_header *) KSEG1ADDR(base + 4096); 71 - if (header->magic == NVRAM_HEADER) 65 + if (header->magic == NVRAM_HEADER) { 66 + size = NVRAM_SPACE; 72 67 goto found; 68 + } 73 69 74 70 header = (struct nvram_header *) KSEG1ADDR(base + 1024); 75 - if (header->magic == NVRAM_HEADER) 71 + if (header->magic == NVRAM_HEADER) { 72 + size = NVRAM_SPACE; 76 73 goto found; 74 + } 77 75 78 - return; 76 + pr_err("no nvram found\n"); 77 + return -ENXIO; 79 78 80 79 found: 80 + 81 + if (header->len > size) 82 + pr_err("The nvram size accoridng to the header seems to be bigger than the partition on flash\n"); 83 + if (header->len > NVRAM_SPACE) 84 + pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", 85 + header->len, NVRAM_SPACE); 86 + 81 87 src = (u32 *) header; 82 88 dst = (u32 *) nvram_buf; 83 89 for (i = 0; i < sizeof(struct nvram_header); i += 4) 84 90 *dst++ = *src++; 85 - for (; i < header->len && i < NVRAM_SPACE; i += 4) 91 + for (; i < header->len && i < NVRAM_SPACE && i < size; i += 4) 86 92 *dst++ = le32_to_cpu(*src++); 93 + memset(dst, 0x0, NVRAM_SPACE - i); 94 + 95 + return 0; 87 96 } 88 97 89 - int nvram_getenv(char *name, char *val, size_t val_len) 98 + #ifdef CONFIG_BCM47XX_SSB 99 + static int nvram_init_ssb(void) 100 + { 101 + struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore; 102 + u32 base; 103 + u32 lim; 104 + 105 + if 
(mcore->pflash.present) { 106 + base = mcore->pflash.window; 107 + lim = mcore->pflash.window_size; 108 + } else { 109 + pr_err("Couldn't find supported flash memory\n"); 110 + return -ENXIO; 111 + } 112 + 113 + return nvram_find_and_copy(base, lim); 114 + } 115 + #endif 116 + 117 + #ifdef CONFIG_BCM47XX_BCMA 118 + static int nvram_init_bcma(void) 119 + { 120 + struct bcma_drv_cc *cc = &bcm47xx_bus.bcma.bus.drv_cc; 121 + u32 base; 122 + u32 lim; 123 + 124 + #ifdef CONFIG_BCMA_NFLASH 125 + if (cc->nflash.boot) { 126 + base = BCMA_SOC_FLASH1; 127 + lim = BCMA_SOC_FLASH1_SZ; 128 + } else 129 + #endif 130 + if (cc->pflash.present) { 131 + base = cc->pflash.window; 132 + lim = cc->pflash.window_size; 133 + #ifdef CONFIG_BCMA_SFLASH 134 + } else if (cc->sflash.present) { 135 + base = cc->sflash.window; 136 + lim = cc->sflash.size; 137 + #endif 138 + } else { 139 + pr_err("Couldn't find supported flash memory\n"); 140 + return -ENXIO; 141 + } 142 + 143 + return nvram_find_and_copy(base, lim); 144 + } 145 + #endif 146 + 147 + static int nvram_init(void) 148 + { 149 + switch (bcm47xx_bus_type) { 150 + #ifdef CONFIG_BCM47XX_SSB 151 + case BCM47XX_BUS_TYPE_SSB: 152 + return nvram_init_ssb(); 153 + #endif 154 + #ifdef CONFIG_BCM47XX_BCMA 155 + case BCM47XX_BUS_TYPE_BCMA: 156 + return nvram_init_bcma(); 157 + #endif 158 + } 159 + return -ENXIO; 160 + } 161 + 162 + int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len) 90 163 { 91 164 char *var, *value, *end, *eq; 165 + int err; 92 166 93 167 if (!name) 94 - return NVRAM_ERR_INV_PARAM; 168 + return -EINVAL; 95 169 96 - if (!nvram_buf[0]) 97 - early_nvram_init(); 170 + if (!nvram_buf[0]) { 171 + err = nvram_init(); 172 + if (err) 173 + return err; 174 + } 98 175 99 176 /* Look for name=value and return value */ 100 177 var = &nvram_buf[sizeof(struct nvram_header)]; ··· 187 110 return snprintf(val, val_len, "%s", value); 188 111 } 189 112 } 190 - return NVRAM_ERR_ENVNOTFOUND; 113 + return -ENOENT; 191 114 } 192 - 
EXPORT_SYMBOL(nvram_getenv); 115 + EXPORT_SYMBOL(bcm47xx_nvram_getenv);
+3 -3
arch/mips/bcm47xx/setup.c
··· 35 35 #include <asm/reboot.h> 36 36 #include <asm/time.h> 37 37 #include <bcm47xx.h> 38 - #include <asm/mach-bcm47xx/nvram.h> 38 + #include <bcm47xx_nvram.h> 39 39 40 40 union bcm47xx_bus bcm47xx_bus; 41 41 EXPORT_SYMBOL(bcm47xx_bus); ··· 115 115 memset(&iv->sprom, 0, sizeof(struct ssb_sprom)); 116 116 bcm47xx_fill_sprom(&iv->sprom, NULL, false); 117 117 118 - if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) 118 + if (bcm47xx_nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) 119 119 iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); 120 120 121 121 return 0; ··· 138 138 panic("Failed to initialize SSB bus (err %d)", err); 139 139 140 140 mcore = &bcm47xx_bus.ssb.mipscore; 141 - if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) { 141 + if (bcm47xx_nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) { 142 142 if (strstr(buf, "console=ttyS1")) { 143 143 struct ssb_serial_port port; 144 144
+10 -12
arch/mips/bcm47xx/sprom.c
··· 27 27 */ 28 28 29 29 #include <bcm47xx.h> 30 - #include <nvram.h> 30 + #include <bcm47xx_nvram.h> 31 31 32 32 static void create_key(const char *prefix, const char *postfix, 33 33 const char *name, char *buf, int len) ··· 50 50 51 51 create_key(prefix, postfix, name, key, sizeof(key)); 52 52 53 - err = nvram_getenv(key, buf, len); 54 - if (fallback && err == NVRAM_ERR_ENVNOTFOUND && prefix) { 53 + err = bcm47xx_nvram_getenv(key, buf, len); 54 + if (fallback && err == -ENOENT && prefix) { 55 55 create_key(NULL, postfix, name, key, sizeof(key)); 56 - err = nvram_getenv(key, buf, len); 56 + err = bcm47xx_nvram_getenv(key, buf, len); 57 57 } 58 58 return err; 59 59 } ··· 71 71 fallback); \ 72 72 if (err < 0) \ 73 73 return; \ 74 - err = kstrto ## type (buf, 0, &var); \ 74 + err = kstrto ## type(strim(buf), 0, &var); \ 75 75 if (err) { \ 76 76 pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \ 77 77 prefix, name, postfix, buf, err); \ ··· 99 99 err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); 100 100 if (err < 0) 101 101 return; 102 - err = kstrtou32(buf, 0, &val); 102 + err = kstrtou32(strim(buf), 0, &val); 103 103 if (err) { 104 104 pr_warn("can not parse nvram name %s%s with value %s got %i\n", 105 105 prefix, name, buf, err); ··· 120 120 err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); 121 121 if (err < 0) 122 122 return; 123 - err = kstrtou32(buf, 0, &val); 123 + err = kstrtou32(strim(buf), 0, &val); 124 124 if (err) { 125 125 pr_warn("can not parse nvram name %s%s with value %s got %i\n", 126 126 prefix, name, buf, err); ··· 144 144 if (err < 0) 145 145 return; 146 146 147 - nvram_parse_macaddr(buf, *val); 147 + bcm47xx_nvram_parse_macaddr(buf, *val); 148 148 } 149 149 150 150 static void nvram_read_alpha2(const char *prefix, const char *name, ··· 652 652 static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix, 653 653 bool fallback) 654 654 { 655 - nvram_read_u16(prefix, NULL, 
"boardrev", &sprom->board_rev, 0, 656 - fallback); 655 + nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0, true); 657 656 nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0, 658 657 fallback); 659 - nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, 660 - fallback); 658 + nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, true); 661 659 nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, 662 660 &sprom->boardflags_hi, fallback); 663 661 nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+2 -2
arch/mips/bcm63xx/early_printk.c
··· 10 10 #include <bcm63xx_io.h> 11 11 #include <bcm63xx_regs.h> 12 12 13 - static void __init wait_xfered(void) 13 + static void wait_xfered(void) 14 14 { 15 15 unsigned int val; 16 16 ··· 22 22 } while (1); 23 23 } 24 24 25 - void __init prom_putchar(char c) 25 + void prom_putchar(char c) 26 26 { 27 27 wait_xfered(); 28 28 bcm_uart0_writel(c, UART_FIFO_REG);
+1
arch/mips/configs/ath79_defconfig
··· 1 1 CONFIG_ATH79=y 2 2 CONFIG_ATH79_MACH_AP121=y 3 + CONFIG_ATH79_MACH_AP136=y 3 4 CONFIG_ATH79_MACH_AP81=y 4 5 CONFIG_ATH79_MACH_DB120=y 5 6 CONFIG_ATH79_MACH_PB44=y
+167
arch/mips/configs/rt305x_defconfig
··· 1 + CONFIG_RALINK=y 2 + CONFIG_DTB_RT305X_EVAL=y 3 + CONFIG_CPU_MIPS32_R2=y 4 + # CONFIG_COMPACTION is not set 5 + # CONFIG_CROSS_MEMORY_ATTACH is not set 6 + CONFIG_HZ_100=y 7 + # CONFIG_SECCOMP is not set 8 + CONFIG_EXPERIMENTAL=y 9 + # CONFIG_LOCALVERSION_AUTO is not set 10 + CONFIG_SYSVIPC=y 11 + CONFIG_HIGH_RES_TIMERS=y 12 + CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_INITRAMFS_SOURCE="" 14 + CONFIG_INITRAMFS_ROOT_UID=1000 15 + CONFIG_INITRAMFS_ROOT_GID=1000 16 + # CONFIG_RD_GZIP is not set 17 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 18 + CONFIG_KALLSYMS_ALL=y 19 + # CONFIG_AIO is not set 20 + CONFIG_EMBEDDED=y 21 + # CONFIG_VM_EVENT_COUNTERS is not set 22 + # CONFIG_SLUB_DEBUG is not set 23 + # CONFIG_COMPAT_BRK is not set 24 + CONFIG_MODULES=y 25 + CONFIG_MODULE_UNLOAD=y 26 + # CONFIG_BLK_DEV_BSG is not set 27 + CONFIG_PARTITION_ADVANCED=y 28 + # CONFIG_IOSCHED_CFQ is not set 29 + # CONFIG_COREDUMP is not set 30 + # CONFIG_SUSPEND is not set 31 + CONFIG_NET=y 32 + CONFIG_PACKET=y 33 + CONFIG_UNIX=y 34 + CONFIG_INET=y 35 + CONFIG_IP_MULTICAST=y 36 + CONFIG_IP_ADVANCED_ROUTER=y 37 + CONFIG_IP_MULTIPLE_TABLES=y 38 + CONFIG_IP_ROUTE_MULTIPATH=y 39 + CONFIG_IP_ROUTE_VERBOSE=y 40 + CONFIG_IP_MROUTE=y 41 + CONFIG_IP_MROUTE_MULTIPLE_TABLES=y 42 + CONFIG_ARPD=y 43 + CONFIG_SYN_COOKIES=y 44 + # CONFIG_INET_XFRM_MODE_TRANSPORT is not set 45 + # CONFIG_INET_XFRM_MODE_TUNNEL is not set 46 + # CONFIG_INET_XFRM_MODE_BEET is not set 47 + # CONFIG_INET_LRO is not set 48 + # CONFIG_INET_DIAG is not set 49 + CONFIG_TCP_CONG_ADVANCED=y 50 + # CONFIG_TCP_CONG_BIC is not set 51 + # CONFIG_TCP_CONG_WESTWOOD is not set 52 + # CONFIG_TCP_CONG_HTCP is not set 53 + # CONFIG_IPV6 is not set 54 + CONFIG_NETFILTER=y 55 + # CONFIG_BRIDGE_NETFILTER is not set 56 + CONFIG_NF_CONNTRACK=m 57 + CONFIG_NF_CONNTRACK_FTP=m 58 + CONFIG_NF_CONNTRACK_IRC=m 59 + CONFIG_NETFILTER_XT_TARGET_CT=m 60 + CONFIG_NETFILTER_XT_TARGET_LOG=m 61 + CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 62 + CONFIG_NETFILTER_XT_MATCH_COMMENT=m 
63 + CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 64 + CONFIG_NETFILTER_XT_MATCH_LIMIT=m 65 + CONFIG_NETFILTER_XT_MATCH_MAC=m 66 + CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m 67 + CONFIG_NETFILTER_XT_MATCH_STATE=m 68 + CONFIG_NF_CONNTRACK_IPV4=m 69 + # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set 70 + CONFIG_IP_NF_IPTABLES=m 71 + CONFIG_IP_NF_FILTER=m 72 + CONFIG_IP_NF_TARGET_REJECT=m 73 + CONFIG_IP_NF_MANGLE=m 74 + CONFIG_IP_NF_RAW=m 75 + CONFIG_BRIDGE=y 76 + # CONFIG_BRIDGE_IGMP_SNOOPING is not set 77 + CONFIG_VLAN_8021Q=y 78 + CONFIG_NET_SCHED=y 79 + CONFIG_HAMRADIO=y 80 + CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 81 + # CONFIG_FIRMWARE_IN_KERNEL is not set 82 + CONFIG_MTD=y 83 + CONFIG_MTD_CMDLINE_PARTS=y 84 + CONFIG_MTD_CHAR=y 85 + CONFIG_MTD_BLOCK=y 86 + CONFIG_MTD_CFI=y 87 + CONFIG_MTD_CFI_AMDSTD=y 88 + CONFIG_MTD_COMPLEX_MAPPINGS=y 89 + CONFIG_MTD_PHYSMAP=y 90 + CONFIG_MTD_PHYSMAP_OF=y 91 + CONFIG_MTD_M25P80=y 92 + CONFIG_EEPROM_93CX6=m 93 + CONFIG_SCSI=y 94 + CONFIG_BLK_DEV_SD=y 95 + CONFIG_NETDEVICES=y 96 + # CONFIG_NET_VENDOR_WIZNET is not set 97 + CONFIG_PHYLIB=y 98 + CONFIG_PPP=m 99 + CONFIG_PPP_FILTER=y 100 + CONFIG_PPP_MULTILINK=y 101 + CONFIG_PPPOE=m 102 + CONFIG_PPP_ASYNC=m 103 + CONFIG_ISDN=y 104 + CONFIG_INPUT=m 105 + CONFIG_INPUT_POLLDEV=m 106 + # CONFIG_INPUT_MOUSEDEV is not set 107 + # CONFIG_KEYBOARD_ATKBD is not set 108 + # CONFIG_INPUT_MOUSE is not set 109 + CONFIG_INPUT_MISC=y 110 + # CONFIG_SERIO is not set 111 + # CONFIG_VT is not set 112 + # CONFIG_LEGACY_PTYS is not set 113 + # CONFIG_DEVKMEM is not set 114 + CONFIG_SERIAL_8250=y 115 + CONFIG_SERIAL_8250_CONSOLE=y 116 + CONFIG_SERIAL_8250_RUNTIME_UARTS=2 117 + CONFIG_SERIAL_OF_PLATFORM=y 118 + CONFIG_SPI=y 119 + # CONFIG_HWMON is not set 120 + CONFIG_WATCHDOG=y 121 + # CONFIG_HID is not set 122 + # CONFIG_USB_HID is not set 123 + CONFIG_USB=y 124 + CONFIG_USB_ANNOUNCE_NEW_DEVICES=y 125 + CONFIG_USB_STORAGE=y 126 + CONFIG_USB_STORAGE_DEBUG=y 127 + CONFIG_NEW_LEDS=y 128 + CONFIG_LEDS_CLASS=y 129 + 
CONFIG_LEDS_TRIGGERS=y 130 + CONFIG_LEDS_TRIGGER_TIMER=y 131 + CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 132 + CONFIG_STAGING=y 133 + # CONFIG_IOMMU_SUPPORT is not set 134 + # CONFIG_DNOTIFY is not set 135 + # CONFIG_PROC_PAGE_MONITOR is not set 136 + CONFIG_TMPFS=y 137 + CONFIG_TMPFS_XATTR=y 138 + CONFIG_JFFS2_FS=y 139 + CONFIG_JFFS2_SUMMARY=y 140 + CONFIG_JFFS2_FS_XATTR=y 141 + # CONFIG_JFFS2_FS_POSIX_ACL is not set 142 + # CONFIG_JFFS2_FS_SECURITY is not set 143 + CONFIG_JFFS2_COMPRESSION_OPTIONS=y 144 + # CONFIG_JFFS2_ZLIB is not set 145 + CONFIG_SQUASHFS=y 146 + # CONFIG_SQUASHFS_ZLIB is not set 147 + CONFIG_SQUASHFS_XZ=y 148 + CONFIG_PRINTK_TIME=y 149 + # CONFIG_ENABLE_MUST_CHECK is not set 150 + CONFIG_MAGIC_SYSRQ=y 151 + CONFIG_STRIP_ASM_SYMS=y 152 + CONFIG_DEBUG_FS=y 153 + # CONFIG_SCHED_DEBUG is not set 154 + # CONFIG_FTRACE is not set 155 + CONFIG_CMDLINE_BOOL=y 156 + CONFIG_CRYPTO_MANAGER=m 157 + CONFIG_CRYPTO_ARC4=m 158 + # CONFIG_CRYPTO_ANSI_CPRNG is not set 159 + CONFIG_CRC_ITU_T=m 160 + CONFIG_CRC32_SARWATE=y 161 + # CONFIG_XZ_DEC_X86 is not set 162 + # CONFIG_XZ_DEC_POWERPC is not set 163 + # CONFIG_XZ_DEC_IA64 is not set 164 + # CONFIG_XZ_DEC_ARM is not set 165 + # CONFIG_XZ_DEC_ARMTHUMB is not set 166 + # CONFIG_XZ_DEC_SPARC is not set 167 + CONFIG_AVERAGE=y
+7
arch/mips/include/asm/cpu-features.h
··· 98 98 #ifndef cpu_has_rixi 99 99 #define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI) 100 100 #endif 101 + #ifndef cpu_has_mmips 102 + #define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS) 103 + #endif 101 104 #ifndef cpu_has_vtag_icache 102 105 #define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG) 103 106 #endif ··· 274 271 275 272 #ifndef cpu_has_perf_cntr_intr_bit 276 273 #define cpu_has_perf_cntr_intr_bit (cpu_data[0].options & MIPS_CPU_PCI) 274 + #endif 275 + 276 + #ifndef cpu_has_vz 277 + #define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ) 277 278 #endif 278 279 279 280 #endif /* __ASM_CPU_FEATURES_H */
+4 -1
arch/mips/include/asm/cpu.h
··· 96 96 #define PRID_IMP_1004K 0x9900 97 97 #define PRID_IMP_1074K 0x9a00 98 98 #define PRID_IMP_M14KC 0x9c00 99 + #define PRID_IMP_M14KEC 0x9e00 99 100 100 101 /* 101 102 * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE ··· 265 264 CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K, 266 265 CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350, 267 266 CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC, 267 + CPU_M14KEC, 268 268 269 269 /* 270 270 * MIPS64 class processors ··· 324 322 #define MIPS_CPU_ULRI 0x00200000 /* CPU has ULRI feature */ 325 323 #define MIPS_CPU_PCI 0x00400000 /* CPU has Perf Ctr Int indicator */ 326 324 #define MIPS_CPU_RIXI 0x00800000 /* CPU has TLB Read/eXec Inhibit */ 325 + #define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */ 327 326 328 327 /* 329 328 * CPU ASE encodings ··· 336 333 #define MIPS_ASE_DSP 0x00000010 /* Signal Processing ASE */ 337 334 #define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */ 338 335 #define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */ 339 - 336 + #define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */ 340 337 341 338 #endif /* _ASM_CPU_H */
+1
arch/mips/include/asm/gic.h
··· 359 359 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ 360 360 #define GIC_PIN_TO_VEC_OFFSET (1) 361 361 362 + extern int gic_present; 362 363 extern unsigned long _gic_base; 363 364 extern unsigned int gic_irq_base; 364 365 extern unsigned int gic_irq_flags[];
+1 -1
arch/mips/include/asm/hazards.h
··· 141 141 142 142 #elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \ 143 143 defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \ 144 - defined(CONFIG_CPU_R5500) 144 + defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR) 145 145 146 146 /* 147 147 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
+6
arch/mips/include/asm/irq_cpu.h
··· 17 17 extern void rm7k_cpu_irq_init(void); 18 18 extern void rm9k_cpu_irq_init(void); 19 19 20 + #ifdef CONFIG_IRQ_DOMAIN 21 + struct device_node; 22 + extern int mips_cpu_intc_init(struct device_node *of_node, 23 + struct device_node *parent); 24 + #endif 25 + 20 26 #endif /* _ASM_IRQ_CPU_H */
+124
arch/mips/include/asm/mach-ath79/ar71xx_regs.h
··· 41 41 #define AR71XX_RESET_BASE (AR71XX_APB_BASE + 0x00060000) 42 42 #define AR71XX_RESET_SIZE 0x100 43 43 44 + #define AR71XX_PCI_MEM_BASE 0x10000000 45 + #define AR71XX_PCI_MEM_SIZE 0x07000000 46 + 47 + #define AR71XX_PCI_WIN0_OFFS 0x10000000 48 + #define AR71XX_PCI_WIN1_OFFS 0x11000000 49 + #define AR71XX_PCI_WIN2_OFFS 0x12000000 50 + #define AR71XX_PCI_WIN3_OFFS 0x13000000 51 + #define AR71XX_PCI_WIN4_OFFS 0x14000000 52 + #define AR71XX_PCI_WIN5_OFFS 0x15000000 53 + #define AR71XX_PCI_WIN6_OFFS 0x16000000 54 + #define AR71XX_PCI_WIN7_OFFS 0x07000000 55 + 56 + #define AR71XX_PCI_CFG_BASE \ 57 + (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000) 58 + #define AR71XX_PCI_CFG_SIZE 0x100 59 + 44 60 #define AR7240_USB_CTRL_BASE (AR71XX_APB_BASE + 0x00030000) 45 61 #define AR7240_USB_CTRL_SIZE 0x100 46 62 #define AR7240_OHCI_BASE 0x1b000000 47 63 #define AR7240_OHCI_SIZE 0x1000 64 + 65 + #define AR724X_PCI_MEM_BASE 0x10000000 66 + #define AR724X_PCI_MEM_SIZE 0x04000000 67 + 68 + #define AR724X_PCI_CFG_BASE 0x14000000 69 + #define AR724X_PCI_CFG_SIZE 0x1000 70 + #define AR724X_PCI_CRP_BASE (AR71XX_APB_BASE + 0x000c0000) 71 + #define AR724X_PCI_CRP_SIZE 0x1000 72 + #define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000) 73 + #define AR724X_PCI_CTRL_SIZE 0x100 48 74 49 75 #define AR724X_EHCI_BASE 0x1b000000 50 76 #define AR724X_EHCI_SIZE 0x1000 ··· 93 67 #define AR934X_EHCI_SIZE 0x200 94 68 #define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000) 95 69 #define AR934X_SRIF_SIZE 0x1000 70 + 71 + #define QCA955X_PCI_MEM_BASE0 0x10000000 72 + #define QCA955X_PCI_MEM_BASE1 0x12000000 73 + #define QCA955X_PCI_MEM_SIZE 0x02000000 74 + #define QCA955X_PCI_CFG_BASE0 0x14000000 75 + #define QCA955X_PCI_CFG_BASE1 0x16000000 76 + #define QCA955X_PCI_CFG_SIZE 0x1000 77 + #define QCA955X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000) 78 + #define QCA955X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000) 79 + #define QCA955X_PCI_CRP_SIZE 0x1000 80 + #define QCA955X_PCI_CTRL_BASE0 
(AR71XX_APB_BASE + 0x000f0000) 81 + #define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000) 82 + #define QCA955X_PCI_CTRL_SIZE 0x100 83 + 84 + #define QCA955X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000) 85 + #define QCA955X_WMAC_SIZE 0x20000 86 + #define QCA955X_EHCI0_BASE 0x1b000000 87 + #define QCA955X_EHCI1_BASE 0x1b400000 88 + #define QCA955X_EHCI_SIZE 0x1000 96 89 97 90 /* 98 91 * DDR_CTRL block ··· 244 199 #define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21) 245 200 #define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24) 246 201 202 + #define QCA955X_PLL_CPU_CONFIG_REG 0x00 203 + #define QCA955X_PLL_DDR_CONFIG_REG 0x04 204 + #define QCA955X_PLL_CLK_CTRL_REG 0x08 205 + 206 + #define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT 0 207 + #define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f 208 + #define QCA955X_PLL_CPU_CONFIG_NINT_SHIFT 6 209 + #define QCA955X_PLL_CPU_CONFIG_NINT_MASK 0x3f 210 + #define QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT 12 211 + #define QCA955X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f 212 + #define QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19 213 + #define QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3 214 + 215 + #define QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT 0 216 + #define QCA955X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff 217 + #define QCA955X_PLL_DDR_CONFIG_NINT_SHIFT 10 218 + #define QCA955X_PLL_DDR_CONFIG_NINT_MASK 0x3f 219 + #define QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT 16 220 + #define QCA955X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f 221 + #define QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23 222 + #define QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7 223 + 224 + #define QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2) 225 + #define QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3) 226 + #define QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4) 227 + #define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5 228 + #define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f 229 + #define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10 230 + #define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f 231 + #define 
QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15 232 + #define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f 233 + #define QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20) 234 + #define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21) 235 + #define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24) 236 + 247 237 /* 248 238 * USB_CONFIG block 249 239 */ ··· 317 237 #define AR934X_RESET_REG_RESET_MODULE 0x1c 318 238 #define AR934X_RESET_REG_BOOTSTRAP 0xb0 319 239 #define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac 240 + 241 + #define QCA955X_RESET_REG_RESET_MODULE 0x1c 242 + #define QCA955X_RESET_REG_BOOTSTRAP 0xb0 243 + #define QCA955X_RESET_REG_EXT_INT_STATUS 0xac 320 244 321 245 #define MISC_INT_ETHSW BIT(12) 322 246 #define MISC_INT_TIMER4 BIT(10) ··· 399 315 #define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1) 400 316 #define AR934X_BOOTSTRAP_DDR1 BIT(0) 401 317 318 + #define QCA955X_BOOTSTRAP_REF_CLK_40 BIT(4) 319 + 402 320 #define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0) 403 321 #define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1) 404 322 #define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2) ··· 419 333 AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \ 420 334 AR934X_PCIE_WMAC_INT_PCIE_RC3) 421 335 336 + #define QCA955X_EXT_INT_WMAC_MISC BIT(0) 337 + #define QCA955X_EXT_INT_WMAC_TX BIT(1) 338 + #define QCA955X_EXT_INT_WMAC_RXLP BIT(2) 339 + #define QCA955X_EXT_INT_WMAC_RXHP BIT(3) 340 + #define QCA955X_EXT_INT_PCIE_RC1 BIT(4) 341 + #define QCA955X_EXT_INT_PCIE_RC1_INT0 BIT(5) 342 + #define QCA955X_EXT_INT_PCIE_RC1_INT1 BIT(6) 343 + #define QCA955X_EXT_INT_PCIE_RC1_INT2 BIT(7) 344 + #define QCA955X_EXT_INT_PCIE_RC1_INT3 BIT(8) 345 + #define QCA955X_EXT_INT_PCIE_RC2 BIT(12) 346 + #define QCA955X_EXT_INT_PCIE_RC2_INT0 BIT(13) 347 + #define QCA955X_EXT_INT_PCIE_RC2_INT1 BIT(14) 348 + #define QCA955X_EXT_INT_PCIE_RC2_INT2 BIT(15) 349 + #define QCA955X_EXT_INT_PCIE_RC2_INT3 BIT(16) 350 + #define QCA955X_EXT_INT_USB1 BIT(24) 351 + #define QCA955X_EXT_INT_USB2 BIT(28) 352 + 353 + #define 
QCA955X_EXT_INT_WMAC_ALL \ 354 + (QCA955X_EXT_INT_WMAC_MISC | QCA955X_EXT_INT_WMAC_TX | \ 355 + QCA955X_EXT_INT_WMAC_RXLP | QCA955X_EXT_INT_WMAC_RXHP) 356 + 357 + #define QCA955X_EXT_INT_PCIE_RC1_ALL \ 358 + (QCA955X_EXT_INT_PCIE_RC1 | QCA955X_EXT_INT_PCIE_RC1_INT0 | \ 359 + QCA955X_EXT_INT_PCIE_RC1_INT1 | QCA955X_EXT_INT_PCIE_RC1_INT2 | \ 360 + QCA955X_EXT_INT_PCIE_RC1_INT3) 361 + 362 + #define QCA955X_EXT_INT_PCIE_RC2_ALL \ 363 + (QCA955X_EXT_INT_PCIE_RC2 | QCA955X_EXT_INT_PCIE_RC2_INT0 | \ 364 + QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \ 365 + QCA955X_EXT_INT_PCIE_RC2_INT3) 366 + 422 367 #define REV_ID_MAJOR_MASK 0xfff0 423 368 #define REV_ID_MAJOR_AR71XX 0x00a0 424 369 #define REV_ID_MAJOR_AR913X 0x00b0 ··· 461 344 #define REV_ID_MAJOR_AR9341 0x0120 462 345 #define REV_ID_MAJOR_AR9342 0x1120 463 346 #define REV_ID_MAJOR_AR9344 0x2120 347 + #define REV_ID_MAJOR_QCA9556 0x0130 348 + #define REV_ID_MAJOR_QCA9558 0x1130 464 349 465 350 #define AR71XX_REV_ID_MINOR_MASK 0x3 466 351 #define AR71XX_REV_ID_MINOR_AR7130 0x0 ··· 482 363 #define AR724X_REV_ID_REVISION_MASK 0x3 483 364 484 365 #define AR934X_REV_ID_REVISION_MASK 0xf 366 + 367 + #define QCA955X_REV_ID_REVISION_MASK 0xf 485 368 486 369 /* 487 370 * SPI block ··· 522 401 #define AR71XX_GPIO_REG_INT_ENABLE 0x24 523 402 #define AR71XX_GPIO_REG_FUNC 0x28 524 403 404 + #define AR934X_GPIO_REG_FUNC 0x6c 405 + 525 406 #define AR71XX_GPIO_COUNT 16 526 407 #define AR7240_GPIO_COUNT 18 527 408 #define AR7241_GPIO_COUNT 20 528 409 #define AR913X_GPIO_COUNT 22 529 410 #define AR933X_GPIO_COUNT 30 530 411 #define AR934X_GPIO_COUNT 23 412 + #define QCA955X_GPIO_COUNT 24 531 413 532 414 /* 533 415 * SRIF block
+17
arch/mips/include/asm/mach-ath79/ath79.h
··· 32 32 ATH79_SOC_AR9341, 33 33 ATH79_SOC_AR9342, 34 34 ATH79_SOC_AR9344, 35 + ATH79_SOC_QCA9556, 36 + ATH79_SOC_QCA9558, 35 37 }; 36 38 37 39 extern enum ath79_soc_type ath79_soc; ··· 98 96 static inline int soc_is_ar934x(void) 99 97 { 100 98 return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344(); 99 + } 100 + 101 + static inline int soc_is_qca9556(void) 102 + { 103 + return ath79_soc == ATH79_SOC_QCA9556; 104 + } 105 + 106 + static inline int soc_is_qca9558(void) 107 + { 108 + return ath79_soc == ATH79_SOC_QCA9558; 109 + } 110 + 111 + static inline int soc_is_qca955x(void) 112 + { 113 + return soc_is_qca9556() || soc_is_qca9558(); 101 114 } 102 115 103 116 extern void __iomem *ath79_ddr_base;
+7 -20
arch/mips/include/asm/mach-ath79/irq.h
··· 10 10 #define __ASM_MACH_ATH79_IRQ_H 11 11 12 12 #define MIPS_CPU_IRQ_BASE 0 13 - #define NR_IRQS 48 13 + #define NR_IRQS 51 14 + 15 + #define ATH79_CPU_IRQ(_x) (MIPS_CPU_IRQ_BASE + (_x)) 14 16 15 17 #define ATH79_MISC_IRQ_BASE 8 16 18 #define ATH79_MISC_IRQ_COUNT 32 19 + #define ATH79_MISC_IRQ(_x) (ATH79_MISC_IRQ_BASE + (_x)) 17 20 18 21 #define ATH79_PCI_IRQ_BASE (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT) 19 22 #define ATH79_PCI_IRQ_COUNT 6 ··· 26 23 #define ATH79_IP2_IRQ_COUNT 2 27 24 #define ATH79_IP2_IRQ(_x) (ATH79_IP2_IRQ_BASE + (_x)) 28 25 29 - #define ATH79_CPU_IRQ_IP2 (MIPS_CPU_IRQ_BASE + 2) 30 - #define ATH79_CPU_IRQ_USB (MIPS_CPU_IRQ_BASE + 3) 31 - #define ATH79_CPU_IRQ_GE0 (MIPS_CPU_IRQ_BASE + 4) 32 - #define ATH79_CPU_IRQ_GE1 (MIPS_CPU_IRQ_BASE + 5) 33 - #define ATH79_CPU_IRQ_MISC (MIPS_CPU_IRQ_BASE + 6) 34 - #define ATH79_CPU_IRQ_TIMER (MIPS_CPU_IRQ_BASE + 7) 35 - 36 - #define ATH79_MISC_IRQ_TIMER (ATH79_MISC_IRQ_BASE + 0) 37 - #define ATH79_MISC_IRQ_ERROR (ATH79_MISC_IRQ_BASE + 1) 38 - #define ATH79_MISC_IRQ_GPIO (ATH79_MISC_IRQ_BASE + 2) 39 - #define ATH79_MISC_IRQ_UART (ATH79_MISC_IRQ_BASE + 3) 40 - #define ATH79_MISC_IRQ_WDOG (ATH79_MISC_IRQ_BASE + 4) 41 - #define ATH79_MISC_IRQ_PERFC (ATH79_MISC_IRQ_BASE + 5) 42 - #define ATH79_MISC_IRQ_OHCI (ATH79_MISC_IRQ_BASE + 6) 43 - #define ATH79_MISC_IRQ_DMA (ATH79_MISC_IRQ_BASE + 7) 44 - #define ATH79_MISC_IRQ_TIMER2 (ATH79_MISC_IRQ_BASE + 8) 45 - #define ATH79_MISC_IRQ_TIMER3 (ATH79_MISC_IRQ_BASE + 9) 46 - #define ATH79_MISC_IRQ_TIMER4 (ATH79_MISC_IRQ_BASE + 10) 47 - #define ATH79_MISC_IRQ_ETHSW (ATH79_MISC_IRQ_BASE + 12) 26 + #define ATH79_IP3_IRQ_BASE (ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT) 27 + #define ATH79_IP3_IRQ_COUNT 3 28 + #define ATH79_IP3_IRQ(_x) (ATH79_IP3_IRQ_BASE + (_x)) 48 29 49 30 #include_next <irq.h> 50 31
-28
arch/mips/include/asm/mach-ath79/pci.h
··· 1 - /* 2 - * Atheros AR71XX/AR724X PCI support 3 - * 4 - * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com> 5 - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> 6 - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License version 2 as published 10 - * by the Free Software Foundation. 11 - */ 12 - 13 - #ifndef __ASM_MACH_ATH79_PCI_H 14 - #define __ASM_MACH_ATH79_PCI_H 15 - 16 - #if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX) 17 - int ar71xx_pcibios_init(void); 18 - #else 19 - static inline int ar71xx_pcibios_init(void) { return 0; } 20 - #endif 21 - 22 - #if defined(CONFIG_PCI_AR724X) 23 - int ar724x_pcibios_init(int irq); 24 - #else 25 - static inline int ar724x_pcibios_init(int irq) { return 0; } 26 - #endif 27 - 28 - #endif /* __ASM_MACH_ATH79_PCI_H */
+5 -8
arch/mips/include/asm/mach-bcm47xx/nvram.h arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
··· 8 8 * option) any later version. 9 9 */ 10 10 11 - #ifndef __NVRAM_H 12 - #define __NVRAM_H 11 + #ifndef __BCM47XX_NVRAM_H 12 + #define __BCM47XX_NVRAM_H 13 13 14 14 #include <linux/types.h> 15 15 #include <linux/kernel.h> ··· 32 32 #define NVRAM_MAX_VALUE_LEN 255 33 33 #define NVRAM_MAX_PARAM_LEN 64 34 34 35 - #define NVRAM_ERR_INV_PARAM -8 36 - #define NVRAM_ERR_ENVNOTFOUND -9 35 + extern int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len); 37 36 38 - extern int nvram_getenv(char *name, char *val, size_t val_len); 39 - 40 - static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6]) 37 + static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6]) 41 38 { 42 39 if (strchr(buf, ':')) 43 40 sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], ··· 48 51 printk(KERN_WARNING "Can not parse mac address: %s\n", buf); 49 52 } 50 53 51 - #endif 54 + #endif /* __BCM47XX_NVRAM_H */
+2
arch/mips/include/asm/mach-lantiq/lantiq.h
··· 34 34 extern void ltq_disable_irq(struct irq_data *data); 35 35 extern void ltq_mask_and_ack_irq(struct irq_data *data); 36 36 extern void ltq_enable_irq(struct irq_data *data); 37 + extern int ltq_eiu_get_irq(int exin); 37 38 38 39 /* clock handling */ 39 40 extern int clk_activate(struct clk *clk); ··· 42 41 extern struct clk *clk_get_cpu(void); 43 42 extern struct clk *clk_get_fpi(void); 44 43 extern struct clk *clk_get_io(void); 44 + extern struct clk *clk_get_ppe(void); 45 45 46 46 /* find out what bootsource we have */ 47 47 extern unsigned char ltq_boot_select(void);
+139
arch/mips/include/asm/mach-ralink/rt305x.h
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Parts of this file are based on Ralink's 2.6.21 BSP 7 + * 8 + * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> 9 + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 10 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 11 + */ 12 + 13 + #ifndef _RT305X_REGS_H_ 14 + #define _RT305X_REGS_H_ 15 + 16 + enum rt305x_soc_type { 17 + RT305X_SOC_UNKNOWN = 0, 18 + RT305X_SOC_RT3050, 19 + RT305X_SOC_RT3052, 20 + RT305X_SOC_RT3350, 21 + RT305X_SOC_RT3352, 22 + RT305X_SOC_RT5350, 23 + }; 24 + 25 + extern enum rt305x_soc_type rt305x_soc; 26 + 27 + static inline int soc_is_rt3050(void) 28 + { 29 + return rt305x_soc == RT305X_SOC_RT3050; 30 + } 31 + 32 + static inline int soc_is_rt3052(void) 33 + { 34 + return rt305x_soc == RT305X_SOC_RT3052; 35 + } 36 + 37 + static inline int soc_is_rt305x(void) 38 + { 39 + return soc_is_rt3050() || soc_is_rt3052(); 40 + } 41 + 42 + static inline int soc_is_rt3350(void) 43 + { 44 + return rt305x_soc == RT305X_SOC_RT3350; 45 + } 46 + 47 + static inline int soc_is_rt3352(void) 48 + { 49 + return rt305x_soc == RT305X_SOC_RT3352; 50 + } 51 + 52 + static inline int soc_is_rt5350(void) 53 + { 54 + return rt305x_soc == RT305X_SOC_RT5350; 55 + } 56 + 57 + #define RT305X_SYSC_BASE 0x10000000 58 + 59 + #define SYSC_REG_CHIP_NAME0 0x00 60 + #define SYSC_REG_CHIP_NAME1 0x04 61 + #define SYSC_REG_CHIP_ID 0x0c 62 + #define SYSC_REG_SYSTEM_CONFIG 0x10 63 + 64 + #define RT3052_CHIP_NAME0 0x30335452 65 + #define RT3052_CHIP_NAME1 0x20203235 66 + 67 + #define RT3350_CHIP_NAME0 0x33335452 68 + #define RT3350_CHIP_NAME1 0x20203035 69 + 70 + #define RT3352_CHIP_NAME0 0x33335452 71 + #define RT3352_CHIP_NAME1 0x20203235 72 + 73 + #define RT5350_CHIP_NAME0 0x33355452 74 + #define RT5350_CHIP_NAME1 0x20203035 75 + 76 + #define 
CHIP_ID_ID_MASK 0xff 77 + #define CHIP_ID_ID_SHIFT 8 78 + #define CHIP_ID_REV_MASK 0xff 79 + 80 + #define RT305X_SYSCFG_CPUCLK_SHIFT 18 81 + #define RT305X_SYSCFG_CPUCLK_MASK 0x1 82 + #define RT305X_SYSCFG_CPUCLK_LOW 0x0 83 + #define RT305X_SYSCFG_CPUCLK_HIGH 0x1 84 + 85 + #define RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT 2 86 + #define RT305X_SYSCFG_CPUCLK_MASK 0x1 87 + #define RT305X_SYSCFG_SRAM_CS0_MODE_WDT 0x1 88 + 89 + #define RT3352_SYSCFG0_CPUCLK_SHIFT 8 90 + #define RT3352_SYSCFG0_CPUCLK_MASK 0x1 91 + #define RT3352_SYSCFG0_CPUCLK_LOW 0x0 92 + #define RT3352_SYSCFG0_CPUCLK_HIGH 0x1 93 + 94 + #define RT5350_SYSCFG0_CPUCLK_SHIFT 8 95 + #define RT5350_SYSCFG0_CPUCLK_MASK 0x3 96 + #define RT5350_SYSCFG0_CPUCLK_360 0x0 97 + #define RT5350_SYSCFG0_CPUCLK_320 0x2 98 + #define RT5350_SYSCFG0_CPUCLK_300 0x3 99 + 100 + /* multi function gpio pins */ 101 + #define RT305X_GPIO_I2C_SD 1 102 + #define RT305X_GPIO_I2C_SCLK 2 103 + #define RT305X_GPIO_SPI_EN 3 104 + #define RT305X_GPIO_SPI_CLK 4 105 + /* GPIO 7-14 is shared between UART0, PCM and I2S interfaces */ 106 + #define RT305X_GPIO_7 7 107 + #define RT305X_GPIO_10 10 108 + #define RT305X_GPIO_14 14 109 + #define RT305X_GPIO_UART1_TXD 15 110 + #define RT305X_GPIO_UART1_RXD 16 111 + #define RT305X_GPIO_JTAG_TDO 17 112 + #define RT305X_GPIO_JTAG_TDI 18 113 + #define RT305X_GPIO_MDIO_MDC 22 114 + #define RT305X_GPIO_MDIO_MDIO 23 115 + #define RT305X_GPIO_SDRAM_MD16 24 116 + #define RT305X_GPIO_SDRAM_MD31 39 117 + #define RT305X_GPIO_GE0_TXD0 40 118 + #define RT305X_GPIO_GE0_RXCLK 51 119 + 120 + #define RT305X_GPIO_MODE_I2C BIT(0) 121 + #define RT305X_GPIO_MODE_SPI BIT(1) 122 + #define RT305X_GPIO_MODE_UART0_SHIFT 2 123 + #define RT305X_GPIO_MODE_UART0_MASK 0x7 124 + #define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT) 125 + #define RT305X_GPIO_MODE_UARTF 0x0 126 + #define RT305X_GPIO_MODE_PCM_UARTF 0x1 127 + #define RT305X_GPIO_MODE_PCM_I2S 0x2 128 + #define RT305X_GPIO_MODE_I2S_UARTF 0x3 129 + #define 
RT305X_GPIO_MODE_PCM_GPIO 0x4 130 + #define RT305X_GPIO_MODE_GPIO_UARTF 0x5 131 + #define RT305X_GPIO_MODE_GPIO_I2S 0x6 132 + #define RT305X_GPIO_MODE_GPIO 0x7 133 + #define RT305X_GPIO_MODE_UART1 BIT(5) 134 + #define RT305X_GPIO_MODE_JTAG BIT(6) 135 + #define RT305X_GPIO_MODE_MDIO BIT(7) 136 + #define RT305X_GPIO_MODE_SDRAM BIT(8) 137 + #define RT305X_GPIO_MODE_RGMII BIT(9) 138 + 139 + #endif
+25
arch/mips/include/asm/mach-ralink/war.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> 7 + */ 8 + #ifndef __ASM_MACH_RALINK_WAR_H 9 + #define __ASM_MACH_RALINK_WAR_H 10 + 11 + #define R4600_V1_INDEX_ICACHEOP_WAR 0 12 + #define R4600_V1_HIT_CACHEOP_WAR 0 13 + #define R4600_V2_HIT_CACHEOP_WAR 0 14 + #define R5432_CP0_INTERRUPT_WAR 0 15 + #define BCM1250_M3_WAR 0 16 + #define SIBYTE_1956_WAR 0 17 + #define MIPS4K_ICACHE_REFILL_WAR 0 18 + #define MIPS_CACHE_SYNC_WAR 0 19 + #define TX49XX_ICACHE_INDEX_INV_WAR 0 20 + #define RM9000_CDEX_SMP_WAR 0 21 + #define ICACHE_REFILLS_WORKAROUND_WAR 0 22 + #define R10000_LLSC_WAR 0 23 + #define MIPS34K_MISSED_ITLB_WAR 0 24 + 25 + #endif /* __ASM_MACH_RALINK_WAR_H */
+7 -21
arch/mips/include/asm/mips-boards/generic.h
··· 1 1 /* 2 - * Carsten Langgaard, carstenl@mips.com 3 - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 4 - * 5 - * This program is free software; you can distribute it and/or modify it 6 - * under the terms of the GNU General Public License (Version 2) as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 12 4 * for more details. 13 5 * 14 - * You should have received a copy of the GNU General Public License along 15 - * with this program; if not, write to the Free Software Foundation, Inc., 16 - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 17 - * 18 6 * Defines of the MIPS boards specific address-MAP, registers, etc. 7 + * 8 + * Copyright (C) 2000,2012 MIPS Technologies, Inc. 9 + * All rights reserved. 10 + * Authors: Carsten Langgaard <carstenl@mips.com> 11 + * Steven J. Hill <sjhill@mips.com> 19 12 */ 20 13 #ifndef __ASM_MIPS_BOARDS_GENERIC_H 21 14 #define __ASM_MIPS_BOARDS_GENERIC_H ··· 22 29 */ 23 30 #define ASCII_DISPLAY_WORD_BASE 0x1f000410 24 31 #define ASCII_DISPLAY_POS_BASE 0x1f000418 25 - 26 - 27 - /* 28 - * Yamon Prom print address. 29 - */ 30 - #define YAMON_PROM_PRINT_ADDR 0x1fc00504 31 - 32 32 33 33 /* 34 34 * Reset register.
+165 -226
arch/mips/include/asm/mipsregs.h
··· 595 595 #define MIPS_CONF3_DSP2P (_ULCAST_(1) << 11) 596 596 #define MIPS_CONF3_RXI (_ULCAST_(1) << 12) 597 597 #define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) 598 + #define MIPS_CONF3_ISA (_ULCAST_(3) << 14) 599 + #define MIPS_CONF3_VZ (_ULCAST_(1) << 23) 598 600 599 601 #define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) 600 602 #define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14) ··· 1160 1158 __res; \ 1161 1159 }) 1162 1160 1161 + #ifdef HAVE_AS_DSP 1162 + #define rddsp(mask) \ 1163 + ({ \ 1164 + unsigned int __dspctl; \ 1165 + \ 1166 + __asm__ __volatile__( \ 1167 + " rddsp %0, %x1 \n" \ 1168 + : "=r" (__dspctl) \ 1169 + : "i" (mask)); \ 1170 + __dspctl; \ 1171 + }) 1172 + 1173 + #define wrdsp(val, mask) \ 1174 + do { \ 1175 + __asm__ __volatile__( \ 1176 + " wrdsp %0, %x1 \n" \ 1177 + : \ 1178 + : "r" (val), "i" (mask)); \ 1179 + } while (0) 1180 + 1181 + #define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) 1182 + #define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) 1183 + #define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) 1184 + #define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) 1185 + 1186 + #define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) 1187 + #define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) 1188 + #define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) 1189 + #define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) 1190 + 1191 + #define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) 1192 + #define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) 1193 + #define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) 1194 + #define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) 1195 + 1196 + #define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) 1197 + #define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) 1198 + #define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) 
1199 + #define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) 1200 + 1201 + #else 1202 + 1203 + #ifdef CONFIG_CPU_MICROMIPS 1204 + #define rddsp(mask) \ 1205 + ({ \ 1206 + unsigned int __res; \ 1207 + \ 1208 + __asm__ __volatile__( \ 1209 + " .set push \n" \ 1210 + " .set noat \n" \ 1211 + " # rddsp $1, %x1 \n" \ 1212 + " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \ 1213 + " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \ 1214 + " move %0, $1 \n" \ 1215 + " .set pop \n" \ 1216 + : "=r" (__res) \ 1217 + : "i" (mask)); \ 1218 + __res; \ 1219 + }) 1220 + 1221 + #define wrdsp(val, mask) \ 1222 + do { \ 1223 + __asm__ __volatile__( \ 1224 + " .set push \n" \ 1225 + " .set noat \n" \ 1226 + " move $1, %0 \n" \ 1227 + " # wrdsp $1, %x1 \n" \ 1228 + " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \ 1229 + " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \ 1230 + " .set pop \n" \ 1231 + : \ 1232 + : "r" (val), "i" (mask)); \ 1233 + } while (0) 1234 + 1235 + #define _umips_dsp_mfxxx(ins) \ 1236 + ({ \ 1237 + unsigned long __treg; \ 1238 + \ 1239 + __asm__ __volatile__( \ 1240 + " .set push \n" \ 1241 + " .set noat \n" \ 1242 + " .hword 0x0001 \n" \ 1243 + " .hword %x1 \n" \ 1244 + " move %0, $1 \n" \ 1245 + " .set pop \n" \ 1246 + : "=r" (__treg) \ 1247 + : "i" (ins)); \ 1248 + __treg; \ 1249 + }) 1250 + 1251 + #define _umips_dsp_mtxxx(val, ins) \ 1252 + do { \ 1253 + __asm__ __volatile__( \ 1254 + " .set push \n" \ 1255 + " .set noat \n" \ 1256 + " move $1, %0 \n" \ 1257 + " .hword 0x0001 \n" \ 1258 + " .hword %x1 \n" \ 1259 + " .set pop \n" \ 1260 + : \ 1261 + : "r" (val), "i" (ins)); \ 1262 + } while (0) 1263 + 1264 + #define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c) 1265 + #define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c) 1266 + 1267 + #define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c)) 1268 + #define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c)) 1269 + 1270 + #define mflo0() 
_umips_dsp_mflo(0) 1271 + #define mflo1() _umips_dsp_mflo(1) 1272 + #define mflo2() _umips_dsp_mflo(2) 1273 + #define mflo3() _umips_dsp_mflo(3) 1274 + 1275 + #define mfhi0() _umips_dsp_mfhi(0) 1276 + #define mfhi1() _umips_dsp_mfhi(1) 1277 + #define mfhi2() _umips_dsp_mfhi(2) 1278 + #define mfhi3() _umips_dsp_mfhi(3) 1279 + 1280 + #define mtlo0(x) _umips_dsp_mtlo(x, 0) 1281 + #define mtlo1(x) _umips_dsp_mtlo(x, 1) 1282 + #define mtlo2(x) _umips_dsp_mtlo(x, 2) 1283 + #define mtlo3(x) _umips_dsp_mtlo(x, 3) 1284 + 1285 + #define mthi0(x) _umips_dsp_mthi(x, 0) 1286 + #define mthi1(x) _umips_dsp_mthi(x, 1) 1287 + #define mthi2(x) _umips_dsp_mthi(x, 2) 1288 + #define mthi3(x) _umips_dsp_mthi(x, 3) 1289 + 1290 + #else /* !CONFIG_CPU_MICROMIPS */ 1163 1291 #define rddsp(mask) \ 1164 1292 ({ \ 1165 1293 unsigned int __res; \ ··· 1315 1183 " # wrdsp $1, %x1 \n" \ 1316 1184 " .word 0x7c2004f8 | (%x1 << 11) \n" \ 1317 1185 " .set pop \n" \ 1318 - : \ 1186 + : \ 1319 1187 : "r" (val), "i" (mask)); \ 1320 1188 } while (0) 1321 1189 1322 - #if 0 /* Need DSP ASE capable assembler ... 
*/ 1323 - #define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) 1324 - #define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) 1325 - #define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) 1326 - #define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) 1327 - 1328 - #define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) 1329 - #define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) 1330 - #define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) 1331 - #define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) 1332 - 1333 - #define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) 1334 - #define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) 1335 - #define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) 1336 - #define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) 1337 - 1338 - #define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) 1339 - #define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) 1340 - #define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) 1341 - #define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) 1342 - 1343 - #else 1344 - 1345 - #define mfhi0() \ 1190 + #define _dsp_mfxxx(ins) \ 1346 1191 ({ \ 1347 1192 unsigned long __treg; \ 1348 1193 \ 1349 1194 __asm__ __volatile__( \ 1350 - " .set push \n" \ 1351 - " .set noat \n" \ 1352 - " # mfhi %0, $ac0 \n" \ 1353 - " .word 0x00000810 \n" \ 1354 - " move %0, $1 \n" \ 1355 - " .set pop \n" \ 1356 - : "=r" (__treg)); \ 1195 + " .set push \n" \ 1196 + " .set noat \n" \ 1197 + " .word (0x00000810 | %1) \n" \ 1198 + " move %0, $1 \n" \ 1199 + " .set pop \n" \ 1200 + : "=r" (__treg) \ 1201 + : "i" (ins)); \ 1357 1202 __treg; \ 1358 1203 }) 1359 1204 1360 - #define mfhi1() \ 1361 - ({ \ 1362 - unsigned long __treg; \ 1363 - \ 1364 - __asm__ __volatile__( \ 1365 - " .set push \n" \ 1366 - " .set noat \n" \ 1367 - " # mfhi %0, $ac1 \n" \ 1368 - " 
.word 0x00200810 \n" \ 1369 - " move %0, $1 \n" \ 1370 - " .set pop \n" \ 1371 - : "=r" (__treg)); \ 1372 - __treg; \ 1373 - }) 1374 - 1375 - #define mfhi2() \ 1376 - ({ \ 1377 - unsigned long __treg; \ 1378 - \ 1379 - __asm__ __volatile__( \ 1380 - " .set push \n" \ 1381 - " .set noat \n" \ 1382 - " # mfhi %0, $ac2 \n" \ 1383 - " .word 0x00400810 \n" \ 1384 - " move %0, $1 \n" \ 1385 - " .set pop \n" \ 1386 - : "=r" (__treg)); \ 1387 - __treg; \ 1388 - }) 1389 - 1390 - #define mfhi3() \ 1391 - ({ \ 1392 - unsigned long __treg; \ 1393 - \ 1394 - __asm__ __volatile__( \ 1395 - " .set push \n" \ 1396 - " .set noat \n" \ 1397 - " # mfhi %0, $ac3 \n" \ 1398 - " .word 0x00600810 \n" \ 1399 - " move %0, $1 \n" \ 1400 - " .set pop \n" \ 1401 - : "=r" (__treg)); \ 1402 - __treg; \ 1403 - }) 1404 - 1405 - #define mflo0() \ 1406 - ({ \ 1407 - unsigned long __treg; \ 1408 - \ 1409 - __asm__ __volatile__( \ 1410 - " .set push \n" \ 1411 - " .set noat \n" \ 1412 - " # mflo %0, $ac0 \n" \ 1413 - " .word 0x00000812 \n" \ 1414 - " move %0, $1 \n" \ 1415 - " .set pop \n" \ 1416 - : "=r" (__treg)); \ 1417 - __treg; \ 1418 - }) 1419 - 1420 - #define mflo1() \ 1421 - ({ \ 1422 - unsigned long __treg; \ 1423 - \ 1424 - __asm__ __volatile__( \ 1425 - " .set push \n" \ 1426 - " .set noat \n" \ 1427 - " # mflo %0, $ac1 \n" \ 1428 - " .word 0x00200812 \n" \ 1429 - " move %0, $1 \n" \ 1430 - " .set pop \n" \ 1431 - : "=r" (__treg)); \ 1432 - __treg; \ 1433 - }) 1434 - 1435 - #define mflo2() \ 1436 - ({ \ 1437 - unsigned long __treg; \ 1438 - \ 1439 - __asm__ __volatile__( \ 1440 - " .set push \n" \ 1441 - " .set noat \n" \ 1442 - " # mflo %0, $ac2 \n" \ 1443 - " .word 0x00400812 \n" \ 1444 - " move %0, $1 \n" \ 1445 - " .set pop \n" \ 1446 - : "=r" (__treg)); \ 1447 - __treg; \ 1448 - }) 1449 - 1450 - #define mflo3() \ 1451 - ({ \ 1452 - unsigned long __treg; \ 1453 - \ 1454 - __asm__ __volatile__( \ 1455 - " .set push \n" \ 1456 - " .set noat \n" \ 1457 - " # mflo %0, $ac3 \n" \ 1458 - " 
.word 0x00600812 \n" \ 1459 - " move %0, $1 \n" \ 1460 - " .set pop \n" \ 1461 - : "=r" (__treg)); \ 1462 - __treg; \ 1463 - }) 1464 - 1465 - #define mthi0(x) \ 1205 + #define _dsp_mtxxx(val, ins) \ 1466 1206 do { \ 1467 1207 __asm__ __volatile__( \ 1468 1208 " .set push \n" \ 1469 1209 " .set noat \n" \ 1470 1210 " move $1, %0 \n" \ 1471 - " # mthi $1, $ac0 \n" \ 1472 - " .word 0x00200011 \n" \ 1211 + " .word (0x00200011 | %1) \n" \ 1473 1212 " .set pop \n" \ 1474 1213 : \ 1475 - : "r" (x)); \ 1214 + : "r" (val), "i" (ins)); \ 1476 1215 } while (0) 1477 1216 1478 - #define mthi1(x) \ 1479 - do { \ 1480 - __asm__ __volatile__( \ 1481 - " .set push \n" \ 1482 - " .set noat \n" \ 1483 - " move $1, %0 \n" \ 1484 - " # mthi $1, $ac1 \n" \ 1485 - " .word 0x00200811 \n" \ 1486 - " .set pop \n" \ 1487 - : \ 1488 - : "r" (x)); \ 1489 - } while (0) 1217 + #define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002) 1218 + #define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000) 1490 1219 1491 - #define mthi2(x) \ 1492 - do { \ 1493 - __asm__ __volatile__( \ 1494 - " .set push \n" \ 1495 - " .set noat \n" \ 1496 - " move $1, %0 \n" \ 1497 - " # mthi $1, $ac2 \n" \ 1498 - " .word 0x00201011 \n" \ 1499 - " .set pop \n" \ 1500 - : \ 1501 - : "r" (x)); \ 1502 - } while (0) 1220 + #define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002)) 1221 + #define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000)) 1503 1222 1504 - #define mthi3(x) \ 1505 - do { \ 1506 - __asm__ __volatile__( \ 1507 - " .set push \n" \ 1508 - " .set noat \n" \ 1509 - " move $1, %0 \n" \ 1510 - " # mthi $1, $ac3 \n" \ 1511 - " .word 0x00201811 \n" \ 1512 - " .set pop \n" \ 1513 - : \ 1514 - : "r" (x)); \ 1515 - } while (0) 1223 + #define mflo0() _dsp_mflo(0) 1224 + #define mflo1() _dsp_mflo(1) 1225 + #define mflo2() _dsp_mflo(2) 1226 + #define mflo3() _dsp_mflo(3) 1516 1227 1517 - #define mtlo0(x) \ 1518 - do { \ 1519 - __asm__ __volatile__( \ 1520 - " .set push \n" \ 1521 - " .set noat \n" \ 1522 
- " move $1, %0 \n" \ 1523 - " # mtlo $1, $ac0 \n" \ 1524 - " .word 0x00200013 \n" \ 1525 - " .set pop \n" \ 1526 - : \ 1527 - : "r" (x)); \ 1528 - } while (0) 1228 + #define mfhi0() _dsp_mfhi(0) 1229 + #define mfhi1() _dsp_mfhi(1) 1230 + #define mfhi2() _dsp_mfhi(2) 1231 + #define mfhi3() _dsp_mfhi(3) 1529 1232 1530 - #define mtlo1(x) \ 1531 - do { \ 1532 - __asm__ __volatile__( \ 1533 - " .set push \n" \ 1534 - " .set noat \n" \ 1535 - " move $1, %0 \n" \ 1536 - " # mtlo $1, $ac1 \n" \ 1537 - " .word 0x00200813 \n" \ 1538 - " .set pop \n" \ 1539 - : \ 1540 - : "r" (x)); \ 1541 - } while (0) 1233 + #define mtlo0(x) _dsp_mtlo(x, 0) 1234 + #define mtlo1(x) _dsp_mtlo(x, 1) 1235 + #define mtlo2(x) _dsp_mtlo(x, 2) 1236 + #define mtlo3(x) _dsp_mtlo(x, 3) 1542 1237 1543 - #define mtlo2(x) \ 1544 - do { \ 1545 - __asm__ __volatile__( \ 1546 - " .set push \n" \ 1547 - " .set noat \n" \ 1548 - " move $1, %0 \n" \ 1549 - " # mtlo $1, $ac2 \n" \ 1550 - " .word 0x00201013 \n" \ 1551 - " .set pop \n" \ 1552 - : \ 1553 - : "r" (x)); \ 1554 - } while (0) 1238 + #define mthi0(x) _dsp_mthi(x, 0) 1239 + #define mthi1(x) _dsp_mthi(x, 1) 1240 + #define mthi2(x) _dsp_mthi(x, 2) 1241 + #define mthi3(x) _dsp_mthi(x, 3) 1555 1242 1556 - #define mtlo3(x) \ 1557 - do { \ 1558 - __asm__ __volatile__( \ 1559 - " .set push \n" \ 1560 - " .set noat \n" \ 1561 - " move $1, %0 \n" \ 1562 - " # mtlo $1, $ac3 \n" \ 1563 - " .word 0x00201813 \n" \ 1564 - " .set pop \n" \ 1565 - : \ 1566 - : "r" (x)); \ 1567 - } while (0) 1568 - 1243 + #endif /* CONFIG_CPU_MICROMIPS */ 1569 1244 #endif 1570 1245 1571 1246 /*
+79
arch/mips/include/asm/netlogic/mips-extns.h
··· 68 68 __write_64bit_c0_register($9, 7, (val)); \ 69 69 } while (0) 70 70 71 + /* 72 + * Handling the 64 bit EIMR and EIRR registers in 32-bit mode with 73 + * standard functions will be very inefficient. This provides 74 + * optimized functions for the normal operations on the registers. 75 + * 76 + * Call with interrupts disabled. 77 + */ 78 + static inline void ack_c0_eirr(int irq) 79 + { 80 + __asm__ __volatile__( 81 + ".set push\n\t" 82 + ".set mips64\n\t" 83 + ".set noat\n\t" 84 + "li $1, 1\n\t" 85 + "dsllv $1, $1, %0\n\t" 86 + "dmtc0 $1, $9, 6\n\t" 87 + ".set pop" 88 + : : "r" (irq)); 89 + } 90 + 91 + static inline void set_c0_eimr(int irq) 92 + { 93 + __asm__ __volatile__( 94 + ".set push\n\t" 95 + ".set mips64\n\t" 96 + ".set noat\n\t" 97 + "li $1, 1\n\t" 98 + "dsllv %0, $1, %0\n\t" 99 + "dmfc0 $1, $9, 7\n\t" 100 + "or $1, %0\n\t" 101 + "dmtc0 $1, $9, 7\n\t" 102 + ".set pop" 103 + : "+r" (irq)); 104 + } 105 + 106 + static inline void clear_c0_eimr(int irq) 107 + { 108 + __asm__ __volatile__( 109 + ".set push\n\t" 110 + ".set mips64\n\t" 111 + ".set noat\n\t" 112 + "li $1, 1\n\t" 113 + "dsllv %0, $1, %0\n\t" 114 + "dmfc0 $1, $9, 7\n\t" 115 + "or $1, %0\n\t" 116 + "xor $1, %0\n\t" 117 + "dmtc0 $1, $9, 7\n\t" 118 + ".set pop" 119 + : "+r" (irq)); 120 + } 121 + 122 + /* 123 + * Read c0 eimr and c0 eirr, do AND of the two values, the result is 124 + * the interrupts which are raised and are not masked. 
125 + */ 126 + static inline uint64_t read_c0_eirr_and_eimr(void) 127 + { 128 + uint64_t val; 129 + 130 + #ifdef CONFIG_64BIT 131 + val = read_c0_eimr() & read_c0_eirr(); 132 + #else 133 + __asm__ __volatile__( 134 + ".set push\n\t" 135 + ".set mips64\n\t" 136 + ".set noat\n\t" 137 + "dmfc0 %M0, $9, 6\n\t" 138 + "dmfc0 %L0, $9, 7\n\t" 139 + "and %M0, %L0\n\t" 140 + "dsll %L0, %M0, 32\n\t" 141 + "dsra %M0, %M0, 32\n\t" 142 + "dsra %L0, %L0, 32\n\t" 143 + ".set pop" 144 + : "=r" (val)); 145 + #endif 146 + 147 + return val; 148 + } 149 + 71 150 static inline int hard_smp_processor_id(void) 72 151 { 73 152 return __read_32bit_c0_register($15, 1) & 0x3ff;
+2
arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
··· 46 46 #define CPU_BLOCKID_FPU 9 47 47 #define CPU_BLOCKID_MAP 10 48 48 49 + #define ICU_DEFEATURE 0x100 50 + 49 51 #define LSU_DEFEATURE 0x304 50 52 #define LSU_DEBUG_ADDR 0x305 51 53 #define LSU_DEBUG_DATA0 0x306
+10 -2
arch/mips/include/asm/netlogic/xlp-hal/pic.h
··· 261 261 #define PIC_LOCAL_SCHEDULING 1 262 262 #define PIC_GLOBAL_SCHEDULING 0 263 263 264 + #define PIC_CLK_HZ 133333333 265 + 264 266 #define nlm_read_pic_reg(b, r) nlm_read_reg64(b, r) 265 267 #define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v) 266 268 #define nlm_get_pic_pcibase(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node)) ··· 315 313 nlm_pic_read_timer(uint64_t base, int timer) 316 314 { 317 315 return nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer)); 316 + } 317 + 318 + static inline uint32_t 319 + nlm_pic_read_timer32(uint64_t base, int timer) 320 + { 321 + return (uint32_t)nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer)); 318 322 } 319 323 320 324 static inline void ··· 384 376 } 385 377 386 378 static inline void 387 - nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt) 379 + nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en) 388 380 { 389 - nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt); 381 + nlm_pic_write_irt_direct(base, irt, en, 0, 0, irq, hwt); 390 382 } 391 383 392 384 int nlm_irq_to_irt(int irq);
+45 -3
arch/mips/include/asm/netlogic/xlr/pic.h
··· 35 35 #ifndef _ASM_NLM_XLR_PIC_H 36 36 #define _ASM_NLM_XLR_PIC_H 37 37 38 - #define PIC_CLKS_PER_SEC 66666666ULL 38 + #define PIC_CLK_HZ 66666666 39 39 /* PIC hardware interrupt numbers */ 40 40 #define PIC_IRT_WD_INDEX 0 41 41 #define PIC_IRT_TIMER_0_INDEX 1 42 + #define PIC_IRT_TIMER_INDEX(i) ((i) + PIC_IRT_TIMER_0_INDEX) 42 43 #define PIC_IRT_TIMER_1_INDEX 2 43 44 #define PIC_IRT_TIMER_2_INDEX 3 44 45 #define PIC_IRT_TIMER_3_INDEX 4 ··· 100 99 101 100 /* PIC Registers */ 102 101 #define PIC_CTRL 0x00 102 + #define PIC_CTRL_STE 8 /* timer enable start bit */ 103 103 #define PIC_IPI 0x04 104 104 #define PIC_INT_ACK 0x06 105 105 ··· 253 251 } 254 252 255 253 static inline void 256 - nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt) 254 + nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en) 257 255 { 258 256 nlm_write_reg(base, PIC_IRT_0(irt), (1u << hwt)); 259 257 /* local scheduling, invalid, level by default */ 260 258 nlm_write_reg(base, PIC_IRT_1(irt), 261 - (1 << 30) | (1 << 6) | irq); 259 + (en << 30) | (1 << 6) | irq); 260 + } 261 + 262 + static inline uint64_t 263 + nlm_pic_read_timer(uint64_t base, int timer) 264 + { 265 + uint32_t up1, up2, low; 266 + 267 + up1 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer)); 268 + low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer)); 269 + up2 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer)); 270 + 271 + if (up1 != up2) /* wrapped, get the new low */ 272 + low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer)); 273 + return ((uint64_t)up2 << 32) | low; 274 + 275 + } 276 + 277 + static inline uint32_t 278 + nlm_pic_read_timer32(uint64_t base, int timer) 279 + { 280 + return nlm_read_reg(base, PIC_TIMER_COUNT_0(timer)); 281 + } 282 + 283 + static inline void 284 + nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu) 285 + { 286 + uint32_t up, low; 287 + uint64_t pic_ctrl = nlm_read_reg(base, PIC_CTRL); 288 + int en; 289 + 290 + en = (irq > 0); 291 + up = value >> 32; 292 + low = 
value & 0xFFFFFFFF; 293 + nlm_write_reg(base, PIC_TIMER_MAXVAL_0(timer), low); 294 + nlm_write_reg(base, PIC_TIMER_MAXVAL_1(timer), up); 295 + nlm_pic_init_irt(base, PIC_IRT_TIMER_INDEX(timer), irq, cpu, 0); 296 + 297 + /* enable the timer */ 298 + pic_ctrl |= (1 << (PIC_CTRL_STE + timer)); 299 + nlm_write_reg(base, PIC_CTRL, pic_ctrl); 262 300 } 263 301 #endif 264 302 #endif /* _ASM_NLM_XLR_PIC_H */
+5
arch/mips/include/asm/pci.h
··· 144 144 145 145 extern char * (*pcibios_plat_setup)(char *str); 146 146 147 + #ifdef CONFIG_OF 147 148 /* this function parses memory ranges from a device node */ 148 149 extern void pci_load_of_ranges(struct pci_controller *hose, 149 150 struct device_node *node); 151 + #else 152 + static inline void pci_load_of_ranges(struct pci_controller *hose, 153 + struct device_node *node) {} 154 + #endif 150 155 151 156 #endif /* _ASM_PCI_H */
+1 -1
arch/mips/include/asm/time.h
··· 75 75 76 76 static inline int init_mips_clocksource(void) 77 77 { 78 - #ifdef CONFIG_CSRC_R4K 78 + #if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC) 79 79 return init_r4k_clocksource(); 80 80 #else 81 81 return 0;
+32
arch/mips/kernel/Makefile
··· 27 27 obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o 28 28 obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o 29 29 obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o 30 + obj-$(CONFIG_CSRC_GIC) += csrc-gic.o 30 31 obj-$(CONFIG_SYNC_R4K) += sync-r4k.o 31 32 32 33 obj-$(CONFIG_STACKTRACE) += stacktrace.o ··· 98 97 obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o 99 98 100 99 obj-$(CONFIG_JUMP_LABEL) += jump_label.o 100 + 101 + # 102 + # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe 103 + # to enable DSP assembler support here even if the MIPS Release 2 CPU we 104 + # are targetting does not support DSP because all code-paths making use of 105 + # it properly check that the running CPU *actually does* support these 106 + # instructions. 107 + # 108 + ifeq ($(CONFIG_CPU_MIPSR2), y) 109 + CFLAGS_DSP = -DHAVE_AS_DSP 110 + 111 + # 112 + # Check if assembler supports DSP ASE 113 + # 114 + ifeq ($(call cc-option-yn,-mdsp), y) 115 + CFLAGS_DSP += -mdsp 116 + endif 117 + 118 + # 119 + # Check if assembler supports DSP ASE Rev2 120 + # 121 + ifeq ($(call cc-option-yn,-mdspr2), y) 122 + CFLAGS_DSP += -mdspr2 123 + endif 124 + 125 + CFLAGS_signal.o = $(CFLAGS_DSP) 126 + CFLAGS_signal32.o = $(CFLAGS_DSP) 127 + CFLAGS_process.o = $(CFLAGS_DSP) 128 + CFLAGS_branch.o = $(CFLAGS_DSP) 129 + CFLAGS_ptrace.o = $(CFLAGS_DSP) 130 + endif 101 131 102 132 CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
+13 -1
arch/mips/kernel/cpu-probe.c
··· 201 201 break; 202 202 203 203 case CPU_M14KC: 204 + case CPU_M14KEC: 204 205 case CPU_24K: 205 206 case CPU_34K: 206 207 case CPU_1004K: ··· 468 467 c->ases |= MIPS_ASE_MIPSMT; 469 468 if (config3 & MIPS_CONF3_ULRI) 470 469 c->options |= MIPS_CPU_ULRI; 470 + if (config3 & MIPS_CONF3_ISA) 471 + c->options |= MIPS_CPU_MICROMIPS; 472 + if (config3 & MIPS_CONF3_VZ) 473 + c->ases |= MIPS_ASE_VZ; 471 474 472 475 return config3 & MIPS_CONF_M; 473 476 } ··· 871 866 __cpu_name[cpu] = "MIPS 20Kc"; 872 867 break; 873 868 case PRID_IMP_24K: 874 - case PRID_IMP_24KE: 875 869 c->cputype = CPU_24K; 876 870 __cpu_name[cpu] = "MIPS 24Kc"; 871 + break; 872 + case PRID_IMP_24KE: 873 + c->cputype = CPU_24K; 874 + __cpu_name[cpu] = "MIPS 24KEc"; 877 875 break; 878 876 case PRID_IMP_25KF: 879 877 c->cputype = CPU_25KF; ··· 893 885 case PRID_IMP_M14KC: 894 886 c->cputype = CPU_M14KC; 895 887 __cpu_name[cpu] = "MIPS M14Kc"; 888 + break; 889 + case PRID_IMP_M14KEC: 890 + c->cputype = CPU_M14KEC; 891 + __cpu_name[cpu] = "MIPS M14KEc"; 896 892 break; 897 893 case PRID_IMP_1004K: 898 894 c->cputype = CPU_1004K;
+49
arch/mips/kernel/csrc-gic.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 7 + */ 8 + #include <linux/clocksource.h> 9 + #include <linux/init.h> 10 + 11 + #include <asm/time.h> 12 + #include <asm/gic.h> 13 + 14 + static cycle_t gic_hpt_read(struct clocksource *cs) 15 + { 16 + unsigned int hi, hi2, lo; 17 + 18 + do { 19 + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); 20 + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); 21 + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); 22 + } while (hi2 != hi); 23 + 24 + return (((cycle_t) hi) << 32) + lo; 25 + } 26 + 27 + static struct clocksource gic_clocksource = { 28 + .name = "GIC", 29 + .read = gic_hpt_read, 30 + .flags = CLOCK_SOURCE_IS_CONTINUOUS, 31 + }; 32 + 33 + void __init gic_clocksource_init(unsigned int frequency) 34 + { 35 + unsigned int config, bits; 36 + 37 + /* Calculate the clocksource mask. */ 38 + GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config); 39 + bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >> 40 + (GIC_SH_CONFIG_COUNTBITS_SHF - 2)); 41 + 42 + /* Set clocksource mask. */ 43 + gic_clocksource.mask = CLOCKSOURCE_MASK(bits); 44 + 45 + /* Calculate a somewhat reasonable rating value. */ 46 + gic_clocksource.rating = 200 + frequency / 10000000; 47 + 48 + clocksource_register_hz(&gic_clocksource, frequency); 49 + }
+2 -3
arch/mips/kernel/early_printk.c
··· 14 14 15 15 extern void prom_putchar(char); 16 16 17 - static void __init 18 - early_console_write(struct console *con, const char *s, unsigned n) 17 + static void early_console_write(struct console *con, const char *s, unsigned n) 19 18 { 20 19 while (n-- && *s) { 21 20 if (*s == '\n') ··· 24 25 } 25 26 } 26 27 27 - static struct console early_console __initdata = { 28 + static struct console early_console = { 28 29 .name = "early", 29 30 .write = early_console_write, 30 31 .flags = CON_PRINTBUFFER | CON_BOOT,
+42
arch/mips/kernel/irq_cpu.c
··· 31 31 #include <linux/interrupt.h> 32 32 #include <linux/kernel.h> 33 33 #include <linux/irq.h> 34 + #include <linux/irqdomain.h> 34 35 35 36 #include <asm/irq_cpu.h> 36 37 #include <asm/mipsregs.h> ··· 114 113 irq_set_chip_and_handler(i, &mips_cpu_irq_controller, 115 114 handle_percpu_irq); 116 115 } 116 + 117 + #ifdef CONFIG_IRQ_DOMAIN 118 + static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, 119 + irq_hw_number_t hw) 120 + { 121 + static struct irq_chip *chip; 122 + 123 + if (hw < 2 && cpu_has_mipsmt) { 124 + /* Software interrupts are used for MT/CMT IPI */ 125 + chip = &mips_mt_cpu_irq_controller; 126 + } else { 127 + chip = &mips_cpu_irq_controller; 128 + } 129 + 130 + irq_set_chip_and_handler(irq, chip, handle_percpu_irq); 131 + 132 + return 0; 133 + } 134 + 135 + static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = { 136 + .map = mips_cpu_intc_map, 137 + .xlate = irq_domain_xlate_onecell, 138 + }; 139 + 140 + int __init mips_cpu_intc_init(struct device_node *of_node, 141 + struct device_node *parent) 142 + { 143 + struct irq_domain *domain; 144 + 145 + /* Mask interrupts. */ 146 + clear_c0_status(ST0_IM); 147 + clear_c0_cause(CAUSEF_IP); 148 + 149 + domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0, 150 + &mips_cpu_intc_irq_domain_ops, NULL); 151 + if (!domain) 152 + panic("Failed to add irqdomain for MIPS CPU\n"); 153 + 154 + return 0; 155 + } 156 + #endif /* CONFIG_IRQ_DOMAIN */
+2
arch/mips/kernel/proc.c
··· 95 95 if (cpu_has_dsp) seq_printf(m, "%s", " dsp"); 96 96 if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2"); 97 97 if (cpu_has_mipsmt) seq_printf(m, "%s", " mt"); 98 + if (cpu_has_mmips) seq_printf(m, "%s", " micromips"); 99 + if (cpu_has_vz) seq_printf(m, "%s", " vz"); 98 100 seq_printf(m, "\n"); 99 101 100 102 seq_printf(m, "shadow register sets\t: %d\n",
+70 -21
arch/mips/kernel/setup.c
··· 480 480 } 481 481 early_param("mem", early_parse_mem); 482 482 483 + #ifdef CONFIG_PROC_VMCORE 484 + unsigned long setup_elfcorehdr, setup_elfcorehdr_size; 485 + static int __init early_parse_elfcorehdr(char *p) 486 + { 487 + int i; 488 + 489 + setup_elfcorehdr = memparse(p, &p); 490 + 491 + for (i = 0; i < boot_mem_map.nr_map; i++) { 492 + unsigned long start = boot_mem_map.map[i].addr; 493 + unsigned long end = (boot_mem_map.map[i].addr + 494 + boot_mem_map.map[i].size); 495 + if (setup_elfcorehdr >= start && setup_elfcorehdr < end) { 496 + /* 497 + * Reserve from the elf core header to the end of 498 + * the memory segment, that should all be kdump 499 + * reserved memory. 500 + */ 501 + setup_elfcorehdr_size = end - setup_elfcorehdr; 502 + break; 503 + } 504 + } 505 + /* 506 + * If we don't find it in the memory map, then we shouldn't 507 + * have to worry about it, as the new kernel won't use it. 508 + */ 509 + return 0; 510 + } 511 + early_param("elfcorehdr", early_parse_elfcorehdr); 512 + #endif 513 + 514 + static void __init arch_mem_addpart(phys_t mem, phys_t end, int type) 515 + { 516 + phys_t size; 517 + int i; 518 + 519 + size = end - mem; 520 + if (!size) 521 + return; 522 + 523 + /* Make sure it is in the boot_mem_map */ 524 + for (i = 0; i < boot_mem_map.nr_map; i++) { 525 + if (mem >= boot_mem_map.map[i].addr && 526 + mem < (boot_mem_map.map[i].addr + 527 + boot_mem_map.map[i].size)) 528 + return; 529 + } 530 + add_memory_region(mem, size, type); 531 + } 532 + 483 533 static void __init arch_mem_init(char **cmdline_p) 484 534 { 485 - phys_t init_mem, init_end, init_size; 486 - 487 535 extern void plat_mem_setup(void); 488 536 489 537 /* call board setup routine */ 490 538 plat_mem_setup(); 491 539 492 - init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT; 493 - init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT; 494 - init_size = init_end - init_mem; 495 - if (init_size) { 496 - /* Make sure it is in the boot_mem_map */ 497 - 
int i, found; 498 - found = 0; 499 - for (i = 0; i < boot_mem_map.nr_map; i++) { 500 - if (init_mem >= boot_mem_map.map[i].addr && 501 - init_mem < (boot_mem_map.map[i].addr + 502 - boot_mem_map.map[i].size)) { 503 - found = 1; 504 - break; 505 - } 506 - } 507 - if (!found) 508 - add_memory_region(init_mem, init_size, 509 - BOOT_MEM_INIT_RAM); 510 - } 540 + /* 541 + * Make sure all kernel memory is in the maps. The "UP" and 542 + * "DOWN" are opposite for initdata since if it crosses over 543 + * into another memory section you don't want that to be 544 + * freed when the initdata is freed. 545 + */ 546 + arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT, 547 + PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT, 548 + BOOT_MEM_RAM); 549 + arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, 550 + PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, 551 + BOOT_MEM_INIT_RAM); 511 552 512 553 pr_info("Determined physical RAM map:\n"); 513 554 print_memory_map(); ··· 578 537 } 579 538 580 539 bootmem_init(); 540 + #ifdef CONFIG_PROC_VMCORE 541 + if (setup_elfcorehdr && setup_elfcorehdr_size) { 542 + printk(KERN_INFO "kdump reserved memory at %lx-%lx\n", 543 + setup_elfcorehdr, setup_elfcorehdr_size); 544 + reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size, 545 + BOOTMEM_DEFAULT); 546 + } 547 + #endif 581 548 #ifdef CONFIG_KEXEC 582 549 if (crashk_res.start != crashk_res.end) 583 550 reserve_bootmem(crashk_res.start,
+1
arch/mips/kernel/smtc.c
··· 41 41 #include <asm/addrspace.h> 42 42 #include <asm/smtc.h> 43 43 #include <asm/smtc_proc.h> 44 + #include <asm/setup.h> 44 45 45 46 /* 46 47 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+3 -12
arch/mips/kernel/vpe.c
··· 697 697 dmt_flag = dmt(); 698 698 vpeflags = dvpe(); 699 699 700 - if (!list_empty(&v->tc)) { 701 - if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { 702 - evpe(vpeflags); 703 - emt(dmt_flag); 704 - local_irq_restore(flags); 705 - 706 - printk(KERN_WARNING 707 - "VPE loader: TC %d is already in use.\n", 708 - v->tc->index); 709 - return -ENOEXEC; 710 - } 711 - } else { 700 + if (list_empty(&v->tc)) { 712 701 evpe(vpeflags); 713 702 emt(dmt_flag); 714 703 local_irq_restore(flags); ··· 708 719 709 720 return -ENOEXEC; 710 721 } 722 + 723 + t = list_first_entry(&v->tc, struct tc, tc); 711 724 712 725 /* Put MVPE's into 'configuration state' */ 713 726 set_c0_mvpcontrol(MVPCONTROL_VPC);
+10 -2
arch/mips/lantiq/clk.c
··· 26 26 #include "prom.h" 27 27 28 28 /* lantiq socs have 3 static clocks */ 29 - static struct clk cpu_clk_generic[3]; 29 + static struct clk cpu_clk_generic[4]; 30 30 31 - void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io) 31 + void clkdev_add_static(unsigned long cpu, unsigned long fpi, 32 + unsigned long io, unsigned long ppe) 32 33 { 33 34 cpu_clk_generic[0].rate = cpu; 34 35 cpu_clk_generic[1].rate = fpi; 35 36 cpu_clk_generic[2].rate = io; 37 + cpu_clk_generic[3].rate = ppe; 36 38 } 37 39 38 40 struct clk *clk_get_cpu(void) ··· 52 50 { 53 51 return &cpu_clk_generic[2]; 54 52 } 53 + 54 + struct clk *clk_get_ppe(void) 55 + { 56 + return &cpu_clk_generic[3]; 57 + } 58 + EXPORT_SYMBOL_GPL(clk_get_ppe); 55 59 56 60 static inline int clk_good(struct clk *clk) 57 61 {
+6 -1
arch/mips/lantiq/clk.h
··· 27 27 #define CLOCK_167M 166666667 28 28 #define CLOCK_196_608M 196608000 29 29 #define CLOCK_200M 200000000 30 + #define CLOCK_222M 222000000 31 + #define CLOCK_240M 240000000 30 32 #define CLOCK_250M 250000000 31 33 #define CLOCK_266M 266666666 32 34 #define CLOCK_300M 300000000 33 35 #define CLOCK_333M 333333333 34 36 #define CLOCK_393M 393215332 35 37 #define CLOCK_400M 400000000 38 + #define CLOCK_450M 450000000 36 39 #define CLOCK_500M 500000000 37 40 #define CLOCK_600M 600000000 38 41 ··· 67 64 }; 68 65 69 66 extern void clkdev_add_static(unsigned long cpu, unsigned long fpi, 70 - unsigned long io); 67 + unsigned long io, unsigned long ppe); 71 68 72 69 extern unsigned long ltq_danube_cpu_hz(void); 73 70 extern unsigned long ltq_danube_fpi_hz(void); 71 + extern unsigned long ltq_danube_pp32_hz(void); 74 72 75 73 extern unsigned long ltq_ar9_cpu_hz(void); 76 74 extern unsigned long ltq_ar9_fpi_hz(void); 77 75 78 76 extern unsigned long ltq_vr9_cpu_hz(void); 79 77 extern unsigned long ltq_vr9_fpi_hz(void); 78 + extern unsigned long ltq_vr9_pp32_hz(void); 80 79 81 80 #endif
+2 -2
arch/mips/lantiq/falcon/sysctrl.c
··· 241 241 242 242 /* get our 3 static rates for cpu, fpi and io clocks */ 243 243 if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV) 244 - clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M); 244 + clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M, 0); 245 245 else 246 - clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M); 246 + clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M, 0); 247 247 248 248 /* add our clock domains */ 249 249 clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
+73 -32
arch/mips/lantiq/irq.c
··· 33 33 /* register definitions - external irqs */ 34 34 #define LTQ_EIU_EXIN_C 0x0000 35 35 #define LTQ_EIU_EXIN_INIC 0x0004 36 + #define LTQ_EIU_EXIN_INC 0x0008 36 37 #define LTQ_EIU_EXIN_INEN 0x000C 37 38 38 - /* irq numbers used by the external interrupt unit (EIU) */ 39 - #define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30) 40 - #define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31) 41 - #define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26) 42 - #define LTQ_EIU_IR3 INT_NUM_IM1_IRL0 43 - #define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) 44 - #define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) 45 - #define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) 46 - #define XWAY_EXIN_COUNT 3 39 + /* number of external interrupts */ 47 40 #define MAX_EIU 6 48 41 49 42 /* the performance counter */ ··· 65 72 int gic_present; 66 73 #endif 67 74 68 - static unsigned short ltq_eiu_irq[MAX_EIU] = { 69 - LTQ_EIU_IR0, 70 - LTQ_EIU_IR1, 71 - LTQ_EIU_IR2, 72 - LTQ_EIU_IR3, 73 - LTQ_EIU_IR4, 74 - LTQ_EIU_IR5, 75 - }; 76 - 77 75 static int exin_avail; 76 + static struct resource ltq_eiu_irq[MAX_EIU]; 78 77 static void __iomem *ltq_icu_membase[MAX_IM]; 79 78 static void __iomem *ltq_eiu_membase; 80 79 static struct irq_domain *ltq_domain; 80 + 81 + int ltq_eiu_get_irq(int exin) 82 + { 83 + if (exin < exin_avail) 84 + return ltq_eiu_irq[exin].start; 85 + return -1; 86 + } 81 87 82 88 void ltq_disable_irq(struct irq_data *d) 83 89 { ··· 120 128 ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier); 121 129 } 122 130 131 + static int ltq_eiu_settype(struct irq_data *d, unsigned int type) 132 + { 133 + int i; 134 + 135 + for (i = 0; i < MAX_EIU; i++) { 136 + if (d->hwirq == ltq_eiu_irq[i].start) { 137 + int val = 0; 138 + int edge = 0; 139 + 140 + switch (type) { 141 + case IRQF_TRIGGER_NONE: 142 + break; 143 + case IRQF_TRIGGER_RISING: 144 + val = 1; 145 + edge = 1; 146 + break; 147 + case IRQF_TRIGGER_FALLING: 148 + val = 2; 149 + edge = 1; 150 + break; 151 + case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING: 152 + val = 3; 153 + edge = 
1; 154 + break; 155 + case IRQF_TRIGGER_HIGH: 156 + val = 5; 157 + break; 158 + case IRQF_TRIGGER_LOW: 159 + val = 6; 160 + break; 161 + default: 162 + pr_err("invalid type %d for irq %ld\n", 163 + type, d->hwirq); 164 + return -EINVAL; 165 + } 166 + 167 + if (edge) 168 + irq_set_handler(d->hwirq, handle_edge_irq); 169 + 170 + ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | 171 + (val << (i * 4)), LTQ_EIU_EXIN_C); 172 + } 173 + } 174 + 175 + return 0; 176 + } 177 + 123 178 static unsigned int ltq_startup_eiu_irq(struct irq_data *d) 124 179 { 125 180 int i; 126 181 127 182 ltq_enable_irq(d); 128 183 for (i = 0; i < MAX_EIU; i++) { 129 - if (d->hwirq == ltq_eiu_irq[i]) { 130 - /* low level - we should really handle set_type */ 131 - ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | 132 - (0x6 << (i * 4)), LTQ_EIU_EXIN_C); 184 + if (d->hwirq == ltq_eiu_irq[i].start) { 185 + /* by default we are low level triggered */ 186 + ltq_eiu_settype(d, IRQF_TRIGGER_LOW); 133 187 /* clear all pending */ 134 - ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i), 135 - LTQ_EIU_EXIN_INIC); 188 + ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i), 189 + LTQ_EIU_EXIN_INC); 136 190 /* enable */ 137 191 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i), 138 192 LTQ_EIU_EXIN_INEN); ··· 195 157 196 158 ltq_disable_irq(d); 197 159 for (i = 0; i < MAX_EIU; i++) { 198 - if (d->hwirq == ltq_eiu_irq[i]) { 160 + if (d->hwirq == ltq_eiu_irq[i].start) { 199 161 /* disable */ 200 162 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i), 201 163 LTQ_EIU_EXIN_INEN); ··· 224 186 .irq_ack = ltq_ack_irq, 225 187 .irq_mask = ltq_disable_irq, 226 188 .irq_mask_ack = ltq_mask_and_ack_irq, 189 + .irq_set_type = ltq_eiu_settype, 227 190 }; 228 191 229 192 static void ltq_hw_irqdispatch(int module) ··· 340 301 return 0; 341 302 342 303 for (i = 0; i < exin_avail; i++) 343 - if (hw == ltq_eiu_irq[i]) 304 + if (hw == ltq_eiu_irq[i].start) 344 305 chip = &ltq_eiu_type; 345 306 346 307 irq_set_chip_and_handler(hw, chip, 
handle_level_irq); ··· 362 323 { 363 324 struct device_node *eiu_node; 364 325 struct resource res; 365 - int i; 326 + int i, ret; 366 327 367 328 for (i = 0; i < MAX_IM; i++) { 368 329 if (of_address_to_resource(node, i, &res)) ··· 379 340 } 380 341 381 342 /* the external interrupts are optional and xway only */ 382 - eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu"); 343 + eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); 383 344 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { 384 345 /* find out how many external irq sources we have */ 385 - const __be32 *count = of_get_property(node, 386 - "lantiq,count", NULL); 346 + exin_avail = of_irq_count(eiu_node); 387 347 388 - if (count) 389 - exin_avail = *count; 390 348 if (exin_avail > MAX_EIU) 391 349 exin_avail = MAX_EIU; 350 + 351 + ret = of_irq_to_resource_table(eiu_node, 352 + ltq_eiu_irq, exin_avail); 353 + if (ret != exin_avail) 354 + panic("failed to load external irq resources\n"); 392 355 393 356 if (request_mem_region(res.start, resource_size(&res), 394 357 res.name) < 0)
+43
arch/mips/lantiq/xway/clk.c
··· 53 53 } 54 54 } 55 55 56 + unsigned long ltq_danube_pp32_hz(void) 57 + { 58 + unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3; 59 + unsigned long clk; 60 + 61 + switch (clksys) { 62 + case 1: 63 + clk = CLOCK_240M; 64 + break; 65 + case 2: 66 + clk = CLOCK_222M; 67 + break; 68 + case 3: 69 + clk = CLOCK_133M; 70 + break; 71 + default: 72 + clk = CLOCK_266M; 73 + break; 74 + } 75 + 76 + return clk; 77 + } 78 + 56 79 unsigned long ltq_ar9_sys_hz(void) 57 80 { 58 81 if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2) ··· 167 144 break; 168 145 default: 169 146 clk = 0; 147 + break; 148 + } 149 + 150 + return clk; 151 + } 152 + 153 + unsigned long ltq_vr9_pp32_hz(void) 154 + { 155 + unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 3; 156 + unsigned long clk; 157 + 158 + switch (clksys) { 159 + case 1: 160 + clk = CLOCK_450M; 161 + break; 162 + case 2: 163 + clk = CLOCK_300M; 164 + break; 165 + default: 166 + clk = CLOCK_500M; 170 167 break; 171 168 } 172 169
+9
arch/mips/lantiq/xway/reset.c
··· 78 78 /* reset and boot a gphy. these phys only exist on xrx200 SoC */ 79 79 int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr) 80 80 { 81 + struct clk *clk; 82 + 81 83 if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) { 82 84 dev_err(dev, "this SoC has no GPHY\n"); 83 85 return -EINVAL; 84 86 } 87 + 88 + clk = clk_get_sys("1f203000.rcu", "gphy"); 89 + if (IS_ERR(clk)) 90 + return PTR_ERR(clk); 91 + 92 + clk_enable(clk); 93 + 85 94 if (id > 1) { 86 95 dev_err(dev, "%u is an invalid gphy id\n", id); 87 96 return -EINVAL;
+9 -6
arch/mips/lantiq/xway/sysctrl.c
··· 305 305 306 306 /* check if all the core register ranges are available */ 307 307 if (!np_pmu || !np_cgu || !np_ebu) 308 - panic("Failed to load core nodess from devicetree"); 308 + panic("Failed to load core nodes from devicetree"); 309 309 310 310 if (of_address_to_resource(np_pmu, 0, &res_pmu) || 311 311 of_address_to_resource(np_cgu, 0, &res_cgu) || ··· 356 356 357 357 if (of_machine_is_compatible("lantiq,ase")) { 358 358 if (ltq_cgu_r32(CGU_SYS) & (1 << 5)) 359 - clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M); 359 + clkdev_add_static(CLOCK_266M, CLOCK_133M, 360 + CLOCK_133M, CLOCK_266M); 360 361 else 361 - clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M); 362 + clkdev_add_static(CLOCK_133M, CLOCK_133M, 363 + CLOCK_133M, CLOCK_133M); 362 364 clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY), 363 365 clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY); 364 366 } else if (of_machine_is_compatible("lantiq,vr9")) { 365 367 clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(), 366 - ltq_vr9_fpi_hz()); 368 + ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz()); 367 369 clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY); 368 370 clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK); 369 371 clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI); ··· 376 374 PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | 377 375 PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | 378 376 PMU_PPE_QSB | PMU_PPE_TOP); 377 + clkdev_add_pmu("1f203000.rcu", "gphy", 0, PMU_GPHY); 379 378 } else if (of_machine_is_compatible("lantiq,ar9")) { 380 379 clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(), 381 - ltq_ar9_fpi_hz()); 380 + ltq_ar9_fpi_hz(), CLOCK_250M); 382 381 clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH); 383 382 } else { 384 383 clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(), 385 - ltq_danube_fpi_hz()); 384 + ltq_danube_fpi_hz(), ltq_danube_pp32_hz()); 386 385 } 387 386 }
+1 -1
arch/mips/loongson1/common/prom.c
··· 73 73 74 74 #define PORT(offset) (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset)) 75 75 76 - void __init prom_putchar(char c) 76 + void prom_putchar(char c) 77 77 { 78 78 int timeout; 79 79
+1
arch/mips/mm/c-r4k.c
··· 1057 1057 break; 1058 1058 1059 1059 case CPU_M14KC: 1060 + case CPU_M14KEC: 1060 1061 case CPU_24K: 1061 1062 case CPU_34K: 1062 1063 case CPU_74K:
+1
arch/mips/mm/tlbex.c
··· 581 581 case CPU_4KC: 582 582 case CPU_4KEC: 583 583 case CPU_M14KC: 584 + case CPU_M14KEC: 584 585 case CPU_SB1: 585 586 case CPU_SB1A: 586 587 case CPU_4KSC:
+50 -33
arch/mips/mti-malta/malta-time.c
··· 17 17 * 18 18 * Setting up the clock on the MIPS boards. 19 19 */ 20 - 21 20 #include <linux/types.h> 22 21 #include <linux/i8253.h> 23 22 #include <linux/init.h> ··· 24 25 #include <linux/sched.h> 25 26 #include <linux/spinlock.h> 26 27 #include <linux/interrupt.h> 27 - #include <linux/time.h> 28 28 #include <linux/timex.h> 29 29 #include <linux/mc146818rtc.h> 30 30 ··· 32 34 #include <asm/hardirq.h> 33 35 #include <asm/irq.h> 34 36 #include <asm/div64.h> 35 - #include <asm/cpu.h> 36 37 #include <asm/setup.h> 37 38 #include <asm/time.h> 38 39 #include <asm/mc146818-time.h> 39 40 #include <asm/msc01_ic.h> 41 + #include <asm/gic.h> 40 42 41 43 #include <asm/mips-boards/generic.h> 42 44 #include <asm/mips-boards/prom.h> ··· 44 46 #include <asm/mips-boards/maltaint.h> 45 47 46 48 unsigned long cpu_khz; 49 + int gic_frequency; 47 50 48 51 static int mips_cpu_timer_irq; 49 52 static int mips_cpu_perf_irq; ··· 60 61 do_IRQ(mips_cpu_perf_irq); 61 62 } 62 63 63 - /* 64 - * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect 65 - */ 66 - static unsigned int __init estimate_cpu_frequency(void) 64 + static unsigned int freqround(unsigned int freq, unsigned int amount) 67 65 { 68 - unsigned int prid = read_c0_prid() & 0xffff00; 69 - unsigned int count; 66 + freq += amount; 67 + freq -= freq % (amount*2); 68 + return freq; 69 + } 70 70 71 + /* 72 + * Estimate CPU and GIC frequencies. 73 + */ 74 + static void __init estimate_frequencies(void) 75 + { 71 76 unsigned long flags; 72 - unsigned int start; 77 + unsigned int count, start; 78 + unsigned int giccount = 0, gicstart = 0; 73 79 74 80 local_irq_save(flags); 75 81 76 - /* Start counter exactly on falling edge of update flag */ 82 + /* Start counter exactly on falling edge of update flag. */ 77 83 while (CMOS_READ(RTC_REG_A) & RTC_UIP); 78 84 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); 79 85 80 - /* Start r4k counter. */ 86 + /* Initialize counters. 
*/ 81 87 start = read_c0_count(); 88 + if (gic_present) 89 + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart); 82 90 83 - /* Read counter exactly on falling edge of update flag */ 91 + /* Read counter exactly on falling edge of update flag. */ 84 92 while (CMOS_READ(RTC_REG_A) & RTC_UIP); 85 93 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); 86 94 87 - count = read_c0_count() - start; 95 + count = read_c0_count(); 96 + if (gic_present) 97 + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount); 88 98 89 - /* restore interrupts */ 90 99 local_irq_restore(flags); 91 100 101 + count -= start; 102 + if (gic_present) 103 + giccount -= gicstart; 104 + 92 105 mips_hpt_frequency = count; 93 - if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) && 94 - (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) 95 - count *= 2; 96 - 97 - count += 5000; /* round */ 98 - count -= count%10000; 99 - 100 - return count; 106 + if (gic_present) 107 + gic_frequency = giccount; 101 108 } 102 109 103 110 void read_persistent_clock(struct timespec *ts) ··· 149 144 150 145 void __init plat_time_init(void) 151 146 { 152 - unsigned int est_freq; 147 + unsigned int prid = read_c0_prid() & 0xffff00; 148 + unsigned int freq; 153 149 154 - /* Set Data mode - binary. 
*/ 155 - CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL); 150 + estimate_frequencies(); 156 151 157 - est_freq = estimate_cpu_frequency(); 152 + freq = mips_hpt_frequency; 153 + if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) && 154 + (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) 155 + freq *= 2; 156 + freq = freqround(freq, 5000); 157 + pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000, 158 + (freq%1000000)*100/1000000); 159 + cpu_khz = freq / 1000; 158 160 159 - printk("CPU frequency %d.%02d MHz\n", est_freq/1000000, 160 - (est_freq%1000000)*100/1000000); 161 + if (gic_present) { 162 + freq = freqround(gic_frequency, 5000); 163 + pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000, 164 + (freq%1000000)*100/1000000); 165 + gic_clocksource_init(gic_frequency); 166 + } else 167 + init_r4k_clocksource(); 161 168 162 - cpu_khz = est_freq / 1000; 163 - 164 - mips_scroll_message(); 165 - #ifdef CONFIG_I8253 /* Only Malta has a PIT */ 169 + #ifdef CONFIG_I8253 170 + /* Only Malta has a PIT. */ 166 171 setup_pit_timer(); 167 172 #endif 173 + 174 + mips_scroll_message(); 168 175 169 176 plat_perf_setup(); 170 177 }
+14 -27
arch/mips/netlogic/common/irq.c
··· 105 105 static void xlp_pic_mask_ack(struct irq_data *d) 106 106 { 107 107 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); 108 - uint64_t mask = 1ull << pd->picirq; 109 108 110 - write_c0_eirr(mask); /* ack by writing EIRR */ 109 + clear_c0_eimr(pd->picirq); 110 + ack_c0_eirr(pd->picirq); 111 111 } 112 112 113 113 static void xlp_pic_unmask(struct irq_data *d) 114 114 { 115 115 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d); 116 116 117 - if (!pd) 118 - return; 117 + BUG_ON(!pd); 119 118 120 119 if (pd->extra_ack) 121 120 pd->extra_ack(d); 121 + 122 + /* re-enable the intr on this cpu */ 123 + set_c0_eimr(pd->picirq); 122 124 123 125 /* Ack is a single write, no need to lock */ 124 126 nlm_pic_ack(pd->node->picbase, pd->irt); ··· 136 134 137 135 static void cpuintr_disable(struct irq_data *d) 138 136 { 139 - uint64_t eimr; 140 - uint64_t mask = 1ull << d->irq; 141 - 142 - eimr = read_c0_eimr(); 143 - write_c0_eimr(eimr & ~mask); 137 + clear_c0_eimr(d->irq); 144 138 } 145 139 146 140 static void cpuintr_enable(struct irq_data *d) 147 141 { 148 - uint64_t eimr; 149 - uint64_t mask = 1ull << d->irq; 150 - 151 - eimr = read_c0_eimr(); 152 - write_c0_eimr(eimr | mask); 142 + set_c0_eimr(d->irq); 153 143 } 154 144 155 145 static void cpuintr_ack(struct irq_data *d) 156 146 { 157 - uint64_t mask = 1ull << d->irq; 158 - 159 - write_c0_eirr(mask); 160 - } 161 - 162 - static void cpuintr_nop(struct irq_data *d) 163 - { 164 - WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq); 147 + ack_c0_eirr(d->irq); 165 148 } 166 149 167 150 /* ··· 157 170 .name = "XLP-CPU-INTR", 158 171 .irq_enable = cpuintr_enable, 159 172 .irq_disable = cpuintr_disable, 160 - .irq_mask = cpuintr_nop, 161 - .irq_ack = cpuintr_nop, 162 - .irq_eoi = cpuintr_ack, 173 + .irq_mask = cpuintr_disable, 174 + .irq_ack = cpuintr_ack, 175 + .irq_eoi = cpuintr_enable, 163 176 }; 164 177 165 178 static void __init nlm_init_percpu_irqs(void) ··· 217 230 nlm_setup_pic_irq(node, i, i, irt); 
218 231 /* set interrupts to first cpu in node */ 219 232 nlm_pic_init_irt(nodep->picbase, irt, i, 220 - node * NLM_CPUS_PER_NODE); 233 + node * NLM_CPUS_PER_NODE, 0); 221 234 irqmask |= (1ull << i); 222 235 } 223 236 nodep->irqmask = irqmask; ··· 252 265 int i, node; 253 266 254 267 node = nlm_nodeid(); 255 - eirr = read_c0_eirr() & read_c0_eimr(); 268 + eirr = read_c0_eirr_and_eimr(); 256 269 257 270 i = __ilog2_u64(eirr); 258 271 if (i == -1)
+6 -2
arch/mips/netlogic/common/smp.c
··· 84 84 /* IRQ_IPI_SMP_FUNCTION Handler */ 85 85 void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) 86 86 { 87 - write_c0_eirr(1ull << irq); 87 + clear_c0_eimr(irq); 88 + ack_c0_eirr(irq); 88 89 smp_call_function_interrupt(); 90 + set_c0_eimr(irq); 89 91 } 90 92 91 93 /* IRQ_IPI_SMP_RESCHEDULE handler */ 92 94 void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc) 93 95 { 94 - write_c0_eirr(1ull << irq); 96 + clear_c0_eimr(irq); 97 + ack_c0_eirr(irq); 95 98 scheduler_ipi(); 99 + set_c0_eimr(irq); 96 100 } 97 101 98 102 /*
+6
arch/mips/netlogic/common/smpboot.S
··· 69 69 #endif 70 70 mtcr t1, t0 71 71 72 + li t0, ICU_DEFEATURE 73 + mfcr t1, t0 74 + ori t1, 0x1000 /* Enable Icache partitioning */ 75 + mtcr t1, t0 76 + 77 + 72 78 #ifdef XLP_AX_WORKAROUND 73 79 li t0, SCHED_DEFEATURE 74 80 lui t1, 0x0100 /* Disable BRU accepting ALU ops */
+56
arch/mips/netlogic/common/time.c
··· 35 35 #include <linux/init.h> 36 36 37 37 #include <asm/time.h> 38 + #include <asm/cpu-features.h> 39 + 38 40 #include <asm/netlogic/interrupt.h> 39 41 #include <asm/netlogic/common.h> 42 + #include <asm/netlogic/haldefs.h> 43 + #include <asm/netlogic/common.h> 44 + 45 + #if defined(CONFIG_CPU_XLP) 46 + #include <asm/netlogic/xlp-hal/iomap.h> 47 + #include <asm/netlogic/xlp-hal/xlp.h> 48 + #include <asm/netlogic/xlp-hal/pic.h> 49 + #elif defined(CONFIG_CPU_XLR) 50 + #include <asm/netlogic/xlr/iomap.h> 51 + #include <asm/netlogic/xlr/pic.h> 52 + #include <asm/netlogic/xlr/xlr.h> 53 + #else 54 + #error "Unknown CPU" 55 + #endif 40 56 41 57 unsigned int __cpuinit get_c0_compare_int(void) 42 58 { 43 59 return IRQ_TIMER; 44 60 } 45 61 62 + static cycle_t nlm_get_pic_timer(struct clocksource *cs) 63 + { 64 + uint64_t picbase = nlm_get_node(0)->picbase; 65 + 66 + return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER); 67 + } 68 + 69 + static cycle_t nlm_get_pic_timer32(struct clocksource *cs) 70 + { 71 + uint64_t picbase = nlm_get_node(0)->picbase; 72 + 73 + return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER); 74 + } 75 + 76 + static struct clocksource csrc_pic = { 77 + .name = "PIC", 78 + .flags = CLOCK_SOURCE_IS_CONTINUOUS, 79 + }; 80 + 81 + static void nlm_init_pic_timer(void) 82 + { 83 + uint64_t picbase = nlm_get_node(0)->picbase; 84 + 85 + nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0); 86 + if (current_cpu_data.cputype == CPU_XLR) { 87 + csrc_pic.mask = CLOCKSOURCE_MASK(32); 88 + csrc_pic.read = nlm_get_pic_timer32; 89 + } else { 90 + csrc_pic.mask = CLOCKSOURCE_MASK(64); 91 + csrc_pic.read = nlm_get_pic_timer; 92 + } 93 + csrc_pic.rating = 1000; 94 + clocksource_register_hz(&csrc_pic, PIC_CLK_HZ); 95 + } 96 + 46 97 void __init plat_time_init(void) 47 98 { 99 + nlm_init_pic_timer(); 48 100 mips_hpt_frequency = nlm_get_cpu_frequency(); 101 + if (current_cpu_type() == CPU_XLR) 102 + preset_lpj = mips_hpt_frequency / (3 * HZ); 103 + else 104 + 
preset_lpj = mips_hpt_frequency / (2 * HZ); 49 105 pr_info("MIPS counter frequency [%ld]\n", 50 106 (unsigned long)mips_hpt_frequency); 51 107 }
+25 -10
arch/mips/netlogic/xlp/wakeup.c
··· 51 51 #include <asm/netlogic/xlp-hal/xlp.h> 52 52 #include <asm/netlogic/xlp-hal/sys.h> 53 53 54 - static int xlp_wakeup_core(uint64_t sysbase, int core) 54 + static int xlp_wakeup_core(uint64_t sysbase, int node, int core) 55 55 { 56 56 uint32_t coremask, value; 57 57 int count; ··· 82 82 struct nlm_soc_info *nodep; 83 83 uint64_t syspcibase; 84 84 uint32_t syscoremask; 85 - int core, n, cpu; 85 + int core, n, cpu, count, val; 86 86 87 87 for (n = 0; n < NLM_NR_NODES; n++) { 88 88 syspcibase = nlm_get_sys_pcibase(n); 89 89 if (nlm_read_reg(syspcibase, 0) == 0xffffffff) 90 90 break; 91 91 92 - /* read cores in reset from SYS and account for boot cpu */ 93 - nlm_node_init(n); 92 + /* read cores in reset from SYS */ 93 + if (n != 0) 94 + nlm_node_init(n); 94 95 nodep = nlm_get_node(n); 95 96 syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET); 96 - if (n == 0) 97 + /* The boot cpu */ 98 + if (n == 0) { 97 99 syscoremask |= 1; 100 + nodep->coremask = 1; 101 + } 98 102 99 103 for (core = 0; core < NLM_CORES_PER_NODE; core++) { 104 + /* we will be on node 0 core 0 */ 105 + if (n == 0 && core == 0) 106 + continue; 107 + 100 108 /* see if the core exists */ 101 109 if ((syscoremask & (1 << core)) == 0) 102 110 continue; 103 111 104 - /* see if at least the first thread is enabled */ 112 + /* see if at least the first hw thread is enabled */ 105 113 cpu = (n * NLM_CORES_PER_NODE + core) 106 114 * NLM_THREADS_PER_CORE; 107 115 if (!cpumask_test_cpu(cpu, wakeup_mask)) 108 116 continue; 109 117 110 118 /* wake up the core */ 111 - if (xlp_wakeup_core(nodep->sysbase, core)) 112 - nodep->coremask |= 1u << core; 113 - else 114 - pr_err("Failed to enable core %d\n", core); 119 + if (!xlp_wakeup_core(nodep->sysbase, n, core)) 120 + continue; 121 + 122 + /* core is up */ 123 + nodep->coremask |= 1u << core; 124 + 125 + /* spin until the first hw thread sets its ready */ 126 + count = 0x20000000; 127 + do { 128 + val = *(volatile int *)&nlm_cpu_ready[cpu]; 129 + } 
while (val == 0 && --count > 0); 115 130 } 116 131 } 117 132 }
+2
arch/mips/netlogic/xlr/fmn-config.c
··· 216 216 case PRID_IMP_NETLOGIC_XLS404B: 217 217 case PRID_IMP_NETLOGIC_XLS408B: 218 218 case PRID_IMP_NETLOGIC_XLS416B: 219 + case PRID_IMP_NETLOGIC_XLS608B: 220 + case PRID_IMP_NETLOGIC_XLS616B: 219 221 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0, 220 222 FMN_STNID_GMAC0_TX3, 8, 8, 32); 221 223 setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+1 -1
arch/mips/netlogic/xlr/platform.c
··· 64 64 .iotype = UPIO_MEM32, \ 65 65 .flags = (UPF_SKIP_TEST | \ 66 66 UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\ 67 - .uartclk = PIC_CLKS_PER_SEC, \ 67 + .uartclk = PIC_CLK_HZ, \ 68 68 .type = PORT_16550A, \ 69 69 .serial_in = nlm_xlr_uart_in, \ 70 70 .serial_out = nlm_xlr_uart_out, \
+1 -1
arch/mips/netlogic/xlr/setup.c
··· 70 70 s.iotype = UPIO_MEM32; 71 71 s.regshift = 2; 72 72 s.irq = PIC_UART_0_IRQ; 73 - s.uartclk = PIC_CLKS_PER_SEC; 73 + s.uartclk = PIC_CLK_HZ; 74 74 s.serial_in = nlm_xlr_uart_in; 75 75 s.serial_out = nlm_xlr_uart_out; 76 76 s.mapbase = uart_base;
+1
arch/mips/oprofile/common.c
··· 78 78 switch (current_cpu_type()) { 79 79 case CPU_5KC: 80 80 case CPU_M14KC: 81 + case CPU_M14KEC: 81 82 case CPU_20KC: 82 83 case CPU_24K: 83 84 case CPU_25KF:
+4
arch/mips/oprofile/op_model_mipsxx.c
··· 351 351 op_model_mipsxx_ops.cpu_type = "mips/M14Kc"; 352 352 break; 353 353 354 + case CPU_M14KEC: 355 + op_model_mipsxx_ops.cpu_type = "mips/M14KEc"; 356 + break; 357 + 354 358 case CPU_20KC: 355 359 op_model_mipsxx_ops.cpu_type = "mips/20K"; 356 360 break;
+125 -69
arch/mips/pci/pci-ar71xx.c
··· 18 18 #include <linux/pci.h> 19 19 #include <linux/pci_regs.h> 20 20 #include <linux/interrupt.h> 21 + #include <linux/module.h> 22 + #include <linux/platform_device.h> 21 23 22 24 #include <asm/mach-ath79/ar71xx_regs.h> 23 25 #include <asm/mach-ath79/ath79.h> 24 - #include <asm/mach-ath79/pci.h> 25 - 26 - #define AR71XX_PCI_MEM_BASE 0x10000000 27 - #define AR71XX_PCI_MEM_SIZE 0x07000000 28 - 29 - #define AR71XX_PCI_WIN0_OFFS 0x10000000 30 - #define AR71XX_PCI_WIN1_OFFS 0x11000000 31 - #define AR71XX_PCI_WIN2_OFFS 0x12000000 32 - #define AR71XX_PCI_WIN3_OFFS 0x13000000 33 - #define AR71XX_PCI_WIN4_OFFS 0x14000000 34 - #define AR71XX_PCI_WIN5_OFFS 0x15000000 35 - #define AR71XX_PCI_WIN6_OFFS 0x16000000 36 - #define AR71XX_PCI_WIN7_OFFS 0x07000000 37 - 38 - #define AR71XX_PCI_CFG_BASE \ 39 - (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000) 40 - #define AR71XX_PCI_CFG_SIZE 0x100 41 26 42 27 #define AR71XX_PCI_REG_CRP_AD_CBE 0x00 43 28 #define AR71XX_PCI_REG_CRP_WRDATA 0x04 ··· 48 63 49 64 #define AR71XX_PCI_IRQ_COUNT 5 50 65 51 - static DEFINE_SPINLOCK(ar71xx_pci_lock); 52 - static void __iomem *ar71xx_pcicfg_base; 66 + struct ar71xx_pci_controller { 67 + void __iomem *cfg_base; 68 + spinlock_t lock; 69 + int irq; 70 + int irq_base; 71 + struct pci_controller pci_ctrl; 72 + struct resource io_res; 73 + struct resource mem_res; 74 + }; 53 75 54 76 /* Byte lane enable bits */ 55 77 static const u8 ar71xx_pci_ble_table[4][4] = { ··· 99 107 return ret; 100 108 } 101 109 102 - static int ar71xx_pci_check_error(int quiet) 110 + static inline struct ar71xx_pci_controller * 111 + pci_bus_to_ar71xx_controller(struct pci_bus *bus) 103 112 { 104 - void __iomem *base = ar71xx_pcicfg_base; 113 + struct pci_controller *hose; 114 + 115 + hose = (struct pci_controller *) bus->sysdata; 116 + return container_of(hose, struct ar71xx_pci_controller, pci_ctrl); 117 + } 118 + 119 + static int ar71xx_pci_check_error(struct ar71xx_pci_controller *apc, int quiet) 120 + { 121 + void 
__iomem *base = apc->cfg_base; 105 122 u32 pci_err; 106 123 u32 ahb_err; 107 124 ··· 145 144 return !!(ahb_err | pci_err); 146 145 } 147 146 148 - static inline void ar71xx_pci_local_write(int where, int size, u32 value) 147 + static inline void ar71xx_pci_local_write(struct ar71xx_pci_controller *apc, 148 + int where, int size, u32 value) 149 149 { 150 - void __iomem *base = ar71xx_pcicfg_base; 150 + void __iomem *base = apc->cfg_base; 151 151 u32 ad_cbe; 152 152 153 153 value = value << (8 * (where & 3)); ··· 164 162 unsigned int devfn, 165 163 int where, int size, u32 cmd) 166 164 { 167 - void __iomem *base = ar71xx_pcicfg_base; 165 + struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); 166 + void __iomem *base = apc->cfg_base; 168 167 u32 addr; 169 168 170 169 addr = ar71xx_pci_bus_addr(bus, devfn, where); ··· 174 171 __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0), 175 172 base + AR71XX_PCI_REG_CFG_CBE); 176 173 177 - return ar71xx_pci_check_error(1); 174 + return ar71xx_pci_check_error(apc, 1); 178 175 } 179 176 180 177 static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, 181 178 int where, int size, u32 *value) 182 179 { 183 - void __iomem *base = ar71xx_pcicfg_base; 180 + struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); 181 + void __iomem *base = apc->cfg_base; 184 182 unsigned long flags; 185 183 u32 data; 186 184 int err; ··· 190 186 ret = PCIBIOS_SUCCESSFUL; 191 187 data = ~0; 192 188 193 - spin_lock_irqsave(&ar71xx_pci_lock, flags); 189 + spin_lock_irqsave(&apc->lock, flags); 194 190 195 191 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, 196 192 AR71XX_PCI_CFG_CMD_READ); ··· 199 195 else 200 196 data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA); 201 197 202 - spin_unlock_irqrestore(&ar71xx_pci_lock, flags); 198 + spin_unlock_irqrestore(&apc->lock, flags); 203 199 204 200 *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7]; 205 201 ··· 209 205 static int 
ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, 210 206 int where, int size, u32 value) 211 207 { 212 - void __iomem *base = ar71xx_pcicfg_base; 208 + struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); 209 + void __iomem *base = apc->cfg_base; 213 210 unsigned long flags; 214 211 int err; 215 212 int ret; ··· 218 213 value = value << (8 * (where & 3)); 219 214 ret = PCIBIOS_SUCCESSFUL; 220 215 221 - spin_lock_irqsave(&ar71xx_pci_lock, flags); 216 + spin_lock_irqsave(&apc->lock, flags); 222 217 223 218 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, 224 219 AR71XX_PCI_CFG_CMD_WRITE); ··· 227 222 else 228 223 __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA); 229 224 230 - spin_unlock_irqrestore(&ar71xx_pci_lock, flags); 225 + spin_unlock_irqrestore(&apc->lock, flags); 231 226 232 227 return ret; 233 228 } ··· 237 232 .write = ar71xx_pci_write_config, 238 233 }; 239 234 240 - static struct resource ar71xx_pci_io_resource = { 241 - .name = "PCI IO space", 242 - .start = 0, 243 - .end = 0, 244 - .flags = IORESOURCE_IO, 245 - }; 246 - 247 - static struct resource ar71xx_pci_mem_resource = { 248 - .name = "PCI memory space", 249 - .start = AR71XX_PCI_MEM_BASE, 250 - .end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1, 251 - .flags = IORESOURCE_MEM 252 - }; 253 - 254 - static struct pci_controller ar71xx_pci_controller = { 255 - .pci_ops = &ar71xx_pci_ops, 256 - .mem_resource = &ar71xx_pci_mem_resource, 257 - .io_resource = &ar71xx_pci_io_resource, 258 - }; 259 - 260 235 static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc) 261 236 { 237 + struct ar71xx_pci_controller *apc; 262 238 void __iomem *base = ath79_reset_base; 263 239 u32 pending; 240 + 241 + apc = irq_get_handler_data(irq); 264 242 265 243 pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) & 266 244 __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); 267 245 268 246 if (pending & AR71XX_PCI_INT_DEV0) 269 - 
generic_handle_irq(ATH79_PCI_IRQ(0)); 247 + generic_handle_irq(apc->irq_base + 0); 270 248 271 249 else if (pending & AR71XX_PCI_INT_DEV1) 272 - generic_handle_irq(ATH79_PCI_IRQ(1)); 250 + generic_handle_irq(apc->irq_base + 1); 273 251 274 252 else if (pending & AR71XX_PCI_INT_DEV2) 275 - generic_handle_irq(ATH79_PCI_IRQ(2)); 253 + generic_handle_irq(apc->irq_base + 2); 276 254 277 255 else if (pending & AR71XX_PCI_INT_CORE) 278 - generic_handle_irq(ATH79_PCI_IRQ(4)); 256 + generic_handle_irq(apc->irq_base + 4); 279 257 280 258 else 281 259 spurious_interrupt(); ··· 266 278 267 279 static void ar71xx_pci_irq_unmask(struct irq_data *d) 268 280 { 269 - unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE; 281 + struct ar71xx_pci_controller *apc; 282 + unsigned int irq; 270 283 void __iomem *base = ath79_reset_base; 271 284 u32 t; 285 + 286 + apc = irq_data_get_irq_chip_data(d); 287 + irq = d->irq - apc->irq_base; 272 288 273 289 t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); 274 290 __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE); ··· 283 291 284 292 static void ar71xx_pci_irq_mask(struct irq_data *d) 285 293 { 286 - unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE; 294 + struct ar71xx_pci_controller *apc; 295 + unsigned int irq; 287 296 void __iomem *base = ath79_reset_base; 288 297 u32 t; 298 + 299 + apc = irq_data_get_irq_chip_data(d); 300 + irq = d->irq - apc->irq_base; 289 301 290 302 t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE); 291 303 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE); ··· 305 309 .irq_mask_ack = ar71xx_pci_irq_mask, 306 310 }; 307 311 308 - static __init void ar71xx_pci_irq_init(void) 312 + static void ar71xx_pci_irq_init(struct ar71xx_pci_controller *apc) 309 313 { 310 314 void __iomem *base = ath79_reset_base; 311 315 int i; ··· 315 319 316 320 BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT); 317 321 318 - for (i = ATH79_PCI_IRQ_BASE; 319 - i < ATH79_PCI_IRQ_BASE + 
AR71XX_PCI_IRQ_COUNT; i++) 322 + apc->irq_base = ATH79_PCI_IRQ_BASE; 323 + for (i = apc->irq_base; 324 + i < apc->irq_base + AR71XX_PCI_IRQ_COUNT; i++) { 320 325 irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip, 321 326 handle_level_irq); 327 + irq_set_chip_data(i, apc); 328 + } 322 329 323 - irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler); 330 + irq_set_handler_data(apc->irq, apc); 331 + irq_set_chained_handler(apc->irq, ar71xx_pci_irq_handler); 324 332 } 325 333 326 - static __init void ar71xx_pci_reset(void) 334 + static void ar71xx_pci_reset(void) 327 335 { 328 336 void __iomem *ddr_base = ath79_ddr_base; 329 337 ··· 349 349 mdelay(100); 350 350 } 351 351 352 - __init int ar71xx_pcibios_init(void) 352 + static int ar71xx_pci_probe(struct platform_device *pdev) 353 353 { 354 + struct ar71xx_pci_controller *apc; 355 + struct resource *res; 354 356 u32 t; 355 357 356 - ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE); 357 - if (ar71xx_pcicfg_base == NULL) 358 + apc = devm_kzalloc(&pdev->dev, sizeof(struct ar71xx_pci_controller), 359 + GFP_KERNEL); 360 + if (!apc) 358 361 return -ENOMEM; 362 + 363 + spin_lock_init(&apc->lock); 364 + 365 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); 366 + if (!res) 367 + return -EINVAL; 368 + 369 + apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res); 370 + if (!apc->cfg_base) 371 + return -ENOMEM; 372 + 373 + apc->irq = platform_get_irq(pdev, 0); 374 + if (apc->irq < 0) 375 + return -EINVAL; 376 + 377 + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base"); 378 + if (!res) 379 + return -EINVAL; 380 + 381 + apc->io_res.parent = res; 382 + apc->io_res.name = "PCI IO space"; 383 + apc->io_res.start = res->start; 384 + apc->io_res.end = res->end; 385 + apc->io_res.flags = IORESOURCE_IO; 386 + 387 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base"); 388 + if (!res) 389 + return -EINVAL; 390 + 391 + apc->mem_res.parent = res; 392 + 
apc->mem_res.name = "PCI memory space"; 393 + apc->mem_res.start = res->start; 394 + apc->mem_res.end = res->end; 395 + apc->mem_res.flags = IORESOURCE_MEM; 359 396 360 397 ar71xx_pci_reset(); 361 398 362 399 /* setup COMMAND register */ 363 400 t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE 364 401 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK; 365 - ar71xx_pci_local_write(PCI_COMMAND, 4, t); 402 + ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t); 366 403 367 404 /* clear bus errors */ 368 - ar71xx_pci_check_error(1); 405 + ar71xx_pci_check_error(apc, 1); 369 406 370 - ar71xx_pci_irq_init(); 407 + ar71xx_pci_irq_init(apc); 371 408 372 - register_pci_controller(&ar71xx_pci_controller); 409 + apc->pci_ctrl.pci_ops = &ar71xx_pci_ops; 410 + apc->pci_ctrl.mem_resource = &apc->mem_res; 411 + apc->pci_ctrl.io_resource = &apc->io_res; 412 + 413 + register_pci_controller(&apc->pci_ctrl); 373 414 374 415 return 0; 375 416 } 417 + 418 + static struct platform_driver ar71xx_pci_driver = { 419 + .probe = ar71xx_pci_probe, 420 + .driver = { 421 + .name = "ar71xx-pci", 422 + .owner = THIS_MODULE, 423 + }, 424 + }; 425 + 426 + static int __init ar71xx_pci_init(void) 427 + { 428 + return platform_driver_register(&ar71xx_pci_driver); 429 + } 430 + 431 + postcore_initcall(ar71xx_pci_init);
+217 -87
arch/mips/pci/pci-ar724x.c
··· 9 9 * by the Free Software Foundation. 10 10 */ 11 11 12 + #include <linux/spinlock.h> 12 13 #include <linux/irq.h> 13 14 #include <linux/pci.h> 15 + #include <linux/module.h> 16 + #include <linux/platform_device.h> 14 17 #include <asm/mach-ath79/ath79.h> 15 18 #include <asm/mach-ath79/ar71xx_regs.h> 16 - #include <asm/mach-ath79/pci.h> 17 - 18 - #define AR724X_PCI_CFG_BASE 0x14000000 19 - #define AR724X_PCI_CFG_SIZE 0x1000 20 - #define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000) 21 - #define AR724X_PCI_CTRL_SIZE 0x100 22 - 23 - #define AR724X_PCI_MEM_BASE 0x10000000 24 - #define AR724X_PCI_MEM_SIZE 0x04000000 25 19 26 20 #define AR724X_PCI_REG_RESET 0x18 27 21 #define AR724X_PCI_REG_INT_STATUS 0x4c ··· 29 35 30 36 #define AR7240_BAR0_WAR_VALUE 0xffff 31 37 32 - static DEFINE_SPINLOCK(ar724x_pci_lock); 33 - static void __iomem *ar724x_pci_devcfg_base; 34 - static void __iomem *ar724x_pci_ctrl_base; 38 + #define AR724X_PCI_CMD_INIT (PCI_COMMAND_MEMORY | \ 39 + PCI_COMMAND_MASTER | \ 40 + PCI_COMMAND_INVALIDATE | \ 41 + PCI_COMMAND_PARITY | \ 42 + PCI_COMMAND_SERR | \ 43 + PCI_COMMAND_FAST_BACK) 35 44 36 - static u32 ar724x_pci_bar0_value; 37 - static bool ar724x_pci_bar0_is_cached; 38 - static bool ar724x_pci_link_up; 45 + struct ar724x_pci_controller { 46 + void __iomem *devcfg_base; 47 + void __iomem *ctrl_base; 48 + void __iomem *crp_base; 39 49 40 - static inline bool ar724x_pci_check_link(void) 50 + int irq; 51 + int irq_base; 52 + 53 + bool link_up; 54 + bool bar0_is_cached; 55 + u32 bar0_value; 56 + 57 + spinlock_t lock; 58 + 59 + struct pci_controller pci_controller; 60 + struct resource io_res; 61 + struct resource mem_res; 62 + }; 63 + 64 + static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc) 41 65 { 42 66 u32 reset; 43 67 44 - reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET); 68 + reset = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET); 45 69 return reset & AR724X_PCI_RESET_LINK_UP; 70 + } 71 + 72 + 
static inline struct ar724x_pci_controller * 73 + pci_bus_to_ar724x_controller(struct pci_bus *bus) 74 + { 75 + struct pci_controller *hose; 76 + 77 + hose = (struct pci_controller *) bus->sysdata; 78 + return container_of(hose, struct ar724x_pci_controller, pci_controller); 79 + } 80 + 81 + static int ar724x_pci_local_write(struct ar724x_pci_controller *apc, 82 + int where, int size, u32 value) 83 + { 84 + unsigned long flags; 85 + void __iomem *base; 86 + u32 data; 87 + int s; 88 + 89 + WARN_ON(where & (size - 1)); 90 + 91 + if (!apc->link_up) 92 + return PCIBIOS_DEVICE_NOT_FOUND; 93 + 94 + base = apc->crp_base; 95 + 96 + spin_lock_irqsave(&apc->lock, flags); 97 + data = __raw_readl(base + (where & ~3)); 98 + 99 + switch (size) { 100 + case 1: 101 + s = ((where & 3) * 8); 102 + data &= ~(0xff << s); 103 + data |= ((value & 0xff) << s); 104 + break; 105 + case 2: 106 + s = ((where & 2) * 8); 107 + data &= ~(0xffff << s); 108 + data |= ((value & 0xffff) << s); 109 + break; 110 + case 4: 111 + data = value; 112 + break; 113 + default: 114 + spin_unlock_irqrestore(&apc->lock, flags); 115 + return PCIBIOS_BAD_REGISTER_NUMBER; 116 + } 117 + 118 + __raw_writel(data, base + (where & ~3)); 119 + /* flush write */ 120 + __raw_readl(base + (where & ~3)); 121 + spin_unlock_irqrestore(&apc->lock, flags); 122 + 123 + return PCIBIOS_SUCCESSFUL; 46 124 } 47 125 48 126 static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, 49 127 int size, uint32_t *value) 50 128 { 129 + struct ar724x_pci_controller *apc; 51 130 unsigned long flags; 52 131 void __iomem *base; 53 132 u32 data; 54 133 55 - if (!ar724x_pci_link_up) 134 + apc = pci_bus_to_ar724x_controller(bus); 135 + if (!apc->link_up) 56 136 return PCIBIOS_DEVICE_NOT_FOUND; 57 137 58 138 if (devfn) 59 139 return PCIBIOS_DEVICE_NOT_FOUND; 60 140 61 - base = ar724x_pci_devcfg_base; 141 + base = apc->devcfg_base; 62 142 63 - spin_lock_irqsave(&ar724x_pci_lock, flags); 143 + spin_lock_irqsave(&apc->lock, flags); 
64 144 data = __raw_readl(base + (where & ~3)); 65 145 66 146 switch (size) { ··· 153 85 case 4: 154 86 break; 155 87 default: 156 - spin_unlock_irqrestore(&ar724x_pci_lock, flags); 88 + spin_unlock_irqrestore(&apc->lock, flags); 157 89 158 90 return PCIBIOS_BAD_REGISTER_NUMBER; 159 91 } 160 92 161 - spin_unlock_irqrestore(&ar724x_pci_lock, flags); 93 + spin_unlock_irqrestore(&apc->lock, flags); 162 94 163 95 if (where == PCI_BASE_ADDRESS_0 && size == 4 && 164 - ar724x_pci_bar0_is_cached) { 96 + apc->bar0_is_cached) { 165 97 /* use the cached value */ 166 - *value = ar724x_pci_bar0_value; 98 + *value = apc->bar0_value; 167 99 } else { 168 100 *value = data; 169 101 } ··· 174 106 static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where, 175 107 int size, uint32_t value) 176 108 { 109 + struct ar724x_pci_controller *apc; 177 110 unsigned long flags; 178 111 void __iomem *base; 179 112 u32 data; 180 113 int s; 181 114 182 - if (!ar724x_pci_link_up) 115 + apc = pci_bus_to_ar724x_controller(bus); 116 + if (!apc->link_up) 183 117 return PCIBIOS_DEVICE_NOT_FOUND; 184 118 185 119 if (devfn) ··· 199 129 * BAR0 register in order to make the device memory 200 130 * accessible. 
201 131 */ 202 - ar724x_pci_bar0_is_cached = true; 203 - ar724x_pci_bar0_value = value; 132 + apc->bar0_is_cached = true; 133 + apc->bar0_value = value; 204 134 205 135 value = AR7240_BAR0_WAR_VALUE; 206 136 } else { 207 - ar724x_pci_bar0_is_cached = false; 137 + apc->bar0_is_cached = false; 208 138 } 209 139 } 210 140 211 - base = ar724x_pci_devcfg_base; 141 + base = apc->devcfg_base; 212 142 213 - spin_lock_irqsave(&ar724x_pci_lock, flags); 143 + spin_lock_irqsave(&apc->lock, flags); 214 144 data = __raw_readl(base + (where & ~3)); 215 145 216 146 switch (size) { ··· 228 158 data = value; 229 159 break; 230 160 default: 231 - spin_unlock_irqrestore(&ar724x_pci_lock, flags); 161 + spin_unlock_irqrestore(&apc->lock, flags); 232 162 233 163 return PCIBIOS_BAD_REGISTER_NUMBER; 234 164 } ··· 236 166 __raw_writel(data, base + (where & ~3)); 237 167 /* flush write */ 238 168 __raw_readl(base + (where & ~3)); 239 - spin_unlock_irqrestore(&ar724x_pci_lock, flags); 169 + spin_unlock_irqrestore(&apc->lock, flags); 240 170 241 171 return PCIBIOS_SUCCESSFUL; 242 172 } ··· 246 176 .write = ar724x_pci_write, 247 177 }; 248 178 249 - static struct resource ar724x_io_resource = { 250 - .name = "PCI IO space", 251 - .start = 0, 252 - .end = 0, 253 - .flags = IORESOURCE_IO, 254 - }; 255 - 256 - static struct resource ar724x_mem_resource = { 257 - .name = "PCI memory space", 258 - .start = AR724X_PCI_MEM_BASE, 259 - .end = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1, 260 - .flags = IORESOURCE_MEM, 261 - }; 262 - 263 - static struct pci_controller ar724x_pci_controller = { 264 - .pci_ops = &ar724x_pci_ops, 265 - .io_resource = &ar724x_io_resource, 266 - .mem_resource = &ar724x_mem_resource, 267 - }; 268 - 269 179 static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc) 270 180 { 181 + struct ar724x_pci_controller *apc; 271 182 void __iomem *base; 272 183 u32 pending; 273 184 274 - base = ar724x_pci_ctrl_base; 185 + apc = irq_get_handler_data(irq); 186 + base = 
apc->ctrl_base; 275 187 276 188 pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) & 277 189 __raw_readl(base + AR724X_PCI_REG_INT_MASK); 278 190 279 191 if (pending & AR724X_PCI_INT_DEV0) 280 - generic_handle_irq(ATH79_PCI_IRQ(0)); 192 + generic_handle_irq(apc->irq_base + 0); 281 193 282 194 else 283 195 spurious_interrupt(); ··· 267 215 268 216 static void ar724x_pci_irq_unmask(struct irq_data *d) 269 217 { 218 + struct ar724x_pci_controller *apc; 270 219 void __iomem *base; 220 + int offset; 271 221 u32 t; 272 222 273 - base = ar724x_pci_ctrl_base; 223 + apc = irq_data_get_irq_chip_data(d); 224 + base = apc->ctrl_base; 225 + offset = apc->irq_base - d->irq; 274 226 275 - switch (d->irq) { 276 - case ATH79_PCI_IRQ(0): 227 + switch (offset) { 228 + case 0: 277 229 t = __raw_readl(base + AR724X_PCI_REG_INT_MASK); 278 230 __raw_writel(t | AR724X_PCI_INT_DEV0, 279 231 base + AR724X_PCI_REG_INT_MASK); ··· 288 232 289 233 static void ar724x_pci_irq_mask(struct irq_data *d) 290 234 { 235 + struct ar724x_pci_controller *apc; 291 236 void __iomem *base; 237 + int offset; 292 238 u32 t; 293 239 294 - base = ar724x_pci_ctrl_base; 240 + apc = irq_data_get_irq_chip_data(d); 241 + base = apc->ctrl_base; 242 + offset = apc->irq_base - d->irq; 295 243 296 - switch (d->irq) { 297 - case ATH79_PCI_IRQ(0): 244 + switch (offset) { 245 + case 0: 298 246 t = __raw_readl(base + AR724X_PCI_REG_INT_MASK); 299 247 __raw_writel(t & ~AR724X_PCI_INT_DEV0, 300 248 base + AR724X_PCI_REG_INT_MASK); ··· 322 262 .irq_mask_ack = ar724x_pci_irq_mask, 323 263 }; 324 264 325 - static void __init ar724x_pci_irq_init(int irq) 265 + static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc, 266 + int id) 326 267 { 327 268 void __iomem *base; 328 269 int i; 329 270 330 - base = ar724x_pci_ctrl_base; 271 + base = apc->ctrl_base; 331 272 332 273 __raw_writel(0, base + AR724X_PCI_REG_INT_MASK); 333 274 __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS); 334 275 335 - 
BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT); 276 + apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT); 336 277 337 - for (i = ATH79_PCI_IRQ_BASE; 338 - i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++) 278 + for (i = apc->irq_base; 279 + i < apc->irq_base + AR724X_PCI_IRQ_COUNT; i++) { 339 280 irq_set_chip_and_handler(i, &ar724x_pci_irq_chip, 340 281 handle_level_irq); 282 + irq_set_chip_data(i, apc); 283 + } 341 284 342 - irq_set_chained_handler(irq, ar724x_pci_irq_handler); 285 + irq_set_handler_data(apc->irq, apc); 286 + irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler); 343 287 } 344 288 345 - int __init ar724x_pcibios_init(int irq) 289 + static int ar724x_pci_probe(struct platform_device *pdev) 346 290 { 347 - int ret; 291 + struct ar724x_pci_controller *apc; 292 + struct resource *res; 293 + int id; 348 294 349 - ret = -ENOMEM; 295 + id = pdev->id; 296 + if (id == -1) 297 + id = 0; 350 298 351 - ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE, 352 - AR724X_PCI_CFG_SIZE); 353 - if (ar724x_pci_devcfg_base == NULL) 354 - goto err; 299 + apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller), 300 + GFP_KERNEL); 301 + if (!apc) 302 + return -ENOMEM; 355 303 356 - ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE, 357 - AR724X_PCI_CTRL_SIZE); 358 - if (ar724x_pci_ctrl_base == NULL) 359 - goto err_unmap_devcfg; 304 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base"); 305 + if (!res) 306 + return -EINVAL; 360 307 361 - ar724x_pci_link_up = ar724x_pci_check_link(); 362 - if (!ar724x_pci_link_up) 363 - pr_warn("ar724x: PCIe link is down\n"); 308 + apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res); 309 + if (apc->ctrl_base == NULL) 310 + return -EBUSY; 364 311 365 - ar724x_pci_irq_init(irq); 366 - register_pci_controller(&ar724x_pci_controller); 312 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); 313 + if (!res) 314 + return -EINVAL; 367 315 368 - return 
PCIBIOS_SUCCESSFUL; 316 + apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res); 317 + if (!apc->devcfg_base) 318 + return -EBUSY; 369 319 370 - err_unmap_devcfg: 371 - iounmap(ar724x_pci_devcfg_base); 372 - err: 373 - return ret; 320 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base"); 321 + if (!res) 322 + return -EINVAL; 323 + 324 + apc->crp_base = devm_request_and_ioremap(&pdev->dev, res); 325 + if (apc->crp_base == NULL) 326 + return -EBUSY; 327 + 328 + apc->irq = platform_get_irq(pdev, 0); 329 + if (apc->irq < 0) 330 + return -EINVAL; 331 + 332 + spin_lock_init(&apc->lock); 333 + 334 + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base"); 335 + if (!res) 336 + return -EINVAL; 337 + 338 + apc->io_res.parent = res; 339 + apc->io_res.name = "PCI IO space"; 340 + apc->io_res.start = res->start; 341 + apc->io_res.end = res->end; 342 + apc->io_res.flags = IORESOURCE_IO; 343 + 344 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base"); 345 + if (!res) 346 + return -EINVAL; 347 + 348 + apc->mem_res.parent = res; 349 + apc->mem_res.name = "PCI memory space"; 350 + apc->mem_res.start = res->start; 351 + apc->mem_res.end = res->end; 352 + apc->mem_res.flags = IORESOURCE_MEM; 353 + 354 + apc->pci_controller.pci_ops = &ar724x_pci_ops; 355 + apc->pci_controller.io_resource = &apc->io_res; 356 + apc->pci_controller.mem_resource = &apc->mem_res; 357 + 358 + apc->link_up = ar724x_pci_check_link(apc); 359 + if (!apc->link_up) 360 + dev_warn(&pdev->dev, "PCIe link is down\n"); 361 + 362 + ar724x_pci_irq_init(apc, id); 363 + 364 + ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT); 365 + 366 + register_pci_controller(&apc->pci_controller); 367 + 368 + return 0; 374 369 } 370 + 371 + static struct platform_driver ar724x_pci_driver = { 372 + .probe = ar724x_pci_probe, 373 + .driver = { 374 + .name = "ar724x-pci", 375 + .owner = THIS_MODULE, 376 + }, 377 + }; 378 + 379 + static int __init ar724x_pci_init(void) 
380 + { 381 + return platform_driver_register(&ar724x_pci_driver); 382 + } 383 + 384 + postcore_initcall(ar724x_pci_init);
+10 -2
arch/mips/pci/pci-lantiq.c
··· 129 129 130 130 /* setup reset gpio used by pci */ 131 131 reset_gpio = of_get_named_gpio(node, "gpio-reset", 0); 132 - if (gpio_is_valid(reset_gpio)) 133 - devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset"); 132 + if (gpio_is_valid(reset_gpio)) { 133 + int ret = devm_gpio_request(&pdev->dev, 134 + reset_gpio, "pci-reset"); 135 + if (ret) { 136 + dev_err(&pdev->dev, 137 + "failed to request gpio %d\n", reset_gpio); 138 + return ret; 139 + } 140 + gpio_direction_output(reset_gpio, 1); 141 + } 134 142 135 143 /* enable auto-switching between PCI and EBU */ 136 144 ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
+80 -44
arch/mips/pci/pci-xlp.c
··· 46 46 47 47 #include <asm/netlogic/interrupt.h> 48 48 #include <asm/netlogic/haldefs.h> 49 + #include <asm/netlogic/common.h> 49 50 50 51 #include <asm/netlogic/xlp-hal/iomap.h> 51 52 #include <asm/netlogic/xlp-hal/pic.h> ··· 65 64 u32 data; 66 65 u32 *cfgaddr; 67 66 67 + where &= ~3; 68 + if (bus->number == 0 && PCI_SLOT(devfn) == 1 && where == 0x954) 69 + return 0xffffffff; 70 + 68 71 cfgaddr = (u32 *)(pci_config_base + 69 - pci_cfg_addr(bus->number, devfn, where & ~3)); 72 + pci_cfg_addr(bus->number, devfn, where)); 70 73 data = *cfgaddr; 71 74 return data; 72 75 } ··· 162 157 .io_offset = 0x00000000UL, 163 158 }; 164 159 165 - static int get_irq_vector(const struct pci_dev *dev) 160 + static struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev) 166 161 { 167 - /* 168 - * For XLP PCIe, there is an IRQ per Link, find out which 169 - * link the device is on to assign interrupts 170 - */ 171 - if (dev->bus->self == NULL) 172 - return 0; 162 + struct pci_bus *bus, *p; 173 163 174 - switch (dev->bus->self->devfn) { 175 - case 0x8: 176 - return PIC_PCIE_LINK_0_IRQ; 177 - case 0x9: 178 - return PIC_PCIE_LINK_1_IRQ; 179 - case 0xa: 180 - return PIC_PCIE_LINK_2_IRQ; 181 - case 0xb: 182 - return PIC_PCIE_LINK_3_IRQ; 183 - } 184 - WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn); 185 - return 0; 164 + /* Find the bridge on bus 0 */ 165 + bus = dev->bus; 166 + for (p = bus->parent; p && p->number != 0; p = p->parent) 167 + bus = p; 168 + 169 + return p ? 
bus->self : NULL; 170 + } 171 + 172 + static inline int nlm_pci_link_to_irq(int link) 173 + { 174 + return PIC_PCIE_LINK_0_IRQ + link; 186 175 } 187 176 188 177 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 189 178 { 190 - return get_irq_vector(dev); 179 + struct pci_dev *lnkdev; 180 + int lnkslot, lnkfunc; 181 + 182 + /* 183 + * For XLP PCIe, there is an IRQ per Link, find out which 184 + * link the device is on to assign interrupts 185 + */ 186 + lnkdev = xlp_get_pcie_link(dev); 187 + if (lnkdev == NULL) 188 + return 0; 189 + lnkfunc = PCI_FUNC(lnkdev->devfn); 190 + lnkslot = PCI_SLOT(lnkdev->devfn); 191 + return nlm_irq_to_xirq(lnkslot / 8, nlm_pci_link_to_irq(lnkfunc)); 191 192 } 192 193 193 194 /* Do platform specific device initialization at pci_enable_device() time */ ··· 202 191 return 0; 203 192 } 204 193 205 - static int xlp_enable_pci_bswap(void) 194 + /* 195 + * If big-endian, enable hardware byteswap on the PCIe bridges. 196 + * This will make both the SoC and PCIe devices behave consistently with 197 + * readl/writel. 198 + */ 199 + #ifdef __BIG_ENDIAN 200 + static void xlp_config_pci_bswap(int node, int link) 206 201 { 207 - uint64_t pciebase, sysbase; 208 - int node, i; 202 + uint64_t nbubase, lnkbase; 209 203 u32 reg; 210 204 211 - /* Chip-0 so node set to 0 */ 212 - node = 0; 213 - sysbase = nlm_get_bridge_regbase(node); 205 + nbubase = nlm_get_bridge_regbase(node); 206 + lnkbase = nlm_get_pcie_base(node, link); 207 + 214 208 /* 215 209 * Enable byte swap in hardware. Program each link's PCIe SWAP regions 216 210 * from the link's address ranges. 
217 211 */ 218 - for (i = 0; i < 4; i++) { 219 - pciebase = nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, i)); 220 - if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff) 221 - continue; 212 + reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_BASE0 + link); 213 + nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_BASE, reg); 222 214 223 - reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_BASE0 + i); 224 - nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_BASE, reg); 215 + reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_LIMIT0 + link); 216 + nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_LIM, reg | 0xfff); 225 217 226 - reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_LIMIT0 + i); 227 - nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_LIM, 228 - reg | 0xfff); 218 + reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_BASE0 + link); 219 + nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_BASE, reg); 229 220 230 - reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_BASE0 + i); 231 - nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_BASE, reg); 232 - 233 - reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_LIMIT0 + i); 234 - nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff); 235 - } 236 - return 0; 221 + reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_LIMIT0 + link); 222 + nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff); 237 223 } 224 + #else 225 + /* Swap configuration not needed in little-endian mode */ 226 + static inline void xlp_config_pci_bswap(int node, int link) {} 227 + #endif /* __BIG_ENDIAN */ 238 228 239 229 static int __init pcibios_init(void) 240 230 { 231 + struct nlm_soc_info *nodep; 232 + uint64_t pciebase; 233 + int link, n; 234 + u32 reg; 235 + 241 236 /* Firmware assigns PCI resources */ 242 237 pci_set_flags(PCI_PROBE_ONLY); 243 238 pci_config_base = ioremap(XLP_DEFAULT_PCI_ECFG_BASE, 64 << 20); ··· 252 235 ioport_resource.start = 0; 253 236 ioport_resource.end = ~0; 254 237 255 - xlp_enable_pci_bswap(); 238 + for (n = 0; n < NLM_NR_NODES; n++) { 239 + 
nodep = nlm_get_node(n); 240 + if (!nodep->coremask) 241 + continue; /* node does not exist */ 242 + 243 + for (link = 0; link < 4; link++) { 244 + pciebase = nlm_get_pcie_base(n, link); 245 + if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff) 246 + continue; 247 + xlp_config_pci_bswap(n, link); 248 + 249 + /* put in intpin and irq - u-boot does not */ 250 + reg = nlm_read_pci_reg(pciebase, 0xf); 251 + reg &= ~0x1fu; 252 + reg |= (1 << 8) | nlm_pci_link_to_irq(link); 253 + nlm_write_pci_reg(pciebase, 0xf, reg); 254 + pr_info("XLP PCIe: Link %d-%d initialized.\n", n, link); 255 + } 256 + } 257 + 256 258 set_io_port_base(CKSEG1); 257 259 nlm_pci_controller.io_map_base = CKSEG1; 258 260
+13 -2
arch/mips/pci/pci.c
··· 175 175 176 176 void register_pci_controller(struct pci_controller *hose) 177 177 { 178 - if (request_resource(&iomem_resource, hose->mem_resource) < 0) 178 + struct resource *parent; 179 + 180 + parent = hose->mem_resource->parent; 181 + if (!parent) 182 + parent = &iomem_resource; 183 + 184 + if (request_resource(parent, hose->mem_resource) < 0) 179 185 goto out; 180 - if (request_resource(&ioport_resource, hose->io_resource) < 0) { 186 + 187 + parent = hose->io_resource->parent; 188 + if (!parent) 189 + parent = &ioport_resource; 190 + 191 + if (request_resource(parent, hose->io_resource) < 0) { 181 192 release_resource(hose->mem_resource); 182 193 goto out; 183 194 }
+32
arch/mips/ralink/Kconfig
··· 1 + if RALINK 2 + 3 + choice 4 + prompt "Ralink SoC selection" 5 + default SOC_RT305X 6 + help 7 + Select Ralink MIPS SoC type. 8 + 9 + config SOC_RT305X 10 + bool "RT305x" 11 + select USB_ARCH_HAS_HCD 12 + select USB_ARCH_HAS_OHCI 13 + select USB_ARCH_HAS_EHCI 14 + 15 + endchoice 16 + 17 + choice 18 + prompt "Devicetree selection" 19 + default DTB_RT_NONE 20 + help 21 + Select the devicetree. 22 + 23 + config DTB_RT_NONE 24 + bool "None" 25 + 26 + config DTB_RT305X_EVAL 27 + bool "RT305x eval kit" 28 + depends on SOC_RT305X 29 + 30 + endchoice 31 + 32 + endif
+15
arch/mips/ralink/Makefile
··· 1 + # This program is free software; you can redistribute it and/or modify it 2 + # under the terms of the GNU General Public License version 2 as published 3 + # by the Free Software Foundation. 4 + # Makefile for the Ralink common stuff 5 + # 6 + # Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> 7 + # Copyright (C) 2013 John Crispin <blogic@openwrt.org> 8 + 9 + obj-y := prom.o of.o reset.o clk.o irq.o 10 + 11 + obj-$(CONFIG_SOC_RT305X) += rt305x.o 12 + 13 + obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 14 + 15 + obj-y += dts/
+10
arch/mips/ralink/Platform
··· 1 + # 2 + # Ralink SoC common stuff 3 + # 4 + core-$(CONFIG_RALINK) += arch/mips/ralink/ 5 + cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink 6 + 7 + # 8 + # Ralink RT305x 9 + # 10 + load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000
+72
arch/mips/ralink/clk.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> 7 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 8 + */ 9 + 10 + #include <linux/kernel.h> 11 + #include <linux/module.h> 12 + #include <linux/clkdev.h> 13 + #include <linux/clk.h> 14 + 15 + #include <asm/time.h> 16 + 17 + #include "common.h" 18 + 19 + struct clk { 20 + struct clk_lookup cl; 21 + unsigned long rate; 22 + }; 23 + 24 + void ralink_clk_add(const char *dev, unsigned long rate) 25 + { 26 + struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); 27 + 28 + if (!clk) 29 + panic("failed to add clock\n"); 30 + 31 + clk->cl.dev_id = dev; 32 + clk->cl.clk = clk; 33 + 34 + clk->rate = rate; 35 + 36 + clkdev_add(&clk->cl); 37 + } 38 + 39 + /* 40 + * Linux clock API 41 + */ 42 + int clk_enable(struct clk *clk) 43 + { 44 + return 0; 45 + } 46 + EXPORT_SYMBOL_GPL(clk_enable); 47 + 48 + void clk_disable(struct clk *clk) 49 + { 50 + } 51 + EXPORT_SYMBOL_GPL(clk_disable); 52 + 53 + unsigned long clk_get_rate(struct clk *clk) 54 + { 55 + return clk->rate; 56 + } 57 + EXPORT_SYMBOL_GPL(clk_get_rate); 58 + 59 + void __init plat_time_init(void) 60 + { 61 + struct clk *clk; 62 + 63 + ralink_of_remap(); 64 + 65 + ralink_clk_init(); 66 + clk = clk_get_sys("cpu", NULL); 67 + if (IS_ERR(clk)) 68 + panic("unable to get CPU clock, err=%ld", PTR_ERR(clk)); 69 + pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000); 70 + mips_hpt_frequency = clk_get_rate(clk) / 2; 71 + clk_put(clk); 72 + }
+44
arch/mips/ralink/common.h
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 7 + */ 8 + 9 + #ifndef _RALINK_COMMON_H__ 10 + #define _RALINK_COMMON_H__ 11 + 12 + #define RAMIPS_SYS_TYPE_LEN 32 13 + 14 + struct ralink_pinmux_grp { 15 + const char *name; 16 + u32 mask; 17 + int gpio_first; 18 + int gpio_last; 19 + }; 20 + 21 + struct ralink_pinmux { 22 + struct ralink_pinmux_grp *mode; 23 + struct ralink_pinmux_grp *uart; 24 + int uart_shift; 25 + void (*wdt_reset)(void); 26 + }; 27 + extern struct ralink_pinmux gpio_pinmux; 28 + 29 + struct ralink_soc_info { 30 + unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; 31 + unsigned char *compatible; 32 + }; 33 + extern struct ralink_soc_info soc_info; 34 + 35 + extern void ralink_of_remap(void); 36 + 37 + extern void ralink_clk_init(void); 38 + extern void ralink_clk_add(const char *dev, unsigned long rate); 39 + 40 + extern void prom_soc_init(struct ralink_soc_info *soc_info); 41 + 42 + __iomem void *plat_of_remap_node(const char *node); 43 + 44 + #endif /* _RALINK_COMMON_H__ */
+1
arch/mips/ralink/dts/Makefile
··· 1 + obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
+106
arch/mips/ralink/dts/rt3050.dtsi
··· 1 + / { 2 + #address-cells = <1>; 3 + #size-cells = <1>; 4 + compatible = "ralink,rt3050-soc", "ralink,rt3052-soc"; 5 + 6 + cpus { 7 + cpu@0 { 8 + compatible = "mips,mips24KEc"; 9 + }; 10 + }; 11 + 12 + chosen { 13 + bootargs = "console=ttyS0,57600 init=/init"; 14 + }; 15 + 16 + cpuintc: cpuintc@0 { 17 + #address-cells = <0>; 18 + #interrupt-cells = <1>; 19 + interrupt-controller; 20 + compatible = "mti,cpu-interrupt-controller"; 21 + }; 22 + 23 + palmbus@10000000 { 24 + compatible = "palmbus"; 25 + reg = <0x10000000 0x200000>; 26 + ranges = <0x0 0x10000000 0x1FFFFF>; 27 + 28 + #address-cells = <1>; 29 + #size-cells = <1>; 30 + 31 + sysc@0 { 32 + compatible = "ralink,rt3052-sysc", "ralink,rt3050-sysc"; 33 + reg = <0x0 0x100>; 34 + }; 35 + 36 + timer@100 { 37 + compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt"; 38 + reg = <0x100 0x100>; 39 + }; 40 + 41 + intc: intc@200 { 42 + compatible = "ralink,rt3052-intc", "ralink,rt2880-intc"; 43 + reg = <0x200 0x100>; 44 + 45 + interrupt-controller; 46 + #interrupt-cells = <1>; 47 + 48 + interrupt-parent = <&cpuintc>; 49 + interrupts = <2>; 50 + }; 51 + 52 + memc@300 { 53 + compatible = "ralink,rt3052-memc", "ralink,rt3050-memc"; 54 + reg = <0x300 0x100>; 55 + }; 56 + 57 + gpio0: gpio@600 { 58 + compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; 59 + reg = <0x600 0x34>; 60 + 61 + gpio-controller; 62 + #gpio-cells = <2>; 63 + 64 + ralink,ngpio = <24>; 65 + ralink,regs = [ 00 04 08 0c 66 + 20 24 28 2c 67 + 30 34 ]; 68 + }; 69 + 70 + gpio1: gpio@638 { 71 + compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; 72 + reg = <0x638 0x24>; 73 + 74 + gpio-controller; 75 + #gpio-cells = <2>; 76 + 77 + ralink,ngpio = <16>; 78 + ralink,regs = [ 00 04 08 0c 79 + 10 14 18 1c 80 + 20 24 ]; 81 + }; 82 + 83 + gpio2: gpio@660 { 84 + compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; 85 + reg = <0x660 0x24>; 86 + 87 + gpio-controller; 88 + #gpio-cells = <2>; 89 + 90 + ralink,ngpio = <12>; 91 + ralink,regs = [ 00 04 08 0c 92 
+ 10 14 18 1c 93 + 20 24 ]; 94 + }; 95 + 96 + uartlite@c00 { 97 + compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a"; 98 + reg = <0xc00 0x100>; 99 + 100 + interrupt-parent = <&intc>; 101 + interrupts = <12>; 102 + 103 + reg-shift = <2>; 104 + }; 105 + }; 106 + };
+52
arch/mips/ralink/dts/rt3052_eval.dts
··· 1 + /dts-v1/; 2 + 3 + /include/ "rt3050.dtsi" 4 + 5 + / { 6 + #address-cells = <1>; 7 + #size-cells = <1>; 8 + compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc"; 9 + model = "Ralink RT3052 evaluation board"; 10 + 11 + memory@0 { 12 + reg = <0x0 0x2000000>; 13 + }; 14 + 15 + palmbus@10000000 { 16 + sysc@0 { 17 + ralink,pinmmux = "uartlite", "spi"; 18 + ralink,uartmux = "gpio"; 19 + ralink,wdtmux = <0>; 20 + }; 21 + }; 22 + 23 + cfi@1f000000 { 24 + compatible = "cfi-flash"; 25 + reg = <0x1f000000 0x800000>; 26 + 27 + bank-width = <2>; 28 + device-width = <2>; 29 + #address-cells = <1>; 30 + #size-cells = <1>; 31 + 32 + partition@0 { 33 + label = "uboot"; 34 + reg = <0x0 0x30000>; 35 + read-only; 36 + }; 37 + partition@30000 { 38 + label = "uboot-env"; 39 + reg = <0x30000 0x10000>; 40 + read-only; 41 + }; 42 + partition@40000 { 43 + label = "calibration"; 44 + reg = <0x40000 0x10000>; 45 + read-only; 46 + }; 47 + partition@50000 { 48 + label = "linux"; 49 + reg = <0x50000 0x7b0000>; 50 + }; 51 + }; 52 + };
+44
arch/mips/ralink/early_printk.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org> 7 + */ 8 + 9 + #include <linux/io.h> 10 + #include <linux/serial_reg.h> 11 + 12 + #include <asm/addrspace.h> 13 + 14 + #define EARLY_UART_BASE 0x10000c00 15 + 16 + #define UART_REG_RX 0x00 17 + #define UART_REG_TX 0x04 18 + #define UART_REG_IER 0x08 19 + #define UART_REG_IIR 0x0c 20 + #define UART_REG_FCR 0x10 21 + #define UART_REG_LCR 0x14 22 + #define UART_REG_MCR 0x18 23 + #define UART_REG_LSR 0x1c 24 + 25 + static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE); 26 + 27 + static inline void uart_w32(u32 val, unsigned reg) 28 + { 29 + __raw_writel(val, uart_membase + reg); 30 + } 31 + 32 + static inline u32 uart_r32(unsigned reg) 33 + { 34 + return __raw_readl(uart_membase + reg); 35 + } 36 + 37 + void prom_putchar(unsigned char ch) 38 + { 39 + while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) 40 + ; 41 + uart_w32(ch, UART_REG_TX); 42 + while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) 43 + ; 44 + }
+180
arch/mips/ralink/irq.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> 7 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 8 + */ 9 + 10 + #include <linux/io.h> 11 + #include <linux/bitops.h> 12 + #include <linux/of_platform.h> 13 + #include <linux/of_address.h> 14 + #include <linux/of_irq.h> 15 + #include <linux/irqdomain.h> 16 + #include <linux/interrupt.h> 17 + 18 + #include <asm/irq_cpu.h> 19 + #include <asm/mipsregs.h> 20 + 21 + #include "common.h" 22 + 23 + /* INTC register offsets */ 24 + #define INTC_REG_STATUS0 0x00 25 + #define INTC_REG_STATUS1 0x04 26 + #define INTC_REG_TYPE 0x20 27 + #define INTC_REG_RAW_STATUS 0x30 28 + #define INTC_REG_ENABLE 0x34 29 + #define INTC_REG_DISABLE 0x38 30 + 31 + #define INTC_INT_GLOBAL BIT(31) 32 + 33 + #define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2) 34 + #define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5) 35 + #define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6) 36 + #define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7) 37 + 38 + /* we have a cascade of 8 irqs */ 39 + #define RALINK_INTC_IRQ_BASE 8 40 + 41 + /* we have 32 SoC irqs */ 42 + #define RALINK_INTC_IRQ_COUNT 32 43 + 44 + #define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9) 45 + 46 + static void __iomem *rt_intc_membase; 47 + 48 + static inline void rt_intc_w32(u32 val, unsigned reg) 49 + { 50 + __raw_writel(val, rt_intc_membase + reg); 51 + } 52 + 53 + static inline u32 rt_intc_r32(unsigned reg) 54 + { 55 + return __raw_readl(rt_intc_membase + reg); 56 + } 57 + 58 + static void ralink_intc_irq_unmask(struct irq_data *d) 59 + { 60 + rt_intc_w32(BIT(d->hwirq), INTC_REG_ENABLE); 61 + } 62 + 63 + static void ralink_intc_irq_mask(struct irq_data *d) 64 + { 65 + rt_intc_w32(BIT(d->hwirq), INTC_REG_DISABLE); 66 + } 67 + 68 + static struct irq_chip 
ralink_intc_irq_chip = { 69 + .name = "INTC", 70 + .irq_unmask = ralink_intc_irq_unmask, 71 + .irq_mask = ralink_intc_irq_mask, 72 + .irq_mask_ack = ralink_intc_irq_mask, 73 + }; 74 + 75 + unsigned int __cpuinit get_c0_compare_int(void) 76 + { 77 + return CP0_LEGACY_COMPARE_IRQ; 78 + } 79 + 80 + static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc) 81 + { 82 + u32 pending = rt_intc_r32(INTC_REG_STATUS0); 83 + 84 + if (pending) { 85 + struct irq_domain *domain = irq_get_handler_data(irq); 86 + generic_handle_irq(irq_find_mapping(domain, __ffs(pending))); 87 + } else { 88 + spurious_interrupt(); 89 + } 90 + } 91 + 92 + asmlinkage void plat_irq_dispatch(void) 93 + { 94 + unsigned long pending; 95 + 96 + pending = read_c0_status() & read_c0_cause() & ST0_IM; 97 + 98 + if (pending & STATUSF_IP7) 99 + do_IRQ(RALINK_CPU_IRQ_COUNTER); 100 + 101 + else if (pending & STATUSF_IP5) 102 + do_IRQ(RALINK_CPU_IRQ_FE); 103 + 104 + else if (pending & STATUSF_IP6) 105 + do_IRQ(RALINK_CPU_IRQ_WIFI); 106 + 107 + else if (pending & STATUSF_IP2) 108 + do_IRQ(RALINK_CPU_IRQ_INTC); 109 + 110 + else 111 + spurious_interrupt(); 112 + } 113 + 114 + static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) 115 + { 116 + irq_set_chip_and_handler(irq, &ralink_intc_irq_chip, handle_level_irq); 117 + 118 + return 0; 119 + } 120 + 121 + static const struct irq_domain_ops irq_domain_ops = { 122 + .xlate = irq_domain_xlate_onecell, 123 + .map = intc_map, 124 + }; 125 + 126 + static int __init intc_of_init(struct device_node *node, 127 + struct device_node *parent) 128 + { 129 + struct resource res; 130 + struct irq_domain *domain; 131 + int irq; 132 + 133 + irq = irq_of_parse_and_map(node, 0); 134 + if (!irq) 135 + panic("Failed to get INTC IRQ"); 136 + 137 + if (of_address_to_resource(node, 0, &res)) 138 + panic("Failed to get intc memory range"); 139 + 140 + if (request_mem_region(res.start, resource_size(&res), 141 + res.name) < 0) 142 + pr_err("Failed 
to request intc memory"); 143 + 144 + rt_intc_membase = ioremap_nocache(res.start, 145 + resource_size(&res)); 146 + if (!rt_intc_membase) 147 + panic("Failed to remap intc memory"); 148 + 149 + /* disable all interrupts */ 150 + rt_intc_w32(~0, INTC_REG_DISABLE); 151 + 152 + /* route all INTC interrupts to MIPS HW0 interrupt */ 153 + rt_intc_w32(0, INTC_REG_TYPE); 154 + 155 + domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT, 156 + RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL); 157 + if (!domain) 158 + panic("Failed to add irqdomain"); 159 + 160 + rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE); 161 + 162 + irq_set_chained_handler(irq, ralink_intc_irq_handler); 163 + irq_set_handler_data(irq, domain); 164 + 165 + cp0_perfcount_irq = irq_create_mapping(domain, 9); 166 + 167 + return 0; 168 + } 169 + 170 + static struct of_device_id __initdata of_irq_ids[] = { 171 + { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init }, 172 + { .compatible = "ralink,rt2880-intc", .data = intc_of_init }, 173 + {}, 174 + }; 175 + 176 + void __init arch_init_irq(void) 177 + { 178 + of_irq_init(of_irq_ids); 179 + } 180 +
+107
arch/mips/ralink/of.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 7 + * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org> 8 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 9 + */ 10 + 11 + #include <linux/io.h> 12 + #include <linux/clk.h> 13 + #include <linux/init.h> 14 + #include <linux/of_fdt.h> 15 + #include <linux/kernel.h> 16 + #include <linux/bootmem.h> 17 + #include <linux/of_platform.h> 18 + #include <linux/of_address.h> 19 + 20 + #include <asm/reboot.h> 21 + #include <asm/bootinfo.h> 22 + #include <asm/addrspace.h> 23 + 24 + #include "common.h" 25 + 26 + __iomem void *rt_sysc_membase; 27 + __iomem void *rt_memc_membase; 28 + 29 + extern struct boot_param_header __dtb_start; 30 + 31 + __iomem void *plat_of_remap_node(const char *node) 32 + { 33 + struct resource res; 34 + struct device_node *np; 35 + 36 + np = of_find_compatible_node(NULL, NULL, node); 37 + if (!np) 38 + panic("Failed to find %s node", node); 39 + 40 + if (of_address_to_resource(np, 0, &res)) 41 + panic("Failed to get resource for %s", node); 42 + 43 + if ((request_mem_region(res.start, 44 + resource_size(&res), 45 + res.name) < 0)) 46 + panic("Failed to request resources for %s", node); 47 + 48 + return ioremap_nocache(res.start, resource_size(&res)); 49 + } 50 + 51 + void __init device_tree_init(void) 52 + { 53 + unsigned long base, size; 54 + void *fdt_copy; 55 + 56 + if (!initial_boot_params) 57 + return; 58 + 59 + base = virt_to_phys((void *)initial_boot_params); 60 + size = be32_to_cpu(initial_boot_params->totalsize); 61 + 62 + /* Before we do anything, lets reserve the dt blob */ 63 + reserve_bootmem(base, size, BOOTMEM_DEFAULT); 64 + 65 + /* The strings in the flattened tree are referenced directly by the 66 + * device tree, so copy the flattened device tree from init 
memory 67 + * to regular memory. 68 + */ 69 + fdt_copy = alloc_bootmem(size); 70 + memcpy(fdt_copy, initial_boot_params, size); 71 + initial_boot_params = fdt_copy; 72 + 73 + unflatten_device_tree(); 74 + 75 + /* free the space reserved for the dt blob */ 76 + free_bootmem(base, size); 77 + } 78 + 79 + void __init plat_mem_setup(void) 80 + { 81 + set_io_port_base(KSEG1); 82 + 83 + /* 84 + * Load the builtin devicetree. This causes the chosen node to be 85 + * parsed resulting in our memory appearing 86 + */ 87 + __dt_setup_arch(&__dtb_start); 88 + } 89 + 90 + static int __init plat_of_setup(void) 91 + { 92 + static struct of_device_id of_ids[3]; 93 + int len = sizeof(of_ids[0].compatible); 94 + 95 + if (!of_have_populated_dt()) 96 + panic("device tree not present"); 97 + 98 + strncpy(of_ids[0].compatible, soc_info.compatible, len); 99 + strncpy(of_ids[1].compatible, "palmbus", len); 100 + 101 + if (of_platform_populate(NULL, of_ids, NULL, NULL)) 102 + panic("failed to populate DT\n"); 103 + 104 + return 0; 105 + } 106 + 107 + arch_initcall(plat_of_setup);
+69
arch/mips/ralink/prom.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> 7 + * Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com> 8 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 9 + */ 10 + 11 + #include <linux/string.h> 12 + #include <linux/of_fdt.h> 13 + #include <linux/of_platform.h> 14 + 15 + #include <asm/bootinfo.h> 16 + #include <asm/addrspace.h> 17 + 18 + #include "common.h" 19 + 20 + struct ralink_soc_info soc_info; 21 + 22 + const char *get_system_type(void) 23 + { 24 + return soc_info.sys_type; 25 + } 26 + 27 + static __init void prom_init_cmdline(int argc, char **argv) 28 + { 29 + int i; 30 + 31 + pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n", 32 + (unsigned int)fw_arg0, (unsigned int)fw_arg1, 33 + (unsigned int)fw_arg2, (unsigned int)fw_arg3); 34 + 35 + argc = fw_arg0; 36 + argv = (char **) KSEG1ADDR(fw_arg1); 37 + 38 + if (!argv) { 39 + pr_debug("argv=%p is invalid, skipping\n", 40 + argv); 41 + return; 42 + } 43 + 44 + for (i = 0; i < argc; i++) { 45 + char *p = (char *) KSEG1ADDR(argv[i]); 46 + 47 + if (CPHYSADDR(p) && *p) { 48 + pr_debug("argv[%d]: %s\n", i, p); 49 + strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); 50 + strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); 51 + } 52 + } 53 + } 54 + 55 + void __init prom_init(void) 56 + { 57 + int argc; 58 + char **argv; 59 + 60 + prom_soc_init(&soc_info); 61 + 62 + pr_info("SoC Type: %s\n", get_system_type()); 63 + 64 + prom_init_cmdline(argc, argv); 65 + } 66 + 67 + void __init prom_free_prom_memory(void) 68 + { 69 + }
+44
arch/mips/ralink/reset.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org> 7 + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 8 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 9 + */ 10 + 11 + #include <linux/pm.h> 12 + #include <linux/io.h> 13 + 14 + #include <asm/reboot.h> 15 + 16 + #include <asm/mach-ralink/ralink_regs.h> 17 + 18 + /* Reset Control */ 19 + #define SYSC_REG_RESET_CTRL 0x034 20 + #define RSTCTL_RESET_SYSTEM BIT(0) 21 + 22 + static void ralink_restart(char *command) 23 + { 24 + local_irq_disable(); 25 + rt_sysc_w32(RSTCTL_RESET_SYSTEM, SYSC_REG_RESET_CTRL); 26 + unreachable(); 27 + } 28 + 29 + static void ralink_halt(void) 30 + { 31 + local_irq_disable(); 32 + unreachable(); 33 + } 34 + 35 + static int __init mips_reboot_setup(void) 36 + { 37 + _machine_restart = ralink_restart; 38 + _machine_halt = ralink_halt; 39 + pm_power_off = ralink_halt; 40 + 41 + return 0; 42 + } 43 + 44 + arch_initcall(mips_reboot_setup);
+242
arch/mips/ralink/rt305x.c
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify it 3 + * under the terms of the GNU General Public License version 2 as published 4 + * by the Free Software Foundation. 5 + * 6 + * Parts of this file are based on Ralink's 2.6.21 BSP 7 + * 8 + * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> 9 + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 10 + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> 11 + */ 12 + 13 + #include <linux/kernel.h> 14 + #include <linux/init.h> 15 + #include <linux/module.h> 16 + 17 + #include <asm/mipsregs.h> 18 + #include <asm/mach-ralink/ralink_regs.h> 19 + #include <asm/mach-ralink/rt305x.h> 20 + 21 + #include "common.h" 22 + 23 + enum rt305x_soc_type rt305x_soc; 24 + 25 + struct ralink_pinmux_grp mode_mux[] = { 26 + { 27 + .name = "i2c", 28 + .mask = RT305X_GPIO_MODE_I2C, 29 + .gpio_first = RT305X_GPIO_I2C_SD, 30 + .gpio_last = RT305X_GPIO_I2C_SCLK, 31 + }, { 32 + .name = "spi", 33 + .mask = RT305X_GPIO_MODE_SPI, 34 + .gpio_first = RT305X_GPIO_SPI_EN, 35 + .gpio_last = RT305X_GPIO_SPI_CLK, 36 + }, { 37 + .name = "uartlite", 38 + .mask = RT305X_GPIO_MODE_UART1, 39 + .gpio_first = RT305X_GPIO_UART1_TXD, 40 + .gpio_last = RT305X_GPIO_UART1_RXD, 41 + }, { 42 + .name = "jtag", 43 + .mask = RT305X_GPIO_MODE_JTAG, 44 + .gpio_first = RT305X_GPIO_JTAG_TDO, 45 + .gpio_last = RT305X_GPIO_JTAG_TDI, 46 + }, { 47 + .name = "mdio", 48 + .mask = RT305X_GPIO_MODE_MDIO, 49 + .gpio_first = RT305X_GPIO_MDIO_MDC, 50 + .gpio_last = RT305X_GPIO_MDIO_MDIO, 51 + }, { 52 + .name = "sdram", 53 + .mask = RT305X_GPIO_MODE_SDRAM, 54 + .gpio_first = RT305X_GPIO_SDRAM_MD16, 55 + .gpio_last = RT305X_GPIO_SDRAM_MD31, 56 + }, { 57 + .name = "rgmii", 58 + .mask = RT305X_GPIO_MODE_RGMII, 59 + .gpio_first = RT305X_GPIO_GE0_TXD0, 60 + .gpio_last = RT305X_GPIO_GE0_RXCLK, 61 + }, {0} 62 + }; 63 + 64 + struct ralink_pinmux_grp uart_mux[] = { 65 + { 66 + .name = "uartf", 67 + .mask = RT305X_GPIO_MODE_UARTF, 68 + .gpio_first 
= RT305X_GPIO_7, 69 + .gpio_last = RT305X_GPIO_14, 70 + }, { 71 + .name = "pcm uartf", 72 + .mask = RT305X_GPIO_MODE_PCM_UARTF, 73 + .gpio_first = RT305X_GPIO_7, 74 + .gpio_last = RT305X_GPIO_14, 75 + }, { 76 + .name = "pcm i2s", 77 + .mask = RT305X_GPIO_MODE_PCM_I2S, 78 + .gpio_first = RT305X_GPIO_7, 79 + .gpio_last = RT305X_GPIO_14, 80 + }, { 81 + .name = "i2s uartf", 82 + .mask = RT305X_GPIO_MODE_I2S_UARTF, 83 + .gpio_first = RT305X_GPIO_7, 84 + .gpio_last = RT305X_GPIO_14, 85 + }, { 86 + .name = "pcm gpio", 87 + .mask = RT305X_GPIO_MODE_PCM_GPIO, 88 + .gpio_first = RT305X_GPIO_10, 89 + .gpio_last = RT305X_GPIO_14, 90 + }, { 91 + .name = "gpio uartf", 92 + .mask = RT305X_GPIO_MODE_GPIO_UARTF, 93 + .gpio_first = RT305X_GPIO_7, 94 + .gpio_last = RT305X_GPIO_14, 95 + }, { 96 + .name = "gpio i2s", 97 + .mask = RT305X_GPIO_MODE_GPIO_I2S, 98 + .gpio_first = RT305X_GPIO_7, 99 + .gpio_last = RT305X_GPIO_14, 100 + }, { 101 + .name = "gpio", 102 + .mask = RT305X_GPIO_MODE_GPIO, 103 + }, {0} 104 + }; 105 + 106 + void rt305x_wdt_reset(void) 107 + { 108 + u32 t; 109 + 110 + /* enable WDT reset output on pin SRAM_CS_N */ 111 + t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); 112 + t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT << 113 + RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT; 114 + rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); 115 + } 116 + 117 + struct ralink_pinmux gpio_pinmux = { 118 + .mode = mode_mux, 119 + .uart = uart_mux, 120 + .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT, 121 + .wdt_reset = rt305x_wdt_reset, 122 + }; 123 + 124 + void __init ralink_clk_init(void) 125 + { 126 + unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate; 127 + u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); 128 + 129 + if (soc_is_rt305x() || soc_is_rt3350()) { 130 + t = (t >> RT305X_SYSCFG_CPUCLK_SHIFT) & 131 + RT305X_SYSCFG_CPUCLK_MASK; 132 + switch (t) { 133 + case RT305X_SYSCFG_CPUCLK_LOW: 134 + cpu_rate = 320000000; 135 + break; 136 + case RT305X_SYSCFG_CPUCLK_HIGH: 137 + cpu_rate = 384000000; 138 + break; 139 + } 140 + 
sys_rate = uart_rate = wdt_rate = cpu_rate / 3; 141 + } else if (soc_is_rt3352()) { 142 + t = (t >> RT3352_SYSCFG0_CPUCLK_SHIFT) & 143 + RT3352_SYSCFG0_CPUCLK_MASK; 144 + switch (t) { 145 + case RT3352_SYSCFG0_CPUCLK_LOW: 146 + cpu_rate = 384000000; 147 + break; 148 + case RT3352_SYSCFG0_CPUCLK_HIGH: 149 + cpu_rate = 400000000; 150 + break; 151 + } 152 + sys_rate = wdt_rate = cpu_rate / 3; 153 + uart_rate = 40000000; 154 + } else if (soc_is_rt5350()) { 155 + t = (t >> RT5350_SYSCFG0_CPUCLK_SHIFT) & 156 + RT5350_SYSCFG0_CPUCLK_MASK; 157 + switch (t) { 158 + case RT5350_SYSCFG0_CPUCLK_360: 159 + cpu_rate = 360000000; 160 + sys_rate = cpu_rate / 3; 161 + break; 162 + case RT5350_SYSCFG0_CPUCLK_320: 163 + cpu_rate = 320000000; 164 + sys_rate = cpu_rate / 4; 165 + break; 166 + case RT5350_SYSCFG0_CPUCLK_300: 167 + cpu_rate = 300000000; 168 + sys_rate = cpu_rate / 3; 169 + break; 170 + default: 171 + BUG(); 172 + } 173 + uart_rate = 40000000; 174 + wdt_rate = sys_rate; 175 + } else { 176 + BUG(); 177 + } 178 + 179 + ralink_clk_add("cpu", cpu_rate); 180 + ralink_clk_add("10000b00.spi", sys_rate); 181 + ralink_clk_add("10000100.timer", wdt_rate); 182 + ralink_clk_add("10000500.uart", uart_rate); 183 + ralink_clk_add("10000c00.uartlite", uart_rate); 184 + } 185 + 186 + void __init ralink_of_remap(void) 187 + { 188 + rt_sysc_membase = plat_of_remap_node("ralink,rt3050-sysc"); 189 + rt_memc_membase = plat_of_remap_node("ralink,rt3050-memc"); 190 + 191 + if (!rt_sysc_membase || !rt_memc_membase) 192 + panic("Failed to remap core resources"); 193 + } 194 + 195 + void prom_soc_init(struct ralink_soc_info *soc_info) 196 + { 197 + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); 198 + unsigned char *name; 199 + u32 n0; 200 + u32 n1; 201 + u32 id; 202 + 203 + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); 204 + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); 205 + 206 + if (n0 == RT3052_CHIP_NAME0 && n1 == RT3052_CHIP_NAME1) { 207 + unsigned long icache_sets; 208 + 
209 + icache_sets = (read_c0_config1() >> 22) & 7; 210 + if (icache_sets == 1) { 211 + rt305x_soc = RT305X_SOC_RT3050; 212 + name = "RT3050"; 213 + soc_info->compatible = "ralink,rt3050-soc"; 214 + } else { 215 + rt305x_soc = RT305X_SOC_RT3052; 216 + name = "RT3052"; 217 + soc_info->compatible = "ralink,rt3052-soc"; 218 + } 219 + } else if (n0 == RT3350_CHIP_NAME0 && n1 == RT3350_CHIP_NAME1) { 220 + rt305x_soc = RT305X_SOC_RT3350; 221 + name = "RT3350"; 222 + soc_info->compatible = "ralink,rt3350-soc"; 223 + } else if (n0 == RT3352_CHIP_NAME0 && n1 == RT3352_CHIP_NAME1) { 224 + rt305x_soc = RT305X_SOC_RT3352; 225 + name = "RT3352"; 226 + soc_info->compatible = "ralink,rt3352-soc"; 227 + } else if (n0 == RT5350_CHIP_NAME0 && n1 == RT5350_CHIP_NAME1) { 228 + rt305x_soc = RT305X_SOC_RT5350; 229 + name = "RT5350"; 230 + soc_info->compatible = "ralink,rt5350-soc"; 231 + } else { 232 + panic("rt305x: unknown SoC, n0:%08x n1:%08x\n", n0, n1); 233 + } 234 + 235 + id = __raw_readl(sysc + SYSC_REG_CHIP_ID); 236 + 237 + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, 238 + "Ralink %s id:%u rev:%u", 239 + name, 240 + (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK, 241 + (id & CHIP_ID_REV_MASK)); 242 + }
+1 -1
arch/mips/sgi-ip27/ip27-console.c
··· 31 31 return &ioc3->sregs.uarta; 32 32 } 33 33 34 - void __init prom_putchar(char c) 34 + void prom_putchar(char c) 35 35 { 36 36 struct ioc3_uartregs *uart = console_uart(); 37 37
+4 -4
arch/mips/txx9/generic/setup.c
··· 513 513 } 514 514 515 515 #ifdef CONFIG_EARLY_PRINTK 516 - static void __init null_prom_putchar(char c) 516 + static void null_prom_putchar(char c) 517 517 { 518 518 } 519 - void (*txx9_prom_putchar)(char c) __initdata = null_prom_putchar; 519 + void (*txx9_prom_putchar)(char c) = null_prom_putchar; 520 520 521 - void __init prom_putchar(char c) 521 + void prom_putchar(char c) 522 522 { 523 523 txx9_prom_putchar(c); 524 524 } 525 525 526 526 static void __iomem *early_txx9_sio_port; 527 527 528 - static void __init early_txx9_sio_putchar(char c) 528 + static void early_txx9_sio_putchar(char c) 529 529 { 530 530 #define TXX9_SICISR 0x0c 531 531 #define TXX9_SITFIFO 0x1c
+15
arch/mn10300/include/asm/dma-mapping.h
··· 168 168 mn10300_dcache_flush_inv(); 169 169 } 170 170 171 + /* Not supported for now */ 172 + static inline int dma_mmap_coherent(struct device *dev, 173 + struct vm_area_struct *vma, void *cpu_addr, 174 + dma_addr_t dma_addr, size_t size) 175 + { 176 + return -EINVAL; 177 + } 178 + 179 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 180 + void *cpu_addr, dma_addr_t dma_addr, 181 + size_t size) 182 + { 183 + return -EINVAL; 184 + } 185 + 171 186 #endif
+15
arch/parisc/include/asm/dma-mapping.h
··· 238 238 /* At the moment, we panic on error for IOMMU resource exaustion */ 239 239 #define dma_mapping_error(dev, x) 0 240 240 241 + /* This API cannot be supported on PA-RISC */ 242 + static inline int dma_mmap_coherent(struct device *dev, 243 + struct vm_area_struct *vma, void *cpu_addr, 244 + dma_addr_t dma_addr, size_t size) 245 + { 246 + return -EINVAL; 247 + } 248 + 249 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 250 + void *cpu_addr, dma_addr_t dma_addr, 251 + size_t size) 252 + { 253 + return -EINVAL; 254 + } 255 + 241 256 #endif
+35 -27
arch/powerpc/mm/hash_low_64.S
··· 115 115 sldi r29,r5,SID_SHIFT - VPN_SHIFT 116 116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 117 117 or r29,r28,r29 118 - 119 - /* Calculate hash value for primary slot and store it in r28 */ 120 - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 121 - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ 122 - xor r28,r5,r0 118 + /* 119 + * Calculate hash value for primary slot and store it in r28 120 + * r3 = va, r5 = vsid 121 + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) 122 + */ 123 + rldicl r0,r3,64-12,48 124 + xor r28,r5,r0 /* hash */ 123 125 b 4f 124 126 125 127 3: /* Calc vpn and put it in r29 */ ··· 132 130 /* 133 131 * calculate hash value for primary slot and 134 132 * store it in r28 for 1T segment 133 + * r3 = va, r5 = vsid 135 134 */ 136 - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 137 - clrldi r5,r5,40 /* vsid & 0xffffff */ 138 - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 139 - xor r28,r28,r5 135 + sldi r28,r5,25 /* vsid << 25 */ 136 + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ 137 + rldicl r0,r3,64-12,36 138 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ 140 139 xor r28,r28,r0 /* hash */ 141 140 142 141 /* Convert linux PTE bits into HW equivalents */ ··· 410 407 */ 411 408 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 412 409 or r29,r28,r29 413 - 414 - /* Calculate hash value for primary slot and store it in r28 */ 415 - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 416 - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ 417 - xor r28,r5,r0 410 + /* 411 + * Calculate hash value for primary slot and store it in r28 412 + * r3 = va, r5 = vsid 413 + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) 414 + */ 415 + rldicl r0,r3,64-12,48 416 + xor r28,r5,r0 /* hash */ 418 417 b 4f 419 418 420 419 3: /* Calc vpn and put it in r29 */ ··· 431 426 /* 432 427 * Calculate hash value for primary slot and 433 428 * store it in r28 for 1T segment 429 + * r3 = va, r5 = vsid 434 430 */ 435 - rldic r28,r5,25,25 /* (vsid << 
25) & 0x7fffffffff */ 436 - clrldi r5,r5,40 /* vsid & 0xffffff */ 437 - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 438 - xor r28,r28,r5 431 + sldi r28,r5,25 /* vsid << 25 */ 432 + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ 433 + rldicl r0,r3,64-12,36 434 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ 439 435 xor r28,r28,r0 /* hash */ 440 436 441 437 /* Convert linux PTE bits into HW equivalents */ ··· 758 752 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 759 753 or r29,r28,r29 760 754 761 - /* Calculate hash value for primary slot and store it in r28 */ 762 - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 763 - rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */ 764 - xor r28,r5,r0 755 + /* Calculate hash value for primary slot and store it in r28 756 + * r3 = va, r5 = vsid 757 + * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) 758 + */ 759 + rldicl r0,r3,64-16,52 760 + xor r28,r5,r0 /* hash */ 765 761 b 4f 766 762 767 763 3: /* Calc vpn and put it in r29 */ 768 764 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT 769 765 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) 770 766 or r29,r28,r29 771 - 772 767 /* 773 768 * calculate hash value for primary slot and 774 769 * store it in r28 for 1T segment 770 + * r3 = va, r5 = vsid 775 771 */ 776 - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 777 - clrldi r5,r5,40 /* vsid & 0xffffff */ 778 - rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 779 - xor r28,r28,r5 772 + sldi r28,r5,25 /* vsid << 25 */ 773 + /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ 774 + rldicl r0,r3,64-16,40 775 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ 780 776 xor r28,r28,r0 /* hash */ 781 777 782 778 /* Convert linux PTE bits into HW equivalents */
+2 -2
arch/x86/ia32/ia32entry.S
··· 207 207 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 208 208 jnz ia32_ret_from_sys_call 209 209 TRACE_IRQS_ON 210 - sti 210 + ENABLE_INTERRUPTS(CLBR_NONE) 211 211 movl %eax,%esi /* second arg, syscall return value */ 212 212 cmpl $-MAX_ERRNO,%eax /* is it an error ? */ 213 213 jbe 1f ··· 217 217 call __audit_syscall_exit 218 218 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ 219 219 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi 220 - cli 220 + DISABLE_INTERRUPTS(CLBR_NONE) 221 221 TRACE_IRQS_OFF 222 222 testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 223 223 jz \exit
+3 -4
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 298 298 unsigned int); 299 299 }; 300 300 301 - #ifdef CONFIG_AMD_NB 302 - 301 + #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) 303 302 /* 304 303 * L3 cache descriptors 305 304 */ ··· 523 524 static struct _cache_attr subcaches = 524 525 __ATTR(subcaches, 0644, show_subcaches, store_subcaches); 525 526 526 - #else /* CONFIG_AMD_NB */ 527 + #else 527 528 #define amd_init_l3_cache(x, y) 528 - #endif /* CONFIG_AMD_NB */ 529 + #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ 529 530 530 531 static int 531 532 __cpuinit cpuid4_cache_lookup_regs(int index,
+5 -1
arch/x86/kernel/cpu/perf_event_intel.c
··· 2019 2019 break; 2020 2020 2021 2021 case 28: /* Atom */ 2022 - case 54: /* Cedariew */ 2022 + case 38: /* Lincroft */ 2023 + case 39: /* Penwell */ 2024 + case 53: /* Cloverview */ 2025 + case 54: /* Cedarview */ 2023 2026 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 2024 2027 sizeof(hw_cache_event_ids)); 2025 2028 ··· 2087 2084 pr_cont("SandyBridge events, "); 2088 2085 break; 2089 2086 case 58: /* IvyBridge */ 2087 + case 62: /* IvyBridge EP */ 2090 2088 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2091 2089 sizeof(hw_cache_event_ids)); 2092 2090 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+1 -1
arch/x86/kernel/cpu/perf_event_p6.c
··· 19 19 20 20 }; 21 21 22 - static __initconst u64 p6_hw_cache_event_ids 22 + static u64 p6_hw_cache_event_ids 23 23 [PERF_COUNT_HW_CACHE_MAX] 24 24 [PERF_COUNT_HW_CACHE_OP_MAX] 25 25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+8 -2
arch/x86/tools/insn_sanity.c
··· 55 55 static void usage(const char *err) 56 56 { 57 57 if (err) 58 - fprintf(stderr, "Error: %s\n\n", err); 58 + fprintf(stderr, "%s: Error: %s\n\n", prog, err); 59 59 fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); 60 60 fprintf(stderr, "\t-y 64bit mode\n"); 61 61 fprintf(stderr, "\t-n 32bit mode\n"); ··· 269 269 insns++; 270 270 } 271 271 272 - fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); 272 + fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", 273 + prog, 274 + (errors) ? "Failure" : "Success", 275 + insns, 276 + (input_file) ? "given" : "random", 277 + errors, 278 + seed); 273 279 274 280 return errors ? 1 : 0; 275 281 }
+15
arch/xtensa/include/asm/dma-mapping.h
··· 170 170 consistent_sync(vaddr, size, direction); 171 171 } 172 172 173 + /* Not supported for now */ 174 + static inline int dma_mmap_coherent(struct device *dev, 175 + struct vm_area_struct *vma, void *cpu_addr, 176 + dma_addr_t dma_addr, size_t size) 177 + { 178 + return -EINVAL; 179 + } 180 + 181 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 182 + void *cpu_addr, dma_addr_t dma_addr, 183 + size_t size) 184 + { 185 + return -EINVAL; 186 + } 187 + 173 188 #endif /* _XTENSA_DMA_MAPPING_H */
+32 -10
block/genhd.c
··· 35 35 36 36 static struct device_type disk_type; 37 37 38 + static void disk_check_events(struct disk_events *ev, 39 + unsigned int *clearing_ptr); 38 40 static void disk_alloc_events(struct gendisk *disk); 39 41 static void disk_add_events(struct gendisk *disk); 40 42 static void disk_del_events(struct gendisk *disk); ··· 1551 1549 const struct block_device_operations *bdops = disk->fops; 1552 1550 struct disk_events *ev = disk->ev; 1553 1551 unsigned int pending; 1552 + unsigned int clearing = mask; 1554 1553 1555 1554 if (!ev) { 1556 1555 /* for drivers still using the old ->media_changed method */ ··· 1561 1558 return 0; 1562 1559 } 1563 1560 1564 - /* tell the workfn about the events being cleared */ 1561 + disk_block_events(disk); 1562 + 1563 + /* 1564 + * store the union of mask and ev->clearing on the stack so that the 1565 + * race with disk_flush_events does not cause ambiguity (ev->clearing 1566 + * can still be modified even if events are blocked). 1567 + */ 1565 1568 spin_lock_irq(&ev->lock); 1566 - ev->clearing |= mask; 1569 + clearing |= ev->clearing; 1570 + ev->clearing = 0; 1567 1571 spin_unlock_irq(&ev->lock); 1568 1572 1569 - /* uncondtionally schedule event check and wait for it to finish */ 1570 - disk_block_events(disk); 1571 - queue_delayed_work(system_freezable_wq, &ev->dwork, 0); 1572 - flush_delayed_work(&ev->dwork); 1573 - __disk_unblock_events(disk, false); 1573 + disk_check_events(ev, &clearing); 1574 + /* 1575 + * if ev->clearing is not 0, the disk_flush_events got called in the 1576 + * middle of this function, so we want to run the workfn without delay. 1577 + */ 1578 + __disk_unblock_events(disk, ev->clearing ? 
true : false); 1574 1579 1575 1580 /* then, fetch and clear pending events */ 1576 1581 spin_lock_irq(&ev->lock); 1577 - WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */ 1578 1582 pending = ev->pending & mask; 1579 1583 ev->pending &= ~mask; 1580 1584 spin_unlock_irq(&ev->lock); 1585 + WARN_ON_ONCE(clearing & mask); 1581 1586 1582 1587 return pending; 1583 1588 } 1584 1589 1590 + /* 1591 + * Separate this part out so that a different pointer for clearing_ptr can be 1592 + * passed in for disk_clear_events. 1593 + */ 1585 1594 static void disk_events_workfn(struct work_struct *work) 1586 1595 { 1587 1596 struct delayed_work *dwork = to_delayed_work(work); 1588 1597 struct disk_events *ev = container_of(dwork, struct disk_events, dwork); 1598 + 1599 + disk_check_events(ev, &ev->clearing); 1600 + } 1601 + 1602 + static void disk_check_events(struct disk_events *ev, 1603 + unsigned int *clearing_ptr) 1604 + { 1589 1605 struct gendisk *disk = ev->disk; 1590 1606 char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; 1591 - unsigned int clearing = ev->clearing; 1607 + unsigned int clearing = *clearing_ptr; 1592 1608 unsigned int events; 1593 1609 unsigned long intv; 1594 1610 int nr_events = 0, i; ··· 1620 1598 1621 1599 events &= ~ev->pending; 1622 1600 ev->pending |= events; 1623 - ev->clearing &= ~clearing; 1601 + *clearing_ptr &= ~clearing; 1624 1602 1625 1603 intv = disk_events_poll_jiffies(disk); 1626 1604 if (!ev->block && intv)
+73 -73
drivers/atm/iphase.h
··· 636 636 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE 637 637 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE 638 638 639 - typedef volatile u_int freg_t; 639 + typedef volatile u_int ffreg_t; 640 640 typedef u_int rreg_t; 641 641 642 642 typedef struct _ffredn_t { 643 - freg_t idlehead_high; /* Idle cell header (high) */ 644 - freg_t idlehead_low; /* Idle cell header (low) */ 645 - freg_t maxrate; /* Maximum rate */ 646 - freg_t stparms; /* Traffic Management Parameters */ 647 - freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ 648 - freg_t rm_type; /* */ 649 - u_int filler5[0x17 - 0x06]; 650 - freg_t cmd_reg; /* Command register */ 651 - u_int filler18[0x20 - 0x18]; 652 - freg_t cbr_base; /* CBR Pointer Base */ 653 - freg_t vbr_base; /* VBR Pointer Base */ 654 - freg_t abr_base; /* ABR Pointer Base */ 655 - freg_t ubr_base; /* UBR Pointer Base */ 656 - u_int filler24; 657 - freg_t vbrwq_base; /* VBR Wait Queue Base */ 658 - freg_t abrwq_base; /* ABR Wait Queue Base */ 659 - freg_t ubrwq_base; /* UBR Wait Queue Base */ 660 - freg_t vct_base; /* Main VC Table Base */ 661 - freg_t vcte_base; /* Extended Main VC Table Base */ 662 - u_int filler2a[0x2C - 0x2A]; 663 - freg_t cbr_tab_beg; /* CBR Table Begin */ 664 - freg_t cbr_tab_end; /* CBR Table End */ 665 - freg_t cbr_pointer; /* CBR Pointer */ 666 - u_int filler2f[0x30 - 0x2F]; 667 - freg_t prq_st_adr; /* Packet Ready Queue Start Address */ 668 - freg_t prq_ed_adr; /* Packet Ready Queue End Address */ 669 - freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ 670 - freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ 671 - freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ 672 - freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ 673 - freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ 674 - freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ 675 - u_int filler38[0x40 - 0x38]; 676 - freg_t queue_base; /* Base address for PRQ and TCQ */ 677 - 
freg_t desc_base; /* Base address of descriptor table */ 678 - u_int filler42[0x45 - 0x42]; 679 - freg_t mode_reg_0; /* Mode register 0 */ 680 - freg_t mode_reg_1; /* Mode register 1 */ 681 - freg_t intr_status_reg;/* Interrupt Status register */ 682 - freg_t mask_reg; /* Mask Register */ 683 - freg_t cell_ctr_high1; /* Total cell transfer count (high) */ 684 - freg_t cell_ctr_lo1; /* Total cell transfer count (low) */ 685 - freg_t state_reg; /* Status register */ 686 - u_int filler4c[0x58 - 0x4c]; 687 - freg_t curr_desc_num; /* Contains the current descriptor num */ 688 - freg_t next_desc; /* Next descriptor */ 689 - freg_t next_vc; /* Next VC */ 690 - u_int filler5b[0x5d - 0x5b]; 691 - freg_t present_slot_cnt;/* Present slot count */ 692 - u_int filler5e[0x6a - 0x5e]; 693 - freg_t new_desc_num; /* New descriptor number */ 694 - freg_t new_vc; /* New VC */ 695 - freg_t sched_tbl_ptr; /* Schedule table pointer */ 696 - freg_t vbrwq_wptr; /* VBR wait queue write pointer */ 697 - freg_t vbrwq_rptr; /* VBR wait queue read pointer */ 698 - freg_t abrwq_wptr; /* ABR wait queue write pointer */ 699 - freg_t abrwq_rptr; /* ABR wait queue read pointer */ 700 - freg_t ubrwq_wptr; /* UBR wait queue write pointer */ 701 - freg_t ubrwq_rptr; /* UBR wait queue read pointer */ 702 - freg_t cbr_vc; /* CBR VC */ 703 - freg_t vbr_sb_vc; /* VBR SB VC */ 704 - freg_t abr_sb_vc; /* ABR SB VC */ 705 - freg_t ubr_sb_vc; /* UBR SB VC */ 706 - freg_t vbr_next_link; /* VBR next link */ 707 - freg_t abr_next_link; /* ABR next link */ 708 - freg_t ubr_next_link; /* UBR next link */ 709 - u_int filler7a[0x7c-0x7a]; 710 - freg_t out_rate_head; /* Out of rate head */ 711 - u_int filler7d[0xca-0x7d]; /* pad out to full address space */ 712 - freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ 713 - freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ 714 - u_int fillercc[0x100-0xcc]; /* pad out to full address space */ 643 + ffreg_t idlehead_high; /* Idle cell header (high) 
*/ 644 + ffreg_t idlehead_low; /* Idle cell header (low) */ 645 + ffreg_t maxrate; /* Maximum rate */ 646 + ffreg_t stparms; /* Traffic Management Parameters */ 647 + ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ 648 + ffreg_t rm_type; /* */ 649 + u_int filler5[0x17 - 0x06]; 650 + ffreg_t cmd_reg; /* Command register */ 651 + u_int filler18[0x20 - 0x18]; 652 + ffreg_t cbr_base; /* CBR Pointer Base */ 653 + ffreg_t vbr_base; /* VBR Pointer Base */ 654 + ffreg_t abr_base; /* ABR Pointer Base */ 655 + ffreg_t ubr_base; /* UBR Pointer Base */ 656 + u_int filler24; 657 + ffreg_t vbrwq_base; /* VBR Wait Queue Base */ 658 + ffreg_t abrwq_base; /* ABR Wait Queue Base */ 659 + ffreg_t ubrwq_base; /* UBR Wait Queue Base */ 660 + ffreg_t vct_base; /* Main VC Table Base */ 661 + ffreg_t vcte_base; /* Extended Main VC Table Base */ 662 + u_int filler2a[0x2C - 0x2A]; 663 + ffreg_t cbr_tab_beg; /* CBR Table Begin */ 664 + ffreg_t cbr_tab_end; /* CBR Table End */ 665 + ffreg_t cbr_pointer; /* CBR Pointer */ 666 + u_int filler2f[0x30 - 0x2F]; 667 + ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */ 668 + ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */ 669 + ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ 670 + ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ 671 + ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ 672 + ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ 673 + ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ 674 + ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ 675 + u_int filler38[0x40 - 0x38]; 676 + ffreg_t queue_base; /* Base address for PRQ and TCQ */ 677 + ffreg_t desc_base; /* Base address of descriptor table */ 678 + u_int filler42[0x45 - 0x42]; 679 + ffreg_t mode_reg_0; /* Mode register 0 */ 680 + ffreg_t mode_reg_1; /* Mode register 1 */ 681 + ffreg_t intr_status_reg;/* Interrupt Status register */ 682 + ffreg_t mask_reg; /* Mask Register */ 683 + ffreg_t 
cell_ctr_high1; /* Total cell transfer count (high) */ 684 + ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */ 685 + ffreg_t state_reg; /* Status register */ 686 + u_int filler4c[0x58 - 0x4c]; 687 + ffreg_t curr_desc_num; /* Contains the current descriptor num */ 688 + ffreg_t next_desc; /* Next descriptor */ 689 + ffreg_t next_vc; /* Next VC */ 690 + u_int filler5b[0x5d - 0x5b]; 691 + ffreg_t present_slot_cnt;/* Present slot count */ 692 + u_int filler5e[0x6a - 0x5e]; 693 + ffreg_t new_desc_num; /* New descriptor number */ 694 + ffreg_t new_vc; /* New VC */ 695 + ffreg_t sched_tbl_ptr; /* Schedule table pointer */ 696 + ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */ 697 + ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */ 698 + ffreg_t abrwq_wptr; /* ABR wait queue write pointer */ 699 + ffreg_t abrwq_rptr; /* ABR wait queue read pointer */ 700 + ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */ 701 + ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */ 702 + ffreg_t cbr_vc; /* CBR VC */ 703 + ffreg_t vbr_sb_vc; /* VBR SB VC */ 704 + ffreg_t abr_sb_vc; /* ABR SB VC */ 705 + ffreg_t ubr_sb_vc; /* UBR SB VC */ 706 + ffreg_t vbr_next_link; /* VBR next link */ 707 + ffreg_t abr_next_link; /* ABR next link */ 708 + ffreg_t ubr_next_link; /* UBR next link */ 709 + u_int filler7a[0x7c-0x7a]; 710 + ffreg_t out_rate_head; /* Out of rate head */ 711 + u_int filler7d[0xca-0x7d]; /* pad out to full address space */ 712 + ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ 713 + ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ 714 + u_int fillercc[0x100-0xcc]; /* pad out to full address space */ 715 715 } ffredn_t; 716 716 717 717 typedef struct _rfredn_t {
+5
drivers/bcma/bcma_private.h
··· 94 94 #ifdef CONFIG_BCMA_DRIVER_GPIO 95 95 /* driver_gpio.c */ 96 96 int bcma_gpio_init(struct bcma_drv_cc *cc); 97 + int bcma_gpio_unregister(struct bcma_drv_cc *cc); 97 98 #else 98 99 static inline int bcma_gpio_init(struct bcma_drv_cc *cc) 99 100 { 100 101 return -ENOTSUPP; 102 + } 103 + static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc) 104 + { 105 + return 0; 101 106 } 102 107 #endif /* CONFIG_BCMA_DRIVER_GPIO */ 103 108
+1 -1
drivers/bcma/driver_chipcommon_nflash.c
··· 21 21 struct bcma_bus *bus = cc->core->bus; 22 22 23 23 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && 24 - cc->core->id.rev != 0x38) { 24 + cc->core->id.rev != 38) { 25 25 bcma_err(bus, "NAND flash on unsupported board!\n"); 26 26 return -ENOTSUPP; 27 27 }
+5
drivers/bcma/driver_gpio.c
··· 96 96 97 97 return gpiochip_add(chip); 98 98 } 99 + 100 + int bcma_gpio_unregister(struct bcma_drv_cc *cc) 101 + { 102 + return gpiochip_remove(&cc->gpio); 103 + }
+7
drivers/bcma/main.c
··· 268 268 void bcma_bus_unregister(struct bcma_bus *bus) 269 269 { 270 270 struct bcma_device *cores[3]; 271 + int err; 272 + 273 + err = bcma_gpio_unregister(&bus->drv_cc); 274 + if (err == -EBUSY) 275 + bcma_err(bus, "Some GPIOs are still in use.\n"); 276 + else if (err) 277 + bcma_err(bus, "Can not unregister GPIO driver: %i\n", err); 271 278 272 279 cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K); 273 280 cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
+1 -1
drivers/block/drbd/drbd_req.c
··· 168 168 } 169 169 170 170 /* must hold resource->req_lock */ 171 - static void start_new_tl_epoch(struct drbd_tconn *tconn) 171 + void start_new_tl_epoch(struct drbd_tconn *tconn) 172 172 { 173 173 /* no point closing an epoch, if it is empty, anyways. */ 174 174 if (tconn->current_tle_writes == 0)
+1
drivers/block/drbd/drbd_req.h
··· 267 267 int error; 268 268 }; 269 269 270 + extern void start_new_tl_epoch(struct drbd_tconn *tconn); 270 271 extern void drbd_req_destroy(struct kref *kref); 271 272 extern void _req_may_be_done(struct drbd_request *req, 272 273 struct bio_and_error *m);
+7
drivers/block/drbd/drbd_state.c
··· 931 931 enum drbd_state_rv rv = SS_SUCCESS; 932 932 enum sanitize_state_warnings ssw; 933 933 struct after_state_chg_work *ascw; 934 + bool did_remote, should_do_remote; 934 935 935 936 os = drbd_read_state(mdev); 936 937 ··· 982 981 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) 983 982 atomic_inc(&mdev->local_cnt); 984 983 984 + did_remote = drbd_should_do_remote(mdev->state); 985 985 mdev->state.i = ns.i; 986 + should_do_remote = drbd_should_do_remote(mdev->state); 986 987 mdev->tconn->susp = ns.susp; 987 988 mdev->tconn->susp_nod = ns.susp_nod; 988 989 mdev->tconn->susp_fen = ns.susp_fen; 990 + 991 + /* put replicated vs not-replicated requests in seperate epochs */ 992 + if (did_remote != should_do_remote) 993 + start_new_tl_epoch(mdev->tconn); 989 994 990 995 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) 991 996 drbd_print_uuids(mdev, "attached to UUIDs");
+18 -6
drivers/block/mtip32xx/mtip32xx.c
··· 626 626 } 627 627 } 628 628 629 - if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 629 + if (cmdto_cnt) { 630 630 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); 631 - 632 - mtip_restart_port(port); 631 + if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 632 + mtip_restart_port(port); 633 + wake_up_interruptible(&port->svc_wait); 634 + } 633 635 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 634 - wake_up_interruptible(&port->svc_wait); 635 636 } 636 637 637 638 if (port->ic_pause_timer) { ··· 3888 3887 * Delete our gendisk structure. This also removes the device 3889 3888 * from /dev 3890 3889 */ 3891 - del_gendisk(dd->disk); 3890 + if (dd->disk) { 3891 + if (dd->disk->queue) 3892 + del_gendisk(dd->disk); 3893 + else 3894 + put_disk(dd->disk); 3895 + } 3892 3896 3893 3897 spin_lock(&rssd_index_lock); 3894 3898 ida_remove(&rssd_index_ida, dd->index); ··· 3927 3921 "Shutting down %s ...\n", dd->disk->disk_name); 3928 3922 3929 3923 /* Delete our gendisk structure, and cleanup the blk queue. */ 3930 - del_gendisk(dd->disk); 3924 + if (dd->disk) { 3925 + if (dd->disk->queue) 3926 + del_gendisk(dd->disk); 3927 + else 3928 + put_disk(dd->disk); 3929 + } 3930 + 3931 3931 3932 3932 spin_lock(&rssd_index_lock); 3933 3933 ida_remove(&rssd_index_ida, dd->index);
+11 -7
drivers/block/xen-blkback/blkback.c
··· 161 161 static void make_response(struct xen_blkif *blkif, u64 id, 162 162 unsigned short op, int st); 163 163 164 - #define foreach_grant(pos, rbtree, node) \ 165 - for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \ 164 + #define foreach_grant_safe(pos, n, rbtree, node) \ 165 + for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ 166 + (n) = rb_next(&(pos)->node); \ 166 167 &(pos)->node != NULL; \ 167 - (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node)) 168 + (pos) = container_of(n, typeof(*(pos)), node), \ 169 + (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) 168 170 169 171 170 172 static void add_persistent_gnt(struct rb_root *root, ··· 219 217 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 220 218 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 221 219 struct persistent_gnt *persistent_gnt; 220 + struct rb_node *n; 222 221 int ret = 0; 223 222 int segs_to_unmap = 0; 224 223 225 - foreach_grant(persistent_gnt, root, node) { 224 + foreach_grant_safe(persistent_gnt, n, root, node) { 226 225 BUG_ON(persistent_gnt->handle == 227 226 BLKBACK_INVALID_HANDLE); 228 227 gnttab_set_unmap_op(&unmap[segs_to_unmap], ··· 233 230 persistent_gnt->handle); 234 231 235 232 pages[segs_to_unmap] = persistent_gnt->page; 236 - rb_erase(&persistent_gnt->node, root); 237 - kfree(persistent_gnt); 238 - num--; 239 233 240 234 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || 241 235 !rb_next(&persistent_gnt->node)) { ··· 241 241 BUG_ON(ret); 242 242 segs_to_unmap = 0; 243 243 } 244 + 245 + rb_erase(&persistent_gnt->node, root); 246 + kfree(persistent_gnt); 247 + num--; 244 248 } 245 249 BUG_ON(num != 0); 246 250 }
+6 -4
drivers/block/xen-blkfront.c
··· 792 792 { 793 793 struct llist_node *all_gnts; 794 794 struct grant *persistent_gnt; 795 + struct llist_node *n; 795 796 796 797 /* Prevent new requests being issued until we fix things up. */ 797 798 spin_lock_irq(&info->io_lock); ··· 805 804 /* Remove all persistent grants */ 806 805 if (info->persistent_gnts_c) { 807 806 all_gnts = llist_del_all(&info->persistent_gnts); 808 - llist_for_each_entry(persistent_gnt, all_gnts, node) { 807 + llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) { 809 808 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 810 809 __free_page(pfn_to_page(persistent_gnt->pfn)); 811 810 kfree(persistent_gnt); ··· 836 835 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, 837 836 struct blkif_response *bret) 838 837 { 839 - int i; 838 + int i = 0; 840 839 struct bio_vec *bvec; 841 840 struct req_iterator iter; 842 841 unsigned long flags; ··· 853 852 */ 854 853 rq_for_each_segment(bvec, s->request, iter) { 855 854 BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); 856 - i = offset >> PAGE_SHIFT; 855 + if (bvec->bv_offset < offset) 856 + i++; 857 857 BUG_ON(i >= s->req.u.rw.nr_segments); 858 858 shared_data = kmap_atomic( 859 859 pfn_to_page(s->grants_used[i]->pfn)); ··· 863 861 bvec->bv_len); 864 862 bvec_kunmap_irq(bvec_data, &flags); 865 863 kunmap_atomic(shared_data); 866 - offset += bvec->bv_len; 864 + offset = bvec->bv_offset + bvec->bv_len; 867 865 } 868 866 } 869 867 /* Add the persistent grant into the list of free grants */
+2 -1
drivers/char/virtio_console.c
··· 2062 2062 /* Disable interrupts for vqs */ 2063 2063 vdev->config->reset(vdev); 2064 2064 /* Finish up work that's lined up */ 2065 - cancel_work_sync(&portdev->control_work); 2065 + if (use_multiport(portdev)) 2066 + cancel_work_sync(&portdev->control_work); 2066 2067 2067 2068 list_for_each_entry_safe(port, port2, &portdev->ports, list) 2068 2069 unplug_port(port);
+24 -3
drivers/gpu/drm/radeon/evergreen.c
··· 1313 1313 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { 1314 1314 radeon_wait_for_vblank(rdev, i); 1315 1315 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1316 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1316 1317 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1318 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1317 1319 } 1318 1320 } else { 1319 1321 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1320 1322 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { 1321 1323 radeon_wait_for_vblank(rdev, i); 1322 1324 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1325 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1323 1326 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1327 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1324 1328 } 1325 1329 } 1326 1330 /* wait for the next frame */ ··· 1349 1345 blackout &= ~BLACKOUT_MODE_MASK; 1350 1346 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); 1351 1347 } 1348 + /* wait for the MC to settle */ 1349 + udelay(100); 1352 1350 } 1353 1351 1354 1352 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) ··· 1384 1378 if (ASIC_IS_DCE6(rdev)) { 1385 1379 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 1386 1380 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1381 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1387 1382 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1383 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1388 1384 } else { 1389 1385 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1390 1386 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1387 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1391 1388 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1389 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1392 1390 } 1393 1391 /* wait for the next frame */ 1394 1392 frame_count = radeon_get_vblank_counter(rdev, i); ··· 2046 2036 
WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2047 2037 WREG32(DMA_TILING_CONFIG, gb_addr_config); 2048 2038 2049 - tmp = gb_addr_config & NUM_PIPES_MASK; 2050 - tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, 2051 - EVERGREEN_MAX_BACKENDS, disabled_rb_mask); 2039 + if ((rdev->config.evergreen.max_backends == 1) && 2040 + (rdev->flags & RADEON_IS_IGP)) { 2041 + if ((disabled_rb_mask & 3) == 1) { 2042 + /* RB0 disabled, RB1 enabled */ 2043 + tmp = 0x11111111; 2044 + } else { 2045 + /* RB1 disabled, RB0 enabled */ 2046 + tmp = 0x00000000; 2047 + } 2048 + } else { 2049 + tmp = gb_addr_config & NUM_PIPES_MASK; 2050 + tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, 2051 + EVERGREEN_MAX_BACKENDS, disabled_rb_mask); 2052 + } 2052 2053 WREG32(GB_BACKEND_MAP, tmp); 2053 2054 2054 2055 WREG32(CGTS_SYS_TCC_DISABLE, 0);
+5 -2
drivers/gpu/drm/radeon/r600.c
··· 1462 1462 u32 disabled_rb_mask) 1463 1463 { 1464 1464 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1465 - u32 pipe_rb_ratio, pipe_rb_remain; 1465 + u32 pipe_rb_ratio, pipe_rb_remain, tmp; 1466 1466 u32 data = 0, mask = 1 << (max_rb_num - 1); 1467 1467 unsigned i, j; 1468 1468 1469 1469 /* mask out the RBs that don't exist on that asic */ 1470 - disabled_rb_mask |= (0xff << max_rb_num) & 0xff; 1470 + tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); 1471 + /* make sure at least one RB is available */ 1472 + if ((tmp & 0xff) != 0xff) 1473 + disabled_rb_mask = tmp; 1471 1474 1472 1475 rendering_pipe_num = 1 << tiling_pipe_num; 1473 1476 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+3 -3
drivers/gpu/drm/radeon/radeon_asic.c
··· 1445 1445 .vm = { 1446 1446 .init = &cayman_vm_init, 1447 1447 .fini = &cayman_vm_fini, 1448 - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1448 + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1449 1449 .set_page = &cayman_vm_set_page, 1450 1450 }, 1451 1451 .ring = { ··· 1572 1572 .vm = { 1573 1573 .init = &cayman_vm_init, 1574 1574 .fini = &cayman_vm_fini, 1575 - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1575 + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1576 1576 .set_page = &cayman_vm_set_page, 1577 1577 }, 1578 1578 .ring = { ··· 1699 1699 .vm = { 1700 1700 .init = &si_vm_init, 1701 1701 .fini = &si_vm_fini, 1702 - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1702 + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1703 1703 .set_page = &si_vm_set_page, 1704 1704 }, 1705 1705 .ring = {
+8
drivers/gpu/drm/radeon/radeon_combios.c
··· 2470 2470 1), 2471 2471 ATOM_DEVICE_CRT1_SUPPORT); 2472 2472 } 2473 + /* RV100 board with external TDMS bit mis-set. 2474 + * Actually uses internal TMDS, clear the bit. 2475 + */ 2476 + if (dev->pdev->device == 0x5159 && 2477 + dev->pdev->subsystem_vendor == 0x1014 && 2478 + dev->pdev->subsystem_device == 0x029A) { 2479 + tmp &= ~(1 << 4); 2480 + } 2473 2481 if ((tmp >> 4) & 0x1) { 2474 2482 devices |= ATOM_DEVICE_DFP2_SUPPORT; 2475 2483 radeon_add_legacy_encoder(dev,
+3 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 1115 1115 } 1116 1116 1117 1117 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 1118 - if (radeon_fb == NULL) 1118 + if (radeon_fb == NULL) { 1119 + drm_gem_object_unreference_unlocked(obj); 1119 1120 return ERR_PTR(-ENOMEM); 1121 + } 1120 1122 1121 1123 ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); 1122 1124 if (ret) {
+3
drivers/gpu/drm/radeon/radeon_ring.c
··· 377 377 { 378 378 int r; 379 379 380 + /* make sure we aren't trying to allocate more space than there is on the ring */ 381 + if (ndw > (ring->ring_size / 4)) 382 + return -ENOMEM; 380 383 /* Align requested size with padding so unlock_commit can 381 384 * pad safely */ 382 385 ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+1
drivers/gpu/drm/radeon/reg_srcs/cayman
··· 1 1 cayman 0x9400 2 2 0x0000802C GRBM_GFX_INDEX 3 + 0x00008040 WAIT_UNTIL 3 4 0x000084FC CP_STRMOUT_CNTL 4 5 0x000085F0 CP_COHER_CNTL 5 6 0x000085F4 CP_COHER_SIZE
+2
drivers/gpu/drm/radeon/rv515.c
··· 336 336 WREG32(R600_CITF_CNTL, blackout); 337 337 } 338 338 } 339 + /* wait for the MC to settle */ 340 + udelay(100); 339 341 } 340 342 341 343 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+8 -5
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 429 429 struct ttm_bo_device *bdev = bo->bdev; 430 430 struct ttm_bo_driver *driver = bdev->driver; 431 431 432 - fbo = kzalloc(sizeof(*fbo), GFP_KERNEL); 432 + fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); 433 433 if (!fbo) 434 434 return -ENOMEM; 435 435 ··· 448 448 fbo->vm_node = NULL; 449 449 atomic_set(&fbo->cpu_writers, 0); 450 450 451 - fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 451 + spin_lock(&bdev->fence_lock); 452 + if (bo->sync_obj) 453 + fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 454 + else 455 + fbo->sync_obj = NULL; 456 + spin_unlock(&bdev->fence_lock); 452 457 kref_init(&fbo->list_kref); 453 458 kref_init(&fbo->kref); 454 459 fbo->destroy = &ttm_transfered_destroy; ··· 666 661 */ 667 662 668 663 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 669 - 670 - /* ttm_buffer_object_transfer accesses bo->sync_obj */ 671 - ret = ttm_buffer_object_transfer(bo, &ghost_obj); 672 664 spin_unlock(&bdev->fence_lock); 673 665 if (tmp_obj) 674 666 driver->sync_obj_unref(&tmp_obj); 675 667 668 + ret = ttm_buffer_object_transfer(bo, &ghost_obj); 676 669 if (ret) 677 670 return ret; 678 671
+3 -8
drivers/infiniband/hw/qib/qib_qp.c
··· 263 263 struct qib_qp __rcu **qpp; 264 264 265 265 qpp = &dev->qp_table[n]; 266 - q = rcu_dereference_protected(*qpp, 267 - lockdep_is_held(&dev->qpt_lock)); 268 - for (; q; qpp = &q->next) { 266 + for (; (q = rcu_dereference_protected(*qpp, 267 + lockdep_is_held(&dev->qpt_lock))) != NULL; 268 + qpp = &q->next) 269 269 if (q == qp) { 270 270 atomic_dec(&qp->refcount); 271 271 *qpp = qp->next; 272 272 rcu_assign_pointer(qp->next, NULL); 273 - q = rcu_dereference_protected(*qpp, 274 - lockdep_is_held(&dev->qpt_lock)); 275 273 break; 276 274 } 277 - q = rcu_dereference_protected(*qpp, 278 - lockdep_is_held(&dev->qpt_lock)); 279 - } 280 275 } 281 276 282 277 spin_unlock_irqrestore(&dev->qpt_lock, flags);
+3 -3
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 741 741 742 742 tx_req->mapping = addr; 743 743 744 + skb_orphan(skb); 745 + skb_dst_drop(skb); 746 + 744 747 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), 745 748 addr, skb->len); 746 749 if (unlikely(rc)) { ··· 754 751 } else { 755 752 dev->trans_start = jiffies; 756 753 ++tx->tx_head; 757 - 758 - skb_orphan(skb); 759 - skb_dst_drop(skb); 760 754 761 755 if (++priv->tx_outstanding == ipoib_sendq_size) { 762 756 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+3 -3
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 600 600 netif_stop_queue(dev); 601 601 } 602 602 603 + skb_orphan(skb); 604 + skb_dst_drop(skb); 605 + 603 606 rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 604 607 address->ah, qpn, tx_req, phead, hlen); 605 608 if (unlikely(rc)) { ··· 618 615 619 616 address->last_send = priv->tx_head; 620 617 ++priv->tx_head; 621 - 622 - skb_orphan(skb); 623 - skb_dst_drop(skb); 624 618 } 625 619 626 620 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+1
drivers/media/radio/radio-keene.c
··· 374 374 radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; 375 375 radio->vdev.lock = &radio->lock; 376 376 radio->vdev.release = video_device_release_empty; 377 + radio->vdev.vfl_dir = VFL_DIR_TX; 377 378 378 379 radio->usbdev = interface_to_usbdev(intf); 379 380 radio->intf = intf;
+1
drivers/media/radio/radio-si4713.c
··· 250 250 .name = "radio-si4713", 251 251 .release = video_device_release, 252 252 .ioctl_ops = &radio_si4713_ioctl_ops, 253 + .vfl_dir = VFL_DIR_TX, 253 254 }; 254 255 255 256 /* Platform driver interface */
+1
drivers/media/radio/radio-wl1273.c
··· 1971 1971 .ioctl_ops = &wl1273_ioctl_ops, 1972 1972 .name = WL1273_FM_DRIVER_NAME, 1973 1973 .release = wl1273_vdev_release, 1974 + .vfl_dir = VFL_DIR_TX, 1974 1975 }; 1975 1976 1976 1977 static int wl1273_fm_radio_remove(struct platform_device *pdev)
+10
drivers/media/radio/wl128x/fmdrv_v4l2.c
··· 518 518 .ioctl_ops = &fm_drv_ioctl_ops, 519 519 .name = FM_DRV_NAME, 520 520 .release = video_device_release, 521 + /* 522 + * To ensure both the tuner and modulator ioctls are accessible we 523 + * set the vfl_dir to M2M to indicate this. 524 + * 525 + * It is not really a mem2mem device of course, but it can both receive 526 + * and transmit using the same radio device. It's the only radio driver 527 + * that does this and it should really be split in two radio devices, 528 + * but that would affect applications using this driver. 529 + */ 530 + .vfl_dir = VFL_DIR_M2M, 521 531 }; 522 532 523 533 int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
+1 -1
drivers/mtd/bcm47xxpart.c
··· 14 14 #include <linux/slab.h> 15 15 #include <linux/mtd/mtd.h> 16 16 #include <linux/mtd/partitions.h> 17 - #include <asm/mach-bcm47xx/nvram.h> 17 + #include <bcm47xx_nvram.h> 18 18 19 19 /* 10 parts were found on sflash on Netgear WNDR4500 */ 20 20 #define BCM47XXPART_MAX_PARTS 12
+1
drivers/mtd/devices/Kconfig
··· 272 272 tristate "M-Systems Disk-On-Chip G3" 273 273 select BCH 274 274 select BCH_CONST_PARAMS 275 + select BITREVERSE 275 276 ---help--- 276 277 This provides an MTD device driver for the M-Systems DiskOnChip 277 278 G3 devices.
+1 -1
drivers/mtd/maps/physmap_of.c
··· 170 170 resource_size_t res_size; 171 171 struct mtd_part_parser_data ppdata; 172 172 bool map_indirect; 173 - const char *mtd_name; 173 + const char *mtd_name = NULL; 174 174 175 175 match = of_match_device(of_flash_match, &dev->dev); 176 176 if (!match)
+2 -2
drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
··· 17 17 #include "bcm47xxnflash.h" 18 18 19 19 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has 20 - * shown 164 retries as maxiumum. */ 21 - #define NFLASH_READY_RETRIES 1000 20 + * shown ~1000 retries as maxiumum. */ 21 + #define NFLASH_READY_RETRIES 10000 22 22 23 23 #define NFLASH_SECTOR_SIZE 512 24 24
+1 -1
drivers/mtd/nand/davinci_nand.c
··· 523 523 static const struct of_device_id davinci_nand_of_match[] = { 524 524 {.compatible = "ti,davinci-nand", }, 525 525 {}, 526 - } 526 + }; 527 527 MODULE_DEVICE_TABLE(of, davinci_nand_of_match); 528 528 529 529 static struct davinci_nand_pdata
+5 -2
drivers/mtd/nand/nand_base.c
··· 2857 2857 int i; 2858 2858 int val; 2859 2859 2860 - /* ONFI need to be probed in 8 bits mode */ 2861 - WARN_ON(chip->options & NAND_BUSWIDTH_16); 2860 + /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */ 2861 + if (chip->options & NAND_BUSWIDTH_16) { 2862 + pr_err("Trying ONFI probe in 16 bits mode, aborting !\n"); 2863 + return 0; 2864 + } 2862 2865 /* Try ONFI for unknown chip or LP */ 2863 2866 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2864 2867 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+1
drivers/net/bonding/bond_sysfs.c
··· 1053 1053 pr_info("%s: Setting primary slave to None.\n", 1054 1054 bond->dev->name); 1055 1055 bond->primary_slave = NULL; 1056 + memset(bond->params.primary, 0, sizeof(bond->params.primary)); 1056 1057 bond_select_active_slave(bond); 1057 1058 goto out; 1058 1059 }
+5 -1
drivers/net/can/c_can/c_can.c
··· 488 488 489 489 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), 490 490 IFX_WRITE_LOW_16BIT(mask)); 491 + 492 + /* According to C_CAN documentation, the reserved bit 493 + * in IFx_MASK2 register is fixed 1 494 + */ 491 495 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), 492 - IFX_WRITE_HIGH_16BIT(mask)); 496 + IFX_WRITE_HIGH_16BIT(mask) | BIT(13)); 493 497 494 498 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 495 499 IFX_WRITE_LOW_16BIT(id));
+2 -2
drivers/net/ethernet/broadcom/b44.c
··· 381 381 } 382 382 383 383 #ifdef CONFIG_BCM47XX 384 - #include <asm/mach-bcm47xx/nvram.h> 384 + #include <bcm47xx_nvram.h> 385 385 static void b44_wap54g10_workaround(struct b44 *bp) 386 386 { 387 387 char buf[20]; ··· 393 393 * see https://dev.openwrt.org/ticket/146 394 394 * check and reset bit "isolate" 395 395 */ 396 - if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0) 396 + if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0) 397 397 return; 398 398 if (simple_strtoul(buf, NULL, 0) == 2) { 399 399 err = __b44_readphy(bp, 0, MII_BMCR, &val);
+4 -4
drivers/net/ethernet/emulex/benet/be.h
··· 36 36 37 37 #define DRV_VER "4.4.161.0u" 38 38 #define DRV_NAME "be2net" 39 - #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 40 - #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 41 - #define OC_NAME "Emulex OneConnect 10Gbps NIC" 39 + #define BE_NAME "Emulex BladeEngine2" 40 + #define BE3_NAME "Emulex BladeEngine3" 41 + #define OC_NAME "Emulex OneConnect" 42 42 #define OC_NAME_BE OC_NAME "(be3)" 43 43 #define OC_NAME_LANCER OC_NAME "(Lancer)" 44 44 #define OC_NAME_SH OC_NAME "(Skyhawk)" 45 - #define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" 45 + #define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" 46 46 47 47 #define BE_VENDOR_ID 0x19a2 48 48 #define EMULEX_VENDOR_ID 0x10df
+1 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 25 25 MODULE_VERSION(DRV_VER); 26 26 MODULE_DEVICE_TABLE(pci, be_dev_ids); 27 27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); 28 - MODULE_AUTHOR("ServerEngines Corporation"); 28 + MODULE_AUTHOR("Emulex Corporation"); 29 29 MODULE_LICENSE("GPL"); 30 30 31 31 static unsigned int num_vfs;
+9
drivers/net/ethernet/intel/e1000e/defines.h
··· 232 232 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 233 233 #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ 234 234 #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ 235 + #define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ 235 236 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 236 237 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 237 238 #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ ··· 390 389 391 390 #define E1000_PBS_16K E1000_PBA_16K 392 391 392 + /* Uncorrectable/correctable ECC Error counts and enable bits */ 393 + #define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF 394 + #define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 395 + #define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 396 + #define E1000_PBECCSTS_ECC_ENABLE 0x00010000 397 + 393 398 #define IFS_MAX 80 394 399 #define IFS_MIN 40 395 400 #define IFS_RATIO 4 ··· 415 408 #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ 416 409 #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 417 410 #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 411 + #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ 418 412 #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 419 413 #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ 420 414 #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ ··· 451 443 #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 452 444 #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ 453 445 #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ 446 + #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ 454 447 #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ 455 448 #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ 456 449 #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
+2
drivers/net/ethernet/intel/e1000e/e1000.h
··· 309 309 310 310 struct napi_struct napi; 311 311 312 + unsigned int uncorr_errors; /* uncorrectable ECC errors */ 313 + unsigned int corr_errors; /* correctable ECC errors */ 312 314 unsigned int restart_queue; 313 315 u32 txd_cmd; 314 316
+2
drivers/net/ethernet/intel/e1000e/ethtool.c
··· 108 108 E1000_STAT("dropped_smbus", stats.mgpdc), 109 109 E1000_STAT("rx_dma_failed", rx_dma_failed), 110 110 E1000_STAT("tx_dma_failed", tx_dma_failed), 111 + E1000_STAT("uncorr_ecc_errors", uncorr_errors), 112 + E1000_STAT("corr_ecc_errors", corr_errors), 111 113 }; 112 114 113 115 #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+1
drivers/net/ethernet/intel/e1000e/hw.h
··· 77 77 #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ 78 78 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ 79 79 E1000_PBS = 0x01008, /* Packet Buffer Size */ 80 + E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */ 80 81 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ 81 82 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ 82 83 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
+11
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 3624 3624 if (hw->mac.type == e1000_ich8lan) 3625 3625 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); 3626 3626 ew32(RFCTL, reg); 3627 + 3628 + /* Enable ECC on Lynxpoint */ 3629 + if (hw->mac.type == e1000_pch_lpt) { 3630 + reg = er32(PBECCSTS); 3631 + reg |= E1000_PBECCSTS_ECC_ENABLE; 3632 + ew32(PBECCSTS, reg); 3633 + 3634 + reg = er32(CTRL); 3635 + reg |= E1000_CTRL_MEHE; 3636 + ew32(CTRL, reg); 3637 + } 3627 3638 } 3628 3639 3629 3640 /**
+46
drivers/net/ethernet/intel/e1000e/netdev.c
··· 1678 1678 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1679 1679 } 1680 1680 1681 + /* Reset on uncorrectable ECC error */ 1682 + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { 1683 + u32 pbeccsts = er32(PBECCSTS); 1684 + 1685 + adapter->corr_errors += 1686 + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 1687 + adapter->uncorr_errors += 1688 + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 1689 + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 1690 + 1691 + /* Do the reset outside of interrupt context */ 1692 + schedule_work(&adapter->reset_task); 1693 + 1694 + /* return immediately since reset is imminent */ 1695 + return IRQ_HANDLED; 1696 + } 1697 + 1681 1698 if (napi_schedule_prep(&adapter->napi)) { 1682 1699 adapter->total_tx_bytes = 0; 1683 1700 adapter->total_tx_packets = 0; ··· 1756 1739 /* guard against interrupt when we're going down */ 1757 1740 if (!test_bit(__E1000_DOWN, &adapter->state)) 1758 1741 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1742 + } 1743 + 1744 + /* Reset on uncorrectable ECC error */ 1745 + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { 1746 + u32 pbeccsts = er32(PBECCSTS); 1747 + 1748 + adapter->corr_errors += 1749 + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 1750 + adapter->uncorr_errors += 1751 + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 1752 + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 1753 + 1754 + /* Do the reset outside of interrupt context */ 1755 + schedule_work(&adapter->reset_task); 1756 + 1757 + /* return immediately since reset is imminent */ 1758 + return IRQ_HANDLED; 1759 1759 } 1760 1760 1761 1761 if (napi_schedule_prep(&adapter->napi)) { ··· 2138 2104 if (adapter->msix_entries) { 2139 2105 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2140 2106 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); 2107 + } else if (hw->mac.type == e1000_pch_lpt) { 2108 + ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); 2141 2109 } else { 2142 2110 ew32(IMS, 
IMS_ENABLE_MASK); 2143 2111 } ··· 4287 4251 adapter->stats.mgptc += er32(MGTPTC); 4288 4252 adapter->stats.mgprc += er32(MGTPRC); 4289 4253 adapter->stats.mgpdc += er32(MGTPDC); 4254 + 4255 + /* Correctable ECC Errors */ 4256 + if (hw->mac.type == e1000_pch_lpt) { 4257 + u32 pbeccsts = er32(PBECCSTS); 4258 + adapter->corr_errors += 4259 + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 4260 + adapter->uncorr_errors += 4261 + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 4262 + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 4263 + } 4290 4264 } 4291 4265 4292 4266 /**
+1 -1
drivers/net/ethernet/mellanox/mlx4/main.c
··· 380 380 } 381 381 } 382 382 383 - if ((dev_cap->flags & 383 + if ((dev->caps.flags & 384 384 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 385 385 mlx4_is_master(dev)) 386 386 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
+2 -6
drivers/net/ethernet/via/via-rhine.c
··· 1801 1801 rp->tx_skbuff[entry]->len, 1802 1802 PCI_DMA_TODEVICE); 1803 1803 } 1804 - dev_kfree_skb_irq(rp->tx_skbuff[entry]); 1804 + dev_kfree_skb(rp->tx_skbuff[entry]); 1805 1805 rp->tx_skbuff[entry] = NULL; 1806 1806 entry = (++rp->dirty_tx) % TX_RING_SIZE; 1807 1807 } ··· 2010 2010 if (intr_status & IntrPCIErr) 2011 2011 netif_warn(rp, hw, dev, "PCI error\n"); 2012 2012 2013 - napi_disable(&rp->napi); 2014 - rhine_irq_disable(rp); 2015 - /* Slow and safe. Consider __napi_schedule as a replacement ? */ 2016 - napi_enable(&rp->napi); 2017 - napi_schedule(&rp->napi); 2013 + iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); 2018 2014 2019 2015 out_unlock: 2020 2016 mutex_unlock(&rp->task_lock);
+24 -14
drivers/net/tun.c
··· 298 298 } 299 299 300 300 static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 301 - u16 queue_index) 301 + struct tun_file *tfile) 302 302 { 303 303 struct hlist_head *head; 304 304 struct tun_flow_entry *e; 305 305 unsigned long delay = tun->ageing_time; 306 + u16 queue_index = tfile->queue_index; 306 307 307 308 if (!rxhash) 308 309 return; ··· 312 311 313 312 rcu_read_lock(); 314 313 315 - if (tun->numqueues == 1) 314 + /* We may get a very small possibility of OOO during switching, not 315 + * worth to optimize.*/ 316 + if (tun->numqueues == 1 || tfile->detached) 316 317 goto unlock; 317 318 318 319 e = tun_flow_find(head, rxhash); ··· 414 411 415 412 tun = rtnl_dereference(tfile->tun); 416 413 417 - if (tun) { 414 + if (tun && !tfile->detached) { 418 415 u16 index = tfile->queue_index; 419 416 BUG_ON(index >= tun->numqueues); 420 417 dev = tun->dev; 421 418 422 419 rcu_assign_pointer(tun->tfiles[index], 423 420 tun->tfiles[tun->numqueues - 1]); 424 - rcu_assign_pointer(tfile->tun, NULL); 425 421 ntfile = rtnl_dereference(tun->tfiles[index]); 426 422 ntfile->queue_index = index; 427 423 428 424 --tun->numqueues; 429 - if (clean) 425 + if (clean) { 426 + rcu_assign_pointer(tfile->tun, NULL); 430 427 sock_put(&tfile->sk); 431 - else 428 + } else 432 429 tun_disable_queue(tun, tfile); 433 430 434 431 synchronize_net(); ··· 442 439 } 443 440 444 441 if (clean) { 445 - if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && 446 - !(tun->flags & TUN_PERSIST)) 447 - if (tun->dev->reg_state == NETREG_REGISTERED) 442 + if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 443 + netif_carrier_off(tun->dev); 444 + 445 + if (!(tun->flags & TUN_PERSIST) && 446 + tun->dev->reg_state == NETREG_REGISTERED) 448 447 unregister_netdevice(tun->dev); 448 + } 449 449 450 450 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, 451 451 &tfile->socket.flags)); ··· 475 469 wake_up_all(&tfile->wq.wait); 476 470 rcu_assign_pointer(tfile->tun, NULL); 477 471 
--tun->numqueues; 472 + } 473 + list_for_each_entry(tfile, &tun->disabled, next) { 474 + wake_up_all(&tfile->wq.wait); 475 + rcu_assign_pointer(tfile->tun, NULL); 478 476 } 479 477 BUG_ON(tun->numqueues != 0); 480 478 ··· 510 500 goto out; 511 501 512 502 err = -EINVAL; 513 - if (rtnl_dereference(tfile->tun)) 503 + if (rtnl_dereference(tfile->tun) && !tfile->detached) 514 504 goto out; 515 505 516 506 err = -EBUSY; ··· 1209 1199 tun->dev->stats.rx_packets++; 1210 1200 tun->dev->stats.rx_bytes += len; 1211 1201 1212 - tun_flow_update(tun, rxhash, tfile->queue_index); 1202 + tun_flow_update(tun, rxhash, tfile); 1213 1203 return total_len; 1214 1204 } 1215 1205 ··· 1668 1658 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1669 1659 device_create_file(&tun->dev->dev, &dev_attr_group)) 1670 1660 pr_err("Failed to create tun sysfs files\n"); 1671 - 1672 - netif_carrier_on(tun->dev); 1673 1661 } 1662 + 1663 + netif_carrier_on(tun->dev); 1674 1664 1675 1665 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 1676 1666 ··· 1823 1813 ret = tun_attach(tun, file); 1824 1814 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 1825 1815 tun = rtnl_dereference(tfile->tun); 1826 - if (!tun || !(tun->flags & TUN_TAP_MQ)) 1816 + if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached) 1827 1817 ret = -EINVAL; 1828 1818 else 1829 1819 __tun_detach(tfile, false);
+3
drivers/net/usb/cdc_ncm.c
··· 1215 1215 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), 1216 1216 .driver_info = (unsigned long)&wwan_info, 1217 1217 }, 1218 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), 1219 + .driver_info = (unsigned long)&wwan_info, 1220 + }, 1218 1221 1219 1222 /* Infineon(now Intel) HSPA Modem platform */ 1220 1223 { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
+13
drivers/net/usb/qmi_wwan.c
··· 351 351 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), 352 352 .driver_info = (unsigned long)&qmi_wwan_info, 353 353 }, 354 + { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */ 355 + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), 356 + .driver_info = (unsigned long)&qmi_wwan_info, 357 + }, 354 358 355 359 /* 2. Combined interface devices matching on class+protocol */ 356 360 { /* Huawei E367 and possibly others in "Windows mode" */ ··· 363 359 }, 364 360 { /* Huawei E392, E398 and possibly others in "Windows mode" */ 365 361 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), 362 + .driver_info = (unsigned long)&qmi_wwan_info, 363 + }, 364 + { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */ 365 + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37), 366 + .driver_info = (unsigned long)&qmi_wwan_info, 367 + }, 368 + { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */ 369 + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67), 366 370 .driver_info = (unsigned long)&qmi_wwan_info, 367 371 }, 368 372 { /* Pantech UML290, P4200 and more */ ··· 473 461 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 474 462 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 475 463 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 464 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 476 465 477 466 /* 4. Gobi 1000 devices */ 478 467 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+29 -6
drivers/net/usb/usbnet.c
··· 380 380 unsigned long lockflags; 381 381 size_t size = dev->rx_urb_size; 382 382 383 + /* prevent rx skb allocation when error ratio is high */ 384 + if (test_bit(EVENT_RX_KILL, &dev->flags)) { 385 + usb_free_urb(urb); 386 + return -ENOLINK; 387 + } 388 + 383 389 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); 384 390 if (!skb) { 385 391 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); ··· 543 537 dev->net->stats.rx_errors++; 544 538 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); 545 539 break; 540 + } 541 + 542 + /* stop rx if packet error rate is high */ 543 + if (++dev->pkt_cnt > 30) { 544 + dev->pkt_cnt = 0; 545 + dev->pkt_err = 0; 546 + } else { 547 + if (state == rx_cleanup) 548 + dev->pkt_err++; 549 + if (dev->pkt_err > 20) 550 + set_bit(EVENT_RX_KILL, &dev->flags); 546 551 } 547 552 548 553 state = defer_bh(dev, skb, &dev->rxq, state); ··· 807 790 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" : 808 791 (dev->driver_info->flags & FLAG_FRAMING_AX) ? 
"ASIX" : 809 792 "simple"); 793 + 794 + /* reset rx error state */ 795 + dev->pkt_cnt = 0; 796 + dev->pkt_err = 0; 797 + clear_bit(EVENT_RX_KILL, &dev->flags); 810 798 811 799 // delay posting reads until we're fully open 812 800 tasklet_schedule (&dev->bh); ··· 1125 1103 if (info->tx_fixup) { 1126 1104 skb = info->tx_fixup (dev, skb, GFP_ATOMIC); 1127 1105 if (!skb) { 1128 - if (netif_msg_tx_err(dev)) { 1129 - netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1130 - goto drop; 1131 - } else { 1132 - /* cdc_ncm collected packet; waits for more */ 1106 + /* packet collected; minidriver waiting for more */ 1107 + if (info->flags & FLAG_MULTI_PACKET) 1133 1108 goto not_drop; 1134 - } 1109 + netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1110 + goto drop; 1135 1111 } 1136 1112 } 1137 1113 length = skb->len; ··· 1273 1253 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state); 1274 1254 } 1275 1255 } 1256 + 1257 + /* restart RX again after disabling due to high error rate */ 1258 + clear_bit(EVENT_RX_KILL, &dev->flags); 1276 1259 1277 1260 // waiting for all pending urbs to complete? 1278 1261 if (dev->wait) {
+3 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 154 154 if (ret & 1) { /* Link is up. */ 155 155 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 156 156 adapter->netdev->name, adapter->link_speed); 157 - if (!netif_carrier_ok(adapter->netdev)) 158 - netif_carrier_on(adapter->netdev); 157 + netif_carrier_on(adapter->netdev); 159 158 160 159 if (affectTxQueue) { 161 160 for (i = 0; i < adapter->num_tx_queues; i++) ··· 164 165 } else { 165 166 printk(KERN_INFO "%s: NIC Link is Down\n", 166 167 adapter->netdev->name); 167 - if (netif_carrier_ok(adapter->netdev)) 168 - netif_carrier_off(adapter->netdev); 168 + netif_carrier_off(adapter->netdev); 169 169 170 170 if (affectTxQueue) { 171 171 for (i = 0; i < adapter->num_tx_queues; i++) ··· 3059 3061 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 3060 3062 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); 3061 3063 3064 + netif_carrier_off(netdev); 3062 3065 err = register_netdev(netdev); 3063 3066 3064 3067 if (err) {
+21 -14
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
··· 36 36 #include "debug.h" 37 37 38 38 #define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ 39 + #define BRCMS_FLUSH_TIMEOUT 500 /* msec */ 39 40 40 41 /* Flags we support */ 41 42 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ ··· 709 708 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked); 710 709 } 711 710 711 + static bool brcms_tx_flush_completed(struct brcms_info *wl) 712 + { 713 + bool result; 714 + 715 + spin_lock_bh(&wl->lock); 716 + result = brcms_c_tx_flush_completed(wl->wlc); 717 + spin_unlock_bh(&wl->lock); 718 + return result; 719 + } 720 + 712 721 static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) 713 722 { 714 723 struct brcms_info *wl = hw->priv; 724 + int ret; 715 725 716 726 no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false"); 717 727 718 - /* wait for packet queue and dma fifos to run empty */ 719 - spin_lock_bh(&wl->lock); 720 - brcms_c_wait_for_tx_completion(wl->wlc, drop); 721 - spin_unlock_bh(&wl->lock); 728 + ret = wait_event_timeout(wl->tx_flush_wq, 729 + brcms_tx_flush_completed(wl), 730 + msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT)); 731 + 732 + brcms_dbg_mac80211(wl->wlc->hw->d11core, 733 + "ret=%d\n", jiffies_to_msecs(ret)); 722 734 } 723 735 724 736 static const struct ieee80211_ops brcms_ops = { ··· 786 772 787 773 done: 788 774 spin_unlock_bh(&wl->lock); 775 + wake_up(&wl->tx_flush_wq); 789 776 } 790 777 791 778 /* ··· 1034 1019 wl->wiphy = hw->wiphy; 1035 1020 1036 1021 atomic_set(&wl->callbacks, 0); 1022 + 1023 + init_waitqueue_head(&wl->tx_flush_wq); 1037 1024 1038 1025 /* setup the bottom half handler */ 1039 1026 tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); ··· 1625 1608 wiphy_rfkill_start_polling(wl->pub->ieee_hw->wiphy); 1626 1609 spin_lock_bh(&wl->lock); 1627 1610 return blocked; 1628 - } 1629 - 1630 - /* 1631 - * precondition: perimeter lock has been acquired 1632 - */ 1633 - void brcms_msleep(struct brcms_info *wl, uint ms) 1634 - { 1635 - spin_unlock_bh(&wl->lock); 
1636 - msleep(ms); 1637 - spin_lock_bh(&wl->lock); 1638 1611 }
+2 -1
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
··· 68 68 spinlock_t lock; /* per-device perimeter lock */ 69 69 spinlock_t isr_lock; /* per-device ISR synchronization lock */ 70 70 71 + /* tx flush */ 72 + wait_queue_head_t tx_flush_wq; 71 73 72 74 /* timer related fields */ 73 75 atomic_t callbacks; /* # outstanding callback functions */ ··· 102 100 extern void brcms_free_timer(struct brcms_timer *timer); 103 101 extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic); 104 102 extern bool brcms_del_timer(struct brcms_timer *timer); 105 - extern void brcms_msleep(struct brcms_info *wl, uint ms); 106 103 extern void brcms_dpc(unsigned long data); 107 104 extern void brcms_timer(struct brcms_timer *t); 108 105 extern void brcms_fatal_error(struct brcms_info *wl);
+12 -28
drivers/net/wireless/brcm80211/brcmsmac/main.c
··· 1027 1027 static bool 1028 1028 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) 1029 1029 { 1030 - bool morepending = false; 1031 1030 struct bcma_device *core; 1032 1031 struct tx_status txstatus, *txs; 1033 1032 u32 s1, s2; ··· 1040 1041 txs = &txstatus; 1041 1042 core = wlc_hw->d11core; 1042 1043 *fatal = false; 1043 - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1044 - while (!(*fatal) 1045 - && (s1 & TXS_V)) { 1046 - /* !give others some time to run! */ 1047 - if (n >= max_tx_num) { 1048 - morepending = true; 1049 - break; 1050 - } 1051 1044 1045 + while (n < max_tx_num) { 1046 + s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1052 1047 if (s1 == 0xffffffff) { 1053 1048 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit, 1054 1049 __func__); 1055 1050 *fatal = true; 1056 1051 return false; 1057 1052 } 1058 - s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1053 + /* only process when valid */ 1054 + if (!(s1 & TXS_V)) 1055 + break; 1059 1056 1057 + s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1060 1058 txs->status = s1 & TXS_STATUS_MASK; 1061 1059 txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; 1062 1060 txs->sequence = s2 & TXS_SEQ_MASK; ··· 1061 1065 txs->lasttxtime = 0; 1062 1066 1063 1067 *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); 1064 - 1065 - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1068 + if (*fatal == true) 1069 + return false; 1066 1070 n++; 1067 1071 } 1068 1072 1069 - if (*fatal) 1070 - return false; 1071 - 1072 - return morepending; 1073 + return n >= max_tx_num; 1073 1074 } 1074 1075 1075 1076 static void brcms_c_tbtt(struct brcms_c_info *wlc) ··· 7511 7518 return wlc->band->bandunit; 7512 7519 } 7513 7520 7514 - void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) 7521 + bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc) 7515 7522 { 7516 - int timeout = 20; 7517 7523 int i; 7518 7524 7519 7525 /* Kick DMA to send any pending AMPDU */ 7520 7526 for (i = 0; i < 
ARRAY_SIZE(wlc->hw->di); i++) 7521 7527 if (wlc->hw->di[i]) 7522 - dma_txflush(wlc->hw->di[i]); 7528 + dma_kick_tx(wlc->hw->di[i]); 7523 7529 7524 - /* wait for queue and DMA fifos to run dry */ 7525 - while (brcms_txpktpendtot(wlc) > 0) { 7526 - brcms_msleep(wlc->wl, 1); 7527 - 7528 - if (--timeout == 0) 7529 - break; 7530 - } 7531 - 7532 - WARN_ON_ONCE(timeout == 0); 7530 + return !brcms_txpktpendtot(wlc); 7533 7531 } 7534 7532 7535 7533 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
+1 -2
drivers/net/wireless/brcm80211/brcmsmac/pub.h
··· 314 314 extern void brcms_c_scan_start(struct brcms_c_info *wlc); 315 315 extern void brcms_c_scan_stop(struct brcms_c_info *wlc); 316 316 extern int brcms_c_get_curband(struct brcms_c_info *wlc); 317 - extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, 318 - bool drop); 319 317 extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel); 320 318 extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl); 321 319 extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, ··· 330 332 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); 331 333 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); 332 334 extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); 335 + extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc); 333 336 334 337 #endif /* _BRCM_PUB_H_ */
+7 -17
drivers/net/wireless/iwlwifi/dvm/tx.c
··· 1153 1153 next_reclaimed = ssn; 1154 1154 } 1155 1155 1156 + if (tid != IWL_TID_NON_QOS) { 1157 + priv->tid_data[sta_id][tid].next_reclaimed = 1158 + next_reclaimed; 1159 + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1160 + next_reclaimed); 1161 + } 1162 + 1156 1163 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); 1157 1164 1158 1165 iwlagn_check_ratid_empty(priv, sta_id, tid); ··· 1210 1203 if (!is_agg) 1211 1204 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1212 1205 1213 - /* 1214 - * W/A for FW bug - the seq_ctl isn't updated when the 1215 - * queues are flushed. Fetch it from the packet itself 1216 - */ 1217 - if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) { 1218 - next_reclaimed = le16_to_cpu(hdr->seq_ctrl); 1219 - next_reclaimed = 1220 - SEQ_TO_SN(next_reclaimed + 0x10); 1221 - } 1222 - 1223 1206 is_offchannel_skb = 1224 1207 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); 1225 1208 freed++; 1226 - } 1227 - 1228 - if (tid != IWL_TID_NON_QOS) { 1229 - priv->tid_data[sta_id][tid].next_reclaimed = 1230 - next_reclaimed; 1231 - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1232 - next_reclaimed); 1233 1209 } 1234 1210 1235 1211 WARN_ON(!is_agg && freed != 1);
+5 -4
drivers/net/wireless/mwifiex/scan.c
··· 1563 1563 dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", 1564 1564 scan_rsp->number_of_sets); 1565 1565 ret = -1; 1566 - goto done; 1566 + goto check_next_scan; 1567 1567 } 1568 1568 1569 1569 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); ··· 1634 1634 if (!beacon_size || beacon_size > bytes_left) { 1635 1635 bss_info += bytes_left; 1636 1636 bytes_left = 0; 1637 - return -1; 1637 + ret = -1; 1638 + goto check_next_scan; 1638 1639 } 1639 1640 1640 1641 /* Initialize the current working beacon pointer for this BSS ··· 1691 1690 dev_err(priv->adapter->dev, 1692 1691 "%s: bytes left < IE length\n", 1693 1692 __func__); 1694 - goto done; 1693 + goto check_next_scan; 1695 1694 } 1696 1695 if (element_id == WLAN_EID_DS_PARAMS) { 1697 1696 channel = *(current_ptr + sizeof(struct ieee_types_header)); ··· 1754 1753 } 1755 1754 } 1756 1755 1756 + check_next_scan: 1757 1757 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 1758 1758 if (list_empty(&adapter->scan_pending_q)) { 1759 1759 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); ··· 1815 1813 } 1816 1814 } 1817 1815 1818 - done: 1819 1816 return ret; 1820 1817 } 1821 1818
+4 -3
drivers/net/wireless/rtlwifi/base.c
··· 1004 1004 is_tx ? "Tx" : "Rx"); 1005 1005 1006 1006 if (is_tx) { 1007 - rtl_lps_leave(hw); 1007 + schedule_work(&rtlpriv-> 1008 + works.lps_leave_work); 1008 1009 ppsc->last_delaylps_stamp_jiffies = 1009 1010 jiffies; 1010 1011 } ··· 1015 1014 } 1016 1015 } else if (ETH_P_ARP == ether_type) { 1017 1016 if (is_tx) { 1018 - rtl_lps_leave(hw); 1017 + schedule_work(&rtlpriv->works.lps_leave_work); 1019 1018 ppsc->last_delaylps_stamp_jiffies = jiffies; 1020 1019 } 1021 1020 ··· 1025 1024 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); 1026 1025 1027 1026 if (is_tx) { 1028 - rtl_lps_leave(hw); 1027 + schedule_work(&rtlpriv->works.lps_leave_work); 1029 1028 ppsc->last_delaylps_stamp_jiffies = jiffies; 1030 1029 } 1031 1030
+2 -2
drivers/net/wireless/rtlwifi/usb.c
··· 542 542 WARN_ON(skb_queue_empty(&rx_queue)); 543 543 while (!skb_queue_empty(&rx_queue)) { 544 544 _skb = skb_dequeue(&rx_queue); 545 - _rtl_usb_rx_process_agg(hw, skb); 546 - ieee80211_rx_irqsafe(hw, skb); 545 + _rtl_usb_rx_process_agg(hw, _skb); 546 + ieee80211_rx_irqsafe(hw, _skb); 547 547 } 548 548 } 549 549
+3
drivers/net/xen-netback/common.h
··· 151 151 /* Notify xenvif that ring now has space to send an skb to the frontend */ 152 152 void xenvif_notify_tx_completion(struct xenvif *vif); 153 153 154 + /* Prevent the device from generating any further traffic. */ 155 + void xenvif_carrier_off(struct xenvif *vif); 156 + 154 157 /* Returns number of ring slots required to send an skb to the frontend */ 155 158 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); 156 159
+14 -9
drivers/net/xen-netback/interface.c
··· 343 343 return err; 344 344 } 345 345 346 - void xenvif_disconnect(struct xenvif *vif) 346 + void xenvif_carrier_off(struct xenvif *vif) 347 347 { 348 348 struct net_device *dev = vif->dev; 349 - if (netif_carrier_ok(dev)) { 350 - rtnl_lock(); 351 - netif_carrier_off(dev); /* discard queued packets */ 352 - if (netif_running(dev)) 353 - xenvif_down(vif); 354 - rtnl_unlock(); 355 - xenvif_put(vif); 356 - } 349 + 350 + rtnl_lock(); 351 + netif_carrier_off(dev); /* discard queued packets */ 352 + if (netif_running(dev)) 353 + xenvif_down(vif); 354 + rtnl_unlock(); 355 + xenvif_put(vif); 356 + } 357 + 358 + void xenvif_disconnect(struct xenvif *vif) 359 + { 360 + if (netif_carrier_ok(vif->dev)) 361 + xenvif_carrier_off(vif); 357 362 358 363 atomic_dec(&vif->refcnt); 359 364 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+71 -44
drivers/net/xen-netback/netback.c
··· 147 147 atomic_dec(&netbk->netfront_count); 148 148 } 149 149 150 - static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); 150 + static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, 151 + u8 status); 151 152 static void make_tx_response(struct xenvif *vif, 152 153 struct xen_netif_tx_request *txp, 153 154 s8 st); ··· 880 879 881 880 do { 882 881 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 883 - if (cons >= end) 882 + if (cons == end) 884 883 break; 885 884 txp = RING_GET_REQUEST(&vif->tx, cons++); 886 885 } while (1); 887 886 vif->tx.req_cons = cons; 888 887 xen_netbk_check_rx_xenvif(vif); 888 + xenvif_put(vif); 889 + } 890 + 891 + static void netbk_fatal_tx_err(struct xenvif *vif) 892 + { 893 + netdev_err(vif->dev, "fatal error; disabling device\n"); 894 + xenvif_carrier_off(vif); 889 895 xenvif_put(vif); 890 896 } 891 897 ··· 909 901 910 902 do { 911 903 if (frags >= work_to_do) { 912 - netdev_dbg(vif->dev, "Need more frags\n"); 904 + netdev_err(vif->dev, "Need more frags\n"); 905 + netbk_fatal_tx_err(vif); 913 906 return -frags; 914 907 } 915 908 916 909 if (unlikely(frags >= MAX_SKB_FRAGS)) { 917 - netdev_dbg(vif->dev, "Too many frags\n"); 910 + netdev_err(vif->dev, "Too many frags\n"); 911 + netbk_fatal_tx_err(vif); 918 912 return -frags; 919 913 } 920 914 921 915 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), 922 916 sizeof(*txp)); 923 917 if (txp->size > first->size) { 924 - netdev_dbg(vif->dev, "Frags galore\n"); 918 + netdev_err(vif->dev, "Frag is bigger than frame.\n"); 919 + netbk_fatal_tx_err(vif); 925 920 return -frags; 926 921 } 927 922 ··· 932 921 frags++; 933 922 934 923 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 935 - netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", 924 + netdev_err(vif->dev, "txp->offset: %x, size: %u\n", 936 925 txp->offset, txp->size); 926 + netbk_fatal_tx_err(vif); 937 927 return -frags; 938 928 } 939 929 } while ((txp++)->flags & XEN_NETTXF_more_data); 
··· 978 966 pending_idx = netbk->pending_ring[index]; 979 967 page = xen_netbk_alloc_page(netbk, skb, pending_idx); 980 968 if (!page) 981 - return NULL; 969 + goto err; 982 970 983 971 gop->source.u.ref = txp->gref; 984 972 gop->source.domid = vif->domid; ··· 1000 988 } 1001 989 1002 990 return gop; 991 + err: 992 + /* Unwind, freeing all pages and sending error responses. */ 993 + while (i-- > start) { 994 + xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), 995 + XEN_NETIF_RSP_ERROR); 996 + } 997 + /* The head too, if necessary. */ 998 + if (start) 999 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 1000 + 1001 + return NULL; 1003 1002 } 1004 1003 1005 1004 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, ··· 1019 996 { 1020 997 struct gnttab_copy *gop = *gopp; 1021 998 u16 pending_idx = *((u16 *)skb->data); 1022 - struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; 1023 - struct xenvif *vif = pending_tx_info[pending_idx].vif; 1024 - struct xen_netif_tx_request *txp; 1025 999 struct skb_shared_info *shinfo = skb_shinfo(skb); 1026 1000 int nr_frags = shinfo->nr_frags; 1027 1001 int i, err, start; 1028 1002 1029 1003 /* Check status of header. */ 1030 1004 err = gop->status; 1031 - if (unlikely(err)) { 1032 - pending_ring_idx_t index; 1033 - index = pending_index(netbk->pending_prod++); 1034 - txp = &pending_tx_info[pending_idx].req; 1035 - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 1036 - netbk->pending_ring[index] = pending_idx; 1037 - xenvif_put(vif); 1038 - } 1005 + if (unlikely(err)) 1006 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 1039 1007 1040 1008 /* Skip first skb fragment if it is on same page as header fragment. 
*/ 1041 1009 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); 1042 1010 1043 1011 for (i = start; i < nr_frags; i++) { 1044 1012 int j, newerr; 1045 - pending_ring_idx_t index; 1046 1013 1047 1014 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 1048 1015 ··· 1041 1028 if (likely(!newerr)) { 1042 1029 /* Had a previous error? Invalidate this fragment. */ 1043 1030 if (unlikely(err)) 1044 - xen_netbk_idx_release(netbk, pending_idx); 1031 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1045 1032 continue; 1046 1033 } 1047 1034 1048 1035 /* Error on this fragment: respond to client with an error. */ 1049 - txp = &netbk->pending_tx_info[pending_idx].req; 1050 - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 1051 - index = pending_index(netbk->pending_prod++); 1052 - netbk->pending_ring[index] = pending_idx; 1053 - xenvif_put(vif); 1036 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 1054 1037 1055 1038 /* Not the first error? Preceding frags already invalidated. */ 1056 1039 if (err) ··· 1054 1045 1055 1046 /* First error: invalidate header and preceding fragments. */ 1056 1047 pending_idx = *((u16 *)skb->data); 1057 - xen_netbk_idx_release(netbk, pending_idx); 1048 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1058 1049 for (j = start; j < i; j++) { 1059 1050 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1060 - xen_netbk_idx_release(netbk, pending_idx); 1051 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1061 1052 } 1062 1053 1063 1054 /* Remember the error: invalidate all subsequent fragments. 
*/ ··· 1091 1082 1092 1083 /* Take an extra reference to offset xen_netbk_idx_release */ 1093 1084 get_page(netbk->mmap_pages[pending_idx]); 1094 - xen_netbk_idx_release(netbk, pending_idx); 1085 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1095 1086 } 1096 1087 } 1097 1088 ··· 1104 1095 1105 1096 do { 1106 1097 if (unlikely(work_to_do-- <= 0)) { 1107 - netdev_dbg(vif->dev, "Missing extra info\n"); 1098 + netdev_err(vif->dev, "Missing extra info\n"); 1099 + netbk_fatal_tx_err(vif); 1108 1100 return -EBADR; 1109 1101 } 1110 1102 ··· 1114 1104 if (unlikely(!extra.type || 1115 1105 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1116 1106 vif->tx.req_cons = ++cons; 1117 - netdev_dbg(vif->dev, 1107 + netdev_err(vif->dev, 1118 1108 "Invalid extra type: %d\n", extra.type); 1109 + netbk_fatal_tx_err(vif); 1119 1110 return -EINVAL; 1120 1111 } 1121 1112 ··· 1132 1121 struct xen_netif_extra_info *gso) 1133 1122 { 1134 1123 if (!gso->u.gso.size) { 1135 - netdev_dbg(vif->dev, "GSO size must not be zero.\n"); 1124 + netdev_err(vif->dev, "GSO size must not be zero.\n"); 1125 + netbk_fatal_tx_err(vif); 1136 1126 return -EINVAL; 1137 1127 } 1138 1128 1139 1129 /* Currently only TCPv4 S.O. is supported. */ 1140 1130 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { 1141 - netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); 1131 + netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); 1132 + netbk_fatal_tx_err(vif); 1142 1133 return -EINVAL; 1143 1134 } 1144 1135 ··· 1277 1264 1278 1265 /* Get a netif from the list with work to do. */ 1279 1266 vif = poll_net_schedule_list(netbk); 1267 + /* This can sometimes happen because the test of 1268 + * list_empty(net_schedule_list) at the top of the 1269 + * loop is unlocked. Just go back and have another 1270 + * look. 
1271 + */ 1280 1272 if (!vif) 1281 1273 continue; 1274 + 1275 + if (vif->tx.sring->req_prod - vif->tx.req_cons > 1276 + XEN_NETIF_TX_RING_SIZE) { 1277 + netdev_err(vif->dev, 1278 + "Impossible number of requests. " 1279 + "req_prod %d, req_cons %d, size %ld\n", 1280 + vif->tx.sring->req_prod, vif->tx.req_cons, 1281 + XEN_NETIF_TX_RING_SIZE); 1282 + netbk_fatal_tx_err(vif); 1283 + continue; 1284 + } 1282 1285 1283 1286 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); 1284 1287 if (!work_to_do) { ··· 1323 1294 work_to_do = xen_netbk_get_extras(vif, extras, 1324 1295 work_to_do); 1325 1296 idx = vif->tx.req_cons; 1326 - if (unlikely(work_to_do < 0)) { 1327 - netbk_tx_err(vif, &txreq, idx); 1297 + if (unlikely(work_to_do < 0)) 1328 1298 continue; 1329 - } 1330 1299 } 1331 1300 1332 1301 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); 1333 - if (unlikely(ret < 0)) { 1334 - netbk_tx_err(vif, &txreq, idx - ret); 1302 + if (unlikely(ret < 0)) 1335 1303 continue; 1336 - } 1304 + 1337 1305 idx += ret; 1338 1306 1339 1307 if (unlikely(txreq.size < ETH_HLEN)) { ··· 1342 1316 1343 1317 /* No crossing a page as the payload mustn't fragment. */ 1344 1318 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { 1345 - netdev_dbg(vif->dev, 1319 + netdev_err(vif->dev, 1346 1320 "txreq.offset: %x, size: %u, end: %lu\n", 1347 1321 txreq.offset, txreq.size, 1348 1322 (txreq.offset&~PAGE_MASK) + txreq.size); 1349 - netbk_tx_err(vif, &txreq, idx); 1323 + netbk_fatal_tx_err(vif); 1350 1324 continue; 1351 1325 } 1352 1326 ··· 1374 1348 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1375 1349 1376 1350 if (netbk_set_skb_gso(vif, skb, gso)) { 1351 + /* Failure in netbk_set_skb_gso is fatal. */ 1377 1352 kfree_skb(skb); 1378 - netbk_tx_err(vif, &txreq, idx); 1379 1353 continue; 1380 1354 } 1381 1355 } ··· 1474 1448 txp->size -= data_len; 1475 1449 } else { 1476 1450 /* Schedule a response immediately. 
*/ 1477 - xen_netbk_idx_release(netbk, pending_idx); 1451 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1478 1452 } 1479 1453 1480 1454 if (txp->flags & XEN_NETTXF_csum_blank) ··· 1526 1500 xen_netbk_tx_submit(netbk); 1527 1501 } 1528 1502 1529 - static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) 1503 + static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, 1504 + u8 status) 1530 1505 { 1531 1506 struct xenvif *vif; 1532 1507 struct pending_tx_info *pending_tx_info; ··· 1541 1514 1542 1515 vif = pending_tx_info->vif; 1543 1516 1544 - make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); 1517 + make_tx_response(vif, &pending_tx_info->req, status); 1545 1518 1546 1519 index = pending_index(netbk->pending_prod++); 1547 1520 netbk->pending_ring[index] = pending_idx;
+2 -2
drivers/pinctrl/Kconfig
··· 184 184 select PINMUX 185 185 select PINCONF 186 186 187 - config PINCTRL_EXYNOS4 188 - bool "Pinctrl driver data for Exynos4 SoC" 187 + config PINCTRL_EXYNOS 188 + bool "Pinctrl driver data for Samsung EXYNOS SoCs" 189 189 depends on OF && GPIOLIB 190 190 select PINCTRL_SAMSUNG 191 191
+1 -1
drivers/pinctrl/Makefile
··· 36 36 obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o 37 37 obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o 38 38 obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o 39 - obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o 39 + obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o 40 40 obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o 41 41 obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o 42 42 obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
+18
drivers/pinctrl/pinctrl-sirf.c
··· 1246 1246 return of_iomap(np, 0); 1247 1247 } 1248 1248 1249 + static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc, 1250 + const struct of_phandle_args *gpiospec, 1251 + u32 *flags) 1252 + { 1253 + if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE) 1254 + return -EINVAL; 1255 + 1256 + if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc) 1257 + return -EINVAL; 1258 + 1259 + if (flags) 1260 + *flags = gpiospec->args[1]; 1261 + 1262 + return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE; 1263 + } 1264 + 1249 1265 static int sirfsoc_pinmux_probe(struct platform_device *pdev) 1250 1266 { 1251 1267 int ret; ··· 1752 1736 bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE; 1753 1737 bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL); 1754 1738 bank->chip.gc.of_node = np; 1739 + bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate; 1740 + bank->chip.gc.of_gpio_n_cells = 2; 1755 1741 bank->chip.regs = regs; 1756 1742 bank->id = i; 1757 1743 bank->is_marco = is_marco;
+8 -7
drivers/regulator/max77686.c
··· 379 379 }; 380 380 381 381 #ifdef CONFIG_OF 382 - static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, 382 + static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, 383 383 struct max77686_platform_data *pdata) 384 384 { 385 + struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); 385 386 struct device_node *pmic_np, *regulators_np; 386 387 struct max77686_regulator_data *rdata; 387 388 struct of_regulator_match rmatch; ··· 391 390 pmic_np = iodev->dev->of_node; 392 391 regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); 393 392 if (!regulators_np) { 394 - dev_err(iodev->dev, "could not find regulators sub-node\n"); 393 + dev_err(&pdev->dev, "could not find regulators sub-node\n"); 395 394 return -EINVAL; 396 395 } 397 396 398 397 pdata->num_regulators = ARRAY_SIZE(regulators); 399 - rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * 398 + rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * 400 399 pdata->num_regulators, GFP_KERNEL); 401 400 if (!rdata) { 402 - dev_err(iodev->dev, 401 + dev_err(&pdev->dev, 403 402 "could not allocate memory for regulator data\n"); 404 403 return -ENOMEM; 405 404 } ··· 408 407 rmatch.name = regulators[i].name; 409 408 rmatch.init_data = NULL; 410 409 rmatch.of_node = NULL; 411 - of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); 410 + of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1); 412 411 rdata[i].initdata = rmatch.init_data; 413 412 rdata[i].of_node = rmatch.of_node; 414 413 } ··· 418 417 return 0; 419 418 } 420 419 #else 421 - static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, 420 + static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, 422 421 struct max77686_platform_data *pdata) 423 422 { 424 423 return 0; ··· 441 440 } 442 441 443 442 if (iodev->dev->of_node) { 444 - ret = max77686_pmic_dt_parse_pdata(iodev, pdata); 443 + ret = max77686_pmic_dt_parse_pdata(pdev, pdata); 445 444 if (ret) 446 445 return ret; 447 446 }
+1 -2
drivers/regulator/max8907-regulator.c
··· 237 237 return -EINVAL; 238 238 } 239 239 240 - ret = of_regulator_match(pdev->dev.parent, regulators, 241 - max8907_matches, 240 + ret = of_regulator_match(&pdev->dev, regulators, max8907_matches, 242 241 ARRAY_SIZE(max8907_matches)); 243 242 if (ret < 0) { 244 243 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+19 -20
drivers/regulator/max8997.c
··· 934 934 }; 935 935 936 936 #ifdef CONFIG_OF 937 - static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, 937 + static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev, 938 938 struct max8997_platform_data *pdata, 939 939 struct device_node *pmic_np) 940 940 { ··· 944 944 gpio = of_get_named_gpio(pmic_np, 945 945 "max8997,pmic-buck125-dvs-gpios", i); 946 946 if (!gpio_is_valid(gpio)) { 947 - dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio); 947 + dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio); 948 948 return -EINVAL; 949 949 } 950 950 pdata->buck125_gpios[i] = gpio; ··· 952 952 return 0; 953 953 } 954 954 955 - static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, 955 + static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, 956 956 struct max8997_platform_data *pdata) 957 957 { 958 + struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); 958 959 struct device_node *pmic_np, *regulators_np, *reg_np; 959 960 struct max8997_regulator_data *rdata; 960 961 unsigned int i, dvs_voltage_nr = 1, ret; 961 962 962 963 pmic_np = iodev->dev->of_node; 963 964 if (!pmic_np) { 964 - dev_err(iodev->dev, "could not find pmic sub-node\n"); 965 + dev_err(&pdev->dev, "could not find pmic sub-node\n"); 965 966 return -ENODEV; 966 967 } 967 968 968 969 regulators_np = of_find_node_by_name(pmic_np, "regulators"); 969 970 if (!regulators_np) { 970 - dev_err(iodev->dev, "could not find regulators sub-node\n"); 971 + dev_err(&pdev->dev, "could not find regulators sub-node\n"); 971 972 return -EINVAL; 972 973 } 973 974 ··· 977 976 for_each_child_of_node(regulators_np, reg_np) 978 977 pdata->num_regulators++; 979 978 980 - rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * 979 + rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * 981 980 pdata->num_regulators, GFP_KERNEL); 982 981 if (!rdata) { 983 - dev_err(iodev->dev, "could not allocate memory for " 984 - "regulator data\n"); 982 + dev_err(&pdev->dev, "could not 
allocate memory for regulator data\n"); 985 983 return -ENOMEM; 986 984 } 987 985 ··· 991 991 break; 992 992 993 993 if (i == ARRAY_SIZE(regulators)) { 994 - dev_warn(iodev->dev, "don't know how to configure " 995 - "regulator %s\n", reg_np->name); 994 + dev_warn(&pdev->dev, "don't know how to configure regulator %s\n", 995 + reg_np->name); 996 996 continue; 997 997 } 998 998 999 999 rdata->id = i; 1000 - rdata->initdata = of_get_regulator_init_data( 1001 - iodev->dev, reg_np); 1000 + rdata->initdata = of_get_regulator_init_data(&pdev->dev, 1001 + reg_np); 1002 1002 rdata->reg_node = reg_np; 1003 1003 rdata++; 1004 1004 } ··· 1014 1014 1015 1015 if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs || 1016 1016 pdata->buck5_gpiodvs) { 1017 - ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); 1017 + ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np); 1018 1018 if (ret) 1019 1019 return -EINVAL; 1020 1020 ··· 1025 1025 } else { 1026 1026 if (pdata->buck125_default_idx >= 8) { 1027 1027 pdata->buck125_default_idx = 0; 1028 - dev_info(iodev->dev, "invalid value for " 1029 - "default dvs index, using 0 instead\n"); 1028 + dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n"); 1030 1029 } 1031 1030 } 1032 1031 ··· 1039 1040 if (of_property_read_u32_array(pmic_np, 1040 1041 "max8997,pmic-buck1-dvs-voltage", 1041 1042 pdata->buck1_voltage, dvs_voltage_nr)) { 1042 - dev_err(iodev->dev, "buck1 voltages not specified\n"); 1043 + dev_err(&pdev->dev, "buck1 voltages not specified\n"); 1043 1044 return -EINVAL; 1044 1045 } 1045 1046 1046 1047 if (of_property_read_u32_array(pmic_np, 1047 1048 "max8997,pmic-buck2-dvs-voltage", 1048 1049 pdata->buck2_voltage, dvs_voltage_nr)) { 1049 - dev_err(iodev->dev, "buck2 voltages not specified\n"); 1050 + dev_err(&pdev->dev, "buck2 voltages not specified\n"); 1050 1051 return -EINVAL; 1051 1052 } 1052 1053 1053 1054 if (of_property_read_u32_array(pmic_np, 1054 1055 "max8997,pmic-buck5-dvs-voltage", 1055 
1056 pdata->buck5_voltage, dvs_voltage_nr)) { 1056 - dev_err(iodev->dev, "buck5 voltages not specified\n"); 1057 + dev_err(&pdev->dev, "buck5 voltages not specified\n"); 1057 1058 return -EINVAL; 1058 1059 } 1059 1060 1060 1061 return 0; 1061 1062 } 1062 1063 #else 1063 - static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, 1064 + static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, 1064 1065 struct max8997_platform_data *pdata) 1065 1066 { 1066 1067 return 0; ··· 1084 1085 } 1085 1086 1086 1087 if (iodev->dev->of_node) { 1087 - ret = max8997_pmic_dt_parse_pdata(iodev, pdata); 1088 + ret = max8997_pmic_dt_parse_pdata(pdev, pdata); 1088 1089 if (ret) 1089 1090 return ret; 1090 1091 }
+1 -1
drivers/regulator/max8998.c
··· 65 65 .min = 2800000, .step = 100000, .max = 3100000, 66 66 }; 67 67 static const struct voltage_map_desc ldo10_voltage_map_desc = { 68 - .min = 95000, .step = 50000, .max = 1300000, 68 + .min = 950000, .step = 50000, .max = 1300000, 69 69 }; 70 70 static const struct voltage_map_desc ldo1213_voltage_map_desc = { 71 71 .min = 800000, .step = 100000, .max = 3300000,
+6
drivers/regulator/of_regulator.c
··· 120 120 if (!dev || !node) 121 121 return -EINVAL; 122 122 123 + for (i = 0; i < num_matches; i++) { 124 + struct of_regulator_match *match = &matches[i]; 125 + match->init_data = NULL; 126 + match->of_node = NULL; 127 + } 128 + 123 129 for_each_child_of_node(node, child) { 124 130 name = of_get_property(child, 125 131 "regulator-compatible", NULL);
+2 -2
drivers/regulator/s2mps11.c
··· 174 174 .min_uV = S2MPS11_BUCK_MIN2, \ 175 175 .uV_step = S2MPS11_BUCK_STEP2, \ 176 176 .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ 177 - .vsel_reg = S2MPS11_REG_B9CTRL2, \ 177 + .vsel_reg = S2MPS11_REG_B10CTRL2, \ 178 178 .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ 179 - .enable_reg = S2MPS11_REG_B9CTRL1, \ 179 + .enable_reg = S2MPS11_REG_B10CTRL1, \ 180 180 .enable_mask = S2MPS11_ENABLE_MASK \ 181 181 } 182 182
+2 -2
drivers/regulator/tps65217-regulator.c
··· 305 305 if (!regs) 306 306 return NULL; 307 307 308 - count = of_regulator_match(pdev->dev.parent, regs, 309 - reg_matches, TPS65217_NUM_REGULATOR); 308 + count = of_regulator_match(&pdev->dev, regs, reg_matches, 309 + TPS65217_NUM_REGULATOR); 310 310 of_node_put(regs); 311 311 if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) 312 312 return NULL;
+1 -1
drivers/regulator/tps65910-regulator.c
··· 998 998 return NULL; 999 999 } 1000 1000 1001 - ret = of_regulator_match(pdev->dev.parent, regulators, matches, count); 1001 + ret = of_regulator_match(&pdev->dev, regulators, matches, count); 1002 1002 if (ret < 0) { 1003 1003 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", 1004 1004 ret);
+3
drivers/rtc/rtc-isl1208.c
··· 506 506 { 507 507 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 508 508 struct i2c_client *client = data; 509 + struct rtc_device *rtc = i2c_get_clientdata(client); 509 510 int handled = 0, sr, err; 510 511 511 512 /* ··· 528 527 529 528 if (sr & ISL1208_REG_SR_ALM) { 530 529 dev_dbg(&client->dev, "alarm!\n"); 530 + 531 + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); 531 532 532 533 /* Clear the alarm */ 533 534 sr &= ~ISL1208_REG_SR_ALM;
+5 -3
drivers/rtc/rtc-pl031.c
··· 44 44 #define RTC_YMR 0x34 /* Year match register */ 45 45 #define RTC_YLR 0x38 /* Year data load register */ 46 46 47 + #define RTC_CR_EN (1 << 0) /* counter enable bit */ 47 48 #define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */ 48 49 49 50 #define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */ ··· 321 320 struct pl031_local *ldata; 322 321 struct pl031_vendor_data *vendor = id->data; 323 322 struct rtc_class_ops *ops = &vendor->ops; 324 - unsigned long time; 323 + unsigned long time, data; 325 324 326 325 ret = amba_request_regions(adev, NULL); 327 326 if (ret) ··· 346 345 dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev)); 347 346 dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev)); 348 347 348 + data = readl(ldata->base + RTC_CR); 349 349 /* Enable the clockwatch on ST Variants */ 350 350 if (vendor->clockwatch) 351 - writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, 352 - ldata->base + RTC_CR); 351 + data |= RTC_CR_CWEN; 352 + writel(data | RTC_CR_EN, ldata->base + RTC_CR); 353 353 354 354 /* 355 355 * On ST PL031 variants, the RTC reset value does not provide correct
+1 -1
drivers/rtc/rtc-vt8500.c
··· 137 137 return -EINVAL; 138 138 } 139 139 140 - writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) 140 + writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S) 141 141 | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) 142 142 | (bin2bcd(tm->tm_mday)) 143 143 | ((tm->tm_year >= 200) << DATE_CENTURY_S),
+2 -2
drivers/ssb/driver_chipcommon_pmu.c
··· 14 14 #include <linux/delay.h> 15 15 #include <linux/export.h> 16 16 #ifdef CONFIG_BCM47XX 17 - #include <asm/mach-bcm47xx/nvram.h> 17 + #include <bcm47xx_nvram.h> 18 18 #endif 19 19 20 20 #include "ssb_private.h" ··· 322 322 if (bus->bustype == SSB_BUSTYPE_SSB) { 323 323 #ifdef CONFIG_BCM47XX 324 324 char buf[20]; 325 - if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0) 325 + if (bcm47xx_nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0) 326 326 crystalfreq = simple_strtoul(buf, NULL, 0); 327 327 #endif 328 328 }
+12
drivers/ssb/driver_gpio.c
··· 174 174 175 175 return -1; 176 176 } 177 + 178 + int ssb_gpio_unregister(struct ssb_bus *bus) 179 + { 180 + if (ssb_chipco_available(&bus->chipco) || 181 + ssb_extif_available(&bus->extif)) { 182 + return gpiochip_remove(&bus->gpio); 183 + } else { 184 + SSB_WARN_ON(1); 185 + } 186 + 187 + return -1; 188 + }
+9
drivers/ssb/main.c
··· 443 443 444 444 void ssb_bus_unregister(struct ssb_bus *bus) 445 445 { 446 + int err; 447 + 448 + err = ssb_gpio_unregister(bus); 449 + if (err == -EBUSY) 450 + ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); 451 + else if (err) 452 + ssb_dprintk(KERN_ERR PFX 453 + "Can not unregister GPIO driver: %i\n", err); 454 + 446 455 ssb_buses_lock(); 447 456 ssb_devices_unregister(bus); 448 457 list_del(&bus->list);
+5
drivers/ssb/ssb_private.h
··· 252 252 253 253 #ifdef CONFIG_SSB_DRIVER_GPIO 254 254 extern int ssb_gpio_init(struct ssb_bus *bus); 255 + extern int ssb_gpio_unregister(struct ssb_bus *bus); 255 256 #else /* CONFIG_SSB_DRIVER_GPIO */ 256 257 static inline int ssb_gpio_init(struct ssb_bus *bus) 257 258 { 258 259 return -ENOTSUPP; 260 + } 261 + static inline int ssb_gpio_unregister(struct ssb_bus *bus) 262 + { 263 + return 0; 259 264 } 260 265 #endif /* CONFIG_SSB_DRIVER_GPIO */ 261 266
+7 -1
drivers/target/target_core_device.c
··· 941 941 942 942 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 943 943 { 944 + int block_size = dev->dev_attrib.block_size; 945 + 944 946 if (dev->export_count) { 945 947 pr_err("dev[%p]: Unable to change SE Device" 946 948 " fabric_max_sectors while export_count is %d\n", ··· 980 978 /* 981 979 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 982 980 */ 981 + if (!block_size) { 982 + block_size = 512; 983 + pr_warn("Defaulting to 512 for zero block_size\n"); 984 + } 983 985 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 984 - dev->dev_attrib.block_size); 986 + block_size); 985 987 986 988 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; 987 989 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+5
drivers/target/target_core_fabric_configfs.c
··· 754 754 return -EFAULT; 755 755 } 756 756 757 + if (!(dev->dev_flags & DF_CONFIGURED)) { 758 + pr_err("se_device not configured yet, cannot port link\n"); 759 + return -ENODEV; 760 + } 761 + 757 762 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; 758 763 se_tpg = container_of(to_config_group(tpg_ci), 759 764 struct se_portal_group, tpg_group);
+8 -10
drivers/target/target_core_sbc.c
··· 58 58 buf[7] = dev->dev_attrib.block_size & 0xff; 59 59 60 60 rbuf = transport_kmap_data_sg(cmd); 61 - if (!rbuf) 62 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 63 - 64 - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 65 - transport_kunmap_data_sg(cmd); 61 + if (rbuf) { 62 + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 63 + transport_kunmap_data_sg(cmd); 64 + } 66 65 67 66 target_complete_cmd(cmd, GOOD); 68 67 return 0; ··· 96 97 buf[14] = 0x80; 97 98 98 99 rbuf = transport_kmap_data_sg(cmd); 99 - if (!rbuf) 100 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 101 - 102 - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 103 - transport_kunmap_data_sg(cmd); 100 + if (rbuf) { 101 + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 102 + transport_kunmap_data_sg(cmd); 103 + } 104 104 105 105 target_complete_cmd(cmd, GOOD); 106 106 return 0;
+11 -33
drivers/target/target_core_spc.c
··· 641 641 642 642 out: 643 643 rbuf = transport_kmap_data_sg(cmd); 644 - if (!rbuf) 645 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 646 - 647 - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 648 - transport_kunmap_data_sg(cmd); 644 + if (rbuf) { 645 + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 646 + transport_kunmap_data_sg(cmd); 647 + } 649 648 650 649 if (!ret) 651 650 target_complete_cmd(cmd, GOOD); ··· 850 851 { 851 852 struct se_device *dev = cmd->se_dev; 852 853 char *cdb = cmd->t_task_cdb; 853 - unsigned char *buf, *map_buf; 854 + unsigned char buf[SE_MODE_PAGE_BUF], *rbuf; 854 855 int type = dev->transport->get_device_type(dev); 855 856 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); 856 857 bool dbd = !!(cdb[1] & 0x08); ··· 862 863 int ret; 863 864 int i; 864 865 865 - map_buf = transport_kmap_data_sg(cmd); 866 - if (!map_buf) 867 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 868 - /* 869 - * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we 870 - * know we actually allocated a full page. Otherwise, if the 871 - * data buffer is too small, allocate a temporary buffer so we 872 - * don't have to worry about overruns in all our INQUIRY 873 - * emulation handling. 874 - */ 875 - if (cmd->data_length < SE_MODE_PAGE_BUF && 876 - (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { 877 - buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL); 878 - if (!buf) { 879 - transport_kunmap_data_sg(cmd); 880 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 881 - } 882 - } else { 883 - buf = map_buf; 884 - } 866 + memset(buf, 0, SE_MODE_PAGE_BUF); 867 + 885 868 /* 886 869 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for 887 870 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). 
··· 915 934 if (page == 0x3f) { 916 935 if (subpage != 0x00 && subpage != 0xff) { 917 936 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); 918 - kfree(buf); 919 - transport_kunmap_data_sg(cmd); 920 937 return TCM_INVALID_CDB_FIELD; 921 938 } 922 939 ··· 951 972 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 952 973 page, subpage); 953 974 954 - transport_kunmap_data_sg(cmd); 955 975 return TCM_UNKNOWN_MODE_PAGE; 956 976 957 977 set_length: ··· 959 981 else 960 982 buf[0] = length - 1; 961 983 962 - if (buf != map_buf) { 963 - memcpy(map_buf, buf, cmd->data_length); 964 - kfree(buf); 984 + rbuf = transport_kmap_data_sg(cmd); 985 + if (rbuf) { 986 + memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); 987 + transport_kunmap_data_sg(cmd); 965 988 } 966 989 967 - transport_kunmap_data_sg(cmd); 968 990 target_complete_cmd(cmd, GOOD); 969 991 return 0; 970 992 }
+44
drivers/usb/core/hcd.c
··· 39 39 #include <asm/unaligned.h> 40 40 #include <linux/platform_device.h> 41 41 #include <linux/workqueue.h> 42 + #include <linux/pm_runtime.h> 42 43 43 44 #include <linux/usb.h> 44 45 #include <linux/usb/hcd.h> ··· 1026 1025 return retval; 1027 1026 } 1028 1027 1028 + /* 1029 + * usb_hcd_start_port_resume - a root-hub port is sending a resume signal 1030 + * @bus: the bus which the root hub belongs to 1031 + * @portnum: the port which is being resumed 1032 + * 1033 + * HCDs should call this function when they know that a resume signal is 1034 + * being sent to a root-hub port. The root hub will be prevented from 1035 + * going into autosuspend until usb_hcd_end_port_resume() is called. 1036 + * 1037 + * The bus's private lock must be held by the caller. 1038 + */ 1039 + void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum) 1040 + { 1041 + unsigned bit = 1 << portnum; 1042 + 1043 + if (!(bus->resuming_ports & bit)) { 1044 + bus->resuming_ports |= bit; 1045 + pm_runtime_get_noresume(&bus->root_hub->dev); 1046 + } 1047 + } 1048 + EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume); 1049 + 1050 + /* 1051 + * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal 1052 + * @bus: the bus which the root hub belongs to 1053 + * @portnum: the port which is being resumed 1054 + * 1055 + * HCDs should call this function when they know that a resume signal has 1056 + * stopped being sent to a root-hub port. The root hub will be allowed to 1057 + * autosuspend again. 1058 + * 1059 + * The bus's private lock must be held by the caller. 
1060 + */ 1061 + void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum) 1062 + { 1063 + unsigned bit = 1 << portnum; 1064 + 1065 + if (bus->resuming_ports & bit) { 1066 + bus->resuming_ports &= ~bit; 1067 + pm_runtime_put_noidle(&bus->root_hub->dev); 1068 + } 1069 + } 1070 + EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume); 1029 1071 1030 1072 /*-------------------------------------------------------------------------*/ 1031 1073
+52 -18
drivers/usb/core/hub.c
··· 2838 2838 EXPORT_SYMBOL_GPL(usb_enable_ltm); 2839 2839 2840 2840 #ifdef CONFIG_USB_SUSPEND 2841 + /* 2842 + * usb_disable_function_remotewakeup - disable usb3.0 2843 + * device's function remote wakeup 2844 + * @udev: target device 2845 + * 2846 + * Assume there's only one function on the USB 3.0 2847 + * device and disable remote wake for the first 2848 + * interface. FIXME if the interface association 2849 + * descriptor shows there's more than one function. 2850 + */ 2851 + static int usb_disable_function_remotewakeup(struct usb_device *udev) 2852 + { 2853 + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 2854 + USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, 2855 + USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, 2856 + USB_CTRL_SET_TIMEOUT); 2857 + } 2841 2858 2842 2859 /* 2843 2860 * usb_port_suspend - suspend a usb device's upstream port ··· 2972 2955 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", 2973 2956 port1, status); 2974 2957 /* paranoia: "should not happen" */ 2975 - if (udev->do_remote_wakeup) 2976 - (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 2977 - USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, 2978 - USB_DEVICE_REMOTE_WAKEUP, 0, 2979 - NULL, 0, 2980 - USB_CTRL_SET_TIMEOUT); 2958 + if (udev->do_remote_wakeup) { 2959 + if (!hub_is_superspeed(hub->hdev)) { 2960 + (void) usb_control_msg(udev, 2961 + usb_sndctrlpipe(udev, 0), 2962 + USB_REQ_CLEAR_FEATURE, 2963 + USB_RECIP_DEVICE, 2964 + USB_DEVICE_REMOTE_WAKEUP, 0, 2965 + NULL, 0, 2966 + USB_CTRL_SET_TIMEOUT); 2967 + } else 2968 + (void) usb_disable_function_remotewakeup(udev); 2969 + 2970 + } 2981 2971 2982 2972 /* Try to enable USB2 hardware LPM again */ 2983 2973 if (udev->usb2_hw_lpm_capable == 1) ··· 3076 3052 * udev->reset_resume 3077 3053 */ 3078 3054 } else if (udev->actconfig && !udev->reset_resume) { 3079 - le16_to_cpus(&devstatus); 3080 - if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { 3081 - status = usb_control_msg(udev, 3082 - usb_sndctrlpipe(udev, 0), 3083 - 
USB_REQ_CLEAR_FEATURE, 3055 + if (!hub_is_superspeed(udev->parent)) { 3056 + le16_to_cpus(&devstatus); 3057 + if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) 3058 + status = usb_control_msg(udev, 3059 + usb_sndctrlpipe(udev, 0), 3060 + USB_REQ_CLEAR_FEATURE, 3084 3061 USB_RECIP_DEVICE, 3085 - USB_DEVICE_REMOTE_WAKEUP, 0, 3086 - NULL, 0, 3087 - USB_CTRL_SET_TIMEOUT); 3088 - if (status) 3089 - dev_dbg(&udev->dev, 3090 - "disable remote wakeup, status %d\n", 3091 - status); 3062 + USB_DEVICE_REMOTE_WAKEUP, 0, 3063 + NULL, 0, 3064 + USB_CTRL_SET_TIMEOUT); 3065 + } else { 3066 + status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, 3067 + &devstatus); 3068 + le16_to_cpus(&devstatus); 3069 + if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP 3070 + | USB_INTRF_STAT_FUNC_RW)) 3071 + status = 3072 + usb_disable_function_remotewakeup(udev); 3092 3073 } 3074 + 3075 + if (status) 3076 + dev_dbg(&udev->dev, 3077 + "disable remote wakeup, status %d\n", 3078 + status); 3093 3079 status = 0; 3094 3080 } 3095 3081 return status;
+1
drivers/usb/host/ehci-hcd.c
··· 797 797 ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); 798 798 set_bit(i, &ehci->resuming_ports); 799 799 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); 800 + usb_hcd_start_port_resume(&hcd->self, i); 800 801 mod_timer(&hcd->rh_timer, ehci->reset_done[i]); 801 802 } 802 803 }
+8 -1
drivers/usb/host/ehci-hub.c
··· 649 649 status = STS_PCD; 650 650 } 651 651 } 652 - /* FIXME autosuspend idle root hubs */ 652 + 653 + /* If a resume is in progress, make sure it can finish */ 654 + if (ehci->resuming_ports) 655 + mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); 656 + 653 657 spin_unlock_irqrestore (&ehci->lock, flags); 654 658 return status ? retval : 0; 655 659 } ··· 855 851 /* resume signaling for 20 msec */ 856 852 ehci->reset_done[wIndex] = jiffies 857 853 + msecs_to_jiffies(20); 854 + usb_hcd_start_port_resume(&hcd->self, wIndex); 858 855 /* check the port again */ 859 856 mod_timer(&ehci_to_hcd(ehci)->rh_timer, 860 857 ehci->reset_done[wIndex]); ··· 867 862 clear_bit(wIndex, &ehci->suspended_ports); 868 863 set_bit(wIndex, &ehci->port_c_suspend); 869 864 ehci->reset_done[wIndex] = 0; 865 + usb_hcd_end_port_resume(&hcd->self, wIndex); 870 866 871 867 /* stop resume signaling */ 872 868 temp = ehci_readl(ehci, status_reg); ··· 956 950 ehci->reset_done[wIndex] = 0; 957 951 if (temp & PORT_PE) 958 952 set_bit(wIndex, &ehci->port_c_suspend); 953 + usb_hcd_end_port_resume(&hcd->self, wIndex); 959 954 } 960 955 961 956 if (temp & PORT_OC)
+30 -20
drivers/usb/host/ehci-q.c
··· 1197 1197 if (ehci->async_iaa || ehci->async_unlinking) 1198 1198 return; 1199 1199 1200 - /* Do all the waiting QHs at once */ 1201 - ehci->async_iaa = ehci->async_unlink; 1202 - ehci->async_unlink = NULL; 1203 - 1204 1200 /* If the controller isn't running, we don't have to wait for it */ 1205 1201 if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { 1202 + 1203 + /* Do all the waiting QHs */ 1204 + ehci->async_iaa = ehci->async_unlink; 1205 + ehci->async_unlink = NULL; 1206 + 1206 1207 if (!nested) /* Avoid recursion */ 1207 1208 end_unlink_async(ehci); 1208 1209 1209 1210 /* Otherwise start a new IAA cycle */ 1210 1211 } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { 1212 + struct ehci_qh *qh; 1213 + 1214 + /* Do only the first waiting QH (nVidia bug?) */ 1215 + qh = ehci->async_unlink; 1216 + ehci->async_iaa = qh; 1217 + ehci->async_unlink = qh->unlink_next; 1218 + qh->unlink_next = NULL; 1219 + 1211 1220 /* Make sure the unlinks are all visible to the hardware */ 1212 1221 wmb(); 1213 1222 ··· 1264 1255 } 1265 1256 } 1266 1257 1258 + static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); 1259 + 1267 1260 static void unlink_empty_async(struct ehci_hcd *ehci) 1268 1261 { 1269 - struct ehci_qh *qh, *next; 1270 - bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); 1262 + struct ehci_qh *qh; 1263 + struct ehci_qh *qh_to_unlink = NULL; 1271 1264 bool check_unlinks_later = false; 1265 + int count = 0; 1272 1266 1273 - /* Unlink all the async QHs that have been empty for a timer cycle */ 1274 - next = ehci->async->qh_next.qh; 1275 - while (next) { 1276 - qh = next; 1277 - next = qh->qh_next.qh; 1278 - 1267 + /* Find the last async QH which has been empty for a timer cycle */ 1268 + for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { 1279 1269 if (list_empty(&qh->qtd_list) && 1280 1270 qh->qh_state == QH_STATE_LINKED) { 1281 - if (!stopped && qh->unlink_cycle == 1282 - ehci->async_unlink_cycle) 1271 + ++count; 1272 + if 
(qh->unlink_cycle == ehci->async_unlink_cycle) 1283 1273 check_unlinks_later = true; 1284 1274 else 1285 - single_unlink_async(ehci, qh); 1275 + qh_to_unlink = qh; 1286 1276 } 1287 1277 } 1288 1278 1289 - /* Start a new IAA cycle if any QHs are waiting for it */ 1290 - if (ehci->async_unlink) 1291 - start_iaa_cycle(ehci, false); 1279 + /* If nothing else is being unlinked, unlink the last empty QH */ 1280 + if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) { 1281 + start_unlink_async(ehci, qh_to_unlink); 1282 + --count; 1283 + } 1292 1284 1293 - /* QHs that haven't been empty for long enough will be handled later */ 1294 - if (check_unlinks_later) { 1285 + /* Other QHs will be handled later */ 1286 + if (count > 0) { 1295 1287 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); 1296 1288 ++ehci->async_unlink_cycle; 1297 1289 }
+6 -3
drivers/usb/host/ehci-sched.c
··· 213 213 } 214 214 215 215 static const unsigned char 216 - max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; 216 + max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; 217 217 218 218 /* carryover low/fullspeed bandwidth that crosses uframe boundries */ 219 219 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) ··· 2212 2212 } 2213 2213 ehci->now_frame = now_frame; 2214 2214 2215 + frame = ehci->last_iso_frame; 2215 2216 for (;;) { 2216 2217 union ehci_shadow q, *q_p; 2217 2218 __hc32 type, *hw_p; 2218 2219 2219 - frame = ehci->last_iso_frame; 2220 2220 restart: 2221 2221 /* scan each element in frame's queue for completions */ 2222 2222 q_p = &ehci->pshadow [frame]; ··· 2321 2321 /* Stop when we have reached the current frame */ 2322 2322 if (frame == now_frame) 2323 2323 break; 2324 - ehci->last_iso_frame = (frame + 1) & fmask; 2324 + 2325 + /* The last frame may still have active siTDs */ 2326 + ehci->last_iso_frame = frame; 2327 + frame = (frame + 1) & fmask; 2325 2328 } 2326 2329 }
+15 -14
drivers/usb/host/ehci-timer.c
··· 113 113 114 114 if (want != actual) { 115 115 116 - /* Poll again later, but give up after about 20 ms */ 117 - if (ehci->ASS_poll_count++ < 20) { 118 - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); 119 - return; 120 - } 121 - ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", 122 - want, actual); 116 + /* Poll again later */ 117 + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); 118 + ++ehci->ASS_poll_count; 119 + return; 123 120 } 121 + 122 + if (ehci->ASS_poll_count > 20) 123 + ehci_dbg(ehci, "ASS poll count reached %d\n", 124 + ehci->ASS_poll_count); 124 125 ehci->ASS_poll_count = 0; 125 126 126 127 /* The status is up-to-date; restart or stop the schedule as needed */ ··· 160 159 161 160 if (want != actual) { 162 161 163 - /* Poll again later, but give up after about 20 ms */ 164 - if (ehci->PSS_poll_count++ < 20) { 165 - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); 166 - return; 167 - } 168 - ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", 169 - want, actual); 162 + /* Poll again later */ 163 + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); 164 + return; 170 165 } 166 + 167 + if (ehci->PSS_poll_count > 20) 168 + ehci_dbg(ehci, "PSS poll count reached %d\n", 169 + ehci->PSS_poll_count); 171 170 ehci->PSS_poll_count = 0; 172 171 173 172 /* The status is up-to-date; restart or stop the schedule as needed */
+1
drivers/usb/host/pci-quirks.c
··· 780 780 "defaulting to EHCI.\n"); 781 781 dev_warn(&xhci_pdev->dev, 782 782 "USB 3.0 devices will work at USB 2.0 speeds.\n"); 783 + usb_disable_xhci_ports(xhci_pdev); 783 784 return; 784 785 } 785 786
+3
drivers/usb/host/uhci-hub.c
··· 116 116 } 117 117 } 118 118 clear_bit(port, &uhci->resuming_ports); 119 + usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port); 119 120 } 120 121 121 122 /* Wait for the UHCI controller in HP's iLO2 server management chip. ··· 168 167 set_bit(port, &uhci->resuming_ports); 169 168 uhci->ports_timeout = jiffies + 170 169 msecs_to_jiffies(25); 170 + usb_hcd_start_port_resume( 171 + &uhci_to_hcd(uhci)->self, port); 171 172 172 173 /* Make sure we see the port again 173 174 * after the resuming period is over. */
+9 -4
drivers/usb/host/xhci-ring.c
··· 1698 1698 faked_port_index + 1); 1699 1699 if (slot_id && xhci->devs[slot_id]) 1700 1700 xhci_ring_device(xhci, slot_id); 1701 - if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { 1701 + if (bus_state->port_remote_wakeup & (1 << faked_port_index)) { 1702 1702 bus_state->port_remote_wakeup &= 1703 1703 ~(1 << faked_port_index); 1704 1704 xhci_test_and_clear_bit(xhci, port_array, ··· 2589 2589 (trb_comp_code != COMP_STALL && 2590 2590 trb_comp_code != COMP_BABBLE)) 2591 2591 xhci_urb_free_priv(xhci, urb_priv); 2592 + else 2593 + kfree(urb_priv); 2592 2594 2593 2595 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2594 2596 if ((urb->actual_length != urb->transfer_buffer_length && ··· 3110 3108 * running_total. 3111 3109 */ 3112 3110 packets_transferred = (running_total + trb_buff_len) / 3113 - usb_endpoint_maxp(&urb->ep->desc); 3111 + GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); 3114 3112 3115 3113 if ((total_packet_count - packets_transferred) > 31) 3116 3114 return 31 << 17; ··· 3644 3642 td_len = urb->iso_frame_desc[i].length; 3645 3643 td_remain_len = td_len; 3646 3644 total_packet_count = DIV_ROUND_UP(td_len, 3647 - usb_endpoint_maxp(&urb->ep->desc)); 3645 + GET_MAX_PACKET( 3646 + usb_endpoint_maxp(&urb->ep->desc))); 3648 3647 /* A zero-length transfer still involves at least one packet. */ 3649 3648 if (total_packet_count == 0) 3650 3649 total_packet_count++; ··· 3667 3664 td = urb_priv->td[i]; 3668 3665 for (j = 0; j < trbs_per_td; j++) { 3669 3666 u32 remainder = 0; 3670 - field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3667 + field = 0; 3671 3668 3672 3669 if (first_trb) { 3670 + field = TRB_TBC(burst_count) | 3671 + TRB_TLBPC(residue); 3673 3672 /* Queue the isoc TRB */ 3674 3673 field |= TRB_TYPE(TRB_ISOC); 3675 3674 /* Assume URB_ISO_ASAP is set */
+1
drivers/usb/serial/cp210x.c
··· 60 60 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ 61 61 { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ 62 62 { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ 63 + { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ 63 64 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ 64 65 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ 65 66 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
+2
drivers/usb/serial/ftdi_sio.c
··· 584 584 /* 585 585 * ELV devices: 586 586 */ 587 + { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, 587 588 { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, 588 589 { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, 589 590 { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, ··· 671 670 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, 672 671 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, 673 672 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, 673 + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, 674 674 { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, 675 675 { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, 676 676 { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
+8 -1
drivers/usb/serial/ftdi_sio_ids.h
··· 147 147 #define XSENS_CONVERTER_6_PID 0xD38E 148 148 #define XSENS_CONVERTER_7_PID 0xD38F 149 149 150 + /** 151 + * Zolix (www.zolix.com.cb) product ids 152 + */ 153 + #define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */ 154 + 150 155 /* 151 156 * NDI (www.ndigital.com) product ids 152 157 */ ··· 209 204 210 205 /* 211 206 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 212 - * All of these devices use FTDI's vendor ID (0x0403). 207 + * Almost all of these devices use FTDI's vendor ID (0x0403). 213 208 * Further IDs taken from ELV Windows .inf file. 214 209 * 215 210 * The previously included PID for the UO 100 module was incorrect. ··· 217 212 * 218 213 * Armin Laeuger originally sent the PID for the UM 100 module. 219 214 */ 215 + #define FTDI_ELV_VID 0x1B1F /* ELV AG */ 216 + #define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */ 220 217 #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ 221 218 #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ 222 219 #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+13
drivers/usb/serial/option.c
··· 242 242 #define TELIT_PRODUCT_CC864_DUAL 0x1005 243 243 #define TELIT_PRODUCT_CC864_SINGLE 0x1006 244 244 #define TELIT_PRODUCT_DE910_DUAL 0x1010 245 + #define TELIT_PRODUCT_LE920 0x1200 245 246 246 247 /* ZTE PRODUCTS */ 247 248 #define ZTE_VENDOR_ID 0x19d2 ··· 454 453 #define TPLINK_VENDOR_ID 0x2357 455 454 #define TPLINK_PRODUCT_MA180 0x0201 456 455 456 + /* Changhong products */ 457 + #define CHANGHONG_VENDOR_ID 0x2077 458 + #define CHANGHONG_PRODUCT_CH690 0x7001 459 + 457 460 /* some devices interfaces need special handling due to a number of reasons */ 458 461 enum option_blacklist_reason { 459 462 OPTION_BLACKLIST_NONE = 0, ··· 537 532 538 533 static const struct option_blacklist_info zte_1255_blacklist = { 539 534 .reserved = BIT(3) | BIT(4), 535 + }; 536 + 537 + static const struct option_blacklist_info telit_le920_blacklist = { 538 + .sendsetup = BIT(0), 539 + .reserved = BIT(1) | BIT(5), 540 540 }; 541 541 542 542 static const struct usb_device_id option_ids[] = { ··· 794 784 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, 795 785 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, 796 786 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, 787 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 788 + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, 797 789 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 798 790 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), 799 791 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, ··· 1330 1318 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, 1331 1319 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 1332 1320 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1321 + { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, 1333 1322 { } /* Terminating entry */ 1334 1323 }; 1335 1324 MODULE_DEVICE_TABLE(usb, option_ids);
+1
drivers/usb/serial/qcserial.c
··· 53 53 {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ 54 54 {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ 55 55 {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ 56 + {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ 56 57 57 58 /* Gobi 2000 devices */ 58 59 {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
+74 -2
drivers/usb/storage/initializers.c
··· 92 92 return 0; 93 93 } 94 94 95 - /* This places the HUAWEI E220 devices in multi-port mode */ 96 - int usb_stor_huawei_e220_init(struct us_data *us) 95 + /* This places the HUAWEI usb dongles in multi-port mode */ 96 + static int usb_stor_huawei_feature_init(struct us_data *us) 97 97 { 98 98 int result; 99 99 ··· 103 103 0x01, 0x0, NULL, 0x0, 1000); 104 104 US_DEBUGP("Huawei mode set result is %d\n", result); 105 105 return 0; 106 + } 107 + 108 + /* 109 + * It will send a scsi switch command called rewind' to huawei dongle. 110 + * When the dongle receives this command at the first time, 111 + * it will reboot immediately. After rebooted, it will ignore this command. 112 + * So it is unnecessary to read its response. 113 + */ 114 + static int usb_stor_huawei_scsi_init(struct us_data *us) 115 + { 116 + int result = 0; 117 + int act_len = 0; 118 + struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; 119 + char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, 120 + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 121 + 122 + bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); 123 + bcbw->Tag = 0; 124 + bcbw->DataTransferLength = 0; 125 + bcbw->Flags = bcbw->Lun = 0; 126 + bcbw->Length = sizeof(rewind_cmd); 127 + memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); 128 + memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); 129 + 130 + result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, 131 + US_BULK_CB_WRAP_LEN, &act_len); 132 + US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); 133 + return result; 134 + } 135 + 136 + /* 137 + * It tries to find the supported Huawei USB dongles. 138 + * In Huawei, they assign the following product IDs 139 + * for all of their mobile broadband dongles, 140 + * including the new dongles in the future. 141 + * So if the product ID is not included in this list, 142 + * it means it is not Huawei's mobile broadband dongles. 
143 + */ 144 + static int usb_stor_huawei_dongles_pid(struct us_data *us) 145 + { 146 + struct usb_interface_descriptor *idesc; 147 + int idProduct; 148 + 149 + idesc = &us->pusb_intf->cur_altsetting->desc; 150 + idProduct = us->pusb_dev->descriptor.idProduct; 151 + /* The first port is CDROM, 152 + * means the dongle in the single port mode, 153 + * and a switch command is required to be sent. */ 154 + if (idesc && idesc->bInterfaceNumber == 0) { 155 + if ((idProduct == 0x1001) 156 + || (idProduct == 0x1003) 157 + || (idProduct == 0x1004) 158 + || (idProduct >= 0x1401 && idProduct <= 0x1500) 159 + || (idProduct >= 0x1505 && idProduct <= 0x1600) 160 + || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { 161 + return 1; 162 + } 163 + } 164 + return 0; 165 + } 166 + 167 + int usb_stor_huawei_init(struct us_data *us) 168 + { 169 + int result = 0; 170 + 171 + if (usb_stor_huawei_dongles_pid(us)) { 172 + if (us->pusb_dev->descriptor.idProduct >= 0x1446) 173 + result = usb_stor_huawei_scsi_init(us); 174 + else 175 + result = usb_stor_huawei_feature_init(us); 176 + } 177 + return result; 106 178 }
+2 -2
drivers/usb/storage/initializers.h
··· 46 46 * flash reader */ 47 47 int usb_stor_ucr61s2b_init(struct us_data *us); 48 48 49 - /* This places the HUAWEI E220 devices in multi-port mode */ 50 - int usb_stor_huawei_e220_init(struct us_data *us); 49 + /* This places the HUAWEI usb dongles in multi-port mode */ 50 + int usb_stor_huawei_init(struct us_data *us);
+2 -327
drivers/usb/storage/unusual_devs.h
··· 1527 1527 /* Reported by fangxiaozhi <huananhu@huawei.com> 1528 1528 * This brings the HUAWEI data card devices into multi-port mode 1529 1529 */ 1530 - UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, 1530 + UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, 1531 1531 "HUAWEI MOBILE", 1532 1532 "Mass Storage", 1533 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1534 - 0), 1535 - UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, 1536 - "HUAWEI MOBILE", 1537 - "Mass Storage", 1538 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1539 - 0), 1540 - UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, 1541 - "HUAWEI MOBILE", 1542 - "Mass Storage", 1543 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1544 - 0), 1545 - UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, 1546 - "HUAWEI MOBILE", 1547 - "Mass Storage", 1548 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1549 - 0), 1550 - UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, 1551 - "HUAWEI MOBILE", 1552 - "Mass Storage", 1553 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1554 - 0), 1555 - UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, 1556 - "HUAWEI MOBILE", 1557 - "Mass Storage", 1558 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1559 - 0), 1560 - UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, 1561 - "HUAWEI MOBILE", 1562 - "Mass Storage", 1563 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1564 - 0), 1565 - UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, 1566 - "HUAWEI MOBILE", 1567 - "Mass Storage", 1568 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1569 - 0), 1570 - UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, 1571 - "HUAWEI MOBILE", 1572 - "Mass Storage", 1573 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1574 - 0), 1575 - UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, 1576 - "HUAWEI MOBILE", 1577 - "Mass Storage", 1578 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1579 - 0), 1580 - UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 
0x0000, 1581 - "HUAWEI MOBILE", 1582 - "Mass Storage", 1583 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1584 - 0), 1585 - UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, 1586 - "HUAWEI MOBILE", 1587 - "Mass Storage", 1588 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1589 - 0), 1590 - UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, 1591 - "HUAWEI MOBILE", 1592 - "Mass Storage", 1593 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1594 - 0), 1595 - UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, 1596 - "HUAWEI MOBILE", 1597 - "Mass Storage", 1598 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1599 - 0), 1600 - UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, 1601 - "HUAWEI MOBILE", 1602 - "Mass Storage", 1603 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1604 - 0), 1605 - UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, 1606 - "HUAWEI MOBILE", 1607 - "Mass Storage", 1608 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1609 - 0), 1610 - UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, 1611 - "HUAWEI MOBILE", 1612 - "Mass Storage", 1613 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1614 - 0), 1615 - UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, 1616 - "HUAWEI MOBILE", 1617 - "Mass Storage", 1618 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1619 - 0), 1620 - UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, 1621 - "HUAWEI MOBILE", 1622 - "Mass Storage", 1623 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1624 - 0), 1625 - UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, 1626 - "HUAWEI MOBILE", 1627 - "Mass Storage", 1628 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1629 - 0), 1630 - UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, 1631 - "HUAWEI MOBILE", 1632 - "Mass Storage", 1633 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1634 - 0), 1635 - UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, 1636 - "HUAWEI MOBILE", 1637 - "Mass Storage", 1638 - USB_SC_DEVICE, 
USB_PR_DEVICE, usb_stor_huawei_e220_init, 1639 - 0), 1640 - UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, 1641 - "HUAWEI MOBILE", 1642 - "Mass Storage", 1643 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1644 - 0), 1645 - UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, 1646 - "HUAWEI MOBILE", 1647 - "Mass Storage", 1648 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1649 - 0), 1650 - UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, 1651 - "HUAWEI MOBILE", 1652 - "Mass Storage", 1653 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1654 - 0), 1655 - UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, 1656 - "HUAWEI MOBILE", 1657 - "Mass Storage", 1658 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1659 - 0), 1660 - UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, 1661 - "HUAWEI MOBILE", 1662 - "Mass Storage", 1663 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1664 - 0), 1665 - UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, 1666 - "HUAWEI MOBILE", 1667 - "Mass Storage", 1668 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1669 - 0), 1670 - UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, 1671 - "HUAWEI MOBILE", 1672 - "Mass Storage", 1673 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1674 - 0), 1675 - UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, 1676 - "HUAWEI MOBILE", 1677 - "Mass Storage", 1678 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1679 - 0), 1680 - UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, 1681 - "HUAWEI MOBILE", 1682 - "Mass Storage", 1683 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1684 - 0), 1685 - UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, 1686 - "HUAWEI MOBILE", 1687 - "Mass Storage", 1688 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1689 - 0), 1690 - UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, 1691 - "HUAWEI MOBILE", 1692 - "Mass Storage", 1693 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1694 - 0), 1695 - UNUSUAL_DEV( 0x12d1, 
0x141F, 0x0000, 0x0000, 1696 - "HUAWEI MOBILE", 1697 - "Mass Storage", 1698 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1699 - 0), 1700 - UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, 1701 - "HUAWEI MOBILE", 1702 - "Mass Storage", 1703 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1704 - 0), 1705 - UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, 1706 - "HUAWEI MOBILE", 1707 - "Mass Storage", 1708 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1709 - 0), 1710 - UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, 1711 - "HUAWEI MOBILE", 1712 - "Mass Storage", 1713 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1714 - 0), 1715 - UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, 1716 - "HUAWEI MOBILE", 1717 - "Mass Storage", 1718 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1719 - 0), 1720 - UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, 1721 - "HUAWEI MOBILE", 1722 - "Mass Storage", 1723 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1724 - 0), 1725 - UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, 1726 - "HUAWEI MOBILE", 1727 - "Mass Storage", 1728 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1729 - 0), 1730 - UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, 1731 - "HUAWEI MOBILE", 1732 - "Mass Storage", 1733 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1734 - 0), 1735 - UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, 1736 - "HUAWEI MOBILE", 1737 - "Mass Storage", 1738 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1739 - 0), 1740 - UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, 1741 - "HUAWEI MOBILE", 1742 - "Mass Storage", 1743 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1744 - 0), 1745 - UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, 1746 - "HUAWEI MOBILE", 1747 - "Mass Storage", 1748 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1749 - 0), 1750 - UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, 1751 - "HUAWEI MOBILE", 1752 - "Mass Storage", 1753 - 
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1754 - 0), 1755 - UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, 1756 - "HUAWEI MOBILE", 1757 - "Mass Storage", 1758 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1759 - 0), 1760 - UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, 1761 - "HUAWEI MOBILE", 1762 - "Mass Storage", 1763 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1764 - 0), 1765 - UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, 1766 - "HUAWEI MOBILE", 1767 - "Mass Storage", 1768 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1769 - 0), 1770 - UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, 1771 - "HUAWEI MOBILE", 1772 - "Mass Storage", 1773 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1774 - 0), 1775 - UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, 1776 - "HUAWEI MOBILE", 1777 - "Mass Storage", 1778 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1779 - 0), 1780 - UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, 1781 - "HUAWEI MOBILE", 1782 - "Mass Storage", 1783 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1784 - 0), 1785 - UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, 1786 - "HUAWEI MOBILE", 1787 - "Mass Storage", 1788 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1789 - 0), 1790 - UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, 1791 - "HUAWEI MOBILE", 1792 - "Mass Storage", 1793 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1794 - 0), 1795 - UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, 1796 - "HUAWEI MOBILE", 1797 - "Mass Storage", 1798 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1799 - 0), 1800 - UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, 1801 - "HUAWEI MOBILE", 1802 - "Mass Storage", 1803 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1804 - 0), 1805 - UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, 1806 - "HUAWEI MOBILE", 1807 - "Mass Storage", 1808 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1809 - 0), 1810 - 
UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, 1811 - "HUAWEI MOBILE", 1812 - "Mass Storage", 1813 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1814 - 0), 1815 - UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, 1816 - "HUAWEI MOBILE", 1817 - "Mass Storage", 1818 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1819 - 0), 1820 - UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, 1821 - "HUAWEI MOBILE", 1822 - "Mass Storage", 1823 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1824 - 0), 1825 - UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, 1826 - "HUAWEI MOBILE", 1827 - "Mass Storage", 1828 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1829 - 0), 1830 - UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, 1831 - "HUAWEI MOBILE", 1832 - "Mass Storage", 1833 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1834 - 0), 1835 - UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, 1836 - "HUAWEI MOBILE", 1837 - "Mass Storage", 1838 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1839 - 0), 1840 - UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, 1841 - "HUAWEI MOBILE", 1842 - "Mass Storage", 1843 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1844 - 0), 1845 - UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, 1846 - "HUAWEI MOBILE", 1847 - "Mass Storage", 1848 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1849 - 0), 1850 - UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, 1851 - "HUAWEI MOBILE", 1852 - "Mass Storage", 1853 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1854 - 0), 1855 - UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, 1856 - "HUAWEI MOBILE", 1857 - "Mass Storage", 1858 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1533 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 1859 1534 0), 1860 1535 1861 1536 /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+12
drivers/usb/storage/usb.c
··· 120 120 .useTransport = use_transport, \ 121 121 } 122 122 123 + #define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ 124 + vendor_name, product_name, use_protocol, use_transport, \ 125 + init_function, Flags) \ 126 + { \ 127 + .vendorName = vendor_name, \ 128 + .productName = product_name, \ 129 + .useProtocol = use_protocol, \ 130 + .useTransport = use_transport, \ 131 + .initFunction = init_function, \ 132 + } 133 + 123 134 static struct us_unusual_dev us_unusual_dev_list[] = { 124 135 # include "unusual_devs.h" 125 136 { } /* Terminating entry */ ··· 142 131 #undef UNUSUAL_DEV 143 132 #undef COMPLIANT_DEV 144 133 #undef USUAL_DEV 134 + #undef UNUSUAL_VENDOR_INTF 145 135 146 136 #ifdef CONFIG_LOCKDEP 147 137
+15
drivers/usb/storage/usual-tables.c
··· 41 41 #define USUAL_DEV(useProto, useTrans) \ 42 42 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } 43 43 44 + /* Define the device is matched with Vendor ID and interface descriptors */ 45 + #define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ 46 + vendorName, productName, useProtocol, useTransport, \ 47 + initFunction, flags) \ 48 + { \ 49 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ 50 + | USB_DEVICE_ID_MATCH_VENDOR, \ 51 + .idVendor = (id_vendor), \ 52 + .bInterfaceClass = (cl), \ 53 + .bInterfaceSubClass = (sc), \ 54 + .bInterfaceProtocol = (pr), \ 55 + .driver_info = (flags) \ 56 + } 57 + 44 58 struct usb_device_id usb_storage_usb_ids[] = { 45 59 # include "unusual_devs.h" 46 60 { } /* Terminating entry */ ··· 64 50 #undef UNUSUAL_DEV 65 51 #undef COMPLIANT_DEV 66 52 #undef USUAL_DEV 53 + #undef UNUSUAL_VENDOR_INTF 67 54 68 55 /* 69 56 * The table of devices to ignore
+28 -13
drivers/vhost/net.c
··· 165 165 } 166 166 167 167 /* Caller must have TX VQ lock */ 168 - static void tx_poll_start(struct vhost_net *net, struct socket *sock) 168 + static int tx_poll_start(struct vhost_net *net, struct socket *sock) 169 169 { 170 + int ret; 171 + 170 172 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) 171 - return; 172 - vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); 173 - net->tx_poll_state = VHOST_NET_POLL_STARTED; 173 + return 0; 174 + ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); 175 + if (!ret) 176 + net->tx_poll_state = VHOST_NET_POLL_STARTED; 177 + return ret; 174 178 } 175 179 176 180 /* In case of DMA done not in order in lower device driver for some reason. ··· 646 642 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); 647 643 } 648 644 649 - static void vhost_net_enable_vq(struct vhost_net *n, 645 + static int vhost_net_enable_vq(struct vhost_net *n, 650 646 struct vhost_virtqueue *vq) 651 647 { 652 648 struct socket *sock; 649 + int ret; 653 650 654 651 sock = rcu_dereference_protected(vq->private_data, 655 652 lockdep_is_held(&vq->mutex)); 656 653 if (!sock) 657 - return; 654 + return 0; 658 655 if (vq == n->vqs + VHOST_NET_VQ_TX) { 659 656 n->tx_poll_state = VHOST_NET_POLL_STOPPED; 660 - tx_poll_start(n, sock); 657 + ret = tx_poll_start(n, sock); 661 658 } else 662 - vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); 659 + ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); 660 + 661 + return ret; 663 662 } 664 663 665 664 static struct socket *vhost_net_stop_vq(struct vhost_net *n, ··· 834 827 r = PTR_ERR(ubufs); 835 828 goto err_ubufs; 836 829 } 837 - oldubufs = vq->ubufs; 838 - vq->ubufs = ubufs; 830 + 839 831 vhost_net_disable_vq(n, vq); 840 832 rcu_assign_pointer(vq->private_data, sock); 841 - vhost_net_enable_vq(n, vq); 842 - 843 833 r = vhost_init_used(vq); 844 834 if (r) 845 - goto err_vq; 835 + goto err_used; 836 + r = vhost_net_enable_vq(n, vq); 837 + if (r) 838 + goto err_used; 839 + 840 + 
oldubufs = vq->ubufs; 841 + vq->ubufs = ubufs; 846 842 847 843 n->tx_packets = 0; 848 844 n->tx_zcopy_err = 0; ··· 869 859 mutex_unlock(&n->dev.mutex); 870 860 return 0; 871 861 862 + err_used: 863 + rcu_assign_pointer(vq->private_data, oldsock); 864 + vhost_net_enable_vq(n, vq); 865 + if (ubufs) 866 + vhost_ubuf_put_and_wait(ubufs); 872 867 err_ubufs: 873 868 fput(sock->file); 874 869 err_vq:
+1 -3
drivers/vhost/tcm_vhost.c
··· 575 575 576 576 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ 577 577 tv_tpg = vs->vs_tpg; 578 - if (unlikely(!tv_tpg)) { 579 - pr_err("%s endpoint not set\n", __func__); 578 + if (unlikely(!tv_tpg)) 580 579 return; 581 - } 582 580 583 581 mutex_lock(&vq->mutex); 584 582 vhost_disable_notify(&vs->dev, vq);
+15 -3
drivers/vhost/vhost.c
··· 77 77 init_poll_funcptr(&poll->table, vhost_poll_func); 78 78 poll->mask = mask; 79 79 poll->dev = dev; 80 + poll->wqh = NULL; 80 81 81 82 vhost_work_init(&poll->work, fn); 82 83 } 83 84 84 85 /* Start polling a file. We add ourselves to file's wait queue. The caller must 85 86 * keep a reference to a file until after vhost_poll_stop is called. */ 86 - void vhost_poll_start(struct vhost_poll *poll, struct file *file) 87 + int vhost_poll_start(struct vhost_poll *poll, struct file *file) 87 88 { 88 89 unsigned long mask; 90 + int ret = 0; 89 91 90 92 mask = file->f_op->poll(file, &poll->table); 91 93 if (mask) 92 94 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 95 + if (mask & POLLERR) { 96 + if (poll->wqh) 97 + remove_wait_queue(poll->wqh, &poll->wait); 98 + ret = -EINVAL; 99 + } 100 + 101 + return ret; 93 102 } 94 103 95 104 /* Stop polling a file. After this function returns, it becomes safe to drop the 96 105 * file reference. You must also flush afterwards. */ 97 106 void vhost_poll_stop(struct vhost_poll *poll) 98 107 { 99 - remove_wait_queue(poll->wqh, &poll->wait); 108 + if (poll->wqh) { 109 + remove_wait_queue(poll->wqh, &poll->wait); 110 + poll->wqh = NULL; 111 + } 100 112 } 101 113 102 114 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, ··· 804 792 fput(filep); 805 793 806 794 if (pollstart && vq->handle_kick) 807 - vhost_poll_start(&vq->poll, vq->kick); 795 + r = vhost_poll_start(&vq->poll, vq->kick); 808 796 809 797 mutex_unlock(&vq->mutex); 810 798
+1 -1
drivers/vhost/vhost.h
··· 42 42 43 43 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, 44 44 unsigned long mask, struct vhost_dev *dev); 45 - void vhost_poll_start(struct vhost_poll *poll, struct file *file); 45 + int vhost_poll_start(struct vhost_poll *poll, struct file *file); 46 46 void vhost_poll_stop(struct vhost_poll *poll); 47 47 void vhost_poll_flush(struct vhost_poll *poll); 48 48 void vhost_poll_queue(struct vhost_poll *poll);
+2 -2
drivers/xen/events.c
··· 840 840 841 841 if (irq == -1) { 842 842 irq = xen_allocate_irq_dynamic(); 843 - if (irq == -1) 843 + if (irq < 0) 844 844 goto out; 845 845 846 846 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, ··· 944 944 945 945 if (irq == -1) { 946 946 irq = xen_allocate_irq_dynamic(); 947 - if (irq == -1) 947 + if (irq < 0) 948 948 goto out; 949 949 950 950 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+7 -7
drivers/xen/xen-pciback/pciback_ops.c
··· 135 135 struct pci_dev *dev, struct xen_pci_op *op) 136 136 { 137 137 struct xen_pcibk_dev_data *dev_data; 138 - int otherend = pdev->xdev->otherend_id; 139 138 int status; 140 139 141 140 if (unlikely(verbose_request)) ··· 143 144 status = pci_enable_msi(dev); 144 145 145 146 if (status) { 146 - printk(KERN_ERR "error enable msi for guest %x status %x\n", 147 - otherend, status); 147 + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", 148 + pci_name(dev), pdev->xdev->otherend_id, 149 + status); 148 150 op->value = 0; 149 151 return XEN_PCI_ERR_op_failed; 150 152 } ··· 223 223 pci_name(dev), i, 224 224 op->msix_entries[i].vector); 225 225 } 226 - } else { 227 - printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", 228 - pci_name(dev), result); 229 - } 226 + } else 227 + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n", 228 + pci_name(dev), pdev->xdev->otherend_id, 229 + result); 230 230 kfree(entries); 231 231 232 232 op->value = result;
+10 -12
fs/btrfs/extent-tree.c
··· 4534 4534 unsigned nr_extents = 0; 4535 4535 int extra_reserve = 0; 4536 4536 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; 4537 - int ret; 4537 + int ret = 0; 4538 4538 bool delalloc_lock = true; 4539 4539 4540 4540 /* If we are a free space inode we need to not flush since we will be in ··· 4579 4579 csum_bytes = BTRFS_I(inode)->csum_bytes; 4580 4580 spin_unlock(&BTRFS_I(inode)->lock); 4581 4581 4582 - if (root->fs_info->quota_enabled) { 4582 + if (root->fs_info->quota_enabled) 4583 4583 ret = btrfs_qgroup_reserve(root, num_bytes + 4584 4584 nr_extents * root->leafsize); 4585 - if (ret) { 4586 - spin_lock(&BTRFS_I(inode)->lock); 4587 - calc_csum_metadata_size(inode, num_bytes, 0); 4588 - spin_unlock(&BTRFS_I(inode)->lock); 4589 - if (delalloc_lock) 4590 - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); 4591 - return ret; 4592 - } 4593 - } 4594 4585 4595 - ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 4586 + /* 4587 + * ret != 0 here means the qgroup reservation failed, we go straight to 4588 + * the shared error handling then. 4589 + */ 4590 + if (ret == 0) 4591 + ret = reserve_metadata_bytes(root, block_rsv, 4592 + to_reserve, flush); 4593 + 4596 4594 if (ret) { 4597 4595 u64 to_free = 0; 4598 4596 unsigned dropped;
+2 -1
fs/btrfs/extent_map.c
··· 288 288 void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) 289 289 { 290 290 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 291 - try_merge_map(tree, em); 291 + if (em->in_tree) 292 + try_merge_map(tree, em); 292 293 } 293 294 294 295 /**
+20 -5
fs/btrfs/file.c
··· 293 293 struct btrfs_key key; 294 294 struct btrfs_ioctl_defrag_range_args range; 295 295 int num_defrag; 296 + int index; 297 + int ret; 296 298 297 299 /* get the inode */ 298 300 key.objectid = defrag->root; 299 301 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); 300 302 key.offset = (u64)-1; 303 + 304 + index = srcu_read_lock(&fs_info->subvol_srcu); 305 + 301 306 inode_root = btrfs_read_fs_root_no_name(fs_info, &key); 302 307 if (IS_ERR(inode_root)) { 303 - kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 304 - return PTR_ERR(inode_root); 308 + ret = PTR_ERR(inode_root); 309 + goto cleanup; 310 + } 311 + if (btrfs_root_refs(&inode_root->root_item) == 0) { 312 + ret = -ENOENT; 313 + goto cleanup; 305 314 } 306 315 307 316 key.objectid = defrag->ino; ··· 318 309 key.offset = 0; 319 310 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); 320 311 if (IS_ERR(inode)) { 321 - kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 322 - return PTR_ERR(inode); 312 + ret = PTR_ERR(inode); 313 + goto cleanup; 323 314 } 315 + srcu_read_unlock(&fs_info->subvol_srcu, index); 324 316 325 317 /* do a chunk of defrag */ 326 318 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); ··· 356 346 357 347 iput(inode); 358 348 return 0; 349 + cleanup: 350 + srcu_read_unlock(&fs_info->subvol_srcu, index); 351 + kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 352 + return ret; 359 353 } 360 354 361 355 /* ··· 1608 1594 if (err < 0 && num_written > 0) 1609 1595 num_written = err; 1610 1596 } 1611 - out: 1597 + 1612 1598 if (sync) 1613 1599 atomic_dec(&BTRFS_I(inode)->sync_writers); 1600 + out: 1614 1601 sb_end_write(inode->i_sb); 1615 1602 current->backing_dev_info = NULL; 1616 1603 return num_written ? num_written : err;
+4 -1
fs/btrfs/ioctl.c
··· 515 515 516 516 BUG_ON(ret); 517 517 518 - d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); 519 518 fail: 520 519 if (async_transid) { 521 520 *async_transid = trans->transid; ··· 524 525 } 525 526 if (err && !ret) 526 527 ret = err; 528 + 529 + if (!ret) 530 + d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); 531 + 527 532 return ret; 528 533 } 529 534
+10 -3
fs/btrfs/ordered-data.c
··· 836 836 * if the disk i_size is already at the inode->i_size, or 837 837 * this ordered extent is inside the disk i_size, we're done 838 838 */ 839 - if (disk_i_size == i_size || offset <= disk_i_size) { 839 + if (disk_i_size == i_size) 840 840 goto out; 841 - } 841 + 842 + /* 843 + * We still need to update disk_i_size if outstanding_isize is greater 844 + * than disk_i_size. 845 + */ 846 + if (offset <= disk_i_size && 847 + (!ordered || ordered->outstanding_isize <= disk_i_size)) 848 + goto out; 842 849 843 850 /* 844 851 * walk backward from this ordered extent to disk_i_size. ··· 877 870 break; 878 871 if (test->file_offset >= i_size) 879 872 break; 880 - if (test->file_offset >= disk_i_size) { 873 + if (entry_end(test) > disk_i_size) { 881 874 /* 882 875 * we don't update disk_i_size now, so record this 883 876 * undealt i_size. Or we will not know the real
+20 -5
fs/btrfs/scrub.c
··· 580 580 int corrected = 0; 581 581 struct btrfs_key key; 582 582 struct inode *inode = NULL; 583 + struct btrfs_fs_info *fs_info; 583 584 u64 end = offset + PAGE_SIZE - 1; 584 585 struct btrfs_root *local_root; 586 + int srcu_index; 585 587 586 588 key.objectid = root; 587 589 key.type = BTRFS_ROOT_ITEM_KEY; 588 590 key.offset = (u64)-1; 589 - local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key); 590 - if (IS_ERR(local_root)) 591 + 592 + fs_info = fixup->root->fs_info; 593 + srcu_index = srcu_read_lock(&fs_info->subvol_srcu); 594 + 595 + local_root = btrfs_read_fs_root_no_name(fs_info, &key); 596 + if (IS_ERR(local_root)) { 597 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 591 598 return PTR_ERR(local_root); 599 + } 592 600 593 601 key.type = BTRFS_INODE_ITEM_KEY; 594 602 key.objectid = inum; 595 603 key.offset = 0; 596 - inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL); 604 + inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); 605 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 597 606 if (IS_ERR(inode)) 598 607 return PTR_ERR(inode); 599 608 ··· 615 606 } 616 607 617 608 if (PageUptodate(page)) { 618 - struct btrfs_fs_info *fs_info; 619 609 if (PageDirty(page)) { 620 610 /* 621 611 * we need to write the data to the defect sector. 
the ··· 3188 3180 u64 physical_for_dev_replace; 3189 3181 u64 len; 3190 3182 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; 3183 + int srcu_index; 3191 3184 3192 3185 key.objectid = root; 3193 3186 key.type = BTRFS_ROOT_ITEM_KEY; 3194 3187 key.offset = (u64)-1; 3188 + 3189 + srcu_index = srcu_read_lock(&fs_info->subvol_srcu); 3190 + 3195 3191 local_root = btrfs_read_fs_root_no_name(fs_info, &key); 3196 - if (IS_ERR(local_root)) 3192 + if (IS_ERR(local_root)) { 3193 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 3197 3194 return PTR_ERR(local_root); 3195 + } 3198 3196 3199 3197 key.type = BTRFS_INODE_ITEM_KEY; 3200 3198 key.objectid = inum; 3201 3199 key.offset = 0; 3202 3200 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); 3201 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 3203 3202 if (IS_ERR(inode)) 3204 3203 return PTR_ERR(inode); 3205 3204
+19 -8
fs/btrfs/transaction.c
··· 333 333 &root->fs_info->trans_block_rsv, 334 334 num_bytes, flush); 335 335 if (ret) 336 - return ERR_PTR(ret); 336 + goto reserve_fail; 337 337 } 338 338 again: 339 339 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 340 - if (!h) 341 - return ERR_PTR(-ENOMEM); 340 + if (!h) { 341 + ret = -ENOMEM; 342 + goto alloc_fail; 343 + } 342 344 343 345 /* 344 346 * If we are JOIN_NOLOCK we're already committing a transaction and ··· 367 365 if (ret < 0) { 368 366 /* We must get the transaction if we are JOIN_NOLOCK. */ 369 367 BUG_ON(type == TRANS_JOIN_NOLOCK); 370 - 371 - if (type < TRANS_JOIN_NOLOCK) 372 - sb_end_intwrite(root->fs_info->sb); 373 - kmem_cache_free(btrfs_trans_handle_cachep, h); 374 - return ERR_PTR(ret); 368 + goto join_fail; 375 369 } 376 370 377 371 cur_trans = root->fs_info->running_transaction; ··· 408 410 if (!current->journal_info && type != TRANS_USERSPACE) 409 411 current->journal_info = h; 410 412 return h; 413 + 414 + join_fail: 415 + if (type < TRANS_JOIN_NOLOCK) 416 + sb_end_intwrite(root->fs_info->sb); 417 + kmem_cache_free(btrfs_trans_handle_cachep, h); 418 + alloc_fail: 419 + if (num_bytes) 420 + btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, 421 + num_bytes); 422 + reserve_fail: 423 + if (qgroup_reserved) 424 + btrfs_qgroup_free(root, qgroup_reserved); 425 + return ERR_PTR(ret); 411 426 } 412 427 413 428 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
+2 -1
fs/btrfs/volumes.c
··· 1556 1556 ret = 0; 1557 1557 1558 1558 /* Notify udev that device has changed */ 1559 - btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 1559 + if (bdev) 1560 + btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 1560 1561 1561 1562 error_brelse: 1562 1563 brelse(bh);
+4 -4
fs/dlm/user.c
··· 503 503 #endif 504 504 return -EINVAL; 505 505 506 - #ifdef CONFIG_COMPAT 507 - if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN) 508 - #else 506 + /* 507 + * can't compare against COMPAT/dlm_write_request32 because 508 + * we don't yet know if is64bit is zero 509 + */ 509 510 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) 510 - #endif 511 511 return -EINVAL; 512 512 513 513 kbuf = kzalloc(count + 1, GFP_NOFS);
+4 -1
fs/nilfs2/ioctl.c
··· 664 664 if (ret < 0) 665 665 printk(KERN_ERR "NILFS: GC failed during preparation: " 666 666 "cannot read source blocks: err=%d\n", ret); 667 - else 667 + else { 668 + if (nilfs_sb_need_update(nilfs)) 669 + set_nilfs_discontinued(nilfs); 668 670 ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); 671 + } 669 672 670 673 nilfs_remove_all_gcinodes(nilfs); 671 674 clear_nilfs_gc_running(nilfs);
+25
include/linux/llist.h
··· 125 125 (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) 126 126 127 127 /** 128 + * llist_for_each_entry_safe - iterate safely against remove over some entries 129 + * of lock-less list of given type. 130 + * @pos: the type * to use as a loop cursor. 131 + * @n: another type * to use as a temporary storage. 132 + * @node: the fist entry of deleted list entries. 133 + * @member: the name of the llist_node with the struct. 134 + * 135 + * In general, some entries of the lock-less list can be traversed 136 + * safely only after being removed from list, so start with an entry 137 + * instead of list head. This variant allows removal of entries 138 + * as we iterate. 139 + * 140 + * If being used on entries deleted from lock-less list directly, the 141 + * traverse order is from the newest to the oldest added entry. If 142 + * you want to traverse from the oldest to the newest, you must 143 + * reverse the order by yourself before traversing. 144 + */ 145 + #define llist_for_each_entry_safe(pos, n, node, member) \ 146 + for ((pos) = llist_entry((node), typeof(*(pos)), member), \ 147 + (n) = (pos)->member.next; \ 148 + &(pos)->member != NULL; \ 149 + (pos) = llist_entry(n, typeof(*(pos)), member), \ 150 + (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL) 151 + 152 + /** 128 153 * llist_empty - tests whether a lock-less list is empty 129 154 * @head: the list to test 130 155 *
+1 -1
include/linux/memcontrol.h
··· 429 429 * the slab_mutex must be held when looping through those caches 430 430 */ 431 431 #define for_each_memcg_cache_index(_idx) \ 432 - for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) 432 + for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++) 433 433 434 434 static inline bool memcg_kmem_enabled(void) 435 435 {
+1 -1
include/linux/mmu_notifier.h
··· 151 151 * Therefore notifier chains can only be traversed when either 152 152 * 153 153 * 1. mmap_sem is held. 154 - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). 154 + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). 155 155 * 3. No other concurrent thread can access the list (release) 156 156 */ 157 157 struct mmu_notifier {
+3 -3
include/linux/ssb/ssb_driver_gige.h
··· 98 98 } 99 99 100 100 #ifdef CONFIG_BCM47XX 101 - #include <asm/mach-bcm47xx/nvram.h> 101 + #include <bcm47xx_nvram.h> 102 102 /* Get the device MAC address */ 103 103 static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) 104 104 { 105 105 char buf[20]; 106 - if (nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0) 106 + if (bcm47xx_nvram_getenv("et0macaddr", buf, sizeof(buf)) < 0) 107 107 return; 108 - nvram_parse_macaddr(buf, macaddr); 108 + bcm47xx_nvram_parse_macaddr(buf, macaddr); 109 109 } 110 110 #else 111 111 static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
+2
include/linux/usb.h
··· 357 357 int bandwidth_int_reqs; /* number of Interrupt requests */ 358 358 int bandwidth_isoc_reqs; /* number of Isoc. requests */ 359 359 360 + unsigned resuming_ports; /* bit array: resuming root-hub ports */ 361 + 360 362 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) 361 363 struct mon_bus *mon_bus; /* non-null when associated */ 362 364 int monitored; /* non-zero when monitored */
+3
include/linux/usb/hcd.h
··· 430 430 extern void usb_wakeup_notification(struct usb_device *hdev, 431 431 unsigned int portnum); 432 432 433 + extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); 434 + extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); 435 + 433 436 /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ 434 437 #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) 435 438 #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
+3 -1
include/linux/usb/usbnet.h
··· 33 33 wait_queue_head_t *wait; 34 34 struct mutex phy_mutex; 35 35 unsigned char suspend_count; 36 + unsigned char pkt_cnt, pkt_err; 36 37 37 38 /* i/o info: pipes etc */ 38 39 unsigned in, out; ··· 71 70 # define EVENT_DEV_OPEN 7 72 71 # define EVENT_DEVICE_REPORT_IDLE 8 73 72 # define EVENT_NO_RUNTIME_PM 9 73 + # define EVENT_RX_KILL 10 74 74 }; 75 75 76 76 static inline struct usb_driver *driver_of(struct usb_interface *intf) ··· 102 100 #define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ 103 101 104 102 #define FLAG_POINTTOPOINT 0x1000 /* possibly use "usb%d" names */ 105 - #define FLAG_NOARP 0x2000 /* device can't do ARP */ 106 103 107 104 /* 108 105 * Indicates to usbnet, that USB driver accumulates multiple IP packets. ··· 109 108 */ 110 109 #define FLAG_MULTI_PACKET 0x2000 111 110 #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ 111 + #define FLAG_NOARP 0x8000 /* device can't do ARP */ 112 112 113 113 /* init device ... can sleep, or cause probe() failure */ 114 114 int (*bind)(struct usbnet *, struct usb_interface *);
+10 -10
include/net/transp_v6.h
··· 34 34 struct sockaddr *uaddr, 35 35 int addr_len); 36 36 37 - extern int datagram_recv_ctl(struct sock *sk, 38 - struct msghdr *msg, 39 - struct sk_buff *skb); 37 + extern int ip6_datagram_recv_ctl(struct sock *sk, 38 + struct msghdr *msg, 39 + struct sk_buff *skb); 40 40 41 - extern int datagram_send_ctl(struct net *net, 42 - struct sock *sk, 43 - struct msghdr *msg, 44 - struct flowi6 *fl6, 45 - struct ipv6_txoptions *opt, 46 - int *hlimit, int *tclass, 47 - int *dontfrag); 41 + extern int ip6_datagram_send_ctl(struct net *net, 42 + struct sock *sk, 43 + struct msghdr *msg, 44 + struct flowi6 *fl6, 45 + struct ipv6_txoptions *opt, 46 + int *hlimit, int *tclass, 47 + int *dontfrag); 48 48 49 49 #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) 50 50
+6
include/uapi/linux/usb/ch9.h
··· 152 152 #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) 153 153 #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) 154 154 155 + /* 156 + * Interface status, Figure 9-5 USB 3.0 spec 157 + */ 158 + #define USB_INTRF_STAT_FUNC_RW_CAP 1 159 + #define USB_INTRF_STAT_FUNC_RW 2 160 + 155 161 #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ 156 162 157 163 /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
+18 -2
kernel/events/core.c
··· 908 908 } 909 909 910 910 /* 911 + * Initialize event state based on the perf_event_attr::disabled. 912 + */ 913 + static inline void perf_event__state_init(struct perf_event *event) 914 + { 915 + event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : 916 + PERF_EVENT_STATE_INACTIVE; 917 + } 918 + 919 + /* 911 920 * Called at perf_event creation and when events are attached/detached from a 912 921 * group. 913 922 */ ··· 6188 6179 event->overflow_handler = overflow_handler; 6189 6180 event->overflow_handler_context = context; 6190 6181 6191 - if (attr->disabled) 6192 - event->state = PERF_EVENT_STATE_OFF; 6182 + perf_event__state_init(event); 6193 6183 6194 6184 pmu = NULL; 6195 6185 ··· 6617 6609 6618 6610 mutex_lock(&gctx->mutex); 6619 6611 perf_remove_from_context(group_leader); 6612 + 6613 + /* 6614 + * Removing from the context ends up with disabled 6615 + * event. What we want here is event in the initial 6616 + * startup state, ready to be add into new context. 6617 + */ 6618 + perf_event__state_init(group_leader); 6620 6619 list_for_each_entry(sibling, &group_leader->sibling_list, 6621 6620 group_entry) { 6622 6621 perf_remove_from_context(sibling); 6622 + perf_event__state_init(sibling); 6623 6623 put_ctx(gctx); 6624 6624 } 6625 6625 mutex_unlock(&gctx->mutex);
+10 -3
kernel/rcutree_plugin.h
··· 40 40 #ifdef CONFIG_RCU_NOCB_CPU 41 41 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ 42 42 static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ 43 - static bool rcu_nocb_poll; /* Offload kthread are to poll. */ 44 - module_param(rcu_nocb_poll, bool, 0444); 43 + static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ 45 44 static char __initdata nocb_buf[NR_CPUS * 5]; 46 45 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 47 46 ··· 2158 2159 } 2159 2160 __setup("rcu_nocbs=", rcu_nocb_setup); 2160 2161 2162 + static int __init parse_rcu_nocb_poll(char *arg) 2163 + { 2164 + rcu_nocb_poll = 1; 2165 + return 0; 2166 + } 2167 + early_param("rcu_nocb_poll", parse_rcu_nocb_poll); 2168 + 2161 2169 /* Is the specified CPU a no-CPUs CPU? */ 2162 2170 static bool is_nocb_cpu(int cpu) 2163 2171 { ··· 2372 2366 for (;;) { 2373 2367 /* If not polling, wait for next batch of callbacks. */ 2374 2368 if (!rcu_nocb_poll) 2375 - wait_event(rdp->nocb_wq, rdp->nocb_head); 2369 + wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); 2376 2370 list = ACCESS_ONCE(rdp->nocb_head); 2377 2371 if (!list) { 2378 2372 schedule_timeout_interruptible(1); 2373 + flush_signals(current); 2379 2374 continue; 2380 2375 } 2381 2376
+2 -2
kernel/sched/debug.c
··· 222 222 cfs_rq->runnable_load_avg); 223 223 SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg", 224 224 cfs_rq->blocked_load_avg); 225 - SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg", 226 - atomic64_read(&cfs_rq->tg->load_avg)); 225 + SEQ_printf(m, " .%-30s: %lld\n", "tg_load_avg", 226 + (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg)); 227 227 SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib", 228 228 cfs_rq->tg_load_contrib); 229 229 SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib",
+1 -1
kernel/sched/fair.c
··· 2663 2663 hrtimer_cancel(&cfs_b->slack_timer); 2664 2664 } 2665 2665 2666 - static void unthrottle_offline_cfs_rqs(struct rq *rq) 2666 + static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 2667 2667 { 2668 2668 struct cfs_rq *cfs_rq; 2669 2669
+1 -1
kernel/sched/rt.c
··· 566 566 static int do_balance_runtime(struct rt_rq *rt_rq) 567 567 { 568 568 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 569 - struct root_domain *rd = cpu_rq(smp_processor_id())->rd; 569 + struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; 570 570 int i, weight, more = 0; 571 571 u64 rt_period; 572 572
+2
lib/digsig.c
··· 162 162 memset(out1, 0, head); 163 163 memcpy(out1 + head, p, l); 164 164 165 + kfree(p); 166 + 165 167 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); 166 168 if (err) 167 169 goto err;
+4
mm/huge_memory.c
··· 1257 1257 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1258 1258 goto out; 1259 1259 1260 + /* Avoid dumping huge zero page */ 1261 + if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1262 + return ERR_PTR(-EFAULT); 1263 + 1260 1264 page = pmd_page(*pmd); 1261 1265 VM_BUG_ON(!PageHead(page)); 1262 1266 if (flags & FOLL_TOUCH) {
+1
mm/hugetlb.c
··· 3033 3033 if (!huge_pte_none(huge_ptep_get(ptep))) { 3034 3034 pte = huge_ptep_get_and_clear(mm, address, ptep); 3035 3035 pte = pte_mkhuge(pte_modify(pte, newprot)); 3036 + pte = arch_make_huge_pte(pte, vma, NULL, 0); 3036 3037 set_huge_pte_at(mm, address, ptep, pte); 3037 3038 pages++; 3038 3039 }
+3 -1
mm/migrate.c
··· 160 160 if (is_write_migration_entry(entry)) 161 161 pte = pte_mkwrite(pte); 162 162 #ifdef CONFIG_HUGETLB_PAGE 163 - if (PageHuge(new)) 163 + if (PageHuge(new)) { 164 164 pte = pte_mkhuge(pte); 165 + pte = arch_make_huge_pte(pte, vma, new, 0); 166 + } 165 167 #endif 166 168 flush_cache_page(vma, addr, pte_pfn(pte)); 167 169 set_pte_at(mm, addr, ptep, pte);
+1 -1
mm/mmap.c
··· 2943 2943 * vma in this mm is backed by the same anon_vma or address_space. 2944 2944 * 2945 2945 * We can take all the locks in random order because the VM code 2946 - * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never 2946 + * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never 2947 2947 * takes more than one of them in a row. Secondly we're protected 2948 2948 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. 2949 2949 *
+3 -3
net/bluetooth/hci_conn.c
··· 249 249 __u8 reason = hci_proto_disconn_ind(conn); 250 250 251 251 switch (conn->type) { 252 - case ACL_LINK: 253 - hci_acl_disconn(conn, reason); 254 - break; 255 252 case AMP_LINK: 256 253 hci_amp_disconn(conn, reason); 254 + break; 255 + default: 256 + hci_acl_disconn(conn, reason); 257 257 break; 258 258 } 259 259 }
+13
net/bluetooth/smp.c
··· 859 859 860 860 skb_pull(skb, sizeof(code)); 861 861 862 + /* 863 + * The SMP context must be initialized for all other PDUs except 864 + * pairing and security requests. If we get any other PDU when 865 + * not initialized simply disconnect (done if this function 866 + * returns an error). 867 + */ 868 + if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && 869 + !conn->smp_chan) { 870 + BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code); 871 + kfree_skb(skb); 872 + return -ENOTSUPP; 873 + } 874 + 862 875 switch (code) { 863 876 case SMP_CMD_PAIRING_REQ: 864 877 reason = smp_cmd_pairing_req(conn, skb);
+6 -3
net/core/pktgen.c
··· 1781 1781 return -EFAULT; 1782 1782 i += len; 1783 1783 mutex_lock(&pktgen_thread_lock); 1784 - pktgen_add_device(t, f); 1784 + ret = pktgen_add_device(t, f); 1785 1785 mutex_unlock(&pktgen_thread_lock); 1786 - ret = count; 1787 - sprintf(pg_result, "OK: add_device=%s", f); 1786 + if (!ret) { 1787 + ret = count; 1788 + sprintf(pg_result, "OK: add_device=%s", f); 1789 + } else 1790 + sprintf(pg_result, "ERROR: can not add device %s", f); 1788 1791 goto out; 1789 1792 } 1790 1793
+1 -1
net/core/skbuff.c
··· 683 683 new->network_header = old->network_header; 684 684 new->mac_header = old->mac_header; 685 685 new->inner_transport_header = old->inner_transport_header; 686 - new->inner_network_header = old->inner_transport_header; 686 + new->inner_network_header = old->inner_network_header; 687 687 skb_dst_copy(new, old); 688 688 new->rxhash = old->rxhash; 689 689 new->ooo_okay = old->ooo_okay;
+10 -4
net/ipv4/tcp_cong.c
··· 310 310 { 311 311 int cnt; /* increase in packets */ 312 312 unsigned int delta = 0; 313 + u32 snd_cwnd = tp->snd_cwnd; 314 + 315 + if (unlikely(!snd_cwnd)) { 316 + pr_err_once("snd_cwnd is nul, please report this bug.\n"); 317 + snd_cwnd = 1U; 318 + } 313 319 314 320 /* RFC3465: ABC Slow start 315 321 * Increase only after a full MSS of bytes is acked ··· 330 324 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) 331 325 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ 332 326 else 333 - cnt = tp->snd_cwnd; /* exponential increase */ 327 + cnt = snd_cwnd; /* exponential increase */ 334 328 335 329 /* RFC3465: ABC 336 330 * We MAY increase by 2 if discovered delayed ack ··· 340 334 tp->bytes_acked = 0; 341 335 342 336 tp->snd_cwnd_cnt += cnt; 343 - while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 344 - tp->snd_cwnd_cnt -= tp->snd_cwnd; 337 + while (tp->snd_cwnd_cnt >= snd_cwnd) { 338 + tp->snd_cwnd_cnt -= snd_cwnd; 345 339 delta++; 346 340 } 347 - tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); 341 + tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp); 348 342 } 349 343 EXPORT_SYMBOL_GPL(tcp_slow_start); 350 344
+6 -2
net/ipv4/tcp_input.c
··· 3504 3504 } 3505 3505 } else { 3506 3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3507 + if (!tcp_packets_in_flight(tp)) { 3508 + tcp_enter_frto_loss(sk, 2, flag); 3509 + return true; 3510 + } 3511 + 3507 3512 /* Prevent sending of new data. */ 3508 3513 tp->snd_cwnd = min(tp->snd_cwnd, 3509 3514 tcp_packets_in_flight(tp)); ··· 5654 5649 * the remote receives only the retransmitted (regular) SYNs: either 5655 5650 * the original SYN-data or the corresponding SYN-ACK is lost. 5656 5651 */ 5657 - syn_drop = (cookie->len <= 0 && data && 5658 - inet_csk(sk)->icsk_retransmits); 5652 + syn_drop = (cookie->len <= 0 && data && tp->total_retrans); 5659 5653 5660 5654 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5661 5655
+5 -1
net/ipv4/tcp_ipv4.c
··· 496 496 * errors returned from accept(). 497 497 */ 498 498 inet_csk_reqsk_queue_drop(sk, req, prev); 499 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 499 500 goto out; 500 501 501 502 case TCP_SYN_SENT: ··· 1501 1500 * clogging syn queue with openreqs with exponentially increasing 1502 1501 * timeout. 1503 1502 */ 1504 - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1503 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { 1504 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 1505 1505 goto drop; 1506 + } 1506 1507 1507 1508 req = inet_reqsk_alloc(&tcp_request_sock_ops); 1508 1509 if (!req) ··· 1669 1666 drop_and_free: 1670 1667 reqsk_free(req); 1671 1668 drop: 1669 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1672 1670 return 0; 1673 1671 } 1674 1672 EXPORT_SYMBOL(tcp_v4_conn_request);
+1
net/ipv6/addrconf.c
··· 1660 1660 if (dev->addr_len != IEEE802154_ADDR_LEN) 1661 1661 return -1; 1662 1662 memcpy(eui, dev->dev_addr, 8); 1663 + eui[0] ^= 2; 1663 1664 return 0; 1664 1665 } 1665 1666
+9 -7
net/ipv6/datagram.c
··· 380 380 if (skb->protocol == htons(ETH_P_IPV6)) { 381 381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 382 382 if (np->rxopt.all) 383 - datagram_recv_ctl(sk, msg, skb); 383 + ip6_datagram_recv_ctl(sk, msg, skb); 384 384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 385 385 sin->sin6_scope_id = IP6CB(skb)->iif; 386 386 } else { ··· 468 468 } 469 469 470 470 471 - int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 471 + int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, 472 + struct sk_buff *skb) 472 473 { 473 474 struct ipv6_pinfo *np = inet6_sk(sk); 474 475 struct inet6_skb_parm *opt = IP6CB(skb); ··· 598 597 } 599 598 return 0; 600 599 } 600 + EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); 601 601 602 - int datagram_send_ctl(struct net *net, struct sock *sk, 603 - struct msghdr *msg, struct flowi6 *fl6, 604 - struct ipv6_txoptions *opt, 605 - int *hlimit, int *tclass, int *dontfrag) 602 + int ip6_datagram_send_ctl(struct net *net, struct sock *sk, 603 + struct msghdr *msg, struct flowi6 *fl6, 604 + struct ipv6_txoptions *opt, 605 + int *hlimit, int *tclass, int *dontfrag) 606 606 { 607 607 struct in6_pktinfo *src_info; 608 608 struct cmsghdr *cmsg; ··· 873 871 exit_f: 874 872 return err; 875 873 } 876 - EXPORT_SYMBOL_GPL(datagram_send_ctl); 874 + EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+2 -2
net/ipv6/ip6_flowlabel.c
··· 365 365 msg.msg_control = (void*)(fl->opt+1); 366 366 memset(&flowi6, 0, sizeof(flowi6)); 367 367 368 - err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 369 - &junk, &junk); 368 + err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, 369 + &junk, &junk, &junk); 370 370 if (err) 371 371 goto done; 372 372 err = -EINVAL;
+1 -1
net/ipv6/ip6_gre.c
··· 960 960 int ret; 961 961 962 962 if (!ip6_tnl_xmit_ctl(t)) 963 - return -1; 963 + goto tx_err; 964 964 965 965 switch (skb->protocol) { 966 966 case htons(ETH_P_IP):
+3 -3
net/ipv6/ipv6_sockglue.c
··· 476 476 msg.msg_controllen = optlen; 477 477 msg.msg_control = (void*)(opt+1); 478 478 479 - retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 480 - &junk); 479 + retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, 480 + &junk, &junk); 481 481 if (retv) 482 482 goto done; 483 483 update: ··· 1002 1002 release_sock(sk); 1003 1003 1004 1004 if (skb) { 1005 - int err = datagram_recv_ctl(sk, &msg, skb); 1005 + int err = ip6_datagram_recv_ctl(sk, &msg, skb); 1006 1006 kfree_skb(skb); 1007 1007 if (err) 1008 1008 return err;
+3 -3
net/ipv6/raw.c
··· 507 507 sock_recv_ts_and_drops(msg, sk, skb); 508 508 509 509 if (np->rxopt.all) 510 - datagram_recv_ctl(sk, msg, skb); 510 + ip6_datagram_recv_ctl(sk, msg, skb); 511 511 512 512 err = copied; 513 513 if (flags & MSG_TRUNC) ··· 822 822 memset(opt, 0, sizeof(struct ipv6_txoptions)); 823 823 opt->tot_len = sizeof(struct ipv6_txoptions); 824 824 825 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 826 - &hlimit, &tclass, &dontfrag); 825 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 826 + &hlimit, &tclass, &dontfrag); 827 827 if (err < 0) { 828 828 fl6_sock_release(flowlabel); 829 829 return err;
+1 -1
net/ipv6/route.c
··· 928 928 dst_hold(&rt->dst); 929 929 read_unlock_bh(&table->tb6_lock); 930 930 931 - if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) 931 + if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL))) 932 932 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 933 933 else if (!(rt->dst.flags & DST_HOST)) 934 934 nrt = rt6_alloc_clone(rt, &fl6->daddr);
+5 -1
net/ipv6/tcp_ipv6.c
··· 423 423 } 424 424 425 425 inet_csk_reqsk_queue_drop(sk, req, prev); 426 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 426 427 goto out; 427 428 428 429 case TCP_SYN_SENT: ··· 959 958 goto drop; 960 959 } 961 960 962 - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 961 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { 962 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 963 963 goto drop; 964 + } 964 965 965 966 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 966 967 if (req == NULL) ··· 1111 1108 drop_and_free: 1112 1109 reqsk_free(req); 1113 1110 drop: 1111 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1114 1112 return 0; /* don't send reset */ 1115 1113 } 1116 1114
+3 -3
net/ipv6/udp.c
··· 443 443 ip_cmsg_recv(msg, skb); 444 444 } else { 445 445 if (np->rxopt.all) 446 - datagram_recv_ctl(sk, msg, skb); 446 + ip6_datagram_recv_ctl(sk, msg, skb); 447 447 } 448 448 449 449 err = copied; ··· 1153 1153 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1154 1154 opt->tot_len = sizeof(*opt); 1155 1155 1156 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1157 - &hlimit, &tclass, &dontfrag); 1156 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1157 + &hlimit, &tclass, &dontfrag); 1158 1158 if (err < 0) { 1159 1159 fl6_sock_release(flowlabel); 1160 1160 return err;
+65 -17
net/l2tp/l2tp_core.c
··· 168 168 169 169 } 170 170 171 + /* Lookup the tunnel socket, possibly involving the fs code if the socket is 172 + * owned by userspace. A struct sock returned from this function must be 173 + * released using l2tp_tunnel_sock_put once you're done with it. 174 + */ 175 + struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) 176 + { 177 + int err = 0; 178 + struct socket *sock = NULL; 179 + struct sock *sk = NULL; 180 + 181 + if (!tunnel) 182 + goto out; 183 + 184 + if (tunnel->fd >= 0) { 185 + /* Socket is owned by userspace, who might be in the process 186 + * of closing it. Look the socket up using the fd to ensure 187 + * consistency. 188 + */ 189 + sock = sockfd_lookup(tunnel->fd, &err); 190 + if (sock) 191 + sk = sock->sk; 192 + } else { 193 + /* Socket is owned by kernelspace */ 194 + sk = tunnel->sock; 195 + } 196 + 197 + out: 198 + return sk; 199 + } 200 + EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); 201 + 202 + /* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ 203 + void l2tp_tunnel_sock_put(struct sock *sk) 204 + { 205 + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 206 + if (tunnel) { 207 + if (tunnel->fd >= 0) { 208 + /* Socket is owned by userspace */ 209 + sockfd_put(sk->sk_socket); 210 + } 211 + sock_put(sk); 212 + } 213 + } 214 + EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215 + 171 216 /* Lookup a session by id in the global session list 172 217 */ 173 218 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) ··· 1168 1123 struct udphdr *uh; 1169 1124 struct inet_sock *inet; 1170 1125 __wsum csum; 1171 - int old_headroom; 1172 - int new_headroom; 1173 1126 int headroom; 1174 1127 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? 
sizeof(struct udphdr) : 0; 1175 1128 int udp_len; ··· 1179 1136 */ 1180 1137 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1181 1138 uhlen + hdr_len; 1182 - old_headroom = skb_headroom(skb); 1183 1139 if (skb_cow_head(skb, headroom)) { 1184 1140 kfree_skb(skb); 1185 1141 return NET_XMIT_DROP; 1186 1142 } 1187 1143 1188 - new_headroom = skb_headroom(skb); 1189 1144 skb_orphan(skb); 1190 - skb->truesize += new_headroom - old_headroom; 1191 - 1192 1145 /* Setup L2TP header */ 1193 1146 session->build_header(session, __skb_push(skb, hdr_len)); 1194 1147 ··· 1646 1607 tunnel->old_sk_destruct = sk->sk_destruct; 1647 1608 sk->sk_destruct = &l2tp_tunnel_destruct; 1648 1609 tunnel->sock = sk; 1610 + tunnel->fd = fd; 1649 1611 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1650 1612 1651 1613 sk->sk_allocation = GFP_ATOMIC; ··· 1682 1642 */ 1683 1643 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1684 1644 { 1685 - int err = 0; 1686 - struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; 1645 + int err = -EBADF; 1646 + struct socket *sock = NULL; 1647 + struct sock *sk = NULL; 1648 + 1649 + sk = l2tp_tunnel_sock_lookup(tunnel); 1650 + if (!sk) 1651 + goto out; 1652 + 1653 + sock = sk->sk_socket; 1654 + BUG_ON(!sock); 1687 1655 1688 1656 /* Force the tunnel socket to close. This will eventually 1689 1657 * cause the tunnel to be deleted via the normal socket close 1690 1658 * mechanisms when userspace closes the tunnel socket. 1691 1659 */ 1692 - if (sock != NULL) { 1693 - err = inet_shutdown(sock, 2); 1660 + err = inet_shutdown(sock, 2); 1694 1661 1695 - /* If the tunnel's socket was created by the kernel, 1696 - * close the socket here since the socket was not 1697 - * created by userspace. 1698 - */ 1699 - if (sock->file == NULL) 1700 - err = inet_release(sock); 1701 - } 1662 + /* If the tunnel's socket was created by the kernel, 1663 + * close the socket here since the socket was not 1664 + * created by userspace. 
1665 + */ 1666 + if (sock->file == NULL) 1667 + err = inet_release(sock); 1702 1668 1669 + l2tp_tunnel_sock_put(sk); 1670 + out: 1703 1671 return err; 1704 1672 } 1705 1673 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+4 -1
net/l2tp/l2tp_core.h
··· 188 188 int (*recv_payload_hook)(struct sk_buff *skb); 189 189 void (*old_sk_destruct)(struct sock *); 190 190 struct sock *sock; /* Parent socket */ 191 - int fd; 191 + int fd; /* Parent fd, if tunnel socket 192 + * was created by userspace */ 192 193 193 194 uint8_t priv[0]; /* private data */ 194 195 }; ··· 229 228 return tunnel; 230 229 } 231 230 231 + extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); 232 + extern void l2tp_tunnel_sock_put(struct sock *sk); 232 233 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); 233 234 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 234 235 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+5 -5
net/l2tp/l2tp_ip6.c
··· 554 554 memset(opt, 0, sizeof(struct ipv6_txoptions)); 555 555 opt->tot_len = sizeof(struct ipv6_txoptions); 556 556 557 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 558 - &hlimit, &tclass, &dontfrag); 557 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 558 + &hlimit, &tclass, &dontfrag); 559 559 if (err < 0) { 560 560 fl6_sock_release(flowlabel); 561 561 return err; ··· 646 646 struct msghdr *msg, size_t len, int noblock, 647 647 int flags, int *addr_len) 648 648 { 649 - struct inet_sock *inet = inet_sk(sk); 649 + struct ipv6_pinfo *np = inet6_sk(sk); 650 650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; 651 651 size_t copied = 0; 652 652 int err = -EOPNOTSUPP; ··· 688 688 lsa->l2tp_scope_id = IP6CB(skb)->iif; 689 689 } 690 690 691 - if (inet->cmsg_flags) 692 - ip_cmsg_recv(msg, skb); 691 + if (np->rxopt.all) 692 + ip6_datagram_recv_ctl(sk, msg, skb); 693 693 694 694 if (flags & MSG_TRUNC) 695 695 copied = skb->len;
-6
net/l2tp/l2tp_ppp.c
··· 388 388 struct l2tp_session *session; 389 389 struct l2tp_tunnel *tunnel; 390 390 struct pppol2tp_session *ps; 391 - int old_headroom; 392 - int new_headroom; 393 391 int uhlen, headroom; 394 392 395 393 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) ··· 406 408 if (tunnel == NULL) 407 409 goto abort_put_sess; 408 410 409 - old_headroom = skb_headroom(skb); 410 411 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 411 412 headroom = NET_SKB_PAD + 412 413 sizeof(struct iphdr) + /* IP header */ ··· 414 417 sizeof(ppph); /* PPP header */ 415 418 if (skb_cow_head(skb, headroom)) 416 419 goto abort_put_sess_tun; 417 - 418 - new_headroom = skb_headroom(skb); 419 - skb->truesize += new_headroom - old_headroom; 420 420 421 421 /* Setup PPP header */ 422 422 __skb_push(skb, sizeof(ppph));
+9 -7
net/openvswitch/vport-netdev.c
··· 35 35 /* Must be called with rcu_read_lock. */ 36 36 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) 37 37 { 38 - if (unlikely(!vport)) { 39 - kfree_skb(skb); 40 - return; 41 - } 38 + if (unlikely(!vport)) 39 + goto error; 40 + 41 + if (unlikely(skb_warn_if_lro(skb))) 42 + goto error; 42 43 43 44 /* Make our own copy of the packet. Otherwise we will mangle the 44 45 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). ··· 51 50 52 51 skb_push(skb, ETH_HLEN); 53 52 ovs_vport_receive(vport, skb); 53 + return; 54 + 55 + error: 56 + kfree_skb(skb); 54 57 } 55 58 56 59 /* Called with rcu_read_lock and bottom-halves disabled. */ ··· 173 168 packet_length(skb), mtu); 174 169 goto error; 175 170 } 176 - 177 - if (unlikely(skb_warn_if_lro(skb))) 178 - goto error; 179 171 180 172 skb->dev = netdev_vport->dev; 181 173 len = skb->len;
+6 -4
net/packet/af_packet.c
··· 2361 2361 2362 2362 packet_flush_mclist(sk); 2363 2363 2364 - memset(&req_u, 0, sizeof(req_u)); 2365 - 2366 - if (po->rx_ring.pg_vec) 2364 + if (po->rx_ring.pg_vec) { 2365 + memset(&req_u, 0, sizeof(req_u)); 2367 2366 packet_set_ring(sk, &req_u, 1, 0); 2367 + } 2368 2368 2369 - if (po->tx_ring.pg_vec) 2369 + if (po->tx_ring.pg_vec) { 2370 + memset(&req_u, 0, sizeof(req_u)); 2370 2371 packet_set_ring(sk, &req_u, 1, 1); 2372 + } 2371 2373 2372 2374 fanout_release(sk); 2373 2375
+6 -6
net/sched/sch_netem.c
··· 438 438 if (q->rate) { 439 439 struct sk_buff_head *list = &sch->q; 440 440 441 - delay += packet_len_2_sched_time(skb->len, q); 442 - 443 441 if (!skb_queue_empty(list)) { 444 442 /* 445 - * Last packet in queue is reference point (now). 446 - * First packet in queue is already in flight, 447 - * calculate this time bonus and substract 443 + * Last packet in queue is reference point (now), 444 + * calculate this time bonus and subtract 448 445 * from delay. 449 446 */ 450 - delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; 447 + delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; 448 + delay = max_t(psched_tdiff_t, 0, delay); 451 449 now = netem_skb_cb(skb_peek_tail(list))->time_to_send; 452 450 } 451 + 452 + delay += packet_len_2_sched_time(skb->len, q); 453 453 } 454 454 455 455 cb->time_to_send = now + delay;
+1 -1
net/sctp/auth.c
··· 71 71 return; 72 72 73 73 if (atomic_dec_and_test(&key->refcnt)) { 74 - kfree(key); 74 + kzfree(key); 75 75 SCTP_DBG_OBJCNT_DEC(keys); 76 76 } 77 77 }
+5
net/sctp/endpointola.c
··· 249 249 /* Final destructor for endpoint. */ 250 250 static void sctp_endpoint_destroy(struct sctp_endpoint *ep) 251 251 { 252 + int i; 253 + 252 254 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 253 255 254 256 /* Free up the HMAC transform. */ ··· 272 270 /* Cleanup. */ 273 271 sctp_inq_free(&ep->base.inqueue); 274 272 sctp_bind_addr_free(&ep->base.bind_addr); 273 + 274 + for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) 275 + memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); 275 276 276 277 /* Remove and free the port */ 277 278 if (sctp_sk(ep->base.sk)->bind_hash)
+1 -1
net/sctp/socket.c
··· 3390 3390 3391 3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3392 3392 out: 3393 - kfree(authkey); 3393 + kzfree(authkey); 3394 3394 return ret; 3395 3395 } 3396 3396
+1 -1
net/sunrpc/svcsock.c
··· 465 465 } 466 466 467 467 /* 468 - * See net/ipv6/datagram.c : datagram_recv_ctl 468 + * See net/ipv6/datagram.c : ip6_datagram_recv_ctl 469 469 */ 470 470 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, 471 471 struct cmsghdr *cmh)
+1 -1
net/wireless/scan.c
··· 1358 1358 &iwe, IW_EV_UINT_LEN); 1359 1359 } 1360 1360 1361 - buf = kmalloc(30, GFP_ATOMIC); 1361 + buf = kmalloc(31, GFP_ATOMIC); 1362 1362 if (buf) { 1363 1363 memset(&iwe, 0, sizeof(iwe)); 1364 1364 iwe.cmd = IWEVCUSTOM;
+2
samples/seccomp/Makefile
··· 19 19 20 20 # Try to match the kernel target. 21 21 ifndef CONFIG_64BIT 22 + ifndef CROSS_COMPILE 22 23 23 24 # s390 has -m31 flag to build 31 bit binaries 24 25 ifndef CONFIG_S390 ··· 35 34 HOSTLOADLIBES_bpf-direct += $(MFLAG) 36 35 HOSTLOADLIBES_bpf-fancy += $(MFLAG) 37 36 HOSTLOADLIBES_dropper += $(MFLAG) 37 + endif 38 38 endif 39 39 40 40 # Tell kbuild to always build the programs
+5 -5
scripts/checkpatch.pl
··· 230 230 our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; 231 231 our $Lval = qr{$Ident(?:$Member)*}; 232 232 233 - our $Float_hex = qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)}; 234 - our $Float_dec = qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))}; 235 - our $Float_int = qr{(?i:[0-9]+e-?[0-9]+[fl]?)}; 233 + our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; 234 + our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; 235 + our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; 236 236 our $Float = qr{$Float_hex|$Float_dec|$Float_int}; 237 - our $Constant = qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))}; 238 - our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)}; 237 + our $Constant = qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*}; 238 + our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=}; 239 239 our $Compare = qr{<=|>=|==|!=|<|>}; 240 240 our $Operators = qr{ 241 241 <=|>=|==|!=|
+7 -2
sound/soc/fsl/Kconfig
··· 108 108 config SND_SOC_IMX_SSI 109 109 tristate 110 110 111 - config SND_SOC_IMX_PCM_FIQ 111 + config SND_SOC_IMX_PCM 112 112 tristate 113 + 114 + config SND_SOC_IMX_PCM_FIQ 115 + bool 113 116 select FIQ 117 + select SND_SOC_IMX_PCM 114 118 115 119 config SND_SOC_IMX_PCM_DMA 116 - tristate 120 + bool 117 121 select SND_SOC_DMAENGINE_PCM 122 + select SND_SOC_IMX_PCM 118 123 119 124 config SND_SOC_IMX_AUDMUX 120 125 tristate
+1 -4
sound/soc/fsl/Makefile
··· 41 41 obj-$(CONFIG_SND_SOC_IMX_SSI) += snd-soc-imx-ssi.o 42 42 obj-$(CONFIG_SND_SOC_IMX_AUDMUX) += snd-soc-imx-audmux.o 43 43 44 - obj-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += snd-soc-imx-pcm-fiq.o 45 - snd-soc-imx-pcm-fiq-y := imx-pcm-fiq.o imx-pcm.o 46 - obj-$(CONFIG_SND_SOC_IMX_PCM_DMA) += snd-soc-imx-pcm-dma.o 47 - snd-soc-imx-pcm-dma-y := imx-pcm-dma.o imx-pcm.o 44 + obj-$(CONFIG_SND_SOC_IMX_PCM) += snd-soc-imx-pcm.o 48 45 49 46 # i.MX Machine Support 50 47 snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o
+1 -20
sound/soc/fsl/imx-pcm-dma.c
··· 154 154 .pcm_free = imx_pcm_free, 155 155 }; 156 156 157 - static int imx_soc_platform_probe(struct platform_device *pdev) 157 + int imx_pcm_dma_init(struct platform_device *pdev) 158 158 { 159 159 return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); 160 160 } 161 - 162 - static int imx_soc_platform_remove(struct platform_device *pdev) 163 - { 164 - snd_soc_unregister_platform(&pdev->dev); 165 - return 0; 166 - } 167 - 168 - static struct platform_driver imx_pcm_driver = { 169 - .driver = { 170 - .name = "imx-pcm-audio", 171 - .owner = THIS_MODULE, 172 - }, 173 - .probe = imx_soc_platform_probe, 174 - .remove = imx_soc_platform_remove, 175 - }; 176 - 177 - module_platform_driver(imx_pcm_driver); 178 - MODULE_LICENSE("GPL"); 179 - MODULE_ALIAS("platform:imx-pcm-audio");
+1 -21
sound/soc/fsl/imx-pcm-fiq.c
··· 281 281 .pcm_free = imx_pcm_fiq_free, 282 282 }; 283 283 284 - static int imx_soc_platform_probe(struct platform_device *pdev) 284 + int imx_pcm_fiq_init(struct platform_device *pdev) 285 285 { 286 286 struct imx_ssi *ssi = platform_get_drvdata(pdev); 287 287 int ret; ··· 314 314 315 315 return ret; 316 316 } 317 - 318 - static int imx_soc_platform_remove(struct platform_device *pdev) 319 - { 320 - snd_soc_unregister_platform(&pdev->dev); 321 - return 0; 322 - } 323 - 324 - static struct platform_driver imx_pcm_driver = { 325 - .driver = { 326 - .name = "imx-fiq-pcm-audio", 327 - .owner = THIS_MODULE, 328 - }, 329 - 330 - .probe = imx_soc_platform_probe, 331 - .remove = imx_soc_platform_remove, 332 - }; 333 - 334 - module_platform_driver(imx_pcm_driver); 335 - 336 - MODULE_LICENSE("GPL");
+35
sound/soc/fsl/imx-pcm.c
··· 31 31 runtime->dma_bytes); 32 32 return ret; 33 33 } 34 + EXPORT_SYMBOL_GPL(snd_imx_pcm_mmap); 34 35 35 36 static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) 36 37 { ··· 80 79 out: 81 80 return ret; 82 81 } 82 + EXPORT_SYMBOL_GPL(imx_pcm_new); 83 83 84 84 void imx_pcm_free(struct snd_pcm *pcm) 85 85 { ··· 102 100 buf->area = NULL; 103 101 } 104 102 } 103 + EXPORT_SYMBOL_GPL(imx_pcm_free); 104 + 105 + static int imx_pcm_probe(struct platform_device *pdev) 106 + { 107 + if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0) 108 + return imx_pcm_fiq_init(pdev); 109 + 110 + return imx_pcm_dma_init(pdev); 111 + } 112 + 113 + static int imx_pcm_remove(struct platform_device *pdev) 114 + { 115 + snd_soc_unregister_platform(&pdev->dev); 116 + return 0; 117 + } 118 + 119 + static struct platform_device_id imx_pcm_devtype[] = { 120 + { .name = "imx-pcm-audio", }, 121 + { .name = "imx-fiq-pcm-audio", }, 122 + { /* sentinel */ } 123 + }; 124 + MODULE_DEVICE_TABLE(platform, imx_pcm_devtype); 125 + 126 + static struct platform_driver imx_pcm_driver = { 127 + .driver = { 128 + .name = "imx-pcm", 129 + .owner = THIS_MODULE, 130 + }, 131 + .id_table = imx_pcm_devtype, 132 + .probe = imx_pcm_probe, 133 + .remove = imx_pcm_remove, 134 + }; 135 + module_platform_driver(imx_pcm_driver); 105 136 106 137 MODULE_DESCRIPTION("Freescale i.MX PCM driver"); 107 138 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+18
sound/soc/fsl/imx-pcm.h
··· 30 30 int imx_pcm_new(struct snd_soc_pcm_runtime *rtd); 31 31 void imx_pcm_free(struct snd_pcm *pcm); 32 32 33 + #ifdef CONFIG_SND_SOC_IMX_PCM_DMA 34 + int imx_pcm_dma_init(struct platform_device *pdev); 35 + #else 36 + static inline int imx_pcm_dma_init(struct platform_device *pdev) 37 + { 38 + return -ENODEV; 39 + } 40 + #endif 41 + 42 + #ifdef CONFIG_SND_SOC_IMX_PCM_FIQ 43 + int imx_pcm_fiq_init(struct platform_device *pdev); 44 + #else 45 + static inline int imx_pcm_fiq_init(struct platform_device *pdev) 46 + { 47 + return -ENODEV; 48 + } 49 + #endif 50 + 33 51 #endif /* _IMX_PCM_H */
+2
tools/vm/.gitignore
··· 1 + slabinfo 2 + page-types