Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' into next/cleanup

Merging in fixes since there's a conflict in the omap4 clock tables caused by
it.

* fixes: (245 commits)
ARM: highbank: fix cache flush ordering for cpu hotplug
ARM: OMAP4: hwmod data: make 'ocp2scp_usb_phy_phy_48m' as the main clock
arm: mvebu: Fix the irq map function in SMP mode
Fix GE0/GE1 init on ix2-200 as GE0 has no PHY
ARM: S3C24XX: Fix interrupt pending register offset of the EINT controller
ARM: S3C24XX: Correct NR_IRQS definition for s3c2440
ARM i.MX6: Fix ldb_di clock selection
ARM: imx: provide twd clock lookup from device tree
ARM: imx35 Bugfix admux clock
ARM: clk-imx35: Bugfix iomux clock
+ Linux 3.9-rc6

Signed-off-by: Olof Johansson <olof@lixom.net>

Conflicts:
arch/arm/mach-omap2/cclock44xx_data.c

+2575 -1549
+2 -3
Documentation/sound/alsa/ALSA-Configuration.txt
··· 890 890 enable_msi - Enable Message Signaled Interrupt (MSI) (default = off) 891 891 power_save - Automatic power-saving timeout (in second, 0 = 892 892 disable) 893 - power_save_controller - Support runtime D3 of HD-audio controller 894 - (-1 = on for supported chip (default), false = off, 895 - true = force to on even for unsupported hardware) 893 + power_save_controller - Reset HD-audio controller in power-saving mode 894 + (default = on) 896 895 align_buffer_size - Force rounding of buffer/period sizes to multiples 897 896 of 128 bytes. This is more efficient in terms of memory 898 897 access but isn't required by the HDA spec and prevents
+8 -2
MAINTAINERS
··· 4941 4941 S: Maintained 4942 4942 F: fs/logfs/ 4943 4943 4944 + LPC32XX MACHINE SUPPORT 4945 + M: Roland Stigge <stigge@antcom.de> 4946 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 4947 + S: Maintained 4948 + F: arch/arm/mach-lpc32xx/ 4949 + 4944 4950 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) 4945 4951 M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com> 4946 4952 M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com> ··· 5071 5065 F: drivers/net/ethernet/marvell/sk* 5072 5066 5073 5067 MARVELL LIBERTAS WIRELESS DRIVER 5074 - M: Dan Williams <dcbw@redhat.com> 5075 5068 L: libertas-dev@lists.infradead.org 5076 - S: Maintained 5069 + S: Orphan 5077 5070 F: drivers/net/wireless/libertas/ 5078 5071 5079 5072 MARVELL MV643XX ETHERNET DRIVER ··· 5574 5569 F: include/uapi/linux/netdevice.h 5575 5570 5576 5571 NETXEN (1/10) GbE SUPPORT 5572 + M: Manish Chopra <manish.chopra@qlogic.com> 5577 5573 M: Sony Chacko <sony.chacko@qlogic.com> 5578 5574 M: Rajesh Borundia <rajesh.borundia@qlogic.com> 5579 5575 L: netdev@vger.kernel.org
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 9 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc6 5 5 NAME = Unicycling Gorilla 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/alpha/Makefile
··· 12 12 13 13 LDFLAGS_vmlinux := -static -N #-relax 14 14 CHECKFLAGS += -D__alpha__ -m64 15 - cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data 15 + cflags-y := -pipe -mno-fp-regs -ffixed-8 16 16 cflags-y += $(call cc-option, -fno-jump-tables) 17 17 18 18 cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
+1 -1
arch/alpha/include/asm/floppy.h
··· 26 26 #define fd_disable_irq() disable_irq(FLOPPY_IRQ) 27 27 #define fd_cacheflush(addr,size) /* nothing */ 28 28 #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ 29 - IRQF_DISABLED, "floppy", NULL) 29 + 0, "floppy", NULL) 30 30 #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) 31 31 32 32 #ifdef CONFIG_PCI
-7
arch/alpha/kernel/irq.c
··· 117 117 return; 118 118 } 119 119 120 - /* 121 - * From here we must proceed with IPL_MAX. Note that we do not 122 - * explicitly enable interrupts afterwards - some MILO PALcode 123 - * (namely LX164 one) seems to have severe problems with RTI 124 - * at IPL 0. 125 - */ 126 - local_irq_disable(); 127 120 irq_enter(); 128 121 generic_handle_irq_desc(irq, desc); 129 122 irq_exit();
+8 -2
arch/alpha/kernel/irq_alpha.c
··· 45 45 unsigned long la_ptr, struct pt_regs *regs) 46 46 { 47 47 struct pt_regs *old_regs; 48 + 49 + /* 50 + * Disable interrupts during IRQ handling. 51 + * Note that there is no matching local_irq_enable() due to 52 + * severe problems with RTI at IPL0 and some MILO PALcode 53 + * (namely LX164). 54 + */ 55 + local_irq_disable(); 48 56 switch (type) { 49 57 case 0: 50 58 #ifdef CONFIG_SMP ··· 70 62 { 71 63 long cpu; 72 64 73 - local_irq_disable(); 74 65 smp_percpu_timer_interrupt(regs); 75 66 cpu = smp_processor_id(); 76 67 if (cpu != boot_cpuid) { ··· 229 222 230 223 struct irqaction timer_irqaction = { 231 224 .handler = timer_interrupt, 232 - .flags = IRQF_DISABLED, 233 225 .name = "timer", 234 226 }; 235 227
+5
arch/alpha/kernel/sys_nautilus.c
··· 188 188 extern void free_reserved_mem(void *, void *); 189 189 extern void pcibios_claim_one_bus(struct pci_bus *); 190 190 191 + static struct resource irongate_io = { 192 + .name = "Irongate PCI IO", 193 + .flags = IORESOURCE_IO, 194 + }; 191 195 static struct resource irongate_mem = { 192 196 .name = "Irongate PCI MEM", 193 197 .flags = IORESOURCE_MEM, ··· 213 209 214 210 irongate = pci_get_bus_and_slot(0, 0); 215 211 bus->self = irongate; 212 + bus->resource[0] = &irongate_io; 216 213 bus->resource[1] = &irongate_mem; 217 214 218 215 pci_bus_size_bridges(bus);
+7 -7
arch/alpha/kernel/sys_titan.c
··· 280 280 * all reported to the kernel as machine checks, so the handler 281 281 * is a nop so it can be called to count the individual events. 282 282 */ 283 - titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, 283 + titan_request_irq(63+16, titan_intr_nop, 0, 284 284 "CChip Error", NULL); 285 - titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, 285 + titan_request_irq(62+16, titan_intr_nop, 0, 286 286 "PChip 0 H_Error", NULL); 287 - titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, 287 + titan_request_irq(61+16, titan_intr_nop, 0, 288 288 "PChip 1 H_Error", NULL); 289 - titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, 289 + titan_request_irq(60+16, titan_intr_nop, 0, 290 290 "PChip 0 C_Error", NULL); 291 - titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, 291 + titan_request_irq(59+16, titan_intr_nop, 0, 292 292 "PChip 1 C_Error", NULL); 293 293 294 294 /* ··· 348 348 * Hook a couple of extra err interrupts that the 349 349 * common titan code won't. 350 350 */ 351 - titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, 351 + titan_request_irq(53+16, titan_intr_nop, 0, 352 352 "NMI", NULL); 353 - titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, 353 + titan_request_irq(50+16, titan_intr_nop, 0, 354 354 "Temperature Warning", NULL); 355 355 356 356 /*
+12 -2
arch/arm/Kconfig
··· 1175 1175 default 8 1176 1176 1177 1177 config IWMMXT 1178 - bool "Enable iWMMXt support" 1178 + bool "Enable iWMMXt support" if !CPU_PJ4 1179 1179 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 1180 - default y if PXA27x || PXA3xx || ARCH_MMP 1180 + default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 1181 1181 help 1182 1182 Enable support for iWMMXt context switching at run time if 1183 1183 running on a CPU that supports it. ··· 1430 1430 operation aborts with MMU exception, it might cause the processor 1431 1431 to deadlock. This workaround puts DSB before executing ISB if 1432 1432 an abort may occur on cache maintenance. 1433 + 1434 + config ARM_ERRATA_798181 1435 + bool "ARM errata: TLBI/DSB failure on Cortex-A15" 1436 + depends on CPU_V7 && SMP 1437 + help 1438 + On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not 1439 + adequately shooting down all use of the old entries. This 1440 + option enables the Linux kernel workaround for this erratum 1441 + which sends an IPI to the CPUs that are running the same ASID 1442 + as the one being invalidated. 1433 1443 1434 1444 endmenu 1435 1445
+1 -1
arch/arm/boot/dts/armada-370-mirabox.dts
··· 54 54 }; 55 55 56 56 mvsdio@d00d4000 { 57 - pinctrl-0 = <&sdio_pins2>; 57 + pinctrl-0 = <&sdio_pins3>; 58 58 pinctrl-names = "default"; 59 59 status = "okay"; 60 60 /*
+6
arch/arm/boot/dts/armada-370.dtsi
··· 59 59 "mpp50", "mpp51", "mpp52"; 60 60 marvell,function = "sd0"; 61 61 }; 62 + 63 + sdio_pins3: sdio-pins3 { 64 + marvell,pins = "mpp48", "mpp49", "mpp50", 65 + "mpp51", "mpp52", "mpp53"; 66 + marvell,function = "sd0"; 67 + }; 62 68 }; 63 69 64 70 gpio0: gpio@d0018100 {
+2 -2
arch/arm/boot/dts/dbx5x0.dtsi
··· 191 191 192 192 prcmu: prcmu@80157000 { 193 193 compatible = "stericsson,db8500-prcmu"; 194 - reg = <0x80157000 0x1000>; 195 - reg-names = "prcmu"; 194 + reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>; 195 + reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm"; 196 196 interrupts = <0 47 0x4>; 197 197 #address-cells = <1>; 198 198 #size-cells = <1>;
-1
arch/arm/boot/dts/imx28-m28evk.dts
··· 152 152 i2c0: i2c@80058000 { 153 153 pinctrl-names = "default"; 154 154 pinctrl-0 = <&i2c0_pins_a>; 155 - clock-frequency = <400000>; 156 155 status = "okay"; 157 156 158 157 sgtl5000: codec@0a {
-1
arch/arm/boot/dts/imx28-sps1.dts
··· 70 70 i2c0: i2c@80058000 { 71 71 pinctrl-names = "default"; 72 72 pinctrl-0 = <&i2c0_pins_a>; 73 - clock-frequency = <400000>; 74 73 status = "okay"; 75 74 76 75 rtc: rtc@51 {
+1
arch/arm/boot/dts/imx6qdl.dtsi
··· 91 91 compatible = "arm,cortex-a9-twd-timer"; 92 92 reg = <0x00a00600 0x20>; 93 93 interrupts = <1 13 0xf01>; 94 + clocks = <&clks 15>; 94 95 }; 95 96 96 97 L2: l2-cache@00a02000 {
+1
arch/arm/boot/dts/kirkwood-goflexnet.dts
··· 77 77 }; 78 78 79 79 nand@3000000 { 80 + chip-delay = <40>; 80 81 status = "okay"; 81 82 82 83 partition@0 {
+7 -7
arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
··· 96 96 marvell,function = "gpio"; 97 97 }; 98 98 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { 99 - marvell,pins = "mpp44"; 99 + marvell,pins = "mpp46"; 100 100 marvell,function = "gpio"; 101 101 }; 102 102 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { 103 - marvell,pins = "mpp45"; 103 + marvell,pins = "mpp47"; 104 104 marvell,function = "gpio"; 105 105 }; 106 106 ··· 157 157 gpios = <&gpio0 16 0>; 158 158 linux,default-trigger = "default-on"; 159 159 }; 160 - health_led1 { 160 + rebuild_led { 161 + label = "status:white:rebuild_led"; 162 + gpios = <&gpio1 4 0>; 163 + }; 164 + health_led { 161 165 label = "status:red:health_led"; 162 166 gpios = <&gpio1 5 0>; 163 - }; 164 - health_led2 { 165 - label = "status:white:health_led"; 166 - gpios = <&gpio1 4 0>; 167 167 }; 168 168 backup_led { 169 169 label = "status:blue:backup_led";
+7 -2
arch/arm/boot/dts/orion5x.dtsi
··· 13 13 compatible = "marvell,orion5x"; 14 14 interrupt-parent = <&intc>; 15 15 16 + aliases { 17 + gpio0 = &gpio0; 18 + }; 16 19 intc: interrupt-controller { 17 20 compatible = "marvell,orion-intc", "marvell,intc"; 18 21 interrupt-controller; ··· 35 32 #gpio-cells = <2>; 36 33 gpio-controller; 37 34 reg = <0x10100 0x40>; 38 - ngpio = <32>; 35 + ngpios = <32>; 36 + interrupt-controller; 37 + #interrupt-cells = <2>; 39 38 interrupts = <6>, <7>, <8>, <9>; 40 39 }; 41 40 ··· 96 91 reg = <0x90000 0x10000>, 97 92 <0xf2200000 0x800>; 98 93 reg-names = "regs", "sram"; 99 - interrupts = <22>; 94 + interrupts = <28>; 100 95 status = "okay"; 101 96 }; 102 97 };
+1 -1
arch/arm/include/asm/delay.h
··· 24 24 void (*delay)(unsigned long); 25 25 void (*const_udelay)(unsigned long); 26 26 void (*udelay)(unsigned long); 27 - bool const_clock; 27 + unsigned long ticks_per_jiffy; 28 28 } arm_delay_ops; 29 29 30 30 #define __delay(n) arm_delay_ops.delay(n)
+7
arch/arm/include/asm/highmem.h
··· 41 41 #endif 42 42 #endif 43 43 44 + /* 45 + * Needed to be able to broadcast the TLB invalidation for kmap. 46 + */ 47 + #ifdef CONFIG_ARM_ERRATA_798181 48 + #undef ARCH_NEEDS_KMAP_HIGH_GET 49 + #endif 50 + 44 51 #ifdef ARCH_NEEDS_KMAP_HIGH_GET 45 52 extern void *kmap_high_get(struct page *page); 46 53 #else
+2
arch/arm/include/asm/mmu_context.h
··· 27 27 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); 28 28 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) 29 29 30 + DECLARE_PER_CPU(atomic64_t, active_asids); 31 + 30 32 #else /* !CONFIG_CPU_HAS_ASID */ 31 33 32 34 #ifdef CONFIG_MMU
+15
arch/arm/include/asm/tlbflush.h
··· 450 450 isb(); 451 451 } 452 452 453 + #ifdef CONFIG_ARM_ERRATA_798181 454 + static inline void dummy_flush_tlb_a15_erratum(void) 455 + { 456 + /* 457 + * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. 458 + */ 459 + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); 460 + dsb(); 461 + } 462 + #else 463 + static inline void dummy_flush_tlb_a15_erratum(void) 464 + { 465 + } 466 + #endif 467 + 453 468 /* 454 469 * flush_pmd_entry 455 470 *
+12
arch/arm/kernel/entry-common.S
··· 276 276 */ 277 277 278 278 .macro mcount_enter 279 + /* 280 + * This pad compensates for the push {lr} at the call site. Note that we are 281 + * unable to unwind through a function which does not otherwise save its lr. 282 + */ 283 + UNWIND(.pad #4) 279 284 stmdb sp!, {r0-r3, lr} 285 + UNWIND(.save {r0-r3, lr}) 280 286 .endm 281 287 282 288 .macro mcount_get_lr reg ··· 295 289 .endm 296 290 297 291 ENTRY(__gnu_mcount_nc) 292 + UNWIND(.fnstart) 298 293 #ifdef CONFIG_DYNAMIC_FTRACE 299 294 mov ip, lr 300 295 ldmia sp!, {lr} ··· 303 296 #else 304 297 __mcount 305 298 #endif 299 + UNWIND(.fnend) 306 300 ENDPROC(__gnu_mcount_nc) 307 301 308 302 #ifdef CONFIG_DYNAMIC_FTRACE 309 303 ENTRY(ftrace_caller) 304 + UNWIND(.fnstart) 310 305 __ftrace_caller 306 + UNWIND(.fnend) 311 307 ENDPROC(ftrace_caller) 312 308 #endif 313 309 314 310 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 315 311 ENTRY(ftrace_graph_caller) 312 + UNWIND(.fnstart) 316 313 __ftrace_graph_caller 314 + UNWIND(.fnend) 317 315 ENDPROC(ftrace_graph_caller) 318 316 #endif 319 317
+1 -1
arch/arm/kernel/head.S
··· 267 267 addne r6, r6, #1 << SECTION_SHIFT 268 268 strne r6, [r3] 269 269 270 - #if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) 270 + #if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) 271 271 sub r4, r4, #4 @ Fixup page table pointer 272 272 @ for 64-bit descriptors 273 273 #endif
+3 -3
arch/arm/kernel/hw_breakpoint.c
··· 966 966 } 967 967 968 968 if (err) { 969 - pr_warning("CPU %d debug is powered down!\n", cpu); 969 + pr_warn_once("CPU %d debug is powered down!\n", cpu); 970 970 cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); 971 971 return; 972 972 } ··· 987 987 isb(); 988 988 989 989 if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { 990 - pr_warning("CPU %d failed to disable vector catch\n", cpu); 990 + pr_warn_once("CPU %d failed to disable vector catch\n", cpu); 991 991 return; 992 992 } 993 993 ··· 1007 1007 } 1008 1008 1009 1009 if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { 1010 - pr_warning("CPU %d failed to clear debug register pairs\n", cpu); 1010 + pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); 1011 1011 return; 1012 1012 } 1013 1013
+22 -2
arch/arm/kernel/setup.c
··· 353 353 printk("%s", buf); 354 354 } 355 355 356 + static void __init cpuid_init_hwcaps(void) 357 + { 358 + unsigned int divide_instrs; 359 + 360 + if (cpu_architecture() < CPU_ARCH_ARMv7) 361 + return; 362 + 363 + divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; 364 + 365 + switch (divide_instrs) { 366 + case 2: 367 + elf_hwcap |= HWCAP_IDIVA; 368 + case 1: 369 + elf_hwcap |= HWCAP_IDIVT; 370 + } 371 + } 372 + 356 373 static void __init feat_v6_fixup(void) 357 374 { 358 375 int id = read_cpuid_id(); ··· 500 483 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", 501 484 list->elf_name, ENDIANNESS); 502 485 elf_hwcap = list->elf_hwcap; 486 + 487 + cpuid_init_hwcaps(); 488 + 503 489 #ifndef CONFIG_ARM_THUMB 504 - elf_hwcap &= ~HWCAP_THUMB; 490 + elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); 505 491 #endif 506 492 507 493 feat_v6_fixup(); ··· 544 524 size -= start & ~PAGE_MASK; 545 525 bank->start = PAGE_ALIGN(start); 546 526 547 - #ifndef CONFIG_LPAE 527 + #ifndef CONFIG_ARM_LPAE 548 528 if (bank->start + size < bank->start) { 549 529 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " 550 530 "32-bit physical address space\n", (long long)start);
-3
arch/arm/kernel/smp.c
··· 673 673 if (freq->flags & CPUFREQ_CONST_LOOPS) 674 674 return NOTIFY_OK; 675 675 676 - if (arm_delay_ops.const_clock) 677 - return NOTIFY_OK; 678 - 679 676 if (!per_cpu(l_p_j_ref, cpu)) { 680 677 per_cpu(l_p_j_ref, cpu) = 681 678 per_cpu(cpu_data, cpu).loops_per_jiffy;
+66
arch/arm/kernel/smp_tlb.c
··· 12 12 13 13 #include <asm/smp_plat.h> 14 14 #include <asm/tlbflush.h> 15 + #include <asm/mmu_context.h> 15 16 16 17 /**********************************************************************/ 17 18 ··· 70 69 local_flush_bp_all(); 71 70 } 72 71 72 + #ifdef CONFIG_ARM_ERRATA_798181 73 + static int erratum_a15_798181(void) 74 + { 75 + unsigned int midr = read_cpuid_id(); 76 + 77 + /* Cortex-A15 r0p0..r3p2 affected */ 78 + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) 79 + return 0; 80 + return 1; 81 + } 82 + #else 83 + static int erratum_a15_798181(void) 84 + { 85 + return 0; 86 + } 87 + #endif 88 + 89 + static void ipi_flush_tlb_a15_erratum(void *arg) 90 + { 91 + dmb(); 92 + } 93 + 94 + static void broadcast_tlb_a15_erratum(void) 95 + { 96 + if (!erratum_a15_798181()) 97 + return; 98 + 99 + dummy_flush_tlb_a15_erratum(); 100 + smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, 101 + NULL, 1); 102 + } 103 + 104 + static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) 105 + { 106 + int cpu; 107 + cpumask_t mask = { CPU_BITS_NONE }; 108 + 109 + if (!erratum_a15_798181()) 110 + return; 111 + 112 + dummy_flush_tlb_a15_erratum(); 113 + for_each_online_cpu(cpu) { 114 + if (cpu == smp_processor_id()) 115 + continue; 116 + /* 117 + * We only need to send an IPI if the other CPUs are running 118 + * the same ASID as the one being invalidated. There is no 119 + * need for locking around the active_asids check since the 120 + * switch_mm() function has at least one dmb() (as required by 121 + * this workaround) in case a context switch happens on 122 + * another CPU after the condition below. 
123 + */ 124 + if (atomic64_read(&mm->context.id) == 125 + atomic64_read(&per_cpu(active_asids, cpu))) 126 + cpumask_set_cpu(cpu, &mask); 127 + } 128 + smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); 129 + } 130 + 73 131 void flush_tlb_all(void) 74 132 { 75 133 if (tlb_ops_need_broadcast()) 76 134 on_each_cpu(ipi_flush_tlb_all, NULL, 1); 77 135 else 78 136 local_flush_tlb_all(); 137 + broadcast_tlb_a15_erratum(); 79 138 } 80 139 81 140 void flush_tlb_mm(struct mm_struct *mm) ··· 144 83 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); 145 84 else 146 85 local_flush_tlb_mm(mm); 86 + broadcast_tlb_mm_a15_erratum(mm); 147 87 } 148 88 149 89 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) ··· 157 95 &ta, 1); 158 96 } else 159 97 local_flush_tlb_page(vma, uaddr); 98 + broadcast_tlb_mm_a15_erratum(vma->vm_mm); 160 99 } 161 100 162 101 void flush_tlb_kernel_page(unsigned long kaddr) ··· 168 105 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); 169 106 } else 170 107 local_flush_tlb_kernel_page(kaddr); 108 + broadcast_tlb_a15_erratum(); 171 109 } 172 110 173 111 void flush_tlb_range(struct vm_area_struct *vma, ··· 183 119 &ta, 1); 184 120 } else 185 121 local_flush_tlb_range(vma, start, end); 122 + broadcast_tlb_mm_a15_erratum(vma->vm_mm); 186 123 } 187 124 188 125 void flush_tlb_kernel_range(unsigned long start, unsigned long end) ··· 195 130 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); 196 131 } else 197 132 local_flush_tlb_kernel_range(start, end); 133 + broadcast_tlb_a15_erratum(); 198 134 } 199 135 200 136 void flush_bp_all(void)
+14 -21
arch/arm/kvm/vgic.c
··· 883 883 lr, irq, vgic_cpu->vgic_lr[lr]); 884 884 BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); 885 885 vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; 886 - 887 - goto out; 886 + return true; 888 887 } 889 888 890 889 /* Try to use another LR for this interrupt */ ··· 897 898 vgic_cpu->vgic_irq_lr_map[irq] = lr; 898 899 set_bit(lr, vgic_cpu->lr_used); 899 900 900 - out: 901 901 if (!vgic_irq_is_edge(vcpu, irq)) 902 902 vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; 903 903 ··· 1016 1018 1017 1019 kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); 1018 1020 1019 - /* 1020 - * We do not need to take the distributor lock here, since the only 1021 - * action we perform is clearing the irq_active_bit for an EOIed 1022 - * level interrupt. There is a potential race with 1023 - * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we 1024 - * check if the interrupt is already active. Two possibilities: 1025 - * 1026 - * - The queuing is occurring on the same vcpu: cannot happen, 1027 - * as we're already in the context of this vcpu, and 1028 - * executing the handler 1029 - * - The interrupt has been migrated to another vcpu, and we 1030 - * ignore this interrupt for this run. Big deal. It is still 1031 - * pending though, and will get considered when this vcpu 1032 - * exits. 1033 - */ 1034 1021 if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { 1035 1022 /* 1036 1023 * Some level interrupts have been EOIed. Clear their ··· 1037 1054 } else { 1038 1055 vgic_cpu_irq_clear(vcpu, irq); 1039 1056 } 1057 + 1058 + /* 1059 + * Despite being EOIed, the LR may not have 1060 + * been marked as empty. 1061 + */ 1062 + set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); 1063 + vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; 1040 1064 } 1041 1065 } 1042 1066 ··· 1054 1064 } 1055 1065 1056 1066 /* 1057 - * Sync back the VGIC state after a guest run. We do not really touch 1058 - * the distributor here (the irq_pending_on_cpu bit is safe to set), 1059 - * so there is no need for taking its lock. 
1067 + * Sync back the VGIC state after a guest run. The distributor lock is 1068 + * needed so we don't get preempted in the middle of the state processing. 1060 1069 */ 1061 1070 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) 1062 1071 { ··· 1101 1112 1102 1113 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) 1103 1114 { 1115 + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1116 + 1104 1117 if (!irqchip_in_kernel(vcpu->kvm)) 1105 1118 return; 1106 1119 1120 + spin_lock(&dist->lock); 1107 1121 __kvm_vgic_sync_hwstate(vcpu); 1122 + spin_unlock(&dist->lock); 1108 1123 } 1109 1124 1110 1125 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+5 -3
arch/arm/lib/delay.c
··· 58 58 static void __timer_const_udelay(unsigned long xloops) 59 59 { 60 60 unsigned long long loops = xloops; 61 - loops *= loops_per_jiffy; 61 + loops *= arm_delay_ops.ticks_per_jiffy; 62 62 __timer_delay(loops >> UDELAY_SHIFT); 63 63 } 64 64 ··· 73 73 pr_info("Switching to timer-based delay loop\n"); 74 74 delay_timer = timer; 75 75 lpj_fine = timer->freq / HZ; 76 - loops_per_jiffy = lpj_fine; 76 + 77 + /* cpufreq may scale loops_per_jiffy, so keep a private copy */ 78 + arm_delay_ops.ticks_per_jiffy = lpj_fine; 77 79 arm_delay_ops.delay = __timer_delay; 78 80 arm_delay_ops.const_udelay = __timer_const_udelay; 79 81 arm_delay_ops.udelay = __timer_udelay; 80 - arm_delay_ops.const_clock = true; 82 + 81 83 delay_calibrated = true; 82 84 } else { 83 85 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
+3 -13
arch/arm/mach-cns3xxx/core.c
··· 22 22 23 23 static struct map_desc cns3xxx_io_desc[] __initdata = { 24 24 { 25 - .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, 26 - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), 27 - .length = SZ_4K, 28 - .type = MT_DEVICE, 29 - }, { 30 - .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, 31 - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), 32 - .length = SZ_4K, 33 - .type = MT_DEVICE, 34 - }, { 35 - .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, 36 - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), 37 - .length = SZ_4K, 25 + .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, 26 + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), 27 + .length = SZ_8K, 38 28 .type = MT_DEVICE, 39 29 }, { 40 30 .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
+8 -8
arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
··· 94 94 #define RTC_INTR_STS_OFFSET 0x34 95 95 96 96 #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ 97 - #define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ 97 + #define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ 98 98 99 99 #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ 100 - #define CNS3XXX_PM_BASE_VIRT 0xFFF08000 100 + #define CNS3XXX_PM_BASE_VIRT 0xFB001000 101 101 102 102 #define PM_CLK_GATE_OFFSET 0x00 103 103 #define PM_SOFT_RST_OFFSET 0x04 ··· 109 109 #define PM_PLL_HM_PD_OFFSET 0x1C 110 110 111 111 #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ 112 - #define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 112 + #define CNS3XXX_UART0_BASE_VIRT 0xFB002000 113 113 114 114 #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ 115 115 #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 ··· 130 130 #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 131 131 132 132 #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ 133 - #define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 133 + #define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 134 134 135 135 #define TIMER1_COUNTER_OFFSET 0x00 136 136 #define TIMER1_AUTO_RELOAD_OFFSET 0x04 ··· 227 227 * Testchip peripheral and fpga gic regions 228 228 */ 229 229 #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ 230 - #define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 230 + #define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 231 231 232 232 #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ 233 - #define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 233 + #define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100) 234 234 235 235 #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 236 - #define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 236 + #define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) 237 237 238 238 #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ 239 - #define 
CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 239 + #define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) 240 240 241 241 #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ 242 242 #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
+7 -3
arch/arm/mach-ep93xx/include/mach/uncompress.h
··· 47 47 48 48 static inline void putc(int c) 49 49 { 50 - /* Transmit fifo not full? */ 51 - while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) 52 - ; 50 + int i; 51 + 52 + for (i = 0; i < 10000; i++) { 53 + /* Transmit fifo not full? */ 54 + if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) 55 + break; 56 + } 53 57 54 58 __raw_writeb(c, PHYS_UART_DATA); 55 59 }
+4 -6
arch/arm/mach-highbank/hotplug.c
··· 28 28 */ 29 29 void __ref highbank_cpu_die(unsigned int cpu) 30 30 { 31 - flush_cache_all(); 32 - 33 31 highbank_set_cpu_jump(cpu, phys_to_virt(0)); 32 + 33 + flush_cache_louis(); 34 34 highbank_set_core_pwr(); 35 35 36 - cpu_do_idle(); 37 - 38 - /* We should never return from idle */ 39 - panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); 36 + while (1) 37 + cpu_do_idle(); 40 38 }
+2
arch/arm/mach-imx/clk-imx35.c
··· 257 257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); 258 258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); 259 259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); 260 + clk_register_clkdev(clk[admux_gate], "audmux", NULL); 260 261 261 262 clk_prepare_enable(clk[spba_gate]); 262 263 clk_prepare_enable(clk[gpio1_gate]); ··· 266 265 clk_prepare_enable(clk[iim_gate]); 267 266 clk_prepare_enable(clk[emi_gate]); 268 267 clk_prepare_enable(clk[max_gate]); 268 + clk_prepare_enable(clk[iomuxc_gate]); 269 269 270 270 /* 271 271 * SCC is needed to boot via mmc after a watchdog reset. The clock code
+1 -2
arch/arm/mach-imx/clk-imx6q.c
··· 115 115 static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; 116 116 static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; 117 117 static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; 118 - static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; 118 + static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; 119 119 static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; 120 120 static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 121 121 static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; ··· 443 443 444 444 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); 445 445 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); 446 - clk_register_clkdev(clk[twd], NULL, "smp_twd"); 447 446 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); 448 447 clk_register_clkdev(clk[ahb], "ahb", NULL); 449 448 clk_register_clkdev(clk[cko1], "cko1", NULL);
+2
arch/arm/mach-imx/common.h
··· 110 110 111 111 extern void imx_enable_cpu(int cpu, bool enable); 112 112 extern void imx_set_cpu_jump(int cpu, void *jump_addr); 113 + extern u32 imx_get_cpu_arg(int cpu); 114 + extern void imx_set_cpu_arg(int cpu, u32 arg); 113 115 extern void v7_cpu_resume(void); 114 116 #ifdef CONFIG_SMP 115 117 extern void v7_secondary_startup(void);
+12
arch/arm/mach-imx/hotplug.c
··· 46 46 void imx_cpu_die(unsigned int cpu) 47 47 { 48 48 cpu_enter_lowpower(); 49 + /* 50 + * We use the cpu jumping argument register to sync with 51 + * imx_cpu_kill() which is running on cpu0 and waiting for 52 + * the register being cleared to kill the cpu. 53 + */ 54 + imx_set_cpu_arg(cpu, ~0); 49 55 cpu_do_idle(); 50 56 } 51 57 52 58 int imx_cpu_kill(unsigned int cpu) 53 59 { 60 + unsigned long timeout = jiffies + msecs_to_jiffies(50); 61 + 62 + while (imx_get_cpu_arg(cpu) == 0) 63 + if (time_after(jiffies, timeout)) 64 + return 0; 54 65 imx_enable_cpu(cpu, false); 66 + imx_set_cpu_arg(cpu, 0); 55 67 return 1; 56 68 }
+12
arch/arm/mach-imx/src.c
··· 43 43 src_base + SRC_GPR1 + cpu * 8); 44 44 } 45 45 46 + u32 imx_get_cpu_arg(int cpu) 47 + { 48 + cpu = cpu_logical_map(cpu); 49 + return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); 50 + } 51 + 52 + void imx_set_cpu_arg(int cpu, u32 arg) 53 + { 54 + cpu = cpu_logical_map(cpu); 55 + writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); 56 + } 57 + 46 58 void imx_src_prepare_restart(void) 47 59 { 48 60 u32 val;
+6 -1
arch/arm/mach-kirkwood/board-iomega_ix2_200.c
··· 20 20 .duplex = DUPLEX_FULL, 21 21 }; 22 22 23 + static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { 24 + .phy_addr = MV643XX_ETH_PHY_ADDR(11), 25 + }; 26 + 23 27 void __init iomega_ix2_200_init(void) 24 28 { 25 29 /* 26 30 * Basic setup. Needs to be called early. 27 31 */ 28 - kirkwood_ge01_init(&iomega_ix2_200_ge00_data); 32 + kirkwood_ge00_init(&iomega_ix2_200_ge00_data); 33 + kirkwood_ge01_init(&iomega_ix2_200_ge01_data); 29 34 }
+2
arch/arm/mach-kirkwood/guruplug-setup.c
··· 53 53 54 54 static struct mvsdio_platform_data guruplug_mvsdio_data = { 55 55 /* unfortunately the CD signal has not been connected */ 56 + .gpio_card_detect = -1, 57 + .gpio_write_protect = -1, 56 58 }; 57 59 58 60 static struct gpio_led guruplug_led_pins[] = {
+1
arch/arm/mach-kirkwood/openrd-setup.c
··· 55 55 56 56 static struct mvsdio_platform_data openrd_mvsdio_data = { 57 57 .gpio_card_detect = 29, /* MPP29 used as SD card detect */ 58 + .gpio_write_protect = -1, 58 59 }; 59 60 60 61 static unsigned int openrd_mpp_config[] __initdata = {
+1
arch/arm/mach-kirkwood/rd88f6281-setup.c
··· 69 69 70 70 static struct mvsdio_platform_data rd88f6281_mvsdio_data = { 71 71 .gpio_card_detect = 28, 72 + .gpio_write_protect = -1, 72 73 }; 73 74 74 75 static unsigned int rd88f6281_mpp_config[] __initdata = {
+4 -1
arch/arm/mach-msm/timer.c
··· 62 62 { 63 63 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); 64 64 65 - writel_relaxed(0, event_base + TIMER_CLEAR); 65 + ctrl &= ~TIMER_ENABLE_EN; 66 + writel_relaxed(ctrl, event_base + TIMER_ENABLE); 67 + 68 + writel_relaxed(ctrl, event_base + TIMER_CLEAR); 66 69 writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); 67 70 writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); 68 71 return 0;
+10 -14
arch/arm/mach-mvebu/irq-armada-370-xp.c
··· 44 44 45 45 #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) 46 46 47 + #define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) 48 + 47 49 #define ACTIVE_DOORBELLS (8) 48 50 49 51 static DEFINE_RAW_SPINLOCK(irq_controller_lock); ··· 61 59 */ 62 60 static void armada_370_xp_irq_mask(struct irq_data *d) 63 61 { 64 - #ifdef CONFIG_SMP 65 62 irq_hw_number_t hwirq = irqd_to_hwirq(d); 66 63 67 - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) 64 + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) 68 65 writel(hwirq, main_int_base + 69 66 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); 70 67 else 71 68 writel(hwirq, per_cpu_int_base + 72 69 ARMADA_370_XP_INT_SET_MASK_OFFS); 73 - #else 74 - writel(irqd_to_hwirq(d), 75 - per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); 76 - #endif 77 70 } 78 71 79 72 static void armada_370_xp_irq_unmask(struct irq_data *d) 80 73 { 81 - #ifdef CONFIG_SMP 82 74 irq_hw_number_t hwirq = irqd_to_hwirq(d); 83 75 84 - if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) 76 + if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) 85 77 writel(hwirq, main_int_base + 86 78 ARMADA_370_XP_INT_SET_ENABLE_OFFS); 87 79 else 88 80 writel(hwirq, per_cpu_int_base + 89 81 ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 90 - #else 91 - writel(irqd_to_hwirq(d), 92 - per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 93 - #endif 94 82 } 95 83 96 84 #ifdef CONFIG_SMP ··· 136 144 unsigned int virq, irq_hw_number_t hw) 137 145 { 138 146 armada_370_xp_irq_mask(irq_get_irq_data(virq)); 139 - writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); 147 + if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) 148 + writel(hw, per_cpu_int_base + 149 + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 150 + else 151 + writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); 140 152 irq_set_status_flags(virq, IRQ_LEVEL); 141 153 142 - if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { 154 + if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { 143 155 irq_set_percpu_devid(virq); 144 156 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, 145 157 
handle_percpu_devid_irq);
+1 -11
arch/arm/mach-omap1/clock_data.c
··· 543 543 /* Direct from ULPD, no parent */ 544 544 .rate = 48000000, 545 545 .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), 546 - .enable_bit = USB_REQ_EN_SHIFT, 547 - }; 548 - 549 - static struct clk usb_dc_ck7xx = { 550 - .name = "usb_dc_ck", 551 - .ops = &clkops_generic, 552 - /* Direct from ULPD, no parent */ 553 - .rate = 48000000, 554 - .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), 555 546 .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, 556 547 }; 557 548 ··· 718 727 CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), 719 728 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), 720 729 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), 721 - CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), 722 - CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX), 730 + CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX), 723 731 CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), 724 732 CLK(NULL, "mclk", &mclk_16xx, CK_16XX), 725 733 CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
+20
arch/arm/mach-omap2/cclock44xx_data.c
··· 52 52 */ 53 53 #define OMAP4_DPLL_ABE_DEFFREQ 98304000 54 54 55 + /* 56 + * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section 57 + * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred 58 + * locked frequency for the USB DPLL is 960MHz. 59 + */ 60 + #define OMAP4_DPLL_USB_DEFFREQ 960000000 61 + 55 62 /* Root clocks */ 56 63 57 64 DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); ··· 1018 1011 OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, 1019 1012 hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); 1020 1013 1014 + DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, 1015 + OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, 1016 + OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); 1017 + 1021 1018 DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, 1022 1019 OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, 1023 1020 OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); ··· 1560 1549 CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk), 1561 1550 CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk), 1562 1551 CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk), 1552 + CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m), 1563 1553 CLK(NULL, "sha2md5_fck", &sha2md5_fck), 1564 1554 CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1), 1565 1555 CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0), ··· 1717 1705 rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); 1718 1706 if (rc) 1719 1707 pr_err("%s: failed to configure ABE DPLL!\n", __func__); 1708 + 1709 + /* 1710 + * Lock USB DPLL on OMAP4 devices so that the L3INIT power 1711 + * domain can transition to retention state when not in use. 1712 + */ 1713 + rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); 1714 + if (rc) 1715 + pr_err("%s: failed to configure USB DPLL!\n", __func__); 1720 1716 1721 1717 return 0; 1722 1718 }
+3
arch/arm/mach-omap2/common.h
··· 293 293 struct omap_hwmod; 294 294 extern int omap_dss_reset(struct omap_hwmod *); 295 295 296 + /* SoC specific clock initializer */ 297 + extern int (*omap_clk_init)(void); 298 + 296 299 #endif /* __ASSEMBLER__ */ 297 300 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
+12 -6
arch/arm/mach-omap2/io.c
··· 55 55 #include "prm44xx.h" 56 56 57 57 /* 58 + * omap_clk_init: points to a function that does the SoC-specific 59 + * clock initializations 60 + */ 61 + int (*omap_clk_init)(void); 62 + 63 + /* 58 64 * The machine specific code may provide the extra mapping besides the 59 65 * default mapping provided here. 60 66 */ ··· 403 397 omap242x_clockdomains_init(); 404 398 omap2420_hwmod_init(); 405 399 omap_hwmod_init_postsetup(); 406 - omap2420_clk_init(); 400 + omap_clk_init = omap2420_clk_init; 407 401 } 408 402 409 403 void __init omap2420_init_late(void) ··· 433 427 omap243x_clockdomains_init(); 434 428 omap2430_hwmod_init(); 435 429 omap_hwmod_init_postsetup(); 436 - omap2430_clk_init(); 430 + omap_clk_init = omap2430_clk_init; 437 431 } 438 432 439 433 void __init omap2430_init_late(void) ··· 468 462 omap3xxx_clockdomains_init(); 469 463 omap3xxx_hwmod_init(); 470 464 omap_hwmod_init_postsetup(); 471 - omap3xxx_clk_init(); 465 + omap_clk_init = omap3xxx_clk_init; 472 466 } 473 467 474 468 void __init omap3430_init_early(void) ··· 506 500 omap3xxx_clockdomains_init(); 507 501 omap3xxx_hwmod_init(); 508 502 omap_hwmod_init_postsetup(); 509 - omap3xxx_clk_init(); 503 + omap_clk_init = omap3xxx_clk_init; 510 504 } 511 505 512 506 void __init omap3_init_late(void) ··· 574 568 am33xx_clockdomains_init(); 575 569 am33xx_hwmod_init(); 576 570 omap_hwmod_init_postsetup(); 577 - am33xx_clk_init(); 571 + omap_clk_init = am33xx_clk_init; 578 572 } 579 573 #endif 580 574 ··· 599 593 omap44xx_clockdomains_init(); 600 594 omap44xx_hwmod_init(); 601 595 omap_hwmod_init_postsetup(); 602 - omap4xxx_clk_init(); 596 + omap_clk_init = omap4xxx_clk_init; 603 597 } 604 598 605 599 void __init omap4430_init_late(void)
+5 -2
arch/arm/mach-omap2/omap_hwmod.c
··· 1368 1368 } 1369 1369 1370 1370 if (sf & SYSC_HAS_MIDLEMODE) { 1371 - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { 1371 + if (oh->flags & HWMOD_FORCE_MSTANDBY) { 1372 + idlemode = HWMOD_IDLEMODE_FORCE; 1373 + } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { 1372 1374 idlemode = HWMOD_IDLEMODE_NO; 1373 1375 } else { 1374 1376 if (sf & SYSC_HAS_ENAWAKEUP) ··· 1442 1440 } 1443 1441 1444 1442 if (sf & SYSC_HAS_MIDLEMODE) { 1445 - if (oh->flags & HWMOD_SWSUP_MSTANDBY) { 1443 + if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || 1444 + (oh->flags & HWMOD_FORCE_MSTANDBY)) { 1446 1445 idlemode = HWMOD_IDLEMODE_FORCE; 1447 1446 } else { 1448 1447 if (sf & SYSC_HAS_ENAWAKEUP)
+7 -2
arch/arm/mach-omap2/omap_hwmod.h
··· 427 427 * 428 428 * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out 429 429 * of idle, rather than relying on module smart-idle 430 - * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out 431 - * of standby, rather than relying on module smart-standby 430 + * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and 431 + * out of standby, rather than relying on module smart-standby 432 432 * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for 433 433 * SDRAM controller, etc. XXX probably belongs outside the main hwmod file 434 434 * XXX Should be HWMOD_SETUP_NO_RESET ··· 459 459 * correctly, or this is being abused to deal with some PM latency 460 460 * issues -- but we're currently suffering from a shortage of 461 461 * folks who are able to track these issues down properly. 462 + * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device 463 + * is kept in force-standby mode. Failing to do so causes PM problems 464 + * with musb on OMAP3630 at least. Note that musb has a dedicated register 465 + * to control MSTANDBY signal when MIDLEMODE is set to force-standby. 462 466 */ 463 467 #define HWMOD_SWSUP_SIDLE (1 << 0) 464 468 #define HWMOD_SWSUP_MSTANDBY (1 << 1) ··· 475 471 #define HWMOD_16BIT_REG (1 << 8) 476 472 #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) 477 473 #define HWMOD_BLOCK_WFI (1 << 10) 474 + #define HWMOD_FORCE_MSTANDBY (1 << 11) 478 475 479 476 /* 480 477 * omap_hwmod._int_flags definitions
+6 -1
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
··· 1707 1707 * Erratum ID: i479 idle_req / idle_ack mechanism potentially 1708 1708 * broken when autoidle is enabled 1709 1709 * workaround is to disable the autoidle bit at module level. 1710 + * 1711 + * Enabling the device in any other MIDLEMODE setting but force-idle 1712 + * causes core_pwrdm not enter idle states at least on OMAP3630. 1713 + * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY 1714 + * signal when MIDLEMODE is set to force-idle. 1710 1715 */ 1711 1716 .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE 1712 - | HWMOD_SWSUP_MSTANDBY, 1717 + | HWMOD_FORCE_MSTANDBY, 1713 1718 }; 1714 1719 1715 1720 /* usb_otg_hs */
+11 -1
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
··· 2719 2719 .name = "ocp2scp_usb_phy", 2720 2720 .class = &omap44xx_ocp2scp_hwmod_class, 2721 2721 .clkdm_name = "l3_init_clkdm", 2722 - .main_clk = "func_48m_fclk", 2722 + /* 2723 + * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP 2724 + * block as an "optional clock," and normally should never be 2725 + * specified as the main_clk for an OMAP IP block. However it 2726 + * turns out that this clock is actually the main clock for 2727 + * the ocp2scp_usb_phy IP block: 2728 + * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html 2729 + * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems 2730 + * to be the best workaround. 2731 + */ 2732 + .main_clk = "ocp2scp_usb_phy_phy_48m", 2723 2733 .prcm = { 2724 2734 .omap4 = { 2725 2735 .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET,
+4
arch/arm/mach-omap2/timer.c
··· 547 547 clksrc_nr, clksrc_src) \ 548 548 void __init omap##name##_gptimer_timer_init(void) \ 549 549 { \ 550 + if (omap_clk_init) \ 551 + omap_clk_init(); \ 550 552 omap_dmtimer_init(); \ 551 553 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ 552 554 omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ ··· 558 556 clksrc_nr, clksrc_src) \ 559 557 void __init omap##name##_sync32k_timer_init(void) \ 560 558 { \ 559 + if (omap_clk_init) \ 560 + omap_clk_init(); \ 561 561 omap_dmtimer_init(); \ 562 562 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ 563 563 /* Enable the use of clocksource="gp_timer" kernel parameter */ \
+1 -3
arch/arm/mach-s3c24xx/include/mach/irqs.h
··· 188 188 189 189 #if defined(CONFIG_CPU_S3C2416) 190 190 #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) 191 - #elif defined(CONFIG_CPU_S3C2443) 192 - #define NR_IRQS (IRQ_S3C2443_AC97+1) 193 191 #else 194 - #define NR_IRQS (IRQ_S3C2440_AC97+1) 192 + #define NR_IRQS (IRQ_S3C2443_AC97 + 1) 195 193 #endif 196 194 197 195 /* compatibility define. */
+1 -1
arch/arm/mach-s3c24xx/irq.c
··· 499 499 base = (void *)0xfd000000; 500 500 501 501 intc->reg_mask = base + 0xa4; 502 - intc->reg_pending = base + 0x08; 502 + intc->reg_pending = base + 0xa8; 503 503 irq_num = 20; 504 504 irq_start = S3C2410_IRQ(32); 505 505 irq_offset = 4;
-1
arch/arm/mach-ux500/board-mop500-sdi.c
··· 81 81 #endif 82 82 83 83 struct mmci_platform_data mop500_sdi0_data = { 84 - .ios_handler = mop500_sdi0_ios_handler, 85 84 .ocr_mask = MMC_VDD_29_30, 86 85 .f_max = 50000000, 87 86 .capabilities = MMC_CAP_4_BIT_DATA |
+12
arch/arm/mach-ux500/board-mop500.c
··· 12 12 #include <linux/init.h> 13 13 #include <linux/interrupt.h> 14 14 #include <linux/platform_device.h> 15 + #include <linux/clk.h> 15 16 #include <linux/io.h> 16 17 #include <linux/i2c.h> 17 18 #include <linux/platform_data/i2c-nomadik.h> ··· 440 439 regulator_put(prox_regulator); 441 440 } 442 441 442 + void mop500_snowball_ethernet_clock_enable(void) 443 + { 444 + struct clk *clk; 445 + 446 + clk = clk_get_sys("fsmc", NULL); 447 + if (!IS_ERR(clk)) 448 + clk_prepare_enable(clk); 449 + } 450 + 443 451 static struct cryp_platform_data u8500_cryp1_platform_data = { 444 452 .mem_to_engine = { 445 453 .dir = STEDMA40_MEM_TO_PERIPH, ··· 692 682 mop500_spi_init(parent); 693 683 mop500_audio_init(parent); 694 684 mop500_uart_init(parent); 685 + 686 + mop500_snowball_ethernet_clock_enable(); 695 687 696 688 /* This board has full regulator constraints */ 697 689 regulator_has_full_constraints();
+1
arch/arm/mach-ux500/board-mop500.h
··· 104 104 void __init snowball_pinmaps_init(void); 105 105 void __init hrefv60_pinmaps_init(void); 106 106 void mop500_audio_init(struct device *parent); 107 + void mop500_snowball_ethernet_clock_enable(void); 107 108 108 109 int __init mop500_uib_init(void); 109 110 void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
+3 -2
arch/arm/mach-ux500/cpu-db8500.c
··· 312 312 /* Pinmaps must be in place before devices register */ 313 313 if (of_machine_is_compatible("st-ericsson,mop500")) 314 314 mop500_pinmaps_init(); 315 - else if (of_machine_is_compatible("calaosystems,snowball-a9500")) 315 + else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { 316 316 snowball_pinmaps_init(); 317 - else if (of_machine_is_compatible("st-ericsson,hrefv60+")) 317 + mop500_snowball_ethernet_clock_enable(); 318 + } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) 318 319 hrefv60_pinmaps_init(); 319 320 else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} 320 321 /* TODO: Add pinmaps for ccu9540 board. */
+4 -7
arch/arm/mm/cache-l2x0.c
··· 299 299 int lockregs; 300 300 int i; 301 301 302 - switch (cache_id) { 302 + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 303 303 case L2X0_CACHE_ID_PART_L310: 304 304 lockregs = 8; 305 305 break; ··· 333 333 if (cache_id_part_number_from_dt) 334 334 cache_id = cache_id_part_number_from_dt; 335 335 else 336 - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) 337 - & L2X0_CACHE_ID_PART_MASK; 336 + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 338 337 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 339 338 340 339 aux &= aux_mask; 341 340 aux |= aux_val; 342 341 343 342 /* Determine the number of ways */ 344 - switch (cache_id) { 343 + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 345 344 case L2X0_CACHE_ID_PART_L310: 346 345 if (aux & (1 << 16)) 347 346 ways = 16; ··· 724 725 .flush_all = l2x0_flush_all, 725 726 .inv_all = l2x0_inv_all, 726 727 .disable = l2x0_disable, 727 - .set_debug = pl310_set_debug, 728 728 }, 729 729 }; 730 730 ··· 812 814 data->save(); 813 815 814 816 of_init = true; 815 - l2x0_init(l2x0_base, aux_val, aux_mask); 816 - 817 817 memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); 818 + l2x0_init(l2x0_base, aux_val, aux_mask); 818 819 819 820 return 0; 820 821 }
+2 -1
arch/arm/mm/context.c
··· 48 48 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); 49 49 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); 50 50 51 - static DEFINE_PER_CPU(atomic64_t, active_asids); 51 + DEFINE_PER_CPU(atomic64_t, active_asids); 52 52 static DEFINE_PER_CPU(u64, reserved_asids); 53 53 static cpumask_t tlb_flush_pending; 54 54 ··· 215 215 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { 216 216 local_flush_bp_all(); 217 217 local_flush_tlb_all(); 218 + dummy_flush_tlb_a15_erratum(); 218 219 } 219 220 220 221 atomic64_set(&per_cpu(active_asids, cpu), asid);
+48 -27
arch/arm/mm/mmu.c
··· 598 598 } while (pte++, addr += PAGE_SIZE, addr != end); 599 599 } 600 600 601 - static void __init alloc_init_section(pud_t *pud, unsigned long addr, 601 + static void __init map_init_section(pmd_t *pmd, unsigned long addr, 602 + unsigned long end, phys_addr_t phys, 603 + const struct mem_type *type) 604 + { 605 + #ifndef CONFIG_ARM_LPAE 606 + /* 607 + * In classic MMU format, puds and pmds are folded in to 608 + * the pgds. pmd_offset gives the PGD entry. PGDs refer to a 609 + * group of L1 entries making up one logical pointer to 610 + * an L2 table (2MB), where as PMDs refer to the individual 611 + * L1 entries (1MB). Hence increment to get the correct 612 + * offset for odd 1MB sections. 613 + * (See arch/arm/include/asm/pgtable-2level.h) 614 + */ 615 + if (addr & SECTION_SIZE) 616 + pmd++; 617 + #endif 618 + do { 619 + *pmd = __pmd(phys | type->prot_sect); 620 + phys += SECTION_SIZE; 621 + } while (pmd++, addr += SECTION_SIZE, addr != end); 622 + 623 + flush_pmd_entry(pmd); 624 + } 625 + 626 + static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, 602 627 unsigned long end, phys_addr_t phys, 603 628 const struct mem_type *type) 604 629 { 605 630 pmd_t *pmd = pmd_offset(pud, addr); 631 + unsigned long next; 606 632 607 - /* 608 - * Try a section mapping - end, addr and phys must all be aligned 609 - * to a section boundary. Note that PMDs refer to the individual 610 - * L1 entries, whereas PGDs refer to a group of L1 entries making 611 - * up one logical pointer to an L2 table. 
612 - */ 613 - if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { 614 - pmd_t *p = pmd; 615 - 616 - #ifndef CONFIG_ARM_LPAE 617 - if (addr & SECTION_SIZE) 618 - pmd++; 619 - #endif 620 - 621 - do { 622 - *pmd = __pmd(phys | type->prot_sect); 623 - phys += SECTION_SIZE; 624 - } while (pmd++, addr += SECTION_SIZE, addr != end); 625 - 626 - flush_pmd_entry(p); 627 - } else { 633 + do { 628 634 /* 629 - * No need to loop; pte's aren't interested in the 630 - * individual L1 entries. 635 + * With LPAE, we must loop over to map 636 + * all the pmds for the given range. 631 637 */ 632 - alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); 633 - } 638 + next = pmd_addr_end(addr, end); 639 + 640 + /* 641 + * Try a section mapping - addr, next and phys must all be 642 + * aligned to a section boundary. 643 + */ 644 + if (type->prot_sect && 645 + ((addr | next | phys) & ~SECTION_MASK) == 0) { 646 + map_init_section(pmd, addr, next, phys, type); 647 + } else { 648 + alloc_init_pte(pmd, addr, next, 649 + __phys_to_pfn(phys), type); 650 + } 651 + 652 + phys += next - addr; 653 + 654 + } while (pmd++, addr = next, addr != end); 634 655 } 635 656 636 657 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, ··· 662 641 663 642 do { 664 643 next = pud_addr_end(addr, end); 665 - alloc_init_section(pud, addr, next, phys, type); 644 + alloc_init_pmd(pud, addr, next, phys, type); 666 645 phys += next - addr; 667 646 } while (pud++, addr = next, addr != end); 668 647 }
+17 -2
arch/arm/mm/proc-v7.S
··· 420 420 __v7_ca7mp_proc_info: 421 421 .long 0x410fc070 422 422 .long 0xff0ffff0 423 - __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV 423 + __v7_proc __v7_ca7mp_setup 424 424 .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info 425 425 426 426 /* ··· 430 430 __v7_ca15mp_proc_info: 431 431 .long 0x410fc0f0 432 432 .long 0xff0ffff0 433 - __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV 433 + __v7_proc __v7_ca15mp_setup 434 434 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info 435 + 436 + /* 437 + * Qualcomm Inc. Krait processors. 438 + */ 439 + .type __krait_proc_info, #object 440 + __krait_proc_info: 441 + .long 0x510f0400 @ Required ID value 442 + .long 0xff0ffc00 @ Mask for ID 443 + /* 444 + * Some Krait processors don't indicate support for SDIV and UDIV 445 + * instructions in the ARM instruction set, even though they actually 446 + * do support them. 447 + */ 448 + __v7_proc __v7_setup, hwcaps = HWCAP_IDIV 449 + .size __krait_proc_info, . - __krait_proc_info 435 450 436 451 /* 437 452 * Match any ARMv7 processor core.
+3 -4
arch/mips/Kconfig
··· 18 18 select HAVE_KRETPROBES 19 19 select HAVE_DEBUG_KMEMLEAK 20 20 select ARCH_BINFMT_ELF_RANDOMIZE_PIE 21 - select HAVE_ARCH_TRANSPARENT_HUGEPAGE 21 + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT 22 22 select RTC_LIB if !MACH_LOONGSON 23 23 select GENERIC_ATOMIC64 if !64BIT 24 24 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE ··· 657 657 bool "SNI RM200/300/400" 658 658 select FW_ARC if CPU_LITTLE_ENDIAN 659 659 select FW_ARC32 if CPU_LITTLE_ENDIAN 660 - select SNIPROM if CPU_BIG_ENDIAN 660 + select FW_SNIPROM if CPU_BIG_ENDIAN 661 661 select ARCH_MAY_HAVE_PC_FDC 662 662 select BOOT_ELF32 663 663 select CEVT_R4K ··· 1144 1144 config FW_ARC32 1145 1145 bool 1146 1146 1147 - config SNIPROM 1147 + config FW_SNIPROM 1148 1148 bool 1149 1149 1150 1150 config BOOT_ELF32 ··· 1493 1493 select CPU_SUPPORTS_32BIT_KERNEL 1494 1494 select CPU_SUPPORTS_64BIT_KERNEL 1495 1495 select CPU_SUPPORTS_HIGHMEM 1496 - select CPU_HAS_LLSC 1497 1496 select WEAK_ORDERING 1498 1497 select WEAK_REORDERING_BEYOND_LLSC 1499 1498 select CPU_HAS_PREFETCH
+1 -4
arch/mips/bcm63xx/boards/board_bcm963xx.c
··· 745 745 strcpy(cfe_version, "unknown"); 746 746 printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); 747 747 748 - if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { 749 - printk(KERN_ERR PFX "invalid nvram checksum\n"); 750 - return; 751 - } 748 + bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); 752 749 753 750 board_name = bcm63xx_nvram_get_name(); 754 751 /* find board by name */
+3 -4
arch/mips/bcm63xx/nvram.c
··· 38 38 static struct bcm963xx_nvram nvram; 39 39 static int mac_addr_used; 40 40 41 - int __init bcm63xx_nvram_init(void *addr) 41 + void __init bcm63xx_nvram_init(void *addr) 42 42 { 43 43 unsigned int check_len; 44 44 u32 crc, expected_crc; ··· 60 60 crc = crc32_le(~0, (u8 *)&nvram, check_len); 61 61 62 62 if (crc != expected_crc) 63 - return -EINVAL; 64 - 65 - return 0; 63 + pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", 64 + expected_crc, crc); 66 65 } 67 66 68 67 u8 *bcm63xx_nvram_get_name(void)
+1 -1
arch/mips/bcm63xx/setup.c
··· 157 157 return board_register_devices(); 158 158 } 159 159 160 - device_initcall(bcm63xx_register_devices); 160 + arch_initcall(bcm63xx_register_devices);
+4 -1
arch/mips/cavium-octeon/setup.c
··· 174 174 175 175 static void octeon_generic_shutdown(void) 176 176 { 177 - int cpu, i; 177 + int i; 178 + #ifdef CONFIG_SMP 179 + int cpu; 180 + #endif 178 181 struct cvmx_bootmem_desc *bootmem_desc; 179 182 void *named_block_array_ptr; 180 183
+1 -3
arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
··· 9 9 * 10 10 * Initialized the local nvram copy from the target address and checks 11 11 * its checksum. 12 - * 13 - * Returns 0 on success. 14 12 */ 15 - int __init bcm63xx_nvram_init(void *nvram); 13 + void bcm63xx_nvram_init(void *nvram); 16 14 17 15 /** 18 16 * bcm63xx_nvram_get_name() - returns the board name according to nvram
-4
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
··· 28 28 /* #define cpu_has_prefetch ? */ 29 29 #define cpu_has_mcheck 1 30 30 /* #define cpu_has_ejtag ? */ 31 - #ifdef CONFIG_CPU_HAS_LLSC 32 31 #define cpu_has_llsc 1 33 - #else 34 - #define cpu_has_llsc 0 35 - #endif 36 32 /* #define cpu_has_vtag_icache ? */ 37 33 /* #define cpu_has_dc_aliases ? */ 38 34 /* #define cpu_has_ic_fills_f_dc ? */
+187 -16
arch/mips/include/asm/mipsregs.h
··· 1166 1166 unsigned int __dspctl; \ 1167 1167 \ 1168 1168 __asm__ __volatile__( \ 1169 + " .set push \n" \ 1170 + " .set dsp \n" \ 1169 1171 " rddsp %0, %x1 \n" \ 1172 + " .set pop \n" \ 1170 1173 : "=r" (__dspctl) \ 1171 1174 : "i" (mask)); \ 1172 1175 __dspctl; \ ··· 1178 1175 #define wrdsp(val, mask) \ 1179 1176 do { \ 1180 1177 __asm__ __volatile__( \ 1178 + " .set push \n" \ 1179 + " .set dsp \n" \ 1181 1180 " wrdsp %0, %x1 \n" \ 1181 + " .set pop \n" \ 1182 1182 : \ 1183 1183 : "r" (val), "i" (mask)); \ 1184 1184 } while (0) 1185 1185 1186 - #define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) 1187 - #define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) 1188 - #define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) 1189 - #define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) 1186 + #define mflo0() \ 1187 + ({ \ 1188 + long mflo0; \ 1189 + __asm__( \ 1190 + " .set push \n" \ 1191 + " .set dsp \n" \ 1192 + " mflo %0, $ac0 \n" \ 1193 + " .set pop \n" \ 1194 + : "=r" (mflo0)); \ 1195 + mflo0; \ 1196 + }) 1190 1197 1191 - #define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) 1192 - #define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) 1193 - #define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) 1194 - #define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) 1198 + #define mflo1() \ 1199 + ({ \ 1200 + long mflo1; \ 1201 + __asm__( \ 1202 + " .set push \n" \ 1203 + " .set dsp \n" \ 1204 + " mflo %0, $ac1 \n" \ 1205 + " .set pop \n" \ 1206 + : "=r" (mflo1)); \ 1207 + mflo1; \ 1208 + }) 1195 1209 1196 - #define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) 1197 - #define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) 1198 - #define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) 1199 - #define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) 1210 + #define mflo2() \ 1211 
+ ({ \ 1212 + long mflo2; \ 1213 + __asm__( \ 1214 + " .set push \n" \ 1215 + " .set dsp \n" \ 1216 + " mflo %0, $ac2 \n" \ 1217 + " .set pop \n" \ 1218 + : "=r" (mflo2)); \ 1219 + mflo2; \ 1220 + }) 1200 1221 1201 - #define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) 1202 - #define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) 1203 - #define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) 1204 - #define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) 1222 + #define mflo3() \ 1223 + ({ \ 1224 + long mflo3; \ 1225 + __asm__( \ 1226 + " .set push \n" \ 1227 + " .set dsp \n" \ 1228 + " mflo %0, $ac3 \n" \ 1229 + " .set pop \n" \ 1230 + : "=r" (mflo3)); \ 1231 + mflo3; \ 1232 + }) 1233 + 1234 + #define mfhi0() \ 1235 + ({ \ 1236 + long mfhi0; \ 1237 + __asm__( \ 1238 + " .set push \n" \ 1239 + " .set dsp \n" \ 1240 + " mfhi %0, $ac0 \n" \ 1241 + " .set pop \n" \ 1242 + : "=r" (mfhi0)); \ 1243 + mfhi0; \ 1244 + }) 1245 + 1246 + #define mfhi1() \ 1247 + ({ \ 1248 + long mfhi1; \ 1249 + __asm__( \ 1250 + " .set push \n" \ 1251 + " .set dsp \n" \ 1252 + " mfhi %0, $ac1 \n" \ 1253 + " .set pop \n" \ 1254 + : "=r" (mfhi1)); \ 1255 + mfhi1; \ 1256 + }) 1257 + 1258 + #define mfhi2() \ 1259 + ({ \ 1260 + long mfhi2; \ 1261 + __asm__( \ 1262 + " .set push \n" \ 1263 + " .set dsp \n" \ 1264 + " mfhi %0, $ac2 \n" \ 1265 + " .set pop \n" \ 1266 + : "=r" (mfhi2)); \ 1267 + mfhi2; \ 1268 + }) 1269 + 1270 + #define mfhi3() \ 1271 + ({ \ 1272 + long mfhi3; \ 1273 + __asm__( \ 1274 + " .set push \n" \ 1275 + " .set dsp \n" \ 1276 + " mfhi %0, $ac3 \n" \ 1277 + " .set pop \n" \ 1278 + : "=r" (mfhi3)); \ 1279 + mfhi3; \ 1280 + }) 1281 + 1282 + 1283 + #define mtlo0(x) \ 1284 + ({ \ 1285 + __asm__( \ 1286 + " .set push \n" \ 1287 + " .set dsp \n" \ 1288 + " mtlo %0, $ac0 \n" \ 1289 + " .set pop \n" \ 1290 + : \ 1291 + : "r" (x)); \ 1292 + }) 1293 + 1294 + #define mtlo1(x) \ 1295 + ({ \ 1296 + __asm__( \ 1297 + " .set push \n" \ 1298 + " .set dsp \n" \ 1299 + " mtlo %0, $ac1 \n" \ 1300 + " .set pop 
\n" \ 1301 + : \ 1302 + : "r" (x)); \ 1303 + }) 1304 + 1305 + #define mtlo2(x) \ 1306 + ({ \ 1307 + __asm__( \ 1308 + " .set push \n" \ 1309 + " .set dsp \n" \ 1310 + " mtlo %0, $ac2 \n" \ 1311 + " .set pop \n" \ 1312 + : \ 1313 + : "r" (x)); \ 1314 + }) 1315 + 1316 + #define mtlo3(x) \ 1317 + ({ \ 1318 + __asm__( \ 1319 + " .set push \n" \ 1320 + " .set dsp \n" \ 1321 + " mtlo %0, $ac3 \n" \ 1322 + " .set pop \n" \ 1323 + : \ 1324 + : "r" (x)); \ 1325 + }) 1326 + 1327 + #define mthi0(x) \ 1328 + ({ \ 1329 + __asm__( \ 1330 + " .set push \n" \ 1331 + " .set dsp \n" \ 1332 + " mthi %0, $ac0 \n" \ 1333 + " .set pop \n" \ 1334 + : \ 1335 + : "r" (x)); \ 1336 + }) 1337 + 1338 + #define mthi1(x) \ 1339 + ({ \ 1340 + __asm__( \ 1341 + " .set push \n" \ 1342 + " .set dsp \n" \ 1343 + " mthi %0, $ac1 \n" \ 1344 + " .set pop \n" \ 1345 + : \ 1346 + : "r" (x)); \ 1347 + }) 1348 + 1349 + #define mthi2(x) \ 1350 + ({ \ 1351 + __asm__( \ 1352 + " .set push \n" \ 1353 + " .set dsp \n" \ 1354 + " mthi %0, $ac2 \n" \ 1355 + " .set pop \n" \ 1356 + : \ 1357 + : "r" (x)); \ 1358 + }) 1359 + 1360 + #define mthi3(x) \ 1361 + ({ \ 1362 + __asm__( \ 1363 + " .set push \n" \ 1364 + " .set dsp \n" \ 1365 + " mthi %0, $ac3 \n" \ 1366 + " .set pop \n" \ 1367 + : \ 1368 + : "r" (x)); \ 1369 + }) 1205 1370 1206 1371 #else 1207 1372
+1 -1
arch/mips/include/asm/signal.h
··· 21 21 #include <asm/sigcontext.h> 22 22 #include <asm/siginfo.h> 23 23 24 - #define __ARCH_HAS_ODD_SIGACTION 24 + #define __ARCH_HAS_IRIX_SIGACTION 25 25 26 26 #endif /* _ASM_SIGNAL_H */
+6 -2
arch/mips/include/uapi/asm/signal.h
··· 72 72 * 73 73 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single 74 74 * Unix names RESETHAND and NODEFER respectively. 75 + * 76 + * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever 77 + * supported its use and no libc was using it, so the entire sa-restorer 78 + * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 79 + * retaining only the SA_RESTORER definition as a reminder to avoid 80 + * accidental reuse of the mask bit. 75 81 */ 76 82 #define SA_ONSTACK 0x08000000 77 83 #define SA_RESETHAND 0x80000000 ··· 89 83 90 84 #define SA_NOMASK SA_NODEFER 91 85 #define SA_ONESHOT SA_RESETHAND 92 - 93 - #define SA_RESTORER 0x04000000 /* Only for o32 */ 94 86 95 87 #define MINSIGSTKSZ 2048 96 88 #define SIGSTKSZ 8192
+6 -19
arch/mips/kernel/Makefile
··· 100 100 obj-$(CONFIG_JUMP_LABEL) += jump_label.o 101 101 102 102 # 103 - # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe 104 - # to enable DSP assembler support here even if the MIPS Release 2 CPU we 105 - # are targetting does not support DSP because all code-paths making use of 106 - # it properly check that the running CPU *actually does* support these 107 - # instructions. 103 + # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not 104 + # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches 105 + # here because the compiler may use DSP ASE instructions (such as lwx) in 106 + # code paths where we cannot check that the CPU we are running on supports it. 107 + # Proper abstraction using HAVE_AS_DSP and macros is done in 108 + # arch/mips/include/asm/mipsregs.h. 108 109 # 109 110 ifeq ($(CONFIG_CPU_MIPSR2), y) 110 111 CFLAGS_DSP = -DHAVE_AS_DSP 111 - 112 - # 113 - # Check if assembler supports DSP ASE 114 - # 115 - ifeq ($(call cc-option-yn,-mdsp), y) 116 - CFLAGS_DSP += -mdsp 117 - endif 118 - 119 - # 120 - # Check if assembler supports DSP ASE Rev2 121 - # 122 - ifeq ($(call cc-option-yn,-mdspr2), y) 123 - CFLAGS_DSP += -mdspr2 124 - endif 125 112 126 113 CFLAGS_signal.o = $(CFLAGS_DSP) 127 114 CFLAGS_signal32.o = $(CFLAGS_DSP)
+6 -7
arch/mips/kernel/cpu-probe.c
··· 580 580 c->tlbsize = 48; 581 581 break; 582 582 case PRID_IMP_VR41XX: 583 + set_isa(c, MIPS_CPU_ISA_III); 584 + c->options = R4K_OPTS; 585 + c->tlbsize = 32; 583 586 switch (c->processor_id & 0xf0) { 584 587 case PRID_REV_VR4111: 585 588 c->cputype = CPU_VR4111; ··· 607 604 __cpu_name[cpu] = "NEC VR4131"; 608 605 } else { 609 606 c->cputype = CPU_VR4133; 607 + c->options |= MIPS_CPU_LLSC; 610 608 __cpu_name[cpu] = "NEC VR4133"; 611 609 } 612 610 break; ··· 617 613 __cpu_name[cpu] = "NEC Vr41xx"; 618 614 break; 619 615 } 620 - set_isa(c, MIPS_CPU_ISA_III); 621 - c->options = R4K_OPTS; 622 - c->tlbsize = 32; 623 616 break; 624 617 case PRID_IMP_R4300: 625 618 c->cputype = CPU_R4300; ··· 1227 1226 if (c->options & MIPS_CPU_FPU) { 1228 1227 c->fpu_id = cpu_get_fpu_id(); 1229 1228 1230 - if (c->isa_level == MIPS_CPU_ISA_M32R1 || 1231 - c->isa_level == MIPS_CPU_ISA_M32R2 || 1232 - c->isa_level == MIPS_CPU_ISA_M64R1 || 1233 - c->isa_level == MIPS_CPU_ISA_M64R2) { 1229 + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1230 + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1234 1231 if (c->fpu_id & MIPS_FPIR_3D) 1235 1232 c->ases |= MIPS_ASE_MIPS3D; 1236 1233 }
+1 -1
arch/mips/kernel/linux32.c
··· 171 171 err = compat_sys_shmctl(first, second, compat_ptr(ptr)); 172 172 break; 173 173 default: 174 - err = -EINVAL; 174 + err = -ENOSYS; 175 175 break; 176 176 } 177 177
+7 -4
arch/mips/kernel/mcount.S
··· 46 46 PTR_L a5, PT_R9(sp) 47 47 PTR_L a6, PT_R10(sp) 48 48 PTR_L a7, PT_R11(sp) 49 - #else 50 - PTR_ADDIU sp, PT_SIZE 51 49 #endif 52 - .endm 50 + PTR_ADDIU sp, PT_SIZE 51 + .endm 53 52 54 53 .macro RETURN_BACK 55 54 jr ra ··· 67 68 .globl _mcount 68 69 _mcount: 69 70 b ftrace_stub 70 - addiu sp,sp,8 71 + #ifdef CONFIG_32BIT 72 + addiu sp,sp,8 73 + #else 74 + nop 75 + #endif 71 76 72 77 /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ 73 78 lw t1, function_trace_stop
+1 -1
arch/mips/kernel/proc.c
··· 67 67 if (cpu_has_mips_r) { 68 68 seq_printf(m, "isa\t\t\t:"); 69 69 if (cpu_has_mips_1) 70 - seq_printf(m, "%s", "mips1"); 70 + seq_printf(m, "%s", " mips1"); 71 71 if (cpu_has_mips_2) 72 72 seq_printf(m, "%s", " mips2"); 73 73 if (cpu_has_mips_3)
+1 -1
arch/mips/kernel/traps.c
··· 1571 1571 #ifdef CONFIG_64BIT 1572 1572 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 1573 1573 #endif 1574 - if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) 1574 + if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) 1575 1575 status_set |= ST0_XX; 1576 1576 if (cpu_has_dsp) 1577 1577 status_set |= ST0_MX;
+8 -8
arch/mips/lib/bitops.c
··· 90 90 unsigned bit = nr & SZLONG_MASK; 91 91 unsigned long mask; 92 92 unsigned long flags; 93 - unsigned long res; 93 + int res; 94 94 95 95 a += nr >> SZLONG_LOG; 96 96 mask = 1UL << bit; 97 97 raw_local_irq_save(flags); 98 - res = (mask & *a); 98 + res = (mask & *a) != 0; 99 99 *a |= mask; 100 100 raw_local_irq_restore(flags); 101 101 return res; ··· 116 116 unsigned bit = nr & SZLONG_MASK; 117 117 unsigned long mask; 118 118 unsigned long flags; 119 - unsigned long res; 119 + int res; 120 120 121 121 a += nr >> SZLONG_LOG; 122 122 mask = 1UL << bit; 123 123 raw_local_irq_save(flags); 124 - res = (mask & *a); 124 + res = (mask & *a) != 0; 125 125 *a |= mask; 126 126 raw_local_irq_restore(flags); 127 127 return res; ··· 141 141 unsigned bit = nr & SZLONG_MASK; 142 142 unsigned long mask; 143 143 unsigned long flags; 144 - unsigned long res; 144 + int res; 145 145 146 146 a += nr >> SZLONG_LOG; 147 147 mask = 1UL << bit; 148 148 raw_local_irq_save(flags); 149 - res = (mask & *a); 149 + res = (mask & *a) != 0; 150 150 *a &= ~mask; 151 151 raw_local_irq_restore(flags); 152 152 return res; ··· 166 166 unsigned bit = nr & SZLONG_MASK; 167 167 unsigned long mask; 168 168 unsigned long flags; 169 - unsigned long res; 169 + int res; 170 170 171 171 a += nr >> SZLONG_LOG; 172 172 mask = 1UL << bit; 173 173 raw_local_irq_save(flags); 174 - res = (mask & *a); 174 + res = (mask & *a) != 0; 175 175 *a ^= mask; 176 176 raw_local_irq_restore(flags); 177 177 return res;
+2 -2
arch/mips/lib/csum_partial.S
··· 270 270 #endif 271 271 272 272 /* odd buffer alignment? */ 273 - #ifdef CPU_MIPSR2 273 + #ifdef CONFIG_CPU_MIPSR2 274 274 wsbh v1, sum 275 275 movn sum, v1, t7 276 276 #else ··· 670 670 addu sum, v1 671 671 #endif 672 672 673 - #ifdef CPU_MIPSR2 673 + #ifdef CONFIG_CPU_MIPSR2 674 674 wsbh v1, sum 675 675 movn sum, v1, odd 676 676 #else
+2 -4
arch/mips/mm/c-r4k.c
··· 1247 1247 return; 1248 1248 1249 1249 default: 1250 - if (c->isa_level == MIPS_CPU_ISA_M32R1 || 1251 - c->isa_level == MIPS_CPU_ISA_M32R2 || 1252 - c->isa_level == MIPS_CPU_ISA_M64R1 || 1253 - c->isa_level == MIPS_CPU_ISA_M64R2) { 1250 + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1251 + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1254 1252 #ifdef CONFIG_MIPS_CPU_SCACHE 1255 1253 if (mips_sc_init ()) { 1256 1254 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
+2 -4
arch/mips/mm/sc-mips.c
··· 98 98 c->scache.flags |= MIPS_CACHE_NOT_PRESENT; 99 99 100 100 /* Ignore anything but MIPSxx processors */ 101 - if (c->isa_level != MIPS_CPU_ISA_M32R1 && 102 - c->isa_level != MIPS_CPU_ISA_M32R2 && 103 - c->isa_level != MIPS_CPU_ISA_M64R1 && 104 - c->isa_level != MIPS_CPU_ISA_M64R2) 101 + if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 102 + MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) 105 103 return 0; 106 104 107 105 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
+2 -2
arch/mips/pci/pci-alchemy.c
··· 19 19 #include <asm/mach-au1x00/au1000.h> 20 20 #include <asm/tlbmisc.h> 21 21 22 - #ifdef CONFIG_DEBUG_PCI 22 + #ifdef CONFIG_PCI_DEBUG 23 23 #define DBG(x...) printk(KERN_DEBUG x) 24 24 #else 25 25 #define DBG(x...) do {} while (0) ··· 162 162 if (status & (1 << 29)) { 163 163 *data = 0xffffffff; 164 164 error = -1; 165 - DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", 165 + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", 166 166 access_type, bus->number, device); 167 167 } else if ((status >> 28) & 0xf) { 168 168 DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
+3 -1
arch/s390/include/asm/pgtable.h
··· 344 344 #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ 345 345 346 346 /* Bits in the segment table entry */ 347 + #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ 347 348 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ 348 349 #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ 349 350 #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ ··· 1532 1531 /* 1533 1532 * No page table caches to initialise 1534 1533 */ 1535 - #define pgtable_cache_init() do { } while (0) 1534 + static inline void pgtable_cache_init(void) { } 1535 + static inline void check_pgt_cache(void) { } 1536 1536 1537 1537 #include <asm-generic/pgtable.h> 1538 1538
+57 -30
arch/s390/lib/uaccess_pt.c
··· 77 77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address 78 78 * contains the (negative) exception code. 79 79 */ 80 - static __always_inline unsigned long follow_table(struct mm_struct *mm, 81 - unsigned long addr, int write) 80 + #ifdef CONFIG_64BIT 81 + static unsigned long follow_table(struct mm_struct *mm, 82 + unsigned long address, int write) 82 83 { 83 - pgd_t *pgd; 84 - pud_t *pud; 85 - pmd_t *pmd; 86 - pte_t *ptep; 84 + unsigned long *table = (unsigned long *)__pa(mm->pgd); 87 85 88 - pgd = pgd_offset(mm, addr); 89 - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 90 - return -0x3aUL; 91 - 92 - pud = pud_offset(pgd, addr); 93 - if (pud_none(*pud) || unlikely(pud_bad(*pud))) 94 - return -0x3bUL; 95 - 96 - pmd = pmd_offset(pud, addr); 97 - if (pmd_none(*pmd)) 98 - return -0x10UL; 99 - if (pmd_large(*pmd)) { 100 - if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) 101 - return -0x04UL; 102 - return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); 86 + switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { 87 + case _ASCE_TYPE_REGION1: 88 + table = table + ((address >> 53) & 0x7ff); 89 + if (unlikely(*table & _REGION_ENTRY_INV)) 90 + return -0x39UL; 91 + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 92 + case _ASCE_TYPE_REGION2: 93 + table = table + ((address >> 42) & 0x7ff); 94 + if (unlikely(*table & _REGION_ENTRY_INV)) 95 + return -0x3aUL; 96 + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 97 + case _ASCE_TYPE_REGION3: 98 + table = table + ((address >> 31) & 0x7ff); 99 + if (unlikely(*table & _REGION_ENTRY_INV)) 100 + return -0x3bUL; 101 + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 102 + case _ASCE_TYPE_SEGMENT: 103 + table = table + ((address >> 20) & 0x7ff); 104 + if (unlikely(*table & _SEGMENT_ENTRY_INV)) 105 + return -0x10UL; 106 + if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { 107 + if (write && (*table & _SEGMENT_ENTRY_RO)) 108 + return -0x04UL; 109 + return (*table & 
_SEGMENT_ENTRY_ORIGIN_LARGE) + 110 + (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); 111 + } 112 + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 103 113 } 104 - if (unlikely(pmd_bad(*pmd))) 105 - return -0x10UL; 106 - 107 - ptep = pte_offset_map(pmd, addr); 108 - if (!pte_present(*ptep)) 114 + table = table + ((address >> 12) & 0xff); 115 + if (unlikely(*table & _PAGE_INVALID)) 109 116 return -0x11UL; 110 - if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) 117 + if (write && (*table & _PAGE_RO)) 111 118 return -0x04UL; 112 - 113 - return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); 119 + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); 114 120 } 121 + 122 + #else /* CONFIG_64BIT */ 123 + 124 + static unsigned long follow_table(struct mm_struct *mm, 125 + unsigned long address, int write) 126 + { 127 + unsigned long *table = (unsigned long *)__pa(mm->pgd); 128 + 129 + table = table + ((address >> 20) & 0x7ff); 130 + if (unlikely(*table & _SEGMENT_ENTRY_INV)) 131 + return -0x10UL; 132 + table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 133 + table = table + ((address >> 12) & 0xff); 134 + if (unlikely(*table & _PAGE_INVALID)) 135 + return -0x11UL; 136 + if (write && (*table & _PAGE_RO)) 137 + return -0x04UL; 138 + return (*table & PAGE_MASK) + (address & ~PAGE_MASK); 139 + } 140 + 141 + #endif /* CONFIG_64BIT */ 115 142 116 143 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, 117 144 size_t n, int write_user) ··· 224 197 225 198 static size_t clear_user_pt(size_t n, void __user *to) 226 199 { 227 - void *zpage = &empty_zero_page; 200 + void *zpage = (void *) empty_zero_page; 228 201 long done, size, ret; 229 202 230 203 done = 0;
+12 -13
arch/tile/kernel/setup.c
··· 1004 1004 1005 1005 #ifdef CONFIG_BLK_DEV_INITRD 1006 1006 1007 - /* 1008 - * Note that the kernel can potentially support other compression 1009 - * techniques than gz, though we don't do so by default. If we ever 1010 - * decide to do so we can either look for other filename extensions, 1011 - * or just allow a file with this name to be compressed with an 1012 - * arbitrary compressor (somewhat counterintuitively). 1013 - */ 1014 1007 static int __initdata set_initramfs_file; 1015 - static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; 1008 + static char __initdata initramfs_file[128] = "initramfs"; 1016 1009 1017 1010 static int __init setup_initramfs_file(char *str) 1018 1011 { ··· 1019 1026 early_param("initramfs_file", setup_initramfs_file); 1020 1027 1021 1028 /* 1022 - * We look for an "initramfs.cpio.gz" file in the hvfs. 1023 - * If there is one, we allocate some memory for it and it will be 1024 - * unpacked to the initramfs. 1029 + * We look for a file called "initramfs" in the hvfs. If there is one, we 1030 + * allocate some memory for it and it will be unpacked to the initramfs. 1031 + * If it's compressed, the initd code will uncompress it first. 1025 1032 */ 1026 1033 static void __init load_hv_initrd(void) 1027 1034 { ··· 1031 1038 1032 1039 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); 1033 1040 if (fd == HV_ENOENT) { 1034 - if (set_initramfs_file) 1041 + if (set_initramfs_file) { 1035 1042 pr_warning("No such hvfs initramfs file '%s'\n", 1036 1043 initramfs_file); 1037 - return; 1044 + return; 1045 + } else { 1046 + /* Try old backwards-compatible name. */ 1047 + fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); 1048 + if (fd == HV_ENOENT) 1049 + return; 1050 + } 1038 1051 } 1039 1052 BUG_ON(fd < 0); 1040 1053 stat = hv_fs_fstat(fd);
+2 -3
arch/x86/boot/compressed/Makefile
··· 4 4 # create a compressed vmlinux image from the original vmlinux 5 5 # 6 6 7 - targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o 7 + targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo 8 8 9 9 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 10 10 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC ··· 29 29 $(obj)/piggy.o 30 30 31 31 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone 32 - $(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone 33 32 34 33 ifeq ($(CONFIG_EFI_STUB), y) 35 34 VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o ··· 42 43 $(obj)/vmlinux.bin: vmlinux FORCE 43 44 $(call if_changed,objcopy) 44 45 45 - targets += vmlinux.bin.all vmlinux.relocs 46 + targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs 46 47 47 48 CMD_RELOCS = arch/x86/tools/relocs 48 49 quiet_cmd_relocs = RELOCS $@
+2 -2
arch/x86/include/asm/syscall.h
··· 29 29 */ 30 30 static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 31 31 { 32 - return regs->orig_ax & __SYSCALL_MASK; 32 + return regs->orig_ax; 33 33 } 34 34 35 35 static inline void syscall_rollback(struct task_struct *task, 36 36 struct pt_regs *regs) 37 37 { 38 - regs->ax = regs->orig_ax & __SYSCALL_MASK; 38 + regs->ax = regs->orig_ax; 39 39 } 40 40 41 41 static inline long syscall_get_error(struct task_struct *task,
+1 -1
arch/x86/kvm/lapic.c
··· 1857 1857 if (!pv_eoi_enabled(vcpu)) 1858 1858 return 0; 1859 1859 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, 1860 - addr); 1860 + addr, sizeof(u8)); 1861 1861 } 1862 1862 1863 1863 void kvm_lapic_init(void)
+6 -7
arch/x86/kvm/x86.c
··· 1823 1823 return 0; 1824 1824 } 1825 1825 1826 - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) 1826 + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, 1827 + sizeof(u32))) 1827 1828 return 1; 1828 1829 1829 1830 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); ··· 1953 1952 1954 1953 gpa_offset = data & ~(PAGE_MASK | 1); 1955 1954 1956 - /* Check that the address is 32-byte aligned. */ 1957 - if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) 1958 - break; 1959 - 1960 1955 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, 1961 - &vcpu->arch.pv_time, data & ~1ULL)) 1956 + &vcpu->arch.pv_time, data & ~1ULL, 1957 + sizeof(struct pvclock_vcpu_time_info))) 1962 1958 vcpu->arch.pv_time_enabled = false; 1963 1959 else 1964 1960 vcpu->arch.pv_time_enabled = true; ··· 1975 1977 return 1; 1976 1978 1977 1979 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, 1978 - data & KVM_STEAL_VALID_BITS)) 1980 + data & KVM_STEAL_VALID_BITS, 1981 + sizeof(struct kvm_steal_time))) 1979 1982 return 1; 1980 1983 1981 1984 vcpu->arch.st.msr_val = data;
+1 -1
drivers/acpi/Kconfig
··· 396 396 397 397 config ACPI_BGRT 398 398 bool "Boottime Graphics Resource Table support" 399 - depends on EFI 399 + depends on EFI && X86 400 400 help 401 401 This driver adds support for exposing the ACPI Boottime Graphics 402 402 Resource Table, which allows the operating system to obtain
+1 -1
drivers/acpi/acpi_i2c.c
··· 90 90 acpi_handle handle; 91 91 acpi_status status; 92 92 93 - handle = ACPI_HANDLE(&adapter->dev); 93 + handle = ACPI_HANDLE(adapter->dev.parent); 94 94 if (!handle) 95 95 return; 96 96
+37 -39
drivers/acpi/pci_root.c
··· 415 415 struct acpi_pci_root *root; 416 416 struct acpi_pci_driver *driver; 417 417 u32 flags, base_flags; 418 - bool is_osc_granted = false; 419 418 420 419 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); 421 420 if (!root) ··· 475 476 flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; 476 477 acpi_pci_osc_support(root, flags); 477 478 479 + /* 480 + * TBD: Need PCI interface for enumeration/configuration of roots. 481 + */ 482 + 483 + mutex_lock(&acpi_pci_root_lock); 484 + list_add_tail(&root->node, &acpi_pci_roots); 485 + mutex_unlock(&acpi_pci_root_lock); 486 + 487 + /* 488 + * Scan the Root Bridge 489 + * -------------------- 490 + * Must do this prior to any attempt to bind the root device, as the 491 + * PCI namespace does not get created until this call is made (and 492 + * thus the root bridge's pci_dev does not exist). 493 + */ 494 + root->bus = pci_acpi_scan_root(root); 495 + if (!root->bus) { 496 + printk(KERN_ERR PREFIX 497 + "Bus %04x:%02x not present in PCI namespace\n", 498 + root->segment, (unsigned int)root->secondary.start); 499 + result = -ENODEV; 500 + goto out_del_root; 501 + } 502 + 478 503 /* Indicate support for various _OSC capabilities. */ 479 504 if (pci_ext_cfg_avail()) 480 505 flags |= OSC_EXT_PCI_CONFIG_SUPPORT; ··· 517 494 flags = base_flags; 518 495 } 519 496 } 497 + 520 498 if (!pcie_ports_disabled 521 499 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { 522 500 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL ··· 538 514 status = acpi_pci_osc_control_set(device->handle, &flags, 539 515 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 540 516 if (ACPI_SUCCESS(status)) { 541 - is_osc_granted = true; 542 517 dev_info(&device->dev, 543 518 "ACPI _OSC control (0x%02x) granted\n", flags); 519 + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { 520 + /* 521 + * We have ASPM control, but the FADT indicates 522 + * that it's unsupported. Clear it. 
523 + */ 524 + pcie_clear_aspm(root->bus); 525 + } 544 526 } else { 545 - is_osc_granted = false; 546 527 dev_info(&device->dev, 547 528 "ACPI _OSC request failed (%s), " 548 529 "returned control mask: 0x%02x\n", 549 530 acpi_format_exception(status), flags); 531 + pr_info("ACPI _OSC control for PCIe not granted, " 532 + "disabling ASPM\n"); 533 + pcie_no_aspm(); 550 534 } 551 535 } else { 552 536 dev_info(&device->dev, 553 - "Unable to request _OSC control " 554 - "(_OSC support mask: 0x%02x)\n", flags); 555 - } 556 - 557 - /* 558 - * TBD: Need PCI interface for enumeration/configuration of roots. 559 - */ 560 - 561 - mutex_lock(&acpi_pci_root_lock); 562 - list_add_tail(&root->node, &acpi_pci_roots); 563 - mutex_unlock(&acpi_pci_root_lock); 564 - 565 - /* 566 - * Scan the Root Bridge 567 - * -------------------- 568 - * Must do this prior to any attempt to bind the root device, as the 569 - * PCI namespace does not get created until this call is made (and 570 - * thus the root bridge's pci_dev does not exist). 571 - */ 572 - root->bus = pci_acpi_scan_root(root); 573 - if (!root->bus) { 574 - printk(KERN_ERR PREFIX 575 - "Bus %04x:%02x not present in PCI namespace\n", 576 - root->segment, (unsigned int)root->secondary.start); 577 - result = -ENODEV; 578 - goto out_del_root; 579 - } 580 - 581 - /* ASPM setting */ 582 - if (is_osc_granted) { 583 - if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) 584 - pcie_clear_aspm(root->bus); 585 - } else { 586 - pr_info("ACPI _OSC control for PCIe not granted, " 587 - "disabling ASPM\n"); 588 - pcie_no_aspm(); 537 + "Unable to request _OSC control " 538 + "(_OSC support mask: 0x%02x)\n", flags); 589 539 } 590 540 591 541 pci_acpi_add_bus_pm_notifier(device, root->bus);
+7 -6
drivers/acpi/processor_idle.c
··· 66 66 67 67 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 68 68 69 - static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; 69 + static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], 70 + acpi_cstate); 70 71 71 72 static int disabled_by_idle_boot_param(void) 72 73 { ··· 723 722 struct cpuidle_driver *drv, int index) 724 723 { 725 724 struct acpi_processor *pr; 726 - struct acpi_processor_cx *cx = acpi_cstate[index]; 725 + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 727 726 728 727 pr = __this_cpu_read(processors); 729 728 ··· 746 745 */ 747 746 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) 748 747 { 749 - struct acpi_processor_cx *cx = acpi_cstate[index]; 748 + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 750 749 751 750 ACPI_FLUSH_CPU_CACHE(); 752 751 ··· 776 775 struct cpuidle_driver *drv, int index) 777 776 { 778 777 struct acpi_processor *pr; 779 - struct acpi_processor_cx *cx = acpi_cstate[index]; 778 + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 780 779 781 780 pr = __this_cpu_read(processors); 782 781 ··· 834 833 struct cpuidle_driver *drv, int index) 835 834 { 836 835 struct acpi_processor *pr; 837 - struct acpi_processor_cx *cx = acpi_cstate[index]; 836 + struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 838 837 839 838 pr = __this_cpu_read(processors); 840 839 ··· 961 960 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 962 961 continue; 963 962 #endif 964 - acpi_cstate[count] = cx; 963 + per_cpu(acpi_cstate[count], dev->cpu) = cx; 965 964 966 965 count++; 967 966 if (count == CPUIDLE_STATE_MAX)
+47 -13
drivers/base/power/qos.c
··· 46 46 #include "power.h" 47 47 48 48 static DEFINE_MUTEX(dev_pm_qos_mtx); 49 + static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); 49 50 50 51 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); 51 52 ··· 217 216 struct pm_qos_constraints *c; 218 217 struct pm_qos_flags *f; 219 218 220 - mutex_lock(&dev_pm_qos_mtx); 219 + mutex_lock(&dev_pm_qos_sysfs_mtx); 221 220 222 221 /* 223 222 * If the device's PM QoS resume latency limit or PM QoS flags have been 224 223 * exposed to user space, they have to be hidden at this point. 225 224 */ 225 + pm_qos_sysfs_remove_latency(dev); 226 + pm_qos_sysfs_remove_flags(dev); 227 + 228 + mutex_lock(&dev_pm_qos_mtx); 229 + 226 230 __dev_pm_qos_hide_latency_limit(dev); 227 231 __dev_pm_qos_hide_flags(dev); 228 232 ··· 260 254 261 255 out: 262 256 mutex_unlock(&dev_pm_qos_mtx); 257 + 258 + mutex_unlock(&dev_pm_qos_sysfs_mtx); 263 259 } 264 260 265 261 /** ··· 566 558 kfree(req); 567 559 } 568 560 561 + static void dev_pm_qos_drop_user_request(struct device *dev, 562 + enum dev_pm_qos_req_type type) 563 + { 564 + mutex_lock(&dev_pm_qos_mtx); 565 + __dev_pm_qos_drop_user_request(dev, type); 566 + mutex_unlock(&dev_pm_qos_mtx); 567 + } 568 + 569 569 /** 570 570 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. 571 571 * @dev: Device whose PM QoS latency limit is to be exposed to user space. 
··· 597 581 return ret; 598 582 } 599 583 584 + mutex_lock(&dev_pm_qos_sysfs_mtx); 585 + 600 586 mutex_lock(&dev_pm_qos_mtx); 601 587 602 588 if (IS_ERR_OR_NULL(dev->power.qos)) ··· 609 591 if (ret < 0) { 610 592 __dev_pm_qos_remove_request(req); 611 593 kfree(req); 594 + mutex_unlock(&dev_pm_qos_mtx); 612 595 goto out; 613 596 } 614 - 615 597 dev->power.qos->latency_req = req; 598 + 599 + mutex_unlock(&dev_pm_qos_mtx); 600 + 616 601 ret = pm_qos_sysfs_add_latency(dev); 617 602 if (ret) 618 - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 603 + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 619 604 620 605 out: 621 - mutex_unlock(&dev_pm_qos_mtx); 606 + mutex_unlock(&dev_pm_qos_sysfs_mtx); 622 607 return ret; 623 608 } 624 609 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); 625 610 626 611 static void __dev_pm_qos_hide_latency_limit(struct device *dev) 627 612 { 628 - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { 629 - pm_qos_sysfs_remove_latency(dev); 613 + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) 630 614 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 631 - } 632 615 } 633 616 634 617 /** ··· 638 619 */ 639 620 void dev_pm_qos_hide_latency_limit(struct device *dev) 640 621 { 622 + mutex_lock(&dev_pm_qos_sysfs_mtx); 623 + 624 + pm_qos_sysfs_remove_latency(dev); 625 + 641 626 mutex_lock(&dev_pm_qos_mtx); 642 627 __dev_pm_qos_hide_latency_limit(dev); 643 628 mutex_unlock(&dev_pm_qos_mtx); 629 + 630 + mutex_unlock(&dev_pm_qos_sysfs_mtx); 644 631 } 645 632 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); 646 633 ··· 674 649 } 675 650 676 651 pm_runtime_get_sync(dev); 652 + mutex_lock(&dev_pm_qos_sysfs_mtx); 653 + 677 654 mutex_lock(&dev_pm_qos_mtx); 678 655 679 656 if (IS_ERR_OR_NULL(dev->power.qos)) ··· 686 659 if (ret < 0) { 687 660 __dev_pm_qos_remove_request(req); 688 661 kfree(req); 662 + mutex_unlock(&dev_pm_qos_mtx); 689 663 goto out; 690 664 } 691 - 692 665 
dev->power.qos->flags_req = req; 666 + 667 + mutex_unlock(&dev_pm_qos_mtx); 668 + 693 669 ret = pm_qos_sysfs_add_flags(dev); 694 670 if (ret) 695 - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 671 + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 696 672 697 673 out: 698 - mutex_unlock(&dev_pm_qos_mtx); 674 + mutex_unlock(&dev_pm_qos_sysfs_mtx); 699 675 pm_runtime_put(dev); 700 676 return ret; 701 677 } ··· 706 676 707 677 static void __dev_pm_qos_hide_flags(struct device *dev) 708 678 { 709 - if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { 710 - pm_qos_sysfs_remove_flags(dev); 679 + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) 711 680 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 712 - } 713 681 } 714 682 715 683 /** ··· 717 689 void dev_pm_qos_hide_flags(struct device *dev) 718 690 { 719 691 pm_runtime_get_sync(dev); 692 + mutex_lock(&dev_pm_qos_sysfs_mtx); 693 + 694 + pm_qos_sysfs_remove_flags(dev); 695 + 720 696 mutex_lock(&dev_pm_qos_mtx); 721 697 __dev_pm_qos_hide_flags(dev); 722 698 mutex_unlock(&dev_pm_qos_mtx); 699 + 700 + mutex_unlock(&dev_pm_qos_sysfs_mtx); 723 701 pm_runtime_put(dev); 724 702 } 725 703 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
+1 -1
drivers/base/regmap/regcache-rbtree.c
··· 398 398 base = 0; 399 399 400 400 if (max < rbnode->base_reg + rbnode->blklen) 401 - end = rbnode->base_reg + rbnode->blklen - max; 401 + end = max - rbnode->base_reg + 1; 402 402 else 403 403 end = rbnode->blklen; 404 404
+5 -4
drivers/base/regmap/regmap.c
··· 710 710 } 711 711 } 712 712 713 + regmap_debugfs_init(map, config->name); 714 + 713 715 ret = regcache_init(map, config); 714 716 if (ret != 0) 715 717 goto err_range; 716 - 717 - regmap_debugfs_init(map, config->name); 718 718 719 719 /* Add a devres resource for dev_get_regmap() */ 720 720 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); ··· 943 943 unsigned int ival; 944 944 int val_bytes = map->format.val_bytes; 945 945 for (i = 0; i < val_len / val_bytes; i++) { 946 - memcpy(map->work_buf, val + (i * val_bytes), val_bytes); 947 - ival = map->format.parse_val(map->work_buf); 946 + ival = map->format.parse_val(val + (i * val_bytes)); 948 947 ret = regcache_write(map, reg + (i * map->reg_stride), 949 948 ival); 950 949 if (ret) { ··· 1035 1036 kfree(async->work_buf); 1036 1037 kfree(async); 1037 1038 } 1039 + 1040 + return ret; 1038 1041 } 1039 1042 1040 1043 trace_regmap_hw_write_start(map->dev, reg,
+2 -1
drivers/block/aoe/aoecmd.c
··· 51 51 { 52 52 struct sk_buff *skb; 53 53 54 - skb = alloc_skb(len, GFP_ATOMIC); 54 + skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); 55 55 if (skb) { 56 + skb_reserve(skb, MAX_HEADER); 56 57 skb_reset_mac_header(skb); 57 58 skb_reset_network_header(skb); 58 59 skb->protocol = __constant_htons(ETH_P_AOE);
+8 -1
drivers/block/loop.c
··· 922 922 lo->lo_flags |= LO_FLAGS_PARTSCAN; 923 923 if (lo->lo_flags & LO_FLAGS_PARTSCAN) 924 924 ioctl_by_bdev(bdev, BLKRRPART, 0); 925 + 926 + /* Grab the block_device to prevent its destruction after we 927 + * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). 928 + */ 929 + bdgrab(bdev); 925 930 return 0; 926 931 927 932 out_clr: ··· 1036 1031 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); 1037 1032 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); 1038 1033 memset(lo->lo_file_name, 0, LO_NAME_SIZE); 1039 - if (bdev) 1034 + if (bdev) { 1035 + bdput(bdev); 1040 1036 invalidate_bdev(bdev); 1037 + } 1041 1038 set_capacity(lo->lo_disk, 0); 1042 1039 loop_sysfs_exit(lo); 1043 1040 if (bdev) {
+9
drivers/char/hw_random/core.c
··· 380 380 } 381 381 EXPORT_SYMBOL_GPL(hwrng_unregister); 382 382 383 + static void __exit hwrng_exit(void) 384 + { 385 + mutex_lock(&rng_mutex); 386 + BUG_ON(current_rng); 387 + kfree(rng_buffer); 388 + mutex_unlock(&rng_mutex); 389 + } 390 + 391 + module_exit(hwrng_exit); 383 392 384 393 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); 385 394 MODULE_LICENSE("GPL");
+30 -14
drivers/char/virtio_console.c
··· 149 149 spinlock_t ports_lock; 150 150 151 151 /* To protect the vq operations for the control channel */ 152 - spinlock_t cvq_lock; 152 + spinlock_t c_ivq_lock; 153 + spinlock_t c_ovq_lock; 153 154 154 155 /* The current config space is stored here */ 155 156 struct virtio_console_config config; ··· 570 569 vq = portdev->c_ovq; 571 570 572 571 sg_init_one(sg, &cpkt, sizeof(cpkt)); 572 + 573 + spin_lock(&portdev->c_ovq_lock); 573 574 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { 574 575 virtqueue_kick(vq); 575 576 while (!virtqueue_get_buf(vq, &len)) 576 577 cpu_relax(); 577 578 } 579 + spin_unlock(&portdev->c_ovq_lock); 578 580 return 0; 579 581 } 580 582 ··· 1440 1436 * rproc_serial does not want the console port, only 1441 1437 * the generic port implementation. 1442 1438 */ 1443 - port->host_connected = port->guest_connected = true; 1439 + port->host_connected = true; 1444 1440 else if (!use_multiport(port->portdev)) { 1445 1441 /* 1446 1442 * If we're not using multiport support, ··· 1713 1709 portdev = container_of(work, struct ports_device, control_work); 1714 1710 vq = portdev->c_ivq; 1715 1711 1716 - spin_lock(&portdev->cvq_lock); 1712 + spin_lock(&portdev->c_ivq_lock); 1717 1713 while ((buf = virtqueue_get_buf(vq, &len))) { 1718 - spin_unlock(&portdev->cvq_lock); 1714 + spin_unlock(&portdev->c_ivq_lock); 1719 1715 1720 1716 buf->len = len; 1721 1717 buf->offset = 0; 1722 1718 1723 1719 handle_control_message(portdev, buf); 1724 1720 1725 - spin_lock(&portdev->cvq_lock); 1721 + spin_lock(&portdev->c_ivq_lock); 1726 1722 if (add_inbuf(portdev->c_ivq, buf) < 0) { 1727 1723 dev_warn(&portdev->vdev->dev, 1728 1724 "Error adding buffer to queue\n"); 1729 1725 free_buf(buf, false); 1730 1726 } 1731 1727 } 1732 - spin_unlock(&portdev->cvq_lock); 1728 + spin_unlock(&portdev->c_ivq_lock); 1733 1729 } 1734 1730 1735 1731 static void out_intr(struct virtqueue *vq) ··· 1756 1752 port->inbuf = get_inbuf(port); 1757 1753 1758 1754 /* 1759 - * Don't 
queue up data when port is closed. This condition 1755 + * Normally the port should not accept data when the port is 1756 + * closed. For generic serial ports, the host won't (shouldn't) 1757 + * send data till the guest is connected. But this condition 1760 1758 * can be reached when a console port is not yet connected (no 1761 - * tty is spawned) and the host sends out data to console 1762 - * ports. For generic serial ports, the host won't 1763 - * (shouldn't) send data till the guest is connected. 1759 + * tty is spawned) and the other side sends out data over the 1760 + * vring, or when a remote devices start sending data before 1761 + * the ports are opened. 1762 + * 1763 + * A generic serial port will discard data if not connected, 1764 + * while console ports and rproc-serial ports accepts data at 1765 + * any time. rproc-serial is initiated with guest_connected to 1766 + * false because port_fops_open expects this. Console ports are 1767 + * hooked up with an HVC console and is initialized with 1768 + * guest_connected to true. 
1764 1769 */ 1765 - if (!port->guest_connected) 1770 + 1771 + if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) 1766 1772 discard_port_data(port); 1767 1773 1768 1774 spin_unlock_irqrestore(&port->inbuf_lock, flags); ··· 2000 1986 if (multiport) { 2001 1987 unsigned int nr_added_bufs; 2002 1988 2003 - spin_lock_init(&portdev->cvq_lock); 1989 + spin_lock_init(&portdev->c_ivq_lock); 1990 + spin_lock_init(&portdev->c_ovq_lock); 2004 1991 INIT_WORK(&portdev->control_work, &control_work_handler); 2005 1992 2006 - nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); 1993 + nr_added_bufs = fill_queue(portdev->c_ivq, 1994 + &portdev->c_ivq_lock); 2007 1995 if (!nr_added_bufs) { 2008 1996 dev_err(&vdev->dev, 2009 1997 "Error allocating buffers for control queue\n"); ··· 2156 2140 return ret; 2157 2141 2158 2142 if (use_multiport(portdev)) 2159 - fill_queue(portdev->c_ivq, &portdev->cvq_lock); 2143 + fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); 2160 2144 2161 2145 list_for_each_entry(port, &portdev->ports, list) { 2162 2146 port->in_vq = portdev->in_vqs[port->id];
+1 -1
drivers/clk/tegra/clk-tegra20.c
··· 703 703 clks[pll_a_out0] = clk; 704 704 705 705 /* PLLE */ 706 - clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, 706 + clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base, 707 707 0, 100000000, &pll_e_params, 708 708 0, pll_e_freq_table, NULL); 709 709 clk_register_clkdev(clk, "pll_e", NULL);
+8 -2
drivers/cpufreq/cpufreq-cpu0.c
··· 178 178 179 179 static int cpu0_cpufreq_probe(struct platform_device *pdev) 180 180 { 181 - struct device_node *np; 181 + struct device_node *np, *parent; 182 182 int ret; 183 183 184 - for_each_child_of_node(of_find_node_by_path("/cpus"), np) { 184 + parent = of_find_node_by_path("/cpus"); 185 + if (!parent) { 186 + pr_err("failed to find OF /cpus\n"); 187 + return -ENOENT; 188 + } 189 + 190 + for_each_child_of_node(parent, np) { 185 191 if (of_get_property(np, "operating-points", NULL)) 186 192 break; 187 193 }
+3 -3
drivers/cpufreq/cpufreq_governor.h
··· 14 14 * published by the Free Software Foundation. 15 15 */ 16 16 17 - #ifndef _CPUFREQ_GOVERNER_H 18 - #define _CPUFREQ_GOVERNER_H 17 + #ifndef _CPUFREQ_GOVERNOR_H 18 + #define _CPUFREQ_GOVERNOR_H 19 19 20 20 #include <linux/cpufreq.h> 21 21 #include <linux/kobject.h> ··· 175 175 unsigned int sampling_rate); 176 176 int cpufreq_governor_dbs(struct dbs_data *dbs_data, 177 177 struct cpufreq_policy *policy, unsigned int event); 178 - #endif /* _CPUFREQ_GOVERNER_H */ 178 + #endif /* _CPUFREQ_GOVERNOR_H */
+1
drivers/dma/Kconfig
··· 83 83 84 84 config DW_DMAC 85 85 tristate "Synopsys DesignWare AHB DMA support" 86 + depends on GENERIC_HARDIRQS 86 87 select DMA_ENGINE 87 88 default y if CPU_AT32AP7000 88 89 help
+47 -22
drivers/eisa/pci_eisa.c
··· 19 19 /* There is only *one* pci_eisa device per machine, right ? */ 20 20 static struct eisa_root_device pci_eisa_root; 21 21 22 - static int __init pci_eisa_init(struct pci_dev *pdev, 23 - const struct pci_device_id *ent) 22 + static int __init pci_eisa_init(struct pci_dev *pdev) 24 23 { 25 - int rc; 24 + int rc, i; 25 + struct resource *res, *bus_res = NULL; 26 26 27 27 if ((rc = pci_enable_device (pdev))) { 28 28 printk (KERN_ERR "pci_eisa : Could not enable device %s\n", ··· 30 30 return rc; 31 31 } 32 32 33 + /* 34 + * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI 35 + * device, so the resources available on EISA are the same as those 36 + * available on the 82375 bus. This works the same as a PCI-PCI 37 + * bridge in subtractive-decode mode (see pci_read_bridge_bases()). 38 + * We assume other PCI-EISA bridges are similar. 39 + * 40 + * eisa_root_register() can only deal with a single io port resource, 41 + * so we use the first valid io port resource. 42 + */ 43 + pci_bus_for_each_resource(pdev->bus, res, i) 44 + if (res && (res->flags & IORESOURCE_IO)) { 45 + bus_res = res; 46 + break; 47 + } 48 + 49 + if (!bus_res) { 50 + dev_err(&pdev->dev, "No resources available\n"); 51 + return -1; 52 + } 53 + 33 54 pci_eisa_root.dev = &pdev->dev; 34 - pci_eisa_root.res = pdev->bus->resource[0]; 35 - pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; 55 + pci_eisa_root.res = bus_res; 56 + pci_eisa_root.bus_base_addr = bus_res->start; 36 57 pci_eisa_root.slots = EISA_MAX_SLOTS; 37 58 pci_eisa_root.dma_mask = pdev->dma_mask; 38 59 dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); ··· 66 45 return 0; 67 46 } 68 47 69 - static struct pci_device_id pci_eisa_pci_tbl[] = { 70 - { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 71 - PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, 72 - { 0, } 73 - }; 74 - 75 - static struct pci_driver __refdata pci_eisa_driver = { 76 - .name = "pci_eisa", 77 - .id_table = pci_eisa_pci_tbl, 78 - .probe = pci_eisa_init, 
79 - }; 80 - 81 - static int __init pci_eisa_init_module (void) 48 + /* 49 + * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). 50 + * Otherwise pnp resource will get enabled early and could prevent eisa 51 + * to be initialized. 52 + * Also need to make sure pci_eisa_init_early() is called after 53 + * x86/pci_subsys_init(). 54 + * So need to use subsys_initcall_sync with it. 55 + */ 56 + static int __init pci_eisa_init_early(void) 82 57 { 83 - return pci_register_driver (&pci_eisa_driver); 84 - } 58 + struct pci_dev *dev = NULL; 59 + int ret; 85 60 86 - device_initcall(pci_eisa_init_module); 87 - MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); 61 + for_each_pci_dev(dev) 62 + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { 63 + ret = pci_eisa_init(dev); 64 + if (ret) 65 + return ret; 66 + } 67 + 68 + return 0; 69 + } 70 + subsys_initcall_sync(pci_eisa_init_early);
+1 -1
drivers/gpio/gpio-ich.c
··· 214 214 * If it can't be trusted, assume that the pin can be used as a GPIO. 215 215 */ 216 216 if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) 217 - return 1; 217 + return 0; 218 218 219 219 return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; 220 220 }
+11 -4
drivers/gpio/gpio-stmpe.c
··· 307 307 .xlate = irq_domain_xlate_twocell, 308 308 }; 309 309 310 - static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) 310 + static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, 311 + struct device_node *np) 311 312 { 312 - int base = stmpe_gpio->irq_base; 313 + int base = 0; 313 314 314 - stmpe_gpio->domain = irq_domain_add_simple(NULL, 315 + if (!np) 316 + base = stmpe_gpio->irq_base; 317 + 318 + stmpe_gpio->domain = irq_domain_add_simple(np, 315 319 stmpe_gpio->chip.ngpio, base, 316 320 &stmpe_gpio_irq_simple_ops, stmpe_gpio); 317 321 if (!stmpe_gpio->domain) { ··· 350 346 stmpe_gpio->chip = template_chip; 351 347 stmpe_gpio->chip.ngpio = stmpe->num_gpios; 352 348 stmpe_gpio->chip.dev = &pdev->dev; 349 + #ifdef CONFIG_OF 350 + stmpe_gpio->chip.of_node = np; 351 + #endif 353 352 stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; 354 353 355 354 if (pdata) ··· 373 366 goto out_free; 374 367 375 368 if (irq >= 0) { 376 - ret = stmpe_gpio_irq_init(stmpe_gpio); 369 + ret = stmpe_gpio_irq_init(stmpe_gpio, np); 377 370 if (ret) 378 371 goto out_disable; 379 372
-2
drivers/gpu/drm/drm_crtc.c
··· 2326 2326 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); 2327 2327 if (IS_ERR(fb)) { 2328 2328 DRM_DEBUG_KMS("could not create framebuffer\n"); 2329 - drm_modeset_unlock_all(dev); 2330 2329 return PTR_ERR(fb); 2331 2330 } 2332 2331 ··· 2505 2506 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); 2506 2507 if (IS_ERR(fb)) { 2507 2508 DRM_DEBUG_KMS("could not create framebuffer\n"); 2508 - drm_modeset_unlock_all(dev); 2509 2509 return PTR_ERR(fb); 2510 2510 } 2511 2511
+4 -2
drivers/gpu/drm/drm_fops.c
··· 123 123 int retcode = 0; 124 124 int need_setup = 0; 125 125 struct address_space *old_mapping; 126 + struct address_space *old_imapping; 126 127 127 128 minor = idr_find(&drm_minors_idr, minor_id); 128 129 if (!minor) ··· 138 137 if (!dev->open_count++) 139 138 need_setup = 1; 140 139 mutex_lock(&dev->struct_mutex); 140 + old_imapping = inode->i_mapping; 141 141 old_mapping = dev->dev_mapping; 142 142 if (old_mapping == NULL) 143 143 dev->dev_mapping = &inode->i_data; ··· 161 159 162 160 err_undo: 163 161 mutex_lock(&dev->struct_mutex); 164 - filp->f_mapping = old_mapping; 165 - inode->i_mapping = old_mapping; 162 + filp->f_mapping = old_imapping; 163 + inode->i_mapping = old_imapping; 166 164 iput(container_of(dev->dev_mapping, struct inode, i_data)); 167 165 dev->dev_mapping = old_mapping; 168 166 mutex_unlock(&dev->struct_mutex);
+1 -1
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 57 57 if (eb == NULL) { 58 58 int size = args->buffer_count; 59 59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 60 - BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); 60 + BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); 61 61 while (count > 2*size) 62 62 count >>= 1; 63 63 eb = kzalloc(count*sizeof(struct hlist_head) +
+17 -23
drivers/gpu/drm/i915/intel_crt.c
··· 45 45 46 46 struct intel_crt { 47 47 struct intel_encoder base; 48 + /* DPMS state is stored in the connector, which we need in the 49 + * encoder's enable/disable callbacks */ 50 + struct intel_connector *connector; 48 51 bool force_hotplug_required; 49 52 u32 adpa_reg; 50 53 }; ··· 84 81 return true; 85 82 } 86 83 87 - static void intel_disable_crt(struct intel_encoder *encoder) 88 - { 89 - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 90 - struct intel_crt *crt = intel_encoder_to_crt(encoder); 91 - u32 temp; 92 - 93 - temp = I915_READ(crt->adpa_reg); 94 - temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; 95 - temp &= ~ADPA_DAC_ENABLE; 96 - I915_WRITE(crt->adpa_reg, temp); 97 - } 98 - 99 - static void intel_enable_crt(struct intel_encoder *encoder) 100 - { 101 - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 102 - struct intel_crt *crt = intel_encoder_to_crt(encoder); 103 - u32 temp; 104 - 105 - temp = I915_READ(crt->adpa_reg); 106 - temp |= ADPA_DAC_ENABLE; 107 - I915_WRITE(crt->adpa_reg, temp); 108 - } 109 - 110 84 /* Note: The caller is required to filter out dpms modes not supported by the 111 85 * platform. 
*/ 112 86 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) ··· 114 134 115 135 I915_WRITE(crt->adpa_reg, temp); 116 136 } 137 + 138 + static void intel_disable_crt(struct intel_encoder *encoder) 139 + { 140 + intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); 141 + } 142 + 143 + static void intel_enable_crt(struct intel_encoder *encoder) 144 + { 145 + struct intel_crt *crt = intel_encoder_to_crt(encoder); 146 + 147 + intel_crt_set_dpms(encoder, crt->connector->base.dpms); 148 + } 149 + 117 150 118 151 static void intel_crt_dpms(struct drm_connector *connector, int mode) 119 152 { ··· 739 746 } 740 747 741 748 connector = &intel_connector->base; 749 + crt->connector = intel_connector; 742 750 drm_connector_init(dev, &intel_connector->base, 743 751 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 744 752
+3
drivers/gpu/drm/i915/intel_dp.c
··· 2559 2559 { 2560 2560 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 2561 2561 struct intel_dp *intel_dp = &intel_dig_port->dp; 2562 + struct drm_device *dev = intel_dp_to_dev(intel_dp); 2562 2563 2563 2564 i2c_del_adapter(&intel_dp->adapter); 2564 2565 drm_encoder_cleanup(encoder); 2565 2566 if (is_edp(intel_dp)) { 2566 2567 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2568 + mutex_lock(&dev->mode_config.mutex); 2567 2569 ironlake_panel_vdd_off_sync(intel_dp); 2570 + mutex_unlock(&dev->mode_config.mutex); 2568 2571 } 2569 2572 kfree(intel_dig_port); 2570 2573 }
+17
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
··· 248 248 } 249 249 } 250 250 251 + static void 252 + nouveau_bios_shadow_platform(struct nouveau_bios *bios) 253 + { 254 + struct pci_dev *pdev = nv_device(bios)->pdev; 255 + size_t size; 256 + 257 + void __iomem *rom = pci_platform_rom(pdev, &size); 258 + if (rom && size) { 259 + bios->data = kmalloc(size, GFP_KERNEL); 260 + if (bios->data) { 261 + memcpy_fromio(bios->data, rom, size); 262 + bios->size = size; 263 + } 264 + } 265 + } 266 + 251 267 static int 252 268 nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) 253 269 { ··· 304 288 { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, 305 289 { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, 306 290 { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, 291 + { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, 307 292 {} 308 293 }; 309 294 struct methods *mthd, *best;
+10 -8
drivers/gpu/drm/nouveau/nouveau_abi16.c
··· 391 391 struct nouveau_drm *drm = nouveau_drm(dev); 392 392 struct nouveau_device *device = nv_device(drm->device); 393 393 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 394 - struct nouveau_abi16_chan *chan, *temp; 394 + struct nouveau_abi16_chan *chan = NULL, *temp; 395 395 struct nouveau_abi16_ntfy *ntfy; 396 396 struct nouveau_object *object; 397 397 struct nv_dma_class args = {}; ··· 404 404 if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) 405 405 return nouveau_abi16_put(abi16, -EINVAL); 406 406 407 - list_for_each_entry_safe(chan, temp, &abi16->channels, head) { 408 - if (chan->chan->handle == (NVDRM_CHAN | info->channel)) 407 + list_for_each_entry(temp, &abi16->channels, head) { 408 + if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { 409 + chan = temp; 409 410 break; 410 - chan = NULL; 411 + } 411 412 } 412 413 413 414 if (!chan) ··· 460 459 { 461 460 struct drm_nouveau_gpuobj_free *fini = data; 462 461 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 463 - struct nouveau_abi16_chan *chan, *temp; 462 + struct nouveau_abi16_chan *chan = NULL, *temp; 464 463 struct nouveau_abi16_ntfy *ntfy; 465 464 int ret; 466 465 467 466 if (unlikely(!abi16)) 468 467 return -ENOMEM; 469 468 470 - list_for_each_entry_safe(chan, temp, &abi16->channels, head) { 471 - if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) 469 + list_for_each_entry(temp, &abi16->channels, head) { 470 + if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { 471 + chan = temp; 472 472 break; 473 - chan = NULL; 473 + } 474 474 } 475 475 476 476 if (!chan)
+20 -12
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 72 72 static struct drm_driver driver; 73 73 74 74 static int 75 + nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) 76 + { 77 + struct nouveau_drm *drm = 78 + container_of(event, struct nouveau_drm, vblank[head]); 79 + drm_handle_vblank(drm->dev, head); 80 + return NVKM_EVENT_KEEP; 81 + } 82 + 83 + static int 75 84 nouveau_drm_vblank_enable(struct drm_device *dev, int head) 76 85 { 77 86 struct nouveau_drm *drm = nouveau_drm(dev); 78 87 struct nouveau_disp *pdisp = nouveau_disp(drm->device); 79 - nouveau_event_get(pdisp->vblank, head, &drm->vblank); 88 + 89 + if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) 90 + return -EIO; 91 + WARN_ON_ONCE(drm->vblank[head].func); 92 + drm->vblank[head].func = nouveau_drm_vblank_handler; 93 + nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]); 80 94 return 0; 81 95 } 82 96 ··· 99 85 { 100 86 struct nouveau_drm *drm = nouveau_drm(dev); 101 87 struct nouveau_disp *pdisp = nouveau_disp(drm->device); 102 - nouveau_event_put(pdisp->vblank, head, &drm->vblank); 103 - } 104 - 105 - static int 106 - nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) 107 - { 108 - struct nouveau_drm *drm = 109 - container_of(event, struct nouveau_drm, vblank); 110 - drm_handle_vblank(drm->dev, head); 111 - return NVKM_EVENT_KEEP; 88 + if (drm->vblank[head].func) 89 + nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); 90 + else 91 + WARN_ON_ONCE(1); 92 + drm->vblank[head].func = NULL; 112 93 } 113 94 114 95 static u64 ··· 301 292 302 293 dev->dev_private = drm; 303 294 drm->dev = dev; 304 - drm->vblank.func = nouveau_drm_vblank_handler; 305 295 306 296 INIT_LIST_HEAD(&drm->clients); 307 297 spin_lock_init(&drm->tile.lock);
+1 -1
drivers/gpu/drm/nouveau/nouveau_drm.h
··· 113 113 struct nvbios vbios; 114 114 struct nouveau_display *display; 115 115 struct backlight_device *backlight; 116 - struct nouveau_eventh vblank; 116 + struct nouveau_eventh vblank[4]; 117 117 118 118 /* power management */ 119 119 struct nouveau_pm *pm;
+26
drivers/gpu/drm/radeon/radeon_bios.c
··· 99 99 return true; 100 100 } 101 101 102 + static bool radeon_read_platform_bios(struct radeon_device *rdev) 103 + { 104 + uint8_t __iomem *bios; 105 + size_t size; 106 + 107 + rdev->bios = NULL; 108 + 109 + bios = pci_platform_rom(rdev->pdev, &size); 110 + if (!bios) { 111 + return false; 112 + } 113 + 114 + if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { 115 + return false; 116 + } 117 + rdev->bios = kmemdup(bios, size, GFP_KERNEL); 118 + if (rdev->bios == NULL) { 119 + return false; 120 + } 121 + 122 + return true; 123 + } 124 + 102 125 #ifdef CONFIG_ACPI 103 126 /* ATRM is used to get the BIOS on the discrete cards in 104 127 * dual-gpu systems. ··· 642 619 r = radeon_read_bios(rdev); 643 620 if (r == false) { 644 621 r = radeon_read_disabled_bios(rdev); 622 + } 623 + if (r == false) { 624 + r = radeon_read_platform_bios(rdev); 645 625 } 646 626 if (r == false || rdev->bios == NULL) { 647 627 DRM_ERROR("Unable to locate a BIOS ROM\n");
+12 -1
drivers/hid/hid-core.c
··· 2077 2077 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, 2078 2078 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, 2079 2079 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, 2080 - { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, 2081 2080 { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, 2082 2081 { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, 2083 2082 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, ··· 2241 2242 hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) || 2242 2243 (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST && 2243 2244 hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) 2245 + return true; 2246 + break; 2247 + case USB_VENDOR_ID_ATMEL_V_USB: 2248 + /* Masterkit MA901 usb radio based on Atmel tiny85 chip and 2249 + * it has the same USB ID as many Atmel V-USB devices. This 2250 + * usb radio is handled by radio-ma901.c driver so we want 2251 + * ignore the hid. Check the name, bus, product and ignore 2252 + * if we have MA901 usb radio. 2253 + */ 2254 + if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB && 2255 + hdev->bus == BUS_USB && 2256 + strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) 2244 2257 return true; 2245 2258 break; 2246 2259 }
+2 -3
drivers/hid/hid-ids.h
··· 158 158 #define USB_VENDOR_ID_ATMEL 0x03eb 159 159 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c 160 160 #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 161 + #define USB_VENDOR_ID_ATMEL_V_USB 0x16c0 162 + #define USB_DEVICE_ID_ATMEL_V_USB 0x05df 161 163 162 164 #define USB_VENDOR_ID_AUREAL 0x0755 163 165 #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 ··· 558 556 559 557 #define USB_VENDOR_ID_MADCATZ 0x0738 560 558 #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 561 - 562 - #define USB_VENDOR_ID_MASTERKIT 0x16c0 563 - #define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df 564 559 565 560 #define USB_VENDOR_ID_MCC 0x09db 566 561 #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+20 -9
drivers/hid/hid-magicmouse.c
··· 462 462 return 0; 463 463 } 464 464 465 + static void magicmouse_input_configured(struct hid_device *hdev, 466 + struct hid_input *hi) 467 + 468 + { 469 + struct magicmouse_sc *msc = hid_get_drvdata(hdev); 470 + 471 + int ret = magicmouse_setup_input(msc->input, hdev); 472 + if (ret) { 473 + hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); 474 + /* clean msc->input to notify probe() of the failure */ 475 + msc->input = NULL; 476 + } 477 + } 478 + 479 + 465 480 static int magicmouse_probe(struct hid_device *hdev, 466 481 const struct hid_device_id *id) 467 482 { ··· 508 493 goto err_free; 509 494 } 510 495 511 - /* We do this after hid-input is done parsing reports so that 512 - * hid-input uses the most natural button and axis IDs. 513 - */ 514 - if (msc->input) { 515 - ret = magicmouse_setup_input(msc->input, hdev); 516 - if (ret) { 517 - hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); 518 - goto err_stop_hw; 519 - } 496 + if (!msc->input) { 497 + hid_err(hdev, "magicmouse input not registered\n"); 498 + ret = -ENOMEM; 499 + goto err_stop_hw; 520 500 } 521 501 522 502 if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) ··· 578 568 .remove = magicmouse_remove, 579 569 .raw_event = magicmouse_raw_event, 580 570 .input_mapping = magicmouse_input_mapping, 571 + .input_configured = magicmouse_input_configured, 581 572 }; 582 573 module_hid_driver(magicmouse_driver); 583 574
-1
drivers/i2c/busses/i2c-designware-platdrv.c
··· 182 182 adap->algo = &i2c_dw_algo; 183 183 adap->dev.parent = &pdev->dev; 184 184 adap->dev.of_node = pdev->dev.of_node; 185 - ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev)); 186 185 187 186 r = i2c_add_numbered_adapter(adap); 188 187 if (r) {
+1 -1
drivers/infiniband/hw/qib/qib_sd7220.c
··· 44 44 #include "qib.h" 45 45 #include "qib_7220.h" 46 46 47 - #define SD7220_FW_NAME "intel/sd7220.fw" 47 + #define SD7220_FW_NAME "qlogic/sd7220.fw" 48 48 MODULE_FIRMWARE(SD7220_FW_NAME); 49 49 50 50 /*
+38 -13
drivers/md/dm-cache-target.c
··· 6 6 7 7 #include "dm.h" 8 8 #include "dm-bio-prison.h" 9 + #include "dm-bio-record.h" 9 10 #include "dm-cache-metadata.h" 10 11 11 12 #include <linux/dm-io.h> ··· 202 201 unsigned req_nr:2; 203 202 struct dm_deferred_entry *all_io_entry; 204 203 205 - /* writethrough fields */ 204 + /* 205 + * writethrough fields. These MUST remain at the end of this 206 + * structure and the 'cache' member must be the first as it 207 + * is used to determine the offsetof the writethrough fields. 208 + */ 206 209 struct cache *cache; 207 210 dm_cblock_t cblock; 208 211 bio_end_io_t *saved_bi_end_io; 212 + struct dm_bio_details bio_details; 209 213 }; 210 214 211 215 struct dm_cache_migration { ··· 519 513 /*---------------------------------------------------------------- 520 514 * Per bio data 521 515 *--------------------------------------------------------------*/ 522 - static struct per_bio_data *get_per_bio_data(struct bio *bio) 516 + 517 + /* 518 + * If using writeback, leave out struct per_bio_data's writethrough fields. 519 + */ 520 + #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) 521 + #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) 522 + 523 + static size_t get_per_bio_data_size(struct cache *cache) 523 524 { 524 - struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); 525 + return cache->features.write_through ? 
PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; 526 + } 527 + 528 + static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) 529 + { 530 + struct per_bio_data *pb = dm_per_bio_data(bio, data_size); 525 531 BUG_ON(!pb); 526 532 return pb; 527 533 } 528 534 529 - static struct per_bio_data *init_per_bio_data(struct bio *bio) 535 + static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) 530 536 { 531 - struct per_bio_data *pb = get_per_bio_data(bio); 537 + struct per_bio_data *pb = get_per_bio_data(bio, data_size); 532 538 533 539 pb->tick = false; 534 540 pb->req_nr = dm_bio_get_target_bio_nr(bio); ··· 574 556 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) 575 557 { 576 558 unsigned long flags; 577 - struct per_bio_data *pb = get_per_bio_data(bio); 559 + size_t pb_data_size = get_per_bio_data_size(cache); 560 + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 578 561 579 562 spin_lock_irqsave(&cache->lock, flags); 580 563 if (cache->need_tick_bio && ··· 654 635 655 636 static void writethrough_endio(struct bio *bio, int err) 656 637 { 657 - struct per_bio_data *pb = get_per_bio_data(bio); 638 + struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); 658 639 bio->bi_end_io = pb->saved_bi_end_io; 659 640 660 641 if (err) { ··· 662 643 return; 663 644 } 664 645 646 + dm_bio_restore(&pb->bio_details, bio); 665 647 remap_to_cache(pb->cache, bio, pb->cblock); 666 648 667 649 /* ··· 682 662 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, 683 663 dm_oblock_t oblock, dm_cblock_t cblock) 684 664 { 685 - struct per_bio_data *pb = get_per_bio_data(bio); 665 + struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); 686 666 687 667 pb->cache = cache; 688 668 pb->cblock = cblock; 689 669 pb->saved_bi_end_io = bio->bi_end_io; 670 + dm_bio_record(&pb->bio_details, bio); 690 671 bio->bi_end_io = writethrough_endio; 691 672 692 673 
remap_to_origin_clear_discard(pb->cache, bio, oblock); ··· 1056 1035 1057 1036 static void process_flush_bio(struct cache *cache, struct bio *bio) 1058 1037 { 1059 - struct per_bio_data *pb = get_per_bio_data(bio); 1038 + size_t pb_data_size = get_per_bio_data_size(cache); 1039 + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1060 1040 1061 1041 BUG_ON(bio->bi_size); 1062 1042 if (!pb->req_nr) ··· 1129 1107 dm_oblock_t block = get_bio_block(cache, bio); 1130 1108 struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; 1131 1109 struct policy_result lookup_result; 1132 - struct per_bio_data *pb = get_per_bio_data(bio); 1110 + size_t pb_data_size = get_per_bio_data_size(cache); 1111 + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1133 1112 bool discarded_block = is_discarded_oblock(cache, block); 1134 1113 bool can_migrate = discarded_block || spare_migration_bandwidth(cache); 1135 1114 ··· 1904 1881 1905 1882 cache->ti = ca->ti; 1906 1883 ti->private = cache; 1907 - ti->per_bio_data_size = sizeof(struct per_bio_data); 1908 1884 ti->num_flush_bios = 2; 1909 1885 ti->flush_supported = true; 1910 1886 ··· 1912 1890 ti->discard_zeroes_data_unsupported = true; 1913 1891 1914 1892 memcpy(&cache->features, &ca->features, sizeof(cache->features)); 1893 + ti->per_bio_data_size = get_per_bio_data_size(cache); 1915 1894 1916 1895 cache->callbacks.congested_fn = cache_is_congested; 1917 1896 dm_table_add_target_callbacks(ti->table, &cache->callbacks); ··· 2115 2092 2116 2093 int r; 2117 2094 dm_oblock_t block = get_bio_block(cache, bio); 2095 + size_t pb_data_size = get_per_bio_data_size(cache); 2118 2096 bool can_migrate = false; 2119 2097 bool discarded_block; 2120 2098 struct dm_bio_prison_cell *cell; ··· 2132 2108 return DM_MAPIO_REMAPPED; 2133 2109 } 2134 2110 2135 - pb = init_per_bio_data(bio); 2111 + pb = init_per_bio_data(bio, pb_data_size); 2136 2112 2137 2113 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { 2138 2114 
defer_bio(cache, bio); ··· 2217 2193 { 2218 2194 struct cache *cache = ti->private; 2219 2195 unsigned long flags; 2220 - struct per_bio_data *pb = get_per_bio_data(bio); 2196 + size_t pb_data_size = get_per_bio_data_size(cache); 2197 + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 2221 2198 2222 2199 if (pb->tick) { 2223 2200 policy_tick(cache->policy);
+1 -1
drivers/media/platform/Kconfig
··· 204 204 205 205 config VIDEO_SH_VEU 206 206 tristate "SuperH VEU mem2mem video processing driver" 207 - depends on VIDEO_DEV && VIDEO_V4L2 207 + depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS 208 208 select VIDEOBUF2_DMA_CONTIG 209 209 select V4L2_MEM2MEM_DEV 210 210 help
+11
drivers/media/radio/radio-ma901.c
··· 347 347 static int usb_ma901radio_probe(struct usb_interface *intf, 348 348 const struct usb_device_id *id) 349 349 { 350 + struct usb_device *dev = interface_to_usbdev(intf); 350 351 struct ma901radio_device *radio; 351 352 int retval = 0; 353 + 354 + /* Masterkit MA901 usb radio has the same USB ID as many others 355 + * Atmel V-USB devices. Let's make additional checks to be sure 356 + * that this is our device. 357 + */ 358 + 359 + if (dev->product && dev->manufacturer && 360 + (strncmp(dev->product, "MA901", 5) != 0 361 + || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0)) 362 + return -ENODEV; 352 363 353 364 radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); 354 365 if (!radio) {
+2 -3
drivers/net/bonding/bond_main.c
··· 1976 1976 return -EINVAL; 1977 1977 } 1978 1978 1979 + write_unlock_bh(&bond->lock); 1979 1980 /* unregister rx_handler early so bond_handle_frame wouldn't be called 1980 1981 * for this slave anymore. 1981 1982 */ 1982 1983 netdev_rx_handler_unregister(slave_dev); 1983 - write_unlock_bh(&bond->lock); 1984 - synchronize_net(); 1985 1984 write_lock_bh(&bond->lock); 1986 1985 1987 1986 if (!all && !bond->params.fail_over_mac) { ··· 4902 4903 4903 4904 bond_destroy_debugfs(); 4904 4905 4905 - rtnl_link_unregister(&bond_link_ops); 4906 4906 unregister_pernet_subsys(&bond_net_ops); 4907 + rtnl_link_unregister(&bond_link_ops); 4907 4908 4908 4909 #ifdef CONFIG_NET_POLL_CONTROLLER 4909 4910 /*
+48 -44
drivers/net/bonding/bond_sysfs.c
··· 527 527 goto out; 528 528 } 529 529 if (new_value < 0) { 530 - pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", 530 + pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n", 531 531 bond->dev->name, new_value, INT_MAX); 532 532 ret = -EINVAL; 533 533 goto out; ··· 542 542 pr_info("%s: Setting ARP monitoring interval to %d.\n", 543 543 bond->dev->name, new_value); 544 544 bond->params.arp_interval = new_value; 545 - if (bond->params.miimon) { 546 - pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", 547 - bond->dev->name, bond->dev->name); 548 - bond->params.miimon = 0; 549 - } 550 - if (!bond->params.arp_targets[0]) { 551 - pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", 552 - bond->dev->name); 545 + if (new_value) { 546 + if (bond->params.miimon) { 547 + pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", 548 + bond->dev->name, bond->dev->name); 549 + bond->params.miimon = 0; 550 + } 551 + if (!bond->params.arp_targets[0]) 552 + pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", 553 + bond->dev->name); 553 554 } 554 555 if (bond->dev->flags & IFF_UP) { 555 556 /* If the interface is up, we may need to fire off ··· 558 557 * timer will get fired off when the open function 559 558 * is called. 
560 559 */ 561 - cancel_delayed_work_sync(&bond->mii_work); 562 - queue_delayed_work(bond->wq, &bond->arp_work, 0); 560 + if (!new_value) { 561 + cancel_delayed_work_sync(&bond->arp_work); 562 + } else { 563 + cancel_delayed_work_sync(&bond->mii_work); 564 + queue_delayed_work(bond->wq, &bond->arp_work, 0); 565 + } 563 566 } 564 - 565 567 out: 566 568 rtnl_unlock(); 567 569 return ret; ··· 706 702 } 707 703 if (new_value < 0) { 708 704 pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", 709 - bond->dev->name, new_value, 1, INT_MAX); 705 + bond->dev->name, new_value, 0, INT_MAX); 710 706 ret = -EINVAL; 711 707 goto out; 712 708 } else { ··· 761 757 goto out; 762 758 } 763 759 if (new_value < 0) { 764 - pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", 765 - bond->dev->name, new_value, 1, INT_MAX); 760 + pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", 761 + bond->dev->name, new_value, 0, INT_MAX); 766 762 ret = -EINVAL; 767 763 goto out; 768 764 } else { ··· 972 968 } 973 969 if (new_value < 0) { 974 970 pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", 975 - bond->dev->name, new_value, 1, INT_MAX); 971 + bond->dev->name, new_value, 0, INT_MAX); 976 972 ret = -EINVAL; 977 973 goto out; 978 - } else { 979 - pr_info("%s: Setting MII monitoring interval to %d.\n", 980 - bond->dev->name, new_value); 981 - bond->params.miimon = new_value; 982 - if (bond->params.updelay) 983 - pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", 984 - bond->dev->name, 985 - bond->params.updelay * bond->params.miimon); 986 - if (bond->params.downdelay) 987 - pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", 988 - bond->dev->name, 989 - bond->params.downdelay * bond->params.miimon); 990 - if (bond->params.arp_interval) { 991 - pr_info("%s: MII monitoring cannot be used with ARP monitoring. 
Disabling ARP monitoring...\n", 992 - bond->dev->name); 993 - bond->params.arp_interval = 0; 994 - if (bond->params.arp_validate) { 995 - bond->params.arp_validate = 996 - BOND_ARP_VALIDATE_NONE; 997 - } 998 - } 999 - 1000 - if (bond->dev->flags & IFF_UP) { 1001 - /* If the interface is up, we may need to fire off 1002 - * the MII timer. If the interface is down, the 1003 - * timer will get fired off when the open function 1004 - * is called. 1005 - */ 974 + } 975 + pr_info("%s: Setting MII monitoring interval to %d.\n", 976 + bond->dev->name, new_value); 977 + bond->params.miimon = new_value; 978 + if (bond->params.updelay) 979 + pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", 980 + bond->dev->name, 981 + bond->params.updelay * bond->params.miimon); 982 + if (bond->params.downdelay) 983 + pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", 984 + bond->dev->name, 985 + bond->params.downdelay * bond->params.miimon); 986 + if (new_value && bond->params.arp_interval) { 987 + pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", 988 + bond->dev->name); 989 + bond->params.arp_interval = 0; 990 + if (bond->params.arp_validate) 991 + bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; 992 + } 993 + if (bond->dev->flags & IFF_UP) { 994 + /* If the interface is up, we may need to fire off 995 + * the MII timer. If the interface is down, the 996 + * timer will get fired off when the open function 997 + * is called. 998 + */ 999 + if (!new_value) { 1000 + cancel_delayed_work_sync(&bond->mii_work); 1001 + } else { 1006 1002 cancel_delayed_work_sync(&bond->arp_work); 1007 1003 queue_delayed_work(bond->wq, &bond->mii_work, 0); 1008 1004 }
+1
drivers/net/can/sja1000/Kconfig
··· 46 46 config CAN_PEAK_PCMCIA 47 47 tristate "PEAK PCAN-PC Card" 48 48 depends on PCMCIA 49 + depends on HAS_IOPORT 49 50 ---help--- 50 51 This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) 51 52 from PEAK-System (http://www.peak-system.com). To compile this
+2 -2
drivers/net/can/sja1000/plx_pci.c
··· 348 348 */ 349 349 if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == 350 350 REG_CR_BASICCAN_INITIAL && 351 - (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && 351 + (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && 352 352 (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) 353 353 flag = 1; 354 354 ··· 360 360 * See states on p. 23 of the Datasheet. 361 361 */ 362 362 if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && 363 - priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && 363 + priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && 364 364 priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) 365 365 return flag; 366 366
+3 -3
drivers/net/can/sja1000/sja1000.c
··· 92 92 */ 93 93 spin_lock_irqsave(&priv->cmdreg_lock, flags); 94 94 priv->write_reg(priv, REG_CMR, val); 95 - priv->read_reg(priv, REG_SR); 95 + priv->read_reg(priv, SJA1000_REG_SR); 96 96 spin_unlock_irqrestore(&priv->cmdreg_lock, flags); 97 97 } 98 98 ··· 502 502 503 503 while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { 504 504 n++; 505 - status = priv->read_reg(priv, REG_SR); 505 + status = priv->read_reg(priv, SJA1000_REG_SR); 506 506 /* check for absent controller due to hw unplug */ 507 507 if (status == 0xFF && sja1000_is_absent(priv)) 508 508 return IRQ_NONE; ··· 530 530 /* receive interrupt */ 531 531 while (status & SR_RBS) { 532 532 sja1000_rx(dev); 533 - status = priv->read_reg(priv, REG_SR); 533 + status = priv->read_reg(priv, SJA1000_REG_SR); 534 534 /* check for absent controller */ 535 535 if (status == 0xFF && sja1000_is_absent(priv)) 536 536 return IRQ_NONE;
+1 -1
drivers/net/can/sja1000/sja1000.h
··· 56 56 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ 57 57 #define REG_MOD 0x00 58 58 #define REG_CMR 0x01 59 - #define REG_SR 0x02 59 + #define SJA1000_REG_SR 0x02 60 60 #define REG_IR 0x03 61 61 #define REG_IER 0x04 62 62 #define REG_ALC 0x0B
+1 -2
drivers/net/ethernet/atheros/atl1e/atl1e.h
··· 186 186 /* how about 0x2000 */ 187 187 #define MAX_TX_BUF_LEN 0x2000 188 188 #define MAX_TX_BUF_SHIFT 13 189 - /*#define MAX_TX_BUF_LEN 0x3000 */ 189 + #define MAX_TSO_SEG_SIZE 0x3c00 190 190 191 191 /* rrs word 1 bit 0:31 */ 192 192 #define RRS_RX_CSUM_MASK 0xFFFF ··· 438 438 struct atl1e_hw hw; 439 439 struct atl1e_hw_stats hw_stats; 440 440 441 - bool have_msi; 442 441 u32 wol; 443 442 u16 link_speed; 444 443 u16 link_duplex;
+3 -17
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
··· 1849 1849 struct net_device *netdev = adapter->netdev; 1850 1850 1851 1851 free_irq(adapter->pdev->irq, netdev); 1852 - 1853 - if (adapter->have_msi) 1854 - pci_disable_msi(adapter->pdev); 1855 1852 } 1856 1853 1857 1854 static int atl1e_request_irq(struct atl1e_adapter *adapter) 1858 1855 { 1859 1856 struct pci_dev *pdev = adapter->pdev; 1860 1857 struct net_device *netdev = adapter->netdev; 1861 - int flags = 0; 1862 1858 int err = 0; 1863 1859 1864 - adapter->have_msi = true; 1865 - err = pci_enable_msi(pdev); 1866 - if (err) { 1867 - netdev_dbg(netdev, 1868 - "Unable to allocate MSI interrupt Error: %d\n", err); 1869 - adapter->have_msi = false; 1870 - } 1871 - 1872 - if (!adapter->have_msi) 1873 - flags |= IRQF_SHARED; 1874 - err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); 1860 + err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, 1861 + netdev); 1875 1862 if (err) { 1876 1863 netdev_dbg(adapter->netdev, 1877 1864 "Unable to allocate interrupt Error: %d\n", err); 1878 - if (adapter->have_msi) 1879 - pci_disable_msi(pdev); 1880 1865 return err; 1881 1866 } 1882 1867 netdev_dbg(netdev, "atl1e_request_irq OK\n"); ··· 2329 2344 2330 2345 INIT_WORK(&adapter->reset_task, atl1e_reset_task); 2331 2346 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); 2347 + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); 2332 2348 err = register_netdev(netdev); 2333 2349 if (err) { 2334 2350 netdev_err(netdev, "register netdevice failed\n");
+5 -2
drivers/net/ethernet/broadcom/tg3.c
··· 14604 14604 if (j + len > block_end) 14605 14605 goto partno; 14606 14606 14607 - memcpy(tp->fw_ver, &vpd_data[j], len); 14608 - strncat(tp->fw_ver, " bc ", vpdlen - len - 1); 14607 + if (len >= sizeof(tp->fw_ver)) 14608 + len = sizeof(tp->fw_ver) - 1; 14609 + memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); 14610 + snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, 14611 + &vpd_data[j]); 14609 14612 } 14610 14613 14611 14614 partno:
+8 -1
drivers/net/ethernet/calxeda/xgmac.c
··· 163 163 #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ 164 164 165 165 /* XGMAC_INT_STAT reg */ 166 + #define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ 166 167 #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ 167 168 #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ 168 169 ··· 961 960 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); 962 961 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); 963 962 963 + /* Mask power mgt interrupt */ 964 + writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); 965 + 964 966 /* XGMAC requires AXI bus init. This is a 'magic number' for now */ 965 967 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); 966 968 ··· 1144 1140 int ip_checksum; 1145 1141 struct sk_buff *skb; 1146 1142 int frame_len; 1143 + 1144 + if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) 1145 + break; 1147 1146 1148 1147 entry = priv->rx_tail; 1149 1148 p = priv->dma_rx + entry; ··· 1832 1825 unsigned int pmt = 0; 1833 1826 1834 1827 if (mode & WAKE_MAGIC) 1835 - pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; 1828 + pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; 1836 1829 if (mode & WAKE_UCAST) 1837 1830 pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; 1838 1831
+110 -104
drivers/net/ethernet/davicom/dm9000.c
··· 257 257 tmp = readl(reg); 258 258 } 259 259 260 + /* 261 + * Sleep, either by using msleep() or if we are suspending, then 262 + * use mdelay() to sleep. 263 + */ 264 + static void dm9000_msleep(board_info_t *db, unsigned int ms) 265 + { 266 + if (db->in_suspend) 267 + mdelay(ms); 268 + else 269 + msleep(ms); 270 + } 271 + 272 + /* Read a word from phyxcer */ 273 + static int 274 + dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) 275 + { 276 + board_info_t *db = netdev_priv(dev); 277 + unsigned long flags; 278 + unsigned int reg_save; 279 + int ret; 280 + 281 + mutex_lock(&db->addr_lock); 282 + 283 + spin_lock_irqsave(&db->lock, flags); 284 + 285 + /* Save previous register address */ 286 + reg_save = readb(db->io_addr); 287 + 288 + /* Fill the phyxcer register into REG_0C */ 289 + iow(db, DM9000_EPAR, DM9000_PHY | reg); 290 + 291 + /* Issue phyxcer read command */ 292 + iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); 293 + 294 + writeb(reg_save, db->io_addr); 295 + spin_unlock_irqrestore(&db->lock, flags); 296 + 297 + dm9000_msleep(db, 1); /* Wait read complete */ 298 + 299 + spin_lock_irqsave(&db->lock, flags); 300 + reg_save = readb(db->io_addr); 301 + 302 + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ 303 + 304 + /* The read data keeps on REG_0D & REG_0E */ 305 + ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); 306 + 307 + /* restore the previous address */ 308 + writeb(reg_save, db->io_addr); 309 + spin_unlock_irqrestore(&db->lock, flags); 310 + 311 + mutex_unlock(&db->addr_lock); 312 + 313 + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); 314 + return ret; 315 + } 316 + 317 + /* Write a word to phyxcer */ 318 + static void 319 + dm9000_phy_write(struct net_device *dev, 320 + int phyaddr_unused, int reg, int value) 321 + { 322 + board_info_t *db = netdev_priv(dev); 323 + unsigned long flags; 324 + unsigned long reg_save; 325 + 326 + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); 327 + 
mutex_lock(&db->addr_lock); 328 + 329 + spin_lock_irqsave(&db->lock, flags); 330 + 331 + /* Save previous register address */ 332 + reg_save = readb(db->io_addr); 333 + 334 + /* Fill the phyxcer register into REG_0C */ 335 + iow(db, DM9000_EPAR, DM9000_PHY | reg); 336 + 337 + /* Fill the written data into REG_0D & REG_0E */ 338 + iow(db, DM9000_EPDRL, value); 339 + iow(db, DM9000_EPDRH, value >> 8); 340 + 341 + /* Issue phyxcer write command */ 342 + iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); 343 + 344 + writeb(reg_save, db->io_addr); 345 + spin_unlock_irqrestore(&db->lock, flags); 346 + 347 + dm9000_msleep(db, 1); /* Wait write complete */ 348 + 349 + spin_lock_irqsave(&db->lock, flags); 350 + reg_save = readb(db->io_addr); 351 + 352 + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ 353 + 354 + /* restore the previous address */ 355 + writeb(reg_save, db->io_addr); 356 + 357 + spin_unlock_irqrestore(&db->lock, flags); 358 + mutex_unlock(&db->addr_lock); 359 + } 360 + 260 361 /* dm9000_set_io 261 362 * 262 363 * select the specified set of io routines to use with the ··· 896 795 897 796 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 898 797 798 + dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ 799 + dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ 800 + 899 801 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 900 802 901 803 /* if wol is needed, then always set NCR_WAKEEN otherwise we end ··· 1305 1201 return 0; 1306 1202 } 1307 1203 1308 - /* 1309 - * Sleep, either by using msleep() or if we are suspending, then 1310 - * use mdelay() to sleep. 
1311 - */ 1312 - static void dm9000_msleep(board_info_t *db, unsigned int ms) 1313 - { 1314 - if (db->in_suspend) 1315 - mdelay(ms); 1316 - else 1317 - msleep(ms); 1318 - } 1319 - 1320 - /* 1321 - * Read a word from phyxcer 1322 - */ 1323 - static int 1324 - dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) 1325 - { 1326 - board_info_t *db = netdev_priv(dev); 1327 - unsigned long flags; 1328 - unsigned int reg_save; 1329 - int ret; 1330 - 1331 - mutex_lock(&db->addr_lock); 1332 - 1333 - spin_lock_irqsave(&db->lock,flags); 1334 - 1335 - /* Save previous register address */ 1336 - reg_save = readb(db->io_addr); 1337 - 1338 - /* Fill the phyxcer register into REG_0C */ 1339 - iow(db, DM9000_EPAR, DM9000_PHY | reg); 1340 - 1341 - iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ 1342 - 1343 - writeb(reg_save, db->io_addr); 1344 - spin_unlock_irqrestore(&db->lock,flags); 1345 - 1346 - dm9000_msleep(db, 1); /* Wait read complete */ 1347 - 1348 - spin_lock_irqsave(&db->lock,flags); 1349 - reg_save = readb(db->io_addr); 1350 - 1351 - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ 1352 - 1353 - /* The read data keeps on REG_0D & REG_0E */ 1354 - ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); 1355 - 1356 - /* restore the previous address */ 1357 - writeb(reg_save, db->io_addr); 1358 - spin_unlock_irqrestore(&db->lock,flags); 1359 - 1360 - mutex_unlock(&db->addr_lock); 1361 - 1362 - dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); 1363 - return ret; 1364 - } 1365 - 1366 - /* 1367 - * Write a word to phyxcer 1368 - */ 1369 - static void 1370 - dm9000_phy_write(struct net_device *dev, 1371 - int phyaddr_unused, int reg, int value) 1372 - { 1373 - board_info_t *db = netdev_priv(dev); 1374 - unsigned long flags; 1375 - unsigned long reg_save; 1376 - 1377 - dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); 1378 - mutex_lock(&db->addr_lock); 1379 - 1380 - spin_lock_irqsave(&db->lock,flags); 
1381 - 1382 - /* Save previous register address */ 1383 - reg_save = readb(db->io_addr); 1384 - 1385 - /* Fill the phyxcer register into REG_0C */ 1386 - iow(db, DM9000_EPAR, DM9000_PHY | reg); 1387 - 1388 - /* Fill the written data into REG_0D & REG_0E */ 1389 - iow(db, DM9000_EPDRL, value); 1390 - iow(db, DM9000_EPDRH, value >> 8); 1391 - 1392 - iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ 1393 - 1394 - writeb(reg_save, db->io_addr); 1395 - spin_unlock_irqrestore(&db->lock, flags); 1396 - 1397 - dm9000_msleep(db, 1); /* Wait write complete */ 1398 - 1399 - spin_lock_irqsave(&db->lock,flags); 1400 - reg_save = readb(db->io_addr); 1401 - 1402 - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ 1403 - 1404 - /* restore the previous address */ 1405 - writeb(reg_save, db->io_addr); 1406 - 1407 - spin_unlock_irqrestore(&db->lock, flags); 1408 - mutex_unlock(&db->addr_lock); 1409 - } 1410 - 1411 1204 static void 1412 1205 dm9000_shutdown(struct net_device *dev) 1413 1206 { ··· 1503 1502 db->flags |= DM9000_PLATF_SIMPLE_PHY; 1504 1503 #endif 1505 1504 1506 - dm9000_reset(db); 1505 + /* Fixing bug on dm9000_probe, takeover dm9000_reset(db), 1506 + * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo 1507 + * while probe stage. 1508 + */ 1509 + 1510 + iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST); 1507 1511 1508 1512 /* try multiple times, DM9000 sometimes gets the read wrong */ 1509 1513 for (i = 0; i < 8; i++) {
+10 -1
drivers/net/ethernet/davicom/dm9000.h
··· 69 69 #define NCR_WAKEEN (1<<6) 70 70 #define NCR_FCOL (1<<4) 71 71 #define NCR_FDX (1<<3) 72 - #define NCR_LBK (3<<1) 72 + 73 + #define NCR_RESERVED (3<<1) 74 + #define NCR_MAC_LBK (1<<1) 73 75 #define NCR_RST (1<<0) 74 76 75 77 #define NSR_SPEED (1<<7) ··· 168 166 169 167 #define ISR_LNKCHNG (1<<5) 170 168 #define ISR_UNDERRUN (1<<4) 169 + 170 + /* Davicom MII registers. 171 + */ 172 + 173 + #define MII_DM_DSPCR 0x1b /* DSP Control Register */ 174 + 175 + #define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ 171 176 172 177 #endif /* _DM9000X_H_ */ 173 178
+50 -32
drivers/net/ethernet/freescale/fec.c
··· 345 345 return NETDEV_TX_OK; 346 346 } 347 347 348 + /* Init RX & TX buffer descriptors 349 + */ 350 + static void fec_enet_bd_init(struct net_device *dev) 351 + { 352 + struct fec_enet_private *fep = netdev_priv(dev); 353 + struct bufdesc *bdp; 354 + unsigned int i; 355 + 356 + /* Initialize the receive buffer descriptors. */ 357 + bdp = fep->rx_bd_base; 358 + for (i = 0; i < RX_RING_SIZE; i++) { 359 + 360 + /* Initialize the BD for every fragment in the page. */ 361 + if (bdp->cbd_bufaddr) 362 + bdp->cbd_sc = BD_ENET_RX_EMPTY; 363 + else 364 + bdp->cbd_sc = 0; 365 + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 366 + } 367 + 368 + /* Set the last buffer to wrap */ 369 + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 370 + bdp->cbd_sc |= BD_SC_WRAP; 371 + 372 + fep->cur_rx = fep->rx_bd_base; 373 + 374 + /* ...and the same for transmit */ 375 + bdp = fep->tx_bd_base; 376 + fep->cur_tx = bdp; 377 + for (i = 0; i < TX_RING_SIZE; i++) { 378 + 379 + /* Initialize the BD for every fragment in the page. */ 380 + bdp->cbd_sc = 0; 381 + if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { 382 + dev_kfree_skb_any(fep->tx_skbuff[i]); 383 + fep->tx_skbuff[i] = NULL; 384 + } 385 + bdp->cbd_bufaddr = 0; 386 + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 387 + } 388 + 389 + /* Set the last buffer to wrap */ 390 + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 391 + bdp->cbd_sc |= BD_SC_WRAP; 392 + fep->dirty_tx = bdp; 393 + } 394 + 348 395 /* This function is called to start or restart the FEC during a link 349 396 * change. This only happens when switching between half and full 350 397 * duplex. ··· 435 388 /* Set maximum receive buffer size. */ 436 389 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); 437 390 391 + fec_enet_bd_init(ndev); 392 + 438 393 /* Set receive and transmit descriptor base. 
*/ 439 394 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); 440 395 if (fep->bufdesc_ex) ··· 446 397 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) 447 398 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); 448 399 449 - fep->cur_rx = fep->rx_bd_base; 450 400 451 401 for (i = 0; i <= TX_RING_MOD_MASK; i++) { 452 402 if (fep->tx_skbuff[i]) { ··· 1645 1597 { 1646 1598 struct fec_enet_private *fep = netdev_priv(ndev); 1647 1599 struct bufdesc *cbd_base; 1648 - struct bufdesc *bdp; 1649 - unsigned int i; 1650 1600 1651 1601 /* Allocate memory for buffer descriptors. */ 1652 1602 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, ··· 1654 1608 return -ENOMEM; 1655 1609 } 1656 1610 1611 + memset(cbd_base, 0, PAGE_SIZE); 1657 1612 spin_lock_init(&fep->hw_lock); 1658 1613 1659 1614 fep->netdev = ndev; ··· 1677 1630 1678 1631 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 1679 1632 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); 1680 - 1681 - /* Initialize the receive buffer descriptors. */ 1682 - bdp = fep->rx_bd_base; 1683 - for (i = 0; i < RX_RING_SIZE; i++) { 1684 - 1685 - /* Initialize the BD for every fragment in the page. */ 1686 - bdp->cbd_sc = 0; 1687 - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 1688 - } 1689 - 1690 - /* Set the last buffer to wrap */ 1691 - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 1692 - bdp->cbd_sc |= BD_SC_WRAP; 1693 - 1694 - /* ...and the same for transmit */ 1695 - bdp = fep->tx_bd_base; 1696 - fep->cur_tx = bdp; 1697 - for (i = 0; i < TX_RING_SIZE; i++) { 1698 - 1699 - /* Initialize the BD for every fragment in the page. */ 1700 - bdp->cbd_sc = 0; 1701 - bdp->cbd_bufaddr = 0; 1702 - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 1703 - } 1704 - 1705 - /* Set the last buffer to wrap */ 1706 - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 1707 - bdp->cbd_sc |= BD_SC_WRAP; 1708 - fep->dirty_tx = bdp; 1709 1633 1710 1634 fec_restart(ndev, 0); 1711 1635
+11 -3
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
··· 1053 1053 txdr->buffer_info[i].dma = 1054 1054 dma_map_single(&pdev->dev, skb->data, skb->len, 1055 1055 DMA_TO_DEVICE); 1056 + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { 1057 + ret_val = 4; 1058 + goto err_nomem; 1059 + } 1056 1060 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); 1057 1061 tx_desc->lower.data = cpu_to_le32(skb->len); 1058 1062 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | ··· 1073 1069 rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), 1074 1070 GFP_KERNEL); 1075 1071 if (!rxdr->buffer_info) { 1076 - ret_val = 4; 1072 + ret_val = 5; 1077 1073 goto err_nomem; 1078 1074 } 1079 1075 ··· 1081 1077 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1082 1078 GFP_KERNEL); 1083 1079 if (!rxdr->desc) { 1084 - ret_val = 5; 1080 + ret_val = 6; 1085 1081 goto err_nomem; 1086 1082 } 1087 1083 memset(rxdr->desc, 0, rxdr->size); ··· 1105 1101 1106 1102 skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); 1107 1103 if (!skb) { 1108 - ret_val = 6; 1104 + ret_val = 7; 1109 1105 goto err_nomem; 1110 1106 } 1111 1107 skb_reserve(skb, NET_IP_ALIGN); ··· 1114 1110 rxdr->buffer_info[i].dma = 1115 1111 dma_map_single(&pdev->dev, skb->data, 1116 1112 E1000_RXBUFFER_2048, DMA_FROM_DEVICE); 1113 + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { 1114 + ret_val = 8; 1115 + goto err_nomem; 1116 + } 1117 1117 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); 1118 1118 memset(skb->data, 0x00, skb->len); 1119 1119 }
+6 -1
drivers/net/ethernet/intel/e1000e/netdev.c
··· 848 848 } 849 849 } 850 850 851 - if (!buffer_info->dma) 851 + if (!buffer_info->dma) { 852 852 buffer_info->dma = dma_map_page(&pdev->dev, 853 853 buffer_info->page, 0, 854 854 PAGE_SIZE, 855 855 DMA_FROM_DEVICE); 856 + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 857 + adapter->alloc_rx_buff_failed++; 858 + break; 859 + } 860 + } 856 861 857 862 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 858 863 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+6 -1
drivers/net/ethernet/intel/ixgb/ixgb_main.c
··· 2159 2159 skb->data, 2160 2160 adapter->rx_buffer_len, 2161 2161 DMA_FROM_DEVICE); 2162 + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 2163 + adapter->alloc_rx_buff_failed++; 2164 + break; 2165 + } 2162 2166 2163 2167 rx_desc = IXGB_RX_DESC(*rx_ring, i); 2164 2168 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); ··· 2172 2168 rx_desc->status = 0; 2173 2169 2174 2170 2175 - if (++i == rx_ring->count) i = 0; 2171 + if (++i == rx_ring->count) 2172 + i = 0; 2176 2173 buffer_info = &rx_ring->buffer_info[i]; 2177 2174 } 2178 2175
+9 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 7922 7922 ixgbe_dbg_init(); 7923 7923 #endif /* CONFIG_DEBUG_FS */ 7924 7924 7925 + ret = pci_register_driver(&ixgbe_driver); 7926 + if (ret) { 7927 + #ifdef CONFIG_DEBUG_FS 7928 + ixgbe_dbg_exit(); 7929 + #endif /* CONFIG_DEBUG_FS */ 7930 + return ret; 7931 + } 7932 + 7925 7933 #ifdef CONFIG_IXGBE_DCA 7926 7934 dca_register_notify(&dca_notifier); 7927 7935 #endif 7928 7936 7929 - ret = pci_register_driver(&ixgbe_driver); 7930 - return ret; 7937 + return 0; 7931 7938 } 7932 7939 7933 7940 module_init(ixgbe_init_module);
+1 -1
drivers/net/ethernet/marvell/sky2.c
··· 1067 1067 sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); 1068 1068 sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); 1069 1069 1070 - tp = space - 2048/8; 1070 + tp = space - 8192/8; 1071 1071 sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); 1072 1072 sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); 1073 1073 } else {
+1 -1
drivers/net/ethernet/marvell/sky2.h
··· 2074 2074 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ 2075 2075 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 2076 2076 2077 - #define GMAC_DEF_MSK GM_IS_TX_FF_UR 2077 + #define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) 2078 2078 }; 2079 2079 2080 2080 /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+2 -2
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 411 411 412 412 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 413 413 { 414 - unsigned int i; 415 - for (i = ETH_ALEN - 1; i; --i) { 414 + int i; 415 + for (i = ETH_ALEN - 1; i >= 0; --i) { 416 416 dst_mac[i] = src_mac & 0xff; 417 417 src_mac >>= 8; 418 418 }
+1 -1
drivers/net/ethernet/micrel/ks8851.c
··· 528 528 for (; rxfc != 0; rxfc--) { 529 529 rxh = ks8851_rdreg32(ks, KS_RXFHSR); 530 530 rxstat = rxh & 0xffff; 531 - rxlen = rxh >> 16; 531 + rxlen = (rxh >> 16) & 0xfff; 532 532 533 533 netif_dbg(ks, rx_status, ks->netdev, 534 534 "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
+25 -3
drivers/net/ethernet/realtek/r8169.c
··· 3818 3818 } 3819 3819 } 3820 3820 3821 + static void rtl_speed_down(struct rtl8169_private *tp) 3822 + { 3823 + u32 adv; 3824 + int lpa; 3825 + 3826 + rtl_writephy(tp, 0x1f, 0x0000); 3827 + lpa = rtl_readphy(tp, MII_LPA); 3828 + 3829 + if (lpa & (LPA_10HALF | LPA_10FULL)) 3830 + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; 3831 + else if (lpa & (LPA_100HALF | LPA_100FULL)) 3832 + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 3833 + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; 3834 + else 3835 + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 3836 + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 3837 + (tp->mii.supports_gmii ? 3838 + ADVERTISED_1000baseT_Half | 3839 + ADVERTISED_1000baseT_Full : 0); 3840 + 3841 + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, 3842 + adv); 3843 + } 3844 + 3821 3845 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) 3822 3846 { 3823 3847 void __iomem *ioaddr = tp->mmio_addr; ··· 3872 3848 if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) 3873 3849 return false; 3874 3850 3875 - rtl_writephy(tp, 0x1f, 0x0000); 3876 - rtl_writephy(tp, MII_BMCR, 0x0000); 3877 - 3851 + rtl_speed_down(tp); 3878 3852 rtl_wol_suspend_quirk(tp); 3879 3853 3880 3854 return true;
+18 -10
drivers/net/ethernet/renesas/sh_eth.c
··· 1216 1216 if (felic_stat & ECSR_LCHNG) { 1217 1217 /* Link Changed */ 1218 1218 if (mdp->cd->no_psr || mdp->no_ether_link) { 1219 - if (mdp->link == PHY_DOWN) 1220 - link_stat = 0; 1221 - else 1222 - link_stat = PHY_ST_LINK; 1219 + goto ignore_link; 1223 1220 } else { 1224 1221 link_stat = (sh_eth_read(ndev, PSR)); 1225 1222 if (mdp->ether_link_active_low) ··· 1239 1242 } 1240 1243 } 1241 1244 1245 + ignore_link: 1242 1246 if (intr_status & EESR_TWB) { 1243 1247 /* Write buck end. unused write back interrupt */ 1244 1248 if (intr_status & EESR_TABT) /* Transmit Abort int */ ··· 1324 1326 struct sh_eth_private *mdp = netdev_priv(ndev); 1325 1327 struct sh_eth_cpu_data *cd = mdp->cd; 1326 1328 irqreturn_t ret = IRQ_NONE; 1327 - u32 intr_status = 0; 1329 + unsigned long intr_status; 1328 1330 1329 1331 spin_lock(&mdp->lock); 1330 1332 1331 - /* Get interrpt stat */ 1333 + /* Get interrupt status */ 1332 1334 intr_status = sh_eth_read(ndev, EESR); 1335 + /* Mask it with the interrupt mask, forcing ECI interrupt to be always 1336 + * enabled since it's the one that comes thru regardless of the mask, 1337 + * and we need to fully handle it in sh_eth_error() in order to quench 1338 + * it as it doesn't get cleared by just writing 1 to the ECI bit... 
1339 + */ 1340 + intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; 1333 1341 /* Clear interrupt */ 1334 1342 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1335 1343 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | ··· 1377 1373 struct phy_device *phydev = mdp->phydev; 1378 1374 int new_state = 0; 1379 1375 1380 - if (phydev->link != PHY_DOWN) { 1376 + if (phydev->link) { 1381 1377 if (phydev->duplex != mdp->duplex) { 1382 1378 new_state = 1; 1383 1379 mdp->duplex = phydev->duplex; ··· 1391 1387 if (mdp->cd->set_rate) 1392 1388 mdp->cd->set_rate(ndev); 1393 1389 } 1394 - if (mdp->link == PHY_DOWN) { 1390 + if (!mdp->link) { 1395 1391 sh_eth_write(ndev, 1396 1392 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); 1397 1393 new_state = 1; 1398 1394 mdp->link = phydev->link; 1395 + if (mdp->cd->no_psr || mdp->no_ether_link) 1396 + sh_eth_rcv_snd_enable(ndev); 1399 1397 } 1400 1398 } else if (mdp->link) { 1401 1399 new_state = 1; 1402 - mdp->link = PHY_DOWN; 1400 + mdp->link = 0; 1403 1401 mdp->speed = 0; 1404 1402 mdp->duplex = -1; 1403 + if (mdp->cd->no_psr || mdp->no_ether_link) 1404 + sh_eth_rcv_snd_disable(ndev); 1405 1405 } 1406 1406 1407 1407 if (new_state && netif_msg_link(mdp)) ··· 1422 1414 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 1423 1415 mdp->mii_bus->id , mdp->phy_id); 1424 1416 1425 - mdp->link = PHY_DOWN; 1417 + mdp->link = 0; 1426 1418 mdp->speed = 0; 1427 1419 mdp->duplex = -1; 1428 1420
+1 -1
drivers/net/ethernet/renesas/sh_eth.h
··· 723 723 u32 phy_id; /* PHY ID */ 724 724 struct mii_bus *mii_bus; /* MDIO bus control */ 725 725 struct phy_device *phydev; /* PHY device control */ 726 - enum phy_state link; 726 + int link; 727 727 phy_interface_t phy_interface; 728 728 int msg_enable; 729 729 int speed;
+1 -1
drivers/net/ethernet/ti/cpsw.c
··· 436 436 * queue is stopped then start the queue as we have free desc for tx 437 437 */ 438 438 if (unlikely(netif_queue_stopped(ndev))) 439 - netif_start_queue(ndev); 439 + netif_wake_queue(ndev); 440 440 cpts_tx_timestamp(priv->cpts, skb); 441 441 priv->stats.tx_packets++; 442 442 priv->stats.tx_bytes += len;
+1 -1
drivers/net/ethernet/ti/davinci_emac.c
··· 1053 1053 * queue is stopped then start the queue as we have free desc for tx 1054 1054 */ 1055 1055 if (unlikely(netif_queue_stopped(ndev))) 1056 - netif_start_queue(ndev); 1056 + netif_wake_queue(ndev); 1057 1057 ndev->stats.tx_packets++; 1058 1058 ndev->stats.tx_bytes += len; 1059 1059 dev_kfree_skb_any(skb);
+8 -4
drivers/net/usb/smsc75xx.c
··· 914 914 static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) 915 915 { 916 916 struct usbnet *dev = netdev_priv(netdev); 917 + int ret; 917 918 918 - int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); 919 + if (new_mtu > MAX_SINGLE_PACKET_SIZE) 920 + return -EINVAL; 921 + 922 + ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); 919 923 if (ret < 0) { 920 924 netdev_warn(dev->net, "Failed to set mac rx frame length\n"); 921 925 return ret; ··· 1328 1324 1329 1325 netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); 1330 1326 1331 - ret = smsc75xx_set_rx_max_frame_length(dev, 1514); 1327 + ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); 1332 1328 if (ret < 0) { 1333 1329 netdev_warn(dev->net, "Failed to set max rx frame length\n"); 1334 1330 return ret; ··· 2138 2134 else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) 2139 2135 dev->net->stats.rx_frame_errors++; 2140 2136 } else { 2141 - /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ 2142 - if (unlikely(size > (ETH_FRAME_LEN + 12))) { 2137 + /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ 2138 + if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { 2143 2139 netif_dbg(dev, rx_err, dev->net, 2144 2140 "size err rx_cmd_a=0x%08x\n", 2145 2141 rx_cmd_a);
+2 -1
drivers/net/wireless/ath/ath9k/link.c
··· 170 170 { 171 171 struct ath_softc *sc = (struct ath_softc *)data; 172 172 173 - ieee80211_queue_work(sc->hw, &sc->hw_check_work); 173 + if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) 174 + ieee80211_queue_work(sc->hw, &sc->hw_check_work); 174 175 } 175 176 176 177 /*
+53 -12
drivers/net/wireless/b43/dma.c
··· 1487 1487 const struct b43_dma_ops *ops; 1488 1488 struct b43_dmaring *ring; 1489 1489 struct b43_dmadesc_meta *meta; 1490 + static const struct b43_txstatus fake; /* filled with 0 */ 1491 + const struct b43_txstatus *txstat; 1490 1492 int slot, firstused; 1491 1493 bool frame_succeed; 1494 + int skip; 1495 + static u8 err_out1, err_out2; 1492 1496 1493 1497 ring = parse_cookie(dev, status->cookie, &slot); 1494 1498 if (unlikely(!ring)) ··· 1505 1501 firstused = ring->current_slot - ring->used_slots + 1; 1506 1502 if (firstused < 0) 1507 1503 firstused = ring->nr_slots + firstused; 1504 + 1505 + skip = 0; 1508 1506 if (unlikely(slot != firstused)) { 1509 1507 /* This possibly is a firmware bug and will result in 1510 - * malfunction, memory leaks and/or stall of DMA functionality. */ 1511 - b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " 1512 - "Expected %d, but got %d\n", 1513 - ring->index, firstused, slot); 1514 - return; 1508 + * malfunction, memory leaks and/or stall of DMA functionality. 1509 + */ 1510 + if (slot == next_slot(ring, next_slot(ring, firstused))) { 1511 + /* If a single header/data pair was missed, skip over 1512 + * the first two slots in an attempt to recover. 1513 + */ 1514 + slot = firstused; 1515 + skip = 2; 1516 + if (!err_out1) { 1517 + /* Report the error once. */ 1518 + b43dbg(dev->wl, 1519 + "Skip on DMA ring %d slot %d.\n", 1520 + ring->index, slot); 1521 + err_out1 = 1; 1522 + } 1523 + } else { 1524 + /* More than a single header/data pair were missed. 1525 + * Report this error once. 1526 + */ 1527 + if (!err_out2) 1528 + b43dbg(dev->wl, 1529 + "Out of order TX status report on DMA ring %d. 
Expected %d, but got %d\n", 1530 + ring->index, firstused, slot); 1531 + err_out2 = 1; 1532 + return; 1533 + } 1515 1534 } 1516 1535 1517 1536 ops = ring->ops; ··· 1549 1522 slot, firstused, ring->index); 1550 1523 break; 1551 1524 } 1525 + 1552 1526 if (meta->skb) { 1553 1527 struct b43_private_tx_info *priv_info = 1554 - b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); 1528 + b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); 1555 1529 1556 - unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); 1530 + unmap_descbuffer(ring, meta->dmaaddr, 1531 + meta->skb->len, 1); 1557 1532 kfree(priv_info->bouncebuffer); 1558 1533 priv_info->bouncebuffer = NULL; 1559 1534 } else { ··· 1567 1538 struct ieee80211_tx_info *info; 1568 1539 1569 1540 if (unlikely(!meta->skb)) { 1570 - /* This is a scatter-gather fragment of a frame, so 1571 - * the skb pointer must not be NULL. */ 1541 + /* This is a scatter-gather fragment of a frame, 1542 + * so the skb pointer must not be NULL. 1543 + */ 1572 1544 b43dbg(dev->wl, "TX status unexpected NULL skb " 1573 1545 "at slot %d (first=%d) on ring %d\n", 1574 1546 slot, firstused, ring->index); ··· 1580 1550 1581 1551 /* 1582 1552 * Call back to inform the ieee80211 subsystem about 1583 - * the status of the transmission. 1553 + * the status of the transmission. When skipping over 1554 + * a missed TX status report, use a status structure 1555 + * filled with zeros to indicate that the frame was not 1556 + * sent (frame_count 0) and not acknowledged 1584 1557 */ 1585 - frame_succeed = b43_fill_txstatus_report(dev, info, status); 1558 + if (unlikely(skip)) 1559 + txstat = &fake; 1560 + else 1561 + txstat = status; 1562 + 1563 + frame_succeed = b43_fill_txstatus_report(dev, info, 1564 + txstat); 1586 1565 #ifdef CONFIG_B43_DEBUG 1587 1566 if (frame_succeed) 1588 1567 ring->nr_succeed_tx_packets++; ··· 1619 1580 /* Everything unmapped and free'd. So it's not used anymore. 
*/ 1620 1581 ring->used_slots--; 1621 1582 1622 - if (meta->is_last_fragment) { 1583 + if (meta->is_last_fragment && !skip) { 1623 1584 /* This is the last scatter-gather 1624 1585 * fragment of the frame. We are done. */ 1625 1586 break; 1626 1587 } 1627 1588 slot = next_slot(ring, slot); 1589 + if (skip > 0) 1590 + --skip; 1628 1591 } 1629 1592 if (ring->stopped) { 1630 1593 B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+4 -4
drivers/net/wireless/b43/phy_n.c
··· 1564 1564 u16 clip_off[2] = { 0xFFFF, 0xFFFF }; 1565 1565 1566 1566 u8 vcm_final = 0; 1567 - s8 offset[4]; 1567 + s32 offset[4]; 1568 1568 s32 results[8][4] = { }; 1569 1569 s32 results_min[4] = { }; 1570 1570 s32 poll_results[4] = { }; ··· 1615 1615 } 1616 1616 for (i = 0; i < 4; i += 2) { 1617 1617 s32 curr; 1618 - s32 mind = 40; 1618 + s32 mind = 0x100000; 1619 1619 s32 minpoll = 249; 1620 1620 u8 minvcm = 0; 1621 1621 if (2 * core != i) ··· 1732 1732 u8 regs_save_radio[2]; 1733 1733 u16 regs_save_phy[2]; 1734 1734 1735 - s8 offset[4]; 1735 + s32 offset[4]; 1736 1736 u8 core; 1737 1737 u8 rail; 1738 1738 ··· 1799 1799 } 1800 1800 1801 1801 for (i = 0; i < 4; i++) { 1802 - s32 mind = 40; 1802 + s32 mind = 0x100000; 1803 1803 u8 minvcm = 0; 1804 1804 s32 minpoll = 249; 1805 1805 s32 curr;
+139 -248
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
··· 1137 1137 gain0_15 = ((biq1 & 0xf) << 12) | 1138 1138 ((tia & 0xf) << 8) | 1139 1139 ((lna2 & 0x3) << 6) | 1140 - ((lna2 & 0x3) << 4) | 1141 - ((lna1 & 0x3) << 2) | 1142 - ((lna1 & 0x3) << 0); 1140 + ((lna2 & 1141 + 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); 1143 1142 1144 1143 mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); 1145 1144 mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); ··· 1156 1157 } 1157 1158 1158 1159 mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); 1159 - mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); 1160 - mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); 1161 1160 1162 1161 } 1163 1162 ··· 1328 1331 return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; 1329 1332 } 1330 1333 1331 - static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, 1332 - u16 tia_gain, u16 lna2_gain) 1333 - { 1334 - u32 i_thresh_l, q_thresh_l; 1335 - u32 i_thresh_h, q_thresh_h; 1336 - struct lcnphy_iq_est iq_est_h, iq_est_l; 1337 - 1338 - wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, 1339 - lna2_gain, 0); 1340 - 1341 - wlc_lcnphy_rx_gain_override_enable(pi, true); 1342 - wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0); 1343 - udelay(500); 1344 - write_radio_reg(pi, RADIO_2064_REG112, 0); 1345 - if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) 1346 - return false; 1347 - 1348 - wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); 1349 - udelay(500); 1350 - write_radio_reg(pi, RADIO_2064_REG112, 0); 1351 - if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) 1352 - return false; 1353 - 1354 - i_thresh_l = (iq_est_l.i_pwr << 1); 1355 - i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; 1356 - 1357 - q_thresh_l = (iq_est_l.q_pwr << 1); 1358 - q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; 1359 - if ((iq_est_h.i_pwr > i_thresh_l) && 1360 - (iq_est_h.i_pwr < i_thresh_h) && 1361 - (iq_est_h.q_pwr > q_thresh_l) && 1362 - (iq_est_h.q_pwr < q_thresh_h)) 1363 - return true; 1364 - 1365 - return false; 1366 - } 1367 - 
1368 1334 static bool 1369 1335 wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, 1370 1336 const struct lcnphy_rx_iqcomp *iqcomp, ··· 1342 1382 RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, 1343 1383 rfoverride3_old, rfoverride3val_old, rfoverride4_old, 1344 1384 rfoverride4val_old, afectrlovr_old, afectrlovrval_old; 1345 - int tia_gain, lna2_gain, biq1_gain; 1346 - bool set_gain; 1385 + int tia_gain; 1386 + u32 received_power, rx_pwr_threshold; 1347 1387 u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; 1348 1388 u16 values_to_save[11]; 1349 1389 s16 *ptr; ··· 1368 1408 goto cal_done; 1369 1409 } 1370 1410 1371 - WARN_ON(module != 1); 1372 - tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); 1373 - wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); 1411 + if (module == 1) { 1374 1412 1375 - for (i = 0; i < 11; i++) 1376 - values_to_save[i] = 1377 - read_radio_reg(pi, rxiq_cal_rf_reg[i]); 1378 - Core1TxControl_old = read_phy_reg(pi, 0x631); 1413 + tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); 1414 + wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); 1379 1415 1380 - or_phy_reg(pi, 0x631, 0x0015); 1416 + for (i = 0; i < 11; i++) 1417 + values_to_save[i] = 1418 + read_radio_reg(pi, rxiq_cal_rf_reg[i]); 1419 + Core1TxControl_old = read_phy_reg(pi, 0x631); 1381 1420 1382 - RFOverride0_old = read_phy_reg(pi, 0x44c); 1383 - RFOverrideVal0_old = read_phy_reg(pi, 0x44d); 1384 - rfoverride2_old = read_phy_reg(pi, 0x4b0); 1385 - rfoverride2val_old = read_phy_reg(pi, 0x4b1); 1386 - rfoverride3_old = read_phy_reg(pi, 0x4f9); 1387 - rfoverride3val_old = read_phy_reg(pi, 0x4fa); 1388 - rfoverride4_old = read_phy_reg(pi, 0x938); 1389 - rfoverride4val_old = read_phy_reg(pi, 0x939); 1390 - afectrlovr_old = read_phy_reg(pi, 0x43b); 1391 - afectrlovrval_old = read_phy_reg(pi, 0x43c); 1392 - old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); 1393 - old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); 1421 + or_phy_reg(pi, 0x631, 0x0015); 1394 1422 1395 - tx_gain_override_old 
= wlc_lcnphy_tx_gain_override_enabled(pi); 1396 - if (tx_gain_override_old) { 1397 - wlc_lcnphy_get_tx_gain(pi, &old_gains); 1398 - tx_gain_index_old = pi_lcn->lcnphy_current_index; 1399 - } 1423 + RFOverride0_old = read_phy_reg(pi, 0x44c); 1424 + RFOverrideVal0_old = read_phy_reg(pi, 0x44d); 1425 + rfoverride2_old = read_phy_reg(pi, 0x4b0); 1426 + rfoverride2val_old = read_phy_reg(pi, 0x4b1); 1427 + rfoverride3_old = read_phy_reg(pi, 0x4f9); 1428 + rfoverride3val_old = read_phy_reg(pi, 0x4fa); 1429 + rfoverride4_old = read_phy_reg(pi, 0x938); 1430 + rfoverride4val_old = read_phy_reg(pi, 0x939); 1431 + afectrlovr_old = read_phy_reg(pi, 0x43b); 1432 + afectrlovrval_old = read_phy_reg(pi, 0x43c); 1433 + old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); 1434 + old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); 1400 1435 1401 - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); 1402 - 1403 - mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); 1404 - mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); 1405 - 1406 - mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); 1407 - mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); 1408 - 1409 - write_radio_reg(pi, RADIO_2064_REG116, 0x06); 1410 - write_radio_reg(pi, RADIO_2064_REG12C, 0x07); 1411 - write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); 1412 - write_radio_reg(pi, RADIO_2064_REG098, 0x03); 1413 - write_radio_reg(pi, RADIO_2064_REG00B, 0x7); 1414 - mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); 1415 - write_radio_reg(pi, RADIO_2064_REG01D, 0x01); 1416 - write_radio_reg(pi, RADIO_2064_REG114, 0x01); 1417 - write_radio_reg(pi, RADIO_2064_REG02E, 0x10); 1418 - write_radio_reg(pi, RADIO_2064_REG12A, 0x08); 1419 - 1420 - mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); 1421 - mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); 1422 - mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); 1423 - mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); 1424 - mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); 1425 - mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); 1426 - mod_phy_reg(pi, 0x938, (0x1 << 
3), 1 << 3); 1427 - mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); 1428 - mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); 1429 - mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); 1430 - 1431 - mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); 1432 - mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); 1433 - 1434 - write_phy_reg(pi, 0x6da, 0xffff); 1435 - or_phy_reg(pi, 0x6db, 0x3); 1436 - 1437 - wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); 1438 - set_gain = false; 1439 - 1440 - lna2_gain = 3; 1441 - while ((lna2_gain >= 0) && !set_gain) { 1442 - tia_gain = 4; 1443 - 1444 - while ((tia_gain >= 0) && !set_gain) { 1445 - biq1_gain = 6; 1446 - 1447 - while ((biq1_gain >= 0) && !set_gain) { 1448 - set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, 1449 - (u16) 1450 - biq1_gain, 1451 - (u16) 1452 - tia_gain, 1453 - (u16) 1454 - lna2_gain); 1455 - biq1_gain -= 1; 1456 - } 1457 - tia_gain -= 1; 1436 + tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); 1437 + if (tx_gain_override_old) { 1438 + wlc_lcnphy_get_tx_gain(pi, &old_gains); 1439 + tx_gain_index_old = pi_lcn->lcnphy_current_index; 1458 1440 } 1459 - lna2_gain -= 1; 1441 + 1442 + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); 1443 + 1444 + mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); 1445 + mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); 1446 + 1447 + mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); 1448 + mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); 1449 + 1450 + write_radio_reg(pi, RADIO_2064_REG116, 0x06); 1451 + write_radio_reg(pi, RADIO_2064_REG12C, 0x07); 1452 + write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); 1453 + write_radio_reg(pi, RADIO_2064_REG098, 0x03); 1454 + write_radio_reg(pi, RADIO_2064_REG00B, 0x7); 1455 + mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); 1456 + write_radio_reg(pi, RADIO_2064_REG01D, 0x01); 1457 + write_radio_reg(pi, RADIO_2064_REG114, 0x01); 1458 + write_radio_reg(pi, RADIO_2064_REG02E, 0x10); 1459 + write_radio_reg(pi, RADIO_2064_REG12A, 0x08); 1460 + 1461 + mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 
0); 1462 + mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); 1463 + mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); 1464 + mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); 1465 + mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); 1466 + mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); 1467 + mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); 1468 + mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); 1469 + mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); 1470 + mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); 1471 + 1472 + mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); 1473 + mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); 1474 + 1475 + wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); 1476 + write_phy_reg(pi, 0x6da, 0xffff); 1477 + or_phy_reg(pi, 0x6db, 0x3); 1478 + wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); 1479 + wlc_lcnphy_rx_gain_override_enable(pi, true); 1480 + 1481 + tia_gain = 8; 1482 + rx_pwr_threshold = 950; 1483 + while (tia_gain > 0) { 1484 + tia_gain -= 1; 1485 + wlc_lcnphy_set_rx_gain_by_distribution(pi, 1486 + 0, 0, 2, 2, 1487 + (u16) 1488 + tia_gain, 1, 0); 1489 + udelay(500); 1490 + 1491 + received_power = 1492 + wlc_lcnphy_measure_digital_power(pi, 2000); 1493 + if (received_power < rx_pwr_threshold) 1494 + break; 1495 + } 1496 + result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); 1497 + 1498 + wlc_lcnphy_stop_tx_tone(pi); 1499 + 1500 + write_phy_reg(pi, 0x631, Core1TxControl_old); 1501 + 1502 + write_phy_reg(pi, 0x44c, RFOverrideVal0_old); 1503 + write_phy_reg(pi, 0x44d, RFOverrideVal0_old); 1504 + write_phy_reg(pi, 0x4b0, rfoverride2_old); 1505 + write_phy_reg(pi, 0x4b1, rfoverride2val_old); 1506 + write_phy_reg(pi, 0x4f9, rfoverride3_old); 1507 + write_phy_reg(pi, 0x4fa, rfoverride3val_old); 1508 + write_phy_reg(pi, 0x938, rfoverride4_old); 1509 + write_phy_reg(pi, 0x939, rfoverride4val_old); 1510 + write_phy_reg(pi, 0x43b, afectrlovr_old); 1511 + write_phy_reg(pi, 0x43c, afectrlovrval_old); 1512 + write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); 1513 + write_phy_reg(pi, 0x6db, 
old_sslpnRxFeClkEnCtrl); 1514 + 1515 + wlc_lcnphy_clear_trsw_override(pi); 1516 + 1517 + mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); 1518 + 1519 + for (i = 0; i < 11; i++) 1520 + write_radio_reg(pi, rxiq_cal_rf_reg[i], 1521 + values_to_save[i]); 1522 + 1523 + if (tx_gain_override_old) 1524 + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); 1525 + else 1526 + wlc_lcnphy_disable_tx_gain_override(pi); 1527 + 1528 + wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); 1529 + wlc_lcnphy_rx_gain_override_enable(pi, false); 1460 1530 } 1461 - 1462 - if (set_gain) 1463 - result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024); 1464 - else 1465 - result = false; 1466 - 1467 - wlc_lcnphy_stop_tx_tone(pi); 1468 - 1469 - write_phy_reg(pi, 0x631, Core1TxControl_old); 1470 - 1471 - write_phy_reg(pi, 0x44c, RFOverrideVal0_old); 1472 - write_phy_reg(pi, 0x44d, RFOverrideVal0_old); 1473 - write_phy_reg(pi, 0x4b0, rfoverride2_old); 1474 - write_phy_reg(pi, 0x4b1, rfoverride2val_old); 1475 - write_phy_reg(pi, 0x4f9, rfoverride3_old); 1476 - write_phy_reg(pi, 0x4fa, rfoverride3val_old); 1477 - write_phy_reg(pi, 0x938, rfoverride4_old); 1478 - write_phy_reg(pi, 0x939, rfoverride4val_old); 1479 - write_phy_reg(pi, 0x43b, afectrlovr_old); 1480 - write_phy_reg(pi, 0x43c, afectrlovrval_old); 1481 - write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); 1482 - write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); 1483 - 1484 - wlc_lcnphy_clear_trsw_override(pi); 1485 - 1486 - mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); 1487 - 1488 - for (i = 0; i < 11; i++) 1489 - write_radio_reg(pi, rxiq_cal_rf_reg[i], 1490 - values_to_save[i]); 1491 - 1492 - if (tx_gain_override_old) 1493 - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); 1494 - else 1495 - wlc_lcnphy_disable_tx_gain_override(pi); 1496 - 1497 - wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); 1498 - wlc_lcnphy_rx_gain_override_enable(pi, false); 1499 1531 1500 1532 cal_done: 1501 1533 kfree(ptr); ··· 1781 1829 write_radio_reg(pi, RADIO_2064_REG038, 3); 
1782 1830 write_radio_reg(pi, RADIO_2064_REG091, 7); 1783 1831 } 1784 - 1785 - if (!(pi->sh->boardflags & BFL_FEM)) { 1786 - u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, 1787 - 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; 1788 - 1789 - write_radio_reg(pi, RADIO_2064_REG02A, 0xf); 1790 - write_radio_reg(pi, RADIO_2064_REG091, 0x3); 1791 - write_radio_reg(pi, RADIO_2064_REG038, 0x3); 1792 - 1793 - write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); 1794 - } 1795 1832 } 1796 1833 1797 1834 static int ··· 1975 2034 } else { 1976 2035 mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); 1977 2036 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); 1978 - mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); 1979 - mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); 1980 - mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); 1981 - mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); 1982 - mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); 1983 - mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); 1984 - mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); 1985 - mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); 1986 - mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); 1987 - mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); 1988 2037 } 1989 2038 } else { 1990 2039 mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); ··· 2061 2130 (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); 2062 2131 2063 2132 mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); 2064 - mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); 2065 2133 } 2066 2134 2067 2135 static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) 2068 2136 { 2069 2137 struct phytbl_info tab; 2070 2138 u32 rfseq, ind; 2071 - u8 tssi_sel; 2072 2139 2073 2140 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; 2074 2141 tab.tbl_width = 32; ··· 2088 2159 2089 2160 mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); 2090 2161 2091 - if (pi->sh->boardflags & BFL_FEM) { 2092 - tssi_sel = 0x1; 2093 - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); 2094 - } else { 2095 - 
tssi_sel = 0xe; 2096 - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); 2097 - } 2162 + wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); 2098 2163 mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); 2099 2164 2100 2165 mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); ··· 2124 2201 mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); 2125 2202 2126 2203 if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { 2127 - mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); 2204 + mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); 2128 2205 mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); 2129 2206 } else { 2130 - mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); 2131 2207 mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); 2132 2208 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); 2133 2209 } ··· 2172 2250 mod_phy_reg(pi, 0x4d7, (0x1 << 2), (1) << 2); 2173 2251 2174 2252 mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); 2175 - 2176 - mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); 2177 - mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); 2178 - mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); 2179 2253 2180 2254 wlc_lcnphy_pwrctrl_rssiparams(pi); 2181 2255 } ··· 2791 2873 read_radio_reg(pi, RADIO_2064_REG007) & 1; 2792 2874 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; 2793 2875 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; 2794 - u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); 2795 - 2796 2876 idleTssi = read_phy_reg(pi, 0x4ab); 2797 2877 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & 2798 2878 MCTL_EN_MAC)); ··· 2808 2892 mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); 2809 2893 mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); 2810 2894 wlc_lcnphy_tssi_setup(pi); 2811 - 2812 - mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); 2813 - mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); 2814 - 2815 - wlc_lcnphy_set_bbmult(pi, 0x0); 2816 - 2817 2895 wlc_phy_do_dummy_tx(pi, true, OFF); 2818 2896 idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) 
2819 2897 >> 0); ··· 2829 2919 2830 2920 mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); 2831 2921 2832 - wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); 2833 2922 wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); 2834 2923 wlc_lcnphy_set_tx_gain(pi, &old_gains); 2835 2924 wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); ··· 3042 3133 wlc_lcnphy_write_table(pi, &tab); 3043 3134 tab.tbl_offset++; 3044 3135 } 3045 - mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); 3046 - mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); 3047 - mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); 3048 - mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); 3049 - mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); 3050 3136 3051 3137 mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); 3052 3138 ··· 3843 3939 target_gains.pad_gain = 21; 3844 3940 target_gains.dac_gain = 0; 3845 3941 wlc_lcnphy_set_tx_gain(pi, &target_gains); 3942 + wlc_lcnphy_set_tx_pwr_by_index(pi, 16); 3846 3943 3847 3944 if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { 3848 3945 ··· 3854 3949 lcnphy_recal ? 
LCNPHY_CAL_RECAL : 3855 3950 LCNPHY_CAL_FULL), false); 3856 3951 } else { 3857 - wlc_lcnphy_set_tx_pwr_by_index(pi, 16); 3858 3952 wlc_lcnphy_tx_iqlo_soft_cal_full(pi); 3859 3953 } 3860 3954 ··· 4278 4374 if (CHSPEC_IS5G(pi->radio_chanspec)) 4279 4375 pa_gain = 0x70; 4280 4376 else 4281 - pa_gain = 0x60; 4377 + pa_gain = 0x70; 4282 4378 4283 4379 if (pi->sh->boardflags & BFL_FEM) 4284 4380 pa_gain = 0x10; 4285 - 4286 4381 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; 4287 4382 tab.tbl_width = 32; 4288 4383 tab.tbl_len = 1; 4289 4384 tab.tbl_ptr = &val; 4290 4385 4291 4386 for (j = 0; j < 128; j++) { 4292 - if (pi->sh->boardflags & BFL_FEM) 4293 - gm_gain = gain_table[j].gm; 4294 - else 4295 - gm_gain = 15; 4296 - 4387 + gm_gain = gain_table[j].gm; 4297 4388 val = (((u32) pa_gain << 24) | 4298 4389 (gain_table[j].pad << 16) | 4299 4390 (gain_table[j].pga << 8) | gm_gain); ··· 4499 4600 4500 4601 write_phy_reg(pi, 0x4ea, 0x4688); 4501 4602 4502 - if (pi->sh->boardflags & BFL_FEM) 4503 - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); 4504 - else 4505 - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); 4603 + mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); 4506 4604 4507 4605 mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); 4508 4606 ··· 4510 4614 wlc_lcnphy_rcal(pi); 4511 4615 4512 4616 wlc_lcnphy_rc_cal(pi); 4513 - 4514 - if (!(pi->sh->boardflags & BFL_FEM)) { 4515 - write_radio_reg(pi, RADIO_2064_REG032, 0x6f); 4516 - write_radio_reg(pi, RADIO_2064_REG033, 0x19); 4517 - write_radio_reg(pi, RADIO_2064_REG039, 0xe); 4518 - } 4519 - 4520 4617 } 4521 4618 4522 4619 static void wlc_lcnphy_radio_init(struct brcms_phy *pi) ··· 4539 4650 wlc_lcnphy_write_table(pi, &tab); 4540 4651 } 4541 4652 4542 - if (!(pi->sh->boardflags & BFL_FEM)) { 4543 - tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; 4544 - tab.tbl_width = 16; 4545 - tab.tbl_ptr = &val; 4546 - tab.tbl_len = 1; 4653 + tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; 4654 + tab.tbl_width = 16; 4655 + tab.tbl_ptr = &val; 4656 + tab.tbl_len = 1; 4547 4657 4548 - val = 150; 
4549 - tab.tbl_offset = 0; 4550 - wlc_lcnphy_write_table(pi, &tab); 4658 + val = 114; 4659 + tab.tbl_offset = 0; 4660 + wlc_lcnphy_write_table(pi, &tab); 4551 4661 4552 - val = 220; 4553 - tab.tbl_offset = 1; 4554 - wlc_lcnphy_write_table(pi, &tab); 4555 - } 4662 + val = 130; 4663 + tab.tbl_offset = 1; 4664 + wlc_lcnphy_write_table(pi, &tab); 4665 + 4666 + val = 6; 4667 + tab.tbl_offset = 8; 4668 + wlc_lcnphy_write_table(pi, &tab); 4556 4669 4557 4670 if (CHSPEC_IS2G(pi->radio_chanspec)) { 4558 4671 if (pi->sh->boardflags & BFL_FEM) ··· 4946 5055 wlc_lcnphy_load_tx_iir_filter(pi, true, 3); 4947 5056 4948 5057 mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); 4949 - wlc_lcnphy_tssi_setup(pi); 4950 5058 } 4951 5059 4952 5060 void wlc_phy_detach_lcnphy(struct brcms_phy *pi) ··· 4984 5094 if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) 4985 5095 return false; 4986 5096 4987 - if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { 5097 + if ((pi->sh->boardflags & BFL_FEM) && 5098 + (LCNREV_IS(pi->pubpi.phy_rev, 1))) { 4988 5099 if (pi_lcn->lcnphy_tempsense_option == 3) { 4989 5100 pi->hwpwrctrl = true; 4990 5101 pi->hwpwrctrl_capable = true;
+33 -33
drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
··· 1992 1992 }; 1993 1993 1994 1994 static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { 1995 - 0x0009, 1996 1995 0x000a, 1997 - 0x0005, 1998 - 0x0006, 1999 1996 0x0009, 2000 - 0x000a, 2001 - 0x0005, 2002 1997 0x0006, 1998 + 0x0005, 1999 + 0x000a, 2003 2000 0x0009, 2004 - 0x000a, 2005 - 0x0005, 2006 2001 0x0006, 2002 + 0x0005, 2003 + 0x000a, 2007 2004 0x0009, 2008 - 0x000a, 2009 - 0x0005, 2010 2005 0x0006, 2006 + 0x0005, 2007 + 0x000a, 2011 2008 0x0009, 2012 - 0x000a, 2013 - 0x0005, 2014 2009 0x0006, 2010 + 0x0005, 2011 + 0x000a, 2015 2012 0x0009, 2016 - 0x000a, 2017 - 0x0005, 2018 2013 0x0006, 2014 + 0x0005, 2015 + 0x000a, 2019 2016 0x0009, 2020 - 0x000a, 2021 - 0x0005, 2022 2017 0x0006, 2018 + 0x0005, 2019 + 0x000a, 2023 2020 0x0009, 2024 - 0x000a, 2025 - 0x0005, 2026 2021 0x0006, 2022 + 0x0005, 2023 + 0x000a, 2027 2024 0x0009, 2028 - 0x000a, 2029 - 0x0005, 2030 2025 0x0006, 2026 + 0x0005, 2027 + 0x000a, 2031 2028 0x0009, 2032 - 0x000a, 2033 - 0x0005, 2034 2029 0x0006, 2030 + 0x0005, 2031 + 0x000a, 2035 2032 0x0009, 2036 - 0x000a, 2037 - 0x0005, 2038 2033 0x0006, 2034 + 0x0005, 2035 + 0x000a, 2039 2036 0x0009, 2040 - 0x000a, 2041 - 0x0005, 2042 2037 0x0006, 2038 + 0x0005, 2039 + 0x000a, 2043 2040 0x0009, 2044 - 0x000a, 2045 - 0x0005, 2046 2041 0x0006, 2042 + 0x0005, 2043 + 0x000a, 2047 2044 0x0009, 2048 - 0x000a, 2049 - 0x0005, 2050 2045 0x0006, 2046 + 0x0005, 2047 + 0x000a, 2051 2048 0x0009, 2052 - 0x000a, 2053 - 0x0005, 2054 2049 0x0006, 2050 + 0x0005, 2051 + 0x000a, 2055 2052 0x0009, 2056 - 0x000a, 2057 - 0x0005, 2058 2053 0x0006, 2054 + 0x0005, 2055 + 0x000a, 2056 + 0x0009, 2057 + 0x0006, 2058 + 0x0005, 2059 2059 }; 2060 2060 2061 2061 static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
+2 -1
drivers/net/wireless/iwlegacy/4965-rs.c
··· 2152 2152 int rate_idx; 2153 2153 int i; 2154 2154 u32 rate; 2155 - u8 use_green = il4965_rs_use_green(il, sta); 2155 + u8 use_green; 2156 2156 u8 active_tbl = 0; 2157 2157 u8 valid_tx_ant; 2158 2158 struct il_station_priv *sta_priv; ··· 2160 2160 if (!sta || !lq_sta) 2161 2161 return; 2162 2162 2163 + use_green = il4965_rs_use_green(il, sta); 2163 2164 sta_priv = (void *)sta->drv_priv; 2164 2165 2165 2166 i = lq_sta->last_txrate_idx;
+9
drivers/net/wireless/iwlwifi/dvm/lib.c
··· 1262 1262 } 1263 1263 1264 1264 /* 1265 + * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag 1266 + * in iwl_down but cancel the workers only later. 1267 + */ 1268 + if (!priv->ucode_loaded) { 1269 + IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); 1270 + return -EIO; 1271 + } 1272 + 1273 + /* 1265 1274 * Synchronous commands from this op-mode must hold 1266 1275 * the mutex, this ensures we don't try to send two 1267 1276 * (or more) synchronous commands at a time.
+8 -10
drivers/net/wireless/iwlwifi/dvm/rxon.c
··· 1419 1419 1420 1420 mutex_lock(&priv->mutex); 1421 1421 1422 + if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { 1423 + /* 1424 + * If we go idle, then clearly no "passive-no-rx" 1425 + * workaround is needed any more, this is a reset. 1426 + */ 1427 + iwlagn_lift_passive_no_rx(priv); 1428 + } 1429 + 1422 1430 if (unlikely(!iwl_is_ready(priv))) { 1423 1431 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 1424 1432 mutex_unlock(&priv->mutex); ··· 1458 1450 priv->timestamp = bss_conf->sync_tsf; 1459 1451 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 1460 1452 } else { 1461 - /* 1462 - * If we disassociate while there are pending 1463 - * frames, just wake up the queues and let the 1464 - * frames "escape" ... This shouldn't really 1465 - * be happening to start with, but we should 1466 - * not get stuck in this case either since it 1467 - * can happen if userspace gets confused. 1468 - */ 1469 - iwlagn_lift_passive_no_rx(priv); 1470 - 1471 1453 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1472 1454 1473 1455 if (ctx->ctxid == IWL_RXON_CTX_BSS)
+1 -1
drivers/net/wireless/iwlwifi/dvm/tx.c
··· 1192 1192 memset(&info->status, 0, sizeof(info->status)); 1193 1193 1194 1194 if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && 1195 - iwl_is_associated_ctx(ctx) && ctx->vif && 1195 + ctx->vif && 1196 1196 ctx->vif->type == NL80211_IFTYPE_STATION) { 1197 1197 /* block and stop all queues */ 1198 1198 priv->passive_no_rx = true;
+2 -2
drivers/net/wireless/iwlwifi/dvm/ucode.c
··· 367 367 return -EIO; 368 368 } 369 369 370 + priv->ucode_loaded = true; 371 + 370 372 if (ucode_type != IWL_UCODE_WOWLAN) { 371 373 /* delay a bit to give rfkill time to run */ 372 374 msleep(5); ··· 381 379 priv->cur_ucode = old_type; 382 380 return ret; 383 381 } 384 - 385 - priv->ucode_loaded = true; 386 382 387 383 return 0; 388 384 }
+13
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 475 475 476 476 /* If platform's RF_KILL switch is NOT set to KILL */ 477 477 hw_rfkill = iwl_is_rfkill_set(trans); 478 + if (hw_rfkill) 479 + set_bit(STATUS_RFKILL, &trans_pcie->status); 480 + else 481 + clear_bit(STATUS_RFKILL, &trans_pcie->status); 478 482 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 479 483 if (hw_rfkill && !run_in_rfkill) 480 484 return -ERFKILL; ··· 645 641 646 642 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) 647 643 { 644 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 648 645 bool hw_rfkill; 649 646 int err; 650 647 ··· 661 656 iwl_enable_rfkill_int(trans); 662 657 663 658 hw_rfkill = iwl_is_rfkill_set(trans); 659 + if (hw_rfkill) 660 + set_bit(STATUS_RFKILL, &trans_pcie->status); 661 + else 662 + clear_bit(STATUS_RFKILL, &trans_pcie->status); 664 663 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 665 664 666 665 return 0; ··· 703 694 * op_mode. 704 695 */ 705 696 hw_rfkill = iwl_is_rfkill_set(trans); 697 + if (hw_rfkill) 698 + set_bit(STATUS_RFKILL, &trans_pcie->status); 699 + else 700 + clear_bit(STATUS_RFKILL, &trans_pcie->status); 706 701 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 707 702 } 708 703 }
+1 -1
drivers/net/wireless/iwlwifi/pcie/tx.c
··· 1264 1264 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1265 1265 int copy = 0; 1266 1266 1267 - if (!cmd->len) 1267 + if (!cmd->len[i]) 1268 1268 continue; 1269 1269 1270 1270 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+2 -1
drivers/net/wireless/mwifiex/cfg80211.c
··· 1892 1892 } 1893 1893 } 1894 1894 1895 - for (i = 0; i < request->n_channels; i++) { 1895 + for (i = 0; i < min_t(u32, request->n_channels, 1896 + MWIFIEX_USER_SCAN_CHAN_MAX); i++) { 1896 1897 chan = request->channels[i]; 1897 1898 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; 1898 1899 priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+1
drivers/net/wireless/mwifiex/pcie.c
··· 1508 1508 } 1509 1509 memcpy(adapter->upld_buf, skb->data, 1510 1510 min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); 1511 + skb_push(skb, INTF_HEADER_LEN); 1511 1512 if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, 1512 1513 PCI_DMA_FROMDEVICE)) 1513 1514 return -1;
+17 -21
drivers/nfc/microread/mei.c
··· 22 22 #include <linux/slab.h> 23 23 #include <linux/interrupt.h> 24 24 #include <linux/gpio.h> 25 - #include <linux/mei_bus.h> 25 + #include <linux/mei_cl_bus.h> 26 26 27 27 #include <linux/nfc.h> 28 28 #include <net/nfc/hci.h> ··· 31 31 #include "microread.h" 32 32 33 33 #define MICROREAD_DRIVER_NAME "microread" 34 - 35 - #define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \ 36 - 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) 37 34 38 35 struct mei_nfc_hdr { 39 36 u8 cmd; ··· 45 48 #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) 46 49 47 50 struct microread_mei_phy { 48 - struct mei_device *mei_device; 51 + struct mei_cl_device *device; 49 52 struct nfc_hci_dev *hdev; 50 53 51 54 int powered; ··· 102 105 103 106 MEI_DUMP_SKB_OUT("mei frame sent", skb); 104 107 105 - r = mei_send(phy->device, skb->data, skb->len); 108 + r = mei_cl_send(phy->device, skb->data, skb->len); 106 109 if (r > 0) 107 110 r = 0; 108 111 109 112 return r; 110 113 } 111 114 112 - static void microread_event_cb(struct mei_device *device, u32 events, 115 + static void microread_event_cb(struct mei_cl_device *device, u32 events, 113 116 void *context) 114 117 { 115 118 struct microread_mei_phy *phy = context; ··· 117 120 if (phy->hard_fault != 0) 118 121 return; 119 122 120 - if (events & BIT(MEI_EVENT_RX)) { 123 + if (events & BIT(MEI_CL_EVENT_RX)) { 121 124 struct sk_buff *skb; 122 125 int reply_size; 123 126 ··· 125 128 if (!skb) 126 129 return; 127 130 128 - reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); 131 + reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); 129 132 if (reply_size < MEI_NFC_HEADER_SIZE) { 130 133 kfree(skb); 131 134 return; ··· 146 149 .disable = microread_mei_disable, 147 150 }; 148 151 149 - static int microread_mei_probe(struct mei_device *device, 150 - const struct mei_id *id) 152 + static int microread_mei_probe(struct mei_cl_device *device, 153 + const struct mei_cl_device_id *id) 151 154 { 152 155 struct 
microread_mei_phy *phy; 153 156 int r; ··· 161 164 } 162 165 163 166 phy->device = device; 164 - mei_set_clientdata(device, phy); 167 + mei_cl_set_drvdata(device, phy); 165 168 166 - r = mei_register_event_cb(device, microread_event_cb, phy); 169 + r = mei_cl_register_event_cb(device, microread_event_cb, phy); 167 170 if (r) { 168 171 pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); 169 172 goto err_out; ··· 183 186 return r; 184 187 } 185 188 186 - static int microread_mei_remove(struct mei_device *device) 189 + static int microread_mei_remove(struct mei_cl_device *device) 187 190 { 188 - struct microread_mei_phy *phy = mei_get_clientdata(device); 191 + struct microread_mei_phy *phy = mei_cl_get_drvdata(device); 189 192 190 193 pr_info("Removing microread\n"); 191 194 ··· 199 202 return 0; 200 203 } 201 204 202 - static struct mei_id microread_mei_tbl[] = { 203 - { MICROREAD_DRIVER_NAME, MICROREAD_UUID }, 205 + static struct mei_cl_device_id microread_mei_tbl[] = { 206 + { MICROREAD_DRIVER_NAME }, 204 207 205 208 /* required last entry */ 206 209 { } 207 210 }; 208 - 209 211 MODULE_DEVICE_TABLE(mei, microread_mei_tbl); 210 212 211 - static struct mei_driver microread_driver = { 213 + static struct mei_cl_driver microread_driver = { 212 214 .id_table = microread_mei_tbl, 213 215 .name = MICROREAD_DRIVER_NAME, 214 216 ··· 221 225 222 226 pr_debug(DRIVER_DESC ": %s\n", __func__); 223 227 224 - r = mei_driver_register(&microread_driver); 228 + r = mei_cl_driver_register(&microread_driver); 225 229 if (r) { 226 230 pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); 227 231 return r; ··· 232 236 233 237 static void microread_mei_exit(void) 234 238 { 235 - mei_driver_unregister(&microread_driver); 239 + mei_cl_driver_unregister(&microread_driver); 236 240 } 237 241 238 242 module_init(microread_mei_init);
+8 -7
drivers/pci/pci-acpi.c
··· 53 53 return; 54 54 } 55 55 56 - if (!pci_dev->pm_cap || !pci_dev->pme_support 57 - || pci_check_pme_status(pci_dev)) { 58 - if (pci_dev->pme_poll) 59 - pci_dev->pme_poll = false; 56 + /* Clear PME Status if set. */ 57 + if (pci_dev->pme_support) 58 + pci_check_pme_status(pci_dev); 60 59 61 - pci_wakeup_event(pci_dev); 62 - pm_runtime_resume(&pci_dev->dev); 63 - } 60 + if (pci_dev->pme_poll) 61 + pci_dev->pme_poll = false; 62 + 63 + pci_wakeup_event(pci_dev); 64 + pm_runtime_resume(&pci_dev->dev); 64 65 65 66 if (pci_dev->subordinate) 66 67 pci_pme_wakeup_bus(pci_dev->subordinate);
+3 -2
drivers/pci/pci-driver.c
··· 390 390 391 391 /* 392 392 * Turn off Bus Master bit on the device to tell it to not 393 - * continue to do DMA 393 + * continue to do DMA. Don't touch devices in D3cold or unknown states. 394 394 */ 395 - pci_clear_master(pci_dev); 395 + if (pci_dev->current_state <= PCI_D3hot) 396 + pci_clear_master(pci_dev); 396 397 } 397 398 398 399 #ifdef CONFIG_PM
-13
drivers/pci/pcie/portdrv_pci.c
··· 185 185 #endif /* !PM */ 186 186 187 187 /* 188 - * PCIe port runtime suspend is broken for some chipsets, so use a 189 - * black list to disable runtime PM for these chipsets. 190 - */ 191 - static const struct pci_device_id port_runtime_pm_black_list[] = { 192 - { /* end: all zeroes */ } 193 - }; 194 - 195 - /* 196 188 * pcie_portdrv_probe - Probe PCI-Express port devices 197 189 * @dev: PCI-Express port device being probed 198 190 * ··· 217 225 * it by default. 218 226 */ 219 227 dev->d3cold_allowed = false; 220 - if (!pci_match_id(port_runtime_pm_black_list, dev)) 221 - pm_runtime_put_noidle(&dev->dev); 222 - 223 228 return 0; 224 229 } 225 230 226 231 static void pcie_portdrv_remove(struct pci_dev *dev) 227 232 { 228 - if (!pci_match_id(port_runtime_pm_black_list, dev)) 229 - pm_runtime_get_noresume(&dev->dev); 230 233 pcie_port_device_remove(dev); 231 234 pci_disable_device(dev); 232 235 }
+31 -36
drivers/pci/rom.c
··· 100 100 return min((size_t)(image - rom), size); 101 101 } 102 102 103 - static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) 104 - { 105 - struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; 106 - loff_t start; 107 - 108 - /* assign the ROM an address if it doesn't have one */ 109 - if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) 110 - return 0; 111 - start = pci_resource_start(pdev, PCI_ROM_RESOURCE); 112 - *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); 113 - 114 - if (*size == 0) 115 - return 0; 116 - 117 - /* Enable ROM space decodes */ 118 - if (pci_enable_rom(pdev)) 119 - return 0; 120 - 121 - return start; 122 - } 123 - 124 103 /** 125 104 * pci_map_rom - map a PCI ROM to kernel space 126 105 * @pdev: pointer to pci device struct ··· 114 135 void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) 115 136 { 116 137 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; 117 - loff_t start = 0; 138 + loff_t start; 118 139 void __iomem *rom; 119 140 120 141 /* ··· 133 154 return (void __iomem *)(unsigned long) 134 155 pci_resource_start(pdev, PCI_ROM_RESOURCE); 135 156 } else { 136 - start = pci_find_rom(pdev, size); 157 + /* assign the ROM an address if it doesn't have one */ 158 + if (res->parent == NULL && 159 + pci_assign_resource(pdev,PCI_ROM_RESOURCE)) 160 + return NULL; 161 + start = pci_resource_start(pdev, PCI_ROM_RESOURCE); 162 + *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); 163 + if (*size == 0) 164 + return NULL; 165 + 166 + /* Enable ROM space decodes */ 167 + if (pci_enable_rom(pdev)) 168 + return NULL; 137 169 } 138 170 } 139 - 140 - /* 141 - * Some devices may provide ROMs via a source other than the BAR 142 - */ 143 - if (!start && pdev->rom && pdev->romlen) { 144 - *size = pdev->romlen; 145 - return phys_to_virt(pdev->rom); 146 - } 147 - 148 - if (!start) 149 - return NULL; 150 171 151 172 rom = ioremap(start, *size); 152 173 if (!rom) { ··· 181 202 if (res->flags & 
(IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) 182 203 return; 183 204 184 - if (!pdev->rom || !pdev->romlen) 185 - iounmap(rom); 205 + iounmap(rom); 186 206 187 207 /* Disable again before continuing, leave enabled if pci=rom */ 188 208 if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) ··· 205 227 } 206 228 } 207 229 230 + /** 231 + * pci_platform_rom - provides a pointer to any ROM image provided by the 232 + * platform 233 + * @pdev: pointer to pci device struct 234 + * @size: pointer to receive size of pci window over ROM 235 + */ 236 + void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) 237 + { 238 + if (pdev->rom && pdev->romlen) { 239 + *size = pdev->romlen; 240 + return phys_to_virt((phys_addr_t)pdev->rom); 241 + } 242 + 243 + return NULL; 244 + } 245 + 208 246 EXPORT_SYMBOL(pci_map_rom); 209 247 EXPORT_SYMBOL(pci_unmap_rom); 210 248 EXPORT_SYMBOL_GPL(pci_enable_rom); 211 249 EXPORT_SYMBOL_GPL(pci_disable_rom); 250 + EXPORT_SYMBOL(pci_platform_rom);
+19 -31
drivers/rtc/rtc-at91rm9200.c
··· 44 44 static unsigned int at91_alarm_year = AT91_RTC_EPOCH; 45 45 static void __iomem *at91_rtc_regs; 46 46 static int irq; 47 - static u32 at91_rtc_imr; 48 47 49 48 /* 50 49 * Decode time/date into rtc_time structure ··· 108 109 cr = at91_rtc_read(AT91_RTC_CR); 109 110 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); 110 111 111 - at91_rtc_imr |= AT91_RTC_ACKUPD; 112 112 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); 113 113 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ 114 114 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); 115 - at91_rtc_imr &= ~AT91_RTC_ACKUPD; 116 115 117 116 at91_rtc_write(AT91_RTC_TIMR, 118 117 bin2bcd(tm->tm_sec) << 0 ··· 142 145 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 143 146 tm->tm_year = at91_alarm_year - 1900; 144 147 145 - alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) 148 + alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) 146 149 ? 1 : 0; 147 150 148 151 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, ··· 168 171 tm.tm_sec = alrm->time.tm_sec; 169 172 170 173 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 171 - at91_rtc_imr &= ~AT91_RTC_ALARM; 172 174 at91_rtc_write(AT91_RTC_TIMALR, 173 175 bin2bcd(tm.tm_sec) << 0 174 176 | bin2bcd(tm.tm_min) << 8 ··· 180 184 181 185 if (alrm->enabled) { 182 186 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 183 - at91_rtc_imr |= AT91_RTC_ALARM; 184 187 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 185 188 } 186 189 ··· 196 201 197 202 if (enabled) { 198 203 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 199 - at91_rtc_imr |= AT91_RTC_ALARM; 200 204 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 201 - } else { 205 + } else 202 206 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 203 - at91_rtc_imr &= ~AT91_RTC_ALARM; 204 - } 205 207 206 208 return 0; 207 209 } ··· 207 215 */ 208 216 static int at91_rtc_proc(struct device *dev, struct seq_file *seq) 209 217 { 218 + unsigned long imr = 
at91_rtc_read(AT91_RTC_IMR); 219 + 210 220 seq_printf(seq, "update_IRQ\t: %s\n", 211 - (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 221 + (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 212 222 seq_printf(seq, "periodic_IRQ\t: %s\n", 213 - (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); 223 + (imr & AT91_RTC_SECEV) ? "yes" : "no"); 214 224 215 225 return 0; 216 226 } ··· 227 233 unsigned int rtsr; 228 234 unsigned long events = 0; 229 235 230 - rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; 236 + rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); 231 237 if (rtsr) { /* this interrupt is shared! Is it ours? */ 232 238 if (rtsr & AT91_RTC_ALARM) 233 239 events |= (RTC_AF | RTC_IRQF); ··· 291 297 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 292 298 AT91_RTC_SECEV | AT91_RTC_TIMEV | 293 299 AT91_RTC_CALEV); 294 - at91_rtc_imr = 0; 295 300 296 301 ret = request_irq(irq, at91_rtc_interrupt, 297 302 IRQF_SHARED, ··· 329 336 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 330 337 AT91_RTC_SECEV | AT91_RTC_TIMEV | 331 338 AT91_RTC_CALEV); 332 - at91_rtc_imr = 0; 333 339 free_irq(irq, pdev); 334 340 335 341 rtc_device_unregister(rtc); ··· 341 349 342 350 /* AT91RM9200 RTC Power management control */ 343 351 344 - static u32 at91_rtc_bkpimr; 345 - 352 + static u32 at91_rtc_imr; 346 353 347 354 static int at91_rtc_suspend(struct device *dev) 348 355 { 349 356 /* this IRQ is shared with DBGU and other hardware which isn't 350 357 * necessarily doing PM like we are... 
351 358 */ 352 - at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); 353 - if (at91_rtc_bkpimr) { 354 - if (device_may_wakeup(dev)) { 359 + at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) 360 + & (AT91_RTC_ALARM|AT91_RTC_SECEV); 361 + if (at91_rtc_imr) { 362 + if (device_may_wakeup(dev)) 355 363 enable_irq_wake(irq); 356 - } else { 357 - at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); 358 - at91_rtc_imr &= ~at91_rtc_bkpimr; 359 - } 360 - } 364 + else 365 + at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); 366 + } 361 367 return 0; 362 368 } 363 369 364 370 static int at91_rtc_resume(struct device *dev) 365 371 { 366 - if (at91_rtc_bkpimr) { 367 - if (device_may_wakeup(dev)) { 372 + if (at91_rtc_imr) { 373 + if (device_may_wakeup(dev)) 368 374 disable_irq_wake(irq); 369 - } else { 370 - at91_rtc_imr |= at91_rtc_bkpimr; 371 - at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr); 372 - } 375 + else 376 + at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); 373 377 } 374 378 return 0; 375 379 }
+1
drivers/rtc/rtc-at91rm9200.h
··· 64 64 #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ 65 65 #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ 66 66 #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ 67 + #define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */ 67 68 68 69 #define AT91_RTC_VER 0x2c /* Valid Entry Register */ 69 70 #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
+7 -4
drivers/s390/block/scm_blk.c
··· 307 307 case EQC_WR_PROHIBIT: 308 308 spin_lock_irqsave(&bdev->lock, flags); 309 309 if (bdev->state != SCM_WR_PROHIBIT) 310 - pr_info("%lu: Write access to the SCM increment is suspended\n", 310 + pr_info("%lx: Write access to the SCM increment is suspended\n", 311 311 (unsigned long) bdev->scmdev->address); 312 312 bdev->state = SCM_WR_PROHIBIT; 313 313 spin_unlock_irqrestore(&bdev->lock, flags); ··· 445 445 446 446 spin_lock_irqsave(&bdev->lock, flags); 447 447 if (bdev->state == SCM_WR_PROHIBIT) 448 - pr_info("%lu: Write access to the SCM increment is restored\n", 448 + pr_info("%lx: Write access to the SCM increment is restored\n", 449 449 (unsigned long) bdev->scmdev->address); 450 450 bdev->state = SCM_OPER; 451 451 spin_unlock_irqrestore(&bdev->lock, flags); ··· 463 463 goto out; 464 464 465 465 scm_major = ret; 466 - if (scm_alloc_rqs(nr_requests)) 466 + ret = scm_alloc_rqs(nr_requests); 467 + if (ret) 467 468 goto out_unreg; 468 469 469 470 scm_debug = debug_register("scm_log", 16, 1, 16); 470 - if (!scm_debug) 471 + if (!scm_debug) { 472 + ret = -ENOMEM; 471 473 goto out_free; 474 + } 472 475 473 476 debug_register_view(scm_debug, &debug_hex_ascii_view); 474 477 debug_set_level(scm_debug, 2);
+1 -1
drivers/s390/block/scm_drv.c
··· 19 19 20 20 switch (event) { 21 21 case SCM_CHANGE: 22 - pr_info("%lu: The capabilities of the SCM increment changed\n", 22 + pr_info("%lx: The capabilities of the SCM increment changed\n", 23 23 (unsigned long) scmdev->address); 24 24 SCM_LOG(2, "State changed"); 25 25 SCM_LOG_STATE(2, scmdev);
+9 -7
drivers/s390/char/tty3270.c
··· 915 915 int i, rc; 916 916 917 917 /* Check if the tty3270 is already there. */ 918 - view = raw3270_find_view(&tty3270_fn, tty->index); 918 + view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); 919 919 if (!IS_ERR(view)) { 920 920 tp = container_of(view, struct tty3270, view); 921 921 tty->driver_data = tp; ··· 927 927 tp->inattr = TF_INPUT; 928 928 return tty_port_install(&tp->port, driver, tty); 929 929 } 930 - if (tty3270_max_index < tty->index) 931 - tty3270_max_index = tty->index; 930 + if (tty3270_max_index < tty->index + 1) 931 + tty3270_max_index = tty->index + 1; 932 932 933 933 /* Allocate tty3270 structure on first open. */ 934 934 tp = tty3270_alloc_view(); 935 935 if (IS_ERR(tp)) 936 936 return PTR_ERR(tp); 937 937 938 - rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); 938 + rc = raw3270_add_view(&tp->view, &tty3270_fn, 939 + tty->index + RAW3270_FIRSTMINOR); 939 940 if (rc) { 940 941 tty3270_free_view(tp); 941 942 return rc; ··· 1847 1846 1848 1847 void tty3270_create_cb(int minor) 1849 1848 { 1850 - tty_register_device(tty3270_driver, minor, NULL); 1849 + tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); 1851 1850 } 1852 1851 1853 1852 void tty3270_destroy_cb(int minor) 1854 1853 { 1855 - tty_unregister_device(tty3270_driver, minor); 1854 + tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); 1856 1855 } 1857 1856 1858 1857 struct raw3270_notifier tty3270_notifier = ··· 1885 1884 driver->driver_name = "tty3270"; 1886 1885 driver->name = "3270/tty"; 1887 1886 driver->major = IBM_TTY3270_MAJOR; 1888 - driver->minor_start = 0; 1887 + driver->minor_start = RAW3270_FIRSTMINOR; 1888 + driver->name_base = RAW3270_FIRSTMINOR; 1889 1889 driver->type = TTY_DRIVER_TYPE_SYSTEM; 1890 1890 driver->subtype = SYSTEM_TYPE_TTY; 1891 1891 driver->init_termios = tty_std_termios;
+3 -3
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 940 940 fc_exch_init(lport); 941 941 fc_rport_init(lport); 942 942 fc_disc_init(lport); 943 + fc_disc_config(lport, lport); 943 944 return 0; 944 945 } 945 946 ··· 2134 2133 } 2135 2134 2136 2135 ctlr = bnx2fc_to_ctlr(interface); 2136 + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); 2137 2137 interface->vlan_id = vlan_id; 2138 2138 2139 2139 interface->timer_work_queue = ··· 2145 2143 goto ifput_err; 2146 2144 } 2147 2145 2148 - lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); 2146 + lport = bnx2fc_if_create(interface, &cdev->dev, 0); 2149 2147 if (!lport) { 2150 2148 printk(KERN_ERR PFX "Failed to create interface (%s)\n", 2151 2149 netdev->name); ··· 2160 2158 2161 2159 /* Make this master N_port */ 2162 2160 ctlr->lp = lport; 2163 - 2164 - cdev = fcoe_ctlr_to_ctlr_dev(ctlr); 2165 2161 2166 2162 if (link_state == BNX2FC_CREATE_LINK_UP) 2167 2163 cdev->enabled = FCOE_CTLR_ENABLED;
+11 -4
drivers/scsi/fcoe/fcoe.c
··· 490 490 { 491 491 struct net_device *netdev = fcoe->netdev; 492 492 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); 493 - struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); 494 493 495 494 rtnl_lock(); 496 495 if (!fcoe->removed) ··· 500 501 /* tear-down the FCoE controller */ 501 502 fcoe_ctlr_destroy(fip); 502 503 scsi_host_put(fip->lp->host); 503 - fcoe_ctlr_device_delete(ctlr_dev); 504 504 dev_put(netdev); 505 505 module_put(THIS_MODULE); 506 506 } ··· 2192 2194 */ 2193 2195 static void fcoe_destroy_work(struct work_struct *work) 2194 2196 { 2197 + struct fcoe_ctlr_device *cdev; 2198 + struct fcoe_ctlr *ctlr; 2195 2199 struct fcoe_port *port; 2196 2200 struct fcoe_interface *fcoe; 2197 2201 struct Scsi_Host *shost; ··· 2224 2224 mutex_lock(&fcoe_config_mutex); 2225 2225 2226 2226 fcoe = port->priv; 2227 + ctlr = fcoe_to_ctlr(fcoe); 2228 + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); 2229 + 2227 2230 fcoe_if_destroy(port->lport); 2228 2231 fcoe_interface_cleanup(fcoe); 2229 2232 2230 2233 mutex_unlock(&fcoe_config_mutex); 2234 + 2235 + fcoe_ctlr_device_delete(cdev); 2231 2236 } 2232 2237 2233 2238 /** ··· 2340 2335 rc = -EIO; 2341 2336 rtnl_unlock(); 2342 2337 fcoe_interface_cleanup(fcoe); 2343 - goto out_nortnl; 2338 + mutex_unlock(&fcoe_config_mutex); 2339 + fcoe_ctlr_device_delete(ctlr_dev); 2340 + goto out; 2344 2341 } 2345 2342 2346 2343 /* Make this the "master" N_Port */ ··· 2382 2375 2383 2376 out_nodev: 2384 2377 rtnl_unlock(); 2385 - out_nortnl: 2386 2378 mutex_unlock(&fcoe_config_mutex); 2379 + out: 2387 2380 return rc; 2388 2381 } 2389 2382
+46 -14
drivers/scsi/fcoe/fcoe_ctlr.c
··· 2815 2815 } 2816 2816 2817 2817 /** 2818 + * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode 2819 + * @lport: The local port to be (re)configured 2820 + * @fip: The FCoE controller whose mode is changing 2821 + * @fip_mode: The new fip mode 2822 + * 2823 + * Note that the we shouldn't be changing the libfc discovery settings 2824 + * (fc_disc_config) while an lport is going through the libfc state 2825 + * machine. The mode can only be changed when a fcoe_ctlr device is 2826 + * disabled, so that should ensure that this routine is only called 2827 + * when nothing is happening. 2828 + */ 2829 + void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, 2830 + enum fip_state fip_mode) 2831 + { 2832 + void *priv; 2833 + 2834 + WARN_ON(lport->state != LPORT_ST_RESET && 2835 + lport->state != LPORT_ST_DISABLED); 2836 + 2837 + if (fip_mode == FIP_MODE_VN2VN) { 2838 + lport->rport_priv_size = sizeof(struct fcoe_rport); 2839 + lport->point_to_multipoint = 1; 2840 + lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; 2841 + lport->tt.disc_start = fcoe_ctlr_disc_start; 2842 + lport->tt.disc_stop = fcoe_ctlr_disc_stop; 2843 + lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; 2844 + priv = fip; 2845 + } else { 2846 + lport->rport_priv_size = 0; 2847 + lport->point_to_multipoint = 0; 2848 + lport->tt.disc_recv_req = NULL; 2849 + lport->tt.disc_start = NULL; 2850 + lport->tt.disc_stop = NULL; 2851 + lport->tt.disc_stop_final = NULL; 2852 + priv = lport; 2853 + } 2854 + 2855 + fc_disc_config(lport, priv); 2856 + } 2857 + 2858 + /** 2818 2859 * fcoe_libfc_config() - Sets up libfc related properties for local port 2819 2860 * @lport: The local port to configure libfc for 2820 2861 * @fip: The FCoE controller in use by the local port ··· 2874 2833 fc_exch_init(lport); 2875 2834 fc_elsct_init(lport); 2876 2835 fc_lport_init(lport); 2877 - if (fip->mode == FIP_MODE_VN2VN) 2878 - lport->rport_priv_size = sizeof(struct fcoe_rport); 2879 2836 fc_rport_init(lport); 
2880 - if (fip->mode == FIP_MODE_VN2VN) { 2881 - lport->point_to_multipoint = 1; 2882 - lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; 2883 - lport->tt.disc_start = fcoe_ctlr_disc_start; 2884 - lport->tt.disc_stop = fcoe_ctlr_disc_stop; 2885 - lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; 2886 - mutex_init(&lport->disc.disc_mutex); 2887 - INIT_LIST_HEAD(&lport->disc.rports); 2888 - lport->disc.priv = fip; 2889 - } else { 2890 - fc_disc_init(lport); 2891 - } 2837 + fc_disc_init(lport); 2838 + fcoe_ctlr_mode_set(lport, fip, fip->mode); 2892 2839 return 0; 2893 2840 } 2894 2841 EXPORT_SYMBOL_GPL(fcoe_libfc_config); ··· 2904 2875 void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) 2905 2876 { 2906 2877 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); 2878 + struct fc_lport *lport = ctlr->lp; 2907 2879 2908 2880 mutex_lock(&ctlr->ctlr_mutex); 2909 2881 switch (ctlr_dev->mode) { ··· 2918 2888 } 2919 2889 2920 2890 mutex_unlock(&ctlr->ctlr_mutex); 2891 + 2892 + fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); 2921 2893 } 2922 2894 EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
+18 -8
drivers/scsi/libfc/fc_disc.c
··· 712 712 } 713 713 714 714 /** 715 - * fc_disc_init() - Initialize the discovery layer for a local port 716 - * @lport: The local port that needs the discovery layer to be initialized 715 + * fc_disc_config() - Configure the discovery layer for a local port 716 + * @lport: The local port that needs the discovery layer to be configured 717 + * @priv: Private data structre for users of the discovery layer 717 718 */ 718 - int fc_disc_init(struct fc_lport *lport) 719 + void fc_disc_config(struct fc_lport *lport, void *priv) 719 720 { 720 - struct fc_disc *disc; 721 + struct fc_disc *disc = &lport->disc; 721 722 722 723 if (!lport->tt.disc_start) 723 724 lport->tt.disc_start = fc_disc_start; ··· 733 732 lport->tt.disc_recv_req = fc_disc_recv_req; 734 733 735 734 disc = &lport->disc; 735 + 736 + disc->priv = priv; 737 + } 738 + EXPORT_SYMBOL(fc_disc_config); 739 + 740 + /** 741 + * fc_disc_init() - Initialize the discovery layer for a local port 742 + * @lport: The local port that needs the discovery layer to be initialized 743 + */ 744 + void fc_disc_init(struct fc_lport *lport) 745 + { 746 + struct fc_disc *disc = &lport->disc; 747 + 736 748 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); 737 749 mutex_init(&disc->disc_mutex); 738 750 INIT_LIST_HEAD(&disc->rports); 739 - 740 - disc->priv = lport; 741 - 742 - return 0; 743 751 } 744 752 EXPORT_SYMBOL(fc_disc_init);
+2 -1
drivers/spi/Kconfig
··· 55 55 56 56 config SPI_ALTERA 57 57 tristate "Altera SPI Controller" 58 + depends on GENERIC_HARDIRQS 58 59 select SPI_BITBANG 59 60 help 60 61 This is the driver for the Altera SPI Controller. ··· 311 310 312 311 config SPI_PXA2XX 313 312 tristate "PXA2xx SSP SPI master" 314 - depends on ARCH_PXA || PCI || ACPI 313 + depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS 315 314 select PXA_SSP if ARCH_PXA 316 315 help 317 316 This enables using a PXA2xx or Sodaville SSP port as a SPI master
+1 -2
drivers/spi/spi-bcm63xx.c
··· 152 152 static int bcm63xx_spi_setup(struct spi_device *spi) 153 153 { 154 154 struct bcm63xx_spi *bs; 155 - int ret; 156 155 157 156 bs = spi_master_get_devdata(spi->master); 158 157 ··· 489 490 default: 490 491 dev_err(dev, "unsupported MSG_CTL width: %d\n", 491 492 bs->msg_ctl_width); 492 - goto out_clk_disable; 493 + goto out_err; 493 494 } 494 495 495 496 /* Initialize hardware */
+1 -1
drivers/spi/spi-mpc512x-psc.c
··· 164 164 165 165 for (i = count; i > 0; i--) { 166 166 data = tx_buf ? *tx_buf++ : 0; 167 - if (len == EOFBYTE) 167 + if (len == EOFBYTE && t->cs_change) 168 168 setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); 169 169 out_8(&fifo->txdata_8, data); 170 170 len--;
-1
drivers/spi/spi-pxa2xx.c
··· 1168 1168 1169 1169 master->dev.parent = &pdev->dev; 1170 1170 master->dev.of_node = pdev->dev.of_node; 1171 - ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); 1172 1171 /* the spi->mode bits understood by this driver: */ 1173 1172 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 1174 1173
+25 -16
drivers/spi/spi-s3c64xx.c
··· 994 994 { 995 995 struct s3c64xx_spi_driver_data *sdd = data; 996 996 struct spi_master *spi = sdd->master; 997 - unsigned int val; 997 + unsigned int val, clr = 0; 998 998 999 - val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); 999 + val = readl(sdd->regs + S3C64XX_SPI_STATUS); 1000 1000 1001 - val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | 1002 - S3C64XX_SPI_PND_RX_UNDERRUN_CLR | 1003 - S3C64XX_SPI_PND_TX_OVERRUN_CLR | 1004 - S3C64XX_SPI_PND_TX_UNDERRUN_CLR; 1005 - 1006 - writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); 1007 - 1008 - if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) 1001 + if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { 1002 + clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR; 1009 1003 dev_err(&spi->dev, "RX overrun\n"); 1010 - if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) 1004 + } 1005 + if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { 1006 + clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; 1011 1007 dev_err(&spi->dev, "RX underrun\n"); 1012 - if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) 1008 + } 1009 + if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { 1010 + clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; 1013 1011 dev_err(&spi->dev, "TX overrun\n"); 1014 - if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) 1012 + } 1013 + if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { 1014 + clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; 1015 1015 dev_err(&spi->dev, "TX underrun\n"); 1016 + } 1017 + 1018 + /* Clear the pending irq by setting and then clearing it */ 1019 + writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); 1020 + writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); 1016 1021 1017 1022 return IRQ_HANDLED; 1018 1023 } ··· 1041 1036 writel(0, regs + S3C64XX_SPI_MODE_CFG); 1042 1037 writel(0, regs + S3C64XX_SPI_PACKET_CNT); 1043 1038 1044 - /* Clear any irq pending bits */ 1045 - writel(readl(regs + S3C64XX_SPI_PENDING_CLR), 1046 - regs + S3C64XX_SPI_PENDING_CLR); 1039 + /* Clear any irq pending bits, should set and clear the bits */ 1040 + val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | 1041 + S3C64XX_SPI_PND_RX_UNDERRUN_CLR | 1042 + 
S3C64XX_SPI_PND_TX_OVERRUN_CLR | 1043 + S3C64XX_SPI_PND_TX_UNDERRUN_CLR; 1044 + writel(val, regs + S3C64XX_SPI_PENDING_CLR); 1045 + writel(0, regs + S3C64XX_SPI_PENDING_CLR); 1047 1046 1048 1047 writel(0, regs + S3C64XX_SPI_SWAP_CFG); 1049 1048
+8 -17
drivers/spi/spi-tegra20-slink.c
··· 858 858 return 0; 859 859 } 860 860 861 - static int tegra_slink_prepare_transfer(struct spi_master *master) 862 - { 863 - struct tegra_slink_data *tspi = spi_master_get_devdata(master); 864 - 865 - return pm_runtime_get_sync(tspi->dev); 866 - } 867 - 868 - static int tegra_slink_unprepare_transfer(struct spi_master *master) 869 - { 870 - struct tegra_slink_data *tspi = spi_master_get_devdata(master); 871 - 872 - pm_runtime_put(tspi->dev); 873 - return 0; 874 - } 875 - 876 861 static int tegra_slink_transfer_one_message(struct spi_master *master, 877 862 struct spi_message *msg) 878 863 { ··· 870 885 871 886 msg->status = 0; 872 887 msg->actual_length = 0; 888 + ret = pm_runtime_get_sync(tspi->dev); 889 + if (ret < 0) { 890 + dev_err(tspi->dev, "runtime get failed: %d\n", ret); 891 + goto done; 892 + } 893 + 873 894 single_xfer = list_is_singular(&msg->transfers); 874 895 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 875 896 INIT_COMPLETION(tspi->xfer_completion); ··· 912 921 exit: 913 922 tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); 914 923 tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); 924 + pm_runtime_put(tspi->dev); 925 + done: 915 926 msg->status = ret; 916 927 spi_finalize_current_message(master); 917 928 return ret; ··· 1141 1148 /* the spi->mode bits understood by this driver: */ 1142 1149 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1143 1150 master->setup = tegra_slink_setup; 1144 - master->prepare_transfer_hardware = tegra_slink_prepare_transfer; 1145 1151 master->transfer_one_message = tegra_slink_transfer_one_message; 1146 - master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer; 1147 1152 master->num_chipselect = MAX_CHIP_SELECT; 1148 1153 master->bus_num = -1; 1149 1154
+8 -9
drivers/spi/spi.c
··· 543 543 /* Lock queue and check for queue work */ 544 544 spin_lock_irqsave(&master->queue_lock, flags); 545 545 if (list_empty(&master->queue) || !master->running) { 546 - if (master->busy && master->unprepare_transfer_hardware) { 547 - ret = master->unprepare_transfer_hardware(master); 548 - if (ret) { 549 - spin_unlock_irqrestore(&master->queue_lock, flags); 550 - dev_err(&master->dev, 551 - "failed to unprepare transfer hardware\n"); 552 - return; 553 - } 546 + if (!master->busy) { 547 + spin_unlock_irqrestore(&master->queue_lock, flags); 548 + return; 554 549 } 555 550 master->busy = false; 556 551 spin_unlock_irqrestore(&master->queue_lock, flags); 552 + if (master->unprepare_transfer_hardware && 553 + master->unprepare_transfer_hardware(master)) 554 + dev_err(&master->dev, 555 + "failed to unprepare transfer hardware\n"); 557 556 return; 558 557 } 559 558 ··· 983 984 acpi_status status; 984 985 acpi_handle handle; 985 986 986 - handle = ACPI_HANDLE(&master->dev); 987 + handle = ACPI_HANDLE(master->dev.parent); 987 988 if (!handle) 988 989 return; 989 990
-1
drivers/usb/core/port.c
··· 67 67 { 68 68 struct usb_port *port_dev = to_usb_port(dev); 69 69 70 - dev_pm_qos_hide_flags(dev); 71 70 kfree(port_dev); 72 71 } 73 72
+1 -1
drivers/video/fbmon.c
··· 1400 1400 fbmode->vmode = 0; 1401 1401 if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) 1402 1402 fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; 1403 - if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) 1403 + if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) 1404 1404 fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; 1405 1405 if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) 1406 1406 fbmode->vmode |= FB_VMODE_INTERLACED;
+1
drivers/video/sh_mobile_lcdcfb.c
··· 858 858 tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) 859 859 | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); 860 860 lcdc_write_chan(ch, LDHAJR, tmp); 861 + lcdc_write_chan_mirror(ch, LDHAJR, tmp); 861 862 } 862 863 863 864 static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl)
+2 -1
drivers/video/uvesafb.c
··· 1973 1973 err = -ENOMEM; 1974 1974 1975 1975 if (err) { 1976 - platform_device_put(uvesafb_device); 1976 + if (uvesafb_device) 1977 + platform_device_put(uvesafb_device); 1977 1978 platform_driver_unregister(&uvesafb_driver); 1978 1979 cn_del_callback(&uvesafb_cn_id); 1979 1980 return err;
+1 -1
firmware/Makefile
··· 82 82 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \ 83 83 qlogic/12160.bin 84 84 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin 85 - fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw 85 + fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw 86 86 fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp 87 87 fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \ 88 88 ess/maestro3_assp_minisrc.fw
firmware/intel/sd7220.fw.ihex firmware/qlogic/sd7220.fw.ihex
+1
fs/block_dev.c
··· 551 551 ihold(bdev->bd_inode); 552 552 return bdev; 553 553 } 554 + EXPORT_SYMBOL(bdgrab); 554 555 555 556 long nr_blockdev_pages(void) 556 557 {
+7 -4
fs/ext4/extents.c
··· 2999 2999 if (split_flag & EXT4_EXT_DATA_VALID1) { 3000 3000 err = ext4_ext_zeroout(inode, ex2); 3001 3001 zero_ex.ee_block = ex2->ee_block; 3002 - zero_ex.ee_len = ext4_ext_get_actual_len(ex2); 3002 + zero_ex.ee_len = cpu_to_le16( 3003 + ext4_ext_get_actual_len(ex2)); 3003 3004 ext4_ext_store_pblock(&zero_ex, 3004 3005 ext4_ext_pblock(ex2)); 3005 3006 } else { 3006 3007 err = ext4_ext_zeroout(inode, ex); 3007 3008 zero_ex.ee_block = ex->ee_block; 3008 - zero_ex.ee_len = ext4_ext_get_actual_len(ex); 3009 + zero_ex.ee_len = cpu_to_le16( 3010 + ext4_ext_get_actual_len(ex)); 3009 3011 ext4_ext_store_pblock(&zero_ex, 3010 3012 ext4_ext_pblock(ex)); 3011 3013 } 3012 3014 } else { 3013 3015 err = ext4_ext_zeroout(inode, &orig_ex); 3014 3016 zero_ex.ee_block = orig_ex.ee_block; 3015 - zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex); 3017 + zero_ex.ee_len = cpu_to_le16( 3018 + ext4_ext_get_actual_len(&orig_ex)); 3016 3019 ext4_ext_store_pblock(&zero_ex, 3017 3020 ext4_ext_pblock(&orig_ex)); 3018 3021 } ··· 3275 3272 if (err) 3276 3273 goto out; 3277 3274 zero_ex.ee_block = ex->ee_block; 3278 - zero_ex.ee_len = ext4_ext_get_actual_len(ex); 3275 + zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); 3279 3276 ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); 3280 3277 3281 3278 err = ext4_ext_get_access(handle, inode, path + depth);
+2 -2
fs/ext4/indirect.c
··· 1539 1539 blk = *i_data; 1540 1540 if (level > 0) { 1541 1541 ext4_lblk_t first2; 1542 - bh = sb_bread(inode->i_sb, blk); 1542 + bh = sb_bread(inode->i_sb, le32_to_cpu(blk)); 1543 1543 if (!bh) { 1544 - EXT4_ERROR_INODE_BLOCK(inode, blk, 1544 + EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk), 1545 1545 "Read failure"); 1546 1546 return -EIO; 1547 1547 }
+4 -1
fs/gfs2/file.c
··· 923 923 cmd = F_SETLK; 924 924 fl->fl_type = F_UNLCK; 925 925 } 926 - if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 926 + if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { 927 + if (fl->fl_type == F_UNLCK) 928 + posix_lock_file_wait(file, fl); 927 929 return -EIO; 930 + } 928 931 if (IS_GETLK(cmd)) 929 932 return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); 930 933 else if (fl->fl_type == F_UNLCK)
+1
fs/gfs2/incore.h
··· 588 588 struct dlm_lksb ls_control_lksb; /* control_lock */ 589 589 char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */ 590 590 struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ 591 + char *ls_lvb_bits; 591 592 592 593 spinlock_t ls_recover_spin; /* protects following fields */ 593 594 unsigned long ls_recover_flags; /* DFL_ */
+20 -19
fs/gfs2/lock_dlm.c
··· 483 483 484 484 static int all_jid_bits_clear(char *lvb) 485 485 { 486 - int i; 487 - for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) { 488 - if (lvb[i]) 489 - return 0; 490 - } 491 - return 1; 486 + return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0, 487 + GDLM_LVB_SIZE - JID_BITMAP_OFFSET); 492 488 } 493 489 494 490 static void sync_wait_cb(void *arg) ··· 576 580 { 577 581 struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); 578 582 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 579 - char lvb_bits[GDLM_LVB_SIZE]; 580 583 uint32_t block_gen, start_gen, lvb_gen, flags; 581 584 int recover_set = 0; 582 585 int write_lvb = 0; ··· 629 634 return; 630 635 } 631 636 632 - control_lvb_read(ls, &lvb_gen, lvb_bits); 637 + control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); 633 638 634 639 spin_lock(&ls->ls_recover_spin); 635 640 if (block_gen != ls->ls_recover_block || ··· 659 664 660 665 ls->ls_recover_result[i] = 0; 661 666 662 - if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) 667 + if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) 663 668 continue; 664 669 665 - __clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); 670 + __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); 666 671 write_lvb = 1; 667 672 } 668 673 } ··· 686 691 continue; 687 692 if (ls->ls_recover_submit[i] < start_gen) { 688 693 ls->ls_recover_submit[i] = 0; 689 - __set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); 694 + __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); 690 695 } 691 696 } 692 697 /* even if there are no bits to set, we need to write the ··· 700 705 spin_unlock(&ls->ls_recover_spin); 701 706 702 707 if (write_lvb) { 703 - control_lvb_write(ls, start_gen, lvb_bits); 708 + control_lvb_write(ls, start_gen, ls->ls_lvb_bits); 704 709 flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK; 705 710 } else { 706 711 flags = DLM_LKF_CONVERT; ··· 720 725 */ 721 726 722 727 for (i = 0; i < recover_size; i++) { 723 - if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) { 728 + 
if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { 724 729 fs_info(sdp, "recover generation %u jid %d\n", 725 730 start_gen, i); 726 731 gfs2_recover_set(sdp, i); ··· 753 758 static int control_mount(struct gfs2_sbd *sdp) 754 759 { 755 760 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 756 - char lvb_bits[GDLM_LVB_SIZE]; 757 761 uint32_t start_gen, block_gen, mount_gen, lvb_gen; 758 762 int mounted_mode; 759 763 int retries = 0; ··· 851 857 * lvb_gen will be non-zero. 852 858 */ 853 859 854 - control_lvb_read(ls, &lvb_gen, lvb_bits); 860 + control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); 855 861 856 862 if (lvb_gen == 0xFFFFFFFF) { 857 863 /* special value to force mount attempts to fail */ ··· 881 887 * and all lvb bits to be clear (no pending journal recoveries.) 882 888 */ 883 889 884 - if (!all_jid_bits_clear(lvb_bits)) { 890 + if (!all_jid_bits_clear(ls->ls_lvb_bits)) { 885 891 /* journals need recovery, wait until all are clear */ 886 892 fs_info(sdp, "control_mount wait for journal recovery\n"); 887 893 goto restart; ··· 943 949 static int control_first_done(struct gfs2_sbd *sdp) 944 950 { 945 951 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 946 - char lvb_bits[GDLM_LVB_SIZE]; 947 952 uint32_t start_gen, block_gen; 948 953 int error; 949 954 ··· 984 991 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); 985 992 spin_unlock(&ls->ls_recover_spin); 986 993 987 - memset(lvb_bits, 0, sizeof(lvb_bits)); 988 - control_lvb_write(ls, start_gen, lvb_bits); 994 + memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); 995 + control_lvb_write(ls, start_gen, ls->ls_lvb_bits); 989 996 990 997 error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT); 991 998 if (error) ··· 1014 1021 uint32_t *result = NULL; 1015 1022 uint32_t old_size, new_size; 1016 1023 int i, max_jid; 1024 + 1025 + if (!ls->ls_lvb_bits) { 1026 + ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 1027 + if (!ls->ls_lvb_bits) 1028 + return -ENOMEM; 1029 + } 1017 1030 1018 1031 max_jid 
= 0; 1019 1032 for (i = 0; i < num_slots; i++) { ··· 1056 1057 1057 1058 static void free_recover_size(struct lm_lockstruct *ls) 1058 1059 { 1060 + kfree(ls->ls_lvb_bits); 1059 1061 kfree(ls->ls_recover_submit); 1060 1062 kfree(ls->ls_recover_result); 1061 1063 ls->ls_recover_submit = NULL; ··· 1205 1205 ls->ls_recover_size = 0; 1206 1206 ls->ls_recover_submit = NULL; 1207 1207 ls->ls_recover_result = NULL; 1208 + ls->ls_lvb_bits = NULL; 1208 1209 1209 1210 error = set_recover_size(sdp, NULL, 0); 1210 1211 if (error)
+14 -18
fs/gfs2/rgrp.c
··· 576 576 RB_CLEAR_NODE(&ip->i_res->rs_node); 577 577 out: 578 578 up_write(&ip->i_rw_mutex); 579 - return 0; 579 + return error; 580 580 } 581 581 582 582 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) ··· 1181 1181 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed) 1182 1182 { 1183 1183 struct super_block *sb = sdp->sd_vfs; 1184 - struct block_device *bdev = sb->s_bdev; 1185 - const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / 1186 - bdev_logical_block_size(sb->s_bdev); 1187 1184 u64 blk; 1188 1185 sector_t start = 0; 1189 - sector_t nr_sects = 0; 1186 + sector_t nr_blks = 0; 1190 1187 int rv; 1191 1188 unsigned int x; 1192 1189 u32 trimmed = 0; ··· 1203 1206 if (diff == 0) 1204 1207 continue; 1205 1208 blk = offset + ((bi->bi_start + x) * GFS2_NBBY); 1206 - blk *= sects_per_blk; /* convert to sectors */ 1207 1209 while(diff) { 1208 1210 if (diff & 1) { 1209 - if (nr_sects == 0) 1211 + if (nr_blks == 0) 1210 1212 goto start_new_extent; 1211 - if ((start + nr_sects) != blk) { 1212 - if (nr_sects >= minlen) { 1213 - rv = blkdev_issue_discard(bdev, 1214 - start, nr_sects, 1213 + if ((start + nr_blks) != blk) { 1214 + if (nr_blks >= minlen) { 1215 + rv = sb_issue_discard(sb, 1216 + start, nr_blks, 1215 1217 GFP_NOFS, 0); 1216 1218 if (rv) 1217 1219 goto fail; 1218 - trimmed += nr_sects; 1220 + trimmed += nr_blks; 1219 1221 } 1220 - nr_sects = 0; 1222 + nr_blks = 0; 1221 1223 start_new_extent: 1222 1224 start = blk; 1223 1225 } 1224 - nr_sects += sects_per_blk; 1226 + nr_blks++; 1225 1227 } 1226 1228 diff >>= 2; 1227 - blk += sects_per_blk; 1229 + blk++; 1228 1230 } 1229 1231 } 1230 - if (nr_sects >= minlen) { 1231 - rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0); 1232 + if (nr_blks >= minlen) { 1233 + rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); 1232 1234 if (rv) 1233 1235 goto fail; 1234 - trimmed += nr_sects; 1236 + trimmed += nr_blks; 1235 1237 } 1236 1238 if (ptrimmed) 1237 1239 *ptrimmed = 
trimmed;
+1 -1
fs/nfsd/nfs4xdr.c
··· 264 264 iattr->ia_valid |= ATTR_SIZE; 265 265 } 266 266 if (bmval[0] & FATTR4_WORD0_ACL) { 267 - int nace; 267 + u32 nace; 268 268 struct nfs4_ace *ace; 269 269 270 270 READ_BUF(4); len += 4;
+2 -2
fs/reiserfs/xattr.c
··· 187 187 if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) 188 188 return -ENOSPC; 189 189 190 - if (name[0] == '.' && (name[1] == '\0' || 191 - (name[1] == '.' && name[2] == '\0'))) 190 + if (name[0] == '.' && (namelen < 2 || 191 + (namelen == 2 && name[1] == '.'))) 192 192 return 0; 193 193 194 194 dentry = lookup_one_len(name, dbuf->xadir, namelen);
+6 -6
fs/ubifs/super.c
··· 1568 1568 c->remounting_rw = 1; 1569 1569 c->ro_mount = 0; 1570 1570 1571 + if (c->space_fixup) { 1572 + err = ubifs_fixup_free_space(c); 1573 + if (err) 1574 + return err; 1575 + } 1576 + 1571 1577 err = check_free_space(c); 1572 1578 if (err) 1573 1579 goto out; ··· 1688 1682 * because, for example, the old index size was imprecise. 1689 1683 */ 1690 1684 err = dbg_check_space_info(c); 1691 - } 1692 - 1693 - if (c->space_fixup) { 1694 - err = ubifs_fixup_free_space(c); 1695 - if (err) 1696 - goto out; 1697 1685 } 1698 1686 1699 1687 mutex_unlock(&c->umount_mutex);
+2 -2
include/linux/compat.h
··· 141 141 } compat_sigset_t; 142 142 143 143 struct compat_sigaction { 144 - #ifndef __ARCH_HAS_ODD_SIGACTION 144 + #ifndef __ARCH_HAS_IRIX_SIGACTION 145 145 compat_uptr_t sa_handler; 146 146 compat_ulong_t sa_flags; 147 147 #else 148 - compat_ulong_t sa_flags; 148 + compat_uint_t sa_flags; 149 149 compat_uptr_t sa_handler; 150 150 #endif 151 151 #ifdef __ARCH_HAS_SA_RESTORER
+8 -8
include/linux/devfreq.h
··· 213 213 #endif 214 214 215 215 #else /* !CONFIG_PM_DEVFREQ */ 216 - static struct devfreq *devfreq_add_device(struct device *dev, 216 + static inline struct devfreq *devfreq_add_device(struct device *dev, 217 217 struct devfreq_dev_profile *profile, 218 218 const char *governor_name, 219 219 void *data) ··· 221 221 return NULL; 222 222 } 223 223 224 - static int devfreq_remove_device(struct devfreq *devfreq) 224 + static inline int devfreq_remove_device(struct devfreq *devfreq) 225 225 { 226 226 return 0; 227 227 } 228 228 229 - static int devfreq_suspend_device(struct devfreq *devfreq) 229 + static inline int devfreq_suspend_device(struct devfreq *devfreq) 230 230 { 231 231 return 0; 232 232 } 233 233 234 - static int devfreq_resume_device(struct devfreq *devfreq) 234 + static inline int devfreq_resume_device(struct devfreq *devfreq) 235 235 { 236 236 return 0; 237 237 } 238 238 239 - static struct opp *devfreq_recommended_opp(struct device *dev, 239 + static inline struct opp *devfreq_recommended_opp(struct device *dev, 240 240 unsigned long *freq, u32 flags) 241 241 { 242 - return -EINVAL; 242 + return ERR_PTR(-EINVAL); 243 243 } 244 244 245 - static int devfreq_register_opp_notifier(struct device *dev, 245 + static inline int devfreq_register_opp_notifier(struct device *dev, 246 246 struct devfreq *devfreq) 247 247 { 248 248 return -EINVAL; 249 249 } 250 250 251 - static int devfreq_unregister_opp_notifier(struct device *dev, 251 + static inline int devfreq_unregister_opp_notifier(struct device *dev, 252 252 struct devfreq *devfreq) 253 253 { 254 254 return -EINVAL;
+1 -1
include/linux/kvm_host.h
··· 518 518 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 519 519 void *data, unsigned long len); 520 520 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 521 - gpa_t gpa); 521 + gpa_t gpa, unsigned long len); 522 522 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); 523 523 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); 524 524 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+1
include/linux/kvm_types.h
··· 71 71 u64 generation; 72 72 gpa_t gpa; 73 73 unsigned long hva; 74 + unsigned long len; 74 75 struct kvm_memory_slot *memslot; 75 76 }; 76 77
+2 -2
include/linux/netdevice.h
··· 210 210 #define NETDEV_HW_ADDR_T_SLAVE 3 211 211 #define NETDEV_HW_ADDR_T_UNICAST 4 212 212 #define NETDEV_HW_ADDR_T_MULTICAST 5 213 - bool synced; 214 213 bool global_use; 215 214 int refcount; 215 + int synced; 216 216 struct rcu_head rcu_head; 217 217 }; 218 218 ··· 895 895 * 896 896 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) 897 897 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 898 - * struct net_device *dev) 898 + * struct net_device *dev, u32 filter_mask) 899 899 * 900 900 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 901 901 * Called to change device carrier. Soft-devices (like dummy, team, etc)
+1
include/linux/pci.h
··· 916 916 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 917 917 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 918 918 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); 919 + void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); 919 920 920 921 /* Power management related routines */ 921 922 int pci_save_state(struct pci_dev *dev);
+2 -2
include/linux/signal.h
··· 250 250 extern int sigsuspend(sigset_t *); 251 251 252 252 struct sigaction { 253 - #ifndef __ARCH_HAS_ODD_SIGACTION 253 + #ifndef __ARCH_HAS_IRIX_SIGACTION 254 254 __sighandler_t sa_handler; 255 255 unsigned long sa_flags; 256 256 #else 257 - unsigned long sa_flags; 257 + unsigned int sa_flags; 258 258 __sighandler_t sa_handler; 259 259 #endif 260 260 #ifdef __ARCH_HAS_SA_RESTORER
+7
include/linux/skbuff.h
··· 2643 2643 #endif 2644 2644 } 2645 2645 2646 + static inline void nf_reset_trace(struct sk_buff *skb) 2647 + { 2648 + #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 2649 + skb->nf_trace = 0; 2650 + #endif 2651 + } 2652 + 2646 2653 /* Note: This doesn't put any conntrack and bridge info in dst. */ 2647 2654 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) 2648 2655 {
+2 -1
include/scsi/libfc.h
··· 1074 1074 /* 1075 1075 * DISCOVERY LAYER 1076 1076 *****************************/ 1077 - int fc_disc_init(struct fc_lport *); 1077 + void fc_disc_init(struct fc_lport *); 1078 + void fc_disc_config(struct fc_lport *, void *); 1078 1079 1079 1080 static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) 1080 1081 {
include/sound/max98090.h
+1
include/sound/soc-dapm.h
··· 488 488 /* status */ 489 489 u32 connect:1; /* source and sink widgets are connected */ 490 490 u32 walked:1; /* path has been walked */ 491 + u32 walking:1; /* path is in the process of being walked */ 491 492 u32 weak:1; /* path ignored for power management */ 492 493 493 494 int (*connected)(struct snd_soc_dapm_widget *source,
+1
ipc/msg.c
··· 872 872 goto out_unlock; 873 873 break; 874 874 } 875 + msg = ERR_PTR(-EAGAIN); 875 876 } else 876 877 break; 877 878 msg_counter++;
+1 -1
mm/mmap.c
··· 1940 1940 1941 1941 /* Check the cache first. */ 1942 1942 /* (Cache hit rate is typically around 35%.) */ 1943 - vma = mm->mmap_cache; 1943 + vma = ACCESS_ONCE(mm->mmap_cache); 1944 1944 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { 1945 1945 struct rb_node *rb_node; 1946 1946
+1 -1
mm/nommu.c
··· 821 821 struct vm_area_struct *vma; 822 822 823 823 /* check the cache first */ 824 - vma = mm->mmap_cache; 824 + vma = ACCESS_ONCE(mm->mmap_cache); 825 825 if (vma && vma->vm_start <= addr && vma->vm_end > addr) 826 826 return vma; 827 827
+7 -1
net/core/dev.c
··· 1624 1624 } 1625 1625 1626 1626 skb_orphan(skb); 1627 - nf_reset(skb); 1628 1627 1629 1628 if (unlikely(!is_skb_forwardable(dev, skb))) { 1630 1629 atomic_long_inc(&dev->rx_dropped); ··· 1639 1640 skb->mark = 0; 1640 1641 secpath_reset(skb); 1641 1642 nf_reset(skb); 1643 + nf_reset_trace(skb); 1642 1644 return netif_rx(skb); 1643 1645 } 1644 1646 EXPORT_SYMBOL_GPL(dev_forward_skb); ··· 3314 3314 if (dev->rx_handler) 3315 3315 return -EBUSY; 3316 3316 3317 + /* Note: rx_handler_data must be set before rx_handler */ 3317 3318 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); 3318 3319 rcu_assign_pointer(dev->rx_handler, rx_handler); 3319 3320 ··· 3335 3334 3336 3335 ASSERT_RTNL(); 3337 3336 RCU_INIT_POINTER(dev->rx_handler, NULL); 3337 + /* a reader seeing a non NULL rx_handler in a rcu_read_lock() 3338 + * section has a guarantee to see a non NULL rx_handler_data 3339 + * as well. 3340 + */ 3341 + synchronize_net(); 3338 3342 RCU_INIT_POINTER(dev->rx_handler_data, NULL); 3339 3343 } 3340 3344 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+3 -3
net/core/dev_addr_lists.c
··· 37 37 ha->type = addr_type; 38 38 ha->refcount = 1; 39 39 ha->global_use = global; 40 - ha->synced = false; 40 + ha->synced = 0; 41 41 list_add_tail_rcu(&ha->list, &list->list); 42 42 list->count++; 43 43 ··· 165 165 addr_len, ha->type); 166 166 if (err) 167 167 break; 168 - ha->synced = true; 168 + ha->synced++; 169 169 ha->refcount++; 170 170 } else if (ha->refcount == 1) { 171 171 __hw_addr_del(to_list, ha->addr, addr_len, ha->type); ··· 186 186 if (ha->synced) { 187 187 __hw_addr_del(to_list, ha->addr, 188 188 addr_len, ha->type); 189 - ha->synced = false; 189 + ha->synced--; 190 190 __hw_addr_del(from_list, ha->addr, 191 191 addr_len, ha->type); 192 192 }
+1 -1
net/core/flow.c
··· 328 328 struct flow_flush_info *info = data; 329 329 struct tasklet_struct *tasklet; 330 330 331 - tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); 331 + tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet; 332 332 tasklet->data = (unsigned long)info; 333 333 tasklet_schedule(tasklet); 334 334 }
+3 -1
net/core/rtnetlink.c
··· 496 496 } 497 497 if (ops->fill_info) { 498 498 data = nla_nest_start(skb, IFLA_INFO_DATA); 499 - if (data == NULL) 499 + if (data == NULL) { 500 + err = -EMSGSIZE; 500 501 goto err_cancel_link; 502 + } 501 503 err = ops->fill_info(skb, dev); 502 504 if (err < 0) 503 505 goto err_cancel_data;
+4 -2
net/ipv4/devinet.c
··· 802 802 if (nlh->nlmsg_flags & NLM_F_EXCL || 803 803 !(nlh->nlmsg_flags & NLM_F_REPLACE)) 804 804 return -EEXIST; 805 - 806 - set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); 805 + ifa = ifa_existing; 806 + set_ifa_lifetime(ifa, valid_lft, prefered_lft); 807 + rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); 808 + blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); 807 809 } 808 810 return 0; 809 811 }
+27
net/ipv6/addrconf.c
··· 2529 2529 static void init_loopback(struct net_device *dev) 2530 2530 { 2531 2531 struct inet6_dev *idev; 2532 + struct net_device *sp_dev; 2533 + struct inet6_ifaddr *sp_ifa; 2534 + struct rt6_info *sp_rt; 2532 2535 2533 2536 /* ::1 */ 2534 2537 ··· 2543 2540 } 2544 2541 2545 2542 add_addr(idev, &in6addr_loopback, 128, IFA_HOST); 2543 + 2544 + /* Add routes to other interface's IPv6 addresses */ 2545 + for_each_netdev(dev_net(dev), sp_dev) { 2546 + if (!strcmp(sp_dev->name, dev->name)) 2547 + continue; 2548 + 2549 + idev = __in6_dev_get(sp_dev); 2550 + if (!idev) 2551 + continue; 2552 + 2553 + read_lock_bh(&idev->lock); 2554 + list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { 2555 + 2556 + if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) 2557 + continue; 2558 + 2559 + sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); 2560 + 2561 + /* Failure cases are ignored */ 2562 + if (!IS_ERR(sp_rt)) 2563 + ip6_ins_rt(sp_rt); 2564 + } 2565 + read_unlock_bh(&idev->lock); 2566 + } 2546 2567 } 2547 2568 2548 2569 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
+12
net/ipv6/ip6_input.c
··· 118 118 ipv6_addr_loopback(&hdr->daddr)) 119 119 goto err; 120 120 121 + /* RFC4291 Errata ID: 3480 122 + * Interface-Local scope spans only a single interface on a 123 + * node and is useful only for loopback transmission of 124 + * multicast. Packets with interface-local scope received 125 + * from another node must be discarded. 126 + */ 127 + if (!(skb->pkt_type == PACKET_LOOPBACK || 128 + dev->flags & IFF_LOOPBACK) && 129 + ipv6_addr_is_multicast(&hdr->daddr) && 130 + IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) 131 + goto err; 132 + 121 133 /* RFC4291 2.7 122 134 * Nodes must not originate a packet to a multicast address whose scope 123 135 * field contains the reserved value 0; if such a packet is received, it
+1 -1
net/ipv6/netfilter/ip6t_NPT.c
··· 57 57 if (pfx_len - i >= 32) 58 58 mask = 0; 59 59 else 60 - mask = htonl(~((1 << (pfx_len - i)) - 1)); 60 + mask = htonl((1 << (i - pfx_len + 32)) - 1); 61 61 62 62 idx = i / 32; 63 63 addr->s6_addr32[idx] &= mask;
+1
net/key/af_key.c
··· 2693 2693 hdr->sadb_msg_pid = c->portid; 2694 2694 hdr->sadb_msg_version = PF_KEY_V2; 2695 2695 hdr->sadb_msg_errno = (uint8_t) 0; 2696 + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; 2696 2697 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 2697 2698 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 2698 2699 return 0;
+4 -2
net/mac80211/cfg.c
··· 2582 2582 list_del(&dep->list); 2583 2583 mutex_unlock(&local->mtx); 2584 2584 2585 - ieee80211_roc_notify_destroy(dep); 2585 + ieee80211_roc_notify_destroy(dep, true); 2586 2586 return 0; 2587 2587 } 2588 2588 ··· 2622 2622 ieee80211_start_next_roc(local); 2623 2623 mutex_unlock(&local->mtx); 2624 2624 2625 - ieee80211_roc_notify_destroy(found); 2625 + ieee80211_roc_notify_destroy(found, true); 2626 2626 } else { 2627 2627 /* work may be pending so use it all the time */ 2628 2628 found->abort = true; ··· 2632 2632 2633 2633 /* work will clean up etc */ 2634 2634 flush_delayed_work(&found->work); 2635 + WARN_ON(!found->to_be_freed); 2636 + kfree(found); 2635 2637 } 2636 2638 2637 2639 return 0;
+14 -3
net/mac80211/chan.c
··· 63 63 enum ieee80211_chanctx_mode mode) 64 64 { 65 65 struct ieee80211_chanctx *ctx; 66 + u32 changed; 66 67 int err; 67 68 68 69 lockdep_assert_held(&local->chanctx_mtx); ··· 77 76 ctx->conf.rx_chains_dynamic = 1; 78 77 ctx->mode = mode; 79 78 79 + /* acquire mutex to prevent idle from changing */ 80 + mutex_lock(&local->mtx); 81 + /* turn idle off *before* setting channel -- some drivers need that */ 82 + changed = ieee80211_idle_off(local); 83 + if (changed) 84 + ieee80211_hw_config(local, changed); 85 + 80 86 if (!local->use_chanctx) { 81 87 local->_oper_channel_type = 82 88 cfg80211_get_chandef_type(chandef); ··· 93 85 err = drv_add_chanctx(local, ctx); 94 86 if (err) { 95 87 kfree(ctx); 96 - return ERR_PTR(err); 88 + ctx = ERR_PTR(err); 89 + 90 + ieee80211_recalc_idle(local); 91 + goto out; 97 92 } 98 93 } 99 94 95 + /* and keep the mutex held until the new chanctx is on the list */ 100 96 list_add_rcu(&ctx->list, &local->chanctx_list); 101 97 102 - mutex_lock(&local->mtx); 103 - ieee80211_recalc_idle(local); 98 + out: 104 99 mutex_unlock(&local->mtx); 105 100 106 101 return ctx;
+3 -1
net/mac80211/ieee80211_i.h
··· 309 309 struct ieee80211_channel *chan; 310 310 311 311 bool started, abort, hw_begun, notified; 312 + bool to_be_freed; 312 313 313 314 unsigned long hw_start_time; 314 315 ··· 1348 1347 void ieee80211_roc_setup(struct ieee80211_local *local); 1349 1348 void ieee80211_start_next_roc(struct ieee80211_local *local); 1350 1349 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); 1351 - void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); 1350 + void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free); 1352 1351 void ieee80211_sw_roc_work(struct work_struct *work); 1353 1352 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); 1354 1353 ··· 1362 1361 enum nl80211_iftype type); 1363 1362 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); 1364 1363 void ieee80211_remove_interfaces(struct ieee80211_local *local); 1364 + u32 ieee80211_idle_off(struct ieee80211_local *local); 1365 1365 void ieee80211_recalc_idle(struct ieee80211_local *local); 1366 1366 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, 1367 1367 const int offset);
+20 -17
net/mac80211/iface.c
··· 78 78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); 79 79 } 80 80 81 - static u32 ieee80211_idle_off(struct ieee80211_local *local) 81 + u32 ieee80211_idle_off(struct ieee80211_local *local) 82 82 { 83 83 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) 84 84 return 0; ··· 349 349 static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) 350 350 { 351 351 struct ieee80211_sub_if_data *sdata; 352 - int ret = 0; 352 + int ret; 353 353 354 354 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) 355 355 return 0; 356 356 357 - mutex_lock(&local->iflist_mtx); 357 + ASSERT_RTNL(); 358 358 359 359 if (local->monitor_sdata) 360 - goto out_unlock; 360 + return 0; 361 361 362 362 sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); 363 - if (!sdata) { 364 - ret = -ENOMEM; 365 - goto out_unlock; 366 - } 363 + if (!sdata) 364 + return -ENOMEM; 367 365 368 366 /* set up data */ 369 367 sdata->local = local; ··· 375 377 if (WARN_ON(ret)) { 376 378 /* ok .. stupid driver, it asked for this! 
*/ 377 379 kfree(sdata); 378 - goto out_unlock; 380 + return ret; 379 381 } 380 382 381 383 ret = ieee80211_check_queues(sdata); 382 384 if (ret) { 383 385 kfree(sdata); 384 - goto out_unlock; 386 + return ret; 385 387 } 386 388 387 389 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, ··· 389 391 if (ret) { 390 392 drv_remove_interface(local, sdata); 391 393 kfree(sdata); 392 - goto out_unlock; 394 + return ret; 393 395 } 394 396 397 + mutex_lock(&local->iflist_mtx); 395 398 rcu_assign_pointer(local->monitor_sdata, sdata); 396 - out_unlock: 397 399 mutex_unlock(&local->iflist_mtx); 398 - return ret; 400 + 401 + return 0; 399 402 } 400 403 401 404 static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) ··· 406 407 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) 407 408 return; 408 409 410 + ASSERT_RTNL(); 411 + 409 412 mutex_lock(&local->iflist_mtx); 410 413 411 414 sdata = rcu_dereference_protected(local->monitor_sdata, 412 415 lockdep_is_held(&local->iflist_mtx)); 413 - if (!sdata) 414 - goto out_unlock; 416 + if (!sdata) { 417 + mutex_unlock(&local->iflist_mtx); 418 + return; 419 + } 415 420 416 421 rcu_assign_pointer(local->monitor_sdata, NULL); 422 + mutex_unlock(&local->iflist_mtx); 423 + 417 424 synchronize_net(); 418 425 419 426 ieee80211_vif_release_channel(sdata); ··· 427 422 drv_remove_interface(local, sdata); 428 423 429 424 kfree(sdata); 430 - out_unlock: 431 - mutex_unlock(&local->iflist_mtx); 432 425 } 433 426 434 427 /*
+2 -1
net/mac80211/mesh.c
··· 1060 1060 1061 1061 rcu_read_lock(); 1062 1062 list_for_each_entry_rcu(sdata, &local->interfaces, list) 1063 - if (ieee80211_vif_is_mesh(&sdata->vif)) 1063 + if (ieee80211_vif_is_mesh(&sdata->vif) && 1064 + ieee80211_sdata_running(sdata)) 1064 1065 ieee80211_queue_work(&local->hw, &sdata->work); 1065 1066 rcu_read_unlock(); 1066 1067 }
+4 -2
net/mac80211/mlme.c
··· 3608 3608 3609 3609 /* Restart STA timers */ 3610 3610 rcu_read_lock(); 3611 - list_for_each_entry_rcu(sdata, &local->interfaces, list) 3612 - ieee80211_restart_sta_timer(sdata); 3611 + list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3612 + if (ieee80211_sdata_running(sdata)) 3613 + ieee80211_restart_sta_timer(sdata); 3614 + } 3613 3615 rcu_read_unlock(); 3614 3616 } 3615 3617
+17 -6
net/mac80211/offchannel.c
··· 297 297 } 298 298 } 299 299 300 - void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) 300 + void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free) 301 301 { 302 302 struct ieee80211_roc_work *dep, *tmp; 303 + 304 + if (WARN_ON(roc->to_be_freed)) 305 + return; 303 306 304 307 /* was never transmitted */ 305 308 if (roc->frame) { ··· 319 316 GFP_KERNEL); 320 317 321 318 list_for_each_entry_safe(dep, tmp, &roc->dependents, list) 322 - ieee80211_roc_notify_destroy(dep); 319 + ieee80211_roc_notify_destroy(dep, true); 323 320 324 - kfree(roc); 321 + if (free) 322 + kfree(roc); 323 + else 324 + roc->to_be_freed = true; 325 325 } 326 326 327 327 void ieee80211_sw_roc_work(struct work_struct *work) ··· 336 330 bool started; 337 331 338 332 mutex_lock(&local->mtx); 333 + 334 + if (roc->to_be_freed) 335 + goto out_unlock; 339 336 340 337 if (roc->abort) 341 338 goto finish; ··· 379 370 finish: 380 371 list_del(&roc->list); 381 372 started = roc->started; 382 - ieee80211_roc_notify_destroy(roc); 373 + ieee80211_roc_notify_destroy(roc, !roc->abort); 383 374 384 375 if (started) { 385 376 drv_flush(local, false); ··· 419 410 420 411 list_del(&roc->list); 421 412 422 - ieee80211_roc_notify_destroy(roc); 413 + ieee80211_roc_notify_destroy(roc, true); 423 414 424 415 /* if there's another roc, start it now */ 425 416 ieee80211_start_next_roc(local); ··· 469 460 list_for_each_entry_safe(roc, tmp, &tmp_list, list) { 470 461 if (local->ops->remain_on_channel) { 471 462 list_del(&roc->list); 472 - ieee80211_roc_notify_destroy(roc); 463 + ieee80211_roc_notify_destroy(roc, true); 473 464 } else { 474 465 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); 475 466 476 467 /* work will clean up etc */ 477 468 flush_delayed_work(&roc->work); 469 + WARN_ON(!roc->to_be_freed); 470 + kfree(roc); 478 471 } 479 472 } 480 473
+13 -1
net/mac80211/rx.c
··· 2675 2675 2676 2676 memset(nskb->cb, 0, sizeof(nskb->cb)); 2677 2677 2678 - ieee80211_tx_skb(rx->sdata, nskb); 2678 + if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 2679 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 2680 + 2681 + info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 2682 + IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 2683 + IEEE80211_TX_CTL_NO_CCK_RATE; 2684 + if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) 2685 + info->hw_queue = 2686 + local->hw.offchannel_tx_hw_queue; 2687 + } 2688 + 2689 + __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 2690 + status->band); 2679 2691 } 2680 2692 dev_kfree_skb(rx->skb); 2681 2693 return RX_QUEUED;
+10 -2
net/mac80211/sta_info.c
··· 766 766 struct ieee80211_local *local; 767 767 struct ieee80211_sub_if_data *sdata; 768 768 int ret, i; 769 + bool have_key = false; 769 770 770 771 might_sleep(); 771 772 ··· 794 793 list_del_rcu(&sta->list); 795 794 796 795 mutex_lock(&local->key_mtx); 797 - for (i = 0; i < NUM_DEFAULT_KEYS; i++) 796 + for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 798 797 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); 799 - if (sta->ptk) 798 + have_key = true; 799 + } 800 + if (sta->ptk) { 800 801 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); 802 + have_key = true; 803 + } 801 804 mutex_unlock(&local->key_mtx); 805 + 806 + if (!have_key) 807 + synchronize_net(); 802 808 803 809 sta->dead = true; 804 810
+1
net/netfilter/nf_conntrack_standalone.c
··· 568 568 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); 569 569 if (!nf_ct_netfilter_header) { 570 570 pr_err("nf_conntrack: can't register to sysctl.\n"); 571 + ret = -ENOMEM; 571 572 goto out_sysctl; 572 573 } 573 574 #endif
-8
net/nfc/llcp/llcp.c
··· 107 107 accept_sk->sk_state_change(sk); 108 108 109 109 bh_unlock_sock(accept_sk); 110 - 111 - sock_orphan(accept_sk); 112 110 } 113 111 114 112 if (listen == true) { ··· 131 133 sk->sk_state_change(sk); 132 134 133 135 bh_unlock_sock(sk); 134 - 135 - sock_orphan(sk); 136 136 137 137 sk_del_node_init(sk); 138 138 } ··· 159 163 sk->sk_state_change(sk); 160 164 161 165 bh_unlock_sock(sk); 162 - 163 - sock_orphan(sk); 164 166 165 167 sk_del_node_init(sk); 166 168 } ··· 821 827 skb_get(skb); 822 828 } else { 823 829 pr_err("Receive queue is full\n"); 824 - kfree_skb(skb); 825 830 } 826 831 827 832 nfc_llcp_sock_put(llcp_sock); ··· 1021 1028 skb_get(skb); 1022 1029 } else { 1023 1030 pr_err("Receive queue is full\n"); 1024 - kfree_skb(skb); 1025 1031 } 1026 1032 } 1027 1033
+3 -3
net/nfc/llcp/sock.c
··· 270 270 } 271 271 272 272 if (sk->sk_state == LLCP_CONNECTED || !newsock) { 273 - nfc_llcp_accept_unlink(sk); 273 + list_del_init(&lsk->accept_queue); 274 + sock_put(sk); 275 + 274 276 if (newsock) 275 277 sock_graft(sk, newsock); 276 278 ··· 466 464 nfc_llcp_accept_unlink(accept_sk); 467 465 468 466 release_sock(accept_sk); 469 - 470 - sock_orphan(accept_sk); 471 467 } 472 468 } 473 469
+4 -1
net/sched/sch_cbq.c
··· 962 962 cbq_update(q); 963 963 if ((incr -= incr2) < 0) 964 964 incr = 0; 965 + q->now += incr; 966 + } else { 967 + if (now > q->now) 968 + q->now = now; 965 969 } 966 - q->now += incr; 967 970 q->now_rt = now; 968 971 969 972 for (;;) {
+1 -1
net/sched/sch_fq_codel.c
··· 195 195 flow->deficit = q->quantum; 196 196 flow->dropped = 0; 197 197 } 198 - if (++sch->q.qlen < sch->limit) 198 + if (++sch->q.qlen <= sch->limit) 199 199 return NET_XMIT_SUCCESS; 200 200 201 201 q->drop_overlimit++;
+1 -1
net/sched/sch_generic.c
··· 904 904 u64 mult; 905 905 int shift; 906 906 907 - r->rate_bps = rate << 3; 907 + r->rate_bps = (u64)rate << 3; 908 908 r->shift = 0; 909 909 r->mult = 1; 910 910 /*
+3 -3
net/unix/af_unix.c
··· 1412 1412 if (UNIXCB(skb).cred) 1413 1413 return; 1414 1414 if (test_bit(SOCK_PASSCRED, &sock->flags) || 1415 - (other->sk_socket && 1416 - test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) { 1415 + !other->sk_socket || 1416 + test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) { 1417 1417 UNIXCB(skb).pid = get_pid(task_tgid(current)); 1418 1418 UNIXCB(skb).cred = get_current_cred(); 1419 1419 } ··· 1993 1993 if ((UNIXCB(skb).pid != siocb->scm->pid) || 1994 1994 (UNIXCB(skb).cred != siocb->scm->cred)) 1995 1995 break; 1996 - } else { 1996 + } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { 1997 1997 /* Copy credentials */ 1998 1998 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); 1999 1999 check_creds = 1;
+3 -3
net/vmw_vsock/af_vsock.c
··· 207 207 struct vsock_sock *vsk; 208 208 209 209 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) 210 - if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) 210 + if (addr->svm_port == vsk->local_addr.svm_port) 211 211 return sk_vsock(vsk); 212 212 213 213 return NULL; ··· 220 220 221 221 list_for_each_entry(vsk, vsock_connected_sockets(src, dst), 222 222 connected_table) { 223 - if (vsock_addr_equals_addr(src, &vsk->remote_addr) 224 - && vsock_addr_equals_addr(dst, &vsk->local_addr)) { 223 + if (vsock_addr_equals_addr(src, &vsk->remote_addr) && 224 + dst->svm_port == vsk->local_addr.svm_port) { 225 225 return sk_vsock(vsk); 226 226 } 227 227 }
+20 -11
net/vmw_vsock/vmci_transport.c
··· 464 464 struct vsock_sock *vlistener; 465 465 struct vsock_sock *vpending; 466 466 struct sock *pending; 467 + struct sockaddr_vm src; 468 + 469 + vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); 467 470 468 471 vlistener = vsock_sk(listener); 469 472 470 473 list_for_each_entry(vpending, &vlistener->pending_links, 471 474 pending_links) { 472 - struct sockaddr_vm src; 473 - struct sockaddr_vm dst; 474 - 475 - vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); 476 - vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); 477 - 478 475 if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && 479 - vsock_addr_equals_addr(&dst, &vpending->local_addr)) { 476 + pkt->dst_port == vpending->local_addr.svm_port) { 480 477 pending = sk_vsock(vpending); 481 478 sock_hold(pending); 482 479 goto found; ··· 736 739 */ 737 740 bh_lock_sock(sk); 738 741 739 - if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) 740 - vmci_trans(vsk)->notify_ops->handle_notify_pkt( 741 - sk, pkt, true, &dst, &src, 742 - &bh_process_pkt); 742 + if (!sock_owned_by_user(sk)) { 743 + /* The local context ID may be out of date, update it. */ 744 + vsk->local_addr.svm_cid = dst.svm_cid; 745 + 746 + if (sk->sk_state == SS_CONNECTED) 747 + vmci_trans(vsk)->notify_ops->handle_notify_pkt( 748 + sk, pkt, true, &dst, &src, 749 + &bh_process_pkt); 750 + } 743 751 744 752 bh_unlock_sock(sk); 745 753 ··· 904 902 905 903 lock_sock(sk); 906 904 905 + /* The local context ID may be out of date. */ 906 + vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; 907 + 907 908 switch (sk->sk_state) { 908 909 case SS_LISTEN: 909 910 vmci_transport_recv_listen(sk, pkt); ··· 963 958 pending = vmci_transport_get_pending(sk, pkt); 964 959 if (pending) { 965 960 lock_sock(pending); 961 + 962 + /* The local context ID may be out of date. 
*/ 963 + vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; 964 + 966 965 switch (pending->sk_state) { 967 966 case SS_CONNECTING: 968 967 err = vmci_transport_recv_connecting_server(sk,
-10
net/vmw_vsock/vsock_addr.c
··· 64 64 } 65 65 EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); 66 66 67 - bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, 68 - const struct sockaddr_vm *other) 69 - { 70 - return (addr->svm_cid == VMADDR_CID_ANY || 71 - other->svm_cid == VMADDR_CID_ANY || 72 - addr->svm_cid == other->svm_cid) && 73 - addr->svm_port == other->svm_port; 74 - } 75 - EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); 76 - 77 67 int vsock_addr_cast(const struct sockaddr *addr, 78 68 size_t len, struct sockaddr_vm **out_addr) 79 69 {
-2
net/vmw_vsock/vsock_addr.h
··· 24 24 void vsock_addr_unbind(struct sockaddr_vm *addr); 25 25 bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, 26 26 const struct sockaddr_vm *other); 27 - bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, 28 - const struct sockaddr_vm *other); 29 27 int vsock_addr_cast(const struct sockaddr *addr, size_t len, 30 28 struct sockaddr_vm **out_addr); 31 29
+47 -17
net/wireless/core.c
··· 212 212 rdev_rfkill_poll(rdev); 213 213 } 214 214 215 + void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, 216 + struct wireless_dev *wdev) 217 + { 218 + lockdep_assert_held(&rdev->devlist_mtx); 219 + lockdep_assert_held(&rdev->sched_scan_mtx); 220 + 221 + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) 222 + return; 223 + 224 + if (!wdev->p2p_started) 225 + return; 226 + 227 + rdev_stop_p2p_device(rdev, wdev); 228 + wdev->p2p_started = false; 229 + 230 + rdev->opencount--; 231 + 232 + if (rdev->scan_req && rdev->scan_req->wdev == wdev) { 233 + bool busy = work_busy(&rdev->scan_done_wk); 234 + 235 + /* 236 + * If the work isn't pending or running (in which case it would 237 + * be waiting for the lock we hold) the driver didn't properly 238 + * cancel the scan when the interface was removed. In this case 239 + * warn and leak the scan request object to not crash later. 240 + */ 241 + WARN_ON(!busy); 242 + 243 + rdev->scan_req->aborted = true; 244 + ___cfg80211_scan_done(rdev, !busy); 245 + } 246 + } 247 + 215 248 static int cfg80211_rfkill_set_block(void *data, bool blocked) 216 249 { 217 250 struct cfg80211_registered_device *rdev = data; ··· 254 221 return 0; 255 222 256 223 rtnl_lock(); 257 - mutex_lock(&rdev->devlist_mtx); 224 + 225 + /* read-only iteration need not hold the devlist_mtx */ 258 226 259 227 list_for_each_entry(wdev, &rdev->wdev_list, list) { 260 228 if (wdev->netdev) { ··· 265 231 /* otherwise, check iftype */ 266 232 switch (wdev->iftype) { 267 233 case NL80211_IFTYPE_P2P_DEVICE: 268 - if (!wdev->p2p_started) 269 - break; 270 - rdev_stop_p2p_device(rdev, wdev); 271 - wdev->p2p_started = false; 272 - rdev->opencount--; 234 + /* but this requires it */ 235 + mutex_lock(&rdev->devlist_mtx); 236 + mutex_lock(&rdev->sched_scan_mtx); 237 + cfg80211_stop_p2p_device(rdev, wdev); 238 + mutex_unlock(&rdev->sched_scan_mtx); 239 + mutex_unlock(&rdev->devlist_mtx); 273 240 break; 274 241 default: 275 242 break; 276 243 } 277 
244 } 278 245 279 - mutex_unlock(&rdev->devlist_mtx); 280 246 rtnl_unlock(); 281 247 282 248 return 0; ··· 779 745 wdev = container_of(work, struct wireless_dev, cleanup_work); 780 746 rdev = wiphy_to_dev(wdev->wiphy); 781 747 782 - cfg80211_lock_rdev(rdev); 748 + mutex_lock(&rdev->sched_scan_mtx); 783 749 784 750 if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { 785 751 rdev->scan_req->aborted = true; 786 752 ___cfg80211_scan_done(rdev, true); 787 753 } 788 - 789 - cfg80211_unlock_rdev(rdev); 790 - 791 - mutex_lock(&rdev->sched_scan_mtx); 792 754 793 755 if (WARN_ON(rdev->sched_scan_req && 794 756 rdev->sched_scan_req->dev == wdev->netdev)) { ··· 811 781 return; 812 782 813 783 mutex_lock(&rdev->devlist_mtx); 784 + mutex_lock(&rdev->sched_scan_mtx); 814 785 list_del_rcu(&wdev->list); 815 786 rdev->devlist_generation++; 816 787 817 788 switch (wdev->iftype) { 818 789 case NL80211_IFTYPE_P2P_DEVICE: 819 - if (!wdev->p2p_started) 820 - break; 821 - rdev_stop_p2p_device(rdev, wdev); 822 - wdev->p2p_started = false; 823 - rdev->opencount--; 790 + cfg80211_stop_p2p_device(rdev, wdev); 824 791 break; 825 792 default: 826 793 WARN_ON_ONCE(1); 827 794 break; 828 795 } 796 + mutex_unlock(&rdev->sched_scan_mtx); 829 797 mutex_unlock(&rdev->devlist_mtx); 830 798 } 831 799 EXPORT_SYMBOL(cfg80211_unregister_wdev); ··· 964 936 cfg80211_update_iface_num(rdev, wdev->iftype, 1); 965 937 cfg80211_lock_rdev(rdev); 966 938 mutex_lock(&rdev->devlist_mtx); 939 + mutex_lock(&rdev->sched_scan_mtx); 967 940 wdev_lock(wdev); 968 941 switch (wdev->iftype) { 969 942 #ifdef CONFIG_CFG80211_WEXT ··· 996 967 break; 997 968 } 998 969 wdev_unlock(wdev); 970 + mutex_unlock(&rdev->sched_scan_mtx); 999 971 rdev->opencount++; 1000 972 mutex_unlock(&rdev->devlist_mtx); 1001 973 cfg80211_unlock_rdev(rdev);
+3
net/wireless/core.h
··· 503 503 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, 504 504 enum nl80211_iftype iftype, int num); 505 505 506 + void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, 507 + struct wireless_dev *wdev); 508 + 506 509 #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 507 510 508 511 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
+27 -25
net/wireless/nl80211.c
··· 4702 4702 if (!rdev->ops->scan) 4703 4703 return -EOPNOTSUPP; 4704 4704 4705 - if (rdev->scan_req) 4706 - return -EBUSY; 4705 + mutex_lock(&rdev->sched_scan_mtx); 4706 + if (rdev->scan_req) { 4707 + err = -EBUSY; 4708 + goto unlock; 4709 + } 4707 4710 4708 4711 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { 4709 4712 n_channels = validate_scan_freqs( 4710 4713 info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); 4711 - if (!n_channels) 4712 - return -EINVAL; 4714 + if (!n_channels) { 4715 + err = -EINVAL; 4716 + goto unlock; 4717 + } 4713 4718 } else { 4714 4719 enum ieee80211_band band; 4715 4720 n_channels = 0; ··· 4728 4723 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) 4729 4724 n_ssids++; 4730 4725 4731 - if (n_ssids > wiphy->max_scan_ssids) 4732 - return -EINVAL; 4726 + if (n_ssids > wiphy->max_scan_ssids) { 4727 + err = -EINVAL; 4728 + goto unlock; 4729 + } 4733 4730 4734 4731 if (info->attrs[NL80211_ATTR_IE]) 4735 4732 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 4736 4733 else 4737 4734 ie_len = 0; 4738 4735 4739 - if (ie_len > wiphy->max_scan_ie_len) 4740 - return -EINVAL; 4736 + if (ie_len > wiphy->max_scan_ie_len) { 4737 + err = -EINVAL; 4738 + goto unlock; 4739 + } 4741 4740 4742 4741 request = kzalloc(sizeof(*request) 4743 4742 + sizeof(*request->ssids) * n_ssids 4744 4743 + sizeof(*request->channels) * n_channels 4745 4744 + ie_len, GFP_KERNEL); 4746 - if (!request) 4747 - return -ENOMEM; 4745 + if (!request) { 4746 + err = -ENOMEM; 4747 + goto unlock; 4748 + } 4748 4749 4749 4750 if (n_ssids) 4750 4751 request->ssids = (void *)&request->channels[n_channels]; ··· 4887 4876 kfree(request); 4888 4877 } 4889 4878 4879 + unlock: 4880 + mutex_unlock(&rdev->sched_scan_mtx); 4890 4881 return err; 4891 4882 } 4892 4883 ··· 7762 7749 if (!rdev->ops->stop_p2p_device) 7763 7750 return -EOPNOTSUPP; 7764 7751 7765 - if (!wdev->p2p_started) 7766 - return 0; 7767 - 7768 - rdev_stop_p2p_device(rdev, wdev); 7769 - wdev->p2p_started = false; 
7770 - 7771 - mutex_lock(&rdev->devlist_mtx); 7772 - rdev->opencount--; 7773 - mutex_unlock(&rdev->devlist_mtx); 7774 - 7775 - if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { 7776 - rdev->scan_req->aborted = true; 7777 - ___cfg80211_scan_done(rdev, true); 7778 - } 7752 + mutex_lock(&rdev->sched_scan_mtx); 7753 + cfg80211_stop_p2p_device(rdev, wdev); 7754 + mutex_unlock(&rdev->sched_scan_mtx); 7779 7755 7780 7756 return 0; 7781 7757 } ··· 8488 8486 struct nlattr *nest; 8489 8487 int i; 8490 8488 8491 - ASSERT_RDEV_LOCK(rdev); 8489 + lockdep_assert_held(&rdev->sched_scan_mtx); 8492 8490 8493 8491 if (WARN_ON(!req)) 8494 8492 return 0;
+16 -8
net/wireless/scan.c
··· 169 169 union iwreq_data wrqu; 170 170 #endif 171 171 172 - ASSERT_RDEV_LOCK(rdev); 172 + lockdep_assert_held(&rdev->sched_scan_mtx); 173 173 174 174 request = rdev->scan_req; 175 175 ··· 230 230 rdev = container_of(wk, struct cfg80211_registered_device, 231 231 scan_done_wk); 232 232 233 - cfg80211_lock_rdev(rdev); 233 + mutex_lock(&rdev->sched_scan_mtx); 234 234 ___cfg80211_scan_done(rdev, false); 235 - cfg80211_unlock_rdev(rdev); 235 + mutex_unlock(&rdev->sched_scan_mtx); 236 236 } 237 237 238 238 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) ··· 698 698 found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); 699 699 700 700 if (found) { 701 - found->pub.beacon_interval = tmp->pub.beacon_interval; 702 - found->pub.signal = tmp->pub.signal; 703 - found->pub.capability = tmp->pub.capability; 704 - found->ts = tmp->ts; 705 - 706 701 /* Update IEs */ 707 702 if (rcu_access_pointer(tmp->pub.proberesp_ies)) { 708 703 const struct cfg80211_bss_ies *old; ··· 718 723 719 724 if (found->pub.hidden_beacon_bss && 720 725 !list_empty(&found->hidden_list)) { 726 + const struct cfg80211_bss_ies *f; 727 + 721 728 /* 722 729 * The found BSS struct is one of the probe 723 730 * response members of a group, but we're ··· 729 732 * SSID to showing it, which is confusing so 730 733 * drop this information. 
731 734 */ 735 + 736 + f = rcu_access_pointer(tmp->pub.beacon_ies); 737 + kfree_rcu((struct cfg80211_bss_ies *)f, 738 + rcu_head); 732 739 goto drop; 733 740 } 734 741 ··· 762 761 kfree_rcu((struct cfg80211_bss_ies *)old, 763 762 rcu_head); 764 763 } 764 + 765 + found->pub.beacon_interval = tmp->pub.beacon_interval; 766 + found->pub.signal = tmp->pub.signal; 767 + found->pub.capability = tmp->pub.capability; 768 + found->ts = tmp->ts; 765 769 } else { 766 770 struct cfg80211_internal_bss *new; 767 771 struct cfg80211_internal_bss *hidden; ··· 1062 1056 if (IS_ERR(rdev)) 1063 1057 return PTR_ERR(rdev); 1064 1058 1059 + mutex_lock(&rdev->sched_scan_mtx); 1065 1060 if (rdev->scan_req) { 1066 1061 err = -EBUSY; 1067 1062 goto out; ··· 1169 1162 dev_hold(dev); 1170 1163 } 1171 1164 out: 1165 + mutex_unlock(&rdev->sched_scan_mtx); 1172 1166 kfree(creq); 1173 1167 cfg80211_unlock_rdev(rdev); 1174 1168 return err;
+4 -2
net/wireless/sme.c
··· 85 85 ASSERT_RTNL(); 86 86 ASSERT_RDEV_LOCK(rdev); 87 87 ASSERT_WDEV_LOCK(wdev); 88 + lockdep_assert_held(&rdev->sched_scan_mtx); 88 89 89 90 if (rdev->scan_req) 90 91 return -EBUSY; ··· 321 320 { 322 321 struct wireless_dev *wdev = dev->ieee80211_ptr; 323 322 324 - mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); 325 323 wdev_lock(wdev); 326 324 __cfg80211_sme_scan_done(dev); 327 325 wdev_unlock(wdev); 328 - mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); 329 326 } 330 327 331 328 void cfg80211_sme_rx_auth(struct net_device *dev, ··· 923 924 int err; 924 925 925 926 mutex_lock(&rdev->devlist_mtx); 927 + /* might request scan - scan_mtx -> wdev_mtx dependency */ 928 + mutex_lock(&rdev->sched_scan_mtx); 926 929 wdev_lock(dev->ieee80211_ptr); 927 930 err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); 928 931 wdev_unlock(dev->ieee80211_ptr); 932 + mutex_unlock(&rdev->sched_scan_mtx); 929 933 mutex_unlock(&rdev->devlist_mtx); 930 934 931 935 return err;
+3 -2
net/wireless/trace.h
··· 27 27 #define WIPHY_PR_ARG __entry->wiphy_name 28 28 29 29 #define WDEV_ENTRY __field(u32, id) 30 - #define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) 30 + #define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \ 31 + ? wdev->identifier : 0) 31 32 #define WDEV_PR_FMT "wdev(%u)" 32 33 #define WDEV_PR_ARG (__entry->id) 33 34 ··· 1779 1778 ), 1780 1779 TP_fast_assign( 1781 1780 WIPHY_ASSIGN; 1782 - WIPHY_ASSIGN; 1781 + NETDEV_ASSIGN; 1783 1782 __entry->acl_policy = params->acl_policy; 1784 1783 ), 1785 1784 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
+6
net/wireless/wext-sme.c
··· 89 89 90 90 cfg80211_lock_rdev(rdev); 91 91 mutex_lock(&rdev->devlist_mtx); 92 + mutex_lock(&rdev->sched_scan_mtx); 92 93 wdev_lock(wdev); 93 94 94 95 if (wdev->sme_state != CFG80211_SME_IDLE) { ··· 136 135 err = cfg80211_mgd_wext_connect(rdev, wdev); 137 136 out: 138 137 wdev_unlock(wdev); 138 + mutex_unlock(&rdev->sched_scan_mtx); 139 139 mutex_unlock(&rdev->devlist_mtx); 140 140 cfg80211_unlock_rdev(rdev); 141 141 return err; ··· 192 190 193 191 cfg80211_lock_rdev(rdev); 194 192 mutex_lock(&rdev->devlist_mtx); 193 + mutex_lock(&rdev->sched_scan_mtx); 195 194 wdev_lock(wdev); 196 195 197 196 err = 0; ··· 226 223 err = cfg80211_mgd_wext_connect(rdev, wdev); 227 224 out: 228 225 wdev_unlock(wdev); 226 + mutex_unlock(&rdev->sched_scan_mtx); 229 227 mutex_unlock(&rdev->devlist_mtx); 230 228 cfg80211_unlock_rdev(rdev); 231 229 return err; ··· 289 285 290 286 cfg80211_lock_rdev(rdev); 291 287 mutex_lock(&rdev->devlist_mtx); 288 + mutex_lock(&rdev->sched_scan_mtx); 292 289 wdev_lock(wdev); 293 290 294 291 if (wdev->sme_state != CFG80211_SME_IDLE) { ··· 318 313 err = cfg80211_mgd_wext_connect(rdev, wdev); 319 314 out: 320 315 wdev_unlock(wdev); 316 + mutex_unlock(&rdev->sched_scan_mtx); 321 317 mutex_unlock(&rdev->devlist_mtx); 322 318 cfg80211_unlock_rdev(rdev); 323 319 return err;
+65 -1
net/xfrm/xfrm_replay.c
··· 334 334 x->xflags &= ~XFRM_TIME_DEFER; 335 335 } 336 336 337 + static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) 338 + { 339 + u32 seq_diff, oseq_diff; 340 + struct km_event c; 341 + struct xfrm_replay_state_esn *replay_esn = x->replay_esn; 342 + struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; 343 + 344 + /* we send notify messages in case 345 + * 1. we updated on of the sequence numbers, and the seqno difference 346 + * is at least x->replay_maxdiff, in this case we also update the 347 + * timeout of our timer function 348 + * 2. if x->replay_maxage has elapsed since last update, 349 + * and there were changes 350 + * 351 + * The state structure must be locked! 352 + */ 353 + 354 + switch (event) { 355 + case XFRM_REPLAY_UPDATE: 356 + if (!x->replay_maxdiff) 357 + break; 358 + 359 + if (replay_esn->seq_hi == preplay_esn->seq_hi) 360 + seq_diff = replay_esn->seq - preplay_esn->seq; 361 + else 362 + seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; 363 + 364 + if (replay_esn->oseq_hi == preplay_esn->oseq_hi) 365 + oseq_diff = replay_esn->oseq - preplay_esn->oseq; 366 + else 367 + oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; 368 + 369 + if (seq_diff < x->replay_maxdiff && 370 + oseq_diff < x->replay_maxdiff) { 371 + 372 + if (x->xflags & XFRM_TIME_DEFER) 373 + event = XFRM_REPLAY_TIMEOUT; 374 + else 375 + return; 376 + } 377 + 378 + break; 379 + 380 + case XFRM_REPLAY_TIMEOUT: 381 + if (memcmp(x->replay_esn, x->preplay_esn, 382 + xfrm_replay_state_esn_len(replay_esn)) == 0) { 383 + x->xflags |= XFRM_TIME_DEFER; 384 + return; 385 + } 386 + 387 + break; 388 + } 389 + 390 + memcpy(x->preplay_esn, x->replay_esn, 391 + xfrm_replay_state_esn_len(replay_esn)); 392 + c.event = XFRM_MSG_NEWAE; 393 + c.data.aevent = event; 394 + km_state_notify(x, &c); 395 + 396 + if (x->replay_maxage && 397 + !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) 398 + x->xflags &= ~XFRM_TIME_DEFER; 399 + } 400 + 337 401 static int 
xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) 338 402 { 339 403 int err = 0; ··· 574 510 .advance = xfrm_replay_advance_esn, 575 511 .check = xfrm_replay_check_esn, 576 512 .recheck = xfrm_replay_recheck_esn, 577 - .notify = xfrm_replay_notify_bmp, 513 + .notify = xfrm_replay_notify_esn, 578 514 .overflow = xfrm_replay_overflow_esn, 579 515 }; 580 516
+1 -1
sound/pci/hda/hda_codec.c
··· 173 173 "Line Out", "Speaker", "HP Out", "CD", 174 174 "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", 175 175 "Line In", "Aux", "Mic", "Telephony", 176 - "SPDIF In", "Digitial In", "Reserved", "Other" 176 + "SPDIF In", "Digital In", "Reserved", "Other" 177 177 }; 178 178 179 179 return jack_types[(cfg & AC_DEFCFG_DEVICE)
+1 -1
sound/pci/hda/hda_eld.c
··· 320 320 unsigned char *buf, int *eld_size) 321 321 { 322 322 int i; 323 - int ret; 323 + int ret = 0; 324 324 int size; 325 325 326 326 /*
+1 -1
sound/pci/hda/hda_generic.c
··· 740 740 static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) 741 741 { 742 742 struct hda_gen_spec *spec = codec->spec; 743 - bool changed; 743 + bool changed = false; 744 744 int i; 745 745 746 746 if (!spec->power_down_unused || path->active)
+2 -4
sound/pci/hda/hda_intel.c
··· 134 134 * this may give more power-saving, but will take longer time to 135 135 * wake up. 136 136 */ 137 - static int power_save_controller = -1; 138 - module_param(power_save_controller, bint, 0644); 137 + static bool power_save_controller = 1; 138 + module_param(power_save_controller, bool, 0644); 139 139 MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode."); 140 140 #endif /* CONFIG_PM */ 141 141 ··· 2931 2931 struct snd_card *card = dev_get_drvdata(dev); 2932 2932 struct azx *chip = card->private_data; 2933 2933 2934 - if (power_save_controller > 0) 2935 - return 0; 2936 2934 if (!power_save_controller || 2937 2935 !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 2938 2936 return -EBUSY;
+1 -1
sound/pci/hda/patch_hdmi.c
··· 1196 1196 1197 1197 _snd_printd(SND_PR_VERBOSE, 1198 1198 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 1199 - codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); 1199 + codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid); 1200 1200 1201 1201 if (eld->eld_valid) { 1202 1202 if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer,
+3 -1
sound/pci/hda/patch_realtek.c
··· 3440 3440 const hda_nid_t *ssids; 3441 3441 3442 3442 if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 || 3443 - codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670) 3443 + codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 || 3444 + codec->vendor_id == 0x10ec0671) 3444 3445 ssids = alc663_ssids; 3445 3446 else 3446 3447 ssids = alc662_ssids; ··· 3895 3894 { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, 3896 3895 { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, 3897 3896 { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, 3897 + { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 }, 3898 3898 { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, 3899 3899 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, 3900 3900 { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
sound/soc/codecs/max98090.c
sound/soc/codecs/max98090.h
+1
sound/soc/codecs/si476x.c
··· 159 159 switch (params_format(params)) { 160 160 case SNDRV_PCM_FORMAT_S8: 161 161 width = SI476X_PCM_FORMAT_S8; 162 + break; 162 163 case SNDRV_PCM_FORMAT_S16_LE: 163 164 width = SI476X_PCM_FORMAT_S16_LE; 164 165 break;
+3 -2
sound/soc/codecs/wm_adsp.c
··· 828 828 &buf_list); 829 829 if (!buf) { 830 830 adsp_err(dsp, "Out of memory\n"); 831 - return -ENOMEM; 831 + ret = -ENOMEM; 832 + goto out_fw; 832 833 } 833 834 834 835 adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", ··· 866 865 wm_adsp_buf_free(&buf_list); 867 866 out: 868 867 kfree(file); 869 - return 0; 868 + return ret; 870 869 } 871 870 872 871 int wm_adsp1_init(struct wm_adsp *adsp)
+5
sound/soc/fsl/imx-ssi.c
··· 496 496 497 497 if (imx_ssi->ac97_reset) 498 498 imx_ssi->ac97_reset(ac97); 499 + /* First read sometimes fails, do a dummy read */ 500 + imx_ssi_ac97_read(ac97, 0); 499 501 } 500 502 501 503 static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) ··· 506 504 507 505 if (imx_ssi->ac97_warm_reset) 508 506 imx_ssi->ac97_warm_reset(ac97); 507 + 508 + /* First read sometimes fails, do a dummy read */ 509 + imx_ssi_ac97_read(ac97, 0); 509 510 } 510 511 511 512 struct snd_ac97_bus_ops soc_ac97_ops = {
+1 -1
sound/soc/fsl/pcm030-audio-fabric.c
··· 51 51 .num_links = ARRAY_SIZE(pcm030_fabric_dai), 52 52 }; 53 53 54 - static int __init pcm030_fabric_probe(struct platform_device *op) 54 + static int pcm030_fabric_probe(struct platform_device *op) 55 55 { 56 56 struct device_node *np = op->dev.of_node; 57 57 struct device_node *platform_np;
+2 -2
sound/soc/sh/dma-sh7760.c
··· 342 342 return 0; 343 343 } 344 344 345 - static struct snd_soc_platform sh7760_soc_platform = { 346 - .pcm_ops = &camelot_pcm_ops, 345 + static struct snd_soc_platform_driver sh7760_soc_platform = { 346 + .ops = &camelot_pcm_ops, 347 347 .pcm_new = camelot_pcm_new, 348 348 .pcm_free = camelot_pcm_free, 349 349 };
+4 -4
sound/soc/soc-core.c
··· 3140 3140 if (params->mask) { 3141 3141 ret = regmap_read(codec->control_data, params->base, &val); 3142 3142 if (ret != 0) 3143 - return ret; 3143 + goto out; 3144 3144 3145 3145 val &= params->mask; 3146 3146 ··· 3158 3158 ((u32 *)data)[0] |= cpu_to_be32(val); 3159 3159 break; 3160 3160 default: 3161 - return -EINVAL; 3161 + ret = -EINVAL; 3162 + goto out; 3162 3163 } 3163 3164 } 3164 3165 3165 3166 ret = regmap_raw_write(codec->control_data, params->base, 3166 3167 data, len); 3167 3168 3169 + out: 3168 3170 kfree(data); 3169 3171 3170 3172 return ret; ··· 4199 4197 dev_err(card->dev, 4200 4198 "ASoC: Property '%s' index %d could not be read: %d\n", 4201 4199 propname, 2 * i, ret); 4202 - kfree(routes); 4203 4200 return -EINVAL; 4204 4201 } 4205 4202 ret = of_property_read_string_index(np, propname, ··· 4207 4206 dev_err(card->dev, 4208 4207 "ASoC: Property '%s' index %d could not be read: %d\n", 4209 4208 propname, (2 * i) + 1, ret); 4210 - kfree(routes); 4211 4209 return -EINVAL; 4212 4210 } 4213 4211 }
+14
sound/soc/soc-dapm.c
··· 831 831 if (path->weak) 832 832 continue; 833 833 834 + if (path->walking) 835 + return 1; 836 + 834 837 if (path->walked) 835 838 continue; 836 839 ··· 841 838 842 839 if (path->sink && path->connect) { 843 840 path->walked = 1; 841 + path->walking = 1; 844 842 845 843 /* do we need to add this widget to the list ? */ 846 844 if (list) { ··· 851 847 dev_err(widget->dapm->dev, 852 848 "ASoC: could not add widget %s\n", 853 849 widget->name); 850 + path->walking = 0; 854 851 return con; 855 852 } 856 853 } 857 854 858 855 con += is_connected_output_ep(path->sink, list); 856 + 857 + path->walking = 0; 859 858 } 860 859 } 861 860 ··· 938 931 if (path->weak) 939 932 continue; 940 933 934 + if (path->walking) 935 + return 1; 936 + 941 937 if (path->walked) 942 938 continue; 943 939 ··· 948 938 949 939 if (path->source && path->connect) { 950 940 path->walked = 1; 941 + path->walking = 1; 951 942 952 943 /* do we need to add this widget to the list ? */ 953 944 if (list) { ··· 958 947 dev_err(widget->dapm->dev, 959 948 "ASoC: could not add widget %s\n", 960 949 widget->name); 950 + path->walking = 0; 961 951 return con; 962 952 } 963 953 } 964 954 965 955 con += is_connected_input_ep(path->source, list); 956 + 957 + path->walking = 0; 966 958 } 967 959 } 968 960
+6 -6
sound/soc/spear/spear_pcm.c
··· 149 149 150 150 static u64 spear_pcm_dmamask = DMA_BIT_MASK(32); 151 151 152 - static int spear_pcm_new(struct snd_card *card, 153 - struct snd_soc_dai *dai, struct snd_pcm *pcm) 152 + static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd) 154 153 { 154 + struct snd_card *card = rtd->card->snd_card; 155 155 int ret; 156 156 157 157 if (!card->dev->dma_mask) ··· 159 159 if (!card->dev->coherent_dma_mask) 160 160 card->dev->coherent_dma_mask = DMA_BIT_MASK(32); 161 161 162 - if (dai->driver->playback.channels_min) { 163 - ret = spear_pcm_preallocate_dma_buffer(pcm, 162 + if (rtd->cpu_dai->driver->playback.channels_min) { 163 + ret = spear_pcm_preallocate_dma_buffer(rtd->pcm, 164 164 SNDRV_PCM_STREAM_PLAYBACK, 165 165 spear_pcm_hardware.buffer_bytes_max); 166 166 if (ret) 167 167 return ret; 168 168 } 169 169 170 - if (dai->driver->capture.channels_min) { 171 - ret = spear_pcm_preallocate_dma_buffer(pcm, 170 + if (rtd->cpu_dai->driver->capture.channels_min) { 171 + ret = spear_pcm_preallocate_dma_buffer(rtd->pcm, 172 172 SNDRV_PCM_STREAM_CAPTURE, 173 173 spear_pcm_hardware.buffer_bytes_max); 174 174 if (ret)
+35 -10
sound/usb/clock.c
··· 253 253 { 254 254 struct usb_device *dev = chip->dev; 255 255 unsigned char data[4]; 256 - int err, crate; 256 + int err, cur_rate, prev_rate; 257 257 int clock = snd_usb_clock_find_source(chip, fmt->clock); 258 258 259 259 if (clock < 0) ··· 264 264 snd_printk(KERN_ERR "%d:%d:%d: clock source %d is not valid, cannot use\n", 265 265 dev->devnum, iface, fmt->altsetting, clock); 266 266 return -ENXIO; 267 + } 268 + 269 + err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, 270 + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 271 + UAC2_CS_CONTROL_SAM_FREQ << 8, 272 + snd_usb_ctrl_intf(chip) | (clock << 8), 273 + data, sizeof(data)); 274 + if (err < 0) { 275 + snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", 276 + dev->devnum, iface, fmt->altsetting); 277 + prev_rate = 0; 278 + } else { 279 + prev_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); 267 280 } 268 281 269 282 data[0] = rate; ··· 293 280 return err; 294 281 } 295 282 296 - if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, 297 - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 298 - UAC2_CS_CONTROL_SAM_FREQ << 8, 299 - snd_usb_ctrl_intf(chip) | (clock << 8), 300 - data, sizeof(data))) < 0) { 283 + err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, 284 + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 285 + UAC2_CS_CONTROL_SAM_FREQ << 8, 286 + snd_usb_ctrl_intf(chip) | (clock << 8), 287 + data, sizeof(data)); 288 + if (err < 0) { 301 289 snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", 302 290 dev->devnum, iface, fmt->altsetting); 303 - return err; 291 + cur_rate = 0; 292 + } else { 293 + cur_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); 304 294 } 305 295 306 - crate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); 307 - if (crate != rate) 308 - snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate); 296 + if (cur_rate != rate) { 
297 + snd_printd(KERN_WARNING 298 + "current rate %d is different from the runtime rate %d\n", 299 + cur_rate, rate); 300 + } 301 + 302 + /* Some devices doesn't respond to sample rate changes while the 303 + * interface is active. */ 304 + if (rate != prev_rate) { 305 + usb_set_interface(dev, iface, 0); 306 + usb_set_interface(dev, iface, fmt->altsetting); 307 + } 309 308 310 309 return 0; 311 310 }
+37 -10
virt/kvm/kvm_main.c
··· 1541 1541 } 1542 1542 1543 1543 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1544 - gpa_t gpa) 1544 + gpa_t gpa, unsigned long len) 1545 1545 { 1546 1546 struct kvm_memslots *slots = kvm_memslots(kvm); 1547 1547 int offset = offset_in_page(gpa); 1548 - gfn_t gfn = gpa >> PAGE_SHIFT; 1548 + gfn_t start_gfn = gpa >> PAGE_SHIFT; 1549 + gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 1550 + gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 1551 + gfn_t nr_pages_avail; 1549 1552 1550 1553 ghc->gpa = gpa; 1551 1554 ghc->generation = slots->generation; 1552 - ghc->memslot = gfn_to_memslot(kvm, gfn); 1553 - ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); 1554 - if (!kvm_is_error_hva(ghc->hva)) 1555 + ghc->len = len; 1556 + ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1557 + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); 1558 + if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { 1555 1559 ghc->hva += offset; 1556 - else 1557 - return -EFAULT; 1558 - 1560 + } else { 1561 + /* 1562 + * If the requested region crosses two memslots, we still 1563 + * verify that the entire region is valid here. 1564 + */ 1565 + while (start_gfn <= end_gfn) { 1566 + ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1567 + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1568 + &nr_pages_avail); 1569 + if (kvm_is_error_hva(ghc->hva)) 1570 + return -EFAULT; 1571 + start_gfn += nr_pages_avail; 1572 + } 1573 + /* Use the slow path for cross page reads and writes. 
*/ 1574 + ghc->memslot = NULL; 1575 + } 1559 1576 return 0; 1560 1577 } 1561 1578 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); ··· 1583 1566 struct kvm_memslots *slots = kvm_memslots(kvm); 1584 1567 int r; 1585 1568 1569 + BUG_ON(len > ghc->len); 1570 + 1586 1571 if (slots->generation != ghc->generation) 1587 - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); 1572 + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1573 + 1574 + if (unlikely(!ghc->memslot)) 1575 + return kvm_write_guest(kvm, ghc->gpa, data, len); 1588 1576 1589 1577 if (kvm_is_error_hva(ghc->hva)) 1590 1578 return -EFAULT; ··· 1609 1587 struct kvm_memslots *slots = kvm_memslots(kvm); 1610 1588 int r; 1611 1589 1590 + BUG_ON(len > ghc->len); 1591 + 1612 1592 if (slots->generation != ghc->generation) 1613 - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); 1593 + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1594 + 1595 + if (unlikely(!ghc->memslot)) 1596 + return kvm_read_guest(kvm, ghc->gpa, data, len); 1614 1597 1615 1598 if (kvm_is_error_hva(ghc->hva)) 1616 1599 return -EFAULT;