Merge branches 'release', 'drop_do_IRQ', 'fix_early_irq', 'misc-2.6.37', 'next-fixes', 'optimize-unwind', 'remove-compat-h' and 'stack_trace' into release

Tony Luck c0f37d2a 5d4bff94

+1041 -730
+6 -2
MAINTAINERS
··· 3925 F: drivers/mfd/ 3926 3927 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM 3928 - S: Orphan 3929 L: linux-mmc@vger.kernel.org 3930 F: drivers/mmc/ 3931 F: include/linux/mmc/ 3932 ··· 5099 F: drivers/mmc/host/sdricoh_cs.c 5100 5101 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER 5102 - S: Orphan 5103 L: linux-mmc@vger.kernel.org 5104 F: drivers/mmc/host/sdhci.* 5105 5106 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
··· 3925 F: drivers/mfd/ 3926 3927 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM 3928 + M: Chris Ball <cjb@laptop.org> 3929 L: linux-mmc@vger.kernel.org 3930 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git 3931 + S: Maintained 3932 F: drivers/mmc/ 3933 F: include/linux/mmc/ 3934 ··· 5097 F: drivers/mmc/host/sdricoh_cs.c 5098 5099 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER 5100 + M: Chris Ball <cjb@laptop.org> 5101 L: linux-mmc@vger.kernel.org 5102 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git 5103 + S: Maintained 5104 F: drivers/mmc/host/sdhci.* 5105 5106 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
+1 -1
Makefile
··· 1 VERSION = 2 2 PATCHLEVEL = 6 3 SUBLEVEL = 36 4 - EXTRAVERSION = -rc5 5 NAME = Sheep on Meth 6 7 # *DOCUMENTATION*
··· 1 VERSION = 2 2 PATCHLEVEL = 6 3 SUBLEVEL = 36 4 + EXTRAVERSION = -rc6 5 NAME = Sheep on Meth 6 7 # *DOCUMENTATION*
-9
arch/alpha/kernel/entry.S
··· 915 .end sys_execve 916 917 .align 4 918 - .globl osf_sigprocmask 919 - .ent osf_sigprocmask 920 - osf_sigprocmask: 921 - .prologue 0 922 - mov $sp, $18 923 - jmp $31, sys_osf_sigprocmask 924 - .end osf_sigprocmask 925 - 926 - .align 4 927 .globl alpha_ni_syscall 928 .ent alpha_ni_syscall 929 alpha_ni_syscall:
··· 915 .end sys_execve 916 917 .align 4 918 .globl alpha_ni_syscall 919 .ent alpha_ni_syscall 920 alpha_ni_syscall:
+14 -40
arch/alpha/kernel/signal.c
··· 41 /* 42 * The OSF/1 sigprocmask calling sequence is different from the 43 * C sigprocmask() sequence.. 44 - * 45 - * how: 46 - * 1 - SIG_BLOCK 47 - * 2 - SIG_UNBLOCK 48 - * 3 - SIG_SETMASK 49 - * 50 - * We change the range to -1 .. 1 in order to let gcc easily 51 - * use the conditional move instructions. 52 - * 53 - * Note that we don't need to acquire the kernel lock for SMP 54 - * operation, as all of this is local to this thread. 55 */ 56 - SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, 57 - struct pt_regs *, regs) 58 { 59 - unsigned long oldmask = -EINVAL; 60 61 - if ((unsigned long)how-1 <= 2) { 62 - long sign = how-2; /* -1 .. 1 */ 63 - unsigned long block, unblock; 64 - 65 - newmask &= _BLOCKABLE; 66 - spin_lock_irq(&current->sighand->siglock); 67 - oldmask = current->blocked.sig[0]; 68 - 69 - unblock = oldmask & ~newmask; 70 - block = oldmask | newmask; 71 - if (!sign) 72 - block = unblock; 73 - if (sign <= 0) 74 - newmask = block; 75 - if (_NSIG_WORDS > 1 && sign > 0) 76 - sigemptyset(&current->blocked); 77 - current->blocked.sig[0] = newmask; 78 - recalc_sigpending(); 79 - spin_unlock_irq(&current->sighand->siglock); 80 - 81 - regs->r0 = 0; /* special no error return */ 82 } 83 - return oldmask; 84 } 85 86 SYSCALL_DEFINE3(osf_sigaction, int, sig, ··· 68 old_sigset_t mask; 69 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 70 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 71 - __get_user(new_ka.sa.sa_flags, &act->sa_flags)) 72 return -EFAULT; 73 - __get_user(mask, &act->sa_mask); 74 siginitset(&new_ka.sa.sa_mask, mask); 75 new_ka.ka_restorer = NULL; 76 } ··· 80 if (!ret && oact) { 81 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 82 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 83 - __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) 84 return -EFAULT; 85 - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); 86 } 87 88 return ret;
··· 41 /* 42 * The OSF/1 sigprocmask calling sequence is different from the 43 * C sigprocmask() sequence.. 44 */ 45 + SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) 46 { 47 + sigset_t oldmask; 48 + sigset_t mask; 49 + unsigned long res; 50 51 + siginitset(&mask, newmask & ~_BLOCKABLE); 52 + res = sigprocmask(how, &mask, &oldmask); 53 + if (!res) { 54 + force_successful_syscall_return(); 55 + res = oldmask.sig[0]; 56 } 57 + return res; 58 } 59 60 SYSCALL_DEFINE3(osf_sigaction, int, sig, ··· 94 old_sigset_t mask; 95 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 96 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 97 + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 98 + __get_user(mask, &act->sa_mask)) 99 return -EFAULT; 100 siginitset(&new_ka.sa.sa_mask, mask); 101 new_ka.ka_restorer = NULL; 102 } ··· 106 if (!ret && oact) { 107 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 108 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 109 + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 110 + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 111 return -EFAULT; 112 } 113 114 return ret;
+1 -1
arch/alpha/kernel/systbls.S
··· 58 .quad sys_open /* 45 */ 59 .quad alpha_ni_syscall 60 .quad sys_getxgid 61 - .quad osf_sigprocmask 62 .quad alpha_ni_syscall 63 .quad alpha_ni_syscall /* 50 */ 64 .quad sys_acct
··· 58 .quad sys_open /* 45 */ 59 .quad alpha_ni_syscall 60 .quad sys_getxgid 61 + .quad sys_osf_sigprocmask 62 .quad alpha_ni_syscall 63 .quad alpha_ni_syscall /* 50 */ 64 .quad sys_acct
+26 -1
arch/arm/Kconfig
··· 271 bool "Atmel AT91" 272 select ARCH_REQUIRE_GPIOLIB 273 select HAVE_CLK 274 - select ARCH_USES_GETTIMEOFFSET 275 help 276 This enables support for systems based on the Atmel AT91RM9200, 277 AT91SAM9 and AT91CAP9 processors. ··· 1049 workaround disables the write-allocate mode for the L2 cache via the 1050 ACTLR register. Note that setting specific bits in the ACTLR register 1051 may not be available in non-secure mode. 1052 1053 config PL310_ERRATA_588369 1054 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
··· 271 bool "Atmel AT91" 272 select ARCH_REQUIRE_GPIOLIB 273 select HAVE_CLK 274 help 275 This enables support for systems based on the Atmel AT91RM9200, 276 AT91SAM9 and AT91CAP9 processors. ··· 1050 workaround disables the write-allocate mode for the L2 cache via the 1051 ACTLR register. Note that setting specific bits in the ACTLR register 1052 may not be available in non-secure mode. 1053 + 1054 + config ARM_ERRATA_742230 1055 + bool "ARM errata: DMB operation may be faulty" 1056 + depends on CPU_V7 && SMP 1057 + help 1058 + This option enables the workaround for the 742230 Cortex-A9 1059 + (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction 1060 + between two write operations may not ensure the correct visibility 1061 + ordering of the two writes. This workaround sets a specific bit in 1062 + the diagnostic register of the Cortex-A9 which causes the DMB 1063 + instruction to behave as a DSB, ensuring the correct behaviour of 1064 + the two writes. 1065 + 1066 + config ARM_ERRATA_742231 1067 + bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption" 1068 + depends on CPU_V7 && SMP 1069 + help 1070 + This option enables the workaround for the 742231 Cortex-A9 1071 + (r2p0..r2p2) erratum. Under certain conditions, specific to the 1072 + Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode, 1073 + accessing some data located in the same cache line, may get corrupted 1074 + data due to bad handling of the address hazard when the line gets 1075 + replaced from one of the CPUs at the same time as another CPU is 1076 + accessing it. This workaround sets specific bits in the diagnostic 1077 + register of the Cortex-A9 which reduces the linefill issuing 1078 + capabilities of the processor. 1079 1080 config PL310_ERRATA_588369 1081 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+1 -1
arch/arm/boot/compressed/Makefile
··· 116 $(obj)/font.c: $(FONTC) 117 $(call cmd,shipped) 118 119 - $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config 120 @sed "$(SEDFLAGS)" < $< > $@
··· 116 $(obj)/font.c: $(FONTC) 117 $(call cmd,shipped) 118 119 + $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG) 120 @sed "$(SEDFLAGS)" < $< > $@
+4
arch/arm/include/asm/pgtable.h
··· 317 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 318 #define pgprot_dmacoherent(prot) \ 319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 320 #else 321 #define pgprot_dmacoherent(prot) \ 322 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
··· 317 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 318 #define pgprot_dmacoherent(prot) \ 319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 320 + #define __HAVE_PHYS_MEM_ACCESS_PROT 321 + struct file; 322 + extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 323 + unsigned long size, pgprot_t vma_prot); 324 #else 325 #define pgprot_dmacoherent(prot) \ 326 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+2
arch/arm/kernel/entry-common.S
··· 48 beq no_work_pending 49 mov r0, sp @ 'regs' 50 mov r2, why @ 'syscall' 51 bl do_notify_resume 52 b ret_slow_syscall @ Check work again 53
··· 48 beq no_work_pending 49 mov r0, sp @ 'regs' 50 mov r2, why @ 'syscall' 51 + tst r1, #_TIF_SIGPENDING @ delivering a signal? 52 + movne why, #0 @ prevent further restarts 53 bl do_notify_resume 54 b ret_slow_syscall @ Check work again 55
+2 -2
arch/arm/mach-at91/at91sam9g45_devices.c
··· 426 .sda_is_open_drain = 1, 427 .scl_pin = AT91_PIN_PA21, 428 .scl_is_open_drain = 1, 429 - .udelay = 2, /* ~100 kHz */ 430 }; 431 432 static struct platform_device at91sam9g45_twi0_device = { ··· 440 .sda_is_open_drain = 1, 441 .scl_pin = AT91_PIN_PB11, 442 .scl_is_open_drain = 1, 443 - .udelay = 2, /* ~100 kHz */ 444 }; 445 446 static struct platform_device at91sam9g45_twi1_device = {
··· 426 .sda_is_open_drain = 1, 427 .scl_pin = AT91_PIN_PA21, 428 .scl_is_open_drain = 1, 429 + .udelay = 5, /* ~100 kHz */ 430 }; 431 432 static struct platform_device at91sam9g45_twi0_device = { ··· 440 .sda_is_open_drain = 1, 441 .scl_pin = AT91_PIN_PB11, 442 .scl_is_open_drain = 1, 443 + .udelay = 5, /* ~100 kHz */ 444 }; 445 446 static struct platform_device at91sam9g45_twi1_device = {
+1 -2
arch/arm/mach-davinci/dm355.c
··· 769 .virtual = SRAM_VIRT, 770 .pfn = __phys_to_pfn(0x00010000), 771 .length = SZ_32K, 772 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 773 - .type = MT_DEVICE, 774 }, 775 }; 776
··· 769 .virtual = SRAM_VIRT, 770 .pfn = __phys_to_pfn(0x00010000), 771 .length = SZ_32K, 772 + .type = MT_MEMORY_NONCACHED, 773 }, 774 }; 775
+1 -2
arch/arm/mach-davinci/dm365.c
··· 969 .virtual = SRAM_VIRT, 970 .pfn = __phys_to_pfn(0x00010000), 971 .length = SZ_32K, 972 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 973 - .type = MT_DEVICE, 974 }, 975 }; 976
··· 969 .virtual = SRAM_VIRT, 970 .pfn = __phys_to_pfn(0x00010000), 971 .length = SZ_32K, 972 + .type = MT_MEMORY_NONCACHED, 973 }, 974 }; 975
+1 -2
arch/arm/mach-davinci/dm644x.c
··· 653 .virtual = SRAM_VIRT, 654 .pfn = __phys_to_pfn(0x00008000), 655 .length = SZ_16K, 656 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 657 - .type = MT_DEVICE, 658 }, 659 }; 660
··· 653 .virtual = SRAM_VIRT, 654 .pfn = __phys_to_pfn(0x00008000), 655 .length = SZ_16K, 656 + .type = MT_MEMORY_NONCACHED, 657 }, 658 }; 659
+1 -2
arch/arm/mach-davinci/dm646x.c
··· 737 .virtual = SRAM_VIRT, 738 .pfn = __phys_to_pfn(0x00010000), 739 .length = SZ_32K, 740 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 741 - .type = MT_DEVICE, 742 }, 743 }; 744
··· 737 .virtual = SRAM_VIRT, 738 .pfn = __phys_to_pfn(0x00010000), 739 .length = SZ_32K, 740 + .type = MT_MEMORY_NONCACHED, 741 }, 742 }; 743
+3 -3
arch/arm/mach-dove/include/mach/io.h
··· 13 14 #define IO_SPACE_LIMIT 0xffffffff 15 16 - #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ 17 - DOVE_PCIE0_IO_VIRT_BASE)) 18 - #define __mem_pci(a) (a) 19 20 #endif
··· 13 14 #define IO_SPACE_LIMIT 0xffffffff 15 16 + #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \ 17 + DOVE_PCIE0_IO_VIRT_BASE)) 18 + #define __mem_pci(a) (a) 19 20 #endif
+1 -1
arch/arm/mach-kirkwood/include/mach/kirkwood.h
··· 38 39 #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 40 #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 41 - #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 42 #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 43 44 #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
··· 38 39 #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 40 #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 41 + #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000 42 #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 43 44 #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
+2 -2
arch/arm/mach-kirkwood/pcie.c
··· 117 * IORESOURCE_IO 118 */ 119 pp->res[0].name = "PCIe 0 I/O Space"; 120 - pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 122 pp->res[0].flags = IORESOURCE_IO; 123 ··· 139 * IORESOURCE_IO 140 */ 141 pp->res[0].name = "PCIe 1 I/O Space"; 142 - pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 144 pp->res[0].flags = IORESOURCE_IO; 145
··· 117 * IORESOURCE_IO 118 */ 119 pp->res[0].name = "PCIe 0 I/O Space"; 120 + pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE; 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 122 pp->res[0].flags = IORESOURCE_IO; 123 ··· 139 * IORESOURCE_IO 140 */ 141 pp->res[0].name = "PCIe 1 I/O Space"; 142 + pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE; 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 144 pp->res[0].flags = IORESOURCE_IO; 145
+6 -1
arch/arm/mach-mmp/include/mach/system.h
··· 9 #ifndef __ASM_MACH_SYSTEM_H 10 #define __ASM_MACH_SYSTEM_H 11 12 static inline void arch_idle(void) 13 { 14 cpu_do_idle(); ··· 18 19 static inline void arch_reset(char mode, const char *cmd) 20 { 21 - cpu_reset(0); 22 } 23 #endif /* __ASM_MACH_SYSTEM_H */
··· 9 #ifndef __ASM_MACH_SYSTEM_H 10 #define __ASM_MACH_SYSTEM_H 11 12 + #include <mach/cputype.h> 13 + 14 static inline void arch_idle(void) 15 { 16 cpu_do_idle(); ··· 16 17 static inline void arch_reset(char mode, const char *cmd) 18 { 19 + if (cpu_is_pxa168()) 20 + cpu_reset(0xffff0000); 21 + else 22 + cpu_reset(0); 23 } 24 #endif /* __ASM_MACH_SYSTEM_H */
+1 -2
arch/arm/mach-pxa/cpufreq-pxa2xx.c
··· 312 freqs.cpu = policy->cpu; 313 314 if (freq_debug) 315 - pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " 316 - "(SDRAM %d Mhz)\n", 317 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 318 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 319
··· 312 freqs.cpu = policy->cpu; 313 314 if (freq_debug) 315 + pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", 316 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 317 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 318
+12
arch/arm/mach-pxa/include/mach/hardware.h
··· 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 265 * == 0x3 for pxa300/pxa310/pxa320 266 */ 267 #define __cpu_is_pxa2xx(id) \ 268 ({ \ 269 unsigned int _id = (id) >> 13 & 0x7; \ 270 _id <= 0x2; \ 271 }) 272 273 #define __cpu_is_pxa3xx(id) \ 274 ({ \ 275 unsigned int _id = (id) >> 13 & 0x7; \ 276 _id == 0x3; \ 277 }) 278 279 #define __cpu_is_pxa93x(id) \ 280 ({ \ 281 unsigned int _id = (id) >> 4 & 0xfff; \ 282 _id == 0x683 || _id == 0x693; \ 283 }) 284 285 #define cpu_is_pxa2xx() \ 286 ({ \
··· 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 265 * == 0x3 for pxa300/pxa310/pxa320 266 */ 267 + #if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) 268 #define __cpu_is_pxa2xx(id) \ 269 ({ \ 270 unsigned int _id = (id) >> 13 & 0x7; \ 271 _id <= 0x2; \ 272 }) 273 + #else 274 + #define __cpu_is_pxa2xx(id) (0) 275 + #endif 276 277 + #ifdef CONFIG_PXA3xx 278 #define __cpu_is_pxa3xx(id) \ 279 ({ \ 280 unsigned int _id = (id) >> 13 & 0x7; \ 281 _id == 0x3; \ 282 }) 283 + #else 284 + #define __cpu_is_pxa3xx(id) (0) 285 + #endif 286 287 + #if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935) 288 #define __cpu_is_pxa93x(id) \ 289 ({ \ 290 unsigned int _id = (id) >> 4 & 0xfff; \ 291 _id == 0x683 || _id == 0x693; \ 292 }) 293 + #else 294 + #define __cpu_is_pxa93x(id) (0) 295 + #endif 296 297 #define cpu_is_pxa2xx() \ 298 ({ \
+5 -1
arch/arm/mach-pxa/palm27x.c
··· 469 }, 470 }; 471 472 void __init palm27x_pmic_init(void) 473 { 474 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 475 - pxa27x_set_i2c_power_info(NULL); 476 } 477 #endif
··· 469 }, 470 }; 471 472 + static struct i2c_pxa_platform_data palm27x_i2c_power_info = { 473 + .use_pio = 1, 474 + }; 475 + 476 void __init palm27x_pmic_init(void) 477 { 478 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 479 + pxa27x_set_i2c_power_info(&palm27x_i2c_power_info); 480 } 481 #endif
+1
arch/arm/mach-pxa/vpac270.c
··· 240 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 241 static struct pxamci_platform_data vpac270_mci_platform_data = { 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 243 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 244 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 245 .detect_delay_ms = 200,
··· 240 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 241 static struct pxamci_platform_data vpac270_mci_platform_data = { 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 243 + .gpio_power = -1, 244 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 245 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 246 .detect_delay_ms = 200,
+3
arch/arm/mach-u300/include/mach/gpio.h
··· 273 extern int gpio_get_value(unsigned gpio); 274 extern void gpio_set_value(unsigned gpio, int value); 275 276 /* wrappers to sleep-enable the previous two functions */ 277 static inline unsigned gpio_to_irq(unsigned gpio) 278 {
··· 273 extern int gpio_get_value(unsigned gpio); 274 extern void gpio_set_value(unsigned gpio, int value); 275 276 + #define gpio_get_value_cansleep gpio_get_value 277 + #define gpio_set_value_cansleep gpio_set_value 278 + 279 /* wrappers to sleep-enable the previous two functions */ 280 static inline unsigned gpio_to_irq(unsigned gpio) 281 {
+7 -1
arch/arm/mach-vexpress/ct-ca9x4.c
··· 227 int i; 228 229 #ifdef CONFIG_CACHE_L2X0 230 - l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); 231 #endif 232 233 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
··· 227 int i; 228 229 #ifdef CONFIG_CACHE_L2X0 230 + void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); 231 + 232 + /* set RAM latencies to 1 cycle for this core tile. */ 233 + writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); 234 + writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); 235 + 236 + l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); 237 #endif 238 239 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+17 -2
arch/arm/mm/alignment.c
··· 885 886 if (ai_usermode & UM_SIGNAL) 887 force_sig(SIGBUS, current); 888 - else 889 - set_cr(cr_no_alignment); 890 891 return 0; 892 }
··· 885 886 if (ai_usermode & UM_SIGNAL) 887 force_sig(SIGBUS, current); 888 + else { 889 + /* 890 + * We're about to disable the alignment trap and return to 891 + * user space. But if an interrupt occurs before actually 892 + * reaching user space, then the IRQ vector entry code will 893 + * notice that we were still in kernel space and therefore 894 + * the alignment trap won't be re-enabled in that case as it 895 + * is presumed to be always on from kernel space. 896 + * Let's prevent that race by disabling interrupts here (they 897 + * are disabled on the way back to user space anyway in 898 + * entry-common.S) and disable the alignment trap only if 899 + * there is no work pending for this thread. 900 + */ 901 + raw_local_irq_disable(); 902 + if (!(current_thread_info()->flags & _TIF_WORK_MASK)) 903 + set_cr(cr_no_alignment); 904 + } 905 906 return 0; 907 }
+29 -2
arch/arm/mm/mmu.c
··· 15 #include <linux/nodemask.h> 16 #include <linux/memblock.h> 17 #include <linux/sort.h> 18 19 #include <asm/cputype.h> 20 #include <asm/sections.h> ··· 247 .domain = DOMAIN_USER, 248 }, 249 [MT_MEMORY] = { 250 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 251 .domain = DOMAIN_KERNEL, 252 }, ··· 258 .domain = DOMAIN_KERNEL, 259 }, 260 [MT_MEMORY_NONCACHED] = { 261 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 262 .domain = DOMAIN_KERNEL, 263 }, ··· 418 * Enable CPU-specific coherency if supported. 419 * (Only available on XSC3 at the moment.) 420 */ 421 - if (arch_is_coherent() && cpu_is_xsc3()) 422 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 423 - 424 /* 425 * ARMv6 and above have extended page tables. 426 */ ··· 448 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 449 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 450 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 451 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 452 #endif 453 } 454 ··· 487 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 488 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 489 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 490 mem_types[MT_ROM].prot_sect |= cp->pmd; 491 492 switch (cp->pmd) { ··· 511 t->prot_sect |= PMD_DOMAIN(t->domain); 512 } 513 } 514 515 #define vectors_base() (vectors_high() ? 0xffff0000 : 0) 516
··· 15 #include <linux/nodemask.h> 16 #include <linux/memblock.h> 17 #include <linux/sort.h> 18 + #include <linux/fs.h> 19 20 #include <asm/cputype.h> 21 #include <asm/sections.h> ··· 246 .domain = DOMAIN_USER, 247 }, 248 [MT_MEMORY] = { 249 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 250 + L_PTE_USER | L_PTE_EXEC, 251 + .prot_l1 = PMD_TYPE_TABLE, 252 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 253 .domain = DOMAIN_KERNEL, 254 }, ··· 254 .domain = DOMAIN_KERNEL, 255 }, 256 [MT_MEMORY_NONCACHED] = { 257 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 258 + L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, 259 + .prot_l1 = PMD_TYPE_TABLE, 260 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 261 .domain = DOMAIN_KERNEL, 262 }, ··· 411 * Enable CPU-specific coherency if supported. 412 * (Only available on XSC3 at the moment.) 413 */ 414 + if (arch_is_coherent() && cpu_is_xsc3()) { 415 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 416 + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 417 + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 418 + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 419 + } 420 /* 421 * ARMv6 and above have extended page tables. 422 */ ··· 438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 439 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 440 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 441 + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 442 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 443 + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 444 #endif 445 } 446 ··· 475 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 476 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 477 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 478 + mem_types[MT_MEMORY].prot_pte |= kern_pgprot; 479 + mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; 480 mem_types[MT_ROM].prot_sect |= cp->pmd; 481 482 switch (cp->pmd) { ··· 497 t->prot_sect |= PMD_DOMAIN(t->domain); 498 } 499 } 500 + 501 + #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 502 + pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 503 + unsigned long size, pgprot_t vma_prot) 504 + { 505 + if (!pfn_valid(pfn)) 506 + return pgprot_noncached(vma_prot); 507 + else if (file->f_flags & O_SYNC) 508 + return pgprot_writecombine(vma_prot); 509 + return vma_prot; 510 + } 511 + EXPORT_SYMBOL(phys_mem_access_prot); 512 + #endif 513 514 #define vectors_base() (vectors_high() ? 0xffff0000 : 0) 515
+56 -6
arch/arm/mm/proc-v7.S
··· 186 * It is assumed that: 187 * - cache type register is implemented 188 */ 189 - __v7_setup: 190 #ifdef CONFIG_SMP 191 mrc p15, 0, r0, c1, c0, 1 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 195 #endif 196 adr r12, __v7_setup_stack @ the local stack 197 stmia r12, {r0-r5, r7, r9, r11, lr} 198 bl v7_flush_dcache_all ··· 202 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 203 and r10, r0, #0xff000000 @ ARM? 204 teq r10, #0x41000000 205 - bne 2f 206 and r5, r0, #0x00f00000 @ variant 207 and r6, r0, #0x0000000f @ revision 208 - orr r0, r6, r5, lsr #20-4 @ combine variant and revision 209 210 #ifdef CONFIG_ARM_ERRATA_430973 211 teq r5, #0x00100000 @ only present in r1p* 212 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register ··· 219 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 220 #endif 221 #ifdef CONFIG_ARM_ERRATA_458693 222 - teq r0, #0x20 @ only present in r2p0 223 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 224 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 225 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 226 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 227 #endif 228 #ifdef CONFIG_ARM_ERRATA_460075 229 - teq r0, #0x20 @ only present in r2p0 230 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 231 tsteq r10, #1 << 22 232 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 233 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 234 #endif 235 236 - 2: mov r10, #0 237 #ifdef HARVARD_CACHE 238 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 239 #endif ··· 349 .align 350 351 .section ".proc.info.init", #alloc, #execinstr 352 353 /* 354 * Match any ARMv7 processor core.
··· 186 * It is assumed that: 187 * - cache type register is implemented 188 */ 189 + __v7_ca9mp_setup: 190 #ifdef CONFIG_SMP 191 mrc p15, 0, r0, c1, c0, 1 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 195 #endif 196 + __v7_setup: 197 adr r12, __v7_setup_stack @ the local stack 198 stmia r12, {r0-r5, r7, r9, r11, lr} 199 bl v7_flush_dcache_all ··· 201 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 202 and r10, r0, #0xff000000 @ ARM? 203 teq r10, #0x41000000 204 + bne 3f 205 and r5, r0, #0x00f00000 @ variant 206 and r6, r0, #0x0000000f @ revision 207 + orr r6, r6, r5, lsr #20-4 @ combine variant and revision 208 + ubfx r0, r0, #4, #12 @ primary part number 209 210 + /* Cortex-A8 Errata */ 211 + ldr r10, =0x00000c08 @ Cortex-A8 primary part number 212 + teq r0, r10 213 + bne 2f 214 #ifdef CONFIG_ARM_ERRATA_430973 215 teq r5, #0x00100000 @ only present in r1p* 216 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register ··· 213 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 214 #endif 215 #ifdef CONFIG_ARM_ERRATA_458693 216 + teq r6, #0x20 @ only present in r2p0 217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 218 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 219 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 220 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 221 #endif 222 #ifdef CONFIG_ARM_ERRATA_460075 223 + teq r6, #0x20 @ only present in r2p0 224 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 225 tsteq r10, #1 << 22 226 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 227 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 228 #endif 229 + b 3f 230 231 + /* Cortex-A9 Errata */ 232 + 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number 233 + teq r0, r10 234 + bne 3f 235 + #ifdef CONFIG_ARM_ERRATA_742230 236 + cmp r6, #0x22 @ only present up to r2p2 237 + mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register 238 + orrle r10, r10, #1 << 4 @ set bit #4 239 + mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register 240 + #endif 241 + #ifdef CONFIG_ARM_ERRATA_742231 242 + teq r6, #0x20 @ present in r2p0 243 + teqne r6, #0x21 @ present in r2p1 244 + teqne r6, #0x22 @ present in r2p2 245 + mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register 246 + orreq r10, r10, #1 << 12 @ set bit #12 247 + orreq r10, r10, #1 << 22 @ set bit #22 248 + mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 249 + #endif 250 + 251 + 3: mov r10, #0 252 #ifdef HARVARD_CACHE 253 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 254 #endif ··· 322 .align 323 324 .section ".proc.info.init", #alloc, #execinstr 325 + 326 + .type __v7_ca9mp_proc_info, #object 327 + __v7_ca9mp_proc_info: 328 + .long 0x410fc090 @ Required ID value 329 + .long 0xff0ffff0 @ Mask for ID 330 + .long PMD_TYPE_SECT | \ 331 + PMD_SECT_AP_WRITE | \ 332 + PMD_SECT_AP_READ | \ 333 + PMD_FLAGS 334 + .long PMD_TYPE_SECT | \ 335 + PMD_SECT_XN | \ 336 + PMD_SECT_AP_WRITE | \ 337 + PMD_SECT_AP_READ 338 + b __v7_ca9mp_setup 339 + .long cpu_arch_name 340 + .long cpu_elf_name 341 + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 342 + .long cpu_v7_name 343 + .long v7_processor_functions 344 + .long v7wbi_tlb_fns 345 + .long v6_user_fns 346 + .long v7_cache_fns 347 + .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 348 349 /* 350 * Match any ARMv7 processor core.
+12 -21
arch/arm/plat-nomadik/timer.c
··· 1 /* 2 - * linux/arch/arm/mach-nomadik/timer.c 3 * 4 * Copyright (C) 2008 STMicroelectronics 5 * Copyright (C) 2010 Alessandro Rubini ··· 75 cr = readl(mtu_base + MTU_CR(1)); 76 writel(0, mtu_base + MTU_LR(1)); 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 78 - writel(0x2, mtu_base + MTU_IMSC); 79 break; 80 case CLOCK_EVT_MODE_SHUTDOWN: 81 case CLOCK_EVT_MODE_UNUSED: ··· 131 { 132 unsigned long rate; 133 struct clk *clk0; 134 - struct clk *clk1; 135 - u32 cr; 136 137 clk0 = clk_get_sys("mtu0", NULL); 138 BUG_ON(IS_ERR(clk0)); 139 140 - clk1 = clk_get_sys("mtu1", NULL); 141 - BUG_ON(IS_ERR(clk1)); 142 - 143 clk_enable(clk0); 144 - clk_enable(clk1); 145 146 /* 147 - * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: 148 - * use a divide-by-16 counter if it's more than 16MHz 149 */ 150 - cr = MTU_CRn_32BITS;; 151 rate = clk_get_rate(clk0); 152 - if (rate > 16 << 20) { 153 rate /= 16; 154 cr |= MTU_CRn_PRESCALE_16; 155 } else { ··· 168 pr_err("timer: failed to initialize clock source %s\n", 169 nmdk_clksrc.name); 170 171 - /* Timer 1 is used for events, fix according to rate */ 172 - cr = MTU_CRn_32BITS; 173 - rate = clk_get_rate(clk1); 174 - if (rate > 16 << 20) { 175 - rate /= 16; 176 - cr |= MTU_CRn_PRESCALE_16; 177 - } else { 178 - cr |= MTU_CRn_PRESCALE_1; 179 - } 180 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 181 182 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
··· 1 /* 2 + * linux/arch/arm/plat-nomadik/timer.c 3 * 4 * Copyright (C) 2008 STMicroelectronics 5 * Copyright (C) 2010 Alessandro Rubini ··· 75 cr = readl(mtu_base + MTU_CR(1)); 76 writel(0, mtu_base + MTU_LR(1)); 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 78 + writel(1 << 1, mtu_base + MTU_IMSC); 79 break; 80 case CLOCK_EVT_MODE_SHUTDOWN: 81 case CLOCK_EVT_MODE_UNUSED: ··· 131 { 132 unsigned long rate; 133 struct clk *clk0; 134 + u32 cr = MTU_CRn_32BITS; 135 136 clk0 = clk_get_sys("mtu0", NULL); 137 BUG_ON(IS_ERR(clk0)); 138 139 clk_enable(clk0); 140 141 /* 142 + * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz 143 + * for ux500. 144 + * Use a divide-by-16 counter if the tick rate is more than 32MHz. 145 + * At 32 MHz, the timer (with 32 bit counter) can be programmed 146 + * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer 147 + * with 16 gives too low timer resolution. 148 */ 149 rate = clk_get_rate(clk0); 150 + if (rate > 32000000) { 151 rate /= 16; 152 cr |= MTU_CRn_PRESCALE_16; 153 } else { ··· 170 pr_err("timer: failed to initialize clock source %s\n", 171 nmdk_clksrc.name); 172 173 + /* Timer 1 is used for events */ 174 + 175 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 176 177 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
+5 -20
arch/arm/plat-omap/sram.c
··· 220 if (omap_sram_size == 0) 221 return; 222 223 - if (cpu_is_omap24xx()) { 224 - omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA; 225 - 226 - base = OMAP2_SRAM_PA; 227 - base = ROUND_DOWN(base, PAGE_SIZE); 228 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 229 - } 230 - 231 if (cpu_is_omap34xx()) { 232 - omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA; 233 - base = OMAP3_SRAM_PA; 234 - base = ROUND_DOWN(base, PAGE_SIZE); 235 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 236 - 237 /* 238 * SRAM must be marked as non-cached on OMAP3 since the 239 * CORE DPLL M2 divider change code (in SRAM) runs with the ··· 231 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 232 } 233 234 - if (cpu_is_omap44xx()) { 235 - omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; 236 - base = OMAP4_SRAM_PA; 237 - base = ROUND_DOWN(base, PAGE_SIZE); 238 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 239 - } 240 - omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */ 241 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 242 243 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
··· 220 if (omap_sram_size == 0) 221 return; 222 223 if (cpu_is_omap34xx()) { 224 /* 225 * SRAM must be marked as non-cached on OMAP3 since the 226 * CORE DPLL M2 divider change code (in SRAM) runs with the ··· 244 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 245 } 246 247 + omap_sram_io_desc[0].virtual = omap_sram_base; 248 + base = omap_sram_start; 249 + base = ROUND_DOWN(base, PAGE_SIZE); 250 + omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 251 + omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); 252 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 253 254 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
+6
arch/ia64/Kconfig
··· 53 bool 54 default y 55 56 config NEED_DMA_MAP_STATE 57 def_bool y 58 ··· 64 65 config SWIOTLB 66 bool 67 68 config GENERIC_LOCKBREAK 69 def_bool n
··· 53 bool 54 default y 55 56 + config ARCH_DMA_ADDR_T_64BIT 57 + def_bool y 58 + 59 config NEED_DMA_MAP_STATE 60 def_bool y 61 ··· 61 62 config SWIOTLB 63 bool 64 + 65 + config STACKTRACE_SUPPORT 66 + def_bool y 67 68 config GENERIC_LOCKBREAK 69 def_bool n
-208
arch/ia64/include/asm/compat.h
··· 1 - #ifndef _ASM_IA64_COMPAT_H 2 - #define _ASM_IA64_COMPAT_H 3 - /* 4 - * Architecture specific compatibility types 5 - */ 6 - #include <linux/types.h> 7 - 8 - #define COMPAT_USER_HZ 100 9 - #define COMPAT_UTS_MACHINE "i686\0\0\0" 10 - 11 - typedef u32 compat_size_t; 12 - typedef s32 compat_ssize_t; 13 - typedef s32 compat_time_t; 14 - typedef s32 compat_clock_t; 15 - typedef s32 compat_key_t; 16 - typedef s32 compat_pid_t; 17 - typedef u16 __compat_uid_t; 18 - typedef u16 __compat_gid_t; 19 - typedef u32 __compat_uid32_t; 20 - typedef u32 __compat_gid32_t; 21 - typedef u16 compat_mode_t; 22 - typedef u32 compat_ino_t; 23 - typedef u16 compat_dev_t; 24 - typedef s32 compat_off_t; 25 - typedef s64 compat_loff_t; 26 - typedef u16 compat_nlink_t; 27 - typedef u16 compat_ipc_pid_t; 28 - typedef s32 compat_daddr_t; 29 - typedef u32 compat_caddr_t; 30 - typedef __kernel_fsid_t compat_fsid_t; 31 - typedef s32 compat_timer_t; 32 - 33 - typedef s32 compat_int_t; 34 - typedef s32 compat_long_t; 35 - typedef s64 __attribute__((aligned(4))) compat_s64; 36 - typedef u32 compat_uint_t; 37 - typedef u32 compat_ulong_t; 38 - typedef u64 __attribute__((aligned(4))) compat_u64; 39 - 40 - struct compat_timespec { 41 - compat_time_t tv_sec; 42 - s32 tv_nsec; 43 - }; 44 - 45 - struct compat_timeval { 46 - compat_time_t tv_sec; 47 - s32 tv_usec; 48 - }; 49 - 50 - struct compat_stat { 51 - compat_dev_t st_dev; 52 - u16 __pad1; 53 - compat_ino_t st_ino; 54 - compat_mode_t st_mode; 55 - compat_nlink_t st_nlink; 56 - __compat_uid_t st_uid; 57 - __compat_gid_t st_gid; 58 - compat_dev_t st_rdev; 59 - u16 __pad2; 60 - u32 st_size; 61 - u32 st_blksize; 62 - u32 st_blocks; 63 - u32 st_atime; 64 - u32 st_atime_nsec; 65 - u32 st_mtime; 66 - u32 st_mtime_nsec; 67 - u32 st_ctime; 68 - u32 st_ctime_nsec; 69 - u32 __unused4; 70 - u32 __unused5; 71 - }; 72 - 73 - struct compat_flock { 74 - short l_type; 75 - short l_whence; 76 - compat_off_t l_start; 77 - compat_off_t l_len; 78 - compat_pid_t l_pid; 79 - }; 80 - 81 - #define F_GETLK64 12 82 - #define F_SETLK64 13 83 - #define F_SETLKW64 14 84 - 85 - /* 86 - * IA32 uses 4 byte alignment for 64 bit quantities, 87 - * so we need to pack this structure. 88 - */ 89 - struct compat_flock64 { 90 - short l_type; 91 - short l_whence; 92 - compat_loff_t l_start; 93 - compat_loff_t l_len; 94 - compat_pid_t l_pid; 95 - } __attribute__((packed)); 96 - 97 - struct compat_statfs { 98 - int f_type; 99 - int f_bsize; 100 - int f_blocks; 101 - int f_bfree; 102 - int f_bavail; 103 - int f_files; 104 - int f_ffree; 105 - compat_fsid_t f_fsid; 106 - int f_namelen; /* SunOS ignores this field. */ 107 - int f_frsize; 108 - int f_spare[5]; 109 - }; 110 - 111 - #define COMPAT_RLIM_OLD_INFINITY 0x7fffffff 112 - #define COMPAT_RLIM_INFINITY 0xffffffff 113 - 114 - typedef u32 compat_old_sigset_t; /* at least 32 bits */ 115 - 116 - #define _COMPAT_NSIG 64 117 - #define _COMPAT_NSIG_BPW 32 118 - 119 - typedef u32 compat_sigset_word; 120 - 121 - #define COMPAT_OFF_T_MAX 0x7fffffff 122 - #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL 123 - 124 - struct compat_ipc64_perm { 125 - compat_key_t key; 126 - __compat_uid32_t uid; 127 - __compat_gid32_t gid; 128 - __compat_uid32_t cuid; 129 - __compat_gid32_t cgid; 130 - unsigned short mode; 131 - unsigned short __pad1; 132 - unsigned short seq; 133 - unsigned short __pad2; 134 - compat_ulong_t unused1; 135 - compat_ulong_t unused2; 136 - }; 137 - 138 - struct compat_semid64_ds { 139 - struct compat_ipc64_perm sem_perm; 140 - compat_time_t sem_otime; 141 - compat_ulong_t __unused1; 142 - compat_time_t sem_ctime; 143 - compat_ulong_t __unused2; 144 - compat_ulong_t sem_nsems; 145 - compat_ulong_t __unused3; 146 - compat_ulong_t __unused4; 147 - }; 148 - 149 - struct compat_msqid64_ds { 150 - struct compat_ipc64_perm msg_perm; 151 - compat_time_t msg_stime; 152 - compat_ulong_t __unused1; 153 - compat_time_t msg_rtime; 154 - compat_ulong_t __unused2; 155 - compat_time_t msg_ctime; 156 - compat_ulong_t __unused3; 157 - compat_ulong_t msg_cbytes; 158 - compat_ulong_t msg_qnum; 159 - compat_ulong_t msg_qbytes; 160 - compat_pid_t msg_lspid; 161 - compat_pid_t msg_lrpid; 162 - compat_ulong_t __unused4; 163 - compat_ulong_t __unused5; 164 - }; 165 - 166 - struct compat_shmid64_ds { 167 - struct compat_ipc64_perm shm_perm; 168 - compat_size_t shm_segsz; 169 - compat_time_t shm_atime; 170 - compat_ulong_t __unused1; 171 - compat_time_t shm_dtime; 172 - compat_ulong_t __unused2; 173 - compat_time_t shm_ctime; 174 - compat_ulong_t __unused3; 175 - compat_pid_t shm_cpid; 176 - compat_pid_t shm_lpid; 177 - compat_ulong_t shm_nattch; 178 - compat_ulong_t __unused4; 179 - compat_ulong_t __unused5; 180 - }; 181 - 182 - /* 183 - * A pointer passed in from user mode. This should not be used for syscall parameters, 184 - * just declare them as pointers because the syscall entry code will have appropriately 185 - * converted them already. 186 - */ 187 - typedef u32 compat_uptr_t; 188 - 189 - static inline void __user * 190 - compat_ptr (compat_uptr_t uptr) 191 - { 192 - return (void __user *) (unsigned long) uptr; 193 - } 194 - 195 - static inline compat_uptr_t 196 - ptr_to_compat(void __user *uptr) 197 - { 198 - return (u32)(unsigned long)uptr; 199 - } 200 - 201 - static __inline__ void __user * 202 - arch_compat_alloc_user_space (long len) 203 - { 204 - struct pt_regs *regs = task_pt_regs(current); 205 - return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); 206 - } 207 - 208 - #endif /* _ASM_IA64_COMPAT_H */
···
+5 -6
arch/ia64/include/asm/hardirq.h
··· 6 * David Mosberger-Tang <davidm@hpl.hp.com> 7 */ 8 9 - 10 - #include <linux/threads.h> 11 - #include <linux/irq.h> 12 - 13 - #include <asm/processor.h> 14 - 15 /* 16 * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure. 17 */ ··· 13 #define __ARCH_IRQ_STAT 1 14 15 #define local_softirq_pending() (local_cpu_data->softirq_pending) 16 17 extern void __iomem *ipi_base_addr; 18
··· 6 * David Mosberger-Tang <davidm@hpl.hp.com> 7 */ 8 9 /* 10 * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure. 11 */ ··· 19 #define __ARCH_IRQ_STAT 1 20 21 #define local_softirq_pending() (local_cpu_data->softirq_pending) 22 + 23 + #include <linux/threads.h> 24 + #include <linux/irq.h> 25 + 26 + #include <asm/processor.h> 27 28 extern void __iomem *ipi_base_addr; 29
+6
arch/ia64/include/asm/iommu_table.h
···
··· 1 + #ifndef _ASM_IA64_IOMMU_TABLE_H 2 + #define _ASM_IA64_IOMMU_TABLE_H 3 + 4 + #define IOMMU_INIT_POST(_detect) 5 + 6 + #endif /* _ASM_IA64_IOMMU_TABLE_H */
+1
arch/ia64/kernel/Makefile
··· 34 obj-$(CONFIG_PCI_MSI) += msi_ia64.o 35 mca_recovery-y += mca_drv.o mca_drv_asm.o 36 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o 37 38 obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ 39 paravirt_patch.o
··· 34 obj-$(CONFIG_PCI_MSI) += msi_ia64.o 35 mca_recovery-y += mca_drv.o mca_drv_asm.o 36 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o 37 + obj-$(CONFIG_STACKTRACE) += stacktrace.o 38 39 obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ 40 paravirt_patch.o
+1 -1
arch/ia64/kernel/cyclone.c
··· 59 return -ENODEV; 60 } 61 base = readq(reg); 62 if(!base){ 63 printk(KERN_ERR "Summit chipset: Could not find valid CBAR" 64 " value.\n"); 65 use_cyclone = 0; 66 return -ENODEV; 67 } 68 - iounmap(reg); 69 70 /* setup PMCC */ 71 offset = (base + CYCLONE_PMCC_OFFSET);
··· 59 return -ENODEV; 60 } 61 base = readq(reg); 62 + iounmap(reg); 63 if(!base){ 64 printk(KERN_ERR "Summit chipset: Could not find valid CBAR" 65 " value.\n"); 66 use_cyclone = 0; 67 return -ENODEV; 68 } 69 70 /* setup PMCC */ 71 offset = (base + CYCLONE_PMCC_OFFSET);
+1 -47
arch/ia64/kernel/iosapic.c
··· 108 #define DBG(fmt...) 109 #endif 110 111 - #define NR_PREALLOCATE_RTE_ENTRIES \ 112 - (PAGE_SIZE / sizeof(struct iosapic_rte_info)) 113 - #define RTE_PREALLOCATED (1) 114 - 115 static DEFINE_SPINLOCK(iosapic_lock); 116 117 /* ··· 132 struct list_head rte_list; /* RTEs sharing the same vector */ 133 char rte_index; /* IOSAPIC RTE index */ 134 int refcnt; /* reference counter */ 135 - unsigned int flags; /* flags */ 136 struct iosapic *iosapic; 137 } ____cacheline_aligned; 138 ··· 149 } iosapic_intr_info[NR_IRQS]; 150 151 static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */ 152 - 153 - static int iosapic_kmalloc_ok; 154 - static LIST_HEAD(free_rte_list); 155 156 static inline void 157 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val) ··· 544 } 545 } 546 547 - static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void) 548 - { 549 - int i; 550 - struct iosapic_rte_info *rte; 551 - int preallocated = 0; 552 - 553 - if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) { 554 - rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * 555 - NR_PREALLOCATE_RTE_ENTRIES); 556 - for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++) 557 - list_add(&rte->rte_list, &free_rte_list); 558 - } 559 - 560 - if (!list_empty(&free_rte_list)) { 561 - rte = list_entry(free_rte_list.next, struct iosapic_rte_info, 562 - rte_list); 563 - list_del(&rte->rte_list); 564 - preallocated++; 565 - } else { 566 - rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC); 567 - if (!rte) 568 - return NULL; 569 - } 570 - 571 - memset(rte, 0, sizeof(struct iosapic_rte_info)); 572 - if (preallocated) 573 - rte->flags |= RTE_PREALLOCATED; 574 - 575 - return rte; 576 - } 577 - 578 static inline int irq_is_shared (int irq) 579 { 580 return (iosapic_intr_info[irq].count > 1); ··· 576 577 rte = find_rte(irq, gsi); 578 if (!rte) { 579 - rte = iosapic_alloc_rte(); 580 if (!rte) { 581 printk(KERN_WARNING "%s: cannot allocate memory\n", 582 __func__); ··· 1126 return; 1127 } 1128 #endif 1129 - 1130 - static int __init iosapic_enable_kmalloc (void) 1131 - { 1132 - iosapic_kmalloc_ok = 1; 1133 - return 0; 1134 - } 1135 - core_initcall (iosapic_enable_kmalloc);
··· 108 #define DBG(fmt...) 109 #endif 110 111 static DEFINE_SPINLOCK(iosapic_lock); 112 113 /* ··· 136 struct list_head rte_list; /* RTEs sharing the same vector */ 137 char rte_index; /* IOSAPIC RTE index */ 138 int refcnt; /* reference counter */ 139 struct iosapic *iosapic; 140 } ____cacheline_aligned; 141 ··· 154 } iosapic_intr_info[NR_IRQS]; 155 156 static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */ 157 158 static inline void 159 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val) ··· 552 } 553 } 554 555 static inline int irq_is_shared (int irq) 556 { 557 return (iosapic_intr_info[irq].count > 1); ··· 615 616 rte = find_rte(irq, gsi); 617 if (!rte) { 618 + rte = kzalloc(sizeof (*rte), GFP_ATOMIC); 619 if (!rte) { 620 printk(KERN_WARNING "%s: cannot allocate memory\n", 621 __func__); ··· 1165 return; 1166 } 1167 #endif
+4
arch/ia64/kernel/irq_ia64.c
··· 30 #include <linux/bitops.h> 31 #include <linux/irq.h> 32 #include <linux/ratelimit.h> 33 34 #include <asm/delay.h> 35 #include <asm/intrinsics.h> ··· 652 void __init 653 init_IRQ (void) 654 { 655 ia64_register_ipi(); 656 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 657 #ifdef CONFIG_SMP
··· 30 #include <linux/bitops.h> 31 #include <linux/irq.h> 32 #include <linux/ratelimit.h> 33 + #include <linux/acpi.h> 34 35 #include <asm/delay.h> 36 #include <asm/intrinsics.h> ··· 651 void __init 652 init_IRQ (void) 653 { 654 + #ifdef CONFIG_ACPI 655 + acpi_boot_init(); 656 + #endif 657 ia64_register_ipi(); 658 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 659 #ifdef CONFIG_SMP
+19 -19
arch/ia64/kernel/mca.c
··· 2055 2056 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); 2057 2058 - /* 2059 - * Configure the CMCI/P vector and handler. Interrupts for CMC are 2060 - * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 2061 - */ 2062 - register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); 2063 - register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); 2064 - ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ 2065 - 2066 - /* Setup the MCA rendezvous interrupt vector */ 2067 - register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); 2068 - 2069 - /* Setup the MCA wakeup interrupt vector */ 2070 - register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); 2071 - 2072 - #ifdef CONFIG_ACPI 2073 - /* Setup the CPEI/P handler */ 2074 - register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2075 - #endif 2076 - 2077 /* Initialize the areas set aside by the OS to buffer the 2078 * platform/processor error states for MCA/INIT/CMC 2079 * handling. ··· 2083 { 2084 if (!mca_init) 2085 return 0; 2086 2087 register_hotcpu_notifier(&mca_cpu_notifier); 2088
··· 2055 2056 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); 2057 2058 /* Initialize the areas set aside by the OS to buffer the 2059 * platform/processor error states for MCA/INIT/CMC 2060 * handling. ··· 2102 { 2103 if (!mca_init) 2104 return 0; 2105 + 2106 + /* 2107 + * Configure the CMCI/P vector and handler. Interrupts for CMC are 2108 + * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 2109 + */ 2110 + register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); 2111 + register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); 2112 + ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ 2113 + 2114 + /* Setup the MCA rendezvous interrupt vector */ 2115 + register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); 2116 + 2117 + /* Setup the MCA wakeup interrupt vector */ 2118 + register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); 2119 + 2120 + #ifdef CONFIG_ACPI 2121 + /* Setup the CPEI/P handler */ 2122 + register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2123 + #endif 2124 2125 register_hotcpu_notifier(&mca_cpu_notifier); 2126
+1 -1
arch/ia64/kernel/palinfo.c
··· 434 unsigned long phys_stacked; 435 pal_hints_u_t hints; 436 unsigned long iregs, dregs; 437 - char *info_type[]={ 438 "Implemented AR(s)", 439 "AR(s) with read side-effects", 440 "Implemented CR(s)",
··· 434 unsigned long phys_stacked; 435 pal_hints_u_t hints; 436 unsigned long iregs, dregs; 437 + static const char * const info_type[] = { 438 "Implemented AR(s)", 439 "AR(s) with read side-effects", 440 "Implemented CR(s)",
+6 -6
arch/ia64/kernel/perfmon.c
··· 1573 return -EINVAL; 1574 } 1575 1576 - ctx = (pfm_context_t *)filp->private_data; 1577 if (ctx == NULL) { 1578 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); 1579 return -EINVAL; ··· 1673 return 0; 1674 } 1675 1676 - ctx = (pfm_context_t *)filp->private_data; 1677 if (ctx == NULL) { 1678 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); 1679 return 0; ··· 1733 return -EBADF; 1734 } 1735 1736 - ctx = (pfm_context_t *)filp->private_data; 1737 if (ctx == NULL) { 1738 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); 1739 return -EBADF; ··· 1841 return -EBADF; 1842 } 1843 1844 - ctx = (pfm_context_t *)filp->private_data; 1845 if (ctx == NULL) { 1846 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); 1847 return -EBADF; ··· 1984 return -EBADF; 1985 } 1986 1987 - ctx = (pfm_context_t *)filp->private_data; 1988 if (ctx == NULL) { 1989 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); 1990 return -EBADF; ··· 4907 goto error_args; 4908 } 4909 4910 - ctx = (pfm_context_t *)file->private_data; 4911 if (unlikely(ctx == NULL)) { 4912 DPRINT(("no context for fd %d\n", fd)); 4913 goto error_args;
··· 1573 return -EINVAL; 1574 } 1575 1576 + ctx = filp->private_data; 1577 if (ctx == NULL) { 1578 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); 1579 return -EINVAL; ··· 1673 return 0; 1674 } 1675 1676 + ctx = filp->private_data; 1677 if (ctx == NULL) { 1678 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); 1679 return 0; ··· 1733 return -EBADF; 1734 } 1735 1736 + ctx = filp->private_data; 1737 if (ctx == NULL) { 1738 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); 1739 return -EBADF; ··· 1841 return -EBADF; 1842 } 1843 1844 + ctx = filp->private_data; 1845 if (ctx == NULL) { 1846 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); 1847 return -EBADF; ··· 1984 return -EBADF; 1985 } 1986 1987 + ctx = filp->private_data; 1988 if (ctx == NULL) { 1989 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); 1990 return -EBADF; ··· 4907 goto error_args; 4908 } 4909 4910 + ctx = file->private_data; 4911 if (unlikely(ctx == NULL)) { 4912 DPRINT(("no context for fd %d\n", fd)); 4913 goto error_args;
+1 -1
arch/ia64/kernel/salinfo.c
··· 642 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { 643 data = salinfo_data + i; 644 data->type = i; 645 - init_MUTEX(&data->mutex); 646 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); 647 if (!dir) 648 continue;
··· 642 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { 643 data = salinfo_data + i; 644 data->type = i; 645 + sema_init(&data->mutex, 1); 646 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); 647 if (!dir) 648 continue;
-4
arch/ia64/kernel/setup.c
··· 594 cpu_init(); /* initialize the bootstrap CPU */ 595 mmu_context_init(); /* initialize context_id bitmap */ 596 597 - #ifdef CONFIG_ACPI 598 - acpi_boot_init(); 599 - #endif 600 - 601 paravirt_banner(); 602 paravirt_arch_setup_console(cmdline_p); 603
··· 594 cpu_init(); /* initialize the bootstrap CPU */ 595 mmu_context_init(); /* initialize context_id bitmap */ 596 597 paravirt_banner(); 598 paravirt_arch_setup_console(cmdline_p); 599
+39
arch/ia64/kernel/stacktrace.c
···
··· 1 + /* 2 + * arch/ia64/kernel/stacktrace.c 3 + * 4 + * Stack trace management functions 5 + * 6 + */ 7 + #include <linux/sched.h> 8 + #include <linux/stacktrace.h> 9 + #include <linux/module.h> 10 + 11 + static void 12 + ia64_do_save_stack(struct unw_frame_info *info, void *arg) 13 + { 14 + struct stack_trace *trace = arg; 15 + unsigned long ip; 16 + int skip = trace->skip; 17 + 18 + trace->nr_entries = 0; 19 + do { 20 + unw_get_ip(info, &ip); 21 + if (ip == 0) 22 + break; 23 + if (skip == 0) { 24 + trace->entries[trace->nr_entries++] = ip; 25 + if (trace->nr_entries == trace->max_entries) 26 + break; 27 + } else 28 + skip--; 29 + } while (unw_unwind(info) >= 0); 30 + } 31 + 32 + /* 33 + * Save stack-backtrace addresses into a stack_trace buffer. 34 + */ 35 + void save_stack_trace(struct stack_trace *trace) 36 + { 37 + unw_init_running(ia64_do_save_stack, trace); 38 + } 39 + EXPORT_SYMBOL(save_stack_trace);
+19 -4
arch/ia64/kernel/unwind.c
··· 1204 static inline unw_hash_index_t 1205 hash (unsigned long ip) 1206 { 1207 - # define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */ 1208 1209 - return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE); 1210 - #undef hashmagic 1211 } 1212 1213 static inline long ··· 1531 struct unw_labeled_state *ls, *next; 1532 unsigned long ip = info->ip; 1533 struct unw_state_record sr; 1534 - struct unw_table *table; 1535 struct unw_reg_info *r; 1536 struct unw_insn insn; 1537 u8 *dp, *desc_end; ··· 1560 1561 STAT(parse_start = ia64_get_itc()); 1562 1563 for (table = unw.tables; table; table = table->next) { 1564 if (ip >= table->start && ip < table->end) { 1565 e = lookup(table, ip - table->segment_base); 1566 break; 1567 } 1568 } 1569 if (!e) { 1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
··· 1204 static inline unw_hash_index_t 1205 hash (unsigned long ip) 1206 { 1207 + /* magic number = ((sqrt(5)-1)/2)*2^64 */ 1208 + static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL; 1209 1210 + return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE); 1211 } 1212 1213 static inline long ··· 1531 struct unw_labeled_state *ls, *next; 1532 unsigned long ip = info->ip; 1533 struct unw_state_record sr; 1534 + struct unw_table *table, *prev; 1535 struct unw_reg_info *r; 1536 struct unw_insn insn; 1537 u8 *dp, *desc_end; ··· 1560 1561 STAT(parse_start = ia64_get_itc()); 1562 1563 + prev = NULL; 1564 for (table = unw.tables; table; table = table->next) { 1565 if (ip >= table->start && ip < table->end) { 1566 + /* 1567 + * Leave the kernel unwind table at the very front, 1568 + * lest moving it breaks some assumption elsewhere. 1569 + * Otherwise, move the matching table to the second 1570 + * position in the list so that traversals can benefit 1571 + * from commonality in backtrace paths. 1572 + */ 1573 + if (prev && prev != unw.tables) { 1574 + /* unw is safe - we're already spinlocked */ 1575 + prev->next = table->next; 1576 + table->next = unw.tables->next; 1577 + unw.tables->next = table; 1578 + } 1579 e = lookup(table, ip - table->segment_base); 1580 break; 1581 } 1582 + prev = table; 1583 } 1584 if (!e) { 1585 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
+2 -3
arch/ia64/xen/xen_pv_ops.c
··· 1136 static void __init 1137 xen_patch_branch(unsigned long tag, unsigned long type) 1138 { 1139 - const unsigned long nelem = 1140 - sizeof(xen_branch_target) / sizeof(xen_branch_target[0]); 1141 - __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem); 1142 }
··· 1136 static void __init 1137 xen_patch_branch(unsigned long tag, unsigned long type) 1138 { 1139 + __paravirt_patch_apply_branch(tag, type, xen_branch_target, 1140 + ARRAY_SIZE(xen_branch_target)); 1141 }
+1 -1
arch/mn10300/Kconfig.debug
··· 101 102 choice 103 prompt "GDB stub port" 104 - default GDBSTUB_TTYSM0 105 depends on GDBSTUB 106 help 107 Select the serial port used for GDB-stub.
··· 101 102 choice 103 prompt "GDB stub port" 104 + default GDBSTUB_ON_TTYSM0 105 depends on GDBSTUB 106 help 107 Select the serial port used for GDB-stub.
+20 -15
arch/mn10300/kernel/signal.c
··· 65 old_sigset_t mask; 66 if (verify_area(VERIFY_READ, act, sizeof(*act)) || 67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 68 - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) 69 return -EFAULT; 70 - __get_user(new_ka.sa.sa_flags, &act->sa_flags); 71 - __get_user(mask, &act->sa_mask); 72 siginitset(&new_ka.sa.sa_mask, mask); 73 } 74 ··· 77 if (!ret && oact) { 78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || 79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 80 - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) 81 return -EFAULT; 82 - __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 83 - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); 84 } 85 86 return ret; ··· 101 struct sigcontext __user *sc, long *_d0) 102 { 103 unsigned int err = 0; 104 105 if (is_using_fpu(current)) 106 fpu_kill_state(current); ··· 333 regs->d0 = sig; 334 regs->d1 = (unsigned long) &frame->sc; 335 336 - set_fs(USER_DS); 337 - 338 /* the tracer may want to single-step inside the handler */ 339 if (test_thread_flag(TIF_SINGLESTEP)) 340 ptrace_notify(SIGTRAP); ··· 346 return 0; 347 348 give_sigsegv: 349 - force_sig(SIGSEGV, current); 350 return -EFAULT; 351 } 352 ··· 414 regs->d0 = sig; 415 regs->d1 = (long) &frame->info; 416 417 - set_fs(USER_DS); 418 - 419 /* the tracer may want to single-step inside the handler */ 420 if (test_thread_flag(TIF_SINGLESTEP)) 421 ptrace_notify(SIGTRAP); ··· 427 return 0; 428 429 give_sigsegv: 430 - force_sig(SIGSEGV, current); 431 return -EFAULT; 432 } 433 434 /* ··· 464 /* fallthrough */ 465 case -ERESTARTNOINTR: 466 regs->d0 = regs->orig_d0; 467 - regs->pc -= 2; 468 } 469 } 470 ··· 532 case -ERESTARTSYS: 533 case -ERESTARTNOINTR: 534 regs->d0 = regs->orig_d0; 535 - regs->pc -= 2; 536 break; 537 538 case -ERESTART_RESTARTBLOCK: 539 regs->d0 = __NR_restart_syscall; 540 - regs->pc -= 2; 541 break; 542 } 543 }
··· 65 old_sigset_t mask; 66 if (verify_area(VERIFY_READ, act, sizeof(*act)) || 67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 68 + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || 69 + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 70 + __get_user(mask, &act->sa_mask)) 71 return -EFAULT; 72 siginitset(&new_ka.sa.sa_mask, mask); 73 } 74 ··· 77 if (!ret && oact) { 78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || 79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 80 + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || 81 + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 82 + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 83 return -EFAULT; 84 } 85 86 return ret; ··· 101 struct sigcontext __user *sc, long *_d0) 102 { 103 unsigned int err = 0; 104 + 105 + /* Always make any pending restarted system calls return -EINTR */ 106 + current_thread_info()->restart_block.fn = do_no_restart_syscall; 107 108 if (is_using_fpu(current)) 109 fpu_kill_state(current); ··· 330 regs->d0 = sig; 331 regs->d1 = (unsigned long) &frame->sc; 332 333 /* the tracer may want to single-step inside the handler */ 334 if (test_thread_flag(TIF_SINGLESTEP)) 335 ptrace_notify(SIGTRAP); ··· 345 return 0; 346 347 give_sigsegv: 348 + force_sigsegv(sig, current); 349 return -EFAULT; 350 } 351 ··· 413 regs->d0 = sig; 414 regs->d1 = (long) &frame->info; 415 416 /* the tracer may want to single-step inside the handler */ 417 if (test_thread_flag(TIF_SINGLESTEP)) 418 ptrace_notify(SIGTRAP); ··· 428 return 0; 429 430 give_sigsegv: 431 + force_sigsegv(sig, current); 432 return -EFAULT; 433 + } 434 + 435 + static inline void stepback(struct pt_regs *regs) 436 + { 437 + regs->pc -= 2; 438 + regs->orig_d0 = -1; 439 } 440 441 /* ··· 459 /* fallthrough */ 460 case -ERESTARTNOINTR: 461 regs->d0 = regs->orig_d0; 462 + stepback(regs); 463 } 464 } 465 ··· 527 case -ERESTARTSYS: 528 case -ERESTARTNOINTR: 529 regs->d0 = regs->orig_d0; 530 + stepback(regs); 531 break; 532 533 case -ERESTART_RESTARTBLOCK: 534 regs->d0 = __NR_restart_syscall; 535 + stepback(regs); 536 break; 537 } 538 }
+6 -8
arch/mn10300/mm/Makefile
··· 2 # Makefile for the MN10300-specific memory management code 3 # 4 5 obj-y := \ 6 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ 7 - misalignment.o dma-alloc.o 8 - 9 - ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y) 10 - obj-y += cache.o cache-mn10300.o 11 - ifeq ($(CONFIG_MN10300_CACHE_WBACK),y) 12 - obj-y += cache-flush-mn10300.o 13 - endif 14 - endif
··· 2 # Makefile for the MN10300-specific memory management code 3 # 4 5 + cacheflush-y := cache.o cache-mn10300.o 6 + cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o 7 + 8 + cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o 9 + 10 obj-y := \ 11 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ 12 + misalignment.o dma-alloc.o $(cacheflush-y)
+21
arch/mn10300/mm/cache-disabled.c
···
··· 1 + /* Handle the cache being disabled 2 + * 3 + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. 4 + * Written by David Howells (dhowells@redhat.com) 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public Licence 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the Licence, or (at your option) any later version. 10 + */ 11 + #include <linux/mm.h> 12 + 13 + /* 14 + * allow userspace to flush the instruction cache 15 + */ 16 + asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) 17 + { 18 + if (end < start) 19 + return -EINVAL; 20 + return 0; 21 + }
+13 -1
arch/x86/boot/early_serial_console.c
··· 58 if (arg[pos] == ',') 59 pos++; 60 61 - if (!strncmp(arg, "ttyS", 4)) { 62 static const int bases[] = { 0x3f8, 0x2f8 }; 63 int idx = 0; 64
··· 58 if (arg[pos] == ',') 59 pos++; 60 61 + /* 62 + * make sure we have 63 + * "serial,0x3f8,115200" 64 + * "serial,ttyS0,115200" 65 + * "ttyS0,115200" 66 + */ 67 + if (pos == 7 && !strncmp(arg + pos, "0x", 2)) { 68 + port = simple_strtoull(arg + pos, &e, 16); 69 + if (port == 0 || arg + pos == e) 70 + port = DEFAULT_SERIAL_PORT; 71 + else 72 + pos = e - arg; 73 + } else if (!strncmp(arg + pos, "ttyS", 4)) { 74 static const int bases[] = { 0x3f8, 0x2f8 }; 75 int idx = 0; 76
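The new parsing accepts a raw hex I/O port as well as a ttyS name. A userspace sketch of the same option grammar, where DEFAULT_SERIAL_PORT stands in for the boot code's fallback and only ttyS0/ttyS1 are handled:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8	/* assumed fallback: ttyS0 */

/* Accepts "0x3f8,115200" or "ttyS0,115200" and returns the I/O port. */
static unsigned parse_port(const char *arg)
{
	static const unsigned bases[] = { 0x3f8, 0x2f8 };
	char *end;

	if (!strncmp(arg, "0x", 2)) {
		unsigned long port = strtoul(arg, &end, 16);
		return (port == 0 || end == arg) ? DEFAULT_SERIAL_PORT
						 : (unsigned)port;
	}
	if (!strncmp(arg, "ttyS", 4))
		return bases[arg[4] == '1' ? 1 : 0];
	return DEFAULT_SERIAL_PORT;
}

int main(void)
{
	printf("%#x\n", parse_port("0x2f8,115200"));
	printf("%#x\n", parse_port("ttyS0,115200"));
	return 0;
}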
+6
arch/x86/include/asm/amd_iommu_proto.h
··· 38 39 #endif /* !CONFIG_AMD_IOMMU_STATS */ 40 41 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
··· 38 39 #endif /* !CONFIG_AMD_IOMMU_STATS */ 40 41 + static inline bool is_rd890_iommu(struct pci_dev *pdev) 42 + { 43 + return (pdev->vendor == PCI_VENDOR_ID_ATI) && 44 + (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); 45 + } 46 + 47 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
+12
arch/x86/include/asm/amd_iommu_types.h
··· 368 /* capabilities of that IOMMU read from ACPI */ 369 u32 cap; 370 371 /* 372 * Capability pointer. There could be more than one IOMMU per PCI 373 * device function if there are more than one AMD IOMMU capability ··· 414 415 /* default dma_ops domain for that IOMMU */ 416 struct dma_ops_domain *default_dom; 417 }; 418 419 /*
··· 368 /* capabilities of that IOMMU read from ACPI */ 369 u32 cap; 370 371 + /* flags read from acpi table */ 372 + u8 acpi_flags; 373 + 374 /* 375 * Capability pointer. There could be more than one IOMMU per PCI 376 * device function if there are more than one AMD IOMMU capability ··· 411 412 /* default dma_ops domain for that IOMMU */ 413 struct dma_ops_domain *default_dom; 414 + 415 + /* 416 + * This array is required to work around a potential BIOS bug. 417 + * The BIOS may miss to restore parts of the PCI configuration 418 + * space when the system resumes from S3. The result is that the 419 + * IOMMU does not execute commands anymore which leads to system 420 + * failure. 421 + */ 422 + u32 cache_cfg[4]; 423 }; 424 425 /*
+1 -1
arch/x86/include/asm/bitops.h
··· 309 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 310 { 311 return ((1UL << (nr % BITS_PER_LONG)) & 312 - (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; 313 } 314 315 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
··· 309 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 310 { 311 return ((1UL << (nr % BITS_PER_LONG)) & 312 + (addr[nr / BITS_PER_LONG])) != 0; 313 } 314 315 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
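The one-line fix matters because the old cast to (unsigned long *) silently dropped the volatile (and const) qualifiers from addr before indexing it. A sketch of the corrected expression in plain C, assuming only that unsigned long is the machine word:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Index the volatile pointer directly so the qualifier is not cast away. */
static int test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr % BITS_PER_LONG)) & addr[nr / BITS_PER_LONG]) != 0;
}

int main(void)
{
	volatile unsigned long map[2] = { 0x5, 0x1 };

	printf("%d %d %d\n", test_bit(0, map), test_bit(1, map),
	       test_bit(BITS_PER_LONG, map));	/* prints: 1 0 1 */
	return 0;
}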
+2
arch/x86/kernel/Makefile
··· 11 CFLAGS_REMOVE_tsc.o = -pg 12 CFLAGS_REMOVE_rtc.o = -pg 13 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 14 CFLAGS_REMOVE_ftrace.o = -pg 15 CFLAGS_REMOVE_early_printk.o = -pg 16 endif
··· 11 CFLAGS_REMOVE_tsc.o = -pg 12 CFLAGS_REMOVE_rtc.o = -pg 13 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 14 + CFLAGS_REMOVE_pvclock.o = -pg 15 + CFLAGS_REMOVE_kvmclock.o = -pg 16 CFLAGS_REMOVE_ftrace.o = -pg 17 CFLAGS_REMOVE_early_printk.o = -pg 18 endif
+3 -1
arch/x86/kernel/amd_iommu.c
··· 1953 size_t size, 1954 int dir) 1955 { 1956 dma_addr_t i, start; 1957 unsigned int pages; 1958 ··· 1961 (dma_addr + size > dma_dom->aperture_size)) 1962 return; 1963 1964 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 1965 dma_addr &= PAGE_MASK; 1966 start = dma_addr; ··· 1976 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1977 1978 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1979 - iommu_flush_pages(&dma_dom->domain, dma_addr, size); 1980 dma_dom->need_flush = false; 1981 } 1982 }
··· 1953 size_t size, 1954 int dir) 1955 { 1956 + dma_addr_t flush_addr; 1957 dma_addr_t i, start; 1958 unsigned int pages; 1959 ··· 1960 (dma_addr + size > dma_dom->aperture_size)) 1961 return; 1962 1963 + flush_addr = dma_addr; 1964 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 1965 dma_addr &= PAGE_MASK; 1966 start = dma_addr; ··· 1974 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1975 1976 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1977 + iommu_flush_pages(&dma_dom->domain, flush_addr, size); 1978 dma_dom->need_flush = false; 1979 } 1980 }
+45 -22
arch/x86/kernel/amd_iommu_init.c
··· 632 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 633 MMIO_GET_LD(range)); 634 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 635 } 636 637 /* ··· 656 struct ivhd_entry *e; 657 658 /* 659 - * First set the recommended feature enable bits from ACPI 660 - * into the IOMMU control registers 661 */ 662 - h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? 663 - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 664 - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 665 - 666 - h->flags & IVHD_FLAG_PASSPW_EN_MASK ? 667 - iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 668 - iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 669 - 670 - h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 671 - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 672 - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 673 - 674 - h->flags & IVHD_FLAG_ISOC_EN_MASK ? 675 - iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 676 - iommu_feature_disable(iommu, CONTROL_ISOC_EN); 677 - 678 - /* 679 - * make IOMMU memory accesses cache coherent 680 - */ 681 - iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 682 683 /* 684 * Done. Now parse the device entries ··· 1103 } 1104 } 1105 1106 /* 1107 * This function finally enables all IOMMUs found in the system after 1108 * they have been initialized ··· 1147 1148 for_each_iommu(iommu) { 1149 iommu_disable(iommu); 1150 iommu_set_device_table(iommu); 1151 iommu_enable_command_buffer(iommu); 1152 iommu_enable_event_buffer(iommu);
··· 632 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 633 MMIO_GET_LD(range)); 634 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 635 + 636 + if (is_rd890_iommu(iommu->dev)) { 637 + pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); 638 + pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); 639 + pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); 640 + pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); 641 + } 642 } 643 644 /* ··· 649 struct ivhd_entry *e; 650 651 /* 652 + * First save the recommended feature enable bits from ACPI 653 */ 654 + iommu->acpi_flags = h->flags; 655 656 /* 657 * Done. Now parse the device entries ··· 1116 } 1117 } 1118 1119 + static void iommu_init_flags(struct amd_iommu *iommu) 1120 + { 1121 + iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 1122 + iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 1123 + iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 1124 + 1125 + iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 1126 + iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 1127 + iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 1128 + 1129 + iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 1130 + iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 1131 + iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 1132 + 1133 + iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 1134 + iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 1135 + iommu_feature_disable(iommu, CONTROL_ISOC_EN); 1136 + 1137 + /* 1138 + * make IOMMU memory accesses cache coherent 1139 + */ 1140 + iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 1141 + } 1142 + 1143 + static void iommu_apply_quirks(struct amd_iommu *iommu) 1144 + { 1145 + if (is_rd890_iommu(iommu->dev)) { 1146 + pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); 1147 + pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); 1148 + pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); 1149 + pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); 1150 + } 1151 + } 1152 + 1153 /* 1154 * This function finally enables all IOMMUs found in the system after 1155 * they have been initialized ··· 1126 1127 for_each_iommu(iommu) { 1128 iommu_disable(iommu); 1129 + iommu_apply_quirks(iommu); 1130 + iommu_init_flags(iommu); 1131 iommu_set_device_table(iommu); 1132 iommu_enable_command_buffer(iommu); 1133 iommu_enable_event_buffer(iommu);
+11 -1
arch/x86/kernel/cpu/perf_event.c
··· 102 */ 103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ 104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 105 int enabled; 106 107 int n_events; ··· 1011 x86_perf_event_set_period(event); 1012 cpuc->events[idx] = event; 1013 __set_bit(idx, cpuc->active_mask); 1014 x86_pmu.enable(event); 1015 perf_event_update_userpage(event); 1016 ··· 1143 cpuc = &__get_cpu_var(cpu_hw_events); 1144 1145 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1146 - if (!test_bit(idx, cpuc->active_mask)) 1147 continue; 1148 1149 event = cpuc->events[idx]; 1150 hwc = &event->hw;
··· 102 */ 103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ 104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 105 + unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 106 int enabled; 107 108 int n_events; ··· 1010 x86_perf_event_set_period(event); 1011 cpuc->events[idx] = event; 1012 __set_bit(idx, cpuc->active_mask); 1013 + __set_bit(idx, cpuc->running); 1014 x86_pmu.enable(event); 1015 perf_event_update_userpage(event); 1016 ··· 1141 cpuc = &__get_cpu_var(cpu_hw_events); 1142 1143 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1144 + if (!test_bit(idx, cpuc->active_mask)) { 1145 + /* 1146 + * Though we deactivated the counter some cpus 1147 + * might still deliver spurious interrupts still 1148 + * in flight. Catch them: 1149 + */ 1150 + if (__test_and_clear_bit(idx, cpuc->running)) 1151 + handled++; 1152 continue; 1153 + } 1154 1155 event = cpuc->events[idx]; 1156 hwc = &event->hw;
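The second bitmap records which counters have been programmed since the last interrupt, so a late interrupt from a freshly disabled counter is still accounted for instead of being reported as unknown. A minimal sketch of that bookkeeping with plain bitmasks, outside any PMU specifics:

#include <stdio.h>

#define NUM_COUNTERS 4

static unsigned long active_mask, running_mask;

static void counter_enable(int idx)
{
	active_mask  |= 1UL << idx;
	running_mask |= 1UL << idx;	/* stays set until an interrupt is seen */
}

static void counter_disable(int idx)
{
	active_mask &= ~(1UL << idx);	/* running_mask left alone on purpose */
}

/* Returns how many counters this interrupt is accounted to. */
static int handle_irq(void)
{
	int idx, handled = 0;

	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(active_mask & (1UL << idx))) {
			/* a late interrupt from a counter we just disabled */
			if (running_mask & (1UL << idx)) {
				running_mask &= ~(1UL << idx);
				handled++;
			}
			continue;
		}
		handled++;	/* normal case: read and reprogram the counter */
	}
	return handled;
}

int main(void)
{
	counter_enable(2);
	counter_disable(2);
	printf("handled=%d\n", handle_irq());	/* 1: stray interrupt claimed */
	printf("handled=%d\n", handle_irq());	/* 0: nothing left to claim */
	return 0;
}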
+4
drivers/ata/ahci.c
··· 90 static int ahci_pci_device_resume(struct pci_dev *pdev); 91 #endif 92 93 static struct ata_port_operations ahci_vt8251_ops = { 94 .inherits = &ahci_ops, 95 .hardreset = ahci_vt8251_hardreset,
··· 90 static int ahci_pci_device_resume(struct pci_dev *pdev); 91 #endif 92 93 + static struct scsi_host_template ahci_sht = { 94 + AHCI_SHT("ahci"), 95 + }; 96 + 97 static struct ata_port_operations ahci_vt8251_ops = { 98 .inherits = &ahci_ops, 99 .hardreset = ahci_vt8251_hardreset,
+11 -1
drivers/ata/ahci.h
··· 298 299 extern int ahci_ignore_sss; 300 301 - extern struct scsi_host_template ahci_sht; 302 extern struct ata_port_operations ahci_ops; 303 304 void ahci_save_initial_config(struct device *dev,
··· 298 299 extern int ahci_ignore_sss; 300 301 + extern struct device_attribute *ahci_shost_attrs[]; 302 + extern struct device_attribute *ahci_sdev_attrs[]; 303 + 304 + #define AHCI_SHT(drv_name) \ 305 + ATA_NCQ_SHT(drv_name), \ 306 + .can_queue = AHCI_MAX_CMDS - 1, \ 307 + .sg_tablesize = AHCI_MAX_SG, \ 308 + .dma_boundary = AHCI_DMA_BOUNDARY, \ 309 + .shost_attrs = ahci_shost_attrs, \ 310 + .sdev_attrs = ahci_sdev_attrs 311 + 312 extern struct ata_port_operations ahci_ops; 313 314 void ahci_save_initial_config(struct device *dev,
+5 -1
drivers/ata/ahci_platform.c
··· 23 #include <linux/ahci_platform.h> 24 #include "ahci.h" 25 26 static int __init ahci_probe(struct platform_device *pdev) 27 { 28 struct device *dev = &pdev->dev; ··· 149 ahci_print_info(host, "platform"); 150 151 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 152 - &ahci_sht); 153 if (rc) 154 goto err0; 155
··· 23 #include <linux/ahci_platform.h> 24 #include "ahci.h" 25 26 + static struct scsi_host_template ahci_platform_sht = { 27 + AHCI_SHT("ahci_platform"), 28 + }; 29 + 30 static int __init ahci_probe(struct platform_device *pdev) 31 { 32 struct device *dev = &pdev->dev; ··· 145 ahci_print_info(host, "platform"); 146 147 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 148 + &ahci_platform_sht); 149 if (rc) 150 goto err0; 151
+4 -12
drivers/ata/libahci.c
··· 121 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 122 ahci_read_em_buffer, ahci_store_em_buffer); 123 124 - static struct device_attribute *ahci_shost_attrs[] = { 125 &dev_attr_link_power_management_policy, 126 &dev_attr_em_message_type, 127 &dev_attr_em_message, ··· 132 &dev_attr_em_buffer, 133 NULL 134 }; 135 136 - static struct device_attribute *ahci_sdev_attrs[] = { 137 &dev_attr_sw_activity, 138 &dev_attr_unload_heads, 139 NULL 140 }; 141 - 142 - struct scsi_host_template ahci_sht = { 143 - ATA_NCQ_SHT("ahci"), 144 - .can_queue = AHCI_MAX_CMDS - 1, 145 - .sg_tablesize = AHCI_MAX_SG, 146 - .dma_boundary = AHCI_DMA_BOUNDARY, 147 - .shost_attrs = ahci_shost_attrs, 148 - .sdev_attrs = ahci_sdev_attrs, 149 - }; 150 - EXPORT_SYMBOL_GPL(ahci_sht); 151 152 struct ata_port_operations ahci_ops = { 153 .inherits = &sata_pmp_port_ops,
··· 121 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 122 ahci_read_em_buffer, ahci_store_em_buffer); 123 124 + struct device_attribute *ahci_shost_attrs[] = { 125 &dev_attr_link_power_management_policy, 126 &dev_attr_em_message_type, 127 &dev_attr_em_message, ··· 132 &dev_attr_em_buffer, 133 NULL 134 }; 135 + EXPORT_SYMBOL_GPL(ahci_shost_attrs); 136 137 + struct device_attribute *ahci_sdev_attrs[] = { 138 &dev_attr_sw_activity, 139 &dev_attr_unload_heads, 140 NULL 141 }; 142 + EXPORT_SYMBOL_GPL(ahci_sdev_attrs); 143 144 struct ata_port_operations ahci_ops = { 145 .inherits = &sata_pmp_port_ops,
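AHCI_SHT() turns the previously shared scsi_host_template into a set of designated initializers, so ahci and ahci_platform each instantiate their own template with a distinct driver name while the common fields stay in one place. A generic sketch of the pattern (the struct and values below are illustrative, not libata's):

#include <stdio.h>

struct host_template {
	const char *name;
	int can_queue;
	int sg_tablesize;
};

/* Shared designated initializers; each driver supplies only its name. */
#define COMMON_SHT(drv_name) \
	.name = (drv_name),  \
	.can_queue = 31,     \
	.sg_tablesize = 168

static struct host_template ahci_sht = {
	COMMON_SHT("ahci"),
};

static struct host_template ahci_platform_sht = {
	COMMON_SHT("ahci_platform"),
};

int main(void)
{
	printf("%s / %s\n", ahci_sht.name, ahci_platform_sht.name);
	return 0;
}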
+1 -1
drivers/block/pktcdvd.c
··· 2369 pkt_shrink_pktlist(pd); 2370 } 2371 2372 - static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2373 { 2374 if (dev_minor >= MAX_WRITERS) 2375 return NULL;
··· 2369 pkt_shrink_pktlist(pd); 2370 } 2371 2372 + static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) 2373 { 2374 if (dev_minor >= MAX_WRITERS) 2375 return NULL;
+1 -1
drivers/dma/mv_xor.c
··· 162 163 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 164 { 165 - u32 val = (1 << (1 + (chan->idx * 16))); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 168 }
··· 162 163 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 164 { 165 + u32 val = ~(1 << (chan->idx * 16)); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 168 }
+3
drivers/edac/edac_mc.c
··· 339 { 340 int status; 341 342 status = cancel_delayed_work(&mci->work); 343 if (status == 0) { 344 debugf0("%s() not canceled, flush the queue\n",
··· 339 { 340 int status; 341 342 + if (mci->op_state != OP_RUNNING_POLL) 343 + return; 344 + 345 status = cancel_delayed_work(&mci->work); 346 if (status == 0) { 347 debugf0("%s() not canceled, flush the queue\n",
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 2400 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2401 break; 2402 case 3: 2403 - if (obj_priv->fence_reg > 8) 2404 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; 2405 else 2406 case 2:
··· 2400 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2401 break; 2402 case 3: 2403 + if (obj_priv->fence_reg >= 8) 2404 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; 2405 else 2406 case 2:
+6 -9
drivers/gpu/drm/i915/intel_sdvo.c
··· 2170 return true; 2171 2172 err: 2173 - intel_sdvo_destroy_enhance_property(connector); 2174 - kfree(intel_sdvo_connector); 2175 return false; 2176 } 2177 ··· 2242 return true; 2243 2244 err: 2245 - intel_sdvo_destroy_enhance_property(connector); 2246 - kfree(intel_sdvo_connector); 2247 return false; 2248 } 2249 ··· 2520 uint16_t response; 2521 } enhancements; 2522 2523 - if (!intel_sdvo_get_value(intel_sdvo, 2524 - SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2525 - &enhancements, sizeof(enhancements))) 2526 - return false; 2527 - 2528 if (enhancements.response == 0) { 2529 DRM_DEBUG_KMS("No enhancement is supported\n"); 2530 return true;
··· 2170 return true; 2171 2172 err: 2173 + intel_sdvo_destroy(connector); 2174 return false; 2175 } 2176 ··· 2243 return true; 2244 2245 err: 2246 + intel_sdvo_destroy(connector); 2247 return false; 2248 } 2249 ··· 2522 uint16_t response; 2523 } enhancements; 2524 2525 + enhancements.response = 0; 2526 + intel_sdvo_get_value(intel_sdvo, 2527 + SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2528 + &enhancements, sizeof(enhancements)); 2529 if (enhancements.response == 0) { 2530 DRM_DEBUG_KMS("No enhancement is supported\n"); 2531 return true;
+1
drivers/hwmon/coretemp.c
··· 36 #include <linux/pci.h> 37 #include <asm/msr.h> 38 #include <asm/processor.h> 39 40 #define DRVNAME "coretemp" 41
··· 36 #include <linux/pci.h> 37 #include <asm/msr.h> 38 #include <asm/processor.h> 39 + #include <asm/smp.h> 40 41 #define DRVNAME "coretemp" 42
+4 -2
drivers/infiniband/hw/cxgb3/iwch_cm.c
··· 463 V_MSS_IDX(mtu_idx) | 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 466 - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 467 skb->priority = CPL_PRIORITY_SETUP; 468 set_arp_failure_handler(skb, act_open_req_arp_failure); 469 ··· 1281 V_MSS_IDX(mtu_idx) | 1282 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1283 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1284 - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1285 1286 rpl = cplhdr(skb); 1287 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
··· 463 V_MSS_IDX(mtu_idx) | 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 466 + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | 467 + V_CONG_CONTROL_FLAVOR(cong_flavor); 468 skb->priority = CPL_PRIORITY_SETUP; 469 set_arp_failure_handler(skb, act_open_req_arp_failure); 470 ··· 1280 V_MSS_IDX(mtu_idx) | 1281 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1282 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1283 + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | 1284 + V_CONG_CONTROL_FLAVOR(cong_flavor); 1285 1286 rpl = cplhdr(skb); 1287 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+5 -4
drivers/leds/leds-ns2.c
··· 81 int cmd_level; 82 int slow_level; 83 84 - read_lock(&led_dat->rw_lock); 85 86 cmd_level = gpio_get_value(led_dat->cmd); 87 slow_level = gpio_get_value(led_dat->slow); ··· 95 } 96 } 97 98 - read_unlock(&led_dat->rw_lock); 99 100 return ret; 101 } ··· 104 enum ns2_led_modes mode) 105 { 106 int i; 107 108 - write_lock(&led_dat->rw_lock); 109 110 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 if (mode == ns2_led_modval[i].mode) { ··· 117 } 118 } 119 120 - write_unlock(&led_dat->rw_lock); 121 } 122 123 static void ns2_led_set(struct led_classdev *led_cdev,
··· 81 int cmd_level; 82 int slow_level; 83 84 + read_lock_irq(&led_dat->rw_lock); 85 86 cmd_level = gpio_get_value(led_dat->cmd); 87 slow_level = gpio_get_value(led_dat->slow); ··· 95 } 96 } 97 98 + read_unlock_irq(&led_dat->rw_lock); 99 100 return ret; 101 } ··· 104 enum ns2_led_modes mode) 105 { 106 int i; 107 + unsigned long flags; 108 109 + write_lock_irqsave(&led_dat->rw_lock, flags); 110 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 112 if (mode == ns2_led_modval[i].mode) { ··· 116 } 117 } 118 119 + write_unlock_irqrestore(&led_dat->rw_lock, flags); 120 } 121 122 static void ns2_led_set(struct led_classdev *led_cdev,
+8 -4
drivers/mmc/host/sdhci-s3c.c
··· 241 static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 242 { 243 struct sdhci_host *host = platform_get_drvdata(dev); 244 if (host) { 245 - spin_lock(&host->lock); 246 if (state) { 247 dev_dbg(&dev->dev, "card inserted.\n"); 248 host->flags &= ~SDHCI_DEVICE_DEAD; ··· 255 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 256 } 257 tasklet_schedule(&host->card_tasklet); 258 - spin_unlock(&host->lock); 259 } 260 } 261 ··· 483 sdhci_remove_host(host, 1); 484 485 for (ptr = 0; ptr < 3; ptr++) { 486 - clk_disable(sc->clk_bus[ptr]); 487 - clk_put(sc->clk_bus[ptr]); 488 } 489 clk_disable(sc->clk_io); 490 clk_put(sc->clk_io);
··· 241 static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 242 { 243 struct sdhci_host *host = platform_get_drvdata(dev); 244 + unsigned long flags; 245 + 246 if (host) { 247 + spin_lock_irqsave(&host->lock, flags); 248 if (state) { 249 dev_dbg(&dev->dev, "card inserted.\n"); 250 host->flags &= ~SDHCI_DEVICE_DEAD; ··· 253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 254 } 255 tasklet_schedule(&host->card_tasklet); 256 + spin_unlock_irqrestore(&host->lock, flags); 257 } 258 } 259 ··· 481 sdhci_remove_host(host, 1); 482 483 for (ptr = 0; ptr < 3; ptr++) { 484 + if (sc->clk_bus[ptr]) { 485 + clk_disable(sc->clk_bus[ptr]); 486 + clk_put(sc->clk_bus[ptr]); 487 + } 488 } 489 clk_disable(sc->clk_io); 490 clk_put(sc->clk_io);
+10
drivers/net/3c59x.c
··· 2942 { 2943 struct vortex_private *vp = netdev_priv(dev); 2944 2945 wol->supported = WAKE_MAGIC; 2946 2947 wol->wolopts = 0; ··· 2955 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2956 { 2957 struct vortex_private *vp = netdev_priv(dev); 2958 if (wol->wolopts & ~WAKE_MAGIC) 2959 return -EINVAL; 2960 ··· 3207 vp->enable_wol = 0; 3208 return; 3209 } 3210 3211 /* Change the power state to D3; RxEnable doesn't take effect. */ 3212 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
··· 2942 { 2943 struct vortex_private *vp = netdev_priv(dev); 2944 2945 + if (!VORTEX_PCI(vp)) 2946 + return; 2947 + 2948 wol->supported = WAKE_MAGIC; 2949 2950 wol->wolopts = 0; ··· 2952 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2953 { 2954 struct vortex_private *vp = netdev_priv(dev); 2955 + 2956 + if (!VORTEX_PCI(vp)) 2957 + return -EOPNOTSUPP; 2958 + 2959 if (wol->wolopts & ~WAKE_MAGIC) 2960 return -EINVAL; 2961 ··· 3200 vp->enable_wol = 0; 3201 return; 3202 } 3203 + 3204 + if (VORTEX_PCI(vp)->current_state < PCI_D3hot) 3205 + return; 3206 3207 /* Change the power state to D3; RxEnable doesn't take effect. */ 3208 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
+9 -2
drivers/net/atlx/atl1.c
··· 1251 1252 rrd_ring->desc = NULL; 1253 rrd_ring->dma = 0; 1254 } 1255 1256 static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) ··· 2853 pci_enable_wake(pdev, PCI_D3cold, 0); 2854 2855 atl1_reset_hw(&adapter->hw); 2856 - adapter->cmb.cmb->int_stats = 0; 2857 2858 - if (netif_running(netdev)) 2859 atl1_up(adapter); 2860 netif_device_attach(netdev); 2861 2862 return 0;
··· 1251 1252 rrd_ring->desc = NULL; 1253 rrd_ring->dma = 0; 1254 + 1255 + adapter->cmb.dma = 0; 1256 + adapter->cmb.cmb = NULL; 1257 + 1258 + adapter->smb.dma = 0; 1259 + adapter->smb.smb = NULL; 1260 } 1261 1262 static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) ··· 2847 pci_enable_wake(pdev, PCI_D3cold, 0); 2848 2849 atl1_reset_hw(&adapter->hw); 2850 2851 + if (netif_running(netdev)) { 2852 + adapter->cmb.cmb->int_stats = 0; 2853 atl1_up(adapter); 2854 + } 2855 netif_device_attach(netdev); 2856 2857 return 0;
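The teardown path now clears the coalescing-block pointers and the resume path only touches them when the interface was running, so a suspend/resume cycle on a downed interface no longer dereferences a stale pointer. A generic sketch of that defensive pattern (names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct stats_block {
	int int_stats;
};

static struct stats_block *cmb;

static void free_rings(void)
{
	free(cmb);
	cmb = NULL;	/* clear the pointer so a later resume cannot reuse it */
}

static void resume(int netif_running)
{
	/* Only touch the block if the interface is up and it is allocated. */
	if (netif_running && cmb)
		cmb->int_stats = 0;
}

int main(void)
{
	cmb = calloc(1, sizeof(*cmb));
	free_rings();
	resume(1);	/* safe: pointer was cleared, nothing is dereferenced */
	printf("ok\n");
	return 0;
}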
+1
drivers/net/e1000e/hw.h
··· 57 E1000_SCTL = 0x00024, /* SerDes Control - RW */ 58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ 59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ 60 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 61 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 62 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
··· 57 E1000_SCTL = 0x00024, /* SerDes Control - RW */ 58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ 59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ 60 + E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ 61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 62 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
+164 -33
drivers/net/e1000e/ich8lan.c
··· 105 #define E1000_FEXTNVM_SW_CONFIG 1 106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 107 108 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 109 110 #define E1000_ICH_RAR_ENTRIES 7 ··· 129 130 /* SMBus Address Phy Register */ 131 #define HV_SMB_ADDR PHY_REG(768, 26) 132 #define HV_SMB_ADDR_PEC_EN 0x0200 133 #define HV_SMB_ADDR_VALID 0x0080 134 ··· 242 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 243 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 244 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 245 246 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 247 { ··· 279 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 280 { 281 struct e1000_phy_info *phy = &hw->phy; 282 - u32 ctrl; 283 s32 ret_val = 0; 284 285 phy->addr = 1; ··· 301 * disabled, then toggle the LANPHYPC Value bit to force 302 * the interconnect to PCIe mode. 303 */ 304 - if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 305 ctrl = er32(CTRL); 306 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 307 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; ··· 311 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 312 ew32(CTRL, ctrl); 313 msleep(50); 314 } 315 316 /* ··· 329 ret_val = e1000e_phy_hw_reset_generic(hw); 330 if (ret_val) 331 goto out; 332 333 phy->id = e1000_phy_unknown; 334 ret_val = e1000e_get_phy_id(hw); ··· 583 if (mac->type == e1000_ich8lan) 584 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 585 586 - /* Disable PHY configuration by hardware, config by software */ 587 - if (mac->type == e1000_pch2lan) { 588 - u32 extcnf_ctrl = er32(EXTCNF_CTRL); 589 - 590 - extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; 591 - ew32(EXTCNF_CTRL, extcnf_ctrl); 592 - } 593 594 return 0; 595 } ··· 667 668 if (hw->phy.type == e1000_phy_82578) { 669 ret_val = e1000_link_stall_workaround_hv(hw); 670 if (ret_val) 671 goto out; 672 } ··· 920 } 921 922 /** 923 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 924 * @hw: pointer to the HW structure 925 * ··· 956 **/ 957 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 958 { 959 - struct e1000_adapter *adapter = hw->adapter; 960 struct e1000_phy_info *phy = &hw->phy; 961 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 962 s32 ret_val = 0; ··· 973 if (phy->type != e1000_phy_igp_3) 974 return ret_val; 975 976 - if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { 977 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 978 break; 979 } ··· 1014 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1015 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1016 1017 - if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1018 - ((hw->mac.type == e1000_pchlan) || 1019 - (hw->mac.type == e1000_pch2lan))) { 1020 /* 1021 * HW configures the SMBus address and LEDs when the 1022 * OEM and LCD Write Enable bits are set in the NVM. 1023 * When both NVM bits are cleared, SW will configure 1024 * them instead. 
1025 */ 1026 - data = er32(STRAP); 1027 - data &= E1000_STRAP_SMBUS_ADDRESS_MASK; 1028 - reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT; 1029 - reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 1030 - ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, 1031 - reg_data); 1032 if (ret_val) 1033 goto out; 1034 ··· 1488 goto out; 1489 1490 /* Enable jumbo frame workaround in the PHY */ 1491 - e1e_rphy(hw, PHY_REG(769, 20), &data); 1492 - ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); 1493 - if (ret_val) 1494 - goto out; 1495 e1e_rphy(hw, PHY_REG(769, 23), &data); 1496 data &= ~(0x7F << 5); 1497 data |= (0x37 << 5); ··· 1496 goto out; 1497 e1e_rphy(hw, PHY_REG(769, 16), &data); 1498 data &= ~(1 << 13); 1499 - data |= (1 << 12); 1500 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1501 if (ret_val) 1502 goto out; ··· 1520 1521 mac_reg = er32(RCTL); 1522 mac_reg &= ~E1000_RCTL_SECRC; 1523 - ew32(FFLT_DBG, mac_reg); 1524 1525 ret_val = e1000e_read_kmrn_reg(hw, 1526 E1000_KMRNCTRLSTA_CTRL_OFFSET, ··· 1546 goto out; 1547 1548 /* Write PHY register values back to h/w defaults */ 1549 - e1e_rphy(hw, PHY_REG(769, 20), &data); 1550 - ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); 1551 - if (ret_val) 1552 - goto out; 1553 e1e_rphy(hw, PHY_REG(769, 23), &data); 1554 data &= ~(0x7F << 5); 1555 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 1556 if (ret_val) 1557 goto out; 1558 e1e_rphy(hw, PHY_REG(769, 16), &data); 1559 - data &= ~(1 << 12); 1560 data |= (1 << 13); 1561 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1562 if (ret_val) ··· 1594 1595 out: 1596 return ret_val; 1597 } 1598 1599 /** ··· 1703 if (e1000_check_reset_block(hw)) 1704 goto out; 1705 1706 /* Perform any necessary post-reset workarounds */ 1707 switch (hw->mac.type) { 1708 case e1000_pchlan: ··· 1734 /* Configure the LCD with the OEM bits in NVM */ 1735 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1736 1737 out: 1738 return ret_val; 1739 } ··· 1756 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 1757 { 1758 s32 ret_val = 0; 1759 1760 ret_val = e1000e_phy_hw_reset_generic(hw); 1761 if (ret_val) ··· 3026 * external PHY is reset. 3027 */ 3028 ctrl |= E1000_CTRL_PHY_RST; 3029 } 3030 ret_val = e1000_acquire_swflag_ich8lan(hw); 3031 e_dbg("Issuing a global reset to ich8lan\n"); ··· 3584 void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3585 { 3586 u32 phy_ctrl; 3587 3588 phy_ctrl = er32(PHY_CTRL); 3589 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3590 ew32(PHY_CTRL, phy_ctrl); 3591 3592 - if (hw->mac.type >= e1000_pchlan) 3593 - e1000_phy_hw_reset_ich8lan(hw); 3594 } 3595 3596 /**
··· 105 #define E1000_FEXTNVM_SW_CONFIG 1 106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 107 108 + #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 109 + #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 110 + #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 111 + 112 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 113 114 #define E1000_ICH_RAR_ENTRIES 7 ··· 125 126 /* SMBus Address Phy Register */ 127 #define HV_SMB_ADDR PHY_REG(768, 26) 128 + #define HV_SMB_ADDR_MASK 0x007F 129 #define HV_SMB_ADDR_PEC_EN 0x0200 130 #define HV_SMB_ADDR_VALID 0x0080 131 ··· 237 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 238 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 239 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 240 + static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 241 + static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 242 243 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 244 { ··· 272 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 273 { 274 struct e1000_phy_info *phy = &hw->phy; 275 + u32 ctrl, fwsm; 276 s32 ret_val = 0; 277 278 phy->addr = 1; ··· 294 * disabled, then toggle the LANPHYPC Value bit to force 295 * the interconnect to PCIe mode. 296 */ 297 + fwsm = er32(FWSM); 298 + if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) { 299 ctrl = er32(CTRL); 300 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 301 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; ··· 303 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 304 ew32(CTRL, ctrl); 305 msleep(50); 306 + 307 + /* 308 + * Gate automatic PHY configuration by hardware on 309 + * non-managed 82579 310 + */ 311 + if (hw->mac.type == e1000_pch2lan) 312 + e1000_gate_hw_phy_config_ich8lan(hw, true); 313 } 314 315 /* ··· 314 ret_val = e1000e_phy_hw_reset_generic(hw); 315 if (ret_val) 316 goto out; 317 + 318 + /* Ungate automatic PHY configuration on non-managed 82579 */ 319 + if ((hw->mac.type == e1000_pch2lan) && 320 + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 321 + msleep(10); 322 + e1000_gate_hw_phy_config_ich8lan(hw, false); 323 + } 324 325 phy->id = e1000_phy_unknown; 326 ret_val = e1000e_get_phy_id(hw); ··· 561 if (mac->type == e1000_ich8lan) 562 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 563 564 + /* Gate automatic PHY configuration by hardware on managed 82579 */ 565 + if ((mac->type == e1000_pch2lan) && 566 + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 567 + e1000_gate_hw_phy_config_ich8lan(hw, true); 568 569 return 0; 570 } ··· 648 649 if (hw->phy.type == e1000_phy_82578) { 650 ret_val = e1000_link_stall_workaround_hv(hw); 651 + if (ret_val) 652 + goto out; 653 + } 654 + 655 + if (hw->mac.type == e1000_pch2lan) { 656 + ret_val = e1000_k1_workaround_lv(hw); 657 if (ret_val) 658 goto out; 659 } ··· 895 } 896 897 /** 898 + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states 899 + * @hw: pointer to the HW structure 900 + * 901 + * Assumes semaphore already acquired. 
902 + * 903 + **/ 904 + static s32 e1000_write_smbus_addr(struct e1000_hw *hw) 905 + { 906 + u16 phy_data; 907 + u32 strap = er32(STRAP); 908 + s32 ret_val = 0; 909 + 910 + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 911 + 912 + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); 913 + if (ret_val) 914 + goto out; 915 + 916 + phy_data &= ~HV_SMB_ADDR_MASK; 917 + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 918 + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 919 + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 920 + 921 + out: 922 + return ret_val; 923 + } 924 + 925 + /** 926 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 927 * @hw: pointer to the HW structure 928 * ··· 903 **/ 904 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 905 { 906 struct e1000_phy_info *phy = &hw->phy; 907 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 908 s32 ret_val = 0; ··· 921 if (phy->type != e1000_phy_igp_3) 922 return ret_val; 923 924 + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || 925 + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { 926 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 927 break; 928 } ··· 961 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 963 964 + if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 965 + (hw->mac.type == e1000_pchlan)) || 966 + (hw->mac.type == e1000_pch2lan)) { 967 /* 968 * HW configures the SMBus address and LEDs when the 969 * OEM and LCD Write Enable bits are set in the NVM. 970 * When both NVM bits are cleared, SW will configure 971 * them instead. 972 */ 973 + ret_val = e1000_write_smbus_addr(hw); 974 if (ret_val) 975 goto out; 976 ··· 1440 goto out; 1441 1442 /* Enable jumbo frame workaround in the PHY */ 1443 e1e_rphy(hw, PHY_REG(769, 23), &data); 1444 data &= ~(0x7F << 5); 1445 data |= (0x37 << 5); ··· 1452 goto out; 1453 e1e_rphy(hw, PHY_REG(769, 16), &data); 1454 data &= ~(1 << 13); 1455 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1456 if (ret_val) 1457 goto out; ··· 1477 1478 mac_reg = er32(RCTL); 1479 mac_reg &= ~E1000_RCTL_SECRC; 1480 + ew32(RCTL, mac_reg); 1481 1482 ret_val = e1000e_read_kmrn_reg(hw, 1483 E1000_KMRNCTRLSTA_CTRL_OFFSET, ··· 1503 goto out; 1504 1505 /* Write PHY register values back to h/w defaults */ 1506 e1e_rphy(hw, PHY_REG(769, 23), &data); 1507 data &= ~(0x7F << 5); 1508 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 1509 if (ret_val) 1510 goto out; 1511 e1e_rphy(hw, PHY_REG(769, 16), &data); 1512 data |= (1 << 13); 1513 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1514 if (ret_val) ··· 1556 1557 out: 1558 return ret_val; 1559 + } 1560 + 1561 + /** 1562 + * e1000_k1_gig_workaround_lv - K1 Si workaround 1563 + * @hw: pointer to the HW structure 1564 + * 1565 + * Workaround to set the K1 beacon duration for 82579 parts 1566 + **/ 1567 + static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 1568 + { 1569 + s32 ret_val = 0; 1570 + u16 status_reg = 0; 1571 + u32 mac_reg; 1572 + 1573 + if (hw->mac.type != e1000_pch2lan) 1574 + goto out; 1575 + 1576 + /* Set K1 beacon duration based on 1Gbps speed or otherwise */ 1577 + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); 1578 + if (ret_val) 1579 + goto out; 1580 + 1581 + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 1582 + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 1583 + mac_reg = er32(FEXTNVM4); 1584 + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 1585 + 1586 + if 
(status_reg & HV_M_STATUS_SPEED_1000) 1587 + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 1588 + else 1589 + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 1590 + 1591 + ew32(FEXTNVM4, mac_reg); 1592 + } 1593 + 1594 + out: 1595 + return ret_val; 1596 + } 1597 + 1598 + /** 1599 + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware 1600 + * @hw: pointer to the HW structure 1601 + * @gate: boolean set to true to gate, false to ungate 1602 + * 1603 + * Gate/ungate the automatic PHY configuration via hardware; perform 1604 + * the configuration via software instead. 1605 + **/ 1606 + static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) 1607 + { 1608 + u32 extcnf_ctrl; 1609 + 1610 + if (hw->mac.type != e1000_pch2lan) 1611 + return; 1612 + 1613 + extcnf_ctrl = er32(EXTCNF_CTRL); 1614 + 1615 + if (gate) 1616 + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; 1617 + else 1618 + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; 1619 + 1620 + ew32(EXTCNF_CTRL, extcnf_ctrl); 1621 + return; 1622 } 1623 1624 /** ··· 1602 if (e1000_check_reset_block(hw)) 1603 goto out; 1604 1605 + /* Allow time for h/w to get to quiescent state after reset */ 1606 + msleep(10); 1607 + 1608 /* Perform any necessary post-reset workarounds */ 1609 switch (hw->mac.type) { 1610 case e1000_pchlan: ··· 1630 /* Configure the LCD with the OEM bits in NVM */ 1631 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1632 1633 + /* Ungate automatic PHY configuration on non-managed 82579 */ 1634 + if ((hw->mac.type == e1000_pch2lan) && 1635 + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 1636 + msleep(10); 1637 + e1000_gate_hw_phy_config_ich8lan(hw, false); 1638 + } 1639 + 1640 out: 1641 return ret_val; 1642 } ··· 1645 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 1646 { 1647 s32 ret_val = 0; 1648 + 1649 + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 1650 + if ((hw->mac.type == e1000_pch2lan) && 1651 + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 1652 + e1000_gate_hw_phy_config_ich8lan(hw, true); 1653 1654 ret_val = e1000e_phy_hw_reset_generic(hw); 1655 if (ret_val) ··· 2910 * external PHY is reset. 2911 */ 2912 ctrl |= E1000_CTRL_PHY_RST; 2913 + 2914 + /* 2915 + * Gate automatic PHY configuration by hardware on 2916 + * non-managed 82579 2917 + */ 2918 + if ((hw->mac.type == e1000_pch2lan) && 2919 + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 2920 + e1000_gate_hw_phy_config_ich8lan(hw, true); 2921 } 2922 ret_val = e1000_acquire_swflag_ich8lan(hw); 2923 e_dbg("Issuing a global reset to ich8lan\n"); ··· 3460 void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3461 { 3462 u32 phy_ctrl; 3463 + s32 ret_val; 3464 3465 phy_ctrl = er32(PHY_CTRL); 3466 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3467 ew32(PHY_CTRL, phy_ctrl); 3468 3469 + if (hw->mac.type >= e1000_pchlan) { 3470 + e1000_oem_bits_config_ich8lan(hw, true); 3471 + ret_val = hw->phy.ops.acquire(hw); 3472 + if (ret_val) 3473 + return; 3474 + e1000_write_smbus_addr(hw); 3475 + hw->phy.ops.release(hw); 3476 + } 3477 } 3478 3479 /**
+19 -10
drivers/net/e1000e/netdev.c
··· 2704 u32 psrctl = 0; 2705 u32 pages = 0; 2706 2707 /* Program MC offset vector base */ 2708 rctl = er32(RCTL); 2709 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); ··· 2752 e1e_wphy(hw, 0x10, 0x2823); 2753 e1e_wphy(hw, 0x11, 0x0003); 2754 e1e_wphy(hw, 22, phy_data); 2755 - } 2756 - 2757 - /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2758 - if (hw->mac.type == e1000_pch2lan) { 2759 - s32 ret_val; 2760 - 2761 - if (rctl & E1000_RCTL_LPE) 2762 - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2763 - else 2764 - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2765 } 2766 2767 /* Setup buffer sizes */ ··· 4830 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 4831 (max_frame > adapter->max_hw_frame_size)) { 4832 e_err("Unsupported MTU setting\n"); 4833 return -EINVAL; 4834 } 4835
··· 2704 u32 psrctl = 0; 2705 u32 pages = 0; 2706 2707 + /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2708 + if (hw->mac.type == e1000_pch2lan) { 2709 + s32 ret_val; 2710 + 2711 + if (adapter->netdev->mtu > ETH_DATA_LEN) 2712 + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2713 + else 2714 + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2715 + } 2716 + 2717 /* Program MC offset vector base */ 2718 rctl = er32(RCTL); 2719 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); ··· 2742 e1e_wphy(hw, 0x10, 0x2823); 2743 e1e_wphy(hw, 0x11, 0x0003); 2744 e1e_wphy(hw, 22, phy_data); 2745 } 2746 2747 /* Setup buffer sizes */ ··· 4830 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 4831 (max_frame > adapter->max_hw_frame_size)) { 4832 e_err("Unsupported MTU setting\n"); 4833 + return -EINVAL; 4834 + } 4835 + 4836 + /* Jumbo frame workaround on 82579 requires CRC be stripped */ 4837 + if ((adapter->hw.mac.type == e1000_pch2lan) && 4838 + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 4839 + (new_mtu > ETH_DATA_LEN)) { 4840 + e_err("Jumbo Frames not supported on 82579 when CRC " 4841 + "stripping is disabled.\n"); 4842 return -EINVAL; 4843 } 4844
+2 -2
drivers/net/ibm_newemac/core.c
··· 2928 if (dev->emac_irq != NO_IRQ) 2929 irq_dispose_mapping(dev->emac_irq); 2930 err_free: 2931 - kfree(ndev); 2932 err_gone: 2933 /* if we were on the bootlist, remove us as we won't show up and 2934 * wake up all waiters to notify them in case they were waiting ··· 2971 if (dev->emac_irq != NO_IRQ) 2972 irq_dispose_mapping(dev->emac_irq); 2973 2974 - kfree(dev->ndev); 2975 2976 return 0; 2977 }
··· 2928 if (dev->emac_irq != NO_IRQ) 2929 irq_dispose_mapping(dev->emac_irq); 2930 err_free: 2931 + free_netdev(ndev); 2932 err_gone: 2933 /* if we were on the bootlist, remove us as we won't show up and 2934 * wake up all waiters to notify them in case they were waiting ··· 2971 if (dev->emac_irq != NO_IRQ) 2972 irq_dispose_mapping(dev->emac_irq); 2973 2974 + free_netdev(dev->ndev); 2975 2976 return 0; 2977 }
-3
drivers/net/netxen/netxen_nic_init.c
··· 1540 if (pkt_offset) 1541 skb_pull(skb, pkt_offset); 1542 1543 - skb->truesize = skb->len + sizeof(struct sk_buff); 1544 skb->protocol = eth_type_trans(skb, netdev); 1545 1546 napi_gro_receive(&sds_ring->napi, skb); ··· 1600 data_offset = l4_hdr_offset + TCP_HDR_SIZE; 1601 1602 skb_put(skb, lro_length + data_offset); 1603 - 1604 - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); 1605 1606 skb_pull(skb, l2_hdr_offset); 1607 skb->protocol = eth_type_trans(skb, netdev);
··· 1540 if (pkt_offset) 1541 skb_pull(skb, pkt_offset); 1542 1543 skb->protocol = eth_type_trans(skb, netdev); 1544 1545 napi_gro_receive(&sds_ring->napi, skb); ··· 1601 data_offset = l4_hdr_offset + TCP_HDR_SIZE; 1602 1603 skb_put(skb, lro_length + data_offset); 1604 1605 skb_pull(skb, l2_hdr_offset); 1606 skb->protocol = eth_type_trans(skb, netdev);
+1 -6
drivers/net/qlcnic/qlcnic_init.c
··· 1316 return -ENOMEM; 1317 } 1318 1319 - skb_reserve(skb, 2); 1320 1321 dma = pci_map_single(pdev, skb->data, 1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE); ··· 1404 if (pkt_offset) 1405 skb_pull(skb, pkt_offset); 1406 1407 - skb->truesize = skb->len + sizeof(struct sk_buff); 1408 skb->protocol = eth_type_trans(skb, netdev); 1409 1410 napi_gro_receive(&sds_ring->napi, skb); ··· 1464 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; 1465 1466 skb_put(skb, lro_length + data_offset); 1467 - 1468 - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); 1469 1470 skb_pull(skb, l2_hdr_offset); 1471 skb->protocol = eth_type_trans(skb, netdev); ··· 1696 1697 if (pkt_offset) 1698 skb_pull(skb, pkt_offset); 1699 - 1700 - skb->truesize = skb->len + sizeof(struct sk_buff); 1701 1702 if (!qlcnic_check_loopback_buff(skb->data)) 1703 adapter->diag_cnt++;
··· 1316 return -ENOMEM; 1317 } 1318 1319 + skb_reserve(skb, NET_IP_ALIGN); 1320 1321 dma = pci_map_single(pdev, skb->data, 1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE); ··· 1404 if (pkt_offset) 1405 skb_pull(skb, pkt_offset); 1406 1407 skb->protocol = eth_type_trans(skb, netdev); 1408 1409 napi_gro_receive(&sds_ring->napi, skb); ··· 1465 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; 1466 1467 skb_put(skb, lro_length + data_offset); 1468 1469 skb_pull(skb, l2_hdr_offset); 1470 skb->protocol = eth_type_trans(skb, netdev); ··· 1699 1700 if (pkt_offset) 1701 skb_pull(skb, pkt_offset); 1702 1703 if (!qlcnic_check_loopback_buff(skb->data)) 1704 adapter->diag_cnt++;
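Reserving NET_IP_ALIGN (2 bytes on most architectures) instead of a bare 2 makes the intent explicit: after the 14-byte Ethernet header, the IP header lands on a 4-byte boundary. A small arithmetic sketch of why the offset helps:

#include <stdio.h>

#define ETH_HLEN 14
#define NET_IP_ALIGN 2	/* default value on most architectures */

int main(void)
{
	unsigned long buf = 0;	/* assume the buffer itself starts 4-byte aligned */

	printf("IP header offset without reserve: %lu (mod 4 = %lu)\n",
	       buf + ETH_HLEN, (buf + ETH_HLEN) % 4);
	printf("IP header offset with reserve:    %lu (mod 4 = %lu)\n",
	       buf + NET_IP_ALIGN + ETH_HLEN,
	       (buf + NET_IP_ALIGN + ETH_HLEN) % 4);
	return 0;
}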
+1 -1
drivers/net/rionet.c
··· 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 385 __ilog2(sizeof(void *)) + 4 : 0); 386 unregister_netdev(ndev); 387 - kfree(ndev); 388 389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 390 list_del(&peer->node);
··· 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 385 __ilog2(sizeof(void *)) + 4 : 0); 386 unregister_netdev(ndev); 387 + free_netdev(ndev); 388 389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 390 list_del(&peer->node);
+1 -1
drivers/net/sgiseeq.c
··· 804 err_out_free_page: 805 free_page((unsigned long) sp->srings); 806 err_out_free_dev: 807 - kfree(dev); 808 809 err_out: 810 return err;
··· 804 err_out_free_page: 805 free_page((unsigned long) sp->srings); 806 err_out_free_dev: 807 + free_netdev(dev); 808 809 err_out: 810 return err;
+1
drivers/net/smsc911x.c
··· 58 59 MODULE_LICENSE("GPL"); 60 MODULE_VERSION(SMSC_DRV_VERSION); 61 62 #if USE_DEBUG > 0 63 static int debug = 16;
··· 58 59 MODULE_LICENSE("GPL"); 60 MODULE_VERSION(SMSC_DRV_VERSION); 61 + MODULE_ALIAS("platform:smsc911x"); 62 63 #if USE_DEBUG > 0 64 static int debug = 16;
+38 -5
drivers/net/tulip/de2104x.c
··· 243 NWayState = (1 << 14) | (1 << 13) | (1 << 12), 244 NWayRestart = (1 << 12), 245 NonselPortActive = (1 << 9), 246 LinkFailStatus = (1 << 2), 247 NetCxnErr = (1 << 1), 248 }; ··· 364 365 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ 366 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; 367 - static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; 368 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; 369 370 ··· 1067 unsigned int carrier; 1068 unsigned long flags; 1069 1070 carrier = (status & NetCxnErr) ? 0 : 1; 1071 1072 if (carrier) { ··· 1164 static void de_media_interrupt (struct de_private *de, u32 status) 1165 { 1166 if (status & LinkPass) { 1167 de_link_up(de); 1168 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); 1169 return; 1170 } 1171 1172 BUG_ON(!(status & LinkFail)); 1173 - 1174 - if (netif_carrier_ok(de->dev)) { 1175 de_link_down(de); 1176 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1177 } ··· 1250 if (de->de21040) 1251 return; 1252 1253 pci_read_config_dword(de->pdev, PCIPM, &pmctl); 1254 pmctl |= PM_Sleep; 1255 pci_write_config_dword(de->pdev, PCIPM, pmctl); ··· 1596 return 0; /* nothing to change */ 1597 1598 de_link_down(de); 1599 de_stop_rxtx(de); 1600 1601 de->media_type = new_media; 1602 de->media_lock = media_lock; 1603 de->media_advertise = ecmd->advertising; 1604 de_set_media(de); 1605 1606 return 0; 1607 } ··· 1936 for (i = 0; i < DE_MAX_MEDIA; i++) { 1937 if (de->media[i].csr13 == 0xffff) 1938 de->media[i].csr13 = t21041_csr13[i]; 1939 - if (de->media[i].csr14 == 0xffff) 1940 - de->media[i].csr14 = t21041_csr14[i]; 1941 if (de->media[i].csr15 == 0xffff) 1942 de->media[i].csr15 = t21041_csr15[i]; 1943 } ··· 2189 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2190 goto out; 2191 } 2192 de_init_hw(de); 2193 out_attach: 2194 netif_device_attach(dev);
··· 243 NWayState = (1 << 14) | (1 << 13) | (1 << 12), 244 NWayRestart = (1 << 12), 245 NonselPortActive = (1 << 9), 246 + SelPortActive = (1 << 8), 247 LinkFailStatus = (1 << 2), 248 NetCxnErr = (1 << 1), 249 }; ··· 363 364 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ 365 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; 366 + static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; 367 + /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */ 368 + static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; 369 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; 370 371 ··· 1064 unsigned int carrier; 1065 unsigned long flags; 1066 1067 + /* clear port active bits */ 1068 + dw32(SIAStatus, NonselPortActive | SelPortActive); 1069 + 1070 carrier = (status & NetCxnErr) ? 0 : 1; 1071 1072 if (carrier) { ··· 1158 static void de_media_interrupt (struct de_private *de, u32 status) 1159 { 1160 if (status & LinkPass) { 1161 + /* Ignore if current media is AUI or BNC and we can't use TP */ 1162 + if ((de->media_type == DE_MEDIA_AUI || 1163 + de->media_type == DE_MEDIA_BNC) && 1164 + (de->media_lock || 1165 + !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))) 1166 + return; 1167 + /* If current media is not TP, change it to TP */ 1168 + if ((de->media_type == DE_MEDIA_AUI || 1169 + de->media_type == DE_MEDIA_BNC)) { 1170 + de->media_type = DE_MEDIA_TP_AUTO; 1171 + de_stop_rxtx(de); 1172 + de_set_media(de); 1173 + de_start_rxtx(de); 1174 + } 1175 de_link_up(de); 1176 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); 1177 return; 1178 } 1179 1180 BUG_ON(!(status & LinkFail)); 1181 + /* Mark the link as down only if current media is TP */ 1182 + if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI && 1183 + de->media_type != DE_MEDIA_BNC) { 1184 de_link_down(de); 1185 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1186 } ··· 1229 if (de->de21040) 1230 return; 1231 1232 + dw32(CSR13, 0); /* Reset phy */ 1233 pci_read_config_dword(de->pdev, PCIPM, &pmctl); 1234 pmctl |= PM_Sleep; 1235 pci_write_config_dword(de->pdev, PCIPM, pmctl); ··· 1574 return 0; /* nothing to change */ 1575 1576 de_link_down(de); 1577 + mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1578 de_stop_rxtx(de); 1579 1580 de->media_type = new_media; 1581 de->media_lock = media_lock; 1582 de->media_advertise = ecmd->advertising; 1583 de_set_media(de); 1584 + if (netif_running(de->dev)) 1585 + de_start_rxtx(de); 1586 1587 return 0; 1588 } ··· 1911 for (i = 0; i < DE_MAX_MEDIA; i++) { 1912 if (de->media[i].csr13 == 0xffff) 1913 de->media[i].csr13 = t21041_csr13[i]; 1914 + if (de->media[i].csr14 == 0xffff) { 1915 + /* autonegotiation is broken at least on some chip 1916 + revisions - rev. 0x21 works, 0x11 does not */ 1917 + if (de->pdev->revision < 0x20) 1918 + de->media[i].csr14 = t21041_csr14_brk[i]; 1919 + else 1920 + de->media[i].csr14 = t21041_csr14[i]; 1921 + } 1922 if (de->media[i].csr15 == 0xffff) 1923 de->media[i].csr15 = t21041_csr15[i]; 1924 } ··· 2158 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2159 goto out; 2160 } 2161 + pci_set_master(pdev); 2162 + de_init_rings(de); 2163 de_init_hw(de); 2164 out_attach: 2165 netif_device_attach(dev);
+5
drivers/net/wireless/iwlwifi/iwl-core.c
··· 2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2614 return -EINVAL; 2615 2616 if (mode >= IWL_MAX_FORCE_RESET) { 2617 IWL_DEBUG_INFO(priv, "invalid reset request.\n"); 2618 return -EINVAL;
··· 2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2614 return -EINVAL; 2615 2616 + if (test_bit(STATUS_SCANNING, &priv->status)) { 2617 + IWL_DEBUG_INFO(priv, "scan in progress.\n"); 2618 + return -EINVAL; 2619 + } 2620 + 2621 if (mode >= IWL_MAX_FORCE_RESET) { 2622 IWL_DEBUG_INFO(priv, "invalid reset request.\n"); 2623 return -EINVAL;
+27
drivers/pci/intel-iommu.c
··· 3757 3758 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3759 3760 /* On Tylersburg chipsets, some BIOSes have been known to enable the 3761 ISOCH DMAR unit for the Azalia sound device, but not give it any 3762 TLB entries, which causes it to deadlock. Check for that. We do
··· 3757 3758 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3759 3760 + #define GGC 0x52 3761 + #define GGC_MEMORY_SIZE_MASK (0xf << 8) 3762 + #define GGC_MEMORY_SIZE_NONE (0x0 << 8) 3763 + #define GGC_MEMORY_SIZE_1M (0x1 << 8) 3764 + #define GGC_MEMORY_SIZE_2M (0x3 << 8) 3765 + #define GGC_MEMORY_VT_ENABLED (0x8 << 8) 3766 + #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) 3767 + #define GGC_MEMORY_SIZE_3M_VT (0xa << 8) 3768 + #define GGC_MEMORY_SIZE_4M_VT (0xb << 8) 3769 + 3770 + static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) 3771 + { 3772 + unsigned short ggc; 3773 + 3774 + if (pci_read_config_word(dev, GGC, &ggc)) 3775 + return; 3776 + 3777 + if (!(ggc & GGC_MEMORY_VT_ENABLED)) { 3778 + printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); 3779 + dmar_map_gfx = 0; 3780 + } 3781 + } 3782 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); 3783 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); 3784 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); 3785 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); 3786 + 3787 /* On Tylersburg chipsets, some BIOSes have been known to enable the 3788 ISOCH DMAR unit for the Azalia sound device, but not give it any 3789 TLB entries, which causes it to deadlock. Check for that. We do
+1 -1
drivers/pci/iov.c
··· 608 * the VF BAR size multiplied by the number of VFs. The alignment 609 * is just the VF BAR size. 610 */ 611 - int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 612 { 613 struct resource tmp; 614 enum pci_bar_type type;
··· 608 * the VF BAR size multiplied by the number of VFs. The alignment 609 * is just the VF BAR size. 610 */ 611 + resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 612 { 613 struct resource tmp; 614 enum pci_bar_type type;
+3 -2
drivers/pci/pci.h
··· 264 extern void pci_iov_release(struct pci_dev *dev); 265 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 266 enum pci_bar_type *type); 267 - extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 268 extern void pci_restore_iov_state(struct pci_dev *dev); 269 extern int pci_iov_bus_range(struct pci_bus *bus); 270 ··· 321 } 322 #endif /* CONFIG_PCI_IOV */ 323 324 - static inline int pci_resource_alignment(struct pci_dev *dev, 325 struct resource *res) 326 { 327 #ifdef CONFIG_PCI_IOV
··· 264 extern void pci_iov_release(struct pci_dev *dev); 265 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 266 enum pci_bar_type *type); 267 + extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, 268 + int resno); 269 extern void pci_restore_iov_state(struct pci_dev *dev); 270 extern int pci_iov_bus_range(struct pci_bus *bus); 271 ··· 320 } 321 #endif /* CONFIG_PCI_IOV */ 322 323 + static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, 324 struct resource *res) 325 { 326 #ifdef CONFIG_PCI_IOV
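Editor's note: the point of widening the return type is that an SR-IOV alignment can exceed what fits in an int, and the old signature silently truncated it. A hedged sketch of the failure mode, with resource_size_t assumed to be 64 bits as on 64-bit kernels:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t resource_size_t;

static int alignment_as_int(resource_size_t a)               { return (int)a; }
static resource_size_t alignment_as_wide(resource_size_t a)  { return a; }

int main(void)
{
    resource_size_t align = 1ULL << 32;      /* a 4 GiB VF BAR alignment */

    /* Conversion to int is implementation-defined; typically 0 here. */
    printf("as int:  %d\n", alignment_as_int(align));
    printf("as wide: %llu\n", (unsigned long long)alignment_as_wide(align));
    return 0;
}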
+6
drivers/pcmcia/pcmcia_resource.c
··· 595 if (c->io[1].end) { 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 597 if (ret) { 598 release_io_space(s, &c->io[0]); 599 goto out; 600 } 601 } else
··· 595 if (c->io[1].end) { 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 597 if (ret) { 598 + struct resource tmp = c->io[0]; 599 + /* release the previously allocated resource */ 600 release_io_space(s, &c->io[0]); 601 + /* but preserve the settings, for they worked... */ 602 + c->io[0].end = resource_size(&tmp); 603 + c->io[0].start = tmp.start; 604 + c->io[0].flags = tmp.flags; 605 goto out; 606 } 607 } else
+1 -1
drivers/pcmcia/pd6729.c
··· 646 if (!pci_resource_start(dev, 0)) { 647 dev_warn(&dev->dev, "refusing to load the driver as the " 648 "io_base is NULL.\n"); 649 - goto err_out_free_mem; 650 } 651 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
··· 646 if (!pci_resource_start(dev, 0)) { 647 dev_warn(&dev->dev, "refusing to load the driver as the " 648 "io_base is NULL.\n"); 649 + goto err_out_disable; 650 } 651 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
+2 -2
drivers/s390/net/ctcm_main.c
··· 1154 dev_fsm, dev_fsm_len, GFP_KERNEL); 1155 if (priv->fsm == NULL) { 1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); 1157 - kfree(dev); 1158 return NULL; 1159 } 1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED); ··· 1165 grp = ctcmpc_init_mpc_group(priv); 1166 if (grp == NULL) { 1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); 1168 - kfree(dev); 1169 return NULL; 1170 } 1171 tasklet_init(&grp->mpc_tasklet2,
··· 1154 dev_fsm, dev_fsm_len, GFP_KERNEL); 1155 if (priv->fsm == NULL) { 1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); 1157 + free_netdev(dev); 1158 return NULL; 1159 } 1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED); ··· 1165 grp = ctcmpc_init_mpc_group(priv); 1166 if (grp == NULL) { 1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); 1168 + free_netdev(dev); 1169 return NULL; 1170 } 1171 tasklet_init(&grp->mpc_tasklet2,
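Editor's note: the fix swaps kfree() for free_netdev() because a net_device allocated by alloc_netdev() carries state that only its own destructor knows how to release. A userspace analogy of that pairing (all names hypothetical, not the kernel API):

#include <stdlib.h>

struct dev { char name[16]; void *priv; };

static struct dev *dev_alloc(size_t priv_len)
{
    struct dev *d = calloc(1, sizeof(*d));
    if (!d)
        return NULL;
    d->priv = calloc(1, priv_len);        /* extra state the caller never sees */
    if (!d->priv) { free(d); return NULL; }
    return d;
}

static void dev_free(struct dev *d)       /* the only correct way to release it */
{
    if (!d)
        return;
    free(d->priv);
    free(d);
}

int main(void)
{
    struct dev *d = dev_alloc(128);
    /* free(d) here would leak d->priv; the paired destructor releases both. */
    dev_free(d);
    return 0;
}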
+4
drivers/serial/ioc3_serial.c
··· 2017 struct ioc3_port *port; 2018 struct ioc3_port *ports[PORTS_PER_CARD]; 2019 int phys_port; 2020 2021 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd)); 2022 ··· 2147 2148 /* error exits that give back resources */ 2149 out4: 2150 kfree(card_ptr); 2151 return ret; 2152 }
··· 2017 struct ioc3_port *port; 2018 struct ioc3_port *ports[PORTS_PER_CARD]; 2019 int phys_port; 2020 + int cnt; 2021 2022 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd)); 2023 ··· 2146 2147 /* error exits that give back resources */ 2148 out4: 2149 + for (cnt = 0; cnt < phys_port; cnt++) 2150 + kfree(ports[cnt]); 2151 + 2152 kfree(card_ptr); 2153 return ret; 2154 }
+1 -1
drivers/vhost/net.c
··· 243 int r, nlogs = 0; 244 245 while (datalen > 0) { 246 - if (unlikely(headcount >= VHOST_NET_MAX_SG)) { 247 r = -ENOBUFS; 248 goto err; 249 }
··· 243 int r, nlogs = 0; 244 245 while (datalen > 0) { 246 + if (unlikely(seg >= VHOST_NET_MAX_SG)) { 247 r = -ENOBUFS; 248 goto err; 249 }
+4 -3
drivers/vhost/vhost.c
··· 858 if (r < 0) 859 return r; 860 len -= l; 861 - if (!len) 862 return 0; 863 } 864 - if (vq->log_ctx) 865 - eventfd_signal(vq->log_ctx, 1); 866 /* Length written exceeds what we have stored. This is a bug. */ 867 BUG(); 868 return 0;
··· 858 if (r < 0) 859 return r; 860 len -= l; 861 + if (!len) { 862 + if (vq->log_ctx) 863 + eventfd_signal(vq->log_ctx, 1); 864 return 0; 865 + } 866 } 867 /* Length written exceeds what we have stored. This is a bug. */ 868 BUG(); 869 return 0;
+2 -2
drivers/video/pxa168fb.c
··· 298 * Set bit to enable graphics DMA. 299 */ 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 301 - x |= fbi->active ? 0x00000100 : 0; 302 - fbi->active = 0; 303 304 /* 305 * If we are in a pseudo-color mode, we need to enable
··· 298 * Set bit to enable graphics DMA. 299 */ 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 301 + x &= ~CFG_GRA_ENA_MASK; 302 + x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0); 303 304 /* 305 * If we are in a pseudo-color mode, we need to enable
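Editor's note: the old code could only ever OR the enable bit in, so the plane could never be switched off; the fix clears the field before writing the new value. A small read-modify-write sketch of that pattern; the mask and shift here are illustrative, not taken from the pxa168fb headers:

#include <stdio.h>
#include <stdint.h>

#define CFG_GRA_ENA_MASK  (1u << 8)
#define CFG_GRA_ENA(x)    ((uint32_t)(x) << 8)

static uint32_t update_gra_ena(uint32_t reg, int active)
{
    reg &= ~CFG_GRA_ENA_MASK;             /* drop the old state */
    reg |= CFG_GRA_ENA(active ? 1 : 0);   /* then write the new one */
    return reg;
}

int main(void)
{
    uint32_t reg = 0x0000003f | CFG_GRA_ENA(1);   /* hypothetical register */

    printf("disable: 0x%08x\n", (unsigned)update_gra_ena(reg, 0)); /* bit 8 clear */
    printf("enable:  0x%08x\n", (unsigned)update_gra_ena(reg, 1)); /* bit 8 set */
    return 0;
}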
+2 -2
include/linux/netlink.h
··· 27 28 #define MAX_LINKS 32 29 30 - struct net; 31 - 32 struct sockaddr_nl { 33 sa_family_t nl_family; /* AF_NETLINK */ 34 unsigned short nl_pad; /* zero */ ··· 148 149 #include <linux/capability.h> 150 #include <linux/skbuff.h> 151 152 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) 153 {
··· 27 28 #define MAX_LINKS 32 29 30 struct sockaddr_nl { 31 sa_family_t nl_family; /* AF_NETLINK */ 32 unsigned short nl_pad; /* zero */ ··· 150 151 #include <linux/capability.h> 152 #include <linux/skbuff.h> 153 + 154 + struct net; 155 156 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) 157 {
+3
include/linux/pci_ids.h
··· 393 #define PCI_DEVICE_ID_VLSI_82C147 0x0105 394 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 395 396 #define PCI_VENDOR_ID_ADL 0x1005 397 #define PCI_DEVICE_ID_ADL_2301 0x2301 398
··· 393 #define PCI_DEVICE_ID_VLSI_82C147 0x0105 394 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 395 396 + /* AMD RD890 Chipset */ 397 + #define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 398 + 399 #define PCI_VENDOR_ID_ADL 0x1005 400 #define PCI_DEVICE_ID_ADL_2301 0x2301 401
+1 -1
include/linux/socket.h
··· 322 int offset, 323 unsigned int len, __wsum *csump); 324 325 - extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); 326 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); 327 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, 328 int offset, int len);
··· 322 int offset, 323 unsigned int len, __wsum *csump); 324 325 + extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); 326 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); 327 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, 328 int offset, int len);
+1
include/net/addrconf.h
··· 121 * IPv6 Address Label subsystem (addrlabel.c) 122 */ 123 extern int ipv6_addr_label_init(void); 124 extern void ipv6_addr_label_rtnl_register(void); 125 extern u32 ipv6_addr_label(struct net *net, 126 const struct in6_addr *addr,
··· 121 * IPv6 Address Label subsystem (addrlabel.c) 122 */ 123 extern int ipv6_addr_label_init(void); 124 + extern void ipv6_addr_label_cleanup(void); 125 extern void ipv6_addr_label_rtnl_register(void); 126 extern u32 ipv6_addr_label(struct net *net, 127 const struct in6_addr *addr,
+1
include/net/dst.h
··· 242 dev->stats.rx_packets++; 243 dev->stats.rx_bytes += skb->len; 244 skb->rxhash = 0; 245 skb_dst_drop(skb); 246 nf_reset(skb); 247 }
··· 242 dev->stats.rx_packets++; 243 dev->stats.rx_bytes += skb->len; 244 skb->rxhash = 0; 245 + skb_set_queue_mapping(skb, 0); 246 skb_dst_drop(skb); 247 nf_reset(skb); 248 }
+2
include/net/route.h
··· 199 fl.fl_ip_sport = sport; 200 fl.fl_ip_dport = dport; 201 fl.proto = protocol; 202 ip_rt_put(*rp); 203 *rp = NULL; 204 security_sk_classify_flow(sk, &fl);
··· 199 fl.fl_ip_sport = sport; 200 fl.fl_ip_dport = dport; 201 fl.proto = protocol; 202 + if (inet_sk(sk)->transparent) 203 + fl.flags |= FLOWI_FLAG_ANYSRC; 204 ip_rt_put(*rp); 205 *rp = NULL; 206 security_sk_classify_flow(sk, &fl);
+2 -2
include/net/xfrm.h
··· 298 const struct xfrm_type *type_map[IPPROTO_MAX]; 299 struct xfrm_mode *mode_map[XFRM_MODE_MAX]; 300 int (*init_flags)(struct xfrm_state *x); 301 - void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, 302 - struct xfrm_tmpl *tmpl, 303 xfrm_address_t *daddr, xfrm_address_t *saddr); 304 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 305 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
··· 298 const struct xfrm_type *type_map[IPPROTO_MAX]; 299 struct xfrm_mode *mode_map[XFRM_MODE_MAX]; 300 int (*init_flags)(struct xfrm_state *x); 301 + void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl); 302 + void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl, 303 xfrm_address_t *daddr, xfrm_address_t *saddr); 304 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 305 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
+18 -11
net/9p/trans_rdma.c
··· 426 427 /* Allocate an fcall for the reply */ 428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); 429 - if (!rpl_context) 430 goto err_close; 431 432 /* 433 * If the request has a buffer, steal it, otherwise ··· 447 } 448 rpl_context->rc = req->rc; 449 if (!rpl_context->rc) { 450 - kfree(rpl_context); 451 - goto err_close; 452 } 453 454 /* ··· 460 */ 461 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { 462 err = post_recv(client, rpl_context); 463 - if (err) { 464 - kfree(rpl_context->rc); 465 - kfree(rpl_context); 466 - goto err_close; 467 - } 468 } else 469 atomic_dec(&rdma->rq_count); 470 ··· 470 471 /* Post the request */ 472 c = kmalloc(sizeof *c, GFP_KERNEL); 473 - if (!c) 474 - goto err_close; 475 c->req = req; 476 477 c->busa = ib_dma_map_single(rdma->cm_id->device, ··· 500 return ib_post_send(rdma->qp, &wr, &bad_wr); 501 502 error: 503 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); 504 return -EIO; 505 - 506 err_close: 507 spin_lock_irqsave(&rdma->req_lock, flags); 508 if (rdma->state < P9_RDMA_CLOSING) {
··· 426 427 /* Allocate an fcall for the reply */ 428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); 429 + if (!rpl_context) { 430 + err = -ENOMEM; 431 goto err_close; 432 + } 433 434 /* 435 * If the request has a buffer, steal it, otherwise ··· 445 } 446 rpl_context->rc = req->rc; 447 if (!rpl_context->rc) { 448 + err = -ENOMEM; 449 + goto err_free2; 450 } 451 452 /* ··· 458 */ 459 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { 460 err = post_recv(client, rpl_context); 461 + if (err) 462 + goto err_free1; 463 } else 464 atomic_dec(&rdma->rq_count); 465 ··· 471 472 /* Post the request */ 473 c = kmalloc(sizeof *c, GFP_KERNEL); 474 + if (!c) { 475 + err = -ENOMEM; 476 + goto err_free1; 477 + } 478 c->req = req; 479 480 c->busa = ib_dma_map_single(rdma->cm_id->device, ··· 499 return ib_post_send(rdma->qp, &wr, &bad_wr); 500 501 error: 502 + kfree(c); 503 + kfree(rpl_context->rc); 504 + kfree(rpl_context); 505 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); 506 return -EIO; 507 + err_free1: 508 + kfree(rpl_context->rc); 509 + err_free2: 510 + kfree(rpl_context); 511 err_close: 512 spin_lock_irqsave(&rdma->req_lock, flags); 513 if (rdma->state < P9_RDMA_CLOSING) {
+2 -1
net/9p/trans_virtio.c
··· 329 330 mutex_lock(&virtio_9p_lock); 331 list_for_each_entry(chan, &virtio_chan_list, chan_list) { 332 - if (!strncmp(devname, chan->tag, chan->tag_len)) { 333 if (!chan->inuse) { 334 chan->inuse = true; 335 found = 1;
··· 329 330 mutex_lock(&virtio_9p_lock); 331 list_for_each_entry(chan, &virtio_chan_list, chan_list) { 332 + if (!strncmp(devname, chan->tag, chan->tag_len) && 333 + strlen(devname) == chan->tag_len) { 334 if (!chan->inuse) { 335 chan->inuse = true; 336 found = 1;
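Editor's note: strncmp() bounded by the tag length accepts any mount target that merely starts with the tag, which is why the fix also demands equal lengths. A tiny sketch of the difference (the tag value is hypothetical):

#include <stdio.h>
#include <string.h>

static int tag_matches(const char *devname, const char *tag, size_t tag_len)
{
    return !strncmp(devname, tag, tag_len) && strlen(devname) == tag_len;
}

int main(void)
{
    const char *tag = "vm0";                     /* hypothetical channel tag */

    printf("prefix only:  %d\n", !strncmp("vm01", tag, strlen(tag)));   /* 1: false match */
    printf("exact match:  %d\n", tag_matches("vm01", tag, strlen(tag))); /* 0 */
    printf("exact match:  %d\n", tag_matches("vm0",  tag, strlen(tag))); /* 1 */
    return 0;
}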
+2 -10
net/atm/br2684.c
··· 399 unregister_netdev(net_dev); 400 free_netdev(net_dev); 401 } 402 - read_lock_irq(&devs_lock); 403 - if (list_empty(&br2684_devs)) { 404 - /* last br2684 device */ 405 - unregister_atmdevice_notifier(&atm_dev_notifier); 406 - } 407 - read_unlock_irq(&devs_lock); 408 return; 409 } 410 ··· 669 670 if (list_empty(&br2684_devs)) { 671 /* 1st br2684 device */ 672 - register_atmdevice_notifier(&atm_dev_notifier); 673 brdev->number = 1; 674 } else 675 brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; ··· 808 return -ENOMEM; 809 #endif 810 register_atm_ioctl(&br2684_ioctl_ops); 811 return 0; 812 } 813 ··· 824 #endif 825 826 827 - /* if not already empty */ 828 - if (!list_empty(&br2684_devs)) 829 - unregister_atmdevice_notifier(&atm_dev_notifier); 830 831 while (!list_empty(&br2684_devs)) { 832 net_dev = list_entry_brdev(br2684_devs.next);
··· 399 unregister_netdev(net_dev); 400 free_netdev(net_dev); 401 } 402 return; 403 } 404 ··· 675 676 if (list_empty(&br2684_devs)) { 677 /* 1st br2684 device */ 678 brdev->number = 1; 679 } else 680 brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; ··· 815 return -ENOMEM; 816 #endif 817 register_atm_ioctl(&br2684_ioctl_ops); 818 + register_atmdevice_notifier(&atm_dev_notifier); 819 return 0; 820 } 821 ··· 830 #endif 831 832 833 + unregister_atmdevice_notifier(&atm_dev_notifier); 834 835 while (!list_empty(&br2684_devs)) { 836 net_dev = list_entry_brdev(br2684_devs.next);
+3 -2
net/core/iovec.c
··· 35 * in any case. 36 */ 37 38 - int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 39 { 40 - int size, err, ct; 41 42 if (m->msg_namelen) { 43 if (mode == VERIFY_READ) {
··· 35 * in any case. 36 */ 37 38 + long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 39 { 40 + int size, ct; 41 + long err; 42 43 if (m->msg_namelen) { 44 if (mode == VERIFY_READ) {
+4 -4
net/core/sock.c
··· 1351 { 1352 int uid; 1353 1354 - read_lock(&sk->sk_callback_lock); 1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1356 - read_unlock(&sk->sk_callback_lock); 1357 return uid; 1358 } 1359 EXPORT_SYMBOL(sock_i_uid); ··· 1362 { 1363 unsigned long ino; 1364 1365 - read_lock(&sk->sk_callback_lock); 1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 1367 - read_unlock(&sk->sk_callback_lock); 1368 return ino; 1369 } 1370 EXPORT_SYMBOL(sock_i_ino);
··· 1351 { 1352 int uid; 1353 1354 + read_lock_bh(&sk->sk_callback_lock); 1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1356 + read_unlock_bh(&sk->sk_callback_lock); 1357 return uid; 1358 } 1359 EXPORT_SYMBOL(sock_i_uid); ··· 1362 { 1363 unsigned long ino; 1364 1365 + read_lock_bh(&sk->sk_callback_lock); 1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 1367 + read_unlock_bh(&sk->sk_callback_lock); 1368 return ino; 1369 } 1370 EXPORT_SYMBOL(sock_i_ino);
+4 -4
net/ipv4/ip_gre.c
··· 45 #include <net/netns/generic.h> 46 #include <net/rtnetlink.h> 47 48 - #ifdef CONFIG_IPV6 49 #include <net/ipv6.h> 50 #include <net/ip6_fib.h> 51 #include <net/ip6_route.h> ··· 699 if ((dst = rt->rt_gateway) == 0) 700 goto tx_error_icmp; 701 } 702 - #ifdef CONFIG_IPV6 703 else if (skb->protocol == htons(ETH_P_IPV6)) { 704 struct in6_addr *addr6; 705 int addr_type; ··· 774 goto tx_error; 775 } 776 } 777 - #ifdef CONFIG_IPV6 778 else if (skb->protocol == htons(ETH_P_IPV6)) { 779 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); 780 ··· 850 if ((iph->ttl = tiph->ttl) == 0) { 851 if (skb->protocol == htons(ETH_P_IP)) 852 iph->ttl = old_iph->ttl; 853 - #ifdef CONFIG_IPV6 854 else if (skb->protocol == htons(ETH_P_IPV6)) 855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 856 #endif
··· 45 #include <net/netns/generic.h> 46 #include <net/rtnetlink.h> 47 48 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 49 #include <net/ipv6.h> 50 #include <net/ip6_fib.h> 51 #include <net/ip6_route.h> ··· 699 if ((dst = rt->rt_gateway) == 0) 700 goto tx_error_icmp; 701 } 702 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 703 else if (skb->protocol == htons(ETH_P_IPV6)) { 704 struct in6_addr *addr6; 705 int addr_type; ··· 774 goto tx_error; 775 } 776 } 777 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 778 else if (skb->protocol == htons(ETH_P_IPV6)) { 779 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); 780 ··· 850 if ((iph->ttl = tiph->ttl) == 0) { 851 if (skb->protocol == htons(ETH_P_IP)) 852 iph->ttl = old_iph->ttl; 853 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 854 else if (skb->protocol == htons(ETH_P_IPV6)) 855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 856 #endif
+13 -6
net/ipv4/ip_output.c
··· 488 * we can switch to copy when see the first bad fragment. 489 */ 490 if (skb_has_frags(skb)) { 491 - struct sk_buff *frag; 492 int first_len = skb_pagelen(skb); 493 - int truesizes = 0; 494 495 if (first_len - hlen > mtu || 496 ((first_len - hlen) & 7) || ··· 502 if (frag->len > mtu || 503 ((frag->len & 7) && frag->next) || 504 skb_headroom(frag) < hlen) 505 - goto slow_path; 506 507 /* Partially cloned skb? */ 508 if (skb_shared(frag)) 509 - goto slow_path; 510 511 BUG_ON(frag->sk); 512 if (skb->sk) { 513 frag->sk = skb->sk; 514 frag->destructor = sock_wfree; 515 } 516 - truesizes += frag->truesize; 517 } 518 519 /* Everything is OK. Generate! */ ··· 523 frag = skb_shinfo(skb)->frag_list; 524 skb_frag_list_init(skb); 525 skb->data_len = first_len - skb_headlen(skb); 526 - skb->truesize -= truesizes; 527 skb->len = first_len; 528 iph->tot_len = htons(first_len); 529 iph->frag_off = htons(IP_MF); ··· 574 } 575 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 576 return err; 577 } 578 579 slow_path:
··· 488 * we can switch to copy when see the first bad fragment. 489 */ 490 if (skb_has_frags(skb)) { 491 + struct sk_buff *frag, *frag2; 492 int first_len = skb_pagelen(skb); 493 494 if (first_len - hlen > mtu || 495 ((first_len - hlen) & 7) || ··· 503 if (frag->len > mtu || 504 ((frag->len & 7) && frag->next) || 505 skb_headroom(frag) < hlen) 506 + goto slow_path_clean; 507 508 /* Partially cloned skb? */ 509 if (skb_shared(frag)) 510 + goto slow_path_clean; 511 512 BUG_ON(frag->sk); 513 if (skb->sk) { 514 frag->sk = skb->sk; 515 frag->destructor = sock_wfree; 516 } 517 + skb->truesize -= frag->truesize; 518 } 519 520 /* Everything is OK. Generate! */ ··· 524 frag = skb_shinfo(skb)->frag_list; 525 skb_frag_list_init(skb); 526 skb->data_len = first_len - skb_headlen(skb); 527 skb->len = first_len; 528 iph->tot_len = htons(first_len); 529 iph->frag_off = htons(IP_MF); ··· 576 } 577 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 578 return err; 579 + 580 + slow_path_clean: 581 + skb_walk_frags(skb, frag2) { 582 + if (frag2 == frag) 583 + break; 584 + frag2->sk = NULL; 585 + frag2->destructor = NULL; 586 + skb->truesize += frag2->truesize; 587 + } 588 } 589 590 slow_path:
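Editor's note: the fast fragmentation path mutates each frag (sk, destructor, truesize) as it validates the list, so falling back to the slow path now has to undo exactly the frags already touched, which is what slow_path_clean does. The same walk-and-roll-back idiom, reduced to a hypothetical linked list:

#include <stdio.h>

struct node { struct node *next; int claimed; int bad; };

static int claim_all(struct node *head)
{
    struct node *n, *failed = NULL;

    for (n = head; n; n = n->next) {
        if (n->bad) {              /* validation failed mid-walk */
            failed = n;
            goto rollback;
        }
        n->claimed = 1;            /* per-node state changed on the fly */
    }
    return 0;

rollback:
    for (n = head; n != failed; n = n->next)
        n->claimed = 0;            /* undo only what was actually done */
    return -1;
}

int main(void)
{
    struct node c = { NULL, 0, 0 }, b = { &c, 0, 1 }, a = { &b, 0, 0 };

    printf("result %d, a.claimed=%d b.claimed=%d\n",
           claim_all(&a), a.claimed, b.claimed);   /* -1, 0, 0 */
    return 0;
}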
+1
net/ipv4/netfilter/ipt_REJECT.c
··· 112 /* ip_route_me_harder expects skb->dst to be set */ 113 skb_dst_set_noref(nskb, skb_dst(oldskb)); 114 115 if (ip_route_me_harder(nskb, addr_type)) 116 goto free_nskb; 117
··· 112 /* ip_route_me_harder expects skb->dst to be set */ 113 skb_dst_set_noref(nskb, skb_dst(oldskb)); 114 115 + nskb->protocol = htons(ETH_P_IP); 116 if (ip_route_me_harder(nskb, addr_type)) 117 goto free_nskb; 118
+3 -1
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 66 const struct net_device *out, 67 int (*okfn)(struct sk_buff *)) 68 { 69 struct inet_sock *inet = inet_sk(skb->sk); 70 71 - if (inet && inet->nodefrag) 72 return NF_ACCEPT; 73 74 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
··· 66 const struct net_device *out, 67 int (*okfn)(struct sk_buff *)) 68 { 69 + struct sock *sk = skb->sk; 70 struct inet_sock *inet = inet_sk(skb->sk); 71 72 + if (sk && (sk->sk_family == PF_INET) && 73 + inet->nodefrag) 74 return NF_ACCEPT; 75 76 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+4 -2
net/ipv4/netfilter/nf_nat_snmp_basic.c
··· 893 unsigned char s[4]; 894 895 if (offset & 1) { 896 - s[0] = s[2] = 0; 897 s[1] = ~*optr; 898 s[3] = *nptr; 899 } else { 900 - s[1] = s[3] = 0; 901 s[0] = ~*optr; 902 s[2] = *nptr; 903 } 904 905 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
··· 893 unsigned char s[4]; 894 895 if (offset & 1) { 896 + s[0] = ~0; 897 s[1] = ~*optr; 898 + s[2] = 0; 899 s[3] = *nptr; 900 } else { 901 s[0] = ~*optr; 902 + s[1] = ~0; 903 s[2] = *nptr; 904 + s[3] = 0; 905 } 906 907 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
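Editor's note: the fix pads the "old byte" half of the adjustment with ~0 so that the 16-bit word fed to the checksum really is the one's-complement of the old contribution. A userspace demo with a plain RFC 1071-style sum (not the kernel csum helpers): the 0xff pad keeps the incremental update in step with a full recompute, a 0x00 pad does not.

#include <stdio.h>
#include <stdint.h>

static uint16_t ocsum(const uint8_t *p, size_t len)   /* one's-complement sum */
{
    uint32_t sum = 0;
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)((p[i] << 8) | p[i + 1]);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

static uint16_t ocadd(uint16_t a, uint16_t b)
{
    uint32_t s = (uint32_t)a + b;
    return (uint16_t)((s & 0xffff) + (s >> 16));
}

int main(void)
{
    uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
    uint16_t before = ocsum(data, 4);
    uint8_t oldb = data[2], newb = 0x9a;

    data[2] = newb;                                    /* even-offset change */
    uint16_t full = ocsum(data, 4);

    uint16_t good = ocadd(before, ocadd((uint8_t)~oldb << 8 | 0xff, newb << 8));
    uint16_t bad  = ocadd(before, ocadd((uint8_t)~oldb << 8 | 0x00, newb << 8));

    printf("full=0x%04x good=0x%04x bad=0x%04x\n", full, good, bad);
    /* prints full=0xacac good=0xacac bad=0xabad */
    return 0;
}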
+1 -1
net/ipv4/route.c
··· 1231 } 1232 1233 if (net_ratelimit()) 1234 - printk(KERN_WARNING "Neighbour table overflow.\n"); 1235 rt_drop(rt); 1236 return -ENOBUFS; 1237 }
··· 1231 } 1232 1233 if (net_ratelimit()) 1234 + printk(KERN_WARNING "ipv4: Neighbour table overflow.\n"); 1235 rt_drop(rt); 1236 return -ENOBUFS; 1237 }
+6 -3
net/ipv4/tcp.c
··· 386 */ 387 388 mask = 0; 389 - if (sk->sk_err) 390 - mask = POLLERR; 391 392 /* 393 * POLLHUP is certainly not done right. But poll() doesn't ··· 455 if (tp->urg_data & TCP_URG_VALID) 456 mask |= POLLPRI; 457 } 458 return mask; 459 } 460 EXPORT_SYMBOL(tcp_poll); ··· 943 sg = sk->sk_route_caps & NETIF_F_SG; 944 945 while (--iovlen >= 0) { 946 - int seglen = iov->iov_len; 947 unsigned char __user *from = iov->iov_base; 948 949 iov++;
··· 386 */ 387 388 mask = 0; 389 390 /* 391 * POLLHUP is certainly not done right. But poll() doesn't ··· 457 if (tp->urg_data & TCP_URG_VALID) 458 mask |= POLLPRI; 459 } 460 + /* This barrier is coupled with smp_wmb() in tcp_reset() */ 461 + smp_rmb(); 462 + if (sk->sk_err) 463 + mask |= POLLERR; 464 + 465 return mask; 466 } 467 EXPORT_SYMBOL(tcp_poll); ··· 940 sg = sk->sk_route_caps & NETIF_F_SG; 941 942 while (--iovlen >= 0) { 943 + size_t seglen = iov->iov_len; 944 unsigned char __user *from = iov->iov_base; 945 946 iov++;
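Editor's note: the smp_rmb() added to tcp_poll() pairs with the smp_wmb() the companion hunk adds to tcp_reset(), so a waiter that sees the new socket state also sees sk_err. A userspace analogue of that publish/observe pairing using C11 fences (illustrative only, not the kernel primitives, and trivially single-threaded here):

#include <stdatomic.h>
#include <stdio.h>

static int sk_err;                       /* plain data, published via fences */
static atomic_int sk_shutdown;

static void tcp_reset_like(void)         /* writer side */
{
    sk_err = 104;                        /* ECONNRESET */
    atomic_thread_fence(memory_order_release);   /* plays the smp_wmb() role */
    atomic_store_explicit(&sk_shutdown, 1, memory_order_relaxed);
}

static int poll_like(void)               /* reader side */
{
    int mask = 0;

    if (atomic_load_explicit(&sk_shutdown, memory_order_relaxed))
        mask |= 1;                       /* "hung up" state */
    atomic_thread_fence(memory_order_acquire);   /* plays the smp_rmb() role */
    if (sk_err)
        mask |= 2;                       /* "POLLERR" */
    return mask;
}

int main(void)
{
    tcp_reset_like();
    printf("mask=%d\n", poll_like());    /* 3: both the state and the error seen */
    return 0;
}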
+4 -1
net/ipv4/tcp_input.c
··· 2545 cnt += tcp_skb_pcount(skb); 2546 2547 if (cnt > packets) { 2548 - if (tcp_is_sack(tp) || (oldcnt >= packets)) 2549 break; 2550 2551 mss = skb_shinfo(skb)->gso_size; ··· 4049 default: 4050 sk->sk_err = ECONNRESET; 4051 } 4052 4053 if (!sock_flag(sk, SOCK_DEAD)) 4054 sk->sk_error_report(sk);
··· 2545 cnt += tcp_skb_pcount(skb); 2546 2547 if (cnt > packets) { 2548 + if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || 2549 + (oldcnt >= packets)) 2550 break; 2551 2552 mss = skb_shinfo(skb)->gso_size; ··· 4048 default: 4049 sk->sk_err = ECONNRESET; 4050 } 4051 + /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4052 + smp_wmb(); 4053 4054 if (!sock_flag(sk, SOCK_DEAD)) 4055 sk->sk_error_report(sk);
+1 -1
net/ipv4/xfrm4_policy.c
··· 61 62 static int xfrm4_get_tos(struct flowi *fl) 63 { 64 - return fl->fl4_tos; 65 } 66 67 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
··· 61 62 static int xfrm4_get_tos(struct flowi *fl) 63 { 64 + return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */ 65 } 66 67 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
+19 -14
net/ipv4/xfrm4_state.c
··· 21 } 22 23 static void 24 - __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, 25 - struct xfrm_tmpl *tmpl, 26 - xfrm_address_t *daddr, xfrm_address_t *saddr) 27 { 28 - x->sel.daddr.a4 = fl->fl4_dst; 29 - x->sel.saddr.a4 = fl->fl4_src; 30 - x->sel.dport = xfrm_flowi_dport(fl); 31 - x->sel.dport_mask = htons(0xffff); 32 - x->sel.sport = xfrm_flowi_sport(fl); 33 - x->sel.sport_mask = htons(0xffff); 34 - x->sel.family = AF_INET; 35 - x->sel.prefixlen_d = 32; 36 - x->sel.prefixlen_s = 32; 37 - x->sel.proto = fl->proto; 38 - x->sel.ifindex = fl->oif; 39 x->id = tmpl->id; 40 if (x->id.daddr.a4 == 0) 41 x->id.daddr.a4 = daddr->a4; ··· 74 .owner = THIS_MODULE, 75 .init_flags = xfrm4_init_flags, 76 .init_tempsel = __xfrm4_init_tempsel, 77 .output = xfrm4_output, 78 .extract_input = xfrm4_extract_input, 79 .extract_output = xfrm4_extract_output,
··· 21 } 22 23 static void 24 + __xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) 25 { 26 + sel->daddr.a4 = fl->fl4_dst; 27 + sel->saddr.a4 = fl->fl4_src; 28 + sel->dport = xfrm_flowi_dport(fl); 29 + sel->dport_mask = htons(0xffff); 30 + sel->sport = xfrm_flowi_sport(fl); 31 + sel->sport_mask = htons(0xffff); 32 + sel->family = AF_INET; 33 + sel->prefixlen_d = 32; 34 + sel->prefixlen_s = 32; 35 + sel->proto = fl->proto; 36 + sel->ifindex = fl->oif; 37 + } 38 + 39 + static void 40 + xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, 41 + xfrm_address_t *daddr, xfrm_address_t *saddr) 42 + { 43 x->id = tmpl->id; 44 if (x->id.daddr.a4 == 0) 45 x->id.daddr.a4 = daddr->a4; ··· 70 .owner = THIS_MODULE, 71 .init_flags = xfrm4_init_flags, 72 .init_tempsel = __xfrm4_init_tempsel, 73 + .init_temprop = xfrm4_init_temprop, 74 .output = xfrm4_output, 75 .extract_input = xfrm4_extract_input, 76 .extract_output = xfrm4_extract_output,
+8 -3
net/ipv6/addrconf.c
··· 4637 if (err < 0) { 4638 printk(KERN_CRIT "IPv6 Addrconf:" 4639 " cannot initialize default policy table: %d.\n", err); 4640 - return err; 4641 } 4642 4643 - register_pernet_subsys(&addrconf_ops); 4644 4645 /* The addrconf netdev notifier requires that loopback_dev 4646 * has it's ipv6 private information allocated and setup ··· 4694 unregister_netdevice_notifier(&ipv6_dev_notf); 4695 errlo: 4696 unregister_pernet_subsys(&addrconf_ops); 4697 - 4698 return err; 4699 } 4700 ··· 4707 4708 unregister_netdevice_notifier(&ipv6_dev_notf); 4709 unregister_pernet_subsys(&addrconf_ops); 4710 4711 rtnl_lock(); 4712
··· 4637 if (err < 0) { 4638 printk(KERN_CRIT "IPv6 Addrconf:" 4639 " cannot initialize default policy table: %d.\n", err); 4640 + goto out; 4641 } 4642 4643 + err = register_pernet_subsys(&addrconf_ops); 4644 + if (err < 0) 4645 + goto out_addrlabel; 4646 4647 /* The addrconf netdev notifier requires that loopback_dev 4648 * has it's ipv6 private information allocated and setup ··· 4692 unregister_netdevice_notifier(&ipv6_dev_notf); 4693 errlo: 4694 unregister_pernet_subsys(&addrconf_ops); 4695 + out_addrlabel: 4696 + ipv6_addr_label_cleanup(); 4697 + out: 4698 return err; 4699 } 4700 ··· 4703 4704 unregister_netdevice_notifier(&ipv6_dev_notf); 4705 unregister_pernet_subsys(&addrconf_ops); 4706 + ipv6_addr_label_cleanup(); 4707 4708 rtnl_lock(); 4709
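Editor's note: addrconf_init() now checks register_pernet_subsys() and unwinds through labelled exits in reverse order of setup, with the new ipv6_addr_label_cleanup() filling the gap. The same goto-unwind idiom, reduced to two hypothetical registration steps:

#include <stdio.h>

static int  register_a(void)   { puts("A registered");          return 0;  }
static void unregister_a(void) { puts("A unregistered");                    }
static int  register_b(void)   { puts("B: registration failed"); return -1; }

static int subsys_init(void)
{
    int err;

    err = register_a();
    if (err)
        goto out;

    err = register_b();
    if (err)
        goto out_a;           /* only A needs undoing here */

    return 0;

out_a:
    unregister_a();
out:
    return err;
}

int main(void)
{
    printf("init: %d\n", subsys_init());   /* -1, with A cleanly undone */
    return 0;
}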
+5
net/ipv6/addrlabel.c
··· 393 return register_pernet_subsys(&ipv6_addr_label_ops); 394 } 395 396 static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 397 [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, 398 [IFAL_LABEL] = { .len = sizeof(u32), },
··· 393 return register_pernet_subsys(&ipv6_addr_label_ops); 394 } 395 396 + void ipv6_addr_label_cleanup(void) 397 + { 398 + unregister_pernet_subsys(&ipv6_addr_label_ops); 399 + } 400 + 401 static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 402 [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, 403 [IFAL_LABEL] = { .len = sizeof(u32), },
+13 -5
net/ipv6/ip6_output.c
··· 639 640 if (skb_has_frags(skb)) { 641 int first_len = skb_pagelen(skb); 642 - int truesizes = 0; 643 644 if (first_len - hlen > mtu || 645 ((first_len - hlen) & 7) || ··· 651 if (frag->len > mtu || 652 ((frag->len & 7) && frag->next) || 653 skb_headroom(frag) < hlen) 654 - goto slow_path; 655 656 /* Partially cloned skb? */ 657 if (skb_shared(frag)) 658 - goto slow_path; 659 660 BUG_ON(frag->sk); 661 if (skb->sk) { 662 frag->sk = skb->sk; 663 frag->destructor = sock_wfree; 664 - truesizes += frag->truesize; 665 } 666 } 667 668 err = 0; ··· 693 694 first_len = skb_pagelen(skb); 695 skb->data_len = first_len - skb_headlen(skb); 696 - skb->truesize -= truesizes; 697 skb->len = first_len; 698 ipv6_hdr(skb)->payload_len = htons(first_len - 699 sizeof(struct ipv6hdr)); ··· 755 IPSTATS_MIB_FRAGFAILS); 756 dst_release(&rt->dst); 757 return err; 758 } 759 760 slow_path:
··· 639 640 if (skb_has_frags(skb)) { 641 int first_len = skb_pagelen(skb); 642 + struct sk_buff *frag2; 643 644 if (first_len - hlen > mtu || 645 ((first_len - hlen) & 7) || ··· 651 if (frag->len > mtu || 652 ((frag->len & 7) && frag->next) || 653 skb_headroom(frag) < hlen) 654 + goto slow_path_clean; 655 656 /* Partially cloned skb? */ 657 if (skb_shared(frag)) 658 + goto slow_path_clean; 659 660 BUG_ON(frag->sk); 661 if (skb->sk) { 662 frag->sk = skb->sk; 663 frag->destructor = sock_wfree; 664 } 665 + skb->truesize -= frag->truesize; 666 } 667 668 err = 0; ··· 693 694 first_len = skb_pagelen(skb); 695 skb->data_len = first_len - skb_headlen(skb); 696 skb->len = first_len; 697 ipv6_hdr(skb)->payload_len = htons(first_len - 698 sizeof(struct ipv6hdr)); ··· 756 IPSTATS_MIB_FRAGFAILS); 757 dst_release(&rt->dst); 758 return err; 759 + 760 + slow_path_clean: 761 + skb_walk_frags(skb, frag2) { 762 + if (frag2 == frag) 763 + break; 764 + frag2->sk = NULL; 765 + frag2->destructor = NULL; 766 + skb->truesize += frag2->truesize; 767 + } 768 } 769 770 slow_path:
+1 -1
net/ipv6/route.c
··· 670 671 if (net_ratelimit()) 672 printk(KERN_WARNING 673 - "Neighbour table overflow.\n"); 674 dst_free(&rt->dst); 675 return NULL; 676 }
··· 670 671 if (net_ratelimit()) 672 printk(KERN_WARNING 673 + "ipv6: Neighbour table overflow.\n"); 674 dst_free(&rt->dst); 675 return NULL; 676 }
+19 -14
net/ipv6/xfrm6_state.c
··· 20 #include <net/addrconf.h> 21 22 static void 23 - __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl, 24 - struct xfrm_tmpl *tmpl, 25 - xfrm_address_t *daddr, xfrm_address_t *saddr) 26 { 27 /* Initialize temporary selector matching only 28 * to current session. */ 29 - ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst); 30 - ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src); 31 - x->sel.dport = xfrm_flowi_dport(fl); 32 - x->sel.dport_mask = htons(0xffff); 33 - x->sel.sport = xfrm_flowi_sport(fl); 34 - x->sel.sport_mask = htons(0xffff); 35 - x->sel.family = AF_INET6; 36 - x->sel.prefixlen_d = 128; 37 - x->sel.prefixlen_s = 128; 38 - x->sel.proto = fl->proto; 39 - x->sel.ifindex = fl->oif; 40 x->id = tmpl->id; 41 if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) 42 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); ··· 172 .eth_proto = htons(ETH_P_IPV6), 173 .owner = THIS_MODULE, 174 .init_tempsel = __xfrm6_init_tempsel, 175 .tmpl_sort = __xfrm6_tmpl_sort, 176 .state_sort = __xfrm6_state_sort, 177 .output = xfrm6_output,
··· 20 #include <net/addrconf.h> 21 22 static void 23 + __xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) 24 { 25 /* Initialize temporary selector matching only 26 * to current session. */ 27 + ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst); 28 + ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src); 29 + sel->dport = xfrm_flowi_dport(fl); 30 + sel->dport_mask = htons(0xffff); 31 + sel->sport = xfrm_flowi_sport(fl); 32 + sel->sport_mask = htons(0xffff); 33 + sel->family = AF_INET6; 34 + sel->prefixlen_d = 128; 35 + sel->prefixlen_s = 128; 36 + sel->proto = fl->proto; 37 + sel->ifindex = fl->oif; 38 + } 39 + 40 + static void 41 + xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, 42 + xfrm_address_t *daddr, xfrm_address_t *saddr) 43 + { 44 x->id = tmpl->id; 45 if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) 46 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); ··· 168 .eth_proto = htons(ETH_P_IPV6), 169 .owner = THIS_MODULE, 170 .init_tempsel = __xfrm6_init_tempsel, 171 + .init_temprop = xfrm6_init_temprop, 172 .tmpl_sort = __xfrm6_tmpl_sort, 173 .state_sort = __xfrm6_state_sort, 174 .output = xfrm6_output,
+3 -1
net/netfilter/nf_conntrack_extend.c
··· 48 { 49 unsigned int off, len; 50 struct nf_ct_ext_type *t; 51 52 rcu_read_lock(); 53 t = rcu_dereference(nf_ct_ext_types[id]); 54 BUG_ON(t == NULL); 55 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 56 len = off + t->len; 57 rcu_read_unlock(); 58 59 - *ext = kzalloc(t->alloc_size, gfp); 60 if (!*ext) 61 return NULL; 62
··· 48 { 49 unsigned int off, len; 50 struct nf_ct_ext_type *t; 51 + size_t alloc_size; 52 53 rcu_read_lock(); 54 t = rcu_dereference(nf_ct_ext_types[id]); 55 BUG_ON(t == NULL); 56 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 57 len = off + t->len; 58 + alloc_size = t->alloc_size; 59 rcu_read_unlock(); 60 61 + *ext = kzalloc(alloc_size, gfp); 62 if (!*ext) 63 return NULL; 64
+1 -1
net/netfilter/nf_conntrack_sip.c
··· 1376 unsigned int msglen, origlen; 1377 const char *dptr, *end; 1378 s16 diff, tdiff = 0; 1379 - int ret; 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1381 1382 if (ctinfo != IP_CT_ESTABLISHED &&
··· 1376 unsigned int msglen, origlen; 1377 const char *dptr, *end; 1378 s16 diff, tdiff = 0; 1379 + int ret = NF_ACCEPT; 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1381 1382 if (ctinfo != IP_CT_ESTABLISHED &&
+5 -1
net/netfilter/nf_tproxy_core.c
··· 70 int 71 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 72 { 73 - if (inet_sk(sk)->transparent) { 74 skb_orphan(skb); 75 skb->sk = sk; 76 skb->destructor = nf_tproxy_destructor;
··· 70 int 71 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 72 { 73 + bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? 74 + inet_twsk(sk)->tw_transparent : 75 + inet_sk(sk)->transparent; 76 + 77 + if (transparent) { 78 skb_orphan(skb); 79 skb->sk = sk; 80 skb->destructor = nf_tproxy_destructor;
+2 -2
net/rds/tcp_connect.c
··· 43 struct rds_connection *conn; 44 struct rds_tcp_connection *tc; 45 46 - read_lock(&sk->sk_callback_lock); 47 conn = sk->sk_user_data; 48 if (conn == NULL) { 49 state_change = sk->sk_state_change; ··· 68 break; 69 } 70 out: 71 - read_unlock(&sk->sk_callback_lock); 72 state_change(sk); 73 } 74
··· 43 struct rds_connection *conn; 44 struct rds_tcp_connection *tc; 45 46 + read_lock_bh(&sk->sk_callback_lock); 47 conn = sk->sk_user_data; 48 if (conn == NULL) { 49 state_change = sk->sk_state_change; ··· 68 break; 69 } 70 out: 71 + read_unlock_bh(&sk->sk_callback_lock); 72 state_change(sk); 73 } 74
+2 -2
net/rds/tcp_listen.c
··· 114 115 rdsdebug("listen data ready sk %p\n", sk); 116 117 - read_lock(&sk->sk_callback_lock); 118 ready = sk->sk_user_data; 119 if (ready == NULL) { /* check for teardown race */ 120 ready = sk->sk_data_ready; ··· 131 queue_work(rds_wq, &rds_tcp_listen_work); 132 133 out: 134 - read_unlock(&sk->sk_callback_lock); 135 ready(sk, bytes); 136 } 137
··· 114 115 rdsdebug("listen data ready sk %p\n", sk); 116 117 + read_lock_bh(&sk->sk_callback_lock); 118 ready = sk->sk_user_data; 119 if (ready == NULL) { /* check for teardown race */ 120 ready = sk->sk_data_ready; ··· 131 queue_work(rds_wq, &rds_tcp_listen_work); 132 133 out: 134 + read_unlock_bh(&sk->sk_callback_lock); 135 ready(sk, bytes); 136 } 137
+2 -2
net/rds/tcp_recv.c
··· 324 325 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 326 327 - read_lock(&sk->sk_callback_lock); 328 conn = sk->sk_user_data; 329 if (conn == NULL) { /* check for teardown race */ 330 ready = sk->sk_data_ready; ··· 338 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) 339 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 340 out: 341 - read_unlock(&sk->sk_callback_lock); 342 ready(sk, bytes); 343 } 344
··· 324 325 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 326 327 + read_lock_bh(&sk->sk_callback_lock); 328 conn = sk->sk_user_data; 329 if (conn == NULL) { /* check for teardown race */ 330 ready = sk->sk_data_ready; ··· 338 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) 339 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 340 out: 341 + read_unlock_bh(&sk->sk_callback_lock); 342 ready(sk, bytes); 343 } 344
+2 -2
net/rds/tcp_send.c
··· 224 struct rds_connection *conn; 225 struct rds_tcp_connection *tc; 226 227 - read_lock(&sk->sk_callback_lock); 228 conn = sk->sk_user_data; 229 if (conn == NULL) { 230 write_space = sk->sk_write_space; ··· 244 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 245 246 out: 247 - read_unlock(&sk->sk_callback_lock); 248 249 /* 250 * write_space is only called when data leaves tcp's send queue if
··· 224 struct rds_connection *conn; 225 struct rds_tcp_connection *tc; 226 227 + read_lock_bh(&sk->sk_callback_lock); 228 conn = sk->sk_user_data; 229 if (conn == NULL) { 230 write_space = sk->sk_write_space; ··· 244 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 245 246 out: 247 + read_unlock_bh(&sk->sk_callback_lock); 248 249 /* 250 * write_space is only called when data leaves tcp's send queue if
+2 -2
net/rose/af_rose.c
··· 679 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 680 return -EINVAL; 681 682 - if (addr->srose_ndigis > ROSE_MAX_DIGIS) 683 return -EINVAL; 684 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { ··· 739 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 740 return -EINVAL; 741 742 - if (addr->srose_ndigis > ROSE_MAX_DIGIS) 743 return -EINVAL; 744 745 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
··· 679 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 680 return -EINVAL; 681 682 + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 683 return -EINVAL; 684 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { ··· 739 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 740 return -EINVAL; 741 742 + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 743 return -EINVAL; 744 745 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
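Editor's note: srose_ndigis is a signed int in struct sockaddr_rose, so a negative value sails past a plain "> ROSE_MAX_DIGIS" test; casting to unsigned turns it into a huge value that the same comparison rejects. A tiny sketch (ROSE_MAX_DIGIS is 6 in the kernel headers, repeated here only for illustration):

#include <stdio.h>

#define ROSE_MAX_DIGIS 6

int main(void)
{
    int ndigis = -1;                     /* attacker-controlled, signed */

    printf("signed check rejects:   %d\n", ndigis > ROSE_MAX_DIGIS);                 /* 0 */
    printf("unsigned check rejects: %d\n", (unsigned int) ndigis > ROSE_MAX_DIGIS);  /* 1 */
    return 0;
}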
+14 -14
net/sunrpc/xprtsock.c
··· 800 u32 _xid; 801 __be32 *xp; 802 803 - read_lock(&sk->sk_callback_lock); 804 dprintk("RPC: xs_udp_data_ready...\n"); 805 if (!(xprt = xprt_from_sock(sk))) 806 goto out; ··· 852 dropit: 853 skb_free_datagram(sk, skb); 854 out: 855 - read_unlock(&sk->sk_callback_lock); 856 } 857 858 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) ··· 1229 1230 dprintk("RPC: xs_tcp_data_ready...\n"); 1231 1232 - read_lock(&sk->sk_callback_lock); 1233 if (!(xprt = xprt_from_sock(sk))) 1234 goto out; 1235 if (xprt->shutdown) ··· 1248 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); 1249 } while (read > 0); 1250 out: 1251 - read_unlock(&sk->sk_callback_lock); 1252 } 1253 1254 /* ··· 1301 { 1302 struct rpc_xprt *xprt; 1303 1304 - read_lock(&sk->sk_callback_lock); 1305 if (!(xprt = xprt_from_sock(sk))) 1306 goto out; 1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); ··· 1313 1314 switch (sk->sk_state) { 1315 case TCP_ESTABLISHED: 1316 - spin_lock_bh(&xprt->transport_lock); 1317 if (!xprt_test_and_set_connected(xprt)) { 1318 struct sock_xprt *transport = container_of(xprt, 1319 struct sock_xprt, xprt); ··· 1327 1328 xprt_wake_pending_tasks(xprt, -EAGAIN); 1329 } 1330 - spin_unlock_bh(&xprt->transport_lock); 1331 break; 1332 case TCP_FIN_WAIT1: 1333 /* The client initiated a shutdown of the socket */ ··· 1365 xs_sock_mark_closed(xprt); 1366 } 1367 out: 1368 - read_unlock(&sk->sk_callback_lock); 1369 } 1370 1371 /** ··· 1376 { 1377 struct rpc_xprt *xprt; 1378 1379 - read_lock(&sk->sk_callback_lock); 1380 if (!(xprt = xprt_from_sock(sk))) 1381 goto out; 1382 dprintk("RPC: %s client %p...\n" ··· 1384 __func__, xprt, sk->sk_err); 1385 xprt_wake_pending_tasks(xprt, -EAGAIN); 1386 out: 1387 - read_unlock(&sk->sk_callback_lock); 1388 } 1389 1390 static void xs_write_space(struct sock *sk) ··· 1416 */ 1417 static void xs_udp_write_space(struct sock *sk) 1418 { 1419 - read_lock(&sk->sk_callback_lock); 1420 1421 /* from net/core/sock.c:sock_def_write_space */ 1422 if (sock_writeable(sk)) 1423 xs_write_space(sk); 1424 1425 - read_unlock(&sk->sk_callback_lock); 1426 } 1427 1428 /** ··· 1437 */ 1438 static void xs_tcp_write_space(struct sock *sk) 1439 { 1440 - read_lock(&sk->sk_callback_lock); 1441 1442 /* from net/core/stream.c:sk_stream_write_space */ 1443 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 1444 xs_write_space(sk); 1445 1446 - read_unlock(&sk->sk_callback_lock); 1447 } 1448 1449 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
··· 800 u32 _xid; 801 __be32 *xp; 802 803 + read_lock_bh(&sk->sk_callback_lock); 804 dprintk("RPC: xs_udp_data_ready...\n"); 805 if (!(xprt = xprt_from_sock(sk))) 806 goto out; ··· 852 dropit: 853 skb_free_datagram(sk, skb); 854 out: 855 + read_unlock_bh(&sk->sk_callback_lock); 856 } 857 858 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) ··· 1229 1230 dprintk("RPC: xs_tcp_data_ready...\n"); 1231 1232 + read_lock_bh(&sk->sk_callback_lock); 1233 if (!(xprt = xprt_from_sock(sk))) 1234 goto out; 1235 if (xprt->shutdown) ··· 1248 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); 1249 } while (read > 0); 1250 out: 1251 + read_unlock_bh(&sk->sk_callback_lock); 1252 } 1253 1254 /* ··· 1301 { 1302 struct rpc_xprt *xprt; 1303 1304 + read_lock_bh(&sk->sk_callback_lock); 1305 if (!(xprt = xprt_from_sock(sk))) 1306 goto out; 1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); ··· 1313 1314 switch (sk->sk_state) { 1315 case TCP_ESTABLISHED: 1316 + spin_lock(&xprt->transport_lock); 1317 if (!xprt_test_and_set_connected(xprt)) { 1318 struct sock_xprt *transport = container_of(xprt, 1319 struct sock_xprt, xprt); ··· 1327 1328 xprt_wake_pending_tasks(xprt, -EAGAIN); 1329 } 1330 + spin_unlock(&xprt->transport_lock); 1331 break; 1332 case TCP_FIN_WAIT1: 1333 /* The client initiated a shutdown of the socket */ ··· 1365 xs_sock_mark_closed(xprt); 1366 } 1367 out: 1368 + read_unlock_bh(&sk->sk_callback_lock); 1369 } 1370 1371 /** ··· 1376 { 1377 struct rpc_xprt *xprt; 1378 1379 + read_lock_bh(&sk->sk_callback_lock); 1380 if (!(xprt = xprt_from_sock(sk))) 1381 goto out; 1382 dprintk("RPC: %s client %p...\n" ··· 1384 __func__, xprt, sk->sk_err); 1385 xprt_wake_pending_tasks(xprt, -EAGAIN); 1386 out: 1387 + read_unlock_bh(&sk->sk_callback_lock); 1388 } 1389 1390 static void xs_write_space(struct sock *sk) ··· 1416 */ 1417 static void xs_udp_write_space(struct sock *sk) 1418 { 1419 + read_lock_bh(&sk->sk_callback_lock); 1420 1421 /* from net/core/sock.c:sock_def_write_space */ 1422 if (sock_writeable(sk)) 1423 xs_write_space(sk); 1424 1425 + read_unlock_bh(&sk->sk_callback_lock); 1426 } 1427 1428 /** ··· 1437 */ 1438 static void xs_tcp_write_space(struct sock *sk) 1439 { 1440 + read_lock_bh(&sk->sk_callback_lock); 1441 1442 /* from net/core/stream.c:sk_stream_write_space */ 1443 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 1444 xs_write_space(sk); 1445 1446 + read_unlock_bh(&sk->sk_callback_lock); 1447 } 1448 1449 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
+1 -1
net/wireless/wext-priv.c
··· 152 } else if (!iwp->pointer) 153 return -EFAULT; 154 155 - extra = kmalloc(extra_size, GFP_KERNEL); 156 if (!extra) 157 return -ENOMEM; 158
··· 152 } else if (!iwp->pointer) 153 return -EFAULT; 154 155 + extra = kzalloc(extra_size, GFP_KERNEL); 156 if (!extra) 157 return -ENOMEM; 158
+2 -3
net/xfrm/xfrm_policy.c
··· 1175 tmpl->mode == XFRM_MODE_BEET) { 1176 remote = &tmpl->id.daddr; 1177 local = &tmpl->saddr; 1178 - family = tmpl->encap_family; 1179 - if (xfrm_addr_any(local, family)) { 1180 - error = xfrm_get_saddr(net, &tmp, remote, family); 1181 if (error) 1182 goto fail; 1183 local = &tmp;
··· 1175 tmpl->mode == XFRM_MODE_BEET) { 1176 remote = &tmpl->id.daddr; 1177 local = &tmpl->saddr; 1178 + if (xfrm_addr_any(local, tmpl->encap_family)) { 1179 + error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); 1180 if (error) 1181 goto fail; 1182 local = &tmp;
+27 -18
net/xfrm/xfrm_state.c
··· 656 EXPORT_SYMBOL(xfrm_sad_getinfo); 657 658 static int 659 - xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl, 660 - struct xfrm_tmpl *tmpl, 661 - xfrm_address_t *daddr, xfrm_address_t *saddr, 662 - unsigned short family) 663 { 664 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 665 if (!afinfo) 666 return -1; 667 - afinfo->init_tempsel(x, fl, tmpl, daddr, saddr); 668 xfrm_state_put_afinfo(afinfo); 669 return 0; 670 } ··· 798 int error = 0; 799 struct xfrm_state *best = NULL; 800 u32 mark = pol->mark.v & pol->mark.m; 801 802 to_put = NULL; 803 804 spin_lock_bh(&xfrm_state_lock); 805 - h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family); 806 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 807 - if (x->props.family == family && 808 x->props.reqid == tmpl->reqid && 809 (mark & x->mark.m) == x->mark.v && 810 !(x->props.flags & XFRM_STATE_WILDRECV) && 811 - xfrm_state_addr_check(x, daddr, saddr, family) && 812 tmpl->mode == x->props.mode && 813 tmpl->id.proto == x->id.proto && 814 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 815 - xfrm_state_look_at(pol, x, fl, family, daddr, saddr, 816 &best, &acquire_in_progress, &error); 817 } 818 if (best) 819 goto found; 820 821 - h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); 822 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 823 - if (x->props.family == family && 824 x->props.reqid == tmpl->reqid && 825 (mark & x->mark.m) == x->mark.v && 826 !(x->props.flags & XFRM_STATE_WILDRECV) && 827 - xfrm_state_addr_check(x, daddr, saddr, family) && 828 tmpl->mode == x->props.mode && 829 tmpl->id.proto == x->id.proto && 830 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 831 - xfrm_state_look_at(pol, x, fl, family, daddr, saddr, 832 &best, &acquire_in_progress, &error); 833 } 834 ··· 838 if (!x && !error && !acquire_in_progress) { 839 if (tmpl->id.spi && 840 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, 841 - tmpl->id.proto, family)) != NULL) { 842 to_put = x0; 843 error = -EEXIST; 844 goto out; ··· 848 error = -ENOMEM; 849 goto out; 850 } 851 - /* Initialize temporary selector matching only 852 * to current session. */ 853 - xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); 854 memcpy(&x->mark, &pol->mark, sizeof(x->mark)); 855 856 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); ··· 865 x->km.state = XFRM_STATE_ACQ; 866 list_add(&x->km.all, &net->xfrm.state_all); 867 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); 868 - h = xfrm_src_hash(net, daddr, saddr, family); 869 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); 870 if (x->id.spi) { 871 - h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family); 872 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); 873 } 874 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
··· 656 EXPORT_SYMBOL(xfrm_sad_getinfo); 657 658 static int 659 + xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl, 660 + struct xfrm_tmpl *tmpl, 661 + xfrm_address_t *daddr, xfrm_address_t *saddr, 662 + unsigned short family) 663 { 664 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 665 if (!afinfo) 666 return -1; 667 + afinfo->init_tempsel(&x->sel, fl); 668 + 669 + if (family != tmpl->encap_family) { 670 + xfrm_state_put_afinfo(afinfo); 671 + afinfo = xfrm_state_get_afinfo(tmpl->encap_family); 672 + if (!afinfo) 673 + return -1; 674 + } 675 + afinfo->init_temprop(x, tmpl, daddr, saddr); 676 xfrm_state_put_afinfo(afinfo); 677 return 0; 678 } ··· 790 int error = 0; 791 struct xfrm_state *best = NULL; 792 u32 mark = pol->mark.v & pol->mark.m; 793 + unsigned short encap_family = tmpl->encap_family; 794 795 to_put = NULL; 796 797 spin_lock_bh(&xfrm_state_lock); 798 + h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); 799 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 800 + if (x->props.family == encap_family && 801 x->props.reqid == tmpl->reqid && 802 (mark & x->mark.m) == x->mark.v && 803 !(x->props.flags & XFRM_STATE_WILDRECV) && 804 + xfrm_state_addr_check(x, daddr, saddr, encap_family) && 805 tmpl->mode == x->props.mode && 806 tmpl->id.proto == x->id.proto && 807 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 808 + xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr, 809 &best, &acquire_in_progress, &error); 810 } 811 if (best) 812 goto found; 813 814 + h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); 815 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 816 + if (x->props.family == encap_family && 817 x->props.reqid == tmpl->reqid && 818 (mark & x->mark.m) == x->mark.v && 819 !(x->props.flags & XFRM_STATE_WILDRECV) && 820 + xfrm_state_addr_check(x, daddr, saddr, encap_family) && 821 tmpl->mode == x->props.mode && 822 tmpl->id.proto == x->id.proto && 823 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 824 + xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr, 825 &best, &acquire_in_progress, &error); 826 } 827 ··· 829 if (!x && !error && !acquire_in_progress) { 830 if (tmpl->id.spi && 831 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, 832 + tmpl->id.proto, encap_family)) != NULL) { 833 to_put = x0; 834 error = -EEXIST; 835 goto out; ··· 839 error = -ENOMEM; 840 goto out; 841 } 842 + /* Initialize temporary state matching only 843 * to current session. */ 844 + xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family); 845 memcpy(&x->mark, &pol->mark, sizeof(x->mark)); 846 847 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); ··· 856 x->km.state = XFRM_STATE_ACQ; 857 list_add(&x->km.all, &net->xfrm.state_all); 858 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); 859 + h = xfrm_src_hash(net, daddr, saddr, encap_family); 860 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); 861 if (x->id.spi) { 862 + h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); 863 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); 864 } 865 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
+5 -1
security/tomoyo/common.c
··· 1416 const pid_t gpid = task_pid_nr(current); 1417 static const int tomoyo_buffer_len = 4096; 1418 char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); 1419 if (!buffer) 1420 return NULL; 1421 do_gettimeofday(&tv); 1422 snprintf(buffer, tomoyo_buffer_len - 1, 1423 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" 1424 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" 1425 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", 1426 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, 1427 - (pid_t) sys_getpid(), (pid_t) sys_getppid(), 1428 current_uid(), current_gid(), current_euid(), 1429 current_egid(), current_suid(), current_sgid(), 1430 current_fsuid(), current_fsgid());
··· 1416 const pid_t gpid = task_pid_nr(current); 1417 static const int tomoyo_buffer_len = 4096; 1418 char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); 1419 + pid_t ppid; 1420 if (!buffer) 1421 return NULL; 1422 do_gettimeofday(&tv); 1423 + rcu_read_lock(); 1424 + ppid = task_tgid_vnr(current->real_parent); 1425 + rcu_read_unlock(); 1426 snprintf(buffer, tomoyo_buffer_len - 1, 1427 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" 1428 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" 1429 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", 1430 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, 1431 + task_tgid_vnr(current), ppid, 1432 current_uid(), current_gid(), current_euid(), 1433 current_egid(), current_suid(), current_sgid(), 1434 current_fsuid(), current_fsgid());
-3
security/tomoyo/common.h
··· 689 690 /********** Function prototypes. **********/ 691 692 - extern asmlinkage long sys_getpid(void); 693 - extern asmlinkage long sys_getppid(void); 694 - 695 /* Check whether the given string starts with the given keyword. */ 696 bool tomoyo_str_starts(char **src, const char *find); 697 /* Get tomoyo_realpath() of current process. */
··· 689 690 /********** Function prototypes. **********/ 691 692 /* Check whether the given string starts with the given keyword. */ 693 bool tomoyo_str_starts(char **src, const char *find); 694 /* Get tomoyo_realpath() of current process. */