Merge branches 'release', 'drop_do_IRQ', 'fix_early_irq', 'misc-2.6.37', 'next-fixes', 'optimize-unwind', 'remove-compat-h' and 'stack_trace' into release

Tony Luck c0f37d2a 5d4bff94

+1041 -730
+6 -2
MAINTAINERS
··· 3925 3925 F: drivers/mfd/ 3926 3926 3927 3927 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM 3928 - S: Orphan 3928 + M: Chris Ball <cjb@laptop.org> 3929 3929 L: linux-mmc@vger.kernel.org 3930 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git 3931 + S: Maintained 3930 3932 F: drivers/mmc/ 3931 3933 F: include/linux/mmc/ 3932 3934 ··· 5099 5097 F: drivers/mmc/host/sdricoh_cs.c 5100 5098 5101 5099 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER 5102 - S: Orphan 5100 + M: Chris Ball <cjb@laptop.org> 5103 5101 L: linux-mmc@vger.kernel.org 5102 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git 5103 + S: Maintained 5104 5104 F: drivers/mmc/host/sdhci.* 5105 5105 5106 5106 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 36 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc6 5 5 NAME = Sheep on Meth 6 6 7 7 # *DOCUMENTATION*
-9
arch/alpha/kernel/entry.S
··· 915 915 .end sys_execve 916 916 917 917 .align 4 918 - .globl osf_sigprocmask 919 - .ent osf_sigprocmask 920 - osf_sigprocmask: 921 - .prologue 0 922 - mov $sp, $18 923 - jmp $31, sys_osf_sigprocmask 924 - .end osf_sigprocmask 925 - 926 - .align 4 927 918 .globl alpha_ni_syscall 928 919 .ent alpha_ni_syscall 929 920 alpha_ni_syscall:
+14 -40
arch/alpha/kernel/signal.c
··· 41 41 /* 42 42 * The OSF/1 sigprocmask calling sequence is different from the 43 43 * C sigprocmask() sequence.. 44 - * 45 - * how: 46 - * 1 - SIG_BLOCK 47 - * 2 - SIG_UNBLOCK 48 - * 3 - SIG_SETMASK 49 - * 50 - * We change the range to -1 .. 1 in order to let gcc easily 51 - * use the conditional move instructions. 52 - * 53 - * Note that we don't need to acquire the kernel lock for SMP 54 - * operation, as all of this is local to this thread. 55 44 */ 56 - SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, 57 - struct pt_regs *, regs) 45 + SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) 58 46 { 59 - unsigned long oldmask = -EINVAL; 47 + sigset_t oldmask; 48 + sigset_t mask; 49 + unsigned long res; 60 50 61 - if ((unsigned long)how-1 <= 2) { 62 - long sign = how-2; /* -1 .. 1 */ 63 - unsigned long block, unblock; 64 - 65 - newmask &= _BLOCKABLE; 66 - spin_lock_irq(&current->sighand->siglock); 67 - oldmask = current->blocked.sig[0]; 68 - 69 - unblock = oldmask & ~newmask; 70 - block = oldmask | newmask; 71 - if (!sign) 72 - block = unblock; 73 - if (sign <= 0) 74 - newmask = block; 75 - if (_NSIG_WORDS > 1 && sign > 0) 76 - sigemptyset(&current->blocked); 77 - current->blocked.sig[0] = newmask; 78 - recalc_sigpending(); 79 - spin_unlock_irq(&current->sighand->siglock); 80 - 81 - regs->r0 = 0; /* special no error return */ 51 + siginitset(&mask, newmask & ~_BLOCKABLE); 52 + res = sigprocmask(how, &mask, &oldmask); 53 + if (!res) { 54 + force_successful_syscall_return(); 55 + res = oldmask.sig[0]; 82 56 } 83 - return oldmask; 57 + return res; 84 58 } 85 59 86 60 SYSCALL_DEFINE3(osf_sigaction, int, sig, ··· 68 94 old_sigset_t mask; 69 95 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 70 96 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 71 - __get_user(new_ka.sa.sa_flags, &act->sa_flags)) 97 + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 98 + __get_user(mask, &act->sa_mask)) 72 99 return -EFAULT; 73 - __get_user(mask, &act->sa_mask); 74 100 siginitset(&new_ka.sa.sa_mask, mask); 75 101 new_ka.ka_restorer = NULL; 76 102 } ··· 80 106 if (!ret && oact) { 81 107 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 82 108 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 83 - __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) 109 + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 110 + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 84 111 return -EFAULT; 85 - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); 86 112 } 87 113 88 114 return ret;
+1 -1
arch/alpha/kernel/systbls.S
··· 58 58 .quad sys_open /* 45 */ 59 59 .quad alpha_ni_syscall 60 60 .quad sys_getxgid 61 - .quad osf_sigprocmask 61 + .quad sys_osf_sigprocmask 62 62 .quad alpha_ni_syscall 63 63 .quad alpha_ni_syscall /* 50 */ 64 64 .quad sys_acct
+26 -1
arch/arm/Kconfig
··· 271 271 bool "Atmel AT91" 272 272 select ARCH_REQUIRE_GPIOLIB 273 273 select HAVE_CLK 274 - select ARCH_USES_GETTIMEOFFSET 275 274 help 276 275 This enables support for systems based on the Atmel AT91RM9200, 277 276 AT91SAM9 and AT91CAP9 processors. ··· 1049 1050 workaround disables the write-allocate mode for the L2 cache via the 1050 1051 ACTLR register. Note that setting specific bits in the ACTLR register 1051 1052 may not be available in non-secure mode. 1053 + 1054 + config ARM_ERRATA_742230 1055 + bool "ARM errata: DMB operation may be faulty" 1056 + depends on CPU_V7 && SMP 1057 + help 1058 + This option enables the workaround for the 742230 Cortex-A9 1059 + (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction 1060 + between two write operations may not ensure the correct visibility 1061 + ordering of the two writes. This workaround sets a specific bit in 1062 + the diagnostic register of the Cortex-A9 which causes the DMB 1063 + instruction to behave as a DSB, ensuring the correct behaviour of 1064 + the two writes. 1065 + 1066 + config ARM_ERRATA_742231 1067 + bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption" 1068 + depends on CPU_V7 && SMP 1069 + help 1070 + This option enables the workaround for the 742231 Cortex-A9 1071 + (r2p0..r2p2) erratum. Under certain conditions, specific to the 1072 + Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode, 1073 + accessing some data located in the same cache line, may get corrupted 1074 + data due to bad handling of the address hazard when the line gets 1075 + replaced from one of the CPUs at the same time as another CPU is 1076 + accessing it. This workaround sets specific bits in the diagnostic 1077 + register of the Cortex-A9 which reduces the linefill issuing 1078 + capabilities of the processor. 1052 1079 1053 1080 config PL310_ERRATA_588369 1054 1081 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+1 -1
arch/arm/boot/compressed/Makefile
··· 116 116 $(obj)/font.c: $(FONTC) 117 117 $(call cmd,shipped) 118 118 119 - $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config 119 + $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG) 120 120 @sed "$(SEDFLAGS)" < $< > $@
+4
arch/arm/include/asm/pgtable.h
··· 317 317 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 318 318 #define pgprot_dmacoherent(prot) \ 319 319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 320 + #define __HAVE_PHYS_MEM_ACCESS_PROT 321 + struct file; 322 + extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 323 + unsigned long size, pgprot_t vma_prot); 320 324 #else 321 325 #define pgprot_dmacoherent(prot) \ 322 326 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+2
arch/arm/kernel/entry-common.S
··· 48 48 beq no_work_pending 49 49 mov r0, sp @ 'regs' 50 50 mov r2, why @ 'syscall' 51 + tst r1, #_TIF_SIGPENDING @ delivering a signal? 52 + movne why, #0 @ prevent further restarts 51 53 bl do_notify_resume 52 54 b ret_slow_syscall @ Check work again 53 55
+2 -2
arch/arm/mach-at91/at91sam9g45_devices.c
··· 426 426 .sda_is_open_drain = 1, 427 427 .scl_pin = AT91_PIN_PA21, 428 428 .scl_is_open_drain = 1, 429 - .udelay = 2, /* ~100 kHz */ 429 + .udelay = 5, /* ~100 kHz */ 430 430 }; 431 431 432 432 static struct platform_device at91sam9g45_twi0_device = { ··· 440 440 .sda_is_open_drain = 1, 441 441 .scl_pin = AT91_PIN_PB11, 442 442 .scl_is_open_drain = 1, 443 - .udelay = 2, /* ~100 kHz */ 443 + .udelay = 5, /* ~100 kHz */ 444 444 }; 445 445 446 446 static struct platform_device at91sam9g45_twi1_device = {
+1 -2
arch/arm/mach-davinci/dm355.c
··· 769 769 .virtual = SRAM_VIRT, 770 770 .pfn = __phys_to_pfn(0x00010000), 771 771 .length = SZ_32K, 772 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 773 - .type = MT_DEVICE, 772 + .type = MT_MEMORY_NONCACHED, 774 773 }, 775 774 }; 776 775
+1 -2
arch/arm/mach-davinci/dm365.c
··· 969 969 .virtual = SRAM_VIRT, 970 970 .pfn = __phys_to_pfn(0x00010000), 971 971 .length = SZ_32K, 972 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 973 - .type = MT_DEVICE, 972 + .type = MT_MEMORY_NONCACHED, 974 973 }, 975 974 }; 976 975
+1 -2
arch/arm/mach-davinci/dm644x.c
··· 653 653 .virtual = SRAM_VIRT, 654 654 .pfn = __phys_to_pfn(0x00008000), 655 655 .length = SZ_16K, 656 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 657 - .type = MT_DEVICE, 656 + .type = MT_MEMORY_NONCACHED, 658 657 }, 659 658 }; 660 659
+1 -2
arch/arm/mach-davinci/dm646x.c
··· 737 737 .virtual = SRAM_VIRT, 738 738 .pfn = __phys_to_pfn(0x00010000), 739 739 .length = SZ_32K, 740 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 741 - .type = MT_DEVICE, 740 + .type = MT_MEMORY_NONCACHED, 742 741 }, 743 742 }; 744 743
+3 -3
arch/arm/mach-dove/include/mach/io.h
··· 13 13 14 14 #define IO_SPACE_LIMIT 0xffffffff 15 15 16 - #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ 17 - DOVE_PCIE0_IO_VIRT_BASE)) 18 - #define __mem_pci(a) (a) 16 + #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \ 17 + DOVE_PCIE0_IO_VIRT_BASE)) 18 + #define __mem_pci(a) (a) 19 19 20 20 #endif
+1 -1
arch/arm/mach-kirkwood/include/mach/kirkwood.h
··· 38 38 39 39 #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 40 40 #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 41 - #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 41 + #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000 42 42 #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 43 43 44 44 #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
+2 -2
arch/arm/mach-kirkwood/pcie.c
··· 117 117 * IORESOURCE_IO 118 118 */ 119 119 pp->res[0].name = "PCIe 0 I/O Space"; 120 - pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; 120 + pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE; 121 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 122 122 pp->res[0].flags = IORESOURCE_IO; 123 123 ··· 139 139 * IORESOURCE_IO 140 140 */ 141 141 pp->res[0].name = "PCIe 1 I/O Space"; 142 - pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; 142 + pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE; 143 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 144 144 pp->res[0].flags = IORESOURCE_IO; 145 145
+6 -1
arch/arm/mach-mmp/include/mach/system.h
··· 9 9 #ifndef __ASM_MACH_SYSTEM_H 10 10 #define __ASM_MACH_SYSTEM_H 11 11 12 + #include <mach/cputype.h> 13 + 12 14 static inline void arch_idle(void) 13 15 { 14 16 cpu_do_idle(); ··· 18 16 19 17 static inline void arch_reset(char mode, const char *cmd) 20 18 { 21 - cpu_reset(0); 19 + if (cpu_is_pxa168()) 20 + cpu_reset(0xffff0000); 21 + else 22 + cpu_reset(0); 22 23 } 23 24 #endif /* __ASM_MACH_SYSTEM_H */
+1 -2
arch/arm/mach-pxa/cpufreq-pxa2xx.c
··· 312 312 freqs.cpu = policy->cpu; 313 313 314 314 if (freq_debug) 315 - pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " 316 - "(SDRAM %d Mhz)\n", 315 + pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", 317 316 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 318 317 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 319 318
+12
arch/arm/mach-pxa/include/mach/hardware.h
··· 264 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 265 265 * == 0x3 for pxa300/pxa310/pxa320 266 266 */ 267 + #if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) 267 268 #define __cpu_is_pxa2xx(id) \ 268 269 ({ \ 269 270 unsigned int _id = (id) >> 13 & 0x7; \ 270 271 _id <= 0x2; \ 271 272 }) 273 + #else 274 + #define __cpu_is_pxa2xx(id) (0) 275 + #endif 272 276 277 + #ifdef CONFIG_PXA3xx 273 278 #define __cpu_is_pxa3xx(id) \ 274 279 ({ \ 275 280 unsigned int _id = (id) >> 13 & 0x7; \ 276 281 _id == 0x3; \ 277 282 }) 283 + #else 284 + #define __cpu_is_pxa3xx(id) (0) 285 + #endif 278 286 287 + #if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935) 279 288 #define __cpu_is_pxa93x(id) \ 280 289 ({ \ 281 290 unsigned int _id = (id) >> 4 & 0xfff; \ 282 291 _id == 0x683 || _id == 0x693; \ 283 292 }) 293 + #else 294 + #define __cpu_is_pxa93x(id) (0) 295 + #endif 284 296 285 297 #define cpu_is_pxa2xx() \ 286 298 ({ \
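For illustration only (not part of the patch): with the relevant CONFIG_PXA* options unset, the guarded predicate is now the constant 0, so a branch like the hypothetical sketch below folds away at compile time on, say, a PXA25x/PXA27x-only kernel. The helper names other than cpu_is_pxa3xx() are assumptions.
    /* Hypothetical board code, assuming a PXA25x/PXA27x-only configuration. */
    static void example_cpu_setup(void)
    {
            if (cpu_is_pxa3xx())            /* folds to if (0) when CONFIG_PXA3xx is not set */
                    pxa3xx_only_quirk();    /* hypothetical; call is typically eliminated by the compiler */
            else
                    common_setup();         /* hypothetical */
    }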
+5 -1
arch/arm/mach-pxa/palm27x.c
··· 469 469 }, 470 470 }; 471 471 472 + static struct i2c_pxa_platform_data palm27x_i2c_power_info = { 473 + .use_pio = 1, 474 + }; 475 + 472 476 void __init palm27x_pmic_init(void) 473 477 { 474 478 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 475 - pxa27x_set_i2c_power_info(NULL); 479 + pxa27x_set_i2c_power_info(&palm27x_i2c_power_info); 476 480 } 477 481 #endif
+1
arch/arm/mach-pxa/vpac270.c
··· 240 240 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 241 241 static struct pxamci_platform_data vpac270_mci_platform_data = { 242 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 243 + .gpio_power = -1, 243 244 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 244 245 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 245 246 .detect_delay_ms = 200,
+3
arch/arm/mach-u300/include/mach/gpio.h
··· 273 273 extern int gpio_get_value(unsigned gpio); 274 274 extern void gpio_set_value(unsigned gpio, int value); 275 275 276 + #define gpio_get_value_cansleep gpio_get_value 277 + #define gpio_set_value_cansleep gpio_set_value 278 + 276 279 /* wrappers to sleep-enable the previous two functions */ 277 280 static inline unsigned gpio_to_irq(unsigned gpio) 278 281 {
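A hypothetical consumer of the new aliases, just to show the intent (on U300 the *_cansleep accessors now simply map to the non-sleeping ones, so gpiolib-style callers build unchanged):
    #include <linux/gpio.h>

    /* Hypothetical helper; 'gpio' is assumed to be a valid, already-requested U300 GPIO. */
    static void example_pulse(unsigned gpio)
    {
            gpio_set_value_cansleep(gpio, 1);
            gpio_set_value_cansleep(gpio, 0);
    }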
+7 -1
arch/arm/mach-vexpress/ct-ca9x4.c
··· 227 227 int i; 228 228 229 229 #ifdef CONFIG_CACHE_L2X0 230 - l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); 230 + void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); 231 + 232 + /* set RAM latencies to 1 cycle for this core tile. */ 233 + writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); 234 + writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); 235 + 236 + l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); 231 237 #endif 232 238 233 239 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+17 -2
arch/arm/mm/alignment.c
··· 885 885 886 886 if (ai_usermode & UM_SIGNAL) 887 887 force_sig(SIGBUS, current); 888 - else 889 - set_cr(cr_no_alignment); 888 + else { 889 + /* 890 + * We're about to disable the alignment trap and return to 891 + * user space. But if an interrupt occurs before actually 892 + * reaching user space, then the IRQ vector entry code will 893 + * notice that we were still in kernel space and therefore 894 + * the alignment trap won't be re-enabled in that case as it 895 + * is presumed to be always on from kernel space. 896 + * Let's prevent that race by disabling interrupts here (they 897 + * are disabled on the way back to user space anyway in 898 + * entry-common.S) and disable the alignment trap only if 899 + * there is no work pending for this thread. 900 + */ 901 + raw_local_irq_disable(); 902 + if (!(current_thread_info()->flags & _TIF_WORK_MASK)) 903 + set_cr(cr_no_alignment); 904 + } 890 905 891 906 return 0; 892 907 }
+29 -2
arch/arm/mm/mmu.c
··· 15 15 #include <linux/nodemask.h> 16 16 #include <linux/memblock.h> 17 17 #include <linux/sort.h> 18 + #include <linux/fs.h> 18 19 19 20 #include <asm/cputype.h> 20 21 #include <asm/sections.h> ··· 247 246 .domain = DOMAIN_USER, 248 247 }, 249 248 [MT_MEMORY] = { 249 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 250 + L_PTE_USER | L_PTE_EXEC, 251 + .prot_l1 = PMD_TYPE_TABLE, 250 252 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 251 253 .domain = DOMAIN_KERNEL, 252 254 }, ··· 258 254 .domain = DOMAIN_KERNEL, 259 255 }, 260 256 [MT_MEMORY_NONCACHED] = { 257 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 258 + L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, 259 + .prot_l1 = PMD_TYPE_TABLE, 261 260 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 262 261 .domain = DOMAIN_KERNEL, 263 262 }, ··· 418 411 * Enable CPU-specific coherency if supported. 419 412 * (Only available on XSC3 at the moment.) 420 413 */ 421 - if (arch_is_coherent() && cpu_is_xsc3()) 414 + if (arch_is_coherent() && cpu_is_xsc3()) { 422 415 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 423 - 416 + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 417 + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 418 + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 419 + } 424 420 /* 425 421 * ARMv6 and above have extended page tables. 426 422 */ ··· 448 438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 449 439 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 450 440 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 441 + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 451 442 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 443 + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 452 444 #endif 453 445 } 454 446 ··· 487 475 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 488 476 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 489 477 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 478 + mem_types[MT_MEMORY].prot_pte |= kern_pgprot; 479 + mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; 490 480 mem_types[MT_ROM].prot_sect |= cp->pmd; 491 481 492 482 switch (cp->pmd) { ··· 511 497 t->prot_sect |= PMD_DOMAIN(t->domain); 512 498 } 513 499 } 500 + 501 + #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 502 + pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 503 + unsigned long size, pgprot_t vma_prot) 504 + { 505 + if (!pfn_valid(pfn)) 506 + return pgprot_noncached(vma_prot); 507 + else if (file->f_flags & O_SYNC) 508 + return pgprot_writecombine(vma_prot); 509 + return vma_prot; 510 + } 511 + EXPORT_SYMBOL(phys_mem_access_prot); 512 + #endif 514 513 515 514 #define vectors_base() (vectors_high() ? 0xffff0000 : 0) 516 515
+56 -6
arch/arm/mm/proc-v7.S
··· 186 186 * It is assumed that: 187 187 * - cache type register is implemented 188 188 */ 189 - __v7_setup: 189 + __v7_ca9mp_setup: 190 190 #ifdef CONFIG_SMP 191 191 mrc p15, 0, r0, c1, c0, 1 192 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 193 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 194 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 195 195 #endif 196 + __v7_setup: 196 197 adr r12, __v7_setup_stack @ the local stack 197 198 stmia r12, {r0-r5, r7, r9, r11, lr} 198 199 bl v7_flush_dcache_all ··· 202 201 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 203 202 and r10, r0, #0xff000000 @ ARM? 204 203 teq r10, #0x41000000 205 - bne 2f 204 + bne 3f 206 205 and r5, r0, #0x00f00000 @ variant 207 206 and r6, r0, #0x0000000f @ revision 208 - orr r0, r6, r5, lsr #20-4 @ combine variant and revision 207 + orr r6, r6, r5, lsr #20-4 @ combine variant and revision 208 + ubfx r0, r0, #4, #12 @ primary part number 209 209 210 + /* Cortex-A8 Errata */ 211 + ldr r10, =0x00000c08 @ Cortex-A8 primary part number 212 + teq r0, r10 213 + bne 2f 210 214 #ifdef CONFIG_ARM_ERRATA_430973 211 215 teq r5, #0x00100000 @ only present in r1p* 212 216 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register ··· 219 213 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 220 214 #endif 221 215 #ifdef CONFIG_ARM_ERRATA_458693 222 - teq r0, #0x20 @ only present in r2p0 216 + teq r6, #0x20 @ only present in r2p0 223 217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 224 218 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 225 219 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 226 220 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 227 221 #endif 228 222 #ifdef CONFIG_ARM_ERRATA_460075 229 - teq r0, #0x20 @ only present in r2p0 223 + teq r6, #0x20 @ only present in r2p0 230 224 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 231 225 tsteq r10, #1 << 22 232 226 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 233 227 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 234 228 #endif 229 + b 3f 235 230 236 - 2: mov r10, #0 231 + /* Cortex-A9 Errata */ 232 + 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number 233 + teq r0, r10 234 + bne 3f 235 + #ifdef CONFIG_ARM_ERRATA_742230 236 + cmp r6, #0x22 @ only present up to r2p2 237 + mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register 238 + orrle r10, r10, #1 << 4 @ set bit #4 239 + mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register 240 + #endif 241 + #ifdef CONFIG_ARM_ERRATA_742231 242 + teq r6, #0x20 @ present in r2p0 243 + teqne r6, #0x21 @ present in r2p1 244 + teqne r6, #0x22 @ present in r2p2 245 + mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register 246 + orreq r10, r10, #1 << 12 @ set bit #12 247 + orreq r10, r10, #1 << 22 @ set bit #22 248 + mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 249 + #endif 250 + 251 + 3: mov r10, #0 237 252 #ifdef HARVARD_CACHE 238 253 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 239 254 #endif ··· 349 322 .align 350 323 351 324 .section ".proc.info.init", #alloc, #execinstr 325 + 326 + .type __v7_ca9mp_proc_info, #object 327 + __v7_ca9mp_proc_info: 328 + .long 0x410fc090 @ Required ID value 329 + .long 0xff0ffff0 @ Mask for ID 330 + .long PMD_TYPE_SECT | \ 331 + PMD_SECT_AP_WRITE | \ 332 + PMD_SECT_AP_READ | \ 333 + PMD_FLAGS 334 + .long PMD_TYPE_SECT | \ 335 + PMD_SECT_XN | \ 336 + PMD_SECT_AP_WRITE | \ 337 + PMD_SECT_AP_READ 338 + b __v7_ca9mp_setup 339 + .long cpu_arch_name 340 + .long cpu_elf_name 341 + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 342 + .long cpu_v7_name 343 + .long v7_processor_functions 344 + .long v7wbi_tlb_fns 345 + .long v6_user_fns 346 + .long v7_cache_fns 347 + .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 352 348 353 349 /* 354 350 * Match any ARMv7 processor core.
+12 -21
arch/arm/plat-nomadik/timer.c
··· 1 1 /* 2 - * linux/arch/arm/mach-nomadik/timer.c 2 + * linux/arch/arm/plat-nomadik/timer.c 3 3 * 4 4 * Copyright (C) 2008 STMicroelectronics 5 5 * Copyright (C) 2010 Alessandro Rubini ··· 75 75 cr = readl(mtu_base + MTU_CR(1)); 76 76 writel(0, mtu_base + MTU_LR(1)); 77 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 78 - writel(0x2, mtu_base + MTU_IMSC); 78 + writel(1 << 1, mtu_base + MTU_IMSC); 79 79 break; 80 80 case CLOCK_EVT_MODE_SHUTDOWN: 81 81 case CLOCK_EVT_MODE_UNUSED: ··· 131 131 { 132 132 unsigned long rate; 133 133 struct clk *clk0; 134 - struct clk *clk1; 135 - u32 cr; 134 + u32 cr = MTU_CRn_32BITS; 136 135 137 136 clk0 = clk_get_sys("mtu0", NULL); 138 137 BUG_ON(IS_ERR(clk0)); 139 138 140 - clk1 = clk_get_sys("mtu1", NULL); 141 - BUG_ON(IS_ERR(clk1)); 142 - 143 139 clk_enable(clk0); 144 - clk_enable(clk1); 145 140 146 141 /* 147 - * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: 148 - * use a divide-by-16 counter if it's more than 16MHz 142 + * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz 143 + * for ux500. 144 + * Use a divide-by-16 counter if the tick rate is more than 32MHz. 145 + * At 32 MHz, the timer (with 32 bit counter) can be programmed 146 + * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer 147 + * with 16 gives too low timer resolution. 149 148 */ 150 - cr = MTU_CRn_32BITS;; 151 149 rate = clk_get_rate(clk0); 152 - if (rate > 16 << 20) { 150 + if (rate > 32000000) { 153 151 rate /= 16; 154 152 cr |= MTU_CRn_PRESCALE_16; 155 153 } else { ··· 168 170 pr_err("timer: failed to initialize clock source %s\n", 169 171 nmdk_clksrc.name); 170 172 171 - /* Timer 1 is used for events, fix according to rate */ 172 - cr = MTU_CRn_32BITS; 173 - rate = clk_get_rate(clk1); 174 - if (rate > 16 << 20) { 175 - rate /= 16; 176 - cr |= MTU_CRn_PRESCALE_16; 177 - } else { 178 - cr |= MTU_CRn_PRESCALE_1; 179 - } 173 + /* Timer 1 is used for events */ 174 + 180 175 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 181 176 182 177 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
+5 -20
arch/arm/plat-omap/sram.c
··· 220 220 if (omap_sram_size == 0) 221 221 return; 222 222 223 - if (cpu_is_omap24xx()) { 224 - omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA; 225 - 226 - base = OMAP2_SRAM_PA; 227 - base = ROUND_DOWN(base, PAGE_SIZE); 228 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 229 - } 230 - 231 223 if (cpu_is_omap34xx()) { 232 - omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA; 233 - base = OMAP3_SRAM_PA; 234 - base = ROUND_DOWN(base, PAGE_SIZE); 235 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 236 - 237 224 /* 238 225 * SRAM must be marked as non-cached on OMAP3 since the 239 226 * CORE DPLL M2 divider change code (in SRAM) runs with the ··· 231 244 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 232 245 } 233 246 234 - if (cpu_is_omap44xx()) { 235 - omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; 236 - base = OMAP4_SRAM_PA; 237 - base = ROUND_DOWN(base, PAGE_SIZE); 238 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 239 - } 240 - omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */ 247 + omap_sram_io_desc[0].virtual = omap_sram_base; 248 + base = omap_sram_start; 249 + base = ROUND_DOWN(base, PAGE_SIZE); 250 + omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 251 + omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); 241 252 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 242 253 243 254 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
+6
arch/ia64/Kconfig
··· 53 53 bool 54 54 default y 55 55 56 + config ARCH_DMA_ADDR_T_64BIT 57 + def_bool y 58 + 56 59 config NEED_DMA_MAP_STATE 57 60 def_bool y 58 61 ··· 64 61 65 62 config SWIOTLB 66 63 bool 64 + 65 + config STACKTRACE_SUPPORT 66 + def_bool y 67 67 68 68 config GENERIC_LOCKBREAK 69 69 def_bool n
-208
arch/ia64/include/asm/compat.h
··· 1 - #ifndef _ASM_IA64_COMPAT_H 2 - #define _ASM_IA64_COMPAT_H 3 - /* 4 - * Architecture specific compatibility types 5 - */ 6 - #include <linux/types.h> 7 - 8 - #define COMPAT_USER_HZ 100 9 - #define COMPAT_UTS_MACHINE "i686\0\0\0" 10 - 11 - typedef u32 compat_size_t; 12 - typedef s32 compat_ssize_t; 13 - typedef s32 compat_time_t; 14 - typedef s32 compat_clock_t; 15 - typedef s32 compat_key_t; 16 - typedef s32 compat_pid_t; 17 - typedef u16 __compat_uid_t; 18 - typedef u16 __compat_gid_t; 19 - typedef u32 __compat_uid32_t; 20 - typedef u32 __compat_gid32_t; 21 - typedef u16 compat_mode_t; 22 - typedef u32 compat_ino_t; 23 - typedef u16 compat_dev_t; 24 - typedef s32 compat_off_t; 25 - typedef s64 compat_loff_t; 26 - typedef u16 compat_nlink_t; 27 - typedef u16 compat_ipc_pid_t; 28 - typedef s32 compat_daddr_t; 29 - typedef u32 compat_caddr_t; 30 - typedef __kernel_fsid_t compat_fsid_t; 31 - typedef s32 compat_timer_t; 32 - 33 - typedef s32 compat_int_t; 34 - typedef s32 compat_long_t; 35 - typedef s64 __attribute__((aligned(4))) compat_s64; 36 - typedef u32 compat_uint_t; 37 - typedef u32 compat_ulong_t; 38 - typedef u64 __attribute__((aligned(4))) compat_u64; 39 - 40 - struct compat_timespec { 41 - compat_time_t tv_sec; 42 - s32 tv_nsec; 43 - }; 44 - 45 - struct compat_timeval { 46 - compat_time_t tv_sec; 47 - s32 tv_usec; 48 - }; 49 - 50 - struct compat_stat { 51 - compat_dev_t st_dev; 52 - u16 __pad1; 53 - compat_ino_t st_ino; 54 - compat_mode_t st_mode; 55 - compat_nlink_t st_nlink; 56 - __compat_uid_t st_uid; 57 - __compat_gid_t st_gid; 58 - compat_dev_t st_rdev; 59 - u16 __pad2; 60 - u32 st_size; 61 - u32 st_blksize; 62 - u32 st_blocks; 63 - u32 st_atime; 64 - u32 st_atime_nsec; 65 - u32 st_mtime; 66 - u32 st_mtime_nsec; 67 - u32 st_ctime; 68 - u32 st_ctime_nsec; 69 - u32 __unused4; 70 - u32 __unused5; 71 - }; 72 - 73 - struct compat_flock { 74 - short l_type; 75 - short l_whence; 76 - compat_off_t l_start; 77 - compat_off_t l_len; 78 - compat_pid_t l_pid; 79 - }; 80 - 81 - #define F_GETLK64 12 82 - #define F_SETLK64 13 83 - #define F_SETLKW64 14 84 - 85 - /* 86 - * IA32 uses 4 byte alignment for 64 bit quantities, 87 - * so we need to pack this structure. 88 - */ 89 - struct compat_flock64 { 90 - short l_type; 91 - short l_whence; 92 - compat_loff_t l_start; 93 - compat_loff_t l_len; 94 - compat_pid_t l_pid; 95 - } __attribute__((packed)); 96 - 97 - struct compat_statfs { 98 - int f_type; 99 - int f_bsize; 100 - int f_blocks; 101 - int f_bfree; 102 - int f_bavail; 103 - int f_files; 104 - int f_ffree; 105 - compat_fsid_t f_fsid; 106 - int f_namelen; /* SunOS ignores this field. */ 107 - int f_frsize; 108 - int f_spare[5]; 109 - }; 110 - 111 - #define COMPAT_RLIM_OLD_INFINITY 0x7fffffff 112 - #define COMPAT_RLIM_INFINITY 0xffffffff 113 - 114 - typedef u32 compat_old_sigset_t; /* at least 32 bits */ 115 - 116 - #define _COMPAT_NSIG 64 117 - #define _COMPAT_NSIG_BPW 32 118 - 119 - typedef u32 compat_sigset_word; 120 - 121 - #define COMPAT_OFF_T_MAX 0x7fffffff 122 - #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL 123 - 124 - struct compat_ipc64_perm { 125 - compat_key_t key; 126 - __compat_uid32_t uid; 127 - __compat_gid32_t gid; 128 - __compat_uid32_t cuid; 129 - __compat_gid32_t cgid; 130 - unsigned short mode; 131 - unsigned short __pad1; 132 - unsigned short seq; 133 - unsigned short __pad2; 134 - compat_ulong_t unused1; 135 - compat_ulong_t unused2; 136 - }; 137 - 138 - struct compat_semid64_ds { 139 - struct compat_ipc64_perm sem_perm; 140 - compat_time_t sem_otime; 141 - compat_ulong_t __unused1; 142 - compat_time_t sem_ctime; 143 - compat_ulong_t __unused2; 144 - compat_ulong_t sem_nsems; 145 - compat_ulong_t __unused3; 146 - compat_ulong_t __unused4; 147 - }; 148 - 149 - struct compat_msqid64_ds { 150 - struct compat_ipc64_perm msg_perm; 151 - compat_time_t msg_stime; 152 - compat_ulong_t __unused1; 153 - compat_time_t msg_rtime; 154 - compat_ulong_t __unused2; 155 - compat_time_t msg_ctime; 156 - compat_ulong_t __unused3; 157 - compat_ulong_t msg_cbytes; 158 - compat_ulong_t msg_qnum; 159 - compat_ulong_t msg_qbytes; 160 - compat_pid_t msg_lspid; 161 - compat_pid_t msg_lrpid; 162 - compat_ulong_t __unused4; 163 - compat_ulong_t __unused5; 164 - }; 165 - 166 - struct compat_shmid64_ds { 167 - struct compat_ipc64_perm shm_perm; 168 - compat_size_t shm_segsz; 169 - compat_time_t shm_atime; 170 - compat_ulong_t __unused1; 171 - compat_time_t shm_dtime; 172 - compat_ulong_t __unused2; 173 - compat_time_t shm_ctime; 174 - compat_ulong_t __unused3; 175 - compat_pid_t shm_cpid; 176 - compat_pid_t shm_lpid; 177 - compat_ulong_t shm_nattch; 178 - compat_ulong_t __unused4; 179 - compat_ulong_t __unused5; 180 - }; 181 - 182 - /* 183 - * A pointer passed in from user mode. This should not be used for syscall parameters, 184 - * just declare them as pointers because the syscall entry code will have appropriately 185 - * converted them already. 186 - */ 187 - typedef u32 compat_uptr_t; 188 - 189 - static inline void __user * 190 - compat_ptr (compat_uptr_t uptr) 191 - { 192 - return (void __user *) (unsigned long) uptr; 193 - } 194 - 195 - static inline compat_uptr_t 196 - ptr_to_compat(void __user *uptr) 197 - { 198 - return (u32)(unsigned long)uptr; 199 - } 200 - 201 - static __inline__ void __user * 202 - arch_compat_alloc_user_space (long len) 203 - { 204 - struct pt_regs *regs = task_pt_regs(current); 205 - return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); 206 - } 207 - 208 - #endif /* _ASM_IA64_COMPAT_H */
+5 -6
arch/ia64/include/asm/hardirq.h
··· 6 6 * David Mosberger-Tang <davidm@hpl.hp.com> 7 7 */ 8 8 9 - 10 - #include <linux/threads.h> 11 - #include <linux/irq.h> 12 - 13 - #include <asm/processor.h> 14 - 15 9 /* 16 10 * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure. 17 11 */ ··· 13 19 #define __ARCH_IRQ_STAT 1 14 20 15 21 #define local_softirq_pending() (local_cpu_data->softirq_pending) 22 + 23 + #include <linux/threads.h> 24 + #include <linux/irq.h> 25 + 26 + #include <asm/processor.h> 16 27 17 28 extern void __iomem *ipi_base_addr; 18 29
+6
arch/ia64/include/asm/iommu_table.h
··· 1 + #ifndef _ASM_IA64_IOMMU_TABLE_H 2 + #define _ASM_IA64_IOMMU_TABLE_H 3 + 4 + #define IOMMU_INIT_POST(_detect) 5 + 6 + #endif /* _ASM_IA64_IOMMU_TABLE_H */
+1
arch/ia64/kernel/Makefile
··· 34 34 obj-$(CONFIG_PCI_MSI) += msi_ia64.o 35 35 mca_recovery-y += mca_drv.o mca_drv_asm.o 36 36 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o 37 + obj-$(CONFIG_STACKTRACE) += stacktrace.o 37 38 38 39 obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ 39 40 paravirt_patch.o
+1 -1
arch/ia64/kernel/cyclone.c
··· 59 59 return -ENODEV; 60 60 } 61 61 base = readq(reg); 62 + iounmap(reg); 62 63 if(!base){ 63 64 printk(KERN_ERR "Summit chipset: Could not find valid CBAR" 64 65 " value.\n"); 65 66 use_cyclone = 0; 66 67 return -ENODEV; 67 68 } 68 - iounmap(reg); 69 69 70 70 /* setup PMCC */ 71 71 offset = (base + CYCLONE_PMCC_OFFSET);
+1 -47
arch/ia64/kernel/iosapic.c
··· 108 108 #define DBG(fmt...) 109 109 #endif 110 110 111 - #define NR_PREALLOCATE_RTE_ENTRIES \ 112 - (PAGE_SIZE / sizeof(struct iosapic_rte_info)) 113 - #define RTE_PREALLOCATED (1) 114 - 115 111 static DEFINE_SPINLOCK(iosapic_lock); 116 112 117 113 /* ··· 132 136 struct list_head rte_list; /* RTEs sharing the same vector */ 133 137 char rte_index; /* IOSAPIC RTE index */ 134 138 int refcnt; /* reference counter */ 135 - unsigned int flags; /* flags */ 136 139 struct iosapic *iosapic; 137 140 } ____cacheline_aligned; 138 141 ··· 149 154 } iosapic_intr_info[NR_IRQS]; 150 155 151 156 static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */ 152 - 153 - static int iosapic_kmalloc_ok; 154 - static LIST_HEAD(free_rte_list); 155 157 156 158 static inline void 157 159 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val) ··· 544 552 } 545 553 } 546 554 547 - static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void) 548 - { 549 - int i; 550 - struct iosapic_rte_info *rte; 551 - int preallocated = 0; 552 - 553 - if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) { 554 - rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * 555 - NR_PREALLOCATE_RTE_ENTRIES); 556 - for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++) 557 - list_add(&rte->rte_list, &free_rte_list); 558 - } 559 - 560 - if (!list_empty(&free_rte_list)) { 561 - rte = list_entry(free_rte_list.next, struct iosapic_rte_info, 562 - rte_list); 563 - list_del(&rte->rte_list); 564 - preallocated++; 565 - } else { 566 - rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC); 567 - if (!rte) 568 - return NULL; 569 - } 570 - 571 - memset(rte, 0, sizeof(struct iosapic_rte_info)); 572 - if (preallocated) 573 - rte->flags |= RTE_PREALLOCATED; 574 - 575 - return rte; 576 - } 577 - 578 555 static inline int irq_is_shared (int irq) 579 556 { 580 557 return (iosapic_intr_info[irq].count > 1); ··· 576 615 577 616 rte = find_rte(irq, gsi); 578 617 if (!rte) { 579 - rte = iosapic_alloc_rte(); 618 + rte = kzalloc(sizeof (*rte), GFP_ATOMIC); 580 619 if (!rte) { 581 620 printk(KERN_WARNING "%s: cannot allocate memory\n", 582 621 __func__); ··· 1126 1165 return; 1127 1166 } 1128 1167 #endif 1129 - 1130 - static int __init iosapic_enable_kmalloc (void) 1131 - { 1132 - iosapic_kmalloc_ok = 1; 1133 - return 0; 1134 - } 1135 - core_initcall (iosapic_enable_kmalloc);
+4
arch/ia64/kernel/irq_ia64.c
··· 30 30 #include <linux/bitops.h> 31 31 #include <linux/irq.h> 32 32 #include <linux/ratelimit.h> 33 + #include <linux/acpi.h> 33 34 34 35 #include <asm/delay.h> 35 36 #include <asm/intrinsics.h> ··· 652 651 void __init 653 652 init_IRQ (void) 654 653 { 654 + #ifdef CONFIG_ACPI 655 + acpi_boot_init(); 656 + #endif 655 657 ia64_register_ipi(); 656 658 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 657 659 #ifdef CONFIG_SMP
+19 -19
arch/ia64/kernel/mca.c
··· 2055 2055 2056 2056 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); 2057 2057 2058 - /* 2059 - * Configure the CMCI/P vector and handler. Interrupts for CMC are 2060 - * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 2061 - */ 2062 - register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); 2063 - register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); 2064 - ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ 2065 - 2066 - /* Setup the MCA rendezvous interrupt vector */ 2067 - register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); 2068 - 2069 - /* Setup the MCA wakeup interrupt vector */ 2070 - register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); 2071 - 2072 - #ifdef CONFIG_ACPI 2073 - /* Setup the CPEI/P handler */ 2074 - register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2075 - #endif 2076 - 2077 2058 /* Initialize the areas set aside by the OS to buffer the 2078 2059 * platform/processor error states for MCA/INIT/CMC 2079 2060 * handling. ··· 2083 2102 { 2084 2103 if (!mca_init) 2085 2104 return 0; 2105 + 2106 + /* 2107 + * Configure the CMCI/P vector and handler. Interrupts for CMC are 2108 + * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 2109 + */ 2110 + register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); 2111 + register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); 2112 + ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ 2113 + 2114 + /* Setup the MCA rendezvous interrupt vector */ 2115 + register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); 2116 + 2117 + /* Setup the MCA wakeup interrupt vector */ 2118 + register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); 2119 + 2120 + #ifdef CONFIG_ACPI 2121 + /* Setup the CPEI/P handler */ 2122 + register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2123 + #endif 2086 2124 2087 2125 register_hotcpu_notifier(&mca_cpu_notifier); 2088 2126
+1 -1
arch/ia64/kernel/palinfo.c
··· 434 434 unsigned long phys_stacked; 435 435 pal_hints_u_t hints; 436 436 unsigned long iregs, dregs; 437 - char *info_type[]={ 437 + static const char * const info_type[] = { 438 438 "Implemented AR(s)", 439 439 "AR(s) with read side-effects", 440 440 "Implemented CR(s)",
+6 -6
arch/ia64/kernel/perfmon.c
··· 1573 1573 return -EINVAL; 1574 1574 } 1575 1575 1576 - ctx = (pfm_context_t *)filp->private_data; 1576 + ctx = filp->private_data; 1577 1577 if (ctx == NULL) { 1578 1578 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); 1579 1579 return -EINVAL; ··· 1673 1673 return 0; 1674 1674 } 1675 1675 1676 - ctx = (pfm_context_t *)filp->private_data; 1676 + ctx = filp->private_data; 1677 1677 if (ctx == NULL) { 1678 1678 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); 1679 1679 return 0; ··· 1733 1733 return -EBADF; 1734 1734 } 1735 1735 1736 - ctx = (pfm_context_t *)filp->private_data; 1736 + ctx = filp->private_data; 1737 1737 if (ctx == NULL) { 1738 1738 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); 1739 1739 return -EBADF; ··· 1841 1841 return -EBADF; 1842 1842 } 1843 1843 1844 - ctx = (pfm_context_t *)filp->private_data; 1844 + ctx = filp->private_data; 1845 1845 if (ctx == NULL) { 1846 1846 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); 1847 1847 return -EBADF; ··· 1984 1984 return -EBADF; 1985 1985 } 1986 1986 1987 - ctx = (pfm_context_t *)filp->private_data; 1987 + ctx = filp->private_data; 1988 1988 if (ctx == NULL) { 1989 1989 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); 1990 1990 return -EBADF; ··· 4907 4907 goto error_args; 4908 4908 } 4909 4909 4910 - ctx = (pfm_context_t *)file->private_data; 4910 + ctx = file->private_data; 4911 4911 if (unlikely(ctx == NULL)) { 4912 4912 DPRINT(("no context for fd %d\n", fd)); 4913 4913 goto error_args;
+1 -1
arch/ia64/kernel/salinfo.c
··· 642 642 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { 643 643 data = salinfo_data + i; 644 644 data->type = i; 645 - init_MUTEX(&data->mutex); 645 + sema_init(&data->mutex, 1); 646 646 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); 647 647 if (!dir) 648 648 continue;
-4
arch/ia64/kernel/setup.c
··· 594 594 cpu_init(); /* initialize the bootstrap CPU */ 595 595 mmu_context_init(); /* initialize context_id bitmap */ 596 596 597 - #ifdef CONFIG_ACPI 598 - acpi_boot_init(); 599 - #endif 600 - 601 597 paravirt_banner(); 602 598 paravirt_arch_setup_console(cmdline_p); 603 599
+39
arch/ia64/kernel/stacktrace.c
··· 1 + /* 2 + * arch/ia64/kernel/stacktrace.c 3 + * 4 + * Stack trace management functions 5 + * 6 + */ 7 + #include <linux/sched.h> 8 + #include <linux/stacktrace.h> 9 + #include <linux/module.h> 10 + 11 + static void 12 + ia64_do_save_stack(struct unw_frame_info *info, void *arg) 13 + { 14 + struct stack_trace *trace = arg; 15 + unsigned long ip; 16 + int skip = trace->skip; 17 + 18 + trace->nr_entries = 0; 19 + do { 20 + unw_get_ip(info, &ip); 21 + if (ip == 0) 22 + break; 23 + if (skip == 0) { 24 + trace->entries[trace->nr_entries++] = ip; 25 + if (trace->nr_entries == trace->max_entries) 26 + break; 27 + } else 28 + skip--; 29 + } while (unw_unwind(info) >= 0); 30 + } 31 + 32 + /* 33 + * Save stack-backtrace addresses into a stack_trace buffer. 34 + */ 35 + void save_stack_trace(struct stack_trace *trace) 36 + { 37 + unw_init_running(ia64_do_save_stack, trace); 38 + } 39 + EXPORT_SYMBOL(save_stack_trace);
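For context, a minimal sketch of a caller of this new interface (the wrapper function and buffer size below are hypothetical; struct stack_trace and print_stack_trace() are the generic <linux/stacktrace.h> facilities that the new STACKTRACE_SUPPORT option ties into):
    static void example_dump_stack_trace(void)
    {
            unsigned long entries[16];
            struct stack_trace trace = {
                    .entries        = entries,
                    .max_entries    = ARRAY_SIZE(entries),
                    .skip           = 1,    /* omit this helper itself */
            };

            save_stack_trace(&trace);       /* now backed by unw_init_running() on ia64 */
            print_stack_trace(&trace, 0);
    }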
+19 -4
arch/ia64/kernel/unwind.c
··· 1204 1204 static inline unw_hash_index_t 1205 1205 hash (unsigned long ip) 1206 1206 { 1207 - # define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */ 1207 + /* magic number = ((sqrt(5)-1)/2)*2^64 */ 1208 + static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL; 1208 1209 1209 - return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE); 1210 - #undef hashmagic 1210 + return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE); 1211 1211 } 1212 1212 1213 1213 static inline long ··· 1531 1531 struct unw_labeled_state *ls, *next; 1532 1532 unsigned long ip = info->ip; 1533 1533 struct unw_state_record sr; 1534 - struct unw_table *table; 1534 + struct unw_table *table, *prev; 1535 1535 struct unw_reg_info *r; 1536 1536 struct unw_insn insn; 1537 1537 u8 *dp, *desc_end; ··· 1560 1560 1561 1561 STAT(parse_start = ia64_get_itc()); 1562 1562 1563 + prev = NULL; 1563 1564 for (table = unw.tables; table; table = table->next) { 1564 1565 if (ip >= table->start && ip < table->end) { 1566 + /* 1567 + * Leave the kernel unwind table at the very front, 1568 + * lest moving it breaks some assumption elsewhere. 1569 + * Otherwise, move the matching table to the second 1570 + * position in the list so that traversals can benefit 1571 + * from commonality in backtrace paths. 1572 + */ 1573 + if (prev && prev != unw.tables) { 1574 + /* unw is safe - we're already spinlocked */ 1575 + prev->next = table->next; 1576 + table->next = unw.tables->next; 1577 + unw.tables->next = table; 1578 + } 1565 1579 e = lookup(table, ip - table->segment_base); 1566 1580 break; 1567 1581 } 1582 + prev = table; 1568 1583 } 1569 1584 if (!e) { 1570 1585 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
+2 -3
arch/ia64/xen/xen_pv_ops.c
··· 1136 1136 static void __init 1137 1137 xen_patch_branch(unsigned long tag, unsigned long type) 1138 1138 { 1139 - const unsigned long nelem = 1140 - sizeof(xen_branch_target) / sizeof(xen_branch_target[0]); 1141 - __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem); 1139 + __paravirt_patch_apply_branch(tag, type, xen_branch_target, 1140 + ARRAY_SIZE(xen_branch_target)); 1142 1141 }
+1 -1
arch/mn10300/Kconfig.debug
··· 101 101 102 102 choice 103 103 prompt "GDB stub port" 104 - default GDBSTUB_TTYSM0 104 + default GDBSTUB_ON_TTYSM0 105 105 depends on GDBSTUB 106 106 help 107 107 Select the serial port used for GDB-stub.
+20 -15
arch/mn10300/kernel/signal.c
··· 65 65 old_sigset_t mask; 66 66 if (verify_area(VERIFY_READ, act, sizeof(*act)) || 67 67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 68 - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) 68 + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || 69 + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 70 + __get_user(mask, &act->sa_mask)) 69 71 return -EFAULT; 70 - __get_user(new_ka.sa.sa_flags, &act->sa_flags); 71 - __get_user(mask, &act->sa_mask); 72 72 siginitset(&new_ka.sa.sa_mask, mask); 73 73 } 74 74 ··· 77 77 if (!ret && oact) { 78 78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || 79 79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 80 - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) 80 + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || 81 + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 82 + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 81 83 return -EFAULT; 82 - __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 83 - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); 84 84 } 85 85 86 86 return ret; ··· 101 101 struct sigcontext __user *sc, long *_d0) 102 102 { 103 103 unsigned int err = 0; 104 + 105 + /* Always make any pending restarted system calls return -EINTR */ 106 + current_thread_info()->restart_block.fn = do_no_restart_syscall; 104 107 105 108 if (is_using_fpu(current)) 106 109 fpu_kill_state(current); ··· 333 330 regs->d0 = sig; 334 331 regs->d1 = (unsigned long) &frame->sc; 335 332 336 - set_fs(USER_DS); 337 - 338 333 /* the tracer may want to single-step inside the handler */ 339 334 if (test_thread_flag(TIF_SINGLESTEP)) 340 335 ptrace_notify(SIGTRAP); ··· 346 345 return 0; 347 346 348 347 give_sigsegv: 349 - force_sig(SIGSEGV, current); 348 + force_sigsegv(sig, current); 350 349 return -EFAULT; 351 350 } 352 351 ··· 414 413 regs->d0 = sig; 415 414 regs->d1 = (long) &frame->info; 416 415 417 - set_fs(USER_DS); 418 - 419 416 /* the tracer may want to single-step inside the handler */ 420 417 if (test_thread_flag(TIF_SINGLESTEP)) 421 418 ptrace_notify(SIGTRAP); ··· 427 428 return 0; 428 429 429 430 give_sigsegv: 430 - force_sig(SIGSEGV, current); 431 + force_sigsegv(sig, current); 431 432 return -EFAULT; 433 + } 434 + 435 + static inline void stepback(struct pt_regs *regs) 436 + { 437 + regs->pc -= 2; 438 + regs->orig_d0 = -1; 432 439 } 433 440 434 441 /* ··· 464 459 /* fallthrough */ 465 460 case -ERESTARTNOINTR: 466 461 regs->d0 = regs->orig_d0; 467 - regs->pc -= 2; 462 + stepback(regs); 468 463 } 469 464 } 470 465 ··· 532 527 case -ERESTARTSYS: 533 528 case -ERESTARTNOINTR: 534 529 regs->d0 = regs->orig_d0; 535 - regs->pc -= 2; 530 + stepback(regs); 536 531 break; 537 532 538 533 case -ERESTART_RESTARTBLOCK: 539 534 regs->d0 = __NR_restart_syscall; 540 - regs->pc -= 2; 535 + stepback(regs); 541 536 break; 542 537 } 543 538 }
+6 -8
arch/mn10300/mm/Makefile
··· 2 2 # Makefile for the MN10300-specific memory management code 3 3 # 4 4 5 + cacheflush-y := cache.o cache-mn10300.o 6 + cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o 7 + 8 + cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o 9 + 5 10 obj-y := \ 6 11 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ 7 - misalignment.o dma-alloc.o 8 - 9 - ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y) 10 - obj-y += cache.o cache-mn10300.o 11 - ifeq ($(CONFIG_MN10300_CACHE_WBACK),y) 12 - obj-y += cache-flush-mn10300.o 13 - endif 14 - endif 12 + misalignment.o dma-alloc.o $(cacheflush-y)
+21
arch/mn10300/mm/cache-disabled.c
··· 1 + /* Handle the cache being disabled 2 + * 3 + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. 4 + * Written by David Howells (dhowells@redhat.com) 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public Licence 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the Licence, or (at your option) any later version. 10 + */ 11 + #include <linux/mm.h> 12 + 13 + /* 14 + * allow userspace to flush the instruction cache 15 + */ 16 + asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) 17 + { 18 + if (end < start) 19 + return -EINVAL; 20 + return 0; 21 + }
+13 -1
arch/x86/boot/early_serial_console.c
··· 58 58 if (arg[pos] == ',') 59 59 pos++; 60 60 61 - if (!strncmp(arg, "ttyS", 4)) { 61 + /* 62 + * make sure we have 63 + * "serial,0x3f8,115200" 64 + * "serial,ttyS0,115200" 65 + * "ttyS0,115200" 66 + */ 67 + if (pos == 7 && !strncmp(arg + pos, "0x", 2)) { 68 + port = simple_strtoull(arg + pos, &e, 16); 69 + if (port == 0 || arg + pos == e) 70 + port = DEFAULT_SERIAL_PORT; 71 + else 72 + pos = e - arg; 73 + } else if (!strncmp(arg + pos, "ttyS", 4)) { 62 74 static const int bases[] = { 0x3f8, 0x2f8 }; 63 75 int idx = 0; 64 76
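In practice this lets the boot-time parameter name the UART by I/O port as well as by ttyS index; assuming the usual earlyprintk= syntax handled by this file, all three of the forms listed in the new comment now work, e.g.
    earlyprintk=serial,0x3f8,115200
    earlyprintk=serial,ttyS0,115200
    earlyprintk=ttyS0,115200
which select the same port 0x3f8 at 115200 baud (ttyS0 maps to 0x3f8 via the bases[] table).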
+6
arch/x86/include/asm/amd_iommu_proto.h
··· 38 38 39 39 #endif /* !CONFIG_AMD_IOMMU_STATS */ 40 40 41 + static inline bool is_rd890_iommu(struct pci_dev *pdev) 42 + { 43 + return (pdev->vendor == PCI_VENDOR_ID_ATI) && 44 + (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); 45 + } 46 + 41 47 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
+12
arch/x86/include/asm/amd_iommu_types.h
··· 368 368 /* capabilities of that IOMMU read from ACPI */ 369 369 u32 cap; 370 370 371 + /* flags read from acpi table */ 372 + u8 acpi_flags; 373 + 371 374 /* 372 375 * Capability pointer. There could be more than one IOMMU per PCI 373 376 * device function if there are more than one AMD IOMMU capability ··· 414 411 415 412 /* default dma_ops domain for that IOMMU */ 416 413 struct dma_ops_domain *default_dom; 414 + 415 + /* 416 + * This array is required to work around a potential BIOS bug. 417 + * The BIOS may miss to restore parts of the PCI configuration 418 + * space when the system resumes from S3. The result is that the 419 + * IOMMU does not execute commands anymore which leads to system 420 + * failure. 421 + */ 422 + u32 cache_cfg[4]; 417 423 }; 418 424 419 425 /*
+1 -1
arch/x86/include/asm/bitops.h
··· 309 309 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 310 310 { 311 311 return ((1UL << (nr % BITS_PER_LONG)) & 312 - (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; 312 + (addr[nr / BITS_PER_LONG])) != 0; 313 313 } 314 314 315 315 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
+2
arch/x86/kernel/Makefile
··· 11 11 CFLAGS_REMOVE_tsc.o = -pg 12 12 CFLAGS_REMOVE_rtc.o = -pg 13 13 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 14 + CFLAGS_REMOVE_pvclock.o = -pg 15 + CFLAGS_REMOVE_kvmclock.o = -pg 14 16 CFLAGS_REMOVE_ftrace.o = -pg 15 17 CFLAGS_REMOVE_early_printk.o = -pg 16 18 endif
+3 -1
arch/x86/kernel/amd_iommu.c
··· 1953 1953 size_t size, 1954 1954 int dir) 1955 1955 { 1956 + dma_addr_t flush_addr; 1956 1957 dma_addr_t i, start; 1957 1958 unsigned int pages; 1958 1959 ··· 1961 1960 (dma_addr + size > dma_dom->aperture_size)) 1962 1961 return; 1963 1962 1963 + flush_addr = dma_addr; 1964 1964 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 1965 1965 dma_addr &= PAGE_MASK; 1966 1966 start = dma_addr; ··· 1976 1974 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1977 1975 1978 1976 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1979 - iommu_flush_pages(&dma_dom->domain, dma_addr, size); 1977 + iommu_flush_pages(&dma_dom->domain, flush_addr, size); 1980 1978 dma_dom->need_flush = false; 1981 1979 } 1982 1980 }
+45 -22
arch/x86/kernel/amd_iommu_init.c
··· 632 632 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 633 633 MMIO_GET_LD(range)); 634 634 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 635 + 636 + if (is_rd890_iommu(iommu->dev)) { 637 + pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); 638 + pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); 639 + pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); 640 + pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); 641 + } 635 642 } 636 643 637 644 /* ··· 656 649 struct ivhd_entry *e; 657 650 658 651 /* 659 - * First set the recommended feature enable bits from ACPI 660 - * into the IOMMU control registers 652 + * First save the recommended feature enable bits from ACPI 661 653 */ 662 - h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? 663 - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 664 - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 665 - 666 - h->flags & IVHD_FLAG_PASSPW_EN_MASK ? 667 - iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 668 - iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 669 - 670 - h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 671 - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 672 - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 673 - 674 - h->flags & IVHD_FLAG_ISOC_EN_MASK ? 675 - iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 676 - iommu_feature_disable(iommu, CONTROL_ISOC_EN); 677 - 678 - /* 679 - * make IOMMU memory accesses cache coherent 680 - */ 681 - iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 654 + iommu->acpi_flags = h->flags; 682 655 683 656 /* 684 657 * Done. Now parse the device entries ··· 1103 1116 } 1104 1117 } 1105 1118 1119 + static void iommu_init_flags(struct amd_iommu *iommu) 1120 + { 1121 + iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 1122 + iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 1123 + iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 1124 + 1125 + iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 1126 + iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 1127 + iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 1128 + 1129 + iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 1130 + iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 1131 + iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 1132 + 1133 + iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 1134 + iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 1135 + iommu_feature_disable(iommu, CONTROL_ISOC_EN); 1136 + 1137 + /* 1138 + * make IOMMU memory accesses cache coherent 1139 + */ 1140 + iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 1141 + } 1142 + 1143 + static void iommu_apply_quirks(struct amd_iommu *iommu) 1144 + { 1145 + if (is_rd890_iommu(iommu->dev)) { 1146 + pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); 1147 + pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); 1148 + pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); 1149 + pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); 1150 + } 1151 + } 1152 + 1106 1153 /* 1107 1154 * This function finally enables all IOMMUs found in the system after 1108 1155 * they have been initialized ··· 1147 1126 1148 1127 for_each_iommu(iommu) { 1149 1128 iommu_disable(iommu); 1129 + iommu_apply_quirks(iommu); 1130 + iommu_init_flags(iommu); 1150 1131 iommu_set_device_table(iommu); 1151 1132 iommu_enable_command_buffer(iommu); 1152 1133 iommu_enable_event_buffer(iommu);
+11 -1
arch/x86/kernel/cpu/perf_event.c
··· 102 102 */ 103 103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ 104 104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 105 + unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 105 106 int enabled; 106 107 107 108 int n_events; ··· 1011 1010 x86_perf_event_set_period(event); 1012 1011 cpuc->events[idx] = event; 1013 1012 __set_bit(idx, cpuc->active_mask); 1013 + __set_bit(idx, cpuc->running); 1014 1014 x86_pmu.enable(event); 1015 1015 perf_event_update_userpage(event); 1016 1016 ··· 1143 1141 cpuc = &__get_cpu_var(cpu_hw_events); 1144 1142 1145 1143 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1146 - if (!test_bit(idx, cpuc->active_mask)) 1144 + if (!test_bit(idx, cpuc->active_mask)) { 1145 + /* 1146 + * Though we deactivated the counter some cpus 1147 + * might still deliver spurious interrupts still 1148 + * in flight. Catch them: 1149 + */ 1150 + if (__test_and_clear_bit(idx, cpuc->running)) 1151 + handled++; 1147 1152 continue; 1153 + } 1148 1154 1149 1155 event = cpuc->events[idx]; 1150 1156 hwc = &event->hw;
+4
drivers/ata/ahci.c
··· 90 90 static int ahci_pci_device_resume(struct pci_dev *pdev); 91 91 #endif 92 92 93 + static struct scsi_host_template ahci_sht = { 94 + AHCI_SHT("ahci"), 95 + }; 96 + 93 97 static struct ata_port_operations ahci_vt8251_ops = { 94 98 .inherits = &ahci_ops, 95 99 .hardreset = ahci_vt8251_hardreset,
+11 -1
drivers/ata/ahci.h
··· 298 298 299 299 extern int ahci_ignore_sss; 300 300 301 - extern struct scsi_host_template ahci_sht; 301 + extern struct device_attribute *ahci_shost_attrs[]; 302 + extern struct device_attribute *ahci_sdev_attrs[]; 303 + 304 + #define AHCI_SHT(drv_name) \ 305 + ATA_NCQ_SHT(drv_name), \ 306 + .can_queue = AHCI_MAX_CMDS - 1, \ 307 + .sg_tablesize = AHCI_MAX_SG, \ 308 + .dma_boundary = AHCI_DMA_BOUNDARY, \ 309 + .shost_attrs = ahci_shost_attrs, \ 310 + .sdev_attrs = ahci_sdev_attrs 311 + 302 312 extern struct ata_port_operations ahci_ops; 303 313 304 314 void ahci_save_initial_config(struct device *dev,
+5 -1
drivers/ata/ahci_platform.c
··· 23 23 #include <linux/ahci_platform.h> 24 24 #include "ahci.h" 25 25 26 + static struct scsi_host_template ahci_platform_sht = { 27 + AHCI_SHT("ahci_platform"), 28 + }; 29 + 26 30 static int __init ahci_probe(struct platform_device *pdev) 27 31 { 28 32 struct device *dev = &pdev->dev; ··· 149 145 ahci_print_info(host, "platform"); 150 146 151 147 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 152 - &ahci_sht); 148 + &ahci_platform_sht); 153 149 if (rc) 154 150 goto err0; 155 151
+4 -12
drivers/ata/libahci.c
··· 121 121 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 122 122 ahci_read_em_buffer, ahci_store_em_buffer); 123 123 124 - static struct device_attribute *ahci_shost_attrs[] = { 124 + struct device_attribute *ahci_shost_attrs[] = { 125 125 &dev_attr_link_power_management_policy, 126 126 &dev_attr_em_message_type, 127 127 &dev_attr_em_message, ··· 132 132 &dev_attr_em_buffer, 133 133 NULL 134 134 }; 135 + EXPORT_SYMBOL_GPL(ahci_shost_attrs); 135 136 136 - static struct device_attribute *ahci_sdev_attrs[] = { 137 + struct device_attribute *ahci_sdev_attrs[] = { 137 138 &dev_attr_sw_activity, 138 139 &dev_attr_unload_heads, 139 140 NULL 140 141 }; 141 - 142 - struct scsi_host_template ahci_sht = { 143 - ATA_NCQ_SHT("ahci"), 144 - .can_queue = AHCI_MAX_CMDS - 1, 145 - .sg_tablesize = AHCI_MAX_SG, 146 - .dma_boundary = AHCI_DMA_BOUNDARY, 147 - .shost_attrs = ahci_shost_attrs, 148 - .sdev_attrs = ahci_sdev_attrs, 149 - }; 150 - EXPORT_SYMBOL_GPL(ahci_sht); 142 + EXPORT_SYMBOL_GPL(ahci_sdev_attrs); 151 143 152 144 struct ata_port_operations ahci_ops = { 153 145 .inherits = &sata_pmp_port_ops,
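Taken together, the four AHCI hunks above move the SCSI host template out of libahci: libahci.c now exports only the sysfs attribute arrays, and each low-level driver builds its own template through the new AHCI_SHT() macro from ahci.h. As a sketch, a hypothetical AHCI driver (name illustrative) would now declare:

static struct scsi_host_template my_ahci_sht = {
	AHCI_SHT("my_ahci"),	/* ATA_NCQ_SHT() plus queue depth, SG limits and the shared attrs */
};

and pass &my_ahci_sht to ata_host_activate(), exactly as drivers/ata/ahci_platform.c does above.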
+1 -1
drivers/block/pktcdvd.c
··· 2369 2369 pkt_shrink_pktlist(pd); 2370 2370 } 2371 2371 2372 - static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2372 + static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) 2373 2373 { 2374 2374 if (dev_minor >= MAX_WRITERS) 2375 2375 return NULL;
+1 -1
drivers/dma/mv_xor.c
··· 162 162 163 163 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 164 164 { 165 - u32 val = (1 << (1 + (chan->idx * 16))); 165 + u32 val = ~(1 << (chan->idx * 16)); 166 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 167 167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 168 168 }
+3
drivers/edac/edac_mc.c
··· 339 339 { 340 340 int status; 341 341 342 + if (mci->op_state != OP_RUNNING_POLL) 343 + return; 344 + 342 345 status = cancel_delayed_work(&mci->work); 343 346 if (status == 0) { 344 347 debugf0("%s() not canceled, flush the queue\n",
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 2400 2400 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2401 2401 break; 2402 2402 case 3: 2403 - if (obj_priv->fence_reg > 8) 2403 + if (obj_priv->fence_reg >= 8) 2404 2404 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; 2405 2405 else 2406 2406 case 2:
+6 -9
drivers/gpu/drm/i915/intel_sdvo.c
··· 2170 2170 return true; 2171 2171 2172 2172 err: 2173 - intel_sdvo_destroy_enhance_property(connector); 2174 - kfree(intel_sdvo_connector); 2173 + intel_sdvo_destroy(connector); 2175 2174 return false; 2176 2175 } 2177 2176 ··· 2242 2243 return true; 2243 2244 2244 2245 err: 2245 - intel_sdvo_destroy_enhance_property(connector); 2246 - kfree(intel_sdvo_connector); 2246 + intel_sdvo_destroy(connector); 2247 2247 return false; 2248 2248 } 2249 2249 ··· 2520 2522 uint16_t response; 2521 2523 } enhancements; 2522 2524 2523 - if (!intel_sdvo_get_value(intel_sdvo, 2524 - SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2525 - &enhancements, sizeof(enhancements))) 2526 - return false; 2527 - 2525 + enhancements.response = 0; 2526 + intel_sdvo_get_value(intel_sdvo, 2527 + SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2528 + &enhancements, sizeof(enhancements)); 2528 2529 if (enhancements.response == 0) { 2529 2530 DRM_DEBUG_KMS("No enhancement is supported\n"); 2530 2531 return true;
+1
drivers/hwmon/coretemp.c
··· 36 36 #include <linux/pci.h> 37 37 #include <asm/msr.h> 38 38 #include <asm/processor.h> 39 + #include <asm/smp.h> 39 40 40 41 #define DRVNAME "coretemp" 41 42
+4 -2
drivers/infiniband/hw/cxgb3/iwch_cm.c
··· 463 463 V_MSS_IDX(mtu_idx) | 464 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 465 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 466 - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 466 + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | 467 + V_CONG_CONTROL_FLAVOR(cong_flavor); 467 468 skb->priority = CPL_PRIORITY_SETUP; 468 469 set_arp_failure_handler(skb, act_open_req_arp_failure); 469 470 ··· 1281 1280 V_MSS_IDX(mtu_idx) | 1282 1281 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1283 1282 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1284 - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1283 + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | 1284 + V_CONG_CONTROL_FLAVOR(cong_flavor); 1285 1285 1286 1286 rpl = cplhdr(skb); 1287 1287 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+5 -4
drivers/leds/leds-ns2.c
··· 81 81 int cmd_level; 82 82 int slow_level; 83 83 84 - read_lock(&led_dat->rw_lock); 84 + read_lock_irq(&led_dat->rw_lock); 85 85 86 86 cmd_level = gpio_get_value(led_dat->cmd); 87 87 slow_level = gpio_get_value(led_dat->slow); ··· 95 95 } 96 96 } 97 97 98 - read_unlock(&led_dat->rw_lock); 98 + read_unlock_irq(&led_dat->rw_lock); 99 99 100 100 return ret; 101 101 } ··· 104 104 enum ns2_led_modes mode) 105 105 { 106 106 int i; 107 + unsigned long flags; 107 108 108 - write_lock(&led_dat->rw_lock); 109 + write_lock_irqsave(&led_dat->rw_lock, flags); 109 110 110 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 112 if (mode == ns2_led_modval[i].mode) { ··· 117 116 } 118 117 } 119 118 120 - write_unlock(&led_dat->rw_lock); 119 + write_unlock_irqrestore(&led_dat->rw_lock, flags); 121 120 } 122 121 123 122 static void ns2_led_set(struct led_classdev *led_cdev,
+8 -4
drivers/mmc/host/sdhci-s3c.c
··· 241 241 static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 242 242 { 243 243 struct sdhci_host *host = platform_get_drvdata(dev); 244 + unsigned long flags; 245 + 244 246 if (host) { 245 - spin_lock(&host->lock); 247 + spin_lock_irqsave(&host->lock, flags); 246 248 if (state) { 247 249 dev_dbg(&dev->dev, "card inserted.\n"); 248 250 host->flags &= ~SDHCI_DEVICE_DEAD; ··· 255 253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 256 254 } 257 255 tasklet_schedule(&host->card_tasklet); 258 - spin_unlock(&host->lock); 256 + spin_unlock_irqrestore(&host->lock, flags); 259 257 } 260 258 } 261 259 ··· 483 481 sdhci_remove_host(host, 1); 484 482 485 483 for (ptr = 0; ptr < 3; ptr++) { 486 - clk_disable(sc->clk_bus[ptr]); 487 - clk_put(sc->clk_bus[ptr]); 484 + if (sc->clk_bus[ptr]) { 485 + clk_disable(sc->clk_bus[ptr]); 486 + clk_put(sc->clk_bus[ptr]); 487 + } 488 488 } 489 489 clk_disable(sc->clk_io); 490 490 clk_put(sc->clk_io);
+10
drivers/net/3c59x.c
··· 2942 2942 { 2943 2943 struct vortex_private *vp = netdev_priv(dev); 2944 2944 2945 + if (!VORTEX_PCI(vp)) 2946 + return; 2947 + 2945 2948 wol->supported = WAKE_MAGIC; 2946 2949 2947 2950 wol->wolopts = 0; ··· 2955 2952 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2956 2953 { 2957 2954 struct vortex_private *vp = netdev_priv(dev); 2955 + 2956 + if (!VORTEX_PCI(vp)) 2957 + return -EOPNOTSUPP; 2958 + 2958 2959 if (wol->wolopts & ~WAKE_MAGIC) 2959 2960 return -EINVAL; 2960 2961 ··· 3207 3200 vp->enable_wol = 0; 3208 3201 return; 3209 3202 } 3203 + 3204 + if (VORTEX_PCI(vp)->current_state < PCI_D3hot) 3205 + return; 3210 3206 3211 3207 /* Change the power state to D3; RxEnable doesn't take effect. */ 3212 3208 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
+9 -2
drivers/net/atlx/atl1.c
··· 1251 1251 1252 1252 rrd_ring->desc = NULL; 1253 1253 rrd_ring->dma = 0; 1254 + 1255 + adapter->cmb.dma = 0; 1256 + adapter->cmb.cmb = NULL; 1257 + 1258 + adapter->smb.dma = 0; 1259 + adapter->smb.smb = NULL; 1254 1260 } 1255 1261 1256 1262 static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) ··· 2853 2847 pci_enable_wake(pdev, PCI_D3cold, 0); 2854 2848 2855 2849 atl1_reset_hw(&adapter->hw); 2856 - adapter->cmb.cmb->int_stats = 0; 2857 2850 2858 - if (netif_running(netdev)) 2851 + if (netif_running(netdev)) { 2852 + adapter->cmb.cmb->int_stats = 0; 2859 2853 atl1_up(adapter); 2854 + } 2860 2855 netif_device_attach(netdev); 2861 2856 2862 2857 return 0;
+1
drivers/net/e1000e/hw.h
··· 57 57 E1000_SCTL = 0x00024, /* SerDes Control - RW */ 58 58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ 59 59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ 60 + E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ 60 61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 61 62 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 62 63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
+164 -33
drivers/net/e1000e/ich8lan.c
··· 105 105 #define E1000_FEXTNVM_SW_CONFIG 1 106 106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 107 107 108 + #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 109 + #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 110 + #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 111 + 108 112 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 109 113 110 114 #define E1000_ICH_RAR_ENTRIES 7 ··· 129 125 130 126 /* SMBus Address Phy Register */ 131 127 #define HV_SMB_ADDR PHY_REG(768, 26) 128 + #define HV_SMB_ADDR_MASK 0x007F 132 129 #define HV_SMB_ADDR_PEC_EN 0x0200 133 130 #define HV_SMB_ADDR_VALID 0x0080 134 131 ··· 242 237 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 243 238 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 244 239 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 240 + static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 241 + static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 245 242 246 243 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 247 244 { ··· 279 272 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 280 273 { 281 274 struct e1000_phy_info *phy = &hw->phy; 282 - u32 ctrl; 275 + u32 ctrl, fwsm; 283 276 s32 ret_val = 0; 284 277 285 278 phy->addr = 1; ··· 301 294 * disabled, then toggle the LANPHYPC Value bit to force 302 295 * the interconnect to PCIe mode. 303 296 */ 304 - if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 297 + fwsm = er32(FWSM); 298 + if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) { 305 299 ctrl = er32(CTRL); 306 300 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 307 301 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; ··· 311 303 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 312 304 ew32(CTRL, ctrl); 313 305 msleep(50); 306 + 307 + /* 308 + * Gate automatic PHY configuration by hardware on 309 + * non-managed 82579 310 + */ 311 + if (hw->mac.type == e1000_pch2lan) 312 + e1000_gate_hw_phy_config_ich8lan(hw, true); 314 313 } 315 314 316 315 /* ··· 329 314 ret_val = e1000e_phy_hw_reset_generic(hw); 330 315 if (ret_val) 331 316 goto out; 317 + 318 + /* Ungate automatic PHY configuration on non-managed 82579 */ 319 + if ((hw->mac.type == e1000_pch2lan) && 320 + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 321 + msleep(10); 322 + e1000_gate_hw_phy_config_ich8lan(hw, false); 323 + } 332 324 333 325 phy->id = e1000_phy_unknown; 334 326 ret_val = e1000e_get_phy_id(hw); ··· 583 561 if (mac->type == e1000_ich8lan) 584 562 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 585 563 586 - /* Disable PHY configuration by hardware, config by software */ 587 - if (mac->type == e1000_pch2lan) { 588 - u32 extcnf_ctrl = er32(EXTCNF_CTRL); 589 - 590 - extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; 591 - ew32(EXTCNF_CTRL, extcnf_ctrl); 592 - } 564 + /* Gate automatic PHY configuration by hardware on managed 82579 */ 565 + if ((mac->type == e1000_pch2lan) && 566 + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 567 + e1000_gate_hw_phy_config_ich8lan(hw, true); 593 568 594 569 return 0; 595 570 } ··· 667 648 668 649 if (hw->phy.type == e1000_phy_82578) { 669 650 ret_val = e1000_link_stall_workaround_hv(hw); 651 + if (ret_val) 652 + goto out; 653 + } 654 + 655 + if (hw->mac.type == e1000_pch2lan) { 656 + ret_val = e1000_k1_workaround_lv(hw); 670 657 if (ret_val) 671 658 goto out; 672 659 } ··· 920 895 } 921 896 922 897 /** 898 + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states 899 + * @hw: pointer to the HW structure 900 + * 901 + * Assumes semaphore already 
acquired. 902 + * 903 + **/ 904 + static s32 e1000_write_smbus_addr(struct e1000_hw *hw) 905 + { 906 + u16 phy_data; 907 + u32 strap = er32(STRAP); 908 + s32 ret_val = 0; 909 + 910 + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 911 + 912 + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); 913 + if (ret_val) 914 + goto out; 915 + 916 + phy_data &= ~HV_SMB_ADDR_MASK; 917 + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 918 + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 919 + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 920 + 921 + out: 922 + return ret_val; 923 + } 924 + 925 + /** 923 926 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 924 927 * @hw: pointer to the HW structure 925 928 * ··· 956 903 **/ 957 904 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 958 905 { 959 - struct e1000_adapter *adapter = hw->adapter; 960 906 struct e1000_phy_info *phy = &hw->phy; 961 907 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 962 908 s32 ret_val = 0; ··· 973 921 if (phy->type != e1000_phy_igp_3) 974 922 return ret_val; 975 923 976 - if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { 924 + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || 925 + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { 977 926 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 978 927 break; 979 928 } ··· 1014 961 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1015 962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1016 963 1017 - if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1018 - ((hw->mac.type == e1000_pchlan) || 1019 - (hw->mac.type == e1000_pch2lan))) { 964 + if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 965 + (hw->mac.type == e1000_pchlan)) || 966 + (hw->mac.type == e1000_pch2lan)) { 1020 967 /* 1021 968 * HW configures the SMBus address and LEDs when the 1022 969 * OEM and LCD Write Enable bits are set in the NVM. 1023 970 * When both NVM bits are cleared, SW will configure 1024 971 * them instead. 
1025 972 */ 1026 - data = er32(STRAP); 1027 - data &= E1000_STRAP_SMBUS_ADDRESS_MASK; 1028 - reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT; 1029 - reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 1030 - ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, 1031 - reg_data); 973 + ret_val = e1000_write_smbus_addr(hw); 1032 974 if (ret_val) 1033 975 goto out; 1034 976 ··· 1488 1440 goto out; 1489 1441 1490 1442 /* Enable jumbo frame workaround in the PHY */ 1491 - e1e_rphy(hw, PHY_REG(769, 20), &data); 1492 - ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); 1493 - if (ret_val) 1494 - goto out; 1495 1443 e1e_rphy(hw, PHY_REG(769, 23), &data); 1496 1444 data &= ~(0x7F << 5); 1497 1445 data |= (0x37 << 5); ··· 1496 1452 goto out; 1497 1453 e1e_rphy(hw, PHY_REG(769, 16), &data); 1498 1454 data &= ~(1 << 13); 1499 - data |= (1 << 12); 1500 1455 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1501 1456 if (ret_val) 1502 1457 goto out; ··· 1520 1477 1521 1478 mac_reg = er32(RCTL); 1522 1479 mac_reg &= ~E1000_RCTL_SECRC; 1523 - ew32(FFLT_DBG, mac_reg); 1480 + ew32(RCTL, mac_reg); 1524 1481 1525 1482 ret_val = e1000e_read_kmrn_reg(hw, 1526 1483 E1000_KMRNCTRLSTA_CTRL_OFFSET, ··· 1546 1503 goto out; 1547 1504 1548 1505 /* Write PHY register values back to h/w defaults */ 1549 - e1e_rphy(hw, PHY_REG(769, 20), &data); 1550 - ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); 1551 - if (ret_val) 1552 - goto out; 1553 1506 e1e_rphy(hw, PHY_REG(769, 23), &data); 1554 1507 data &= ~(0x7F << 5); 1555 1508 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 1556 1509 if (ret_val) 1557 1510 goto out; 1558 1511 e1e_rphy(hw, PHY_REG(769, 16), &data); 1559 - data &= ~(1 << 12); 1560 1512 data |= (1 << 13); 1561 1513 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1562 1514 if (ret_val) ··· 1594 1556 1595 1557 out: 1596 1558 return ret_val; 1559 + } 1560 + 1561 + /** 1562 + * e1000_k1_gig_workaround_lv - K1 Si workaround 1563 + * @hw: pointer to the HW structure 1564 + * 1565 + * Workaround to set the K1 beacon duration for 82579 parts 1566 + **/ 1567 + static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 1568 + { 1569 + s32 ret_val = 0; 1570 + u16 status_reg = 0; 1571 + u32 mac_reg; 1572 + 1573 + if (hw->mac.type != e1000_pch2lan) 1574 + goto out; 1575 + 1576 + /* Set K1 beacon duration based on 1Gbps speed or otherwise */ 1577 + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); 1578 + if (ret_val) 1579 + goto out; 1580 + 1581 + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 1582 + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 1583 + mac_reg = er32(FEXTNVM4); 1584 + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 1585 + 1586 + if (status_reg & HV_M_STATUS_SPEED_1000) 1587 + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 1588 + else 1589 + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 1590 + 1591 + ew32(FEXTNVM4, mac_reg); 1592 + } 1593 + 1594 + out: 1595 + return ret_val; 1596 + } 1597 + 1598 + /** 1599 + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware 1600 + * @hw: pointer to the HW structure 1601 + * @gate: boolean set to true to gate, false to ungate 1602 + * 1603 + * Gate/ungate the automatic PHY configuration via hardware; perform 1604 + * the configuration via software instead. 
1605 + **/ 1606 + static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) 1607 + { 1608 + u32 extcnf_ctrl; 1609 + 1610 + if (hw->mac.type != e1000_pch2lan) 1611 + return; 1612 + 1613 + extcnf_ctrl = er32(EXTCNF_CTRL); 1614 + 1615 + if (gate) 1616 + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; 1617 + else 1618 + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; 1619 + 1620 + ew32(EXTCNF_CTRL, extcnf_ctrl); 1621 + return; 1597 1622 } 1598 1623 1599 1624 /** ··· 1703 1602 if (e1000_check_reset_block(hw)) 1704 1603 goto out; 1705 1604 1605 + /* Allow time for h/w to get to quiescent state after reset */ 1606 + msleep(10); 1607 + 1706 1608 /* Perform any necessary post-reset workarounds */ 1707 1609 switch (hw->mac.type) { 1708 1610 case e1000_pchlan: ··· 1734 1630 /* Configure the LCD with the OEM bits in NVM */ 1735 1631 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1736 1632 1633 + /* Ungate automatic PHY configuration on non-managed 82579 */ 1634 + if ((hw->mac.type == e1000_pch2lan) && 1635 + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 1636 + msleep(10); 1637 + e1000_gate_hw_phy_config_ich8lan(hw, false); 1638 + } 1639 + 1737 1640 out: 1738 1641 return ret_val; 1739 1642 } ··· 1756 1645 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 1757 1646 { 1758 1647 s32 ret_val = 0; 1648 + 1649 + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 1650 + if ((hw->mac.type == e1000_pch2lan) && 1651 + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 1652 + e1000_gate_hw_phy_config_ich8lan(hw, true); 1759 1653 1760 1654 ret_val = e1000e_phy_hw_reset_generic(hw); 1761 1655 if (ret_val) ··· 3026 2910 * external PHY is reset. 3027 2911 */ 3028 2912 ctrl |= E1000_CTRL_PHY_RST; 2913 + 2914 + /* 2915 + * Gate automatic PHY configuration by hardware on 2916 + * non-managed 82579 2917 + */ 2918 + if ((hw->mac.type == e1000_pch2lan) && 2919 + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 2920 + e1000_gate_hw_phy_config_ich8lan(hw, true); 3029 2921 } 3030 2922 ret_val = e1000_acquire_swflag_ich8lan(hw); 3031 2923 e_dbg("Issuing a global reset to ich8lan\n"); ··· 3584 3460 void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3585 3461 { 3586 3462 u32 phy_ctrl; 3463 + s32 ret_val; 3587 3464 3588 3465 phy_ctrl = er32(PHY_CTRL); 3589 3466 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3590 3467 ew32(PHY_CTRL, phy_ctrl); 3591 3468 3592 - if (hw->mac.type >= e1000_pchlan) 3593 - e1000_phy_hw_reset_ich8lan(hw); 3469 + if (hw->mac.type >= e1000_pchlan) { 3470 + e1000_oem_bits_config_ich8lan(hw, true); 3471 + ret_val = hw->phy.ops.acquire(hw); 3472 + if (ret_val) 3473 + return; 3474 + e1000_write_smbus_addr(hw); 3475 + hw->phy.ops.release(hw); 3476 + } 3594 3477 } 3595 3478 3596 3479 /**
+19 -10
drivers/net/e1000e/netdev.c
··· 2704 2704 u32 psrctl = 0; 2705 2705 u32 pages = 0; 2706 2706 2707 + /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2708 + if (hw->mac.type == e1000_pch2lan) { 2709 + s32 ret_val; 2710 + 2711 + if (adapter->netdev->mtu > ETH_DATA_LEN) 2712 + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2713 + else 2714 + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2715 + } 2716 + 2707 2717 /* Program MC offset vector base */ 2708 2718 rctl = er32(RCTL); 2709 2719 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); ··· 2752 2742 e1e_wphy(hw, 0x10, 0x2823); 2753 2743 e1e_wphy(hw, 0x11, 0x0003); 2754 2744 e1e_wphy(hw, 22, phy_data); 2755 - } 2756 - 2757 - /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2758 - if (hw->mac.type == e1000_pch2lan) { 2759 - s32 ret_val; 2760 - 2761 - if (rctl & E1000_RCTL_LPE) 2762 - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2763 - else 2764 - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2765 2745 } 2766 2746 2767 2747 /* Setup buffer sizes */ ··· 4830 4830 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || 4831 4831 (max_frame > adapter->max_hw_frame_size)) { 4832 4832 e_err("Unsupported MTU setting\n"); 4833 + return -EINVAL; 4834 + } 4835 + 4836 + /* Jumbo frame workaround on 82579 requires CRC be stripped */ 4837 + if ((adapter->hw.mac.type == e1000_pch2lan) && 4838 + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 4839 + (new_mtu > ETH_DATA_LEN)) { 4840 + e_err("Jumbo Frames not supported on 82579 when CRC " 4841 + "stripping is disabled.\n"); 4833 4842 return -EINVAL; 4834 4843 } 4835 4844
+2 -2
drivers/net/ibm_newemac/core.c
··· 2928 2928 if (dev->emac_irq != NO_IRQ) 2929 2929 irq_dispose_mapping(dev->emac_irq); 2930 2930 err_free: 2931 - kfree(ndev); 2931 + free_netdev(ndev); 2932 2932 err_gone: 2933 2933 /* if we were on the bootlist, remove us as we won't show up and 2934 2934 * wake up all waiters to notify them in case they were waiting ··· 2971 2971 if (dev->emac_irq != NO_IRQ) 2972 2972 irq_dispose_mapping(dev->emac_irq); 2973 2973 2974 - kfree(dev->ndev); 2974 + free_netdev(dev->ndev); 2975 2975 2976 2976 return 0; 2977 2977 }
-3
drivers/net/netxen/netxen_nic_init.c
··· 1540 1540 if (pkt_offset) 1541 1541 skb_pull(skb, pkt_offset); 1542 1542 1543 - skb->truesize = skb->len + sizeof(struct sk_buff); 1544 1543 skb->protocol = eth_type_trans(skb, netdev); 1545 1544 1546 1545 napi_gro_receive(&sds_ring->napi, skb); ··· 1600 1601 data_offset = l4_hdr_offset + TCP_HDR_SIZE; 1601 1602 1602 1603 skb_put(skb, lro_length + data_offset); 1603 - 1604 - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); 1605 1604 1606 1605 skb_pull(skb, l2_hdr_offset); 1607 1606 skb->protocol = eth_type_trans(skb, netdev);
+1 -6
drivers/net/qlcnic/qlcnic_init.c
··· 1316 1316 return -ENOMEM; 1317 1317 } 1318 1318 1319 - skb_reserve(skb, 2); 1319 + skb_reserve(skb, NET_IP_ALIGN); 1320 1320 1321 1321 dma = pci_map_single(pdev, skb->data, 1322 1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE); ··· 1404 1404 if (pkt_offset) 1405 1405 skb_pull(skb, pkt_offset); 1406 1406 1407 - skb->truesize = skb->len + sizeof(struct sk_buff); 1408 1407 skb->protocol = eth_type_trans(skb, netdev); 1409 1408 1410 1409 napi_gro_receive(&sds_ring->napi, skb); ··· 1464 1465 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; 1465 1466 1466 1467 skb_put(skb, lro_length + data_offset); 1467 - 1468 - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); 1469 1468 1470 1469 skb_pull(skb, l2_hdr_offset); 1471 1470 skb->protocol = eth_type_trans(skb, netdev); ··· 1696 1699 1697 1700 if (pkt_offset) 1698 1701 skb_pull(skb, pkt_offset); 1699 - 1700 - skb->truesize = skb->len + sizeof(struct sk_buff); 1701 1702 1702 1703 if (!qlcnic_check_loopback_buff(skb->data)) 1703 1704 adapter->diag_cnt++;
+1 -1
drivers/net/rionet.c
··· 384 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 385 385 __ilog2(sizeof(void *)) + 4 : 0); 386 386 unregister_netdev(ndev); 387 - kfree(ndev); 387 + free_netdev(ndev); 388 388 389 389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 390 390 list_del(&peer->node);
+1 -1
drivers/net/sgiseeq.c
··· 804 804 err_out_free_page: 805 805 free_page((unsigned long) sp->srings); 806 806 err_out_free_dev: 807 - kfree(dev); 807 + free_netdev(dev); 808 808 809 809 err_out: 810 810 return err;
+1
drivers/net/smsc911x.c
··· 58 58 59 59 MODULE_LICENSE("GPL"); 60 60 MODULE_VERSION(SMSC_DRV_VERSION); 61 + MODULE_ALIAS("platform:smsc911x"); 61 62 62 63 #if USE_DEBUG > 0 63 64 static int debug = 16;
+38 -5
drivers/net/tulip/de2104x.c
··· 243 243 NWayState = (1 << 14) | (1 << 13) | (1 << 12), 244 244 NWayRestart = (1 << 12), 245 245 NonselPortActive = (1 << 9), 246 + SelPortActive = (1 << 8), 246 247 LinkFailStatus = (1 << 2), 247 248 NetCxnErr = (1 << 1), 248 249 }; ··· 364 363 365 364 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ 366 365 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; 367 - static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; 366 + static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; 367 + /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */ 368 + static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; 368 369 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; 369 370 370 371 ··· 1067 1064 unsigned int carrier; 1068 1065 unsigned long flags; 1069 1066 1067 + /* clear port active bits */ 1068 + dw32(SIAStatus, NonselPortActive | SelPortActive); 1069 + 1070 1070 carrier = (status & NetCxnErr) ? 0 : 1; 1071 1071 1072 1072 if (carrier) { ··· 1164 1158 static void de_media_interrupt (struct de_private *de, u32 status) 1165 1159 { 1166 1160 if (status & LinkPass) { 1161 + /* Ignore if current media is AUI or BNC and we can't use TP */ 1162 + if ((de->media_type == DE_MEDIA_AUI || 1163 + de->media_type == DE_MEDIA_BNC) && 1164 + (de->media_lock || 1165 + !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))) 1166 + return; 1167 + /* If current media is not TP, change it to TP */ 1168 + if ((de->media_type == DE_MEDIA_AUI || 1169 + de->media_type == DE_MEDIA_BNC)) { 1170 + de->media_type = DE_MEDIA_TP_AUTO; 1171 + de_stop_rxtx(de); 1172 + de_set_media(de); 1173 + de_start_rxtx(de); 1174 + } 1167 1175 de_link_up(de); 1168 1176 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); 1169 1177 return; 1170 1178 } 1171 1179 1172 1180 BUG_ON(!(status & LinkFail)); 1173 - 1174 - if (netif_carrier_ok(de->dev)) { 1181 + /* Mark the link as down only if current media is TP */ 1182 + if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI && 1183 + de->media_type != DE_MEDIA_BNC) { 1175 1184 de_link_down(de); 1176 1185 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1177 1186 } ··· 1250 1229 if (de->de21040) 1251 1230 return; 1252 1231 1232 + dw32(CSR13, 0); /* Reset phy */ 1253 1233 pci_read_config_dword(de->pdev, PCIPM, &pmctl); 1254 1234 pmctl |= PM_Sleep; 1255 1235 pci_write_config_dword(de->pdev, PCIPM, pmctl); ··· 1596 1574 return 0; /* nothing to change */ 1597 1575 1598 1576 de_link_down(de); 1577 + mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1599 1578 de_stop_rxtx(de); 1600 1579 1601 1580 de->media_type = new_media; 1602 1581 de->media_lock = media_lock; 1603 1582 de->media_advertise = ecmd->advertising; 1604 1583 de_set_media(de); 1584 + if (netif_running(de->dev)) 1585 + de_start_rxtx(de); 1605 1586 1606 1587 return 0; 1607 1588 } ··· 1936 1911 for (i = 0; i < DE_MAX_MEDIA; i++) { 1937 1912 if (de->media[i].csr13 == 0xffff) 1938 1913 de->media[i].csr13 = t21041_csr13[i]; 1939 - if (de->media[i].csr14 == 0xffff) 1940 - de->media[i].csr14 = t21041_csr14[i]; 1914 + if (de->media[i].csr14 == 0xffff) { 1915 + /* autonegotiation is broken at least on some chip 1916 + revisions - rev. 
0x21 works, 0x11 does not */ 1917 + if (de->pdev->revision < 0x20) 1918 + de->media[i].csr14 = t21041_csr14_brk[i]; 1919 + else 1920 + de->media[i].csr14 = t21041_csr14[i]; 1921 + } 1941 1922 if (de->media[i].csr15 == 0xffff) 1942 1923 de->media[i].csr15 = t21041_csr15[i]; 1943 1924 } ··· 2189 2158 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2190 2159 goto out; 2191 2160 } 2161 + pci_set_master(pdev); 2162 + de_init_rings(de); 2192 2163 de_init_hw(de); 2193 2164 out_attach: 2194 2165 netif_device_attach(dev);
+5
drivers/net/wireless/iwlwifi/iwl-core.c
··· 2613 2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2614 2614 return -EINVAL; 2615 2615 2616 + if (test_bit(STATUS_SCANNING, &priv->status)) { 2617 + IWL_DEBUG_INFO(priv, "scan in progress.\n"); 2618 + return -EINVAL; 2619 + } 2620 + 2616 2621 if (mode >= IWL_MAX_FORCE_RESET) { 2617 2622 IWL_DEBUG_INFO(priv, "invalid reset request.\n"); 2618 2623 return -EINVAL;
+27
drivers/pci/intel-iommu.c
··· 3757 3757 3758 3758 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3759 3759 3760 + #define GGC 0x52 3761 + #define GGC_MEMORY_SIZE_MASK (0xf << 8) 3762 + #define GGC_MEMORY_SIZE_NONE (0x0 << 8) 3763 + #define GGC_MEMORY_SIZE_1M (0x1 << 8) 3764 + #define GGC_MEMORY_SIZE_2M (0x3 << 8) 3765 + #define GGC_MEMORY_VT_ENABLED (0x8 << 8) 3766 + #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) 3767 + #define GGC_MEMORY_SIZE_3M_VT (0xa << 8) 3768 + #define GGC_MEMORY_SIZE_4M_VT (0xb << 8) 3769 + 3770 + static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) 3771 + { 3772 + unsigned short ggc; 3773 + 3774 + if (pci_read_config_word(dev, GGC, &ggc)) 3775 + return; 3776 + 3777 + if (!(ggc & GGC_MEMORY_VT_ENABLED)) { 3778 + printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); 3779 + dmar_map_gfx = 0; 3780 + } 3781 + } 3782 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); 3783 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); 3784 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); 3785 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); 3786 + 3760 3787 /* On Tylersburg chipsets, some BIOSes have been known to enable the 3761 3788 ISOCH DMAR unit for the Azalia sound device, but not give it any 3762 3789 TLB entries, which causes it to deadlock. Check for that. We do
+1 -1
drivers/pci/iov.c
··· 608 608 * the VF BAR size multiplied by the number of VFs. The alignment 609 609 * is just the VF BAR size. 610 610 */ 611 - int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 611 + resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 612 612 { 613 613 struct resource tmp; 614 614 enum pci_bar_type type;
+3 -2
drivers/pci/pci.h
··· 264 264 extern void pci_iov_release(struct pci_dev *dev); 265 265 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 266 266 enum pci_bar_type *type); 267 - extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 267 + extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, 268 + int resno); 268 269 extern void pci_restore_iov_state(struct pci_dev *dev); 269 270 extern int pci_iov_bus_range(struct pci_bus *bus); 270 271 ··· 321 320 } 322 321 #endif /* CONFIG_PCI_IOV */ 323 322 324 - static inline int pci_resource_alignment(struct pci_dev *dev, 323 + static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, 325 324 struct resource *res) 326 325 { 327 326 #ifdef CONFIG_PCI_IOV
+6
drivers/pcmcia/pcmcia_resource.c
··· 595 595 if (c->io[1].end) { 596 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 597 597 if (ret) { 598 + struct resource tmp = c->io[0]; 599 + /* release the previously allocated resource */ 598 600 release_io_space(s, &c->io[0]); 601 + /* but preserve the settings, for they worked... */ 602 + c->io[0].end = resource_size(&tmp); 603 + c->io[0].start = tmp.start; 604 + c->io[0].flags = tmp.flags; 599 605 goto out; 600 606 } 601 607 } else
+1 -1
drivers/pcmcia/pd6729.c
··· 646 646 if (!pci_resource_start(dev, 0)) { 647 647 dev_warn(&dev->dev, "refusing to load the driver as the " 648 648 "io_base is NULL.\n"); 649 - goto err_out_free_mem; 649 + goto err_out_disable; 650 650 } 651 651 652 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
+2 -2
drivers/s390/net/ctcm_main.c
··· 1154 1154 dev_fsm, dev_fsm_len, GFP_KERNEL); 1155 1155 if (priv->fsm == NULL) { 1156 1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); 1157 - kfree(dev); 1157 + free_netdev(dev); 1158 1158 return NULL; 1159 1159 } 1160 1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED); ··· 1165 1165 grp = ctcmpc_init_mpc_group(priv); 1166 1166 if (grp == NULL) { 1167 1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); 1168 - kfree(dev); 1168 + free_netdev(dev); 1169 1169 return NULL; 1170 1170 } 1171 1171 tasklet_init(&grp->mpc_tasklet2,
+4
drivers/serial/ioc3_serial.c
··· 2017 2017 struct ioc3_port *port; 2018 2018 struct ioc3_port *ports[PORTS_PER_CARD]; 2019 2019 int phys_port; 2020 + int cnt; 2020 2021 2021 2022 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd)); 2022 2023 ··· 2147 2146 2148 2147 /* error exits that give back resources */ 2149 2148 out4: 2149 + for (cnt = 0; cnt < phys_port; cnt++) 2150 + kfree(ports[cnt]); 2151 + 2150 2152 kfree(card_ptr); 2151 2153 return ret; 2152 2154 }
+1 -1
drivers/vhost/net.c
··· 243 243 int r, nlogs = 0; 244 244 245 245 while (datalen > 0) { 246 - if (unlikely(headcount >= VHOST_NET_MAX_SG)) { 246 + if (unlikely(seg >= VHOST_NET_MAX_SG)) { 247 247 r = -ENOBUFS; 248 248 goto err; 249 249 }
+4 -3
drivers/vhost/vhost.c
··· 858 858 if (r < 0) 859 859 return r; 860 860 len -= l; 861 - if (!len) 861 + if (!len) { 862 + if (vq->log_ctx) 863 + eventfd_signal(vq->log_ctx, 1); 862 864 return 0; 865 + } 863 866 } 864 - if (vq->log_ctx) 865 - eventfd_signal(vq->log_ctx, 1); 866 867 /* Length written exceeds what we have stored. This is a bug. */ 867 868 BUG(); 868 869 return 0;
+2 -2
drivers/video/pxa168fb.c
··· 298 298 * Set bit to enable graphics DMA. 299 299 */ 300 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 301 - x |= fbi->active ? 0x00000100 : 0; 302 - fbi->active = 0; 301 + x &= ~CFG_GRA_ENA_MASK; 302 + x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0); 303 303 304 304 /* 305 305 * If we are in a pseudo-color mode, we need to enable
+2 -2
include/linux/netlink.h
··· 27 27 28 28 #define MAX_LINKS 32 29 29 30 - struct net; 31 - 32 30 struct sockaddr_nl { 33 31 sa_family_t nl_family; /* AF_NETLINK */ 34 32 unsigned short nl_pad; /* zero */ ··· 148 150 149 151 #include <linux/capability.h> 150 152 #include <linux/skbuff.h> 153 + 154 + struct net; 151 155 152 156 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) 153 157 {
+3
include/linux/pci_ids.h
··· 393 393 #define PCI_DEVICE_ID_VLSI_82C147 0x0105 394 394 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 395 395 396 + /* AMD RD890 Chipset */ 397 + #define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 398 + 396 399 #define PCI_VENDOR_ID_ADL 0x1005 397 400 #define PCI_DEVICE_ID_ADL_2301 0x2301 398 401
+1 -1
include/linux/socket.h
··· 322 322 int offset, 323 323 unsigned int len, __wsum *csump); 324 324 325 - extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); 325 + extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); 326 326 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); 327 327 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, 328 328 int offset, int len);
+1
include/net/addrconf.h
··· 121 121 * IPv6 Address Label subsystem (addrlabel.c) 122 122 */ 123 123 extern int ipv6_addr_label_init(void); 124 + extern void ipv6_addr_label_cleanup(void); 124 125 extern void ipv6_addr_label_rtnl_register(void); 125 126 extern u32 ipv6_addr_label(struct net *net, 126 127 const struct in6_addr *addr,
+1
include/net/dst.h
··· 242 242 dev->stats.rx_packets++; 243 243 dev->stats.rx_bytes += skb->len; 244 244 skb->rxhash = 0; 245 + skb_set_queue_mapping(skb, 0); 245 246 skb_dst_drop(skb); 246 247 nf_reset(skb); 247 248 }
+2
include/net/route.h
··· 199 199 fl.fl_ip_sport = sport; 200 200 fl.fl_ip_dport = dport; 201 201 fl.proto = protocol; 202 + if (inet_sk(sk)->transparent) 203 + fl.flags |= FLOWI_FLAG_ANYSRC; 202 204 ip_rt_put(*rp); 203 205 *rp = NULL; 204 206 security_sk_classify_flow(sk, &fl);
+2 -2
include/net/xfrm.h
··· 298 298 const struct xfrm_type *type_map[IPPROTO_MAX]; 299 299 struct xfrm_mode *mode_map[XFRM_MODE_MAX]; 300 300 int (*init_flags)(struct xfrm_state *x); 301 - void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, 302 - struct xfrm_tmpl *tmpl, 301 + void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl); 302 + void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl, 303 303 xfrm_address_t *daddr, xfrm_address_t *saddr); 304 304 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 305 305 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
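The xfrm.h hunk above splits the old init_tempsel() callback in two: init_tempsel() now only derives a selector from the flow, while the new init_temprop() fills in the template-derived id and addresses (see the matching net/ipv4/xfrm4_state.c and net/ipv6/xfrm6_state.c hunks below). A rough sketch of how a caller in the xfrm core would drive both hooks after this split; the wrapper name and call site are illustrative, not the actual xfrm_state.c code:

static void init_tempstate(struct xfrm_state *x, struct flowi *fl,
			   struct xfrm_tmpl *tmpl,
			   xfrm_address_t *daddr, xfrm_address_t *saddr,
			   struct xfrm_state_afinfo *afinfo)
{
	/* the selector comes purely from the flow (per address family)... */
	afinfo->init_tempsel(&x->sel, fl);
	/* ...while the id and fallback addresses come from the policy template */
	afinfo->init_temprop(x, tmpl, daddr, saddr);
}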
+18 -11
net/9p/trans_rdma.c
··· 426 426 427 427 /* Allocate an fcall for the reply */ 428 428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); 429 - if (!rpl_context) 429 + if (!rpl_context) { 430 + err = -ENOMEM; 430 431 goto err_close; 432 + } 431 433 432 434 /* 433 435 * If the request has a buffer, steal it, otherwise ··· 447 445 } 448 446 rpl_context->rc = req->rc; 449 447 if (!rpl_context->rc) { 450 - kfree(rpl_context); 451 - goto err_close; 448 + err = -ENOMEM; 449 + goto err_free2; 452 450 } 453 451 454 452 /* ··· 460 458 */ 461 459 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { 462 460 err = post_recv(client, rpl_context); 463 - if (err) { 464 - kfree(rpl_context->rc); 465 - kfree(rpl_context); 466 - goto err_close; 467 - } 461 + if (err) 462 + goto err_free1; 468 463 } else 469 464 atomic_dec(&rdma->rq_count); 470 465 ··· 470 471 471 472 /* Post the request */ 472 473 c = kmalloc(sizeof *c, GFP_KERNEL); 473 - if (!c) 474 - goto err_close; 474 + if (!c) { 475 + err = -ENOMEM; 476 + goto err_free1; 477 + } 475 478 c->req = req; 476 479 477 480 c->busa = ib_dma_map_single(rdma->cm_id->device, ··· 500 499 return ib_post_send(rdma->qp, &wr, &bad_wr); 501 500 502 501 error: 502 + kfree(c); 503 + kfree(rpl_context->rc); 504 + kfree(rpl_context); 503 505 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); 504 506 return -EIO; 505 - 507 + err_free1: 508 + kfree(rpl_context->rc); 509 + err_free2: 510 + kfree(rpl_context); 506 511 err_close: 507 512 spin_lock_irqsave(&rdma->req_lock, flags); 508 513 if (rdma->state < P9_RDMA_CLOSING) {
+2 -1
net/9p/trans_virtio.c
··· 329 329 330 330 mutex_lock(&virtio_9p_lock); 331 331 list_for_each_entry(chan, &virtio_chan_list, chan_list) { 332 - if (!strncmp(devname, chan->tag, chan->tag_len)) { 332 + if (!strncmp(devname, chan->tag, chan->tag_len) && 333 + strlen(devname) == chan->tag_len) { 333 334 if (!chan->inuse) { 334 335 chan->inuse = true; 335 336 found = 1;
+2 -10
net/atm/br2684.c
··· 399 399 unregister_netdev(net_dev); 400 400 free_netdev(net_dev); 401 401 } 402 - read_lock_irq(&devs_lock); 403 - if (list_empty(&br2684_devs)) { 404 - /* last br2684 device */ 405 - unregister_atmdevice_notifier(&atm_dev_notifier); 406 - } 407 - read_unlock_irq(&devs_lock); 408 402 return; 409 403 } 410 404 ··· 669 675 670 676 if (list_empty(&br2684_devs)) { 671 677 /* 1st br2684 device */ 672 - register_atmdevice_notifier(&atm_dev_notifier); 673 678 brdev->number = 1; 674 679 } else 675 680 brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; ··· 808 815 return -ENOMEM; 809 816 #endif 810 817 register_atm_ioctl(&br2684_ioctl_ops); 818 + register_atmdevice_notifier(&atm_dev_notifier); 811 819 return 0; 812 820 } 813 821 ··· 824 830 #endif 825 831 826 832 827 - /* if not already empty */ 828 - if (!list_empty(&br2684_devs)) 829 - unregister_atmdevice_notifier(&atm_dev_notifier); 833 + unregister_atmdevice_notifier(&atm_dev_notifier); 830 834 831 835 while (!list_empty(&br2684_devs)) { 832 836 net_dev = list_entry_brdev(br2684_devs.next);
+3 -2
net/core/iovec.c
··· 35 35 * in any case. 36 36 */ 37 37 38 - int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 38 + long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 39 39 { 40 - int size, err, ct; 40 + int size, ct; 41 + long err; 41 42 42 43 if (m->msg_namelen) { 43 44 if (mode == VERIFY_READ) {
+4 -4
net/core/sock.c
··· 1351 1351 { 1352 1352 int uid; 1353 1353 1354 - read_lock(&sk->sk_callback_lock); 1354 + read_lock_bh(&sk->sk_callback_lock); 1355 1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1356 - read_unlock(&sk->sk_callback_lock); 1356 + read_unlock_bh(&sk->sk_callback_lock); 1357 1357 return uid; 1358 1358 } 1359 1359 EXPORT_SYMBOL(sock_i_uid); ··· 1362 1362 { 1363 1363 unsigned long ino; 1364 1364 1365 - read_lock(&sk->sk_callback_lock); 1365 + read_lock_bh(&sk->sk_callback_lock); 1366 1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 1367 - read_unlock(&sk->sk_callback_lock); 1367 + read_unlock_bh(&sk->sk_callback_lock); 1368 1368 return ino; 1369 1369 } 1370 1370 EXPORT_SYMBOL(sock_i_ino);
+4 -4
net/ipv4/ip_gre.c
··· 45 45 #include <net/netns/generic.h> 46 46 #include <net/rtnetlink.h> 47 47 48 - #ifdef CONFIG_IPV6 48 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 49 49 #include <net/ipv6.h> 50 50 #include <net/ip6_fib.h> 51 51 #include <net/ip6_route.h> ··· 699 699 if ((dst = rt->rt_gateway) == 0) 700 700 goto tx_error_icmp; 701 701 } 702 - #ifdef CONFIG_IPV6 702 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 703 703 else if (skb->protocol == htons(ETH_P_IPV6)) { 704 704 struct in6_addr *addr6; 705 705 int addr_type; ··· 774 774 goto tx_error; 775 775 } 776 776 } 777 - #ifdef CONFIG_IPV6 777 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 778 778 else if (skb->protocol == htons(ETH_P_IPV6)) { 779 779 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); 780 780 ··· 850 850 if ((iph->ttl = tiph->ttl) == 0) { 851 851 if (skb->protocol == htons(ETH_P_IP)) 852 852 iph->ttl = old_iph->ttl; 853 - #ifdef CONFIG_IPV6 853 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 854 854 else if (skb->protocol == htons(ETH_P_IPV6)) 855 855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 856 856 #endif
+13 -6
net/ipv4/ip_output.c
··· 488 488 * we can switch to copy when see the first bad fragment. 489 489 */ 490 490 if (skb_has_frags(skb)) { 491 - struct sk_buff *frag; 491 + struct sk_buff *frag, *frag2; 492 492 int first_len = skb_pagelen(skb); 493 - int truesizes = 0; 494 493 495 494 if (first_len - hlen > mtu || 496 495 ((first_len - hlen) & 7) || ··· 502 503 if (frag->len > mtu || 503 504 ((frag->len & 7) && frag->next) || 504 505 skb_headroom(frag) < hlen) 505 - goto slow_path; 506 + goto slow_path_clean; 506 507 507 508 /* Partially cloned skb? */ 508 509 if (skb_shared(frag)) 509 - goto slow_path; 510 + goto slow_path_clean; 510 511 511 512 BUG_ON(frag->sk); 512 513 if (skb->sk) { 513 514 frag->sk = skb->sk; 514 515 frag->destructor = sock_wfree; 515 516 } 516 - truesizes += frag->truesize; 517 + skb->truesize -= frag->truesize; 517 518 } 518 519 519 520 /* Everything is OK. Generate! */ ··· 523 524 frag = skb_shinfo(skb)->frag_list; 524 525 skb_frag_list_init(skb); 525 526 skb->data_len = first_len - skb_headlen(skb); 526 - skb->truesize -= truesizes; 527 527 skb->len = first_len; 528 528 iph->tot_len = htons(first_len); 529 529 iph->frag_off = htons(IP_MF); ··· 574 576 } 575 577 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 576 578 return err; 579 + 580 + slow_path_clean: 581 + skb_walk_frags(skb, frag2) { 582 + if (frag2 == frag) 583 + break; 584 + frag2->sk = NULL; 585 + frag2->destructor = NULL; 586 + skb->truesize += frag2->truesize; 587 + } 577 588 } 578 589 579 590 slow_path:
+1
net/ipv4/netfilter/ipt_REJECT.c
··· 112 112 /* ip_route_me_harder expects skb->dst to be set */ 113 113 skb_dst_set_noref(nskb, skb_dst(oldskb)); 114 114 115 + nskb->protocol = htons(ETH_P_IP); 115 116 if (ip_route_me_harder(nskb, addr_type)) 116 117 goto free_nskb; 117 118
+3 -1
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 66 66 const struct net_device *out, 67 67 int (*okfn)(struct sk_buff *)) 68 68 { 69 + struct sock *sk = skb->sk; 69 70 struct inet_sock *inet = inet_sk(skb->sk); 70 71 71 - if (inet && inet->nodefrag) 72 + if (sk && (sk->sk_family == PF_INET) && 73 + inet->nodefrag) 72 74 return NF_ACCEPT; 73 75 74 76 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+4 -2
net/ipv4/netfilter/nf_nat_snmp_basic.c
··· 893 893 unsigned char s[4]; 894 894 895 895 if (offset & 1) { 896 - s[0] = s[2] = 0; 896 + s[0] = ~0; 897 897 s[1] = ~*optr; 898 + s[2] = 0; 898 899 s[3] = *nptr; 899 900 } else { 900 - s[1] = s[3] = 0; 901 901 s[0] = ~*optr; 902 + s[1] = ~0; 902 903 s[2] = *nptr; 904 + s[3] = 0; 903 905 } 904 906 905 907 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
+1 -1
net/ipv4/route.c
··· 1231 1231 } 1232 1232 1233 1233 if (net_ratelimit()) 1234 - printk(KERN_WARNING "Neighbour table overflow.\n"); 1234 + printk(KERN_WARNING "ipv4: Neighbour table overflow.\n"); 1235 1235 rt_drop(rt); 1236 1236 return -ENOBUFS; 1237 1237 }
+6 -3
net/ipv4/tcp.c
··· 386 386 */ 387 387 388 388 mask = 0; 389 - if (sk->sk_err) 390 - mask = POLLERR; 391 389 392 390 /* 393 391 * POLLHUP is certainly not done right. But poll() doesn't ··· 455 457 if (tp->urg_data & TCP_URG_VALID) 456 458 mask |= POLLPRI; 457 459 } 460 + /* This barrier is coupled with smp_wmb() in tcp_reset() */ 461 + smp_rmb(); 462 + if (sk->sk_err) 463 + mask |= POLLERR; 464 + 458 465 return mask; 459 466 } 460 467 EXPORT_SYMBOL(tcp_poll); ··· 943 940 sg = sk->sk_route_caps & NETIF_F_SG; 944 941 945 942 while (--iovlen >= 0) { 946 - int seglen = iov->iov_len; 943 + size_t seglen = iov->iov_len; 947 944 unsigned char __user *from = iov->iov_base; 948 945 949 946 iov++;
+4 -1
net/ipv4/tcp_input.c
··· 2545 2545 cnt += tcp_skb_pcount(skb); 2546 2546 2547 2547 if (cnt > packets) { 2548 - if (tcp_is_sack(tp) || (oldcnt >= packets)) 2548 + if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || 2549 + (oldcnt >= packets)) 2549 2550 break; 2550 2551 2551 2552 mss = skb_shinfo(skb)->gso_size; ··· 4049 4048 default: 4050 4049 sk->sk_err = ECONNRESET; 4051 4050 } 4051 + /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4052 + smp_wmb(); 4052 4053 4053 4054 if (!sock_flag(sk, SOCK_DEAD)) 4054 4055 sk->sk_error_report(sk);
+1 -1
net/ipv4/xfrm4_policy.c
··· 61 61 62 62 static int xfrm4_get_tos(struct flowi *fl) 63 63 { 64 - return fl->fl4_tos; 64 + return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */ 65 65 } 66 66 67 67 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
+19 -14
net/ipv4/xfrm4_state.c
··· 21 21 } 22 22 23 23 static void 24 - __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, 25 - struct xfrm_tmpl *tmpl, 26 - xfrm_address_t *daddr, xfrm_address_t *saddr) 24 + __xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) 27 25 { 28 - x->sel.daddr.a4 = fl->fl4_dst; 29 - x->sel.saddr.a4 = fl->fl4_src; 30 - x->sel.dport = xfrm_flowi_dport(fl); 31 - x->sel.dport_mask = htons(0xffff); 32 - x->sel.sport = xfrm_flowi_sport(fl); 33 - x->sel.sport_mask = htons(0xffff); 34 - x->sel.family = AF_INET; 35 - x->sel.prefixlen_d = 32; 36 - x->sel.prefixlen_s = 32; 37 - x->sel.proto = fl->proto; 38 - x->sel.ifindex = fl->oif; 26 + sel->daddr.a4 = fl->fl4_dst; 27 + sel->saddr.a4 = fl->fl4_src; 28 + sel->dport = xfrm_flowi_dport(fl); 29 + sel->dport_mask = htons(0xffff); 30 + sel->sport = xfrm_flowi_sport(fl); 31 + sel->sport_mask = htons(0xffff); 32 + sel->family = AF_INET; 33 + sel->prefixlen_d = 32; 34 + sel->prefixlen_s = 32; 35 + sel->proto = fl->proto; 36 + sel->ifindex = fl->oif; 37 + } 38 + 39 + static void 40 + xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, 41 + xfrm_address_t *daddr, xfrm_address_t *saddr) 42 + { 39 43 x->id = tmpl->id; 40 44 if (x->id.daddr.a4 == 0) 41 45 x->id.daddr.a4 = daddr->a4; ··· 74 70 .owner = THIS_MODULE, 75 71 .init_flags = xfrm4_init_flags, 76 72 .init_tempsel = __xfrm4_init_tempsel, 73 + .init_temprop = xfrm4_init_temprop, 77 74 .output = xfrm4_output, 78 75 .extract_input = xfrm4_extract_input, 79 76 .extract_output = xfrm4_extract_output,
+8 -3
net/ipv6/addrconf.c
··· 4637 4637 if (err < 0) { 4638 4638 printk(KERN_CRIT "IPv6 Addrconf:" 4639 4639 " cannot initialize default policy table: %d.\n", err); 4640 - return err; 4640 + goto out; 4641 4641 } 4642 4642 4643 - register_pernet_subsys(&addrconf_ops); 4643 + err = register_pernet_subsys(&addrconf_ops); 4644 + if (err < 0) 4645 + goto out_addrlabel; 4644 4646 4645 4647 /* The addrconf netdev notifier requires that loopback_dev 4646 4648 * has it's ipv6 private information allocated and setup ··· 4694 4692 unregister_netdevice_notifier(&ipv6_dev_notf); 4695 4693 errlo: 4696 4694 unregister_pernet_subsys(&addrconf_ops); 4697 - 4695 + out_addrlabel: 4696 + ipv6_addr_label_cleanup(); 4697 + out: 4698 4698 return err; 4699 4699 } 4700 4700 ··· 4707 4703 4708 4704 unregister_netdevice_notifier(&ipv6_dev_notf); 4709 4705 unregister_pernet_subsys(&addrconf_ops); 4706 + ipv6_addr_label_cleanup(); 4710 4707 4711 4708 rtnl_lock(); 4712 4709
+5
net/ipv6/addrlabel.c
··· 393 393 return register_pernet_subsys(&ipv6_addr_label_ops); 394 394 } 395 395 396 + void ipv6_addr_label_cleanup(void) 397 + { 398 + unregister_pernet_subsys(&ipv6_addr_label_ops); 399 + } 400 + 396 401 static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 397 402 [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, 398 403 [IFAL_LABEL] = { .len = sizeof(u32), },
+13 -5
net/ipv6/ip6_output.c
··· 639 639 640 640 if (skb_has_frags(skb)) { 641 641 int first_len = skb_pagelen(skb); 642 - int truesizes = 0; 642 + struct sk_buff *frag2; 643 643 644 644 if (first_len - hlen > mtu || 645 645 ((first_len - hlen) & 7) || ··· 651 651 if (frag->len > mtu || 652 652 ((frag->len & 7) && frag->next) || 653 653 skb_headroom(frag) < hlen) 654 - goto slow_path; 654 + goto slow_path_clean; 655 655 656 656 /* Partially cloned skb? */ 657 657 if (skb_shared(frag)) 658 - goto slow_path; 658 + goto slow_path_clean; 659 659 660 660 BUG_ON(frag->sk); 661 661 if (skb->sk) { 662 662 frag->sk = skb->sk; 663 663 frag->destructor = sock_wfree; 664 - truesizes += frag->truesize; 665 664 } 665 + skb->truesize -= frag->truesize; 666 666 } 667 667 668 668 err = 0; ··· 693 693 694 694 first_len = skb_pagelen(skb); 695 695 skb->data_len = first_len - skb_headlen(skb); 696 - skb->truesize -= truesizes; 697 696 skb->len = first_len; 698 697 ipv6_hdr(skb)->payload_len = htons(first_len - 699 698 sizeof(struct ipv6hdr)); ··· 755 756 IPSTATS_MIB_FRAGFAILS); 756 757 dst_release(&rt->dst); 757 758 return err; 759 + 760 + slow_path_clean: 761 + skb_walk_frags(skb, frag2) { 762 + if (frag2 == frag) 763 + break; 764 + frag2->sk = NULL; 765 + frag2->destructor = NULL; 766 + skb->truesize += frag2->truesize; 767 + } 758 768 } 759 769 760 770 slow_path:
+1 -1
net/ipv6/route.c
··· 670 670 671 671 if (net_ratelimit()) 672 672 printk(KERN_WARNING 673 - "Neighbour table overflow.\n"); 673 + "ipv6: Neighbour table overflow.\n"); 674 674 dst_free(&rt->dst); 675 675 return NULL; 676 676 }
+19 -14
net/ipv6/xfrm6_state.c
··· 20 20 #include <net/addrconf.h> 21 21 22 22 static void 23 - __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl, 24 - struct xfrm_tmpl *tmpl, 25 - xfrm_address_t *daddr, xfrm_address_t *saddr) 23 + __xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl) 26 24 { 27 25 /* Initialize temporary selector matching only 28 26 * to current session. */ 29 - ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst); 30 - ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src); 31 - x->sel.dport = xfrm_flowi_dport(fl); 32 - x->sel.dport_mask = htons(0xffff); 33 - x->sel.sport = xfrm_flowi_sport(fl); 34 - x->sel.sport_mask = htons(0xffff); 35 - x->sel.family = AF_INET6; 36 - x->sel.prefixlen_d = 128; 37 - x->sel.prefixlen_s = 128; 38 - x->sel.proto = fl->proto; 39 - x->sel.ifindex = fl->oif; 27 + ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst); 28 + ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src); 29 + sel->dport = xfrm_flowi_dport(fl); 30 + sel->dport_mask = htons(0xffff); 31 + sel->sport = xfrm_flowi_sport(fl); 32 + sel->sport_mask = htons(0xffff); 33 + sel->family = AF_INET6; 34 + sel->prefixlen_d = 128; 35 + sel->prefixlen_s = 128; 36 + sel->proto = fl->proto; 37 + sel->ifindex = fl->oif; 38 + } 39 + 40 + static void 41 + xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl, 42 + xfrm_address_t *daddr, xfrm_address_t *saddr) 43 + { 40 44 x->id = tmpl->id; 41 45 if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) 42 46 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); ··· 172 168 .eth_proto = htons(ETH_P_IPV6), 173 169 .owner = THIS_MODULE, 174 170 .init_tempsel = __xfrm6_init_tempsel, 171 + .init_temprop = xfrm6_init_temprop, 175 172 .tmpl_sort = __xfrm6_tmpl_sort, 176 173 .state_sort = __xfrm6_state_sort, 177 174 .output = xfrm6_output,
+3 -1
net/netfilter/nf_conntrack_extend.c
··· 48 48 { 49 49 unsigned int off, len; 50 50 struct nf_ct_ext_type *t; 51 + size_t alloc_size; 51 52 52 53 rcu_read_lock(); 53 54 t = rcu_dereference(nf_ct_ext_types[id]); 54 55 BUG_ON(t == NULL); 55 56 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 56 57 len = off + t->len; 58 + alloc_size = t->alloc_size; 57 59 rcu_read_unlock(); 58 60 59 - *ext = kzalloc(t->alloc_size, gfp); 61 + *ext = kzalloc(alloc_size, gfp); 60 62 if (!*ext) 61 63 return NULL; 62 64
+1 -1
net/netfilter/nf_conntrack_sip.c
··· 1376 1376 unsigned int msglen, origlen; 1377 1377 const char *dptr, *end; 1378 1378 s16 diff, tdiff = 0; 1379 - int ret; 1379 + int ret = NF_ACCEPT; 1380 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1381 1381 1382 1382 if (ctinfo != IP_CT_ESTABLISHED &&
+5 -1
net/netfilter/nf_tproxy_core.c
··· 70 70 int 71 71 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 72 72 { 73 - if (inet_sk(sk)->transparent) { 73 + bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? 74 + inet_twsk(sk)->tw_transparent : 75 + inet_sk(sk)->transparent; 76 + 77 + if (transparent) { 74 78 skb_orphan(skb); 75 79 skb->sk = sk; 76 80 skb->destructor = nf_tproxy_destructor;
+2 -2
net/rds/tcp_connect.c
··· 43 43 struct rds_connection *conn; 44 44 struct rds_tcp_connection *tc; 45 45 46 - read_lock(&sk->sk_callback_lock); 46 + read_lock_bh(&sk->sk_callback_lock); 47 47 conn = sk->sk_user_data; 48 48 if (conn == NULL) { 49 49 state_change = sk->sk_state_change; ··· 68 68 break; 69 69 } 70 70 out: 71 - read_unlock(&sk->sk_callback_lock); 71 + read_unlock_bh(&sk->sk_callback_lock); 72 72 state_change(sk); 73 73 } 74 74
+2 -2
net/rds/tcp_listen.c
··· 114 114 115 115 rdsdebug("listen data ready sk %p\n", sk); 116 116 117 - read_lock(&sk->sk_callback_lock); 117 + read_lock_bh(&sk->sk_callback_lock); 118 118 ready = sk->sk_user_data; 119 119 if (ready == NULL) { /* check for teardown race */ 120 120 ready = sk->sk_data_ready; ··· 131 131 queue_work(rds_wq, &rds_tcp_listen_work); 132 132 133 133 out: 134 - read_unlock(&sk->sk_callback_lock); 134 + read_unlock_bh(&sk->sk_callback_lock); 135 135 ready(sk, bytes); 136 136 } 137 137
+2 -2
net/rds/tcp_recv.c
··· 324 324 325 325 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 326 326 327 - read_lock(&sk->sk_callback_lock); 327 + read_lock_bh(&sk->sk_callback_lock); 328 328 conn = sk->sk_user_data; 329 329 if (conn == NULL) { /* check for teardown race */ 330 330 ready = sk->sk_data_ready; ··· 338 338 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) 339 339 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 340 340 out: 341 - read_unlock(&sk->sk_callback_lock); 341 + read_unlock_bh(&sk->sk_callback_lock); 342 342 ready(sk, bytes); 343 343 } 344 344
+2 -2
net/rds/tcp_send.c
··· 224 224 struct rds_connection *conn; 225 225 struct rds_tcp_connection *tc; 226 226 227 - read_lock(&sk->sk_callback_lock); 227 + read_lock_bh(&sk->sk_callback_lock); 228 228 conn = sk->sk_user_data; 229 229 if (conn == NULL) { 230 230 write_space = sk->sk_write_space; ··· 244 244 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 245 245 246 246 out: 247 - read_unlock(&sk->sk_callback_lock); 247 + read_unlock_bh(&sk->sk_callback_lock); 248 248 249 249 /* 250 250 * write_space is only called when data leaves tcp's send queue if
+2 -2
net/rose/af_rose.c
··· 679 679 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 680 680 return -EINVAL; 681 681 682 - if (addr->srose_ndigis > ROSE_MAX_DIGIS) 682 + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 683 683 return -EINVAL; 684 684 685 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { ··· 739 739 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 740 740 return -EINVAL; 741 741 742 - if (addr->srose_ndigis > ROSE_MAX_DIGIS) 742 + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 743 743 return -EINVAL; 744 744 745 745 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
+14 -14
net/sunrpc/xprtsock.c
··· 800 800 u32 _xid; 801 801 __be32 *xp; 802 802 803 - read_lock(&sk->sk_callback_lock); 803 + read_lock_bh(&sk->sk_callback_lock); 804 804 dprintk("RPC: xs_udp_data_ready...\n"); 805 805 if (!(xprt = xprt_from_sock(sk))) 806 806 goto out; ··· 852 852 dropit: 853 853 skb_free_datagram(sk, skb); 854 854 out: 855 - read_unlock(&sk->sk_callback_lock); 855 + read_unlock_bh(&sk->sk_callback_lock); 856 856 } 857 857 858 858 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) ··· 1229 1229 1230 1230 dprintk("RPC: xs_tcp_data_ready...\n"); 1231 1231 1232 - read_lock(&sk->sk_callback_lock); 1232 + read_lock_bh(&sk->sk_callback_lock); 1233 1233 if (!(xprt = xprt_from_sock(sk))) 1234 1234 goto out; 1235 1235 if (xprt->shutdown) ··· 1248 1248 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); 1249 1249 } while (read > 0); 1250 1250 out: 1251 - read_unlock(&sk->sk_callback_lock); 1251 + read_unlock_bh(&sk->sk_callback_lock); 1252 1252 } 1253 1253 1254 1254 /* ··· 1301 1301 { 1302 1302 struct rpc_xprt *xprt; 1303 1303 1304 - read_lock(&sk->sk_callback_lock); 1304 + read_lock_bh(&sk->sk_callback_lock); 1305 1305 if (!(xprt = xprt_from_sock(sk))) 1306 1306 goto out; 1307 1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); ··· 1313 1313 1314 1314 switch (sk->sk_state) { 1315 1315 case TCP_ESTABLISHED: 1316 - spin_lock_bh(&xprt->transport_lock); 1316 + spin_lock(&xprt->transport_lock); 1317 1317 if (!xprt_test_and_set_connected(xprt)) { 1318 1318 struct sock_xprt *transport = container_of(xprt, 1319 1319 struct sock_xprt, xprt); ··· 1327 1327 1328 1328 xprt_wake_pending_tasks(xprt, -EAGAIN); 1329 1329 } 1330 - spin_unlock_bh(&xprt->transport_lock); 1330 + spin_unlock(&xprt->transport_lock); 1331 1331 break; 1332 1332 case TCP_FIN_WAIT1: 1333 1333 /* The client initiated a shutdown of the socket */ ··· 1365 1365 xs_sock_mark_closed(xprt); 1366 1366 } 1367 1367 out: 1368 - read_unlock(&sk->sk_callback_lock); 1368 + read_unlock_bh(&sk->sk_callback_lock); 1369 1369 } 1370 1370 1371 1371 /** ··· 1376 1376 { 1377 1377 struct rpc_xprt *xprt; 1378 1378 1379 - read_lock(&sk->sk_callback_lock); 1379 + read_lock_bh(&sk->sk_callback_lock); 1380 1380 if (!(xprt = xprt_from_sock(sk))) 1381 1381 goto out; 1382 1382 dprintk("RPC: %s client %p...\n" ··· 1384 1384 __func__, xprt, sk->sk_err); 1385 1385 xprt_wake_pending_tasks(xprt, -EAGAIN); 1386 1386 out: 1387 - read_unlock(&sk->sk_callback_lock); 1387 + read_unlock_bh(&sk->sk_callback_lock); 1388 1388 } 1389 1389 1390 1390 static void xs_write_space(struct sock *sk) ··· 1416 1416 */ 1417 1417 static void xs_udp_write_space(struct sock *sk) 1418 1418 { 1419 - read_lock(&sk->sk_callback_lock); 1419 + read_lock_bh(&sk->sk_callback_lock); 1420 1420 1421 1421 /* from net/core/sock.c:sock_def_write_space */ 1422 1422 if (sock_writeable(sk)) 1423 1423 xs_write_space(sk); 1424 1424 1425 - read_unlock(&sk->sk_callback_lock); 1425 + read_unlock_bh(&sk->sk_callback_lock); 1426 1426 } 1427 1427 1428 1428 /** ··· 1437 1437 */ 1438 1438 static void xs_tcp_write_space(struct sock *sk) 1439 1439 { 1440 - read_lock(&sk->sk_callback_lock); 1440 + read_lock_bh(&sk->sk_callback_lock); 1441 1441 1442 1442 /* from net/core/stream.c:sk_stream_write_space */ 1443 1443 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 1444 1444 xs_write_space(sk); 1445 1445 1446 - read_unlock(&sk->sk_callback_lock); 1446 + read_unlock_bh(&sk->sk_callback_lock); 1447 1447 } 1448 1448 1449 1449 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
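
Beyond the same _bh conversion as in the RDS hunks, xs_tcp_state_change() switches the nested transport_lock from spin_lock_bh() to plain spin_lock(): the surrounding read_lock_bh() has already disabled bottom halves, so the inner lock does not need to toggle them again. A short sketch of that nesting, with stand-in structures:

    #include <linux/spinlock.h>

    struct demo_sk   { rwlock_t   callback_lock;  };   /* stand-ins */
    struct demo_xprt { spinlock_t transport_lock; };

    static void demo_state_change(struct demo_sk *sk, struct demo_xprt *xprt)
    {
        read_lock_bh(&sk->callback_lock);     /* BHs disabled from here on */
        spin_lock(&xprt->transport_lock);     /* plain variant is sufficient */
        /* ... mark the transport connected, wake pending tasks ... */
        spin_unlock(&xprt->transport_lock);
        read_unlock_bh(&sk->callback_lock);
    }
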
+1 -1
net/wireless/wext-priv.c
··· 152 152 } else if (!iwp->pointer) 153 153 return -EFAULT; 154 154 155 - extra = kmalloc(extra_size, GFP_KERNEL); 155 + extra = kzalloc(extra_size, GFP_KERNEL); 156 156 if (!extra) 157 157 return -ENOMEM; 158 158
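
Using kzalloc() here closes an information leak: the private ioctl handler may fill only part of the extra buffer, and the untouched tail was later copied to userspace as uninitialised heap memory. A minimal sketch of the pattern (the destination pointer and the handler step are hypothetical):

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Zero-fill a buffer whose tail may reach userspace unmodified. */
    static int demo_get_extra(void __user *to, size_t extra_size)
    {
        char *extra = kzalloc(extra_size, GFP_KERNEL);
        int err;

        if (!extra)
            return -ENOMEM;
        /* ... hypothetical handler writes up to extra_size bytes ... */
        err = copy_to_user(to, extra, extra_size) ? -EFAULT : 0;
        kfree(extra);
        return err;
    }
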
+2 -3
net/xfrm/xfrm_policy.c
··· 1175 1175 tmpl->mode == XFRM_MODE_BEET) { 1176 1176 remote = &tmpl->id.daddr; 1177 1177 local = &tmpl->saddr; 1178 - family = tmpl->encap_family; 1179 - if (xfrm_addr_any(local, family)) { 1180 - error = xfrm_get_saddr(net, &tmp, remote, family); 1178 + if (xfrm_addr_any(local, tmpl->encap_family)) { 1179 + error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); 1181 1180 if (error) 1182 1181 goto fail; 1183 1182 local = &tmp;
+27 -18
net/xfrm/xfrm_state.c
··· 656 656 EXPORT_SYMBOL(xfrm_sad_getinfo); 657 657 658 658 static int
659 - xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
660 - struct xfrm_tmpl *tmpl,
661 - xfrm_address_t *daddr, xfrm_address_t *saddr,
662 - unsigned short family)
659 + xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
660 + struct xfrm_tmpl *tmpl,
661 + xfrm_address_t *daddr, xfrm_address_t *saddr,
662 + unsigned short family)
663 663 {
664 664 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
665 665 if (!afinfo)
666 666 return -1;
667 - afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
667 + afinfo->init_tempsel(&x->sel, fl);
668 +
669 + if (family != tmpl->encap_family) {
670 + xfrm_state_put_afinfo(afinfo);
671 + afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
672 + if (!afinfo)
673 + return -1;
674 + }
675 + afinfo->init_temprop(x, tmpl, daddr, saddr);
668 676 xfrm_state_put_afinfo(afinfo);
669 677 return 0;
670 678 }
··· 798 790 int error = 0;
799 791 struct xfrm_state *best = NULL;
800 792 u32 mark = pol->mark.v & pol->mark.m;
793 + unsigned short encap_family = tmpl->encap_family;
801 794
802 795 to_put = NULL;
803 796
804 797 spin_lock_bh(&xfrm_state_lock);
805 - h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
798 + h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
806 799 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
807 - if (x->props.family == family &&
800 + if (x->props.family == encap_family &&
808 801 x->props.reqid == tmpl->reqid &&
809 802 (mark & x->mark.m) == x->mark.v &&
810 803 !(x->props.flags & XFRM_STATE_WILDRECV) &&
811 - xfrm_state_addr_check(x, daddr, saddr, family) &&
804 + xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
812 805 tmpl->mode == x->props.mode &&
813 806 tmpl->id.proto == x->id.proto &&
814 807 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
815 - xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
808 + xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
816 809 &best, &acquire_in_progress, &error);
817 810 }
818 811 if (best)
819 812 goto found;
820 813
821 - h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
814 + h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
822 815 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
823 - if (x->props.family == family &&
816 + if (x->props.family == encap_family &&
824 817 x->props.reqid == tmpl->reqid &&
825 818 (mark & x->mark.m) == x->mark.v &&
826 819 !(x->props.flags & XFRM_STATE_WILDRECV) &&
827 - xfrm_state_addr_check(x, daddr, saddr, family) &&
820 + xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
828 821 tmpl->mode == x->props.mode &&
829 822 tmpl->id.proto == x->id.proto &&
830 823 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
831 - xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
824 + xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
832 825 &best, &acquire_in_progress, &error);
833 826 }
834 827
··· 838 829 if (!x && !error && !acquire_in_progress) {
839 830 if (tmpl->id.spi &&
840 831 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
841 - tmpl->id.proto, family)) != NULL) {
832 + tmpl->id.proto, encap_family)) != NULL) {
842 833 to_put = x0;
843 834 error = -EEXIST;
844 835 goto out;
··· 848 839 error = -ENOMEM;
849 840 goto out;
850 841 }
851 - /* Initialize temporary selector matching only
842 + /* Initialize temporary state matching only
852 843 * to current session. */
853 - xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
844 + xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
854 845 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
855 846
856 847 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
··· 865 856 x->km.state = XFRM_STATE_ACQ;
866 857 list_add(&x->km.all, &net->xfrm.state_all);
867 858 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
868 - h = xfrm_src_hash(net, daddr, saddr, family);
859 + h = xfrm_src_hash(net, daddr, saddr, encap_family);
869 860 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
870 861 if (x->id.spi) {
871 - h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
862 + h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
872 863 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
873 864 }
874 865 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
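
Together with the xfrm_policy.c hunk above, this lets a template's encapsulation family differ from the selector family (interfamily tunnels): the temporary state's selector is filled in from the flow in the caller's family, while the state properties and the bydst/bysrc/byspi hash lookups are keyed by tmpl->encap_family, fetching a second per-family afinfo when the two differ. A restatement of the new helper from the hunk, with comments added:

    /* Selector in the flow's family, state properties in the template's
     * encapsulation family (a second afinfo only when they differ). */
    static int demo_init_tempstate(struct xfrm_state *x, struct flowi *fl,
                                   struct xfrm_tmpl *tmpl,
                                   xfrm_address_t *daddr, xfrm_address_t *saddr,
                                   unsigned short family)
    {
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

        if (!afinfo)
            return -1;
        afinfo->init_tempsel(&x->sel, fl);           /* selector: flow family */

        if (family != tmpl->encap_family) {
            xfrm_state_put_afinfo(afinfo);
            afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
            if (!afinfo)
                return -1;
        }
        afinfo->init_temprop(x, tmpl, daddr, saddr); /* props: encap family */
        xfrm_state_put_afinfo(afinfo);
        return 0;
    }
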
+5 -1
security/tomoyo/common.c
··· 1416 1416 const pid_t gpid = task_pid_nr(current); 1417 1417 static const int tomoyo_buffer_len = 4096; 1418 1418 char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); 1419 + pid_t ppid; 1419 1420 if (!buffer) 1420 1421 return NULL; 1421 1422 do_gettimeofday(&tv); 1423 + rcu_read_lock(); 1424 + ppid = task_tgid_vnr(current->real_parent); 1425 + rcu_read_unlock(); 1422 1426 snprintf(buffer, tomoyo_buffer_len - 1, 1423 1427 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" 1424 1428 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" 1425 1429 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", 1426 1430 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, 1427 - (pid_t) sys_getpid(), (pid_t) sys_getppid(), 1431 + task_tgid_vnr(current), ppid, 1428 1432 current_uid(), current_gid(), current_euid(), 1429 1433 current_egid(), current_suid(), current_sgid(), 1430 1434 current_fsuid(), current_fsgid());
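
Kernel code should not call the sys_getpid()/sys_getppid() syscall entry points directly (their prototypes are removed from common.h below); the fix reads the thread-group ids with task_tgid_vnr() and takes rcu_read_lock() around the current->real_parent dereference, since the parent pointer is protected by RCU. A minimal sketch of that pattern:

    #include <linux/sched.h>
    #include <linux/rcupdate.h>

    /* Fetch pid and ppid as seen in the current pid namespace. */
    static void demo_get_pids(pid_t *pid, pid_t *ppid)
    {
        *pid = task_tgid_vnr(current);

        rcu_read_lock();              /* real_parent may change under us */
        *ppid = task_tgid_vnr(current->real_parent);
        rcu_read_unlock();
    }
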
-3
security/tomoyo/common.h
··· 689 689 690 690 /********** Function prototypes. **********/ 691 691 692 - extern asmlinkage long sys_getpid(void); 693 - extern asmlinkage long sys_getppid(void); 694 - 695 692 /* Check whether the given string starts with the given keyword. */ 696 693 bool tomoyo_str_starts(char **src, const char *find); 697 694 /* Get tomoyo_realpath() of current process. */