Auto-update from upstream

Tony Luck 729c80c6 4eaefb39

+711 -527
+1 -2
CREDITS
@@ 2423 @@
 S: Canada
 
 N: Zwane Mwaikambo
-E: zwane@linuxpower.ca
-W: http://function.linuxpower.ca
+E: zwane@arm.linux.org.uk
 D: Various driver hacking
 D: Lowlevel x86 kernel hacking
 D: General debugging
+2 -2
MAINTAINERS
@@ 1739 @@
 
 OPL3-SA2, SA3, and SAx DRIVER
 P: Zwane Mwaikambo
-M: zwane@commfireservices.com
+M: zwane@arm.linux.org.uk
 L: linux-sound@vger.kernel.org
 S: Maintained
 
@@ 1995 @@
 
 SC1200 WDT DRIVER
 P: Zwane Mwaikambo
-M: zwane@commfireservices.com
+M: zwane@arm.linux.org.uk
 S: Maintained
 
 SCHEDULER
-4
arch/i386/kernel/apic.c
@@ 726 @@
 static int __init detect_init_APIC (void)
 {
 	u32 h, l, features;
-	extern void get_cpu_vendor(struct cpuinfo_x86*);
 
 	/* Disabled by kernel option? */
 	if (enable_local_apic < 0)
 		return -1;
-
-	/* Workaround for us being called before identify_cpu(). */
-	get_cpu_vendor(&boot_cpu_data);
 
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
+2 -2
arch/i386/kernel/nmi.c
@@ 195 @@
 		wrmsr(MSR_P6_EVNTSEL0, 0, 0);
 		break;
 	case 15:
-		if (boot_cpu_data.x86_model > 0x3)
+		if (boot_cpu_data.x86_model > 0x4)
 			break;
 
 		wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
@@ 432 @@
 		setup_p6_watchdog();
 		break;
 	case 15:
-		if (boot_cpu_data.x86_model > 0x3)
+		if (boot_cpu_data.x86_model > 0x4)
 			return;
 
 		if (!setup_p4_watchdog())
+7 -3
arch/ppc/syslib/ppc4xx_dma.c
@@ 620 @@
 	return DMA_STATUS_GOOD;
 }
 
+#ifdef CONFIG_PPC4xx_EDMA
 /*
  * Enables the burst on the channel (BTEN bit in the control/count register)
  * Note:
@@ 686 @@
 	return DMA_STATUS_GOOD;
 }
 
+EXPORT_SYMBOL(ppc4xx_enable_burst);
+EXPORT_SYMBOL(ppc4xx_disable_burst);
+EXPORT_SYMBOL(ppc4xx_set_burst_size);
+#endif /* CONFIG_PPC4xx_EDMA */
+
 EXPORT_SYMBOL(ppc4xx_init_dma_channel);
 EXPORT_SYMBOL(ppc4xx_get_channel_config);
 EXPORT_SYMBOL(ppc4xx_set_channel_priority);
@@ 709 @@
 EXPORT_SYMBOL(ppc4xx_disable_dma_interrupt);
 EXPORT_SYMBOL(ppc4xx_get_dma_status);
 EXPORT_SYMBOL(ppc4xx_clr_dma_status);
-EXPORT_SYMBOL(ppc4xx_enable_burst);
-EXPORT_SYMBOL(ppc4xx_disable_burst);
-EXPORT_SYMBOL(ppc4xx_set_burst_size);
+
+5
arch/sh/kernel/entry.S
@@ 1145 @@
 	.long sys_add_key	/* 285 */
 	.long sys_request_key
 	.long sys_keyctl
+	.long sys_ioprio_set
+	.long sys_ioprio_get
+	.long sys_inotify_init	/* 290 */
+	.long sys_inotify_add_watch
+	.long sys_inotify_rm_watch
 
 /* End of entry.S */
+5
arch/sh64/kernel/syscalls.S
@@ 342 @@
 	.long sys_add_key
 	.long sys_request_key
 	.long sys_keyctl	/* 315 */
+	.long sys_ioprio_set
+	.long sys_ioprio_get
+	.long sys_inotify_init
+	.long sys_inotify_add_watch
+	.long sys_inotify_rm_watch	/* 320 */
 
+3 -2
arch/sparc/kernel/sparc_ksyms.c
@@ 98 @@
  * The module references will be fixed up by module_frob_arch_sections.
  */
 #define DOT_ALIAS2(__ret, __x, __arg1, __arg2) \
-	extern __ret __x(__arg1, __arg2) \
-	__attribute__((weak, alias("." # __x)));
+	extern __ret __x(__arg1, __arg2); \
+	asm(".weak " #__x);\
+	asm(#__x "=." #__x);
 
 DOT_ALIAS2(int, div, int, int)
 DOT_ALIAS2(int, mul, int, int)
+1 -1
arch/sparc64/kernel/Makefile
@@ 8 @@
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		:= process.o setup.o cpu.o idprom.o \
-		   traps.o devices.o auxio.o \
+		   traps.o devices.o auxio.o una_asm.o \
 		   irq.o ptrace.o time.o sys_sparc.o signal.o \
 		   unaligned.o central.o pci.o starfire.o semaphore.o \
 		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
+3
arch/sparc64/kernel/traps.c
@@ 2127 @@
 	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
 	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
 	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
+	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
+	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
+	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
 	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
 	    (TI_FPREGS & (64 - 1)))
 		thread_info_offsets_are_bolixed_dave();
+153
arch/sparc64/kernel/una_asm.S
··· 1 + /* una_asm.S: Kernel unaligned trap assembler helpers. 2 + * 3 + * Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net) 4 + * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 5 + */ 6 + 7 + .text 8 + 9 + kernel_unaligned_trap_fault: 10 + call kernel_mna_trap_fault 11 + nop 12 + retl 13 + nop 14 + .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault 15 + 16 + .globl __do_int_store 17 + __do_int_store: 18 + rd %asi, %o4 19 + wr %o3, 0, %asi 20 + ldx [%o2], %g3 21 + cmp %o1, 2 22 + be,pn %icc, 2f 23 + cmp %o1, 4 24 + be,pt %icc, 1f 25 + srlx %g3, 24, %g2 26 + srlx %g3, 56, %g1 27 + srlx %g3, 48, %g7 28 + 4: stba %g1, [%o0] %asi 29 + srlx %g3, 40, %g1 30 + 5: stba %g7, [%o0 + 1] %asi 31 + srlx %g3, 32, %g7 32 + 6: stba %g1, [%o0 + 2] %asi 33 + 7: stba %g7, [%o0 + 3] %asi 34 + srlx %g3, 16, %g1 35 + 8: stba %g2, [%o0 + 4] %asi 36 + srlx %g3, 8, %g7 37 + 9: stba %g1, [%o0 + 5] %asi 38 + 10: stba %g7, [%o0 + 6] %asi 39 + ba,pt %xcc, 0f 40 + 11: stba %g3, [%o0 + 7] %asi 41 + 1: srl %g3, 16, %g7 42 + 12: stba %g2, [%o0] %asi 43 + srl %g3, 8, %g2 44 + 13: stba %g7, [%o0 + 1] %asi 45 + 14: stba %g2, [%o0 + 2] %asi 46 + ba,pt %xcc, 0f 47 + 15: stba %g3, [%o0 + 3] %asi 48 + 2: srl %g3, 8, %g2 49 + 16: stba %g2, [%o0] %asi 50 + 17: stba %g3, [%o0 + 1] %asi 51 + 0: 52 + wr %o4, 0x0, %asi 53 + retl 54 + nop 55 + .size __do_int_store, .-__do_int_store 56 + 57 + .section __ex_table 58 + .word 4b, kernel_unaligned_trap_fault 59 + .word 5b, kernel_unaligned_trap_fault 60 + .word 6b, kernel_unaligned_trap_fault 61 + .word 7b, kernel_unaligned_trap_fault 62 + .word 8b, kernel_unaligned_trap_fault 63 + .word 9b, kernel_unaligned_trap_fault 64 + .word 10b, kernel_unaligned_trap_fault 65 + .word 11b, kernel_unaligned_trap_fault 66 + .word 12b, kernel_unaligned_trap_fault 67 + .word 13b, kernel_unaligned_trap_fault 68 + .word 14b, kernel_unaligned_trap_fault 69 + .word 15b, kernel_unaligned_trap_fault 70 + .word 16b, kernel_unaligned_trap_fault 71 + .word 17b, kernel_unaligned_trap_fault 72 + .previous 73 + 74 + .globl do_int_load 75 + do_int_load: 76 + rd %asi, %o5 77 + wr %o4, 0, %asi 78 + cmp %o1, 8 79 + bge,pn %icc, 9f 80 + cmp %o1, 4 81 + be,pt %icc, 6f 82 + 4: lduba [%o2] %asi, %g2 83 + 5: lduba [%o2 + 1] %asi, %g3 84 + sll %g2, 8, %g2 85 + brz,pt %o3, 3f 86 + add %g2, %g3, %g2 87 + sllx %g2, 48, %g2 88 + srax %g2, 48, %g2 89 + 3: ba,pt %xcc, 0f 90 + stx %g2, [%o0] 91 + 6: lduba [%o2 + 1] %asi, %g3 92 + sll %g2, 24, %g2 93 + 7: lduba [%o2 + 2] %asi, %g7 94 + sll %g3, 16, %g3 95 + 8: lduba [%o2 + 3] %asi, %g1 96 + sll %g7, 8, %g7 97 + or %g2, %g3, %g2 98 + or %g7, %g1, %g7 99 + or %g2, %g7, %g2 100 + brnz,a,pt %o3, 3f 101 + sra %g2, 0, %g2 102 + 3: ba,pt %xcc, 0f 103 + stx %g2, [%o0] 104 + 9: lduba [%o2] %asi, %g2 105 + 10: lduba [%o2 + 1] %asi, %g3 106 + sllx %g2, 56, %g2 107 + 11: lduba [%o2 + 2] %asi, %g7 108 + sllx %g3, 48, %g3 109 + 12: lduba [%o2 + 3] %asi, %g1 110 + sllx %g7, 40, %g7 111 + sllx %g1, 32, %g1 112 + or %g2, %g3, %g2 113 + or %g7, %g1, %g7 114 + 13: lduba [%o2 + 4] %asi, %g3 115 + or %g2, %g7, %g7 116 + 14: lduba [%o2 + 5] %asi, %g1 117 + sllx %g3, 24, %g3 118 + 15: lduba [%o2 + 6] %asi, %g2 119 + sllx %g1, 16, %g1 120 + or %g7, %g3, %g7 121 + 16: lduba [%o2 + 7] %asi, %g3 122 + sllx %g2, 8, %g2 123 + or %g7, %g1, %g7 124 + or %g2, %g3, %g2 125 + or %g7, %g2, %g7 126 + cmp %o1, 8 127 + be,a,pt %icc, 0f 128 + stx %g7, [%o0] 129 + srlx %g7, 32, %g2 130 + sra %g7, 0, %g7 131 + stx %g2, [%o0] 132 + stx %g7, [%o0 + 8] 133 + 0: 134 + wr %o5, 0x0, %asi 135 + 
retl 136 + nop 137 + .size __do_int_load, .-__do_int_load 138 + 139 + .section __ex_table 140 + .word 4b, kernel_unaligned_trap_fault 141 + .word 5b, kernel_unaligned_trap_fault 142 + .word 6b, kernel_unaligned_trap_fault 143 + .word 7b, kernel_unaligned_trap_fault 144 + .word 8b, kernel_unaligned_trap_fault 145 + .word 9b, kernel_unaligned_trap_fault 146 + .word 10b, kernel_unaligned_trap_fault 147 + .word 11b, kernel_unaligned_trap_fault 148 + .word 12b, kernel_unaligned_trap_fault 149 + .word 13b, kernel_unaligned_trap_fault 150 + .word 14b, kernel_unaligned_trap_fault 151 + .word 15b, kernel_unaligned_trap_fault 152 + .word 16b, kernel_unaligned_trap_fault 153 + .previous
+56 -199
arch/sparc64/kernel/unaligned.c
··· 180 180 die_if_kernel(str, regs); 181 181 } 182 182 183 - #define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \ 184 - __asm__ __volatile__ ( \ 185 - "wr %4, 0, %%asi\n\t" \ 186 - "cmp %1, 8\n\t" \ 187 - "bge,pn %%icc, 9f\n\t" \ 188 - " cmp %1, 4\n\t" \ 189 - "be,pt %%icc, 6f\n" \ 190 - "4:\t" " lduba [%2] %%asi, %%l1\n" \ 191 - "5:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \ 192 - "sll %%l1, 8, %%l1\n\t" \ 193 - "brz,pt %3, 3f\n\t" \ 194 - " add %%l1, %%l2, %%l1\n\t" \ 195 - "sllx %%l1, 48, %%l1\n\t" \ 196 - "srax %%l1, 48, %%l1\n" \ 197 - "3:\t" "ba,pt %%xcc, 0f\n\t" \ 198 - " stx %%l1, [%0]\n" \ 199 - "6:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \ 200 - "sll %%l1, 24, %%l1\n" \ 201 - "7:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \ 202 - "sll %%l2, 16, %%l2\n" \ 203 - "8:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \ 204 - "sll %%g7, 8, %%g7\n\t" \ 205 - "or %%l1, %%l2, %%l1\n\t" \ 206 - "or %%g7, %%g1, %%g7\n\t" \ 207 - "or %%l1, %%g7, %%l1\n\t" \ 208 - "brnz,a,pt %3, 3f\n\t" \ 209 - " sra %%l1, 0, %%l1\n" \ 210 - "3:\t" "ba,pt %%xcc, 0f\n\t" \ 211 - " stx %%l1, [%0]\n" \ 212 - "9:\t" "lduba [%2] %%asi, %%l1\n" \ 213 - "10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \ 214 - "sllx %%l1, 56, %%l1\n" \ 215 - "11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \ 216 - "sllx %%l2, 48, %%l2\n" \ 217 - "12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \ 218 - "sllx %%g7, 40, %%g7\n\t" \ 219 - "sllx %%g1, 32, %%g1\n\t" \ 220 - "or %%l1, %%l2, %%l1\n\t" \ 221 - "or %%g7, %%g1, %%g7\n" \ 222 - "13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \ 223 - "or %%l1, %%g7, %%g7\n" \ 224 - "14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \ 225 - "sllx %%l2, 24, %%l2\n" \ 226 - "15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \ 227 - "sllx %%g1, 16, %%g1\n\t" \ 228 - "or %%g7, %%l2, %%g7\n" \ 229 - "16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \ 230 - "sllx %%l1, 8, %%l1\n\t" \ 231 - "or %%g7, %%g1, %%g7\n\t" \ 232 - "or %%l1, %%l2, %%l1\n\t" \ 233 - "or %%g7, %%l1, %%g7\n\t" \ 234 - "cmp %1, 8\n\t" \ 235 - "be,a,pt %%icc, 0f\n\t" \ 236 - " stx %%g7, [%0]\n\t" \ 237 - "srlx %%g7, 32, %%l1\n\t" \ 238 - "sra %%g7, 0, %%g7\n\t" \ 239 - "stx %%l1, [%0]\n\t" \ 240 - "stx %%g7, [%0 + 8]\n" \ 241 - "0:\n\t" \ 242 - "wr %%g0, %5, %%asi\n\n\t" \ 243 - ".section __ex_table\n\t" \ 244 - ".word 4b, " #errh "\n\t" \ 245 - ".word 5b, " #errh "\n\t" \ 246 - ".word 6b, " #errh "\n\t" \ 247 - ".word 7b, " #errh "\n\t" \ 248 - ".word 8b, " #errh "\n\t" \ 249 - ".word 9b, " #errh "\n\t" \ 250 - ".word 10b, " #errh "\n\t" \ 251 - ".word 11b, " #errh "\n\t" \ 252 - ".word 12b, " #errh "\n\t" \ 253 - ".word 13b, " #errh "\n\t" \ 254 - ".word 14b, " #errh "\n\t" \ 255 - ".word 15b, " #errh "\n\t" \ 256 - ".word 16b, " #errh "\n\n\t" \ 257 - ".previous\n\t" \ 258 - : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \ 259 - "r" (asi), "i" (ASI_AIUS) \ 260 - : "l1", "l2", "g7", "g1", "cc"); \ 261 - }) 183 + extern void do_int_load(unsigned long *dest_reg, int size, 184 + unsigned long *saddr, int is_signed, int asi); 262 185 263 - #define store_common(dst_addr, size, src_val, asi, errh) ({ \ 264 - __asm__ __volatile__ ( \ 265 - "wr %3, 0, %%asi\n\t" \ 266 - "ldx [%2], %%l1\n" \ 267 - "cmp %1, 2\n\t" \ 268 - "be,pn %%icc, 2f\n\t" \ 269 - " cmp %1, 4\n\t" \ 270 - "be,pt %%icc, 1f\n\t" \ 271 - " srlx %%l1, 24, %%l2\n\t" \ 272 - "srlx %%l1, 56, %%g1\n\t" \ 273 - "srlx %%l1, 48, %%g7\n" \ 274 - "4:\t" "stba %%g1, [%0] %%asi\n\t" \ 275 - "srlx %%l1, 40, %%g1\n" \ 276 - "5:\t" "stba %%g7, [%0 + 1] %%asi\n\t" \ 277 - "srlx %%l1, 32, %%g7\n" \ 278 - "6:\t" "stba %%g1, [%0 + 2] %%asi\n" 
\ 279 - "7:\t" "stba %%g7, [%0 + 3] %%asi\n\t" \ 280 - "srlx %%l1, 16, %%g1\n" \ 281 - "8:\t" "stba %%l2, [%0 + 4] %%asi\n\t" \ 282 - "srlx %%l1, 8, %%g7\n" \ 283 - "9:\t" "stba %%g1, [%0 + 5] %%asi\n" \ 284 - "10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \ 285 - "ba,pt %%xcc, 0f\n" \ 286 - "11:\t" " stba %%l1, [%0 + 7] %%asi\n" \ 287 - "1:\t" "srl %%l1, 16, %%g7\n" \ 288 - "12:\t" "stba %%l2, [%0] %%asi\n\t" \ 289 - "srl %%l1, 8, %%l2\n" \ 290 - "13:\t" "stba %%g7, [%0 + 1] %%asi\n" \ 291 - "14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \ 292 - "ba,pt %%xcc, 0f\n" \ 293 - "15:\t" " stba %%l1, [%0 + 3] %%asi\n" \ 294 - "2:\t" "srl %%l1, 8, %%l2\n" \ 295 - "16:\t" "stba %%l2, [%0] %%asi\n" \ 296 - "17:\t" "stba %%l1, [%0 + 1] %%asi\n" \ 297 - "0:\n\t" \ 298 - "wr %%g0, %4, %%asi\n\n\t" \ 299 - ".section __ex_table\n\t" \ 300 - ".word 4b, " #errh "\n\t" \ 301 - ".word 5b, " #errh "\n\t" \ 302 - ".word 6b, " #errh "\n\t" \ 303 - ".word 7b, " #errh "\n\t" \ 304 - ".word 8b, " #errh "\n\t" \ 305 - ".word 9b, " #errh "\n\t" \ 306 - ".word 10b, " #errh "\n\t" \ 307 - ".word 11b, " #errh "\n\t" \ 308 - ".word 12b, " #errh "\n\t" \ 309 - ".word 13b, " #errh "\n\t" \ 310 - ".word 14b, " #errh "\n\t" \ 311 - ".word 15b, " #errh "\n\t" \ 312 - ".word 16b, " #errh "\n\t" \ 313 - ".word 17b, " #errh "\n\n\t" \ 314 - ".previous\n\t" \ 315 - : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\ 316 - : "l1", "l2", "g7", "g1", "cc"); \ 317 - }) 186 + extern void __do_int_store(unsigned long *dst_addr, int size, 187 + unsigned long *src_val, int asi); 318 188 319 - #define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \ 320 - unsigned long zero = 0; \ 321 - unsigned long *src_val = &zero; \ 322 - \ 323 - if (size == 16) { \ 324 - size = 8; \ 325 - zero = (((long)(reg_num ? \ 326 - (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \ 327 - (unsigned)fetch_reg(reg_num + 1, regs); \ 328 - } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \ 329 - store_common(dst_addr, size, src_val, asi, errh); \ 330 - }) 189 + static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, 190 + struct pt_regs *regs, int asi) 191 + { 192 + unsigned long zero = 0; 193 + unsigned long *src_val = &zero; 331 194 332 - extern void smp_capture(void); 333 - extern void smp_release(void); 334 - 335 - #define do_atomic(srcdest_reg, mem, errh) ({ \ 336 - unsigned long flags, tmp; \ 337 - \ 338 - smp_capture(); \ 339 - local_irq_save(flags); \ 340 - tmp = *srcdest_reg; \ 341 - do_integer_load(srcdest_reg, 4, mem, 0, errh); \ 342 - store_common(mem, 4, &tmp, errh); \ 343 - local_irq_restore(flags); \ 344 - smp_release(); \ 345 - }) 195 + if (size == 16) { 196 + size = 8; 197 + zero = (((long)(reg_num ? 
198 + (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | 199 + (unsigned)fetch_reg(reg_num + 1, regs); 200 + } else if (reg_num) { 201 + src_val = fetch_reg_addr(reg_num, regs); 202 + } 203 + __do_int_store(dst_addr, size, src_val, asi); 204 + } 346 205 347 206 static inline void advance(struct pt_regs *regs) 348 207 { ··· 223 364 return !floating_point_load_or_store_p(insn); 224 365 } 225 366 226 - void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault"); 227 - 228 - void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) 367 + void kernel_mna_trap_fault(void) 229 368 { 230 - unsigned long g2 = regs->u_regs [UREG_G2]; 369 + struct pt_regs *regs = current_thread_info()->kern_una_regs; 370 + unsigned int insn = current_thread_info()->kern_una_insn; 371 + unsigned long g2 = regs->u_regs[UREG_G2]; 231 372 unsigned long fixup = search_extables_range(regs->tpc, &g2); 232 373 233 374 if (!fixup) { 234 - unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); 375 + unsigned long address; 376 + 377 + address = compute_effective_address(regs, insn, 378 + ((insn >> 25) & 0x1f)); 235 379 if (address < PAGE_SIZE) { 236 - printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler"); 380 + printk(KERN_ALERT "Unable to handle kernel NULL " 381 + "pointer dereference in mna handler"); 237 382 } else 238 - printk(KERN_ALERT "Unable to handle kernel paging request in mna handler"); 383 + printk(KERN_ALERT "Unable to handle kernel paging " 384 + "request in mna handler"); 239 385 printk(KERN_ALERT " at virtual address %016lx\n",address); 240 - printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n", 386 + printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n", 241 387 (current->mm ? CTX_HWBITS(current->mm->context) : 242 388 CTX_HWBITS(current->active_mm->context))); 243 - printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n", 389 + printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n", 244 390 (current->mm ? 
(unsigned long) current->mm->pgd : 245 391 (unsigned long) current->active_mm->pgd)); 246 392 die_if_kernel("Oops", regs); ··· 264 400 enum direction dir = decode_direction(insn); 265 401 int size = decode_access_size(insn); 266 402 403 + current_thread_info()->kern_una_regs = regs; 404 + current_thread_info()->kern_una_insn = insn; 405 + 267 406 if (!ok_for_kernel(insn) || dir == both) { 268 - printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n", 269 - regs->tpc); 270 - unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs); 407 + printk("Unsupported unaligned load/store trap for kernel " 408 + "at <%016lx>.\n", regs->tpc); 409 + unaligned_panic("Kernel does fpu/atomic " 410 + "unaligned load/store.", regs); 271 411 272 - __asm__ __volatile__ ("\n" 273 - "kernel_unaligned_trap_fault:\n\t" 274 - "mov %0, %%o0\n\t" 275 - "call kernel_mna_trap_fault\n\t" 276 - " mov %1, %%o1\n\t" 277 - : 278 - : "r" (regs), "r" (insn) 279 - : "o0", "o1", "o2", "o3", "o4", "o5", "o7", 280 - "g1", "g2", "g3", "g4", "g7", "cc"); 412 + kernel_mna_trap_fault(); 281 413 } else { 282 - unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); 414 + unsigned long addr; 283 415 416 + addr = compute_effective_address(regs, insn, 417 + ((insn >> 25) & 0x1f)); 284 418 #ifdef DEBUG_MNA 285 - printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n", 286 - regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]); 419 + printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] " 420 + "retpc[%016lx]\n", 421 + regs->tpc, dirstrings[dir], addr, size, 422 + regs->u_regs[UREG_RETPC]); 287 423 #endif 288 424 switch (dir) { 289 425 case load: 290 - do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs), 291 - size, (unsigned long *) addr, 292 - decode_signedness(insn), decode_asi(insn, regs), 293 - kernel_unaligned_trap_fault); 426 + do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), 427 + size, (unsigned long *) addr, 428 + decode_signedness(insn), 429 + decode_asi(insn, regs)); 294 430 break; 295 431 296 432 case store: 297 - do_integer_store(((insn>>25)&0x1f), size, 298 - (unsigned long *) addr, regs, 299 - decode_asi(insn, regs), 300 - kernel_unaligned_trap_fault); 433 + do_int_store(((insn>>25)&0x1f), size, 434 + (unsigned long *) addr, regs, 435 + decode_asi(insn, regs)); 301 436 break; 302 - #if 0 /* unsupported */ 303 - case both: 304 - do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs), 305 - (unsigned long *) addr, 306 - kernel_unaligned_trap_fault); 307 - break; 308 - #endif 437 + 309 438 default: 310 439 panic("Impossible kernel unaligned trap."); 311 440 /* Not reached... */
+27 -9
arch/sparc64/kernel/us2e_cpufreq.c
··· 88 88 { 89 89 unsigned long old_refr_count, refr_count, mctrl; 90 90 91 - 92 91 refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); 93 92 refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); 94 93 ··· 229 230 return ret; 230 231 } 231 232 233 + static unsigned int us2e_freq_get(unsigned int cpu) 234 + { 235 + cpumask_t cpus_allowed; 236 + unsigned long clock_tick, estar; 237 + 238 + if (!cpu_online(cpu)) 239 + return 0; 240 + 241 + cpus_allowed = current->cpus_allowed; 242 + set_cpus_allowed(current, cpumask_of_cpu(cpu)); 243 + 244 + clock_tick = sparc64_get_clock_tick(cpu) / 1000; 245 + estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); 246 + 247 + set_cpus_allowed(current, cpus_allowed); 248 + 249 + return clock_tick / estar_to_divisor(estar); 250 + } 251 + 232 252 static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) 233 253 { 234 254 unsigned long new_bits, new_freq; ··· 261 243 cpus_allowed = current->cpus_allowed; 262 244 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 263 245 264 - new_freq = clock_tick = sparc64_get_clock_tick(cpu); 246 + new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; 265 247 new_bits = index_to_estar_mode(index); 266 248 divisor = index_to_divisor(index); 267 249 new_freq /= divisor; ··· 276 258 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 277 259 278 260 if (old_divisor != divisor) 279 - us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor); 261 + us2e_transition(estar, new_bits, clock_tick * 1000, 262 + old_divisor, divisor); 280 263 281 264 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 282 265 ··· 291 272 unsigned int new_index = 0; 292 273 293 274 if (cpufreq_frequency_table_target(policy, 294 - &us2e_freq_table[policy->cpu].table[0], 295 - target_freq, 296 - relation, 297 - &new_index)) 275 + &us2e_freq_table[policy->cpu].table[0], 276 + target_freq, relation, &new_index)) 298 277 return -EINVAL; 299 278 300 279 us2e_set_cpu_divider_index(policy->cpu, new_index); ··· 309 292 static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) 310 293 { 311 294 unsigned int cpu = policy->cpu; 312 - unsigned long clock_tick = sparc64_get_clock_tick(cpu); 295 + unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; 313 296 struct cpufreq_frequency_table *table = 314 297 &us2e_freq_table[cpu].table[0]; 315 298 ··· 368 351 memset(us2e_freq_table, 0, 369 352 (NR_CPUS * sizeof(struct us2e_freq_percpu_info))); 370 353 354 + driver->init = us2e_freq_cpu_init; 371 355 driver->verify = us2e_freq_verify; 372 356 driver->target = us2e_freq_target; 373 - driver->init = us2e_freq_cpu_init; 357 + driver->get = us2e_freq_get; 374 358 driver->exit = us2e_freq_cpu_exit; 375 359 driver->owner = THIS_MODULE, 376 360 strcpy(driver->name, "UltraSPARC-IIe");
+25 -4
arch/sparc64/kernel/us3_cpufreq.c
··· 56 56 57 57 static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) 58 58 { 59 - unsigned long clock_tick = sparc64_get_clock_tick(cpu); 59 + unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; 60 60 unsigned long ret; 61 61 62 62 switch (safari_cfg & SAFARI_CFG_DIV_MASK) { ··· 76 76 return ret; 77 77 } 78 78 79 + static unsigned int us3_freq_get(unsigned int cpu) 80 + { 81 + cpumask_t cpus_allowed; 82 + unsigned long reg; 83 + unsigned int ret; 84 + 85 + if (!cpu_online(cpu)) 86 + return 0; 87 + 88 + cpus_allowed = current->cpus_allowed; 89 + set_cpus_allowed(current, cpumask_of_cpu(cpu)); 90 + 91 + reg = read_safari_cfg(); 92 + ret = get_current_freq(cpu, reg); 93 + 94 + set_cpus_allowed(current, cpus_allowed); 95 + 96 + return ret; 97 + } 98 + 79 99 static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) 80 100 { 81 101 unsigned long new_bits, new_freq, reg; ··· 108 88 cpus_allowed = current->cpus_allowed; 109 89 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 110 90 111 - new_freq = sparc64_get_clock_tick(cpu); 91 + new_freq = sparc64_get_clock_tick(cpu) / 1000; 112 92 switch (index) { 113 93 case 0: 114 94 new_bits = SAFARI_CFG_DIV_1; ··· 170 150 static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) 171 151 { 172 152 unsigned int cpu = policy->cpu; 173 - unsigned long clock_tick = sparc64_get_clock_tick(cpu); 153 + unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; 174 154 struct cpufreq_frequency_table *table = 175 155 &us3_freq_table[cpu].table[0]; 176 156 ··· 226 206 memset(us3_freq_table, 0, 227 207 (NR_CPUS * sizeof(struct us3_freq_percpu_info))); 228 208 209 + driver->init = us3_freq_cpu_init; 229 210 driver->verify = us3_freq_verify; 230 211 driver->target = us3_freq_target; 231 - driver->init = us3_freq_cpu_init; 212 + driver->get = us3_freq_get; 232 213 driver->exit = us3_freq_cpu_exit; 233 214 driver->owner = THIS_MODULE, 234 215 strcpy(driver->name, "UltraSPARC-III");
+5 -1
arch/um/kernel/skas/process.c
@@ 61 @@
 
 		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
 	} while((n >= 0) && WIFSTOPPED(status) &&
-		(WSTOPSIG(status) == SIGVTALRM));
+		((WSTOPSIG(status) == SIGVTALRM) ||
+		 /* running UML inside a detached screen can cause
+		  * SIGWINCHes
+		  */
+		 (WSTOPSIG(status) == SIGWINCH)));
 
 	if((n < 0) || !WIFSTOPPED(status) ||
 	   (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
-1
arch/um/os-Linux/elf_aux.c
@@ 9 @@
  */
 #include <elf.h>
 #include <stddef.h>
-#include <asm/elf.h>
 #include "init.h"
 #include "elf_user.h"
 #include "mem_user.h"
+8 -7
arch/x86_64/kernel/smpboot.c
@@ 492 @@
 	 */
 	set_cpu_sibling_map(smp_processor_id());
 
+	/*
+	 * Wait for TSC sync to not schedule things before.
+	 * We still process interrupts, which could see an inconsistent
+	 * time in that window unfortunately.
+	 * Do this here because TSC sync has global unprotected state.
+	 */
+	tsc_sync_wait();
+
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
@@ 516 @@
 	cpu_set(smp_processor_id(), cpu_online_map);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	unlock_ipi_call_lock();
-
-	mb();
-
-	/* Wait for TSC sync to not schedule things before.
-	   We still process interrupts, which could see an inconsistent
-	   time in that window unfortunately. */
-	tsc_sync_wait();
 
 	cpu_idle();
 }
+1 -3
arch/x86_64/mm/fault.c
@@ 211 @@
 {
 	if (tsk->pid == 1)
 		return 1;
-	/* Warn for strace, but not for gdb */
-	if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
-	    (tsk->ptrace & PT_PTRACED))
+	if (tsk->ptrace & PT_PTRACED)
 		return 0;
 	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
 	       (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+1
drivers/ide/Kconfig
@@ 764 @@
 config BLK_DEV_IDEDMA_PMAC
 	bool "PowerMac IDE DMA support"
 	depends on BLK_DEV_IDE_PMAC
+	select BLK_DEV_IDEDMA_PCI
 	help
 	  This option allows the driver for the built-in IDE controller on
 	  Power Macintoshes and PowerBooks to use DMA (direct memory access)
+1 -1
drivers/ide/ide-floppy.c
@@ 317 @@
 	unsigned long flags;
 } idefloppy_floppy_t;
 
-#define IDEFLOPPY_TICKS_DELAY	3	/* default delay for ZIP 100 */
+#define IDEFLOPPY_TICKS_DELAY	HZ/20	/* default delay for ZIP 100 (50ms) */
 
 /*
  * Floppy flag bits values.
+7
drivers/ide/pci/generic.c
@@ 173 @@
 		.channels	= 2,
 		.autodma	= NOAUTODMA,
 		.bootable	= ON_BOARD,
+	},{	/* 14 */
+		.name		= "Revolution",
+		.init_hwif	= init_hwif_generic,
+		.channels	= 2,
+		.autodma	= AUTODMA,
+		.bootable	= OFF_BOARD,
 	}
 };
 
@@ 237 @@
 	{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11},
 	{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12},
 	{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13},
+	{ PCI_VENDOR_ID_NETCELL,PCI_DEVICE_ID_REVOLUTION, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14},
 	/* Must come last. If you add entries adjust this table appropriately and the init_one code */
 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0},
 	{ 0, },
+23
drivers/ide/pci/serverworks.c
··· 21 21 * 22 22 * CSB6: `Champion South Bridge' IDE Interface (optional: third channel) 23 23 * 24 + * HT1000: AKA BCM5785 - Hypertransport Southbridge for Opteron systems. IDE 25 + * controller same as the CSB6. Single channel ATA100 only. 26 + * 24 27 * Documentation: 25 28 * Available under NDA only. Errata info very hard to get. 26 29 * ··· 74 71 if (!svwks_revision) 75 72 pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision); 76 73 74 + if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) 75 + return 2; 77 76 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { 78 77 u32 reg = 0; 79 78 if (isa_dev) ··· 114 109 case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE: 115 110 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE: 116 111 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2: 112 + case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE: 117 113 return 1; 118 114 default: 119 115 break; ··· 444 438 btr |= (svwks_revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2; 445 439 pci_write_config_byte(dev, 0x5A, btr); 446 440 } 441 + /* Setup HT1000 SouthBridge Controller - Single Channel Only */ 442 + else if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) { 443 + pci_read_config_byte(dev, 0x5A, &btr); 444 + btr &= ~0x40; 445 + btr |= 0x3; 446 + pci_write_config_byte(dev, 0x5A, btr); 447 + } 447 448 448 449 return (dev->irq) ? dev->irq : 0; 449 450 } ··· 642 629 .channels = 1, /* 2 */ 643 630 .autodma = AUTODMA, 644 631 .bootable = ON_BOARD, 632 + },{ /* 4 */ 633 + .name = "SvrWks HT1000", 634 + .init_setup = init_setup_svwks, 635 + .init_chipset = init_chipset_svwks, 636 + .init_hwif = init_hwif_svwks, 637 + .init_dma = init_dma_svwks, 638 + .channels = 1, /* 2 */ 639 + .autodma = AUTODMA, 640 + .bootable = ON_BOARD, 645 641 } 646 642 }; 647 643 ··· 675 653 { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 676 654 { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 677 655 { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3}, 656 + { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, 678 657 { 0, }, 679 658 }; 680 659 MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);
+1 -1
drivers/ide/ppc/pmac.c
@@ 1664 @@
 };
 
 static struct pci_device_id pmac_ide_pci_match[] = {
-	{ PCI_VENDOR_ID_APPLE, PCI_DEVIEC_ID_APPLE_UNI_N_ATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_ATA,
+1
drivers/ide/setup-pci.c
@@ 229 @@
 	case PCI_DEVICE_ID_AMD_VIPER_7409:
 	case PCI_DEVICE_ID_CMD_643:
 	case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+	case PCI_DEVICE_ID_REVOLUTION:
 		simplex_stat = hwif->INB(dma_base + 2);
 		hwif->OUTB((simplex_stat&0x60),(dma_base + 2));
 		simplex_stat = hwif->INB(dma_base + 2);
+3
drivers/md/md.c
@@ 623 @@
 	mddev->raid_disks = sb->raid_disks;
 	mddev->size = sb->size;
 	mddev->events = md_event(sb);
+	mddev->bitmap_offset = 0;
 
 	if (sb->state & (1<<MD_SB_CLEAN))
 		mddev->recovery_cp = MaxSector;
@@ 939 @@
 	mddev->raid_disks = le32_to_cpu(sb->raid_disks);
 	mddev->size = le64_to_cpu(sb->size)/2;
 	mddev->events = le64_to_cpu(sb->events);
+	mddev->bitmap_offset = 0;
 
 	mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
 	memcpy(mddev->uuid, sb->set_uuid, 16);
@@ 1826 @@
 		fput(mddev->bitmap_file);
 		mddev->bitmap_file = NULL;
 	}
+	mddev->bitmap_offset = 0;
 
 	/*
 	 * Free resources if final stop
+7
drivers/net/8139cp.c
@@ 1897 @@
 {
 	struct net_device *dev;
 	struct cp_private *cp;
+	unsigned long flags;
 
 	dev = pci_get_drvdata (pdev);
 	cp = netdev_priv(dev);
@@ 1911 @@
 
 	cp_init_hw (cp);
 	netif_start_queue (dev);
+
+	spin_lock_irqsave (&cp->lock, flags);
+
+	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+
+	spin_unlock_irqrestore (&cp->lock, flags);
 
 	return 0;
 }
+26 -26
drivers/net/dm9000.c
··· 48 48 * net_device_stats 49 49 * * introduced tx_timeout function 50 50 * * reworked locking 51 + * 52 + * 01-Jul-2005 Ben Dooks <ben@simtec.co.uk> 53 + * * fixed spinlock call without pointer 54 + * * ensure spinlock is initialised 51 55 */ 52 56 53 57 #include <linux/module.h> ··· 152 148 static int dm9000_open(struct net_device *); 153 149 static int dm9000_start_xmit(struct sk_buff *, struct net_device *); 154 150 static int dm9000_stop(struct net_device *); 155 - static int dm9000_do_ioctl(struct net_device *, struct ifreq *, int); 156 151 157 152 158 153 static void dm9000_timer(unsigned long); ··· 325 322 326 323 /* Save previous register address */ 327 324 reg_save = readb(db->io_addr); 328 - spin_lock_irqsave(db->lock,flags); 325 + spin_lock_irqsave(&db->lock,flags); 329 326 330 327 netif_stop_queue(dev); 331 328 dm9000_reset(db); ··· 336 333 337 334 /* Restore previous register address */ 338 335 writeb(reg_save, db->io_addr); 339 - spin_unlock_irqrestore(db->lock,flags); 336 + spin_unlock_irqrestore(&db->lock,flags); 340 337 } 341 338 342 339 ··· 390 387 int i; 391 388 u32 id_val; 392 389 393 - printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME); 394 - 395 390 /* Init network device */ 396 391 ndev = alloc_etherdev(sizeof (struct board_info)); 397 392 if (!ndev) { ··· 405 404 /* setup board info structure */ 406 405 db = (struct board_info *) ndev->priv; 407 406 memset(db, 0, sizeof (*db)); 407 + 408 + spin_lock_init(&db->lock); 408 409 409 410 if (pdev->num_resources < 2) { 410 411 ret = -ENODEV; ··· 544 541 ndev->stop = &dm9000_stop; 545 542 ndev->get_stats = &dm9000_get_stats; 546 543 ndev->set_multicast_list = &dm9000_hash_table; 547 - ndev->do_ioctl = &dm9000_do_ioctl; 548 544 549 545 #ifdef DM9000_PROGRAM_EEPROM 550 546 program_eeprom(db); ··· 614 612 615 613 /* set and active a timer process */ 616 614 init_timer(&db->timer); 617 - db->timer.expires = DM9000_TIMER_WUT * 2; 615 + db->timer.expires = DM9000_TIMER_WUT; 618 616 db->timer.data = (unsigned long) dev; 619 617 db->timer.function = &dm9000_timer; 620 618 add_timer(&db->timer); ··· 847 845 return &db->stats; 848 846 } 849 847 850 - /* 851 - * Process the upper socket ioctl command 852 - */ 853 - static int 854 - dm9000_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 855 - { 856 - PRINTK1("entering %s\n",__FUNCTION__); 857 - return 0; 858 - } 859 848 860 849 /* 861 850 * A periodic timer routine ··· 857 864 { 858 865 struct net_device *dev = (struct net_device *) data; 859 866 board_info_t *db = (board_info_t *) dev->priv; 860 - u8 reg_save; 861 - unsigned long flags; 862 867 863 868 PRINTK3("dm9000_timer()\n"); 864 869 865 - spin_lock_irqsave(db->lock,flags); 866 - /* Save previous register address */ 867 - reg_save = readb(db->io_addr); 868 - 869 870 mii_check_media(&db->mii, netif_msg_link(db), 0); 870 - 871 - /* Restore previous register address */ 872 - writeb(reg_save, db->io_addr); 873 - spin_unlock_irqrestore(db->lock,flags); 874 871 875 872 /* Set timer again */ 876 873 db->timer.expires = DM9000_TIMER_WUT; ··· 1081 1098 { 1082 1099 board_info_t *db = (board_info_t *) dev->priv; 1083 1100 unsigned long flags; 1101 + unsigned int reg_save; 1084 1102 int ret; 1085 1103 1086 1104 spin_lock_irqsave(&db->lock,flags); 1105 + 1106 + /* Save previous register address */ 1107 + reg_save = readb(db->io_addr); 1108 + 1087 1109 /* Fill the phyxcer register into REG_0C */ 1088 1110 iow(db, DM9000_EPAR, DM9000_PHY | reg); 1089 1111 ··· 1098 1110 1099 1111 /* The read data keeps on REG_0D & REG_0E */ 1100 
1112 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); 1113 + 1114 + /* restore the previous address */ 1115 + writeb(reg_save, db->io_addr); 1101 1116 1102 1117 spin_unlock_irqrestore(&db->lock,flags); 1103 1118 ··· 1115 1124 { 1116 1125 board_info_t *db = (board_info_t *) dev->priv; 1117 1126 unsigned long flags; 1127 + unsigned long reg_save; 1118 1128 1119 1129 spin_lock_irqsave(&db->lock,flags); 1130 + 1131 + /* Save previous register address */ 1132 + reg_save = readb(db->io_addr); 1120 1133 1121 1134 /* Fill the phyxcer register into REG_0C */ 1122 1135 iow(db, DM9000_EPAR, DM9000_PHY | reg); ··· 1132 1137 iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */ 1133 1138 udelay(500); /* Wait write complete */ 1134 1139 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ 1140 + 1141 + /* restore the previous address */ 1142 + writeb(reg_save, db->io_addr); 1135 1143 1136 1144 spin_unlock_irqrestore(&db->lock,flags); 1137 1145 } ··· 1200 1202 static int __init 1201 1203 dm9000_init(void) 1202 1204 { 1205 + printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME); 1206 + 1203 1207 return driver_register(&dm9000_driver); /* search board and register */ 1204 1208 } 1205 1209
+4 -4
drivers/net/ioc3-eth.c
@@ 499 @@
 	ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
 	while (ioc3_r_micr() & MICR_BUSY);
 
-	return ioc3_r_micr() & MIDR_DATA_MASK;
+	return ioc3_r_midr_r() & MIDR_DATA_MASK;
 }
 
 static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
@@ 1291 @@
 	dev->features = NETIF_F_IP_CSUM;
 #endif
 
-	ioc3_setup_duplex(ip);
 	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
 	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
 
@@ 1299 @@
 		goto out_stop;
 
 	mii_check_media(&ip->mii, 1, 1);
+	ioc3_setup_duplex(ip);
 
 	vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
 	model = (sw_physid2 >> 4) & 0x3f;
@@ 1524 @@
 			struct ethtool_drvinfo *info)
 {
 	struct ioc3_private *ip = netdev_priv(dev);
-	
+
 	strcpy (info->driver, IOC3_NAME);
 	strcpy (info->version, IOC3_VERSION);
 	strcpy (info->bus_info, pci_name(ip->pdev));
@@ 1550 @@
 	spin_lock_irq(&ip->ioc3_lock);
 	rc = mii_ethtool_sset(&ip->mii, cmd);
 	spin_unlock_irq(&ip->ioc3_lock);
-	
+
 	return rc;
 }
+1 -1
drivers/net/loopback.c
@@ 214 @@
 	.ethtool_ops = &loopback_ethtool_ops,
 };
 
-/* Setup and register the of the LOOPBACK device. */
+/* Setup and register the loopback device. */
 int __init loopback_init(void)
 {
 	struct net_device_stats *stats;
+4 -2
drivers/net/tg3.c
@@ 66 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.35"
-#define DRV_MODULE_RELDATE	"August 6, 2005"
+#define DRV_MODULE_VERSION	"3.36"
+#define DRV_MODULE_RELDATE	"August 19, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ 8970 @@
 		tp->phy_id = hw_phy_id;
 		if (hw_phy_id_masked == PHY_ID_BCM8002)
 			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+		else
+			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
 	} else {
 		if (tp->phy_id != PHY_ID_INVALID) {
 			/* Do nothing, phy ID already set up in
+1 -1
drivers/video/radeonfb.c
@@ 80 @@
 #include <video/radeon.h>
 #include <linux/radeonfb.h>
 
-#define DEBUG	1
+#define DEBUG	0
 
 #if DEBUG
 #define RTRACE		printk
+4 -4
fs/afs/mntpt.c
@@ 30 @@
 				       struct dentry *dentry,
 				       struct nameidata *nd);
 static int afs_mntpt_open(struct inode *inode, struct file *file);
-static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
 
 struct file_operations afs_mntpt_file_operations = {
 	.open		= afs_mntpt_open,
@@ 233 @@
 /*
  * follow a link from a mountpoint directory, thus causing it to be mounted
  */
-static int afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct vfsmount *newmnt;
 	struct dentry *old_dentry;
@@ 249 @@
 	newmnt = afs_mntpt_do_automount(dentry);
 	if (IS_ERR(newmnt)) {
 		path_release(nd);
-		return PTR_ERR(newmnt);
+		return (void *)newmnt;
 	}
 
 	old_dentry = nd->dentry;
@@ 267 @@
 	}
 
 	kleave(" = %d", err);
-	return err;
+	return ERR_PTR(err);
 } /* end afs_mntpt_follow_link() */
 
 /*****************************************************************************/
+3 -2
fs/autofs/symlink.c
@@ 12 @@
 
 #include "autofs_i.h"
 
-static int autofs_follow_link(struct dentry *dentry, struct nameidata *nd)
+/* Nothing to release.. */
+static void *autofs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	char *s=((struct autofs_symlink *)dentry->d_inode->u.generic_ip)->data;
 	nd_set_link(nd, s);
-	return 0;
+	return NULL;
 }
 
 struct inode_operations autofs_symlink_inode_operations = {
+2 -2
fs/autofs4/symlink.c
@@ 12 @@
 
 #include "autofs_i.h"
 
-static int autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct autofs_info *ino = autofs4_dentry_ino(dentry);
 	nd_set_link(nd, (char *)ino->u.symlink);
-	return 0;
+	return NULL;
 }
 
 struct inode_operations autofs4_symlink_inode_operations = {
+5 -5
fs/befs/linuxvfs.c
@@ 41 @@
 static void befs_destroy_inode(struct inode *inode);
 static int befs_init_inodecache(void);
 static void befs_destroy_inodecache(void);
-static int befs_follow_link(struct dentry *, struct nameidata *);
-static void befs_put_link(struct dentry *, struct nameidata *);
+static void *befs_follow_link(struct dentry *, struct nameidata *);
+static void befs_put_link(struct dentry *, struct nameidata *, void *);
 static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
 			char **out, int *out_len);
 static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ 461 @@
  * The data stream become link name. Unless the LONG_SYMLINK
  * flag is set.
  */
-static int
+static void *
 befs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
@@ 487 @@
 	}
 
 	nd_set_link(nd, link);
-	return 0;
+	return NULL;
 }
 
-static void befs_put_link(struct dentry *dentry, struct nameidata *nd)
+static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
 	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
 	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+2 -2
fs/cifs/cifsfs.h
@@ 83 @@
 extern struct dentry_operations cifs_dentry_ops;
 
 /* Functions related to symlinks */
-extern int cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry, struct nameidata *nd);
+extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
+extern void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *);
 extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
 			 int buflen);
 extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
+3 -3
fs/cifs/link.c
@@ 92 @@
 	return rc;
 }
 
-int
+void *
 cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
 {
 	struct inode *inode = direntry->d_inode;
@@ 148 @@
 out_no_free:
 	FreeXid(xid);
 	nd_set_link(nd, target_path);
-	return 0;
+	return NULL;	/* No cookie */
 }
 
 int
@@ 330 @@
 	return rc;
 }
 
-void cifs_put_link(struct dentry *direntry, struct nameidata *nd)
+void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
 {
 	char *p = nd_get_link(nd);
 	if (!IS_ERR(p))
+2 -2
fs/devfs/base.c
@@ 2491 @@
 	return 0;
 }	/* End Function devfs_mknod */
 
-static int devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *devfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct devfs_entry *p = get_devfs_entry_from_vfs_inode(dentry->d_inode);
 	nd_set_link(nd, p ? p->u.symlink.linkname : ERR_PTR(-ENODEV));
-	return 0;
+	return NULL;
 }	/* End Function devfs_follow_link */
 
 static struct inode_operations devfs_iops = {
+2 -2
fs/ext2/symlink.c
@@ 21 @@
 #include "xattr.h"
 #include <linux/namei.h>
 
-static int ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct ext2_inode_info *ei = EXT2_I(dentry->d_inode);
 	nd_set_link(nd, (char *)ei->i_data);
-	return 0;
+	return NULL;
 }
 
 struct inode_operations ext2_symlink_inode_operations = {
+2 -2
fs/ext3/symlink.c
@@ 23 @@
 #include <linux/namei.h>
 #include "xattr.h"
 
-static int ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
 	nd_set_link(nd, (char*)ei->i_data);
-	return 0;
+	return NULL;
 }
 
 struct inode_operations ext3_symlink_inode_operations = {
+3 -3
fs/freevxfs/vxfs_immed.c
@@ 38 @@
 #include "vxfs_inode.h"
 
 
-static int		vxfs_immed_follow_link(struct dentry *, struct nameidata *);
+static void *		vxfs_immed_follow_link(struct dentry *, struct nameidata *);
 
 static int		vxfs_immed_readpage(struct file *, struct page *);
 
@@ 72 @@
  * Returns:
  *   Zero on success, else a negative error code.
  */
-static int
+static void *
 vxfs_immed_follow_link(struct dentry *dp, struct nameidata *np)
 {
 	struct vxfs_inode_info	*vip = VXFS_INO(dp->d_inode);
 	nd_set_link(np, vip->vii_immed.vi_immed);
-	return 0;
+	return NULL;
 }
 
 /**
+2
fs/ioprio.c
@@ 62 @@
 
 			break;
 		case IOPRIO_CLASS_IDLE:
+			if (!capable(CAP_SYS_ADMIN))
+				return -EPERM;
 			break;
 		default:
 			return -EINVAL;
+9 -7
fs/jffs2/symlink.c
@@ 18 @@
 #include <linux/namei.h>
 #include "nodelist.h"
 
-static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
 
 struct inode_operations jffs2_symlink_inode_operations =
 {
@@ 27 @@
 	.setattr =	jffs2_setattr
 };
 
-static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+	char *p = (char *)f->dents;
 
 	/*
 	 * We don't acquire the f->sem mutex here since the only data we
@@ 46 @@
 	 * nd_set_link() call.
 	 */
 
-	if (!f->dents) {
+	if (!p) {
 		printk(KERN_ERR "jffs2_follow_link(): can't find symlink taerget\n");
-		return -EIO;
+		p = ERR_PTR(-EIO);
+	} else {
+		D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents));
 	}
-	D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents));
 
-	nd_set_link(nd, (char *)f->dents);
+	nd_set_link(nd, p);
 
 	/*
 	 * We unlock the f->sem mutex but VFS will use the f->dents string. This is safe
 	 * since the only way that may cause f->dents to be changed is iput() operation.
 	 * But VFS will not use f->dents after iput() has been called.
 	 */
-	return 0;
+	return NULL;
 }
 
+2 -2
fs/jfs/symlink.c
@@ 22 @@
 #include "jfs_inode.h"
 #include "jfs_xattr.h"
 
-static int jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	char *s = JFS_IP(dentry->d_inode)->i_inline;
 	nd_set_link(nd, s);
-	return 0;
+	return NULL;
 }
 
 struct inode_operations jfs_symlink_inode_operations = {
+21 -19
fs/namei.c
··· 501 501 static inline int __do_follow_link(struct path *path, struct nameidata *nd) 502 502 { 503 503 int error; 504 + void *cookie; 504 505 struct dentry *dentry = path->dentry; 505 506 506 507 touch_atime(path->mnt, dentry); ··· 509 508 510 509 if (path->mnt == nd->mnt) 511 510 mntget(path->mnt); 512 - error = dentry->d_inode->i_op->follow_link(dentry, nd); 513 - if (!error) { 511 + cookie = dentry->d_inode->i_op->follow_link(dentry, nd); 512 + error = PTR_ERR(cookie); 513 + if (!IS_ERR(cookie)) { 514 514 char *s = nd_get_link(nd); 515 + error = 0; 515 516 if (s) 516 517 error = __vfs_follow_link(nd, s); 517 518 if (dentry->d_inode->i_op->put_link) 518 - dentry->d_inode->i_op->put_link(dentry, nd); 519 + dentry->d_inode->i_op->put_link(dentry, nd, cookie); 519 520 } 520 521 dput(dentry); 521 522 mntput(path->mnt); ··· 2347 2344 int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen) 2348 2345 { 2349 2346 struct nameidata nd; 2350 - int res; 2347 + void *cookie; 2348 + 2351 2349 nd.depth = 0; 2352 - res = dentry->d_inode->i_op->follow_link(dentry, &nd); 2353 - if (!res) { 2354 - res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd)); 2350 + cookie = dentry->d_inode->i_op->follow_link(dentry, &nd); 2351 + if (!IS_ERR(cookie)) { 2352 + int res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd)); 2355 2353 if (dentry->d_inode->i_op->put_link) 2356 - dentry->d_inode->i_op->put_link(dentry, &nd); 2354 + dentry->d_inode->i_op->put_link(dentry, &nd, cookie); 2355 + cookie = ERR_PTR(res); 2357 2356 } 2358 - return res; 2357 + return PTR_ERR(cookie); 2359 2358 } 2360 2359 2361 2360 int vfs_follow_link(struct nameidata *nd, const char *link) ··· 2400 2395 return res; 2401 2396 } 2402 2397 2403 - int page_follow_link_light(struct dentry *dentry, struct nameidata *nd) 2398 + void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd) 2404 2399 { 2405 - struct page *page; 2400 + struct page *page = NULL; 2406 2401 nd_set_link(nd, page_getlink(dentry, &page)); 2407 - return 0; 2402 + return page; 2408 2403 } 2409 2404 2410 - void page_put_link(struct dentry *dentry, struct nameidata *nd) 2405 + void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 2411 2406 { 2412 - if (!IS_ERR(nd_get_link(nd))) { 2413 - struct page *page; 2414 - page = find_get_page(dentry->d_inode->i_mapping, 0); 2415 - if (!page) 2416 - BUG(); 2407 + struct page *page = cookie; 2408 + 2409 + if (page) { 2417 2410 kunmap(page); 2418 - page_cache_release(page); 2419 2411 page_cache_release(page); 2420 2412 } 2421 2413 }
+19 -9
fs/nfs/dir.c
··· 182 182 /* We requested READDIRPLUS, but the server doesn't grok it */ 183 183 if (error == -ENOTSUPP && desc->plus) { 184 184 NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS; 185 - NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS; 185 + clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode)); 186 186 desc->plus = 0; 187 187 goto again; 188 188 } 189 189 goto error; 190 190 } 191 191 SetPageUptodate(page); 192 - NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME; 192 + spin_lock(&inode->i_lock); 193 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; 194 + spin_unlock(&inode->i_lock); 193 195 /* Ensure consistent page alignment of the data. 194 196 * Note: assumes we have exclusive access to this mapping either 195 197 * through inode->i_sem or some other mechanism. ··· 464 462 page, 465 463 NFS_SERVER(inode)->dtsize, 466 464 desc->plus); 467 - NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME; 465 + spin_lock(&inode->i_lock); 466 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; 467 + spin_unlock(&inode->i_lock); 468 468 desc->page = page; 469 469 desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ 470 470 if (desc->error >= 0) { ··· 549 545 break; 550 546 } 551 547 if (res == -ETOOSMALL && desc->plus) { 552 - NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS; 548 + clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode)); 553 549 nfs_zap_caches(inode); 554 550 desc->plus = 0; 555 551 desc->entry->eof = 0; ··· 612 608 { 613 609 if (IS_ROOT(dentry)) 614 610 return 1; 615 - if ((NFS_FLAGS(dir) & NFS_INO_INVALID_ATTR) != 0 611 + if ((NFS_I(dir)->cache_validity & NFS_INO_INVALID_ATTR) != 0 616 612 || nfs_attribute_timeout(dir)) 617 613 return 0; 618 614 return nfs_verify_change_attribute(dir, (unsigned long)dentry->d_fsdata); ··· 939 935 error = nfs_revalidate_inode(NFS_SERVER(dir), dir); 940 936 if (error < 0) { 941 937 res = ERR_PTR(error); 938 + unlock_kernel(); 942 939 goto out; 943 940 } 944 941 ··· 1580 1575 1581 1576 int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res) 1582 1577 { 1583 - struct nfs_access_entry *cache = &NFS_I(inode)->cache_access; 1578 + struct nfs_inode *nfsi = NFS_I(inode); 1579 + struct nfs_access_entry *cache = &nfsi->cache_access; 1584 1580 1585 1581 if (cache->cred != cred 1586 1582 || time_after(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode)) 1587 - || (NFS_FLAGS(inode) & NFS_INO_INVALID_ACCESS)) 1583 + || (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)) 1588 1584 return -ENOENT; 1589 1585 memcpy(res, cache, sizeof(*res)); 1590 1586 return 0; ··· 1593 1587 1594 1588 void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set) 1595 1589 { 1596 - struct nfs_access_entry *cache = &NFS_I(inode)->cache_access; 1590 + struct nfs_inode *nfsi = NFS_I(inode); 1591 + struct nfs_access_entry *cache = &nfsi->cache_access; 1597 1592 1598 1593 if (cache->cred != set->cred) { 1599 1594 if (cache->cred) 1600 1595 put_rpccred(cache->cred); 1601 1596 cache->cred = get_rpccred(set->cred); 1602 1597 } 1603 - NFS_FLAGS(inode) &= ~NFS_INO_INVALID_ACCESS; 1598 + /* FIXME: replace current access_cache BKL reliance with inode->i_lock */ 1599 + spin_lock(&inode->i_lock); 1600 + nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS; 1601 + spin_unlock(&inode->i_lock); 1604 1602 cache->jiffies = set->jiffies; 1605 1603 cache->mask = set->mask; 1606 1604 }
+3 -2
fs/nfs/file.c
@@ 134 @@
  */
 static int nfs_revalidate_file(struct inode *inode, struct file *filp)
 {
+	struct nfs_inode *nfsi = NFS_I(inode);
 	int retval = 0;
 
-	if ((NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode))
+	if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode))
 		retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
 	nfs_revalidate_mapping(inode, filp->f_mapping);
 	return 0;
@@ 165 @@
 		goto force_reval;
 	if (nfsi->npages != 0)
 		return 0;
-	if (!(NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
+	if (!(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
 		return 0;
 force_reval:
 	return __nfs_revalidate_inode(server, inode);
+105 -57
fs/nfs/inode.c
··· 615 615 struct nfs_inode *nfsi = NFS_I(inode); 616 616 int mode = inode->i_mode; 617 617 618 + spin_lock(&inode->i_lock); 619 + 618 620 NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode); 619 621 NFS_ATTRTIMEO_UPDATE(inode) = jiffies; 620 622 621 623 memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); 622 624 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) 623 - nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; 625 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; 624 626 else 625 - nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; 627 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; 628 + 629 + spin_unlock(&inode->i_lock); 626 630 } 627 631 628 632 static void nfs_zap_acl_cache(struct inode *inode) ··· 636 632 clear_acl_cache = NFS_PROTO(inode)->clear_acl_cache; 637 633 if (clear_acl_cache != NULL) 638 634 clear_acl_cache(inode); 639 - NFS_I(inode)->flags &= ~NFS_INO_INVALID_ACL; 635 + spin_lock(&inode->i_lock); 636 + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL; 637 + spin_unlock(&inode->i_lock); 640 638 } 641 639 642 640 /* ··· 745 739 inode->i_fop = &nfs_dir_operations; 746 740 if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS) 747 741 && fattr->size <= NFS_LIMIT_READDIRPLUS) 748 - NFS_FLAGS(inode) |= NFS_INO_ADVISE_RDPLUS; 742 + set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode)); 749 743 } else if (S_ISLNK(inode->i_mode)) 750 744 inode->i_op = &nfs_symlink_inode_operations; 751 745 else ··· 847 841 inode->i_uid = attr->ia_uid; 848 842 if ((attr->ia_valid & ATTR_GID) != 0) 849 843 inode->i_gid = attr->ia_gid; 850 - NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 844 + spin_lock(&inode->i_lock); 845 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 846 + spin_unlock(&inode->i_lock); 851 847 } 852 848 if ((attr->ia_valid & ATTR_SIZE) != 0) { 853 849 inode->i_size = attr->ia_size; ··· 857 849 } 858 850 } 859 851 852 + static int nfs_wait_schedule(void *word) 853 + { 854 + if (signal_pending(current)) 855 + return -ERESTARTSYS; 856 + schedule(); 857 + return 0; 858 + } 859 + 860 860 /* 861 861 * Wait for the inode to get unlocked. 862 - * (Used for NFS_INO_LOCKED and NFS_INO_REVALIDATING). 
863 862 */ 864 - static int 865 - nfs_wait_on_inode(struct inode *inode, int flag) 863 + static int nfs_wait_on_inode(struct inode *inode) 866 864 { 867 865 struct rpc_clnt *clnt = NFS_CLIENT(inode); 868 866 struct nfs_inode *nfsi = NFS_I(inode); 869 - 867 + sigset_t oldmask; 870 868 int error; 871 - if (!(NFS_FLAGS(inode) & flag)) 872 - return 0; 869 + 873 870 atomic_inc(&inode->i_count); 874 - error = nfs_wait_event(clnt, nfsi->nfs_i_wait, 875 - !(NFS_FLAGS(inode) & flag)); 871 + rpc_clnt_sigmask(clnt, &oldmask); 872 + error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING, 873 + nfs_wait_schedule, TASK_INTERRUPTIBLE); 874 + rpc_clnt_sigunmask(clnt, &oldmask); 876 875 iput(inode); 876 + 877 877 return error; 878 + } 879 + 880 + static void nfs_wake_up_inode(struct inode *inode) 881 + { 882 + struct nfs_inode *nfsi = NFS_I(inode); 883 + 884 + clear_bit(NFS_INO_REVALIDATING, &nfsi->flags); 885 + smp_mb__after_clear_bit(); 886 + wake_up_bit(&nfsi->flags, NFS_INO_REVALIDATING); 878 887 } 879 888 880 889 int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 881 890 { 882 891 struct inode *inode = dentry->d_inode; 883 - struct nfs_inode *nfsi = NFS_I(inode); 884 - int need_atime = nfsi->flags & NFS_INO_INVALID_ATIME; 892 + int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; 885 893 int err; 886 894 887 895 if (__IS_FLG(inode, MS_NOATIME)) ··· 1043 1019 struct nfs_fattr fattr; 1044 1020 struct nfs_inode *nfsi = NFS_I(inode); 1045 1021 unsigned long verifier; 1046 - unsigned int flags; 1022 + unsigned long cache_validity; 1047 1023 1048 1024 dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n", 1049 1025 inode->i_sb->s_id, (long long)NFS_FILEID(inode)); ··· 1054 1030 if (NFS_STALE(inode)) 1055 1031 goto out_nowait; 1056 1032 1057 - while (NFS_REVALIDATING(inode)) { 1058 - status = nfs_wait_on_inode(inode, NFS_INO_REVALIDATING); 1059 - if (status < 0) 1060 - goto out_nowait; 1061 - if (NFS_ATTRTIMEO(inode) == 0) 1062 - continue; 1063 - if (NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ATIME)) 1064 - continue; 1065 - status = NFS_STALE(inode) ? -ESTALE : 0; 1066 - goto out_nowait; 1033 + status = nfs_wait_on_inode(inode); 1034 + if (status < 0) 1035 + goto out; 1036 + if (NFS_STALE(inode)) { 1037 + status = -ESTALE; 1038 + /* Do we trust the cached ESTALE? */ 1039 + if (NFS_ATTRTIMEO(inode) != 0) { 1040 + if (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ATIME)) { 1041 + /* no */ 1042 + } else 1043 + goto out; 1044 + } 1067 1045 } 1068 - NFS_FLAGS(inode) |= NFS_INO_REVALIDATING; 1069 1046 1070 1047 /* Protect against RPC races by saving the change attribute */ 1071 1048 verifier = nfs_save_change_attribute(inode); ··· 1078 1053 if (status == -ESTALE) { 1079 1054 nfs_zap_caches(inode); 1080 1055 if (!S_ISDIR(inode->i_mode)) 1081 - NFS_FLAGS(inode) |= NFS_INO_STALE; 1056 + set_bit(NFS_INO_STALE, &NFS_FLAGS(inode)); 1082 1057 } 1083 1058 goto out; 1084 1059 } ··· 1090 1065 (long long)NFS_FILEID(inode), status); 1091 1066 goto out; 1092 1067 } 1093 - flags = nfsi->flags; 1094 - nfsi->flags &= ~NFS_INO_REVAL_PAGECACHE; 1068 + spin_lock(&inode->i_lock); 1069 + cache_validity = nfsi->cache_validity; 1070 + nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE; 1071 + 1095 1072 /* 1096 1073 * We may need to keep the attributes marked as invalid if 1097 1074 * we raced with nfs_end_attr_update(). 
1098 1075 */ 1099 1076 if (verifier == nfsi->cache_change_attribute) 1100 - nfsi->flags &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME); 1101 - /* Do the page cache invalidation */ 1077 + nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME); 1078 + spin_unlock(&inode->i_lock); 1079 + 1102 1080 nfs_revalidate_mapping(inode, inode->i_mapping); 1103 - if (flags & NFS_INO_INVALID_ACL) 1081 + 1082 + if (cache_validity & NFS_INO_INVALID_ACL) 1104 1083 nfs_zap_acl_cache(inode); 1084 + 1105 1085 dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n", 1106 1086 inode->i_sb->s_id, 1107 1087 (long long)NFS_FILEID(inode)); 1108 1088 1109 - out: 1110 - NFS_FLAGS(inode) &= ~NFS_INO_REVALIDATING; 1111 - wake_up(&nfsi->nfs_i_wait); 1089 + out: 1090 + nfs_wake_up_inode(inode); 1091 + 1112 1092 out_nowait: 1113 1093 unlock_kernel(); 1114 1094 return status; ··· 1137 1107 */ 1138 1108 int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) 1139 1109 { 1140 - if (!(NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA)) 1110 + if (!(NFS_I(inode)->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA)) 1141 1111 && !nfs_attribute_timeout(inode)) 1142 1112 return NFS_STALE(inode) ? -ESTALE : 0; 1143 1113 return __nfs_revalidate_inode(server, inode); ··· 1152 1122 { 1153 1123 struct nfs_inode *nfsi = NFS_I(inode); 1154 1124 1155 - if (nfsi->flags & NFS_INO_INVALID_DATA) { 1125 + if (nfsi->cache_validity & NFS_INO_INVALID_DATA) { 1156 1126 if (S_ISREG(inode->i_mode)) { 1157 1127 if (filemap_fdatawrite(mapping) == 0) 1158 1128 filemap_fdatawait(mapping); 1159 1129 nfs_wb_all(inode); 1160 1130 } 1161 1131 invalidate_inode_pages2(mapping); 1162 - nfsi->flags &= ~NFS_INO_INVALID_DATA; 1132 + 1133 + spin_lock(&inode->i_lock); 1134 + nfsi->cache_validity &= ~NFS_INO_INVALID_DATA; 1163 1135 if (S_ISDIR(inode->i_mode)) { 1164 1136 memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); 1165 1137 /* This ensures we revalidate child dentries */ 1166 1138 nfsi->cache_change_attribute++; 1167 1139 } 1140 + spin_unlock(&inode->i_lock); 1141 + 1168 1142 dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n", 1169 1143 inode->i_sb->s_id, 1170 1144 (long long)NFS_FILEID(inode)); ··· 1198 1164 1199 1165 if (!nfs_have_delegation(inode, FMODE_READ)) { 1200 1166 /* Mark the attribute cache for revalidation */ 1201 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1167 + spin_lock(&inode->i_lock); 1168 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 1202 1169 /* Directories and symlinks: invalidate page cache too */ 1203 1170 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 1204 - nfsi->flags |= NFS_INO_INVALID_DATA; 1171 + nfsi->cache_validity |= NFS_INO_INVALID_DATA; 1172 + spin_unlock(&inode->i_lock); 1205 1173 } 1206 1174 nfsi->cache_change_attribute ++; 1207 1175 atomic_dec(&nfsi->data_updates); ··· 1228 1192 if (nfs_have_delegation(inode, FMODE_READ)) 1229 1193 return 0; 1230 1194 1195 + spin_lock(&inode->i_lock); 1196 + 1231 1197 /* Are we in the process of updating data on the server? 
*/ 1232 1198 data_unstable = nfs_caches_unstable(inode); 1233 1199 ··· 1238 1200 && nfsi->change_attr == fattr->pre_change_attr) 1239 1201 nfsi->change_attr = fattr->change_attr; 1240 1202 if (nfsi->change_attr != fattr->change_attr) { 1241 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1203 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 1242 1204 if (!data_unstable) 1243 - nfsi->flags |= NFS_INO_REVAL_PAGECACHE; 1205 + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE; 1244 1206 } 1245 1207 } 1246 1208 1247 - if ((fattr->valid & NFS_ATTR_FATTR) == 0) 1209 + if ((fattr->valid & NFS_ATTR_FATTR) == 0) { 1210 + spin_unlock(&inode->i_lock); 1248 1211 return 0; 1212 + } 1249 1213 1250 1214 /* Has the inode gone and changed behind our back? */ 1251 1215 if (nfsi->fileid != fattr->fileid 1252 - || (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) 1216 + || (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) { 1217 + spin_unlock(&inode->i_lock); 1253 1218 return -EIO; 1219 + } 1254 1220 1255 1221 cur_size = i_size_read(inode); 1256 1222 new_isize = nfs_size_to_loff_t(fattr->size); ··· 1269 1227 1270 1228 /* Verify a few of the more important attributes */ 1271 1229 if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) { 1272 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1230 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 1273 1231 if (!data_unstable) 1274 - nfsi->flags |= NFS_INO_REVAL_PAGECACHE; 1232 + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE; 1275 1233 } 1276 1234 if (cur_size != new_isize) { 1277 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1235 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 1278 1236 if (nfsi->npages == 0) 1279 - nfsi->flags |= NFS_INO_REVAL_PAGECACHE; 1237 + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE; 1280 1238 } 1281 1239 1282 1240 /* Have any file permissions changed? */ 1283 1241 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) 1284 1242 || inode->i_uid != fattr->uid 1285 1243 || inode->i_gid != fattr->gid) 1286 - nfsi->flags |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; 1244 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; 1287 1245 1288 1246 /* Has the link count changed? */ 1289 1247 if (inode->i_nlink != fattr->nlink) 1290 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1248 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 1291 1249 1292 1250 if (!timespec_equal(&inode->i_atime, &fattr->atime)) 1293 - nfsi->flags |= NFS_INO_INVALID_ATIME; 1251 + nfsi->cache_validity |= NFS_INO_INVALID_ATIME; 1294 1252 1295 1253 nfsi->read_cache_jiffies = fattr->timestamp; 1254 + spin_unlock(&inode->i_lock); 1296 1255 return 0; 1297 1256 } 1298 1257 ··· 1332 1289 goto out_err; 1333 1290 } 1334 1291 1292 + spin_lock(&inode->i_lock); 1293 + 1335 1294 /* 1336 1295 * Make sure the inode's type hasn't changed. 1337 1296 */ 1338 - if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) 1297 + if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) { 1298 + spin_unlock(&inode->i_lock); 1339 1299 goto out_changed; 1300 + } 1340 1301 1341 1302 /* 1342 1303 * Update the read time so we don't revalidate too often. 
··· 1431 1384 || S_ISLNK(inode->i_mode))) 1432 1385 invalid &= ~NFS_INO_INVALID_DATA; 1433 1386 if (!nfs_have_delegation(inode, FMODE_READ)) 1434 - nfsi->flags |= invalid; 1387 + nfsi->cache_validity |= invalid; 1435 1388 1389 + spin_unlock(&inode->i_lock); 1436 1390 return 0; 1437 1391 out_changed: 1438 1392 /* ··· 1450 1402 */ 1451 1403 nfs_invalidate_inode(inode); 1452 1404 out_err: 1453 - NFS_FLAGS(inode) |= NFS_INO_STALE; 1405 + set_bit(NFS_INO_STALE, &NFS_FLAGS(inode)); 1454 1406 return -ESTALE; 1455 1407 } 1456 1408 ··· 2009 1961 nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL); 2010 1962 if (!nfsi) 2011 1963 return NULL; 2012 - nfsi->flags = 0; 1964 + nfsi->flags = 0UL; 1965 + nfsi->cache_validity = 0UL; 2013 1966 #ifdef CONFIG_NFS_V3_ACL 2014 1967 nfsi->acl_access = ERR_PTR(-EAGAIN); 2015 1968 nfsi->acl_default = ERR_PTR(-EAGAIN); ··· 2042 1993 nfsi->ndirty = 0; 2043 1994 nfsi->ncommit = 0; 2044 1995 nfsi->npages = 0; 2045 - init_waitqueue_head(&nfsi->nfs_i_wait); 2046 1996 nfs4_init_once(nfsi); 2047 1997 } 2048 1998 }
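
Editorial note: the fs/nfs/inode.c hunks above drop the nfs_i_wait wait queue and instead serialise revalidation on the new NFS_INO_REVALIDATING bit, but the acquire and release halves sit far apart in the diff. A minimal sketch pairing them, using only calls the patch itself introduces (the RPC signal masking and inode refcounting from the patch are omitted for brevity); the example_ names are illustrative, not part of the patch:

    #include <linux/sched.h>
    #include <linux/wait.h>
    #include <linux/nfs_fs.h>

    /* Same schedule callback the patch adds as nfs_wait_schedule(). */
    static int example_wait_schedule(void *word)
    {
    	if (signal_pending(current))
    		return -ERESTARTSYS;
    	schedule();
    	return 0;
    }

    /* Acquire side: sleep interruptibly until the bit can be taken. */
    static int example_lock_revalidate(struct nfs_inode *nfsi)
    {
    	return wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
    				example_wait_schedule, TASK_INTERRUPTIBLE);
    }

    /* Release side: clear the bit, then wake anyone waiting on it. */
    static void example_unlock_revalidate(struct nfs_inode *nfsi)
    {
    	clear_bit(NFS_INO_REVALIDATING, &nfsi->flags);
    	smp_mb__after_clear_bit();	/* make the clear visible before the wakeup */
    	wake_up_bit(&nfsi->flags, NFS_INO_REVALIDATING);
    }
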
+3 -1
fs/nfs/nfs3acl.c
··· 308 308 nfs_begin_data_update(inode); 309 309 status = rpc_call(server->client_acl, ACLPROC3_SETACL, 310 310 &args, &fattr, 0); 311 - NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS; 311 + spin_lock(&inode->i_lock); 312 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS; 313 + spin_unlock(&inode->i_lock); 312 314 nfs_end_data_update(inode); 313 315 dprintk("NFS reply setacl: %d\n", status); 314 316
+6 -2
fs/nfs/read.c
··· 140 140 if (rdata->res.eof != 0 || result == 0) 141 141 break; 142 142 } while (count); 143 - NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME; 143 + spin_lock(&inode->i_lock); 144 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; 145 + spin_unlock(&inode->i_lock); 144 146 145 147 if (count) 146 148 memclear_highpage_flush(page, rdata->args.pgbase, count); ··· 475 473 } 476 474 task->tk_status = -EIO; 477 475 } 478 - NFS_FLAGS(data->inode) |= NFS_INO_INVALID_ATIME; 476 + spin_lock(&data->inode->i_lock); 477 + NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME; 478 + spin_unlock(&data->inode->i_lock); 479 479 data->complete(data, status); 480 480 } 481 481
+8 -29
fs/nfs/symlink.c
··· 27 27 28 28 /* Symlink caching in the page cache is even more simplistic 29 29 * and straight-forward than readdir caching. 30 - * 31 - * At the beginning of the page we store pointer to struct page in question, 32 - * simplifying nfs_put_link() (if inode got invalidated we can't find the page 33 - * to be freed via pagecache lookup). 34 - * The NUL-terminated string follows immediately thereafter. 35 30 */ 36 - 37 - struct nfs_symlink { 38 - struct page *page; 39 - char body[0]; 40 - }; 41 31 42 32 static int nfs_symlink_filler(struct inode *inode, struct page *page) 43 33 { 44 - const unsigned int pgbase = offsetof(struct nfs_symlink, body); 45 - const unsigned int pglen = PAGE_SIZE - pgbase; 46 34 int error; 47 35 48 36 lock_kernel(); 49 - error = NFS_PROTO(inode)->readlink(inode, page, pgbase, pglen); 37 + error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); 50 38 unlock_kernel(); 51 39 if (error < 0) 52 40 goto error; ··· 48 60 return -EIO; 49 61 } 50 62 51 - static int nfs_follow_link(struct dentry *dentry, struct nameidata *nd) 63 + static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd) 52 64 { 53 65 struct inode *inode = dentry->d_inode; 54 66 struct page *page; 55 - struct nfs_symlink *p; 56 67 void *err = ERR_PTR(nfs_revalidate_inode(NFS_SERVER(inode), inode)); 57 68 if (err) 58 69 goto read_failed; ··· 65 78 err = ERR_PTR(-EIO); 66 79 goto getlink_read_error; 67 80 } 68 - p = kmap(page); 69 - p->page = page; 70 - nd_set_link(nd, p->body); 71 - return 0; 81 + nd_set_link(nd, kmap(page)); 82 + return page; 72 83 73 84 getlink_read_error: 74 85 page_cache_release(page); 75 86 read_failed: 76 87 nd_set_link(nd, err); 77 - return 0; 88 + return NULL; 78 89 } 79 90 80 - static void nfs_put_link(struct dentry *dentry, struct nameidata *nd) 91 + static void nfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 81 92 { 82 - char *s = nd_get_link(nd); 83 - if (!IS_ERR(s)) { 84 - struct nfs_symlink *p; 85 - struct page *page; 86 - 87 - p = container_of(s, struct nfs_symlink, body[0]); 88 - page = p->page; 89 - 93 + if (cookie) { 94 + struct page *page = cookie; 90 95 kunmap(page); 91 96 page_cache_release(page); 92 97 }
+4 -4
fs/proc/base.c
··· 890 890 }; 891 891 #endif /* CONFIG_SECCOMP */ 892 892 893 - static int proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) 893 + static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) 894 894 { 895 895 struct inode *inode = dentry->d_inode; 896 896 int error = -EACCES; ··· 907 907 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt); 908 908 nd->last_type = LAST_BIND; 909 909 out: 910 - return error; 910 + return ERR_PTR(error); 911 911 } 912 912 913 913 static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt, ··· 1692 1692 return vfs_readlink(dentry,buffer,buflen,tmp); 1693 1693 } 1694 1694 1695 - static int proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 1695 + static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 1696 1696 { 1697 1697 char tmp[30]; 1698 1698 sprintf(tmp, "%d", current->tgid); 1699 - return vfs_follow_link(nd,tmp); 1699 + return ERR_PTR(vfs_follow_link(nd,tmp)); 1700 1700 } 1701 1701 1702 1702 static struct inode_operations proc_self_inode_operations = {
+2 -2
fs/proc/generic.c
··· 329 329 spin_unlock(&proc_inum_lock); 330 330 } 331 331 332 - static int proc_follow_link(struct dentry *dentry, struct nameidata *nd) 332 + static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) 333 333 { 334 334 nd_set_link(nd, PDE(dentry->d_inode)->data); 335 - return 0; 335 + return NULL; 336 336 } 337 337 338 338 static struct inode_operations proc_link_inode_operations = {
+1 -1
fs/reiserfs/inode.c
··· 1985 1985 * iput doesn't deadlock in reiserfs_delete_xattrs. The locking 1986 1986 * code really needs to be reworked, but this will take care of it 1987 1987 * for now. -jeffm */ 1988 - if (REISERFS_I(dir)->i_acl_default) { 1988 + if (REISERFS_I(dir)->i_acl_default && !IS_ERR(REISERFS_I(dir)->i_acl_default)) { 1989 1989 reiserfs_write_unlock_xattrs(dir->i_sb); 1990 1990 iput(inode); 1991 1991 reiserfs_write_lock_xattrs(dir->i_sb);
+3 -3
fs/smbfs/symlink.c
··· 34 34 return smb_proc_symlink(server_from_dentry(dentry), dentry, oldname); 35 35 } 36 36 37 - static int smb_follow_link(struct dentry *dentry, struct nameidata *nd) 37 + static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd) 38 38 { 39 39 char *link = __getname(); 40 40 DEBUG1("followlink of %s/%s\n", DENTRY_PATH(dentry)); ··· 52 52 } 53 53 } 54 54 nd_set_link(nd, link); 55 - return 0; 55 + return NULL; 56 56 } 57 57 58 - static void smb_put_link(struct dentry *dentry, struct nameidata *nd) 58 + static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p) 59 59 { 60 60 char *s = nd_get_link(nd); 61 61 if (!IS_ERR(s))
+3 -3
fs/sysfs/symlink.c
··· 151 151 152 152 } 153 153 154 - static int sysfs_follow_link(struct dentry *dentry, struct nameidata *nd) 154 + static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd) 155 155 { 156 156 int error = -ENOMEM; 157 157 unsigned long page = get_zeroed_page(GFP_KERNEL); 158 158 if (page) 159 159 error = sysfs_getlink(dentry, (char *) page); 160 160 nd_set_link(nd, error ? ERR_PTR(error) : (char *)page); 161 - return 0; 161 + return NULL; 162 162 } 163 163 164 - static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd) 164 + static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 165 165 { 166 166 char *page = nd_get_link(nd); 167 167 if (!IS_ERR(page))
+2 -2
fs/sysv/symlink.c
··· 8 8 #include "sysv.h" 9 9 #include <linux/namei.h> 10 10 11 - static int sysv_follow_link(struct dentry *dentry, struct nameidata *nd) 11 + static void *sysv_follow_link(struct dentry *dentry, struct nameidata *nd) 12 12 { 13 13 nd_set_link(nd, (char *)SYSV_I(dentry->d_inode)->i_data); 14 - return 0; 14 + return NULL; 15 15 } 16 16 17 17 struct inode_operations sysv_fast_symlink_inode_operations = {
+2 -2
fs/ufs/symlink.c
··· 29 29 #include <linux/namei.h> 30 30 #include <linux/ufs_fs.h> 31 31 32 - static int ufs_follow_link(struct dentry *dentry, struct nameidata *nd) 32 + static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd) 33 33 { 34 34 struct ufs_inode_info *p = UFS_I(dentry->d_inode); 35 35 nd_set_link(nd, (char*)p->i_u1.i_symlink); 36 - return 0; 36 + return NULL; 37 37 } 38 38 39 39 struct inode_operations ufs_fast_symlink_inode_operations = {
+5 -5
fs/xfs/linux-2.6/xfs_iops.c
··· 374 374 * we need to be very careful about how much stack we use. 375 375 * uio is kmalloced for this reason... 376 376 */ 377 - STATIC int 377 + STATIC void * 378 378 linvfs_follow_link( 379 379 struct dentry *dentry, 380 380 struct nameidata *nd) ··· 391 391 link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL); 392 392 if (!link) { 393 393 nd_set_link(nd, ERR_PTR(-ENOMEM)); 394 - return 0; 394 + return NULL; 395 395 } 396 396 397 397 uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL); 398 398 if (!uio) { 399 399 kfree(link); 400 400 nd_set_link(nd, ERR_PTR(-ENOMEM)); 401 - return 0; 401 + return NULL; 402 402 } 403 403 404 404 vp = LINVFS_GET_VP(dentry->d_inode); ··· 422 422 kfree(uio); 423 423 424 424 nd_set_link(nd, link); 425 - return 0; 425 + return NULL; 426 426 } 427 427 428 - static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd) 428 + static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) 429 429 { 430 430 char *s = nd_get_link(nd); 431 431 if (!IS_ERR(s))
-4
include/asm-ppc/ibm44x.h
··· 423 423 #define MQ0_CONFIG_SIZE_2G 0x0000c000 424 424 425 425 /* Internal SRAM Controller 440GX/440SP */ 426 - #ifdef CONFIG_440SP 427 - #define DCRN_SRAM0_BASE 0x100 428 - #else /* 440GX */ 429 426 #define DCRN_SRAM0_BASE 0x000 430 - #endif 431 427 432 428 #define DCRN_SRAM0_SB0CR (DCRN_SRAM0_BASE + 0x020) 433 429 #define DCRN_SRAM0_SB1CR (DCRN_SRAM0_BASE + 0x021)
+1 -1
include/asm-ppc/ppc4xx_dma.h
··· 285 285 286 286 #define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan)) 287 287 288 - #elif defined(CONFIG_STBXXX_DMA) /* stb03xxx */ 288 + #elif defined(CONFIG_STB03xxx) /* stb03xxx */ 289 289 290 290 #define DMA_PPC4xx_SIZE 4096 291 291
+7 -1
include/asm-sh/unistd.h
··· 295 295 #define __NR_add_key 285 296 296 #define __NR_request_key 286 297 297 #define __NR_keyctl 287 298 + #define __NR_ioprio_set 288 299 + #define __NR_ioprio_get 289 300 + #define __NR_inotify_init 290 301 + #define __NR_inotify_add_watch 291 302 + #define __NR_inotify_rm_watch 292 298 303 299 - #define NR_syscalls 288 304 + 305 + #define NR_syscalls 293 300 306 301 307 /* user-visible error numbers are in the range -1 - -124: see <asm-sh/errno.h> */ 302 308
+6 -1
include/asm-sh64/unistd.h
··· 338 338 #define __NR_add_key 313 339 339 #define __NR_request_key 314 340 340 #define __NR_keyctl 315 341 + #define __NR_ioprio_set 316 342 + #define __NR_ioprio_get 317 343 + #define __NR_inotify_init 318 344 + #define __NR_inotify_add_watch 319 345 + #define __NR_inotify_rm_watch 320 341 346 342 - #define NR_syscalls 316 347 + #define NR_syscalls 321 343 348 344 349 /* user-visible error numbers are in the range -1 - -125: see <asm-sh64/errno.h> */ 345 350
+5
include/asm-sparc64/thread_info.h
··· 68 68 69 69 struct restart_block restart_block; 70 70 71 + struct pt_regs *kern_una_regs; 72 + unsigned int kern_una_insn; 73 + 71 74 unsigned long fpregs[0] __attribute__ ((aligned(64))); 72 75 }; 73 76 ··· 106 103 #define TI_PCR 0x00000490 107 104 #define TI_CEE_STUFF 0x00000498 108 105 #define TI_RESTART_BLOCK 0x000004a0 106 + #define TI_KUNA_REGS 0x000004c8 107 + #define TI_KUNA_INSN 0x000004d0 109 108 #define TI_FPREGS 0x00000500 110 109 111 110 /* We embed this in the uppermost byte of thread_info->flags */
+4 -4
include/linux/fs.h
··· 993 993 int (*rename) (struct inode *, struct dentry *, 994 994 struct inode *, struct dentry *); 995 995 int (*readlink) (struct dentry *, char __user *,int); 996 - int (*follow_link) (struct dentry *, struct nameidata *); 997 - void (*put_link) (struct dentry *, struct nameidata *); 996 + void * (*follow_link) (struct dentry *, struct nameidata *); 997 + void (*put_link) (struct dentry *, struct nameidata *, void *); 998 998 void (*truncate) (struct inode *); 999 999 int (*permission) (struct inode *, int, struct nameidata *); 1000 1000 int (*setattr) (struct dentry *, struct iattr *); ··· 1602 1602 extern int vfs_readlink(struct dentry *, char __user *, int, const char *); 1603 1603 extern int vfs_follow_link(struct nameidata *, const char *); 1604 1604 extern int page_readlink(struct dentry *, char __user *, int); 1605 - extern int page_follow_link_light(struct dentry *, struct nameidata *); 1606 - extern void page_put_link(struct dentry *, struct nameidata *); 1605 + extern void *page_follow_link_light(struct dentry *, struct nameidata *); 1606 + extern void page_put_link(struct dentry *, struct nameidata *, void *); 1607 1607 extern int page_symlink(struct inode *inode, const char *symname, int len); 1608 1608 extern struct inode_operations page_symlink_inode_operations; 1609 1609 extern int generic_readlink(struct dentry *, char __user *, int);
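
Editorial note: the include/linux/fs.h hunk above changes ->follow_link() to return an opaque cookie that the VFS later hands back to ->put_link(), replacing the old int/void pair. A hedged sketch of the new convention, modelled on the nfs and shmem conversions in this update; the myfs_ names and the page-lookup helper are hypothetical, not part of any patched filesystem:

    #include <linux/fs.h>
    #include <linux/namei.h>
    #include <linux/pagemap.h>
    #include <linux/highmem.h>
    #include <linux/err.h>

    /* Hypothetical helper: returns a page holding the NUL-terminated target. */
    static struct page *myfs_get_link_page(struct inode *inode);

    static void *myfs_follow_link(struct dentry *dentry, struct nameidata *nd)
    {
    	struct page *page = myfs_get_link_page(dentry->d_inode);

    	if (IS_ERR(page)) {
    		nd_set_link(nd, (char *)page);	/* error is reported via nd_set_link() */
    		return NULL;			/* nothing for ->put_link() to undo */
    	}
    	nd_set_link(nd, kmap(page));
    	return page;				/* cookie passed back to ->put_link() */
    }

    static void myfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
    {
    	if (cookie) {
    		struct page *page = cookie;
    		kunmap(page);
    		page_cache_release(page);
    	}
    }

    static struct inode_operations myfs_symlink_inode_operations = {
    	.readlink	= generic_readlink,
    	.follow_link	= myfs_follow_link,
    	.put_link	= myfs_put_link,
    };
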
+23 -18
include/linux/nfs_fs.h
··· 112 112 /* 113 113 * Various flags 114 114 */ 115 - unsigned int flags; 115 + unsigned long flags; /* atomic bit ops */ 116 + unsigned long cache_validity; /* bit mask */ 116 117 117 118 /* 118 119 * read_cache_jiffies is when we started read-caching this inode, ··· 175 174 /* Open contexts for shared mmap writes */ 176 175 struct list_head open_files; 177 176 178 - wait_queue_head_t nfs_i_wait; 179 - 180 177 #ifdef CONFIG_NFS_V4 181 178 struct nfs4_cached_acl *nfs4_acl; 182 179 /* NFSv4 state */ ··· 187 188 }; 188 189 189 190 /* 190 - * Legal inode flag values 191 + * Cache validity bit flags 191 192 */ 192 - #define NFS_INO_STALE 0x0001 /* possible stale inode */ 193 - #define NFS_INO_ADVISE_RDPLUS 0x0002 /* advise readdirplus */ 194 - #define NFS_INO_REVALIDATING 0x0004 /* revalidating attrs */ 195 - #define NFS_INO_INVALID_ATTR 0x0008 /* cached attrs are invalid */ 196 - #define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */ 197 - #define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */ 198 - #define NFS_INO_INVALID_ACCESS 0x0040 /* cached access cred invalid */ 199 - #define NFS_INO_INVALID_ACL 0x0080 /* cached acls are invalid */ 200 - #define NFS_INO_REVAL_PAGECACHE 0x1000 /* must revalidate pagecache */ 193 + #define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */ 194 + #define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */ 195 + #define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */ 196 + #define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ 197 + #define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ 198 + #define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ 199 + 200 + /* 201 + * Bit offsets in flags field 202 + */ 203 + #define NFS_INO_REVALIDATING (0) /* revalidating attrs */ 204 + #define NFS_INO_ADVISE_RDPLUS (1) /* advise readdirplus */ 205 + #define NFS_INO_STALE (2) /* possible stale inode */ 201 206 202 207 static inline struct nfs_inode *NFS_I(struct inode *inode) 203 208 { ··· 227 224 #define NFS_ATTRTIMEO_UPDATE(inode) (NFS_I(inode)->attrtimeo_timestamp) 228 225 229 226 #define NFS_FLAGS(inode) (NFS_I(inode)->flags) 230 - #define NFS_REVALIDATING(inode) (NFS_FLAGS(inode) & NFS_INO_REVALIDATING) 231 - #define NFS_STALE(inode) (NFS_FLAGS(inode) & NFS_INO_STALE) 227 + #define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode))) 232 228 233 229 #define NFS_FILEID(inode) (NFS_I(inode)->fileid) 234 230 ··· 238 236 239 237 static inline void NFS_CACHEINV(struct inode *inode) 240 238 { 241 - if (!nfs_caches_unstable(inode)) 242 - NFS_FLAGS(inode) |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS; 239 + if (!nfs_caches_unstable(inode)) { 240 + spin_lock(&inode->i_lock); 241 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS; 242 + spin_unlock(&inode->i_lock); 243 + } 243 244 } 244 245 245 246 static inline int nfs_server_capable(struct inode *inode, int cap) ··· 252 247 253 248 static inline int NFS_USE_READDIRPLUS(struct inode *inode) 254 249 { 255 - return NFS_FLAGS(inode) & NFS_INO_ADVISE_RDPLUS; 250 + return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode)); 256 251 } 257 252 258 253 /**
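
Editorial note: the include/linux/nfs_fs.h hunk above splits the old unsigned int flags word in two: flags keeps the lifetime bits (REVALIDATING, ADVISE_RDPLUS, STALE) and is driven with atomic bitops, while the new cache_validity mask holds the NFS_INO_INVALID_* bits and is serialised by inode->i_lock. A short sketch of the two access disciplines as the converted call sites in this update use them; the example_ function names are illustrative:

    #include <linux/spinlock.h>
    #include <linux/bitops.h>
    #include <linux/nfs_fs.h>

    /* cache_validity: plain bitmask, updated under inode->i_lock. */
    static void example_mark_data_invalid(struct inode *inode)
    {
    	spin_lock(&inode->i_lock);
    	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
    	spin_unlock(&inode->i_lock);
    }

    /* flags: unsigned long, manipulated with the atomic bit operations. */
    static int example_readdirplus_advised(struct inode *inode)
    {
    	return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
    }

    static void example_stop_advising_readdirplus(struct inode *inode)
    {
    	clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
    }
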
+5 -1
include/linux/pci_ids.h
··· 881 881 #define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e 882 882 #define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030 883 883 #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 884 - #define PCI_DEVIEC_ID_APPLE_UNI_N_ATA 0x0033 884 + #define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033 885 885 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034 886 886 #define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b 887 887 #define PCI_DEVICE_ID_APPLE_KEYLARGO_I 0x003e ··· 1580 1580 #define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 1581 1581 #define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 1582 1582 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 1583 + #define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214 1583 1584 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 1584 1585 #define PCI_DEVICE_ID_SERVERWORKS_OSB4USB 0x0220 1585 1586 #define PCI_DEVICE_ID_SERVERWORKS_CSB5USB PCI_DEVICE_ID_SERVERWORKS_OSB4USB ··· 2184 2183 2185 2184 #define PCI_VENDOR_ID_SIBYTE 0x166d 2186 2185 #define PCI_DEVICE_ID_BCM1250_HT 0x0002 2186 + 2187 + #define PCI_VENDOR_ID_NETCELL 0x169c 2188 + #define PCI_DEVICE_ID_REVOLUTION 0x0044 2187 2189 2188 2190 #define PCI_VENDOR_ID_LINKSYS 0x1737 2189 2191 #define PCI_DEVICE_ID_LINKSYS_EG1032 0x1032
+2 -2
kernel/sched.c
··· 3378 3378 */ 3379 3379 int can_nice(const task_t *p, const int nice) 3380 3380 { 3381 - /* convert nice value [19,-20] to rlimit style value [0,39] */ 3382 - int nice_rlim = 19 - nice; 3381 + /* convert nice value [19,-20] to rlimit style value [1,40] */ 3382 + int nice_rlim = 20 - nice; 3383 3383 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || 3384 3384 capable(CAP_SYS_NICE)); 3385 3385 }
+6 -11
mm/shmem.c
··· 1773 1773 return 0; 1774 1774 } 1775 1775 1776 - static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) 1776 + static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) 1777 1777 { 1778 1778 nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); 1779 - return 0; 1779 + return NULL; 1780 1780 } 1781 1781 1782 - static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 1782 + static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 1783 1783 { 1784 1784 struct page *page = NULL; 1785 1785 int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 1786 1786 nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); 1787 - return 0; 1787 + return page; 1788 1788 } 1789 1789 1790 - static void shmem_put_link(struct dentry *dentry, struct nameidata *nd) 1790 + static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 1791 1791 { 1792 1792 if (!IS_ERR(nd_get_link(nd))) { 1793 - struct page *page; 1794 - 1795 - page = find_get_page(dentry->d_inode->i_mapping, 0); 1796 - if (!page) 1797 - BUG(); 1793 + struct page *page = cookie; 1798 1794 kunmap(page); 1799 1795 mark_page_accessed(page); 1800 - page_cache_release(page); 1801 1796 page_cache_release(page); 1802 1797 } 1803 1798 }
+12 -10
net/802/tr.c
··· 251 251 unsigned int hash; 252 252 struct rif_cache *entry; 253 253 unsigned char *olddata; 254 + unsigned long flags; 254 255 static const unsigned char mcast_func_addr[] 255 256 = {0xC0,0x00,0x00,0x04,0x00,0x00}; 256 257 257 - spin_lock_bh(&rif_lock); 258 + spin_lock_irqsave(&rif_lock, flags); 258 259 259 260 /* 260 261 * Broadcasts are single route as stated in RFC 1042 ··· 324 323 else 325 324 slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8); 326 325 olddata = skb->data; 327 - spin_unlock_bh(&rif_lock); 326 + spin_unlock_irqrestore(&rif_lock, flags); 328 327 329 328 skb_pull(skb, slack); 330 329 memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack); ··· 338 337 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) 339 338 { 340 339 unsigned int hash, rii_p = 0; 340 + unsigned long flags; 341 341 struct rif_cache *entry; 342 342 343 343 344 - spin_lock_bh(&rif_lock); 344 + spin_lock_irqsave(&rif_lock, flags); 345 345 346 346 /* 347 347 * Firstly see if the entry exists ··· 380 378 if(!entry) 381 379 { 382 380 printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n"); 383 - spin_unlock_bh(&rif_lock); 381 + spin_unlock_irqrestore(&rif_lock, flags); 384 382 return; 385 383 } 386 384 ··· 422 420 } 423 421 entry->last_used=jiffies; 424 422 } 425 - spin_unlock_bh(&rif_lock); 423 + spin_unlock_irqrestore(&rif_lock, flags); 426 424 } 427 425 428 426 /* ··· 432 430 static void rif_check_expire(unsigned long dummy) 433 431 { 434 432 int i; 435 - unsigned long next_interval = jiffies + sysctl_tr_rif_timeout/2; 433 + unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2; 436 434 437 - spin_lock_bh(&rif_lock); 435 + spin_lock_irqsave(&rif_lock, flags); 438 436 439 437 for(i =0; i < RIF_TABLE_SIZE; i++) { 440 438 struct rif_cache *entry, **pentry; ··· 456 454 } 457 455 } 458 456 459 - spin_unlock_bh(&rif_lock); 457 + spin_unlock_irqrestore(&rif_lock, flags); 460 458 461 459 mod_timer(&rif_timer, next_interval); 462 460 ··· 487 485 488 486 static void *rif_seq_start(struct seq_file *seq, loff_t *pos) 489 487 { 490 - spin_lock_bh(&rif_lock); 488 + spin_lock_irq(&rif_lock); 491 489 492 490 return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN; 493 491 } ··· 518 516 519 517 static void rif_seq_stop(struct seq_file *seq, void *v) 520 518 { 521 - spin_unlock_bh(&rif_lock); 519 + spin_unlock_irq(&rif_lock); 522 520 } 523 521 524 522 static int rif_seq_show(struct seq_file *seq, void *v)
+6 -6
net/ipv4/icmp.c
··· 349 349 { 350 350 struct sk_buff *skb; 351 351 352 - ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param, 353 - icmp_param->data_len+icmp_param->head_len, 354 - icmp_param->head_len, 355 - ipc, rt, MSG_DONTWAIT); 356 - 357 - if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { 352 + if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param, 353 + icmp_param->data_len+icmp_param->head_len, 354 + icmp_param->head_len, 355 + ipc, rt, MSG_DONTWAIT) < 0) 356 + ip_flush_pending_frames(icmp_socket->sk); 357 + else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { 358 358 struct icmphdr *icmph = skb->h.icmph; 359 359 unsigned int csum = 0; 360 360 struct sk_buff *skb1;
+1 -1
net/ipv4/ipcomp.c
··· 358 358 int cpu; 359 359 360 360 /* This can be any valid CPU ID so we don't need locking. */ 361 - cpu = smp_processor_id(); 361 + cpu = raw_smp_processor_id(); 362 362 363 363 list_for_each_entry(pos, &ipcomp_tfms_list, list) { 364 364 struct crypto_tfm *tfm;
+9 -8
net/ipv4/netfilter/ipt_ECN.c
··· 61 61 if (!tcph) 62 62 return 0; 63 63 64 - if (!(einfo->operation & IPT_ECN_OP_SET_ECE 65 - || tcph->ece == einfo->proto.tcp.ece) 66 - && (!(einfo->operation & IPT_ECN_OP_SET_CWR 67 - || tcph->cwr == einfo->proto.tcp.cwr))) 64 + if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) || 65 + tcph->ece == einfo->proto.tcp.ece) && 66 + ((!(einfo->operation & IPT_ECN_OP_SET_CWR) || 67 + tcph->cwr == einfo->proto.tcp.cwr))) 68 68 return 1; 69 69 70 70 if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph))) 71 71 return 0; 72 72 tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4; 73 + 74 + if ((*pskb)->ip_summed == CHECKSUM_HW && 75 + skb_checksum_help(*pskb, inward)) 76 + return 0; 73 77 74 78 diffs[0] = ((u_int16_t *)tcph)[6]; 75 79 if (einfo->operation & IPT_ECN_OP_SET_ECE) ··· 83 79 diffs[1] = ((u_int16_t *)tcph)[6]; 84 80 diffs[0] = diffs[0] ^ 0xFFFF; 85 81 86 - if ((*pskb)->ip_summed != CHECKSUM_HW) 82 + if ((*pskb)->ip_summed != CHECKSUM_UNNECESSARY) 87 83 tcph->check = csum_fold(csum_partial((char *)diffs, 88 84 sizeof(diffs), 89 85 tcph->check^0xFFFF)); 90 - else 91 - if (skb_checksum_help(*pskb, inward)) 92 - return 0; 93 86 (*pskb)->nfcache |= NFC_ALTERED; 94 87 return 1; 95 88 }
+4 -3
net/ipv4/netfilter/ipt_TCPMSS.c
··· 61 61 if (!skb_ip_make_writable(pskb, (*pskb)->len)) 62 62 return NF_DROP; 63 63 64 + if ((*pskb)->ip_summed == CHECKSUM_HW && 65 + skb_checksum_help(*pskb, out == NULL)) 66 + return NF_DROP; 67 + 64 68 iph = (*pskb)->nh.iph; 65 69 tcplen = (*pskb)->len - iph->ihl*4; 66 70 ··· 190 186 newmss); 191 187 192 188 retmodified: 193 - /* We never hw checksum SYN packets. */ 194 - BUG_ON((*pskb)->ip_summed == CHECKSUM_HW); 195 - 196 189 (*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED; 197 190 return IPT_CONTINUE; 198 191 }
+1 -1
net/ipv6/ipcomp6.c
··· 354 354 int cpu; 355 355 356 356 /* This can be any valid CPU ID so we don't need locking. */ 357 - cpu = smp_processor_id(); 357 + cpu = raw_smp_processor_id(); 358 358 359 359 list_for_each_entry(pos, &ipcomp6_tfms_list, list) { 360 360 struct crypto_tfm *tfm;
+7 -2
scripts/mod/modpost.c
··· 359 359 /* ignore __this_module, it will be resolved shortly */ 360 360 if (strcmp(symname, MODULE_SYMBOL_PREFIX "__this_module") == 0) 361 361 break; 362 - #ifdef STT_REGISTER 362 + /* cope with newer glibc (2.3.4 or higher) STT_ definition in elf.h */ 363 + #if defined(STT_REGISTER) || defined(STT_SPARC_REGISTER) 364 + /* add compatibility with older glibc */ 365 + #ifndef STT_SPARC_REGISTER 366 + #define STT_SPARC_REGISTER STT_REGISTER 367 + #endif 363 368 if (info->hdr->e_machine == EM_SPARC || 364 369 info->hdr->e_machine == EM_SPARCV9) { 365 370 /* Ignore register directives. */ 366 - if (ELF_ST_TYPE(sym->st_info) == STT_REGISTER) 371 + if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER) 367 372 break; 368 373 } 369 374 #endif