Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

+1361 -641
+7
arch/arm/boot/compressed/head-xscale.S
··· 47 47 orr r7, r7, #(MACH_TYPE_GTWX5715 & 0xff00) 48 48 #endif 49 49 50 + #ifdef CONFIG_ARCH_IXP2000 51 + mov r1, #-1 52 + mov r0, #0xd6000000 53 + str r1, [r0, #0x14] 54 + str r1, [r0, #0x18] 55 + #endif 56 +
+11 -5
arch/arm/kernel/entry-armv.S
··· 269 269 add r5, sp, #S_PC 270 270 ldmia r7, {r2 - r4} @ Get USR pc, cpsr 271 271 272 - #if __LINUX_ARM_ARCH__ < 6 272 + #if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 273 273 @ make sure our user space atomic helper is aborted 274 274 cmp r2, #VIRT_OFFSET 275 275 bichs r3, r3, #PSR_Z_BIT ··· 616 616 617 617 __kuser_cmpxchg: @ 0xffff0fc0 618 618 619 - #if __LINUX_ARM_ARCH__ < 6 619 + #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 620 620 621 - #ifdef CONFIG_SMP /* sanity check */ 622 - #error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?" 623 - #endif 621 + /* 622 + * Poor you. No fast solution possible... 623 + * The kernel itself must perform the operation. 624 + * A special ghost syscall is used for that (see traps.c). 625 + */ 626 + swi #0x9ffff0 627 + mov pc, lr 628 + 629 + #elif __LINUX_ARM_ARCH__ < 6 624 630 625 631 /* 626 632 * Theory of operation:
+49
arch/arm/kernel/traps.c
··· 464 464 #endif 465 465 return 0; 466 466 467 + #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG 468 + /* 469 + * Atomically store r1 in *r2 if *r2 is equal to r0 for user space. 470 + * Return zero in r0 if *MEM was changed or non-zero if no exchange 471 + * happened. Also set the user C flag accordingly. 472 + * If access permissions have to be fixed up then non-zero is 473 + * returned and the operation has to be re-attempted. 474 + * 475 + * *NOTE*: This is a ghost syscall private to the kernel. Only the 476 + * __kuser_cmpxchg code in entry-armv.S should be aware of its 477 + * existence. Don't ever use this from user code. 478 + */ 479 + case 0xfff0: 480 + { 481 + extern void do_DataAbort(unsigned long addr, unsigned int fsr, 482 + struct pt_regs *regs); 483 + unsigned long val; 484 + unsigned long addr = regs->ARM_r2; 485 + struct mm_struct *mm = current->mm; 486 + pgd_t *pgd; pmd_t *pmd; pte_t *pte; 487 + 488 + regs->ARM_cpsr &= ~PSR_C_BIT; 489 + spin_lock(&mm->page_table_lock); 490 + pgd = pgd_offset(mm, addr); 491 + if (!pgd_present(*pgd)) 492 + goto bad_access; 493 + pmd = pmd_offset(pgd, addr); 494 + if (!pmd_present(*pmd)) 495 + goto bad_access; 496 + pte = pte_offset_map(pmd, addr); 497 + if (!pte_present(*pte) || !pte_write(*pte)) 498 + goto bad_access; 499 + val = *(unsigned long *)addr; 500 + val -= regs->ARM_r0; 501 + if (val == 0) { 502 + *(unsigned long *)addr = regs->ARM_r1; 503 + regs->ARM_cpsr |= PSR_C_BIT; 504 + } 505 + spin_unlock(&mm->page_table_lock); 506 + return val; 507 + 508 + bad_access: 509 + spin_unlock(&mm->page_table_lock); 510 + /* simulate a read access fault */ 511 + do_DataAbort(addr, 15 + (1 << 11), regs); 512 + return -1; 513 + } 514 + #endif 515 + 467 516 default: 468 517 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS 469 518 if not implemented, rather than raising SIGILL. This
+3 -3
arch/arm/lib/io-writesw-armv4.S
··· 87 87 subs r2, r2, #2 88 88 orr ip, ip, r3, push_hbyte1 89 89 strh ip, [r0] 90 - bpl 2b 90 + bpl 1b 91 91 92 - 3: tst r2, #1 93 - 2: movne ip, r3, lsr #8 92 + tst r2, #1 93 + 3: movne ip, r3, lsr #8 94 94 strneh ip, [r0] 95 95 mov pc, lr
+9
arch/arm/mach-pxa/mainstone.c
··· 304 304 PWER = 0xC0000002; 305 305 PRER = 0x00000002; 306 306 PFER = 0x00000002; 307 + /* For using the internal SRAM as framebuffer. */ 308 + PSLR |= 0xF04; 309 + PCFR = 0x66; 310 + /* For Keypad wakeup. */ 311 + KPC &=~KPC_ASACT; 312 + KPC |=KPC_AS; 313 + PKWR = 0x000FD000; 314 + /* Need to read PKWR back after setting it. */ 315 + PKWR; 307 316 } 308 317 309 318 MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
+18 -14
arch/arm/mach-pxa/pm.c
··· 29 29 */ 30 30 #undef DEBUG 31 31 32 - extern void pxa_cpu_suspend(void); 33 - extern void pxa_cpu_resume(void); 34 - 35 32 #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x 36 33 #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] 37 34 ··· 60 63 SLEEP_SAVE_ICMR, 61 64 SLEEP_SAVE_CKEN, 62 65 66 + #ifdef CONFIG_PXA27x 67 + SLEEP_SAVE_MDREFR, 68 + SLEEP_SAVE_PWER, SLEEP_SAVE_PCFR, SLEEP_SAVE_PRER, 69 + SLEEP_SAVE_PFER, SLEEP_SAVE_PKWR, 70 + #endif 71 + 63 72 SLEEP_SAVE_CKSUM, 64 73 65 74 SLEEP_SAVE_SIZE ··· 78 75 unsigned long checksum = 0; 79 76 struct timespec delta, rtc; 80 77 int i; 81 - 82 - if (state != PM_SUSPEND_MEM) 83 - return -EINVAL; 78 + extern void pxa_cpu_pm_enter(suspend_state_t state); 84 79 85 80 #ifdef CONFIG_IWMMXT 86 81 /* force any iWMMXt context to ram **/ ··· 101 100 SAVE(GAFR2_L); SAVE(GAFR2_U); 102 101 103 102 #ifdef CONFIG_PXA27x 103 + SAVE(MDREFR); 104 104 SAVE(GPLR3); SAVE(GPDR3); SAVE(GRER3); SAVE(GFER3); SAVE(PGSR3); 105 105 SAVE(GAFR3_L); SAVE(GAFR3_U); 106 + SAVE(PWER); SAVE(PCFR); SAVE(PRER); 107 + SAVE(PFER); SAVE(PKWR); 106 108 #endif 107 109 108 110 SAVE(ICMR); 109 111 ICMR = 0; 110 112 111 113 SAVE(CKEN); 112 - CKEN = 0; 113 - 114 114 SAVE(PSTR); 115 115 116 116 /* Note: wake up source are set up in each machine specific files */ ··· 125 123 /* Clear sleep reset status */ 126 124 RCSR = RCSR_SMR; 127 125 128 - /* set resume return address */ 129 - PSPR = virt_to_phys(pxa_cpu_resume); 130 - 131 126 /* before sleeping, calculate and save a checksum */ 132 127 for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++) 133 128 checksum += sleep_save[i]; 134 129 sleep_save[SLEEP_SAVE_CKSUM] = checksum; 135 130 136 131 /* *** go zzz *** */ 137 - pxa_cpu_suspend(); 132 + pxa_cpu_pm_enter(state); 138 133 139 134 /* after sleeping, validate the checksum */ 140 135 checksum = 0; ··· 144 145 LUB_HEXLED = 0xbadbadc5; 145 146 #endif 146 147 while (1) 147 - pxa_cpu_suspend(); 148 + pxa_cpu_pm_enter(state); 148 149 } 149 150 150 151 /* ensure not to come back 
here if it wasn't intended */ ··· 161 162 RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2); 162 163 163 164 #ifdef CONFIG_PXA27x 165 + RESTORE(MDREFR); 164 166 RESTORE(GAFR3_L); RESTORE(GAFR3_U); RESTORE_GPLEVEL(3); 165 167 RESTORE(GPDR3); RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3); 168 + RESTORE(PWER); RESTORE(PCFR); RESTORE(PRER); 169 + RESTORE(PFER); RESTORE(PKWR); 166 170 #endif 167 171 168 172 PSSR = PSSR_RDH | PSSR_PH; ··· 199 197 */ 200 198 static int pxa_pm_prepare(suspend_state_t state) 201 199 { 202 - return 0; 200 + extern int pxa_cpu_pm_prepare(suspend_state_t state); 201 + 202 + return pxa_cpu_pm_prepare(state); 203 203 } 204 204 205 205 /*
+29
arch/arm/mach-pxa/pxa25x.c
··· 102 102 } 103 103 104 104 EXPORT_SYMBOL(get_lcdclk_frequency_10khz); 105 + 106 + 107 + int pxa_cpu_pm_prepare(suspend_state_t state) 108 + { 109 + switch (state) { 110 + case PM_SUSPEND_MEM: 111 + break; 112 + default: 113 + return -EINVAL; 114 + } 115 + 116 + return 0; 117 + } 118 + 119 + void pxa_cpu_pm_enter(suspend_state_t state) 120 + { 121 + extern void pxa_cpu_suspend(unsigned int); 122 + extern void pxa_cpu_resume(void); 123 + 124 + CKEN = 0; 125 + 126 + switch (state) { 127 + case PM_SUSPEND_MEM: 128 + /* set resume return address */ 129 + PSPR = virt_to_phys(pxa_cpu_resume); 130 + pxa_cpu_suspend(3); 131 + break; 132 + } 133 + }
+32
arch/arm/mach-pxa/pxa27x.c
··· 120 120 EXPORT_SYMBOL(get_memclk_frequency_10khz); 121 121 EXPORT_SYMBOL(get_lcdclk_frequency_10khz); 122 122 123 + int pxa_cpu_pm_prepare(suspend_state_t state) 124 + { 125 + switch (state) { 126 + case PM_SUSPEND_MEM: 127 + return 0; 128 + default: 129 + return -EINVAL; 130 + } 131 + } 132 + 133 + void pxa_cpu_pm_enter(suspend_state_t state) 134 + { 135 + extern void pxa_cpu_standby(void); 136 + extern void pxa_cpu_suspend(unsigned int); 137 + extern void pxa_cpu_resume(void); 138 + 139 + CKEN = CKEN22_MEMC | CKEN9_OSTIMER; 140 + 141 + /* ensure voltage-change sequencer not initiated, which hangs */ 142 + PCFR &= ~PCFR_FVC; 143 + 144 + /* Clear edge-detect status register. */ 145 + PEDR = 0xDF12FE1B; 146 + 147 + switch (state) { 148 + case PM_SUSPEND_MEM: 149 + /* set resume return address */ 150 + PSPR = virt_to_phys(pxa_cpu_resume); 151 + pxa_cpu_suspend(3); 152 + break; 153 + } 154 + } 123 155 124 156 /* 125 157 * device registration specific to PXA27x.
+4
arch/arm/mach-s3c2410/dma.c
··· 785 785 chan->client = NULL; 786 786 chan->in_use = 0; 787 787 788 + if (chan->irq_claimed) 789 + free_irq(chan->irq, (void *)chan); 790 + chan->irq_claimed = 0; 791 + 788 792 local_irq_restore(flags); 789 793 790 794 return 0;
+8 -7
arch/arm/mm/Kconfig
··· 228 228 select CPU_CACHE_V4WB 229 229 select CPU_CACHE_VIVT 230 230 select CPU_TLB_V4WB 231 - select CPU_MINICACHE 232 231 233 232 # XScale 234 233 config CPU_XSCALE ··· 238 239 select CPU_ABRT_EV5T 239 240 select CPU_CACHE_VIVT 240 241 select CPU_TLB_V4WBI 241 - select CPU_MINICACHE 242 242 243 243 # ARMv6 244 244 config CPU_V6 ··· 343 345 config CPU_TLB_V6 344 346 bool 345 347 346 - config CPU_MINICACHE 347 - bool 348 - help 349 - Processor has a minicache. 350 - 351 348 comment "Processor Features" 352 349 353 350 config ARM_THUMB ··· 421 428 all SMP capable ARMv6's) or later processors. User space may 422 429 assume directly accessing that register and always obtain the 423 430 expected value only on ARMv7 and above. 431 + 432 + config NEEDS_SYSCALL_FOR_CMPXCHG 433 + bool 434 + default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3) 435 + help 436 + SMP on a pre-ARMv6 processor? Well OK then. 437 + Forget about fast user space cmpxchg support. 438 + It is just not possible. 424 439
-2
arch/arm/mm/Makefile
··· 31 31 obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o 32 32 obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o 33 33 34 - obj-$(CONFIG_CPU_MINICACHE) += minicache.o 35 - 36 34 obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o 37 35 obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o 38 36 obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
-113
arch/arm/mm/copypage-xscale.S
··· 1 - /* 2 - * linux/arch/arm/lib/copypage-xscale.S 3 - * 4 - * Copyright (C) 2001 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - #include <linux/linkage.h> 11 - #include <linux/init.h> 12 - #include <asm/constants.h> 13 - 14 - /* 15 - * General note: 16 - * We don't really want write-allocate cache behaviour for these functions 17 - * since that will just eat through 8K of the cache. 18 - */ 19 - 20 - .text 21 - .align 5 22 - /* 23 - * XScale optimised copy_user_page 24 - * r0 = destination 25 - * r1 = source 26 - * r2 = virtual user address of ultimate destination page 27 - * 28 - * The source page may have some clean entries in the cache already, but we 29 - * can safely ignore them - break_cow() will flush them out of the cache 30 - * if we eventually end up using our copied page. 31 - * 32 - * What we could do is use the mini-cache to buffer reads from the source 33 - * page. We rely on the mini-cache being smaller than one page, so we'll 34 - * cycle through the complete cache anyway. 35 - */ 36 - ENTRY(xscale_mc_copy_user_page) 37 - stmfd sp!, {r4, r5, lr} 38 - mov r5, r0 39 - mov r0, r1 40 - bl map_page_minicache 41 - mov r1, r5 42 - mov lr, #PAGE_SZ/64-1 43 - 44 - /* 45 - * Strangely enough, best performance is achieved 46 - * when prefetching destination as well. 
(NP) 47 - */ 48 - pld [r0, #0] 49 - pld [r0, #32] 50 - pld [r1, #0] 51 - pld [r1, #32] 52 - 53 - 1: pld [r0, #64] 54 - pld [r0, #96] 55 - pld [r1, #64] 56 - pld [r1, #96] 57 - 58 - 2: ldrd r2, [r0], #8 59 - ldrd r4, [r0], #8 60 - mov ip, r1 61 - strd r2, [r1], #8 62 - ldrd r2, [r0], #8 63 - strd r4, [r1], #8 64 - ldrd r4, [r0], #8 65 - strd r2, [r1], #8 66 - strd r4, [r1], #8 67 - mcr p15, 0, ip, c7, c10, 1 @ clean D line 68 - ldrd r2, [r0], #8 69 - mcr p15, 0, ip, c7, c6, 1 @ invalidate D line 70 - ldrd r4, [r0], #8 71 - mov ip, r1 72 - strd r2, [r1], #8 73 - ldrd r2, [r0], #8 74 - strd r4, [r1], #8 75 - ldrd r4, [r0], #8 76 - strd r2, [r1], #8 77 - strd r4, [r1], #8 78 - mcr p15, 0, ip, c7, c10, 1 @ clean D line 79 - subs lr, lr, #1 80 - mcr p15, 0, ip, c7, c6, 1 @ invalidate D line 81 - bgt 1b 82 - beq 2b 83 - 84 - ldmfd sp!, {r4, r5, pc} 85 - 86 - .align 5 87 - /* 88 - * XScale optimised clear_user_page 89 - * r0 = destination 90 - * r1 = virtual user address of ultimate destination page 91 - */ 92 - ENTRY(xscale_mc_clear_user_page) 93 - mov r1, #PAGE_SZ/32 94 - mov r2, #0 95 - mov r3, #0 96 - 1: mov ip, r0 97 - strd r2, [r0], #8 98 - strd r2, [r0], #8 99 - strd r2, [r0], #8 100 - strd r2, [r0], #8 101 - mcr p15, 0, ip, c7, c10, 1 @ clean D line 102 - subs r1, r1, #1 103 - mcr p15, 0, ip, c7, c6, 1 @ invalidate D line 104 - bne 1b 105 - mov pc, lr 106 - 107 - __INITDATA 108 - 109 - .type xscale_mc_user_fns, #object 110 - ENTRY(xscale_mc_user_fns) 111 - .long xscale_mc_clear_user_page 112 - .long xscale_mc_copy_user_page 113 - .size xscale_mc_user_fns, . - xscale_mc_user_fns
+131
arch/arm/mm/copypage-xscale.c
··· 1 + /* 2 + * linux/arch/arm/lib/copypage-xscale.S 3 + * 4 + * Copyright (C) 1995-2005 Russell King 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * This handles the mini data cache, as found on SA11x0 and XScale 11 + * processors. When we copy a user page page, we map it in such a way 12 + * that accesses to this page will not touch the main data cache, but 13 + * will be cached in the mini data cache. This prevents us thrashing 14 + * the main data cache on page faults. 15 + */ 16 + #include <linux/init.h> 17 + #include <linux/mm.h> 18 + 19 + #include <asm/page.h> 20 + #include <asm/pgtable.h> 21 + #include <asm/tlbflush.h> 22 + 23 + /* 24 + * 0xffff8000 to 0xffffffff is reserved for any ARM architecture 25 + * specific hacks for copying pages efficiently. 26 + */ 27 + #define COPYPAGE_MINICACHE 0xffff8000 28 + 29 + #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 30 + L_PTE_CACHEABLE) 31 + 32 + #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) 33 + 34 + static DEFINE_SPINLOCK(minicache_lock); 35 + 36 + /* 37 + * XScale mini-dcache optimised copy_user_page 38 + * 39 + * We flush the destination cache lines just before we write the data into the 40 + * corresponding address. Since the Dcache is read-allocate, this removes the 41 + * Dcache aliasing issue. The writes will be forwarded to the write buffer, 42 + * and merged as appropriate. 43 + */ 44 + static void __attribute__((naked)) 45 + mc_copy_user_page(void *from, void *to) 46 + { 47 + /* 48 + * Strangely enough, best performance is achieved 49 + * when prefetching destination as well. 
(NP) 50 + */ 51 + asm volatile( 52 + "stmfd sp!, {r4, r5, lr} \n\ 53 + mov lr, %2 \n\ 54 + pld [r0, #0] \n\ 55 + pld [r0, #32] \n\ 56 + pld [r1, #0] \n\ 57 + pld [r1, #32] \n\ 58 + 1: pld [r0, #64] \n\ 59 + pld [r0, #96] \n\ 60 + pld [r1, #64] \n\ 61 + pld [r1, #96] \n\ 62 + 2: ldrd r2, [r0], #8 \n\ 63 + ldrd r4, [r0], #8 \n\ 64 + mov ip, r1 \n\ 65 + strd r2, [r1], #8 \n\ 66 + ldrd r2, [r0], #8 \n\ 67 + strd r4, [r1], #8 \n\ 68 + ldrd r4, [r0], #8 \n\ 69 + strd r2, [r1], #8 \n\ 70 + strd r4, [r1], #8 \n\ 71 + mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ 72 + ldrd r2, [r0], #8 \n\ 73 + mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ 74 + ldrd r4, [r0], #8 \n\ 75 + mov ip, r1 \n\ 76 + strd r2, [r1], #8 \n\ 77 + ldrd r2, [r0], #8 \n\ 78 + strd r4, [r1], #8 \n\ 79 + ldrd r4, [r0], #8 \n\ 80 + strd r2, [r1], #8 \n\ 81 + strd r4, [r1], #8 \n\ 82 + mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ 83 + subs lr, lr, #1 \n\ 84 + mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ 85 + bgt 1b \n\ 86 + beq 2b \n\ 87 + ldmfd sp!, {r4, r5, pc} " 88 + : 89 + : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); 90 + } 91 + 92 + void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) 93 + { 94 + spin_lock(&minicache_lock); 95 + 96 + set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot)); 97 + flush_tlb_kernel_page(COPYPAGE_MINICACHE); 98 + 99 + mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 100 + 101 + spin_unlock(&minicache_lock); 102 + } 103 + 104 + /* 105 + * XScale optimised clear_user_page 106 + */ 107 + void __attribute__((naked)) 108 + xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) 109 + { 110 + asm volatile( 111 + "mov r1, %0 \n\ 112 + mov r2, #0 \n\ 113 + mov r3, #0 \n\ 114 + 1: mov ip, r0 \n\ 115 + strd r2, [r0], #8 \n\ 116 + strd r2, [r0], #8 \n\ 117 + strd r2, [r0], #8 \n\ 118 + strd r2, [r0], #8 \n\ 119 + mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ 120 + subs r1, r1, #1 \n\ 121 + mcr p15, 0, ip, 
c7, c6, 1 @ invalidate D line\n\ 122 + bne 1b \n\ 123 + mov pc, lr" 124 + : 125 + : "I" (PAGE_SIZE / 32)); 126 + } 127 + 128 + struct cpu_user_fns xscale_mc_user_fns __initdata = { 129 + .cpu_clear_user_page = xscale_mc_clear_user_page, 130 + .cpu_copy_user_page = xscale_mc_copy_user_page, 131 + };
-73
arch/arm/mm/minicache.c
··· 1 - /* 2 - * linux/arch/arm/mm/minicache.c 3 - * 4 - * Copyright (C) 2001 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - * 10 - * This handles the mini data cache, as found on SA11x0 and XScale 11 - * processors. When we copy a user page page, we map it in such a way 12 - * that accesses to this page will not touch the main data cache, but 13 - * will be cached in the mini data cache. This prevents us thrashing 14 - * the main data cache on page faults. 15 - */ 16 - #include <linux/init.h> 17 - #include <linux/mm.h> 18 - 19 - #include <asm/page.h> 20 - #include <asm/pgtable.h> 21 - #include <asm/tlbflush.h> 22 - 23 - /* 24 - * 0xffff8000 to 0xffffffff is reserved for any ARM architecture 25 - * specific hacks for copying pages efficiently. 26 - */ 27 - #define minicache_address (0xffff8000) 28 - #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 29 - L_PTE_CACHEABLE) 30 - 31 - static pte_t *minicache_pte; 32 - 33 - /* 34 - * Note that this is intended to be called only from the copy_user_page 35 - * asm code; anything else will require special locking to prevent the 36 - * mini-cache space being re-used. (Note: probably preempt unsafe). 37 - * 38 - * We rely on the fact that the minicache is 2K, and we'll be pushing 39 - * 4K of data through it, so we don't actually have to specifically 40 - * flush the minicache when we change the mapping. 41 - * 42 - * Note also: assert(PAGE_OFFSET <= virt < high_memory). 43 - * Unsafe: preempt, kmap. 
44 - */ 45 - unsigned long map_page_minicache(unsigned long virt) 46 - { 47 - set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot)); 48 - flush_tlb_kernel_page(minicache_address); 49 - 50 - return minicache_address; 51 - } 52 - 53 - static int __init minicache_init(void) 54 - { 55 - pgd_t *pgd; 56 - pmd_t *pmd; 57 - 58 - spin_lock(&init_mm.page_table_lock); 59 - 60 - pgd = pgd_offset_k(minicache_address); 61 - pmd = pmd_alloc(&init_mm, pgd, minicache_address); 62 - if (!pmd) 63 - BUG(); 64 - minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address); 65 - if (!minicache_pte) 66 - BUG(); 67 - 68 - spin_unlock(&init_mm.page_table_lock); 69 - 70 - return 0; 71 - } 72 - 73 - core_initcall(minicache_init);
+1 -1
arch/i386/kernel/Makefile
··· 43 43 # Note: kbuild does not track this dependency due to usage of .incbin 44 44 $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so 45 45 targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so) 46 - targets += vsyscall.lds 46 + targets += vsyscall-note.o vsyscall.lds 47 47 48 48 # The DSO images are built using a special linker script. 49 49 quiet_cmd_syscall = SYSCALL $@
+6 -4
arch/ia64/kernel/module.c
··· 825 825 * XXX Should have an arch-hook for running this after final section 826 826 * addresses have been selected... 827 827 */ 828 - /* See if gp can cover the entire core module: */ 829 - uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2; 830 - if (mod->core_size >= MAX_LTOFF) 828 + uint64_t gp; 829 + if (mod->core_size > MAX_LTOFF) 831 830 /* 832 831 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated 833 832 * at the end of the module. 834 833 */ 835 - gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2; 834 + gp = mod->core_size - MAX_LTOFF / 2; 835 + else 836 + gp = mod->core_size / 2; 837 + gp = (uint64_t) mod->module_core + ((gp + 7) & -8); 836 838 mod->arch.gp = gp; 837 839 DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp); 838 840 }
+6
arch/ia64/kernel/ptrace.c
··· 635 635 { 636 636 struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); 637 637 638 + /* 639 + * Prevent migrating this task while 640 + * we're fiddling with the FPU state 641 + */ 642 + preempt_disable(); 638 643 if (ia64_is_local_fpu_owner(task) && psr->mfh) { 639 644 psr->mfh = 0; 640 645 task->thread.flags |= IA64_THREAD_FPH_VALID; 641 646 ia64_save_fpu(&task->thread.fph[0]); 642 647 } 648 + preempt_enable(); 643 649 } 644 650 645 651 /*
+2 -1
arch/ia64/kernel/setup.c
··· 720 720 ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); 721 721 722 722 /* 723 - * Initialize default control register to defer all speculative faults. The 723 + * Initialize default control register to defer speculative faults except 724 + * for those arising from TLB misses, which are not deferred. The 724 725 * kernel MUST NOT depend on a particular setting of these bits (in other words, 725 726 * the kernel must have recovery code for all speculative accesses). Turn on 726 727 * dcr.lc as per recommendation by the architecture team. Most IA-32 apps
+28 -1
arch/ia64/kernel/traps.c
··· 111 111 siginfo_t siginfo; 112 112 int sig, code; 113 113 114 + /* break.b always sets cr.iim to 0, which causes problems for 115 + * debuggers. Get the real break number from the original instruction, 116 + * but only for kernel code. User space break.b is left alone, to 117 + * preserve the existing behaviour. All break codings have the same 118 + * format, so there is no need to check the slot type. 119 + */ 120 + if (break_num == 0 && !user_mode(regs)) { 121 + struct ia64_psr *ipsr = ia64_psr(regs); 122 + unsigned long *bundle = (unsigned long *)regs->cr_iip; 123 + unsigned long slot; 124 + switch (ipsr->ri) { 125 + case 0: slot = (bundle[0] >> 5); break; 126 + case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break; 127 + default: slot = (bundle[1] >> 23); break; 128 + } 129 + break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff); 130 + } 131 + 114 132 /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */ 115 133 siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); 116 134 siginfo.si_imm = break_num; ··· 220 202 221 203 /* first, grant user-level access to fph partition: */ 222 204 psr->dfh = 0; 205 + 206 + /* 207 + * Make sure that no other task gets in on this processor 208 + * while we're claiming the FPU 209 + */ 210 + preempt_disable(); 223 211 #ifndef CONFIG_SMP 224 212 { 225 213 struct task_struct *fpu_owner 226 214 = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER); 227 215 228 - if (ia64_is_local_fpu_owner(current)) 216 + if (ia64_is_local_fpu_owner(current)) { 217 + preempt_enable_no_resched(); 229 218 return; 219 + } 230 220 231 221 if (fpu_owner) 232 222 ia64_flush_fph(fpu_owner); ··· 252 226 */ 253 227 psr->mfh = 1; 254 228 } 229 + preempt_enable_no_resched(); 255 230 } 256 231 257 232 static inline int
+17 -2
arch/ia64/mm/init.c
··· 305 305 struct page *page; 306 306 307 307 /* 308 - * Map the gate page twice: once read-only to export the ELF headers etc. and once 309 - * execute-only page to enable privilege-promotion via "epc": 308 + * Map the gate page twice: once read-only to export the ELF 309 + * headers etc. and once execute-only page to enable 310 + * privilege-promotion via "epc": 310 311 */ 311 312 page = virt_to_page(ia64_imva(__start_gate_section)); 312 313 put_kernel_page(page, GATE_ADDR, PAGE_READONLY); ··· 316 315 put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); 317 316 #else 318 317 put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); 318 + /* Fill in the holes (if any) with read-only zero pages: */ 319 + { 320 + unsigned long addr; 321 + 322 + for (addr = GATE_ADDR + PAGE_SIZE; 323 + addr < GATE_ADDR + PERCPU_PAGE_SIZE; 324 + addr += PAGE_SIZE) 325 + { 326 + put_kernel_page(ZERO_PAGE(0), addr, 327 + PAGE_READONLY); 328 + put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE, 329 + PAGE_READONLY); 330 + } 331 + } 319 332 #endif 320 333 ia64_patch_gate(); 321 334 }
+2 -2
arch/ia64/sn/kernel/setup.c
··· 222 222 223 223 extern int platform_intr_list[]; 224 224 extern nasid_t master_nasid; 225 - static int shub_1_1_found __initdata; 225 + static int __initdata shub_1_1_found = 0; 226 226 227 227 /* 228 228 * sn_check_for_wars ··· 251 251 } else { 252 252 for_each_online_node(cnode) { 253 253 if (is_shub_1_1(cnodeid_to_nasid(cnode))) 254 - sn_hub_info->shub_1_1_found = 1; 254 + shub_1_1_found = 1; 255 255 } 256 256 } 257 257 }
+22
arch/ppc/kernel/cputable.c
··· 838 838 .icache_bsize = 32, 839 839 .dcache_bsize = 32, 840 840 }, 841 + { /* 405EP */ 842 + .pvr_mask = 0xffff0000, 843 + .pvr_value = 0x51210000, 844 + .cpu_name = "405EP", 845 + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 846 + CPU_FTR_USE_TB, 847 + .cpu_user_features = PPC_FEATURE_32 | 848 + PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 849 + .icache_bsize = 32, 850 + .dcache_bsize = 32, 851 + }, 852 + { /* 405EP */ 853 + .pvr_mask = 0xffff0000, 854 + .pvr_value = 0x51210000, 855 + .cpu_name = "405EP", 856 + .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 857 + CPU_FTR_USE_TB, 858 + .cpu_user_features = PPC_FEATURE_32 | 859 + PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 860 + .icache_bsize = 32, 861 + .dcache_bsize = 32, 862 + }, 841 863 842 864 #endif /* CONFIG_40x */ 843 865 #ifdef CONFIG_44x
+3 -3
arch/ppc/kernel/misc.S
··· 619 619 _GLOBAL(flush_icache_range) 620 620 BEGIN_FTR_SECTION 621 621 blr /* for 601, do nothing */ 622 - END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE) 622 + END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 623 623 li r5,L1_CACHE_LINE_SIZE-1 624 624 andc r3,r3,r5 625 625 subf r4,r3,r4 ··· 736 736 _GLOBAL(__flush_dcache_icache) 737 737 BEGIN_FTR_SECTION 738 738 blr /* for 601, do nothing */ 739 - END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE) 739 + END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 740 740 rlwinm r3,r3,0,0,19 /* Get page base address */ 741 741 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ 742 742 mtctr r4 ··· 764 764 _GLOBAL(__flush_dcache_icache_phys) 765 765 BEGIN_FTR_SECTION 766 766 blr /* for 601, do nothing */ 767 - END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE) 767 + END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 768 768 mfmsr r10 769 769 rlwinm r0,r10,0,28,26 /* clear DR */ 770 770 mtmsr r0
+22 -6
arch/ppc64/boot/prom.c
··· 11 11 #include <linux/string.h> 12 12 #include <linux/ctype.h> 13 13 14 + extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor); 15 + 16 + /* The unnecessary pointer compare is there 17 + * to check for type safety (n must be 64bit) 18 + */ 19 + # define do_div(n,base) ({ \ 20 + __u32 __base = (base); \ 21 + __u32 __rem; \ 22 + (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \ 23 + if (((n) >> 32) == 0) { \ 24 + __rem = (__u32)(n) % __base; \ 25 + (n) = (__u32)(n) / __base; \ 26 + } else \ 27 + __rem = __div64_32(&(n), __base); \ 28 + __rem; \ 29 + }) 30 + 14 31 int (*prom)(void *); 15 32 16 33 void *chosen_handle; ··· 369 352 #define SPECIAL 32 /* 0x */ 370 353 #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ 371 354 372 - static char * number(char * str, long num, int base, int size, int precision, int type) 355 + static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) 373 356 { 374 357 char c,sign,tmp[66]; 375 358 const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; ··· 384 367 c = (type & ZEROPAD) ? '0' : ' '; 385 368 sign = 0; 386 369 if (type & SIGN) { 387 - if (num < 0) { 370 + if ((signed long long)num < 0) { 388 371 sign = '-'; 389 - num = -num; 372 + num = - (signed long long)num; 390 373 size--; 391 374 } else if (type & PLUS) { 392 375 sign = '+'; ··· 406 389 if (num == 0) 407 390 tmp[i++]='0'; 408 391 else while (num != 0) { 409 - tmp[i++] = digits[num % base]; 410 - num /= base; 392 + tmp[i++] = digits[do_div(num, base)]; 411 393 } 412 394 if (i > precision) 413 395 precision = i; ··· 442 426 int vsprintf(char *buf, const char *fmt, va_list args) 443 427 { 444 428 int len; 445 - unsigned long num; 429 + unsigned long long num; 446 430 int i, base; 447 431 char * str; 448 432 const char *s;
+10 -8
arch/ppc64/kernel/kprobes.c
··· 45 45 46 46 int arch_prepare_kprobe(struct kprobe *p) 47 47 { 48 + int ret = 0; 48 49 kprobe_opcode_t insn = *p->addr; 49 50 50 - if (IS_MTMSRD(insn) || IS_RFID(insn)) 51 - /* cannot put bp on RFID/MTMSRD */ 52 - return 1; 53 - return 0; 51 + if ((unsigned long)p->addr & 0x03) { 52 + printk("Attempt to register kprobe at an unaligned address\n"); 53 + ret = -EINVAL; 54 + } else if (IS_MTMSRD(insn) || IS_RFID(insn)) { 55 + printk("Cannot register a kprobe on rfid or mtmsrd\n"); 56 + ret = -EINVAL; 57 + } 58 + return ret; 54 59 } 55 60 56 61 void arch_copy_kprobe(struct kprobe *p) ··· 177 172 ret = emulate_step(regs, p->ainsn.insn[0]); 178 173 if (ret == 0) 179 174 regs->nip = (unsigned long)p->addr + 4; 180 - 181 - regs->msr &= ~MSR_SE; 182 175 } 183 176 184 177 static inline int post_kprobe_handler(struct pt_regs *regs) ··· 213 210 214 211 if (kprobe_status & KPROBE_HIT_SS) { 215 212 resume_execution(current_kprobe, regs); 213 + regs->msr &= ~MSR_SE; 216 214 regs->msr |= kprobe_saved_msr; 217 215 218 216 unlock_kprobes(); ··· 237 233 */ 238 234 preempt_disable(); 239 235 switch (val) { 240 - case DIE_IABR_MATCH: 241 - case DIE_DABR_MATCH: 242 236 case DIE_BPT: 243 237 if (kprobe_handler(args->regs)) 244 238 ret = NOTIFY_STOP;
+1 -1
arch/ppc64/kernel/misc.S
··· 792 792 .llong .compat_sys_newstat 793 793 .llong .compat_sys_newlstat 794 794 .llong .compat_sys_newfstat 795 - .llong .sys_uname 795 + .llong .sys32_uname 796 796 .llong .sys_ni_syscall /* 110 old iopl syscall */ 797 797 .llong .sys_vhangup 798 798 .llong .sys_ni_syscall /* old idle syscall */
+33 -37
arch/ppc64/kernel/sys_ppc32.c
··· 791 791 } 792 792 793 793 794 - asmlinkage int ppc64_newuname(struct new_utsname __user * name) 795 - { 796 - int errno = sys_newuname(name); 797 - 798 - if (current->personality == PER_LINUX32 && !errno) { 799 - if(copy_to_user(name->machine, "ppc\0\0", 8)) { 800 - errno = -EFAULT; 801 - } 802 - } 803 - return errno; 804 - } 805 - 806 - asmlinkage int ppc64_personality(unsigned long personality) 807 - { 808 - int ret; 809 - if (current->personality == PER_LINUX32 && personality == PER_LINUX) 810 - personality = PER_LINUX32; 811 - ret = sys_personality(personality); 812 - if (ret == PER_LINUX32) 813 - ret = PER_LINUX; 814 - return ret; 815 - } 816 - 817 - 818 - 819 794 /* Note: it is necessary to treat mode as an unsigned int, 820 795 * with the corresponding cast to a signed int to insure that the 821 796 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) ··· 1133 1158 } 1134 1159 #endif 1135 1160 1161 + asmlinkage int sys32_uname(struct old_utsname __user * name) 1162 + { 1163 + int err = 0; 1164 + 1165 + down_read(&uts_sem); 1166 + if (copy_to_user(name, &system_utsname, sizeof(*name))) 1167 + err = -EFAULT; 1168 + up_read(&uts_sem); 1169 + if (!err && personality(current->personality) == PER_LINUX32) { 1170 + /* change "ppc64" to "ppc" */ 1171 + if (__put_user(0, name->machine + 3) 1172 + || __put_user(0, name->machine + 4)) 1173 + err = -EFAULT; 1174 + } 1175 + return err; 1176 + } 1177 + 1136 1178 asmlinkage int sys32_olduname(struct oldold_utsname __user * name) 1137 1179 { 1138 1180 int error; 1139 - 1140 - if (!name) 1141 - return -EFAULT; 1181 + 1142 1182 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname))) 1143 1183 return -EFAULT; 1144 1184 1145 1185 down_read(&uts_sem); 1146 1186 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN); 1147 - error -= __put_user(0,name->sysname+__OLD_UTS_LEN); 1148 - error -= 
__copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN); 1149 - error -= __put_user(0,name->nodename+__OLD_UTS_LEN); 1150 - error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN); 1151 - error -= __put_user(0,name->release+__OLD_UTS_LEN); 1152 - error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN); 1153 - error -= __put_user(0,name->version+__OLD_UTS_LEN); 1154 - error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN); 1155 - error = __put_user(0,name->machine+__OLD_UTS_LEN); 1187 + error |= __put_user(0,name->sysname+__OLD_UTS_LEN); 1188 + error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN); 1189 + error |= __put_user(0,name->nodename+__OLD_UTS_LEN); 1190 + error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN); 1191 + error |= __put_user(0,name->release+__OLD_UTS_LEN); 1192 + error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN); 1193 + error |= __put_user(0,name->version+__OLD_UTS_LEN); 1194 + error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN); 1195 + error |= __put_user(0,name->machine+__OLD_UTS_LEN); 1196 + if (personality(current->personality) == PER_LINUX32) { 1197 + /* change "ppc64" to "ppc" */ 1198 + error |= __put_user(0, name->machine + 3); 1199 + error |= __put_user(0, name->machine + 4); 1200 + } 1201 + 1156 1202 up_read(&uts_sem); 1157 1203 1158 1204 error = error ? -EFAULT : 0;
+22 -13
arch/ppc64/kernel/syscalls.c
··· 199 199 return ret; 200 200 } 201 201 202 - static int __init set_fakeppc(char *str) 202 + long ppc64_personality(unsigned long personality) 203 203 { 204 - if (*str) 205 - return 0; 206 - init_task.personality = PER_LINUX32; 207 - return 1; 208 - } 209 - __setup("fakeppc", set_fakeppc); 204 + long ret; 210 205 211 - asmlinkage int sys_uname(struct old_utsname __user * name) 206 + if (personality(current->personality) == PER_LINUX32 207 + && personality == PER_LINUX) 208 + personality = PER_LINUX32; 209 + ret = sys_personality(personality); 210 + if (ret == PER_LINUX32) 211 + ret = PER_LINUX; 212 + return ret; 213 + } 214 + 215 + long ppc64_newuname(struct new_utsname __user * name) 212 216 { 213 - int err = -EFAULT; 214 - 217 + int err = 0; 218 + 215 219 down_read(&uts_sem); 216 - if (name && !copy_to_user(name, &system_utsname, sizeof (*name))) 217 - err = 0; 220 + if (copy_to_user(name, &system_utsname, sizeof(*name))) 221 + err = -EFAULT; 218 222 up_read(&uts_sem); 219 - 223 + if (!err && personality(current->personality) == PER_LINUX32) { 224 + /* change ppc64 to ppc */ 225 + if (__put_user(0, name->machine + 3) 226 + || __put_user(0, name->machine + 4)) 227 + err = -EFAULT; 228 + } 220 229 return err; 221 230 } 222 231
+6
arch/um/Kconfig_char
··· 204 204 http://sourceforge.net/projects/gkernel/). rngd periodically reads 205 205 /dev/hwrng and injects the entropy into /dev/random. 206 206 207 + config MMAPPER 208 + tristate "iomem emulation driver" 209 + help 210 + This driver allows a host file to be used as emulated IO memory inside 211 + UML. 212 + 207 213 endmenu 208 214
+17 -9
arch/um/drivers/chan_user.c
··· 143 143 { 144 144 struct winch_data data; 145 145 unsigned long stack; 146 - int fds[2], pid, n, err; 146 + int fds[2], n, err; 147 147 char c; 148 148 149 149 err = os_pipe(fds, 1, 1); 150 150 if(err < 0){ 151 151 printk("winch_tramp : os_pipe failed, err = %d\n", -err); 152 - return(err); 152 + goto out; 153 153 } 154 154 155 155 data = ((struct winch_data) { .pty_fd = fd, 156 156 .pipe_fd = fds[1], 157 157 .close_me = fds[0] } ); 158 - pid = run_helper_thread(winch_thread, &data, 0, &stack, 0); 159 - if(pid < 0){ 158 + err = run_helper_thread(winch_thread, &data, 0, &stack, 0); 159 + if(err < 0){ 160 160 printk("fork of winch_thread failed - errno = %d\n", errno); 161 - return(pid); 161 + goto out_close; 162 162 } 163 163 164 164 os_close_file(fds[1]); ··· 168 168 printk("winch_tramp : failed to read synchronization byte\n"); 169 169 printk("read failed, err = %d\n", -n); 170 170 printk("fd %d will not support SIGWINCH\n", fd); 171 - *fd_out = -1; 171 + err = -EINVAL; 172 + goto out_close1; 172 173 } 173 - return(pid); 174 + return err ; 175 + 176 + out_close: 177 + os_close_file(fds[1]); 178 + out_close1: 179 + os_close_file(fds[0]); 180 + out: 181 + return err; 174 182 } 175 183 176 184 void register_winch(int fd, struct tty_struct *tty) 177 185 { 178 - int pid, thread, thread_fd; 186 + int pid, thread, thread_fd = -1; 179 187 int count; 180 188 char c = 1; 181 189 ··· 194 186 if(!CHOOSE_MODE_PROC(is_tracer_winch, is_skas_winch, pid, fd, 195 187 tty) && (pid == -1)){ 196 188 thread = winch_tramp(fd, tty, &thread_fd); 197 - if(fd != -1){ 189 + if(thread > 0){ 198 190 register_winch_irq(thread_fd, fd, thread, tty); 199 191 200 192 count = os_write_file(thread_fd, &c, sizeof(c));
+20 -4
arch/um/drivers/mmapper_kern.c
··· 18 18 #include <linux/slab.h> 19 19 #include <linux/init.h> 20 20 #include <linux/smp_lock.h> 21 + #include <linux/miscdevice.h> 21 22 #include <asm/uaccess.h> 22 23 #include <asm/irq.h> 23 24 #include <asm/pgtable.h> ··· 118 117 .release = mmapper_release, 119 118 }; 120 119 120 + static struct miscdevice mmapper_dev = { 121 + .minor = MISC_DYNAMIC_MINOR, 122 + .name = "mmapper", 123 + .fops = &mmapper_fops 124 + }; 125 + 121 126 static int __init mmapper_init(void) 122 127 { 128 + int err; 129 + 123 130 printk(KERN_INFO "Mapper v0.1\n"); 124 131 125 132 v_buf = (char *) find_iomem("mmapper", &mmapper_size); 126 133 if(mmapper_size == 0){ 127 134 printk(KERN_ERR "mmapper_init - find_iomem failed\n"); 128 - return(0); 135 + goto out; 136 + } 137 + 138 + err = misc_register(&mmapper_dev); 139 + if(err){ 140 + printk(KERN_ERR "mmapper - misc_register failed, err = %d\n", 141 + err); 142 + goto out; 129 143 } 130 144 131 145 p_buf = __pa(v_buf); 132 - 133 - devfs_mk_cdev(MKDEV(30, 0), S_IFCHR|S_IRUGO|S_IWUGO, "mmapper"); 134 - return(0); 146 + out: 147 + return 0; 135 148 } 136 149 137 150 static void mmapper_exit(void) 138 151 { 152 + misc_deregister(&mmapper_dev); 139 153 } 140 154 141 155 module_init(mmapper_init);
+1 -1
arch/um/drivers/net_user.c
··· 32 32 return(0); 33 33 } 34 34 35 - void tap_check_ips(char *gate_addr, char *eth_addr) 35 + void tap_check_ips(char *gate_addr, unsigned char *eth_addr) 36 36 { 37 37 int tap_addr[4]; 38 38
+2 -2
arch/um/drivers/slip.h
··· 12 12 char *addr; 13 13 char *gate_addr; 14 14 int slave; 15 - char ibuf[ENC_BUF_SIZE]; 16 - char obuf[ENC_BUF_SIZE]; 15 + unsigned char ibuf[ENC_BUF_SIZE]; 16 + unsigned char obuf[ENC_BUF_SIZE]; 17 17 int more; /* more data: do not read fd until ibuf has been drained */ 18 18 int pos; 19 19 int esc;
+2 -1
arch/um/drivers/slip_proto.h
··· 12 12 #define SLIP_ESC_END 0334 /* ESC ESC_END means END 'data' */ 13 13 #define SLIP_ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */ 14 14 15 - static inline int slip_unesc(unsigned char c,char *buf,int *pos, int *esc) 15 + static inline int slip_unesc(unsigned char c, unsigned char *buf, int *pos, 16 + int *esc) 16 17 { 17 18 int ret; 18 19
+2 -2
arch/um/drivers/slirp.h
··· 24 24 struct arg_list_dummy_wrapper argw; 25 25 int pid; 26 26 int slave; 27 - char ibuf[ENC_BUF_SIZE]; 28 - char obuf[ENC_BUF_SIZE]; 27 + unsigned char ibuf[ENC_BUF_SIZE]; 28 + unsigned char obuf[ENC_BUF_SIZE]; 29 29 int more; /* more data: do not read fd until ibuf has been drained */ 30 30 int pos; 31 31 int esc;
+3 -3
arch/um/drivers/stderr_console.c
··· 22 22 } 23 23 24 24 static struct console stderr_console = { 25 - .name "stderr", 26 - .write stderr_console_write, 27 - .flags CON_PRINTBUFFER, 25 + .name = "stderr", 26 + .write = stderr_console_write, 27 + .flags = CON_PRINTBUFFER, 28 28 }; 29 29 30 30 static int __init stderr_console_init(void)
+1 -1
arch/um/include/mconsole.h
··· 56 56 int as_interrupt; 57 57 58 58 int originating_fd; 59 - int originlen; 59 + unsigned int originlen; 60 60 unsigned char origin[128]; /* sockaddr_un */ 61 61 62 62 struct mconsole_request request;
+1 -1
arch/um/include/net_user.h
··· 35 35 extern void free_output_buffer(void *buffer); 36 36 37 37 extern int tap_open_common(void *dev, char *gate_addr); 38 - extern void tap_check_ips(char *gate_addr, char *eth_addr); 38 + extern void tap_check_ips(char *gate_addr, unsigned char *eth_addr); 39 39 40 40 extern void read_output(int fd, char *output_out, int len); 41 41
+1 -1
arch/um/include/os.h
··· 136 136 extern int os_open_file(char *file, struct openflags flags, int mode); 137 137 extern int os_read_file(int fd, void *buf, int len); 138 138 extern int os_write_file(int fd, const void *buf, int count); 139 - extern int os_file_size(char *file, long long *size_out); 139 + extern int os_file_size(char *file, unsigned long long *size_out); 140 140 extern int os_file_modtime(char *file, unsigned long *modtime); 141 141 extern int os_pipe(int *fd, int stream, int close_on_exec); 142 142 extern int os_set_fd_async(int fd, int owner);
-3
arch/um/include/user_util.h
··· 41 41 extern char host_info[]; 42 42 43 43 extern char saved_command_line[]; 44 - extern char command_line[]; 45 - 46 - extern char *tempdir; 47 44 48 45 extern unsigned long _stext, _etext, _sdata, _edata, __bss_start, _end; 49 46 extern unsigned long _unprotected_end;
+6 -1
arch/um/kernel/skas/process_kern.c
··· 68 68 * 0 if it just exits 69 69 */ 70 70 n = run_kernel_thread(fn, arg, &current->thread.exec_buf); 71 - if(n == 1) 71 + if(n == 1){ 72 + /* Handle any immediate reschedules or signals */ 73 + interrupt_end(); 72 74 userspace(&current->thread.regs.regs); 75 + } 73 76 else do_exit(0); 74 77 } 75 78 ··· 99 96 schedule_tail(current->thread.prev_sched); 100 97 current->thread.prev_sched = NULL; 101 98 99 + /* Handle any immediate reschedules or signals */ 100 + interrupt_end(); 102 101 userspace(&current->thread.regs.regs); 103 102 } 104 103
+5 -1
arch/um/os-Linux/elf_aux.c
··· 45 45 elf_aux_hwcap = auxv->a_un.a_val; 46 46 break; 47 47 case AT_PLATFORM: 48 - elf_aux_platform = auxv->a_un.a_ptr; 48 + /* elf.h removed the pointer elements from 49 + * a_un, so we have to use a_val, which is 50 + * all that's left. 51 + */ 52 + elf_aux_platform = (char *) auxv->a_un.a_val; 49 53 break; 50 54 case AT_PAGESZ: 51 55 page_size = auxv->a_un.a_val;
+1 -1
arch/um/os-Linux/file.c
··· 363 363 (int (*)(int, void *, int)) write, copy_to_user_proc)); 364 364 } 365 365 366 - int os_file_size(char *file, long long *size_out) 366 + int os_file_size(char *file, unsigned long long *size_out) 367 367 { 368 368 struct uml_stat buf; 369 369 int err;
+11 -30
arch/x86_64/kernel/aperture.c
··· 33 33 34 34 int fix_aperture __initdata = 1; 35 35 36 - #define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16)) 36 + /* This code runs before the PCI subsystem is initialized, so just 37 + access the northbridge directly. */ 37 38 38 - static struct resource aper_res = { 39 - .name = "Aperture", 40 - .flags = IORESOURCE_MEM, 41 - }; 39 + #define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16)) 42 40 43 41 static u32 __init allocate_aperture(void) 44 42 { ··· 53 55 aper_size = (32 * 1024 * 1024) << fallback_aper_order; 54 56 55 57 /* 56 - * Aperture has to be naturally aligned. This means an 2GB 57 - * aperture won't have much chances to find a place in the 58 - * lower 4GB of memory. Unfortunately we cannot move it up 59 - * because that would make the IOMMU useless. 58 + * Aperture has to be naturally aligned. This means an 2GB aperture won't 59 + * have much chances to find a place in the lower 4GB of memory. 60 + * Unfortunately we cannot move it up because that would make the 61 + * IOMMU useless. 60 62 */ 61 - 62 - /* First try to find some free unused space */ 63 - if (!allocate_resource(&iomem_resource, &aper_res, 64 - aper_size, 65 - 0, 0xffffffff, 66 - aper_size, 67 - NULL, NULL)) { 68 - printk(KERN_INFO "Putting aperture at %lx-%lx\n", 69 - aper_res.start, aper_res.end); 70 - return aper_res.start; 71 - } 72 - 73 - /* No free space found. Go on to waste some memory... */ 74 63 p = __alloc_bootmem_node(nd0, aper_size, aper_size, 0); 75 64 if (!p || __pa(p)+aper_size > 0xffffffff) { 76 65 printk("Cannot allocate aperture memory hole (%p,%uK)\n", ··· 66 81 free_bootmem_node(nd0, (unsigned long)p, aper_size); 67 82 return 0; 68 83 } 69 - printk("Mapping aperture over %d KB of precious RAM @ %lx\n", 84 + printk("Mapping aperture over %d KB of RAM @ %lx\n", 70 85 aper_size >> 10, __pa(p)); 71 86 return (u32)__pa(p); 72 87 } ··· 87 102 printk("Aperture from %s pointing to e820 RAM. 
Ignoring.\n",name); 88 103 return 0; 89 104 } 90 - /* Don't check the resource here because the aperture is usually 91 - in an e820 reserved area, and we allocated these earlier. */ 92 105 return 1; 93 106 } 94 107 95 - /* 96 - * Find a PCI capability. 97 - * This code runs before the PCI subsystem is initialized, so just 98 - * access the northbridge directly. 99 - */ 108 + /* Find a PCI capability */ 100 109 static __u32 __init find_cap(int num, int slot, int func, int cap) 101 110 { 102 111 u8 pos; ··· 255 276 fallback_aper_force) { 256 277 printk("Your BIOS doesn't leave a aperture memory hole\n"); 257 278 printk("Please enable the IOMMU option in the BIOS setup\n"); 279 + printk("This costs you %d MB of RAM\n", 280 + 32 << fallback_aper_order); 258 281 259 282 aper_order = fallback_aper_order; 260 283 aper_alloc = allocate_aperture();
+2 -2
drivers/block/ub.c
··· 51 51 * This many LUNs per USB device. 52 52 * Every one of them takes a host, see UB_MAX_HOSTS. 53 53 */ 54 - #define UB_MAX_LUNS 4 54 + #define UB_MAX_LUNS 9 55 55 56 56 /* 57 57 */ ··· 2100 2100 nluns = rc; 2101 2101 break; 2102 2102 } 2103 - mdelay(100); 2103 + msleep(100); 2104 2104 } 2105 2105 2106 2106 for (i = 0; i < nluns; i++) {
+2
drivers/char/agp/agp.h
··· 278 278 #define AGP_GENERIC_SIZES_ENTRIES 11 279 279 extern struct aper_size_info_16 agp3_generic_sizes[]; 280 280 281 + #define virt_to_gart(x) (phys_to_gart(virt_to_phys(x))) 282 + #define gart_to_virt(x) (phys_to_virt(gart_to_phys(x))) 281 283 282 284 extern int agp_off; 283 285 extern int agp_try_unsupported_boot;
+2 -2
drivers/char/agp/ali-agp.c
··· 150 150 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); 151 151 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 152 152 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | 153 - virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN )); 153 + virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN )); 154 154 return addr; 155 155 } 156 156 ··· 174 174 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); 175 175 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, 176 176 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | 177 - virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN)); 177 + virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN)); 178 178 agp_generic_destroy_page(addr); 179 179 } 180 180
+3 -3
drivers/char/agp/amd-k7-agp.c
··· 43 43 44 44 SetPageReserved(virt_to_page(page_map->real)); 45 45 global_cache_flush(); 46 - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 46 + page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 47 47 PAGE_SIZE); 48 48 if (page_map->remapped == NULL) { 49 49 ClearPageReserved(virt_to_page(page_map->real)); ··· 154 154 155 155 agp_bridge->gatt_table_real = (u32 *)page_dir.real; 156 156 agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; 157 - agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); 157 + agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real); 158 158 159 159 /* Get the address for the gart region. 160 160 * This is a bus address even on the alpha, b/c its ··· 167 167 168 168 /* Calculate the agp offset */ 169 169 for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { 170 - writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1, 170 + writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1, 171 171 page_dir.remapped+GET_PAGE_DIR_OFF(addr)); 172 172 readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ 173 173 }
+2 -2
drivers/char/agp/amd64-agp.c
··· 219 219 220 220 static int amd_8151_configure(void) 221 221 { 222 - unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); 222 + unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real); 223 223 224 224 /* Configure AGP regs in each x86-64 host bridge. */ 225 225 for_each_nb() { ··· 591 591 { 592 592 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 593 593 594 - release_mem_region(virt_to_phys(bridge->gatt_table_real), 594 + release_mem_region(virt_to_gart(bridge->gatt_table_real), 595 595 amd64_aperture_sizes[bridge->aperture_size_idx].size); 596 596 agp_remove_bridge(bridge); 597 597 agp_put_bridge(bridge);
+3 -3
drivers/char/agp/ati-agp.c
··· 61 61 62 62 SetPageReserved(virt_to_page(page_map->real)); 63 63 err = map_page_into_agp(virt_to_page(page_map->real)); 64 - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 64 + page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 65 65 PAGE_SIZE); 66 66 if (page_map->remapped == NULL || err) { 67 67 ClearPageReserved(virt_to_page(page_map->real)); ··· 343 343 344 344 agp_bridge->gatt_table_real = (u32 *)page_dir.real; 345 345 agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; 346 - agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real); 346 + agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real); 347 347 348 348 /* Write out the size register */ 349 349 current_size = A_SIZE_LVL2(agp_bridge->current_size); ··· 373 373 374 374 /* Calculate the agp offset */ 375 375 for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { 376 - writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1, 376 + writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1, 377 377 page_dir.remapped+GET_PAGE_DIR_OFF(addr)); 378 378 readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ 379 379 }
+3 -3
drivers/char/agp/backend.c
··· 148 148 return -ENOMEM; 149 149 } 150 150 151 - bridge->scratch_page_real = virt_to_phys(addr); 151 + bridge->scratch_page_real = virt_to_gart(addr); 152 152 bridge->scratch_page = 153 153 bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0); 154 154 } ··· 189 189 err_out: 190 190 if (bridge->driver->needs_scratch_page) 191 191 bridge->driver->agp_destroy_page( 192 - phys_to_virt(bridge->scratch_page_real)); 192 + gart_to_virt(bridge->scratch_page_real)); 193 193 if (got_gatt) 194 194 bridge->driver->free_gatt_table(bridge); 195 195 if (got_keylist) { ··· 214 214 if (bridge->driver->agp_destroy_page && 215 215 bridge->driver->needs_scratch_page) 216 216 bridge->driver->agp_destroy_page( 217 - phys_to_virt(bridge->scratch_page_real)); 217 + gart_to_virt(bridge->scratch_page_real)); 218 218 } 219 219 220 220 /* When we remove the global variable agp_bridge from all drivers
+1 -1
drivers/char/agp/efficeon-agp.c
··· 219 219 220 220 efficeon_private.l1_table[index] = page; 221 221 222 - value = __pa(page) | pati | present | index; 222 + value = virt_to_gart(page) | pati | present | index; 223 223 224 224 pci_write_config_dword(agp_bridge->dev, 225 225 EFFICEON_ATTPAGE, value);
+11 -25
drivers/char/agp/generic.c
··· 153 153 } 154 154 if (curr->page_count != 0) { 155 155 for (i = 0; i < curr->page_count; i++) { 156 - curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i])); 156 + curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i])); 157 157 } 158 158 } 159 159 agp_free_key(curr->key); ··· 209 209 agp_free_memory(new); 210 210 return NULL; 211 211 } 212 - new->memory[i] = virt_to_phys(addr); 212 + new->memory[i] = virt_to_gart(addr); 213 213 new->page_count++; 214 214 } 215 215 new->bridge = bridge; ··· 295 295 EXPORT_SYMBOL_GPL(agp_num_entries); 296 296 297 297 298 - static int check_bridge_mode(struct pci_dev *dev) 299 - { 300 - u32 agp3; 301 - u8 cap_ptr; 302 - 303 - cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP); 304 - pci_read_config_dword(dev, cap_ptr+AGPSTAT, &agp3); 305 - if (agp3 & AGPSTAT_MODE_3_0) 306 - return 1; 307 - return 0; 308 - } 309 - 310 - 311 298 /** 312 299 * agp_copy_info - copy bridge state information 313 300 * ··· 315 328 info->version.minor = bridge->version->minor; 316 329 info->chipset = SUPPORTED; 317 330 info->device = bridge->dev; 318 - if (check_bridge_mode(bridge->dev)) 331 + if (bridge->mode & AGPSTAT_MODE_3_0) 319 332 info->mode = bridge->mode & ~AGP3_RESERVED_MASK; 320 333 else 321 334 info->mode = bridge->mode & ~AGP2_RESERVED_MASK; ··· 648 661 bridge_agpstat &= ~AGPSTAT_FW; 649 662 650 663 /* Check to see if we are operating in 3.0 mode */ 651 - if (check_bridge_mode(agp_bridge->dev)) 664 + if (agp_bridge->mode & AGPSTAT_MODE_3_0) 652 665 agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); 653 666 else 654 667 agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); ··· 719 732 720 733 /* Do AGP version specific frobbing. */ 721 734 if (bridge->major_version >= 3) { 722 - if (check_bridge_mode(bridge->dev)) { 735 + if (bridge->mode & AGPSTAT_MODE_3_0) { 723 736 /* If we have 3.5, we can do the isoch stuff. 
*/ 724 737 if (bridge->minor_version >= 5) 725 738 agp_3_5_enable(bridge); ··· 793 806 break; 794 807 } 795 808 796 - table = (char *) __get_free_pages(GFP_KERNEL, 797 - page_order); 809 + table = alloc_gatt_pages(page_order); 798 810 799 811 if (table == NULL) { 800 812 i++; ··· 824 838 size = ((struct aper_size_info_fixed *) temp)->size; 825 839 page_order = ((struct aper_size_info_fixed *) temp)->page_order; 826 840 num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; 827 - table = (char *) __get_free_pages(GFP_KERNEL, page_order); 841 + table = alloc_gatt_pages(page_order); 828 842 } 829 843 830 844 if (table == NULL) ··· 839 853 agp_gatt_table = (void *)table; 840 854 841 855 bridge->driver->cache_flush(); 842 - bridge->gatt_table = ioremap_nocache(virt_to_phys(table), 856 + bridge->gatt_table = ioremap_nocache(virt_to_gart(table), 843 857 (PAGE_SIZE * (1 << page_order))); 844 858 bridge->driver->cache_flush(); 845 859 ··· 847 861 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 848 862 ClearPageReserved(page); 849 863 850 - free_pages((unsigned long) table, page_order); 864 + free_gatt_pages(table, page_order); 851 865 852 866 return -ENOMEM; 853 867 } 854 - bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real); 868 + bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real); 855 869 856 870 /* AK: bogus, should encode addresses > 4GB */ 857 871 for (i = 0; i < num_entries; i++) { ··· 905 919 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 906 920 ClearPageReserved(page); 907 921 908 - free_pages((unsigned long) bridge->gatt_table_real, page_order); 922 + free_gatt_pages(bridge->gatt_table_real, page_order); 909 923 910 924 agp_gatt_table = NULL; 911 925 bridge->gatt_table = NULL;
+2 -2
drivers/char/agp/hp-agp.c
··· 110 110 hp->gart_size = HP_ZX1_GART_SIZE; 111 111 hp->gatt_entries = hp->gart_size / hp->io_page_size; 112 112 113 - hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); 113 + hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); 114 114 hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; 115 115 116 116 if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { ··· 248 248 agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); 249 249 250 250 if (hp->io_pdir_owner) { 251 - writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); 251 + writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); 252 252 readl(hp->ioc_regs+HP_ZX1_PDIR_BASE); 253 253 writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG); 254 254 readl(hp->ioc_regs+HP_ZX1_TCNFG);
+2 -2
drivers/char/agp/i460-agp.c
··· 372 372 } 373 373 memset(lp->alloced_map, 0, map_size); 374 374 375 - lp->paddr = virt_to_phys(lpage); 375 + lp->paddr = virt_to_gart(lpage); 376 376 lp->refcount = 0; 377 377 atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); 378 378 return 0; ··· 383 383 kfree(lp->alloced_map); 384 384 lp->alloced_map = NULL; 385 385 386 - free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); 386 + free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); 387 387 atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); 388 388 } 389 389
+16 -5
drivers/char/agp/intel-agp.c
··· 286 286 if (new == NULL) 287 287 return NULL; 288 288 289 - new->memory[0] = virt_to_phys(addr); 289 + new->memory[0] = virt_to_gart(addr); 290 290 if (pg_count == 4) { 291 291 /* kludge to get 4 physical pages for ARGB cursor */ 292 292 new->memory[1] = new->memory[0] + PAGE_SIZE; ··· 329 329 agp_free_key(curr->key); 330 330 if(curr->type == AGP_PHYS_MEMORY) { 331 331 if (curr->page_count == 4) 332 - i8xx_destroy_pages(phys_to_virt(curr->memory[0])); 332 + i8xx_destroy_pages(gart_to_virt(curr->memory[0])); 333 333 else 334 334 agp_bridge->driver->agp_destroy_page( 335 - phys_to_virt(curr->memory[0])); 335 + gart_to_virt(curr->memory[0])); 336 336 vfree(curr->memory); 337 337 } 338 338 kfree(curr); ··· 418 418 case I915_GMCH_GMS_STOLEN_48M: 419 419 /* Check it's really I915G */ 420 420 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || 421 - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) 421 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || 422 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB) 422 423 gtt_entries = MB(48) - KB(size); 423 424 else 424 425 gtt_entries = 0; ··· 427 426 case I915_GMCH_GMS_STOLEN_64M: 428 427 /* Check it's really I915G */ 429 428 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || 430 - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) 429 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || 430 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB) 431 431 gtt_entries = MB(64) - KB(size); 432 432 else 433 433 gtt_entries = 0; ··· 1664 1662 } 1665 1663 name = "915GM"; 1666 1664 break; 1665 + case PCI_DEVICE_ID_INTEL_82945G_HB: 1666 + if (find_i830(PCI_DEVICE_ID_INTEL_82945G_IG)) { 1667 + bridge->driver = &intel_915_driver; 1668 + } else { 1669 + bridge->driver = &intel_845_driver; 1670 + } 1671 + name = "945G"; 1672 + break; 1667 1673 case PCI_DEVICE_ID_INTEL_7505_0: 1668 1674 bridge->driver = &intel_7505_driver; 1669 1675 name = "E7505"; ··· 1811 
1801 ID(PCI_DEVICE_ID_INTEL_7205_0), 1812 1802 ID(PCI_DEVICE_ID_INTEL_82915G_HB), 1813 1803 ID(PCI_DEVICE_ID_INTEL_82915GM_HB), 1804 + ID(PCI_DEVICE_ID_INTEL_82945G_HB), 1814 1805 { } 1815 1806 }; 1816 1807
+9 -3
drivers/char/agp/sgi-agp.c
··· 133 133 off_t j; 134 134 void *temp; 135 135 struct agp_bridge_data *bridge; 136 + u64 *table; 136 137 137 138 bridge = mem->bridge; 138 139 if (!bridge) 139 140 return -EINVAL; 141 + 142 + table = (u64 *)bridge->gatt_table; 140 143 141 144 temp = bridge->current_size; 142 145 ··· 178 175 j = pg_start; 179 176 180 177 while (j < (pg_start + mem->page_count)) { 181 - if (*(bridge->gatt_table + j)) 178 + if (table[j]) 182 179 return -EBUSY; 183 180 j++; 184 181 } ··· 189 186 } 190 187 191 188 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 192 - *(bridge->gatt_table + j) = 189 + table[j] = 193 190 bridge->driver->mask_memory(bridge, mem->memory[i], 194 191 mem->type); 195 192 } ··· 203 200 { 204 201 size_t i; 205 202 struct agp_bridge_data *bridge; 203 + u64 *table; 206 204 207 205 bridge = mem->bridge; 208 206 if (!bridge) ··· 213 209 return -EINVAL; 214 210 } 215 211 212 + table = (u64 *)bridge->gatt_table; 213 + 216 214 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 217 - *(bridge->gatt_table + i) = 0; 215 + table[i] = 0; 218 216 } 219 217 220 218 bridge->driver->tlb_flush(mem);
+4 -4
drivers/char/agp/sworks-agp.c
··· 51 51 } 52 52 SetPageReserved(virt_to_page(page_map->real)); 53 53 global_cache_flush(); 54 - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 54 + page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 55 55 PAGE_SIZE); 56 56 if (page_map->remapped == NULL) { 57 57 ClearPageReserved(virt_to_page(page_map->real)); ··· 162 162 /* Create a fake scratch directory */ 163 163 for(i = 0; i < 1024; i++) { 164 164 writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i); 165 - writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); 165 + writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); 166 166 } 167 167 168 168 retval = serverworks_create_gatt_pages(value->num_entries / 1024); ··· 174 174 175 175 agp_bridge->gatt_table_real = (u32 *)page_dir.real; 176 176 agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; 177 - agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); 177 + agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real); 178 178 179 179 /* Get the address for the gart region. 180 180 * This is a bus address even on the alpha, b/c its ··· 187 187 /* Calculate the agp offset */ 188 188 189 189 for(i = 0; i < value->num_entries / 1024; i++) 190 - writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); 190 + writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); 191 191 192 192 return 0; 193 193 }
+1 -1
drivers/char/agp/uninorth-agp.c
··· 407 407 408 408 bridge->gatt_table_real = (u32 *) table; 409 409 bridge->gatt_table = (u32 *)table; 410 - bridge->gatt_bus_addr = virt_to_phys(table); 410 + bridge->gatt_bus_addr = virt_to_gart(table); 411 411 412 412 for (i = 0; i < num_entries; i++) 413 413 bridge->gatt_table[i] = 0;
+13 -25
drivers/char/mxser.c
··· 1995 1995 unsigned char ch, gdl; 1996 1996 int ignored = 0; 1997 1997 int cnt = 0; 1998 - unsigned char *cp; 1999 - char *fp; 2000 - int count; 2001 1998 int recv_room; 2002 1999 int max = 256; 2003 2000 unsigned long flags; ··· 2007 2010 mxser_stoprx(tty); 2008 2011 //return; 2009 2012 } 2010 - 2011 - cp = tty->flip.char_buf; 2012 - fp = tty->flip.flag_buf; 2013 - count = 0; 2014 2013 2015 2014 // following add by Victor Yu. 09-02-2002 2016 2015 if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) { ··· 2034 2041 } 2035 2042 while (gdl--) { 2036 2043 ch = inb(info->base + UART_RX); 2037 - count++; 2038 - *cp++ = ch; 2039 - *fp++ = 0; 2044 + tty_insert_flip_char(tty, ch, 0); 2040 2045 cnt++; 2041 2046 /* 2042 - if((count>=HI_WATER) && (info->stop_rx==0)){ 2047 + if((cnt>=HI_WATER) && (info->stop_rx==0)){ 2043 2048 mxser_stoprx(tty); 2044 2049 info->stop_rx=1; 2045 2050 break; ··· 2052 2061 if (max-- < 0) 2053 2062 break; 2054 2063 /* 2055 - if((count>=HI_WATER) && (info->stop_rx==0)){ 2064 + if((cnt>=HI_WATER) && (info->stop_rx==0)){ 2056 2065 mxser_stoprx(tty); 2057 2066 info->stop_rx=1; 2058 2067 break; ··· 2069 2078 if (++ignored > 100) 2070 2079 break; 2071 2080 } else { 2072 - count++; 2081 + char flag = 0; 2073 2082 if (*status & UART_LSR_SPECIAL) { 2074 2083 if (*status & UART_LSR_BI) { 2075 - *fp++ = TTY_BREAK; 2084 + flag = TTY_BREAK; 2076 2085 /* added by casper 1/11/2000 */ 2077 2086 info->icount.brk++; 2078 - 2079 2087 /* */ 2080 2088 if (info->flags & ASYNC_SAK) 2081 2089 do_SAK(tty); 2082 2090 } else if (*status & UART_LSR_PE) { 2083 - *fp++ = TTY_PARITY; 2091 + flag = TTY_PARITY; 2084 2092 /* added by casper 1/11/2000 */ 2085 2093 info->icount.parity++; 2086 2094 /* */ 2087 2095 } else if (*status & UART_LSR_FE) { 2088 - *fp++ = TTY_FRAME; 2096 + flag = TTY_FRAME; 2089 2097 /* added by casper 1/11/2000 */ 2090 2098 info->icount.frame++; 2091 2099 /* */ 2092 2100 } else if (*status & UART_LSR_OE) { 2093 - *fp++ = TTY_OVERRUN; 2101 + flag = 
TTY_OVERRUN; 2094 2102 /* added by casper 1/11/2000 */ 2095 2103 info->icount.overrun++; 2096 2104 /* */ 2097 - } else 2098 - *fp++ = 0; 2099 - } else 2100 - *fp++ = 0; 2101 - *cp++ = ch; 2105 + } 2106 + } 2107 + tty_insert_flip_char(tty, ch, flag); 2102 2108 cnt++; 2103 2109 if (cnt >= recv_room) { 2104 2110 if (!info->ldisc_stop_rx) { ··· 2120 2132 // above add by Victor Yu. 09-02-2002 2121 2133 } while (*status & UART_LSR_DR); 2122 2134 2123 - end_intr: // add by Victor Yu. 09-02-2002 2135 + end_intr: // add by Victor Yu. 09-02-2002 2124 2136 2125 2137 mxvar_log.rxcnt[info->port] += cnt; 2126 2138 info->mon_data.rxcnt += cnt; 2127 2139 info->mon_data.up_rxcnt += cnt; 2128 2140 spin_unlock_irqrestore(&info->slock, flags); 2129 - 2141 + 2130 2142 tty_flip_buffer_push(tty); 2131 2143 } 2132 2144
+1 -1
drivers/input/keyboard/atkbd.c
··· 54 54 module_param_named(softraw, atkbd_softraw, bool, 0); 55 55 MODULE_PARM_DESC(softraw, "Use software generated rawmode"); 56 56 57 - static int atkbd_scroll = 1; 57 + static int atkbd_scroll = 0; 58 58 module_param_named(scroll, atkbd_scroll, bool, 0); 59 59 MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards"); 60 60
+3
drivers/md/dm-mpath.c
··· 985 985 if (!error) 986 986 return 0; /* I/O complete */ 987 987 988 + if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio)) 989 + return error; 990 + 988 991 spin_lock(&m->lock); 989 992 if (!m->nr_valid_paths) { 990 993 if (!m->queue_if_no_path || m->suspended) {
+34 -5
drivers/net/tg3.c
··· 7 7 * Copyright (C) 2005 Broadcom Corporation. 8 8 * 9 9 * Firmware is: 10 - * Copyright (C) 2000-2003 Broadcom Corporation. 10 + * Derived from proprietary unpublished source code, 11 + * Copyright (C) 2000-2003 Broadcom Corporation. 12 + * 13 + * Permission is hereby granted for the distribution of this firmware 14 + * data in hexadecimal or equivalent format, provided this copyright 15 + * notice is accompanying it. 11 16 */ 12 17 13 18 #include <linux/config.h> ··· 66 61 67 62 #define DRV_MODULE_NAME "tg3" 68 63 #define PFX DRV_MODULE_NAME ": " 69 - #define DRV_MODULE_VERSION "3.29" 70 - #define DRV_MODULE_RELDATE "May 23, 2005" 64 + #define DRV_MODULE_VERSION "3.31" 65 + #define DRV_MODULE_RELDATE "June 8, 2005" 71 66 72 67 #define TG3_DEF_MAC_MODE 0 73 68 #define TG3_DEF_RX_MODE 0 ··· 8560 8555 8561 8556 case NIC_SRAM_DATA_CFG_LED_MODE_MAC: 8562 8557 tp->led_ctrl = LED_CTRL_MODE_MAC; 8558 + 8559 + /* Default to PHY_1_MODE if 0 (MAC_MODE) is 8560 + * read on some older 5700/5701 bootcode. 8561 + */ 8562 + if (GET_ASIC_REV(tp->pci_chip_rev_id) == 8563 + ASIC_REV_5700 || 8564 + GET_ASIC_REV(tp->pci_chip_rev_id) == 8565 + ASIC_REV_5701) 8566 + tp->led_ctrl = LED_CTRL_MODE_PHY_1; 8567 + 8563 8568 break; 8564 8569 8565 8570 case SHASTA_EXT_LED_SHARED: ··· 9695 9680 } 9696 9681 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 9697 9682 DMA_RWCTRL_WRITE_BNDRY_16) { 9683 + static struct pci_device_id dma_wait_state_chipsets[] = { 9684 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 9685 + PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, 9686 + { }, 9687 + }; 9688 + 9698 9689 /* DMA test passed without adjusting DMA boundary, 9699 - * just restore the calculated DMA boundary 9690 + * now look for chipsets that are known to expose the 9691 + * DMA bug without failing the test. 
9700 9692 */ 9701 - tp->dma_rwctrl = saved_dma_rwctrl; 9693 + if (pci_dev_present(dma_wait_state_chipsets)) { 9694 + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 9695 + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 9696 + } 9697 + else 9698 + /* Safe to use the calculated DMA boundary. */ 9699 + tp->dma_rwctrl = saved_dma_rwctrl; 9700 + 9702 9701 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 9703 9702 } 9704 9703
+2
drivers/pci/hotplug/cpci_hotplug_core.c
··· 217 217 kfree(slot->hotplug_slot->info); 218 218 kfree(slot->hotplug_slot->name); 219 219 kfree(slot->hotplug_slot); 220 + if (slot->dev) 221 + pci_dev_put(slot->dev); 220 222 kfree(slot); 221 223 } 222 224
+4 -1
drivers/pci/hotplug/cpci_hotplug_pci.c
··· 315 315 PCI_DEVFN(PCI_SLOT(slot->devfn), i)); 316 316 if (dev) { 317 317 pci_remove_bus_device(dev); 318 - slot->dev = NULL; 318 + pci_dev_put(dev); 319 319 } 320 320 } 321 + pci_dev_put(slot->dev); 322 + slot->dev = NULL; 323 + 321 324 dbg("%s - exit", __FUNCTION__); 322 325 return 0; 323 326 }
+1
drivers/pci/pci.ids
··· 7173 7173 080f Sentry5 DDR/SDR RAM Controller 7174 7174 0811 Sentry5 External Interface Core 7175 7175 0816 BCM3302 Sentry5 MIPS32 CPU 7176 + 1600 NetXtreme BCM5752 Gigabit Ethernet PCI Express 7176 7177 1644 NetXtreme BCM5700 Gigabit Ethernet 7177 7178 1014 0277 Broadcom Vigil B5700 1000Base-T 7178 7179 1028 00d1 Broadcom BCM5700
+13 -22
drivers/pci/quirks.c
··· 460 460 461 461 462 462 /* 463 - * Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip 464 - * devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature: 465 - * when written, it makes an internal connection to the PIC. 466 - * For these devices, this register is defined to be 4 bits wide. 467 - * Normally this is fine. However for IO-APIC motherboards, or 468 - * non-x86 architectures (yes Via exists on PPC among other places), 469 - * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get 470 - * interrupts delivered properly. 471 - */ 472 - 473 - /* 474 463 * FIXME: it is questionable that quirk_via_acpi 475 464 * is needed. It shows up as an ISA bridge, and does not 476 465 * support the PCI_INTERRUPT_LINE register at all. Therefore ··· 481 492 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi ); 482 493 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi ); 483 494 484 - static void quirk_via_irqpic(struct pci_dev *dev) 495 + /* 496 + * Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip 497 + * devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature: 498 + * when written, it makes an internal connection to the PIC. 499 + * For these devices, this register is defined to be 4 bits wide. 500 + * Normally this is fine. However for IO-APIC motherboards, or 501 + * non-x86 architectures (yes Via exists on PPC among other places), 502 + * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get 503 + * interrupts delivered properly. 
504 + */ 505 + static void quirk_via_irq(struct pci_dev *dev) 485 506 { 486 507 u8 irq, new_irq; 487 508 488 - #ifdef CONFIG_X86_IO_APIC 489 - if (nr_ioapics && !skip_ioapic_setup) 490 - return; 491 - #endif 492 - #ifdef CONFIG_ACPI 493 - if (acpi_irq_model != ACPI_IRQ_MODEL_PIC) 494 - return; 495 - #endif 496 509 new_irq = dev->irq & 0xf; 497 510 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); 498 511 if (new_irq != irq) { 499 - printk(KERN_INFO "PCI: Via PIC IRQ fixup for %s, from %d to %d\n", 512 + printk(KERN_INFO "PCI: Via IRQ fixup for %s, from %d to %d\n", 500 513 pci_name(dev), irq, new_irq); 501 514 udelay(15); /* unknown if delay really needed */ 502 515 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); 503 516 } 504 517 } 505 - DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irqpic); 518 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irq); 506 519 507 520 /* 508 521 * PIIX3 USB: We have to disable USB interrupts that are
+1 -3
drivers/scsi/libata-core.c
··· 2577 2577 next_sg: 2578 2578 sg = &qc->sg[qc->cursg]; 2579 2579 2580 - next_page: 2581 2580 page = sg->page; 2582 2581 offset = sg->offset + qc->cursg_ofs; 2583 2582 ··· 2584 2585 page = nth_page(page, (offset >> PAGE_SHIFT)); 2585 2586 offset %= PAGE_SIZE; 2586 2587 2588 + /* don't overrun current sg */ 2587 2589 count = min(sg->length - qc->cursg_ofs, bytes); 2588 2590 2589 2591 /* don't cross page boundaries */ ··· 2609 2609 kunmap(page); 2610 2610 2611 2611 if (bytes) { 2612 - if (qc->cursg_ofs < sg->length) 2613 - goto next_page; 2614 2612 goto next_sg; 2615 2613 } 2616 2614 }
+7 -1
drivers/scsi/sata_sil.c
··· 432 432 writeb(cls, mmio_base + SIL_FIFO_R0); 433 433 writeb(cls, mmio_base + SIL_FIFO_W0); 434 434 writeb(cls, mmio_base + SIL_FIFO_R1); 435 - writeb(cls, mmio_base + SIL_FIFO_W2); 435 + writeb(cls, mmio_base + SIL_FIFO_W1); 436 + if (ent->driver_data == sil_3114) { 437 + writeb(cls, mmio_base + SIL_FIFO_R2); 438 + writeb(cls, mmio_base + SIL_FIFO_W2); 439 + writeb(cls, mmio_base + SIL_FIFO_R3); 440 + writeb(cls, mmio_base + SIL_FIFO_W3); 441 + } 436 442 } else 437 443 printk(KERN_WARNING DRV_NAME "(%s): cache line size not set. Driver may not function\n", 438 444 pci_name(pdev));
+1 -1
drivers/serial/sa1100.c
··· 197 197 sa1100_rx_chars(struct sa1100_port *sport, struct pt_regs *regs) 198 198 { 199 199 struct tty_struct *tty = sport->port.info->tty; 200 - unsigned int status, ch, flg, ignored = 0; 200 + unsigned int status, ch, flg; 201 201 202 202 status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | 203 203 UTSR0_TO_SM(UART_GET_UTSR0(sport));
+91 -27
drivers/usb/serial/ftdi_sio.c
··· 264 264 /* 265 265 * Version Information 266 266 */ 267 - #define DRIVER_VERSION "v1.4.1" 267 + #define DRIVER_VERSION "v1.4.2" 268 268 #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" 269 269 #define DRIVER_DESC "USB FTDI Serial Converters Driver" 270 270 ··· 687 687 char prev_status, diff_status; /* Used for TIOCMIWAIT */ 688 688 __u8 rx_flags; /* receive state flags (throttling) */ 689 689 spinlock_t rx_lock; /* spinlock for receive state */ 690 + struct work_struct rx_work; 691 + int rx_processed; 690 692 691 693 __u16 interface; /* FT2232C port interface (0 for FT232/245) */ 692 694 ··· 719 717 static int ftdi_chars_in_buffer (struct usb_serial_port *port); 720 718 static void ftdi_write_bulk_callback (struct urb *urb, struct pt_regs *regs); 721 719 static void ftdi_read_bulk_callback (struct urb *urb, struct pt_regs *regs); 722 - static void ftdi_process_read (struct usb_serial_port *port); 720 + static void ftdi_process_read (void *param); 723 721 static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); 724 722 static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); 725 723 static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); ··· 1389 1387 port->read_urb->transfer_buffer_length = BUFSZ; 1390 1388 } 1391 1389 1390 + INIT_WORK(&priv->rx_work, ftdi_process_read, port); 1391 + 1392 1392 /* Free port's existing write urb and transfer buffer. 
*/ 1393 1393 if (port->write_urb) { 1394 1394 usb_free_urb (port->write_urb); ··· 1621 1617 spin_unlock_irqrestore(&priv->rx_lock, flags); 1622 1618 1623 1619 /* Start reading from the device */ 1620 + priv->rx_processed = 0; 1624 1621 usb_fill_bulk_urb(port->read_urb, dev, 1625 1622 usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress), 1626 1623 port->read_urb->transfer_buffer, port->read_urb->transfer_buffer_length, ··· 1672 1667 err("Error from RTS LOW urb"); 1673 1668 } 1674 1669 } /* Note change no line if hupcl is off */ 1670 + 1671 + /* cancel any scheduled reading */ 1672 + cancel_delayed_work(&priv->rx_work); 1673 + flush_scheduled_work(); 1675 1674 1676 1675 /* shutdown our bulk read */ 1677 1676 if (port->read_urb) ··· 1871 1862 return; 1872 1863 } 1873 1864 1874 - /* If throttled, delay receive processing until unthrottled. */ 1875 - spin_lock(&priv->rx_lock); 1876 - if (priv->rx_flags & THROTTLED) { 1877 - dbg("Deferring read urb processing until unthrottled"); 1878 - priv->rx_flags |= ACTUALLY_THROTTLED; 1879 - spin_unlock(&priv->rx_lock); 1880 - return; 1881 - } 1882 - spin_unlock(&priv->rx_lock); 1883 - 1884 1865 ftdi_process_read(port); 1885 1866 1886 1867 } /* ftdi_read_bulk_callback */ 1887 1868 1888 1869 1889 - static void ftdi_process_read (struct usb_serial_port *port) 1870 + static void ftdi_process_read (void *param) 1890 1871 { /* ftdi_process_read */ 1872 + struct usb_serial_port *port = (struct usb_serial_port*)param; 1891 1873 struct urb *urb; 1892 1874 struct tty_struct *tty; 1893 1875 struct ftdi_private *priv; ··· 1889 1889 int result; 1890 1890 int need_flip; 1891 1891 int packet_offset; 1892 + unsigned long flags; 1892 1893 1893 1894 dbg("%s - port %d", __FUNCTION__, port->number); 1894 1895 ··· 1916 1915 1917 1916 data = urb->transfer_buffer; 1918 1917 1919 - /* The first two bytes of every read packet are status */ 1920 - if (urb->actual_length > 2) { 1921 - usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, 
data); 1918 + if (priv->rx_processed) { 1919 + dbg("%s - already processed: %d bytes, %d remain", __FUNCTION__, 1920 + priv->rx_processed, 1921 + urb->actual_length - priv->rx_processed); 1922 1922 } else { 1923 - dbg("Status only: %03oo %03oo",data[0],data[1]); 1924 - } 1923 + /* The first two bytes of every read packet are status */ 1924 + if (urb->actual_length > 2) { 1925 + usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data); 1926 + } else { 1927 + dbg("Status only: %03oo %03oo",data[0],data[1]); 1928 + } 1929 + } 1925 1930 1926 1931 1927 1932 /* TO DO -- check for hung up line and handle appropriately: */ ··· 1936 1929 /* if CD is dropped and the line is not CLOCAL then we should hangup */ 1937 1930 1938 1931 need_flip = 0; 1939 - for (packet_offset=0; packet_offset < urb->actual_length; packet_offset += PKTSZ) { 1932 + for (packet_offset = priv->rx_processed; packet_offset < urb->actual_length; packet_offset += PKTSZ) { 1933 + int length; 1934 + 1940 1935 /* Compare new line status to the old one, signal if different */ 1936 + /* N.B. packet may be processed more than once, but differences 1937 + * are only processed once. 
*/ 1941 1938 if (priv != NULL) { 1942 1939 char new_status = data[packet_offset+0] & FTDI_STATUS_B0_MASK; 1943 1940 if (new_status != priv->prev_status) { ··· 1949 1938 wake_up_interruptible(&priv->delta_msr_wait); 1950 1939 priv->prev_status = new_status; 1951 1940 } 1941 + } 1942 + 1943 + length = min(PKTSZ, urb->actual_length-packet_offset)-2; 1944 + if (length < 0) { 1945 + err("%s - bad packet length: %d", __FUNCTION__, length+2); 1946 + length = 0; 1947 + } 1948 + 1949 + /* have to make sure we don't overflow the buffer 1950 + with tty_insert_flip_char's */ 1951 + if (tty->flip.count+length > TTY_FLIPBUF_SIZE) { 1952 + tty_flip_buffer_push(tty); 1953 + need_flip = 0; 1954 + 1955 + if (tty->flip.count != 0) { 1956 + /* flip didn't work, this happens when ftdi_process_read() is 1957 + * called from ftdi_unthrottle, because TTY_DONT_FLIP is set */ 1958 + dbg("%s - flip buffer push failed", __FUNCTION__); 1959 + break; 1960 + } 1961 + } 1962 + if (priv->rx_flags & THROTTLED) { 1963 + dbg("%s - throttled", __FUNCTION__); 1964 + break; 1965 + } 1966 + if (tty->ldisc.receive_room(tty)-tty->flip.count < length) { 1967 + /* break out & wait for throttling/unthrottling to happen */ 1968 + dbg("%s - receive room low", __FUNCTION__); 1969 + break; 1952 1970 } 1953 1971 1954 1972 /* Handle errors and break */ ··· 2002 1962 error_flag = TTY_FRAME; 2003 1963 dbg("FRAMING error"); 2004 1964 } 2005 - if (urb->actual_length > packet_offset + 2) { 2006 - for (i = 2; (i < PKTSZ) && ((i+packet_offset) < urb->actual_length); ++i) { 2007 - /* have to make sure we don't overflow the buffer 2008 - with tty_insert_flip_char's */ 2009 - if(tty->flip.count >= TTY_FLIPBUF_SIZE) { 2010 - tty_flip_buffer_push(tty); 2011 - } 1965 + if (length > 0) { 1966 + for (i = 2; i < length+2; i++) { 2012 1967 /* Note that the error flag is duplicated for 2013 1968 every character received since we don't know 2014 1969 which character it applied to */ ··· 2039 2004 if (need_flip) { 2040 2005 
tty_flip_buffer_push(tty); 2041 2006 } 2007 + 2008 + if (packet_offset < urb->actual_length) { 2009 + /* not completely processed - record progress */ 2010 + priv->rx_processed = packet_offset; 2011 + dbg("%s - incomplete, %d bytes processed, %d remain", 2012 + __FUNCTION__, packet_offset, 2013 + urb->actual_length - packet_offset); 2014 + /* check if we were throttled while processing */ 2015 + spin_lock_irqsave(&priv->rx_lock, flags); 2016 + if (priv->rx_flags & THROTTLED) { 2017 + priv->rx_flags |= ACTUALLY_THROTTLED; 2018 + spin_unlock_irqrestore(&priv->rx_lock, flags); 2019 + dbg("%s - deferring remainder until unthrottled", 2020 + __FUNCTION__); 2021 + return; 2022 + } 2023 + spin_unlock_irqrestore(&priv->rx_lock, flags); 2024 + /* if the port is closed stop trying to read */ 2025 + if (port->open_count > 0){ 2026 + /* delay processing of remainder */ 2027 + schedule_delayed_work(&priv->rx_work, 1); 2028 + } else { 2029 + dbg("%s - port is closed", __FUNCTION__); 2030 + } 2031 + return; 2032 + } 2033 + 2034 + /* urb is completely processed */ 2035 + priv->rx_processed = 0; 2042 2036 2043 2037 /* if the port is closed stop trying to read */ 2044 2038 if (port->open_count > 0){ ··· 2508 2444 spin_unlock_irqrestore(&priv->rx_lock, flags); 2509 2445 2510 2446 if (actually_throttled) 2511 - ftdi_process_read(port); 2447 + schedule_work(&priv->rx_work); 2512 2448 } 2513 2449 2514 2450 static int __init ftdi_init (void)
+3 -3
fs/binfmt_flat.c
··· 520 520 DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); 521 521 522 522 down_write(&current->mm->mmap_sem); 523 - textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, 0, 0); 523 + textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_SHARED, 0); 524 524 up_write(&current->mm->mmap_sem); 525 525 if (!textpos || textpos >= (unsigned long) -4096) { 526 526 if (!textpos) ··· 532 532 down_write(&current->mm->mmap_sem); 533 533 realdatastart = do_mmap(0, 0, data_len + extra + 534 534 MAX_SHARED_LIBS * sizeof(unsigned long), 535 - PROT_READ|PROT_WRITE|PROT_EXEC, 0, 0); 535 + PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); 536 536 up_write(&current->mm->mmap_sem); 537 537 538 538 if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) { ··· 574 574 down_write(&current->mm->mmap_sem); 575 575 textpos = do_mmap(0, 0, text_len + data_len + extra + 576 576 MAX_SHARED_LIBS * sizeof(unsigned long), 577 - PROT_READ | PROT_EXEC | PROT_WRITE, 0, 0); 577 + PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); 578 578 up_write(&current->mm->mmap_sem); 579 579 if (!textpos || textpos >= (unsigned long) -4096) { 580 580 if (!textpos)
+89 -70
fs/namei.c
··· 493 493 return PTR_ERR(link); 494 494 } 495 495 496 - static inline int __do_follow_link(struct dentry *dentry, struct nameidata *nd) 496 + struct path { 497 + struct vfsmount *mnt; 498 + struct dentry *dentry; 499 + }; 500 + 501 + static inline int __do_follow_link(struct path *path, struct nameidata *nd) 497 502 { 498 503 int error; 504 + struct dentry *dentry = path->dentry; 499 505 500 - touch_atime(nd->mnt, dentry); 506 + touch_atime(path->mnt, dentry); 501 507 nd_set_link(nd, NULL); 508 + 509 + if (path->mnt == nd->mnt) 510 + mntget(path->mnt); 502 511 error = dentry->d_inode->i_op->follow_link(dentry, nd); 503 512 if (!error) { 504 513 char *s = nd_get_link(nd); ··· 516 507 if (dentry->d_inode->i_op->put_link) 517 508 dentry->d_inode->i_op->put_link(dentry, nd); 518 509 } 510 + dput(dentry); 511 + mntput(path->mnt); 519 512 520 513 return error; 521 514 } ··· 529 518 * Without that kind of total limit, nasty chains of consecutive 530 519 * symlinks can cause almost arbitrarily long lookups. 
531 520 */ 532 - static inline int do_follow_link(struct dentry *dentry, struct nameidata *nd) 521 + static inline int do_follow_link(struct path *path, struct nameidata *nd) 533 522 { 534 523 int err = -ELOOP; 535 524 if (current->link_count >= MAX_NESTED_LINKS) ··· 538 527 goto loop; 539 528 BUG_ON(nd->depth >= MAX_NESTED_LINKS); 540 529 cond_resched(); 541 - err = security_inode_follow_link(dentry, nd); 530 + err = security_inode_follow_link(path->dentry, nd); 542 531 if (err) 543 532 goto loop; 544 533 current->link_count++; 545 534 current->total_link_count++; 546 535 nd->depth++; 547 - err = __do_follow_link(dentry, nd); 536 + err = __do_follow_link(path, nd); 548 537 current->link_count--; 549 538 nd->depth--; 550 539 return err; 551 540 loop: 541 + dput(path->dentry); 542 + if (path->mnt != nd->mnt) 543 + mntput(path->mnt); 552 544 path_release(nd); 553 545 return err; 554 546 } ··· 579 565 /* no need for dcache_lock, as serialization is taken care in 580 566 * namespace.c 581 567 */ 582 - static int follow_mount(struct vfsmount **mnt, struct dentry **dentry) 568 + static int __follow_mount(struct path *path) 583 569 { 584 570 int res = 0; 585 - while (d_mountpoint(*dentry)) { 586 - struct vfsmount *mounted = lookup_mnt(*mnt, *dentry); 571 + while (d_mountpoint(path->dentry)) { 572 + struct vfsmount *mounted = lookup_mnt(path->mnt, path->dentry); 587 573 if (!mounted) 588 574 break; 589 - mntput(*mnt); 590 - *mnt = mounted; 591 - dput(*dentry); 592 - *dentry = dget(mounted->mnt_root); 575 + dput(path->dentry); 576 + if (res) 577 + mntput(path->mnt); 578 + path->mnt = mounted; 579 + path->dentry = dget(mounted->mnt_root); 593 580 res = 1; 594 581 } 595 582 return res; 596 583 } 597 584 585 + static void follow_mount(struct vfsmount **mnt, struct dentry **dentry) 586 + { 587 + while (d_mountpoint(*dentry)) { 588 + struct vfsmount *mounted = lookup_mnt(*mnt, *dentry); 589 + if (!mounted) 590 + break; 591 + dput(*dentry); 592 + mntput(*mnt); 593 + *mnt = 
mounted; 594 + *dentry = dget(mounted->mnt_root); 595 + } 596 + } 597 + 598 598 /* no need for dcache_lock, as serialization is taken care in 599 599 * namespace.c 600 600 */ 601 - static inline int __follow_down(struct vfsmount **mnt, struct dentry **dentry) 601 + int follow_down(struct vfsmount **mnt, struct dentry **dentry) 602 602 { 603 603 struct vfsmount *mounted; 604 604 605 605 mounted = lookup_mnt(*mnt, *dentry); 606 606 if (mounted) { 607 + dput(*dentry); 607 608 mntput(*mnt); 608 609 *mnt = mounted; 609 - dput(*dentry); 610 610 *dentry = dget(mounted->mnt_root); 611 611 return 1; 612 612 } 613 613 return 0; 614 614 } 615 615 616 - int follow_down(struct vfsmount **mnt, struct dentry **dentry) 617 - { 618 - return __follow_down(mnt,dentry); 619 - } 620 - 621 - static inline void follow_dotdot(struct vfsmount **mnt, struct dentry **dentry) 616 + static inline void follow_dotdot(struct nameidata *nd) 622 617 { 623 618 while(1) { 624 619 struct vfsmount *parent; 625 - struct dentry *old = *dentry; 620 + struct dentry *old = nd->dentry; 626 621 627 622 read_lock(&current->fs->lock); 628 - if (*dentry == current->fs->root && 629 - *mnt == current->fs->rootmnt) { 623 + if (nd->dentry == current->fs->root && 624 + nd->mnt == current->fs->rootmnt) { 630 625 read_unlock(&current->fs->lock); 631 626 break; 632 627 } 633 628 read_unlock(&current->fs->lock); 634 629 spin_lock(&dcache_lock); 635 - if (*dentry != (*mnt)->mnt_root) { 636 - *dentry = dget((*dentry)->d_parent); 630 + if (nd->dentry != nd->mnt->mnt_root) { 631 + nd->dentry = dget(nd->dentry->d_parent); 637 632 spin_unlock(&dcache_lock); 638 633 dput(old); 639 634 break; 640 635 } 641 636 spin_unlock(&dcache_lock); 642 637 spin_lock(&vfsmount_lock); 643 - parent = (*mnt)->mnt_parent; 644 - if (parent == *mnt) { 638 + parent = nd->mnt->mnt_parent; 639 + if (parent == nd->mnt) { 645 640 spin_unlock(&vfsmount_lock); 646 641 break; 647 642 } 648 643 mntget(parent); 649 - *dentry = dget((*mnt)->mnt_mountpoint); 
644 + nd->dentry = dget(nd->mnt->mnt_mountpoint); 650 645 spin_unlock(&vfsmount_lock); 651 646 dput(old); 652 - mntput(*mnt); 653 - *mnt = parent; 647 + mntput(nd->mnt); 648 + nd->mnt = parent; 654 649 } 655 - follow_mount(mnt, dentry); 650 + follow_mount(&nd->mnt, &nd->dentry); 656 651 } 657 - 658 - struct path { 659 - struct vfsmount *mnt; 660 - struct dentry *dentry; 661 - }; 662 652 663 653 /* 664 654 * It's more convoluted than I'd like it to be, but... it's still fairly ··· 682 664 done: 683 665 path->mnt = mnt; 684 666 path->dentry = dentry; 667 + __follow_mount(path); 685 668 return 0; 686 669 687 670 need_lookup: ··· 770 751 case 2: 771 752 if (this.name[1] != '.') 772 753 break; 773 - follow_dotdot(&nd->mnt, &nd->dentry); 754 + follow_dotdot(nd); 774 755 inode = nd->dentry->d_inode; 775 756 /* fallthrough */ 776 757 case 1: ··· 790 771 err = do_lookup(nd, &this, &next); 791 772 if (err) 792 773 break; 793 - /* Check mountpoints.. */ 794 - follow_mount(&next.mnt, &next.dentry); 795 774 796 775 err = -ENOENT; 797 776 inode = next.dentry->d_inode; ··· 800 783 goto out_dput; 801 784 802 785 if (inode->i_op->follow_link) { 803 - mntget(next.mnt); 804 - err = do_follow_link(next.dentry, nd); 805 - dput(next.dentry); 806 - mntput(next.mnt); 786 + err = do_follow_link(&next, nd); 807 787 if (err) 808 788 goto return_err; 809 789 err = -ENOENT; ··· 812 798 break; 813 799 } else { 814 800 dput(nd->dentry); 801 + if (nd->mnt != next.mnt) 802 + mntput(nd->mnt); 815 803 nd->mnt = next.mnt; 816 804 nd->dentry = next.dentry; 817 805 } ··· 835 819 case 2: 836 820 if (this.name[1] != '.') 837 821 break; 838 - follow_dotdot(&nd->mnt, &nd->dentry); 822 + follow_dotdot(nd); 839 823 inode = nd->dentry->d_inode; 840 824 /* fallthrough */ 841 825 case 1: ··· 849 833 err = do_lookup(nd, &this, &next); 850 834 if (err) 851 835 break; 852 - follow_mount(&next.mnt, &next.dentry); 853 836 inode = next.dentry->d_inode; 854 837 if ((lookup_flags & LOOKUP_FOLLOW) 855 838 && inode && 
inode->i_op && inode->i_op->follow_link) { 856 - mntget(next.mnt); 857 - err = do_follow_link(next.dentry, nd); 858 - dput(next.dentry); 859 - mntput(next.mnt); 839 + err = do_follow_link(&next, nd); 860 840 if (err) 861 841 goto return_err; 862 842 inode = nd->dentry->d_inode; 863 843 } else { 864 844 dput(nd->dentry); 845 + if (nd->mnt != next.mnt) 846 + mntput(nd->mnt); 865 847 nd->mnt = next.mnt; 866 848 nd->dentry = next.dentry; 867 849 } ··· 899 885 return 0; 900 886 out_dput: 901 887 dput(next.dentry); 888 + if (nd->mnt != next.mnt) 889 + mntput(next.mnt); 902 890 break; 903 891 } 904 892 path_release(nd); ··· 1414 1398 int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) 1415 1399 { 1416 1400 int acc_mode, error = 0; 1417 - struct dentry *dentry; 1401 + struct path path; 1418 1402 struct dentry *dir; 1419 1403 int count = 0; 1420 1404 ··· 1458 1442 dir = nd->dentry; 1459 1443 nd->flags &= ~LOOKUP_PARENT; 1460 1444 down(&dir->d_inode->i_sem); 1461 - dentry = __lookup_hash(&nd->last, nd->dentry, nd); 1445 + path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); 1446 + path.mnt = nd->mnt; 1462 1447 1463 1448 do_last: 1464 - error = PTR_ERR(dentry); 1465 - if (IS_ERR(dentry)) { 1449 + error = PTR_ERR(path.dentry); 1450 + if (IS_ERR(path.dentry)) { 1466 1451 up(&dir->d_inode->i_sem); 1467 1452 goto exit; 1468 1453 } 1469 1454 1470 1455 /* Negative dentry, just create the file */ 1471 - if (!dentry->d_inode) { 1456 + if (!path.dentry->d_inode) { 1472 1457 if (!IS_POSIXACL(dir->d_inode)) 1473 1458 mode &= ~current->fs->umask; 1474 - error = vfs_create(dir->d_inode, dentry, mode, nd); 1459 + error = vfs_create(dir->d_inode, path.dentry, mode, nd); 1475 1460 up(&dir->d_inode->i_sem); 1476 1461 dput(nd->dentry); 1477 - nd->dentry = dentry; 1462 + nd->dentry = path.dentry; 1478 1463 if (error) 1479 1464 goto exit; 1480 1465 /* Don't check for write permission, don't truncate */ ··· 1493 1476 if (flag & O_EXCL) 1494 1477 goto exit_dput; 1495 
1478 1496 - if (d_mountpoint(dentry)) { 1479 + if (__follow_mount(&path)) { 1497 1480 error = -ELOOP; 1498 1481 if (flag & O_NOFOLLOW) 1499 1482 goto exit_dput; 1500 - while (__follow_down(&nd->mnt,&dentry) && d_mountpoint(dentry)); 1501 1483 } 1502 1484 error = -ENOENT; 1503 - if (!dentry->d_inode) 1485 + if (!path.dentry->d_inode) 1504 1486 goto exit_dput; 1505 - if (dentry->d_inode->i_op && dentry->d_inode->i_op->follow_link) 1487 + if (path.dentry->d_inode->i_op && path.dentry->d_inode->i_op->follow_link) 1506 1488 goto do_link; 1507 1489 1508 1490 dput(nd->dentry); 1509 - nd->dentry = dentry; 1491 + nd->dentry = path.dentry; 1492 + if (nd->mnt != path.mnt) 1493 + mntput(nd->mnt); 1494 + nd->mnt = path.mnt; 1510 1495 error = -EISDIR; 1511 - if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) 1496 + if (path.dentry->d_inode && S_ISDIR(path.dentry->d_inode->i_mode)) 1512 1497 goto exit; 1513 1498 ok: 1514 1499 error = may_open(nd, acc_mode, flag); ··· 1519 1500 return 0; 1520 1501 1521 1502 exit_dput: 1522 - dput(dentry); 1503 + dput(path.dentry); 1504 + if (nd->mnt != path.mnt) 1505 + mntput(path.mnt); 1523 1506 exit: 1524 1507 path_release(nd); 1525 1508 return error; ··· 1541 1520 * are done. Procfs-like symlinks just set LAST_BIND. 
1542 1521 */ 1543 1522 nd->flags |= LOOKUP_PARENT; 1544 - error = security_inode_follow_link(dentry, nd); 1523 + error = security_inode_follow_link(path.dentry, nd); 1545 1524 if (error) 1546 1525 goto exit_dput; 1547 - error = __do_follow_link(dentry, nd); 1548 - dput(dentry); 1526 + error = __do_follow_link(&path, nd); 1549 1527 if (error) 1550 1528 return error; 1551 1529 nd->flags &= ~LOOKUP_PARENT; 1552 - if (nd->last_type == LAST_BIND) { 1553 - dentry = nd->dentry; 1530 + if (nd->last_type == LAST_BIND) 1554 1531 goto ok; 1555 - } 1556 1532 error = -EISDIR; 1557 1533 if (nd->last_type != LAST_NORM) 1558 1534 goto exit; ··· 1564 1546 } 1565 1547 dir = nd->dentry; 1566 1548 down(&dir->d_inode->i_sem); 1567 - dentry = __lookup_hash(&nd->last, nd->dentry, nd); 1549 + path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); 1550 + path.mnt = nd->mnt; 1568 1551 putname(nd->last.name); 1569 1552 goto do_last; 1570 1553 }
+35 -14
fs/nfs/dir.c
··· 528 528 dentry->d_time = jiffies; 529 529 } 530 530 531 + /* 532 + * Return the intent data that applies to this particular path component 533 + * 534 + * Note that the current set of intents only apply to the very last 535 + * component of the path. 536 + * We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT. 537 + */ 538 + static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, unsigned int mask) 539 + { 540 + if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT)) 541 + return 0; 542 + return nd->flags & mask; 543 + } 544 + 545 + /* 546 + * Inode and filehandle revalidation for lookups. 547 + * 548 + * We force revalidation in the cases where the VFS sets LOOKUP_REVAL, 549 + * or if the intent information indicates that we're about to open this 550 + * particular file and the "nocto" mount flag is not set. 551 + * 552 + */ 531 553 static inline 532 554 int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd) 533 555 { 534 556 struct nfs_server *server = NFS_SERVER(inode); 535 557 536 558 if (nd != NULL) { 537 - int ndflags = nd->flags; 538 559 /* VFS wants an on-the-wire revalidation */ 539 - if (ndflags & LOOKUP_REVAL) 560 + if (nd->flags & LOOKUP_REVAL) 540 561 goto out_force; 541 562 /* This is an open(2) */ 542 - if ((ndflags & LOOKUP_OPEN) && 543 - !(ndflags & LOOKUP_CONTINUE) && 563 + if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 && 544 564 !(server->flags & NFS_MOUNT_NOCTO)) 545 565 goto out_force; 546 566 } ··· 580 560 int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, 581 561 struct nameidata *nd) 582 562 { 583 - int ndflags = 0; 584 - 585 - if (nd) 586 - ndflags = nd->flags; 587 563 /* Don't revalidate a negative dentry if we're creating a new file */ 588 - if ((ndflags & LOOKUP_CREATE) && !(ndflags & LOOKUP_CONTINUE)) 564 + if (nd != NULL && nfs_lookup_check_intent(nd, LOOKUP_CREATE) != 0) 589 565 return 0; 590 566 return !nfs_check_verifier(dir, dentry); 591 567 } ··· 716 700 .d_iput = 
nfs_dentry_iput, 717 701 }; 718 702 703 + /* 704 + * Use intent information to check whether or not we're going to do 705 + * an O_EXCL create using this path component. 706 + */ 719 707 static inline 720 708 int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd) 721 709 { 722 710 if (NFS_PROTO(dir)->version == 2) 723 711 return 0; 724 - if (!nd || (nd->flags & LOOKUP_CONTINUE) || !(nd->flags & LOOKUP_CREATE)) 712 + if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_CREATE) == 0) 725 713 return 0; 726 714 return (nd->intent.open.flags & O_EXCL) != 0; 727 715 } ··· 792 772 .d_iput = nfs_dentry_iput, 793 773 }; 794 774 775 + /* 776 + * Use intent information to determine whether we need to substitute 777 + * the NFSv4-style stateful OPEN for the LOOKUP call 778 + */ 795 779 static int is_atomic_open(struct inode *dir, struct nameidata *nd) 796 780 { 797 - if (!nd) 798 - return 0; 799 - /* Check that we are indeed trying to open this file */ 800 - if ((nd->flags & LOOKUP_CONTINUE) || !(nd->flags & LOOKUP_OPEN)) 781 + if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0) 801 782 return 0; 802 783 /* NFS does not (yet) have a stateful open for directories */ 803 784 if (nd->flags & LOOKUP_DIRECTORY)
+10
include/asm-alpha/agp.h
··· 10 10 #define flush_agp_mappings() 11 11 #define flush_agp_cache() mb() 12 12 13 + /* Convert a physical address to an address suitable for the GART. */ 14 + #define phys_to_gart(x) (x) 15 + #define gart_to_phys(x) (x) 16 + 17 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 18 + #define alloc_gatt_pages(order) \ 19 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 20 + #define free_gatt_pages(table, order) \ 21 + free_pages((unsigned long)(table), (order)) 22 + 13 23 #endif
+8 -8
include/asm-arm/arch-ixp2000/io.h
··· 75 75 * Is this cycle meant for the CS8900? 76 76 */ 77 77 if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && 78 - ((port >= IXDP2X01_CS8900_VIRT_BASE) && 79 - (port <= IXDP2X01_CS8900_VIRT_END))) { 78 + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && 79 + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { 80 80 u8 *buf8 = (u8*)buf; 81 81 register u32 tmp32; 82 82 ··· 100 100 * Is this cycle meant for the CS8900? 101 101 */ 102 102 if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && 103 - ((port >= IXDP2X01_CS8900_VIRT_BASE) && 104 - (port <= IXDP2X01_CS8900_VIRT_END))) { 103 + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && 104 + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { 105 105 register u32 tmp32; 106 106 u8 *buf8 = (u8*)buf; 107 107 do { ··· 124 124 * Is this cycle meant for the CS8900? 125 125 */ 126 126 if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && 127 - ((port >= IXDP2X01_CS8900_VIRT_BASE) && 128 - (port <= IXDP2X01_CS8900_VIRT_END))) { 127 + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && 128 + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { 129 129 return (u16)(*port); 130 130 } 131 131 ··· 137 137 register volatile u32 *port = (volatile u32 *)ptr; 138 138 139 139 if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && 140 - ((port >= IXDP2X01_CS8900_VIRT_BASE) && 141 - (port <= IXDP2X01_CS8900_VIRT_END))) { 140 + (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) && 141 + ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) { 142 142 *port = value; 143 143 return; 144 144 }
+2
include/asm-arm/arch-pxa/pxa-regs.h
··· 1296 1296 #define GPIO111_MMCDAT3 111 /* MMC DAT3 (PXA27x) */ 1297 1297 #define GPIO111_MMCCS1 111 /* MMC Chip Select 1 (PXA27x) */ 1298 1298 #define GPIO112_MMCCMD 112 /* MMC CMD (PXA27x) */ 1299 + #define GPIO113_I2S_SYSCLK 113 /* I2S System Clock (PXA27x) */ 1299 1300 #define GPIO113_AC97_RESET_N 113 /* AC97 NRESET on (PXA27x) */ 1300 1301 1301 1302 /* GPIO alternate function mode & direction */ ··· 1429 1428 #define GPIO111_MMCDAT3_MD (111 | GPIO_ALT_FN_1_OUT) 1430 1429 #define GPIO110_MMCCS1_MD (111 | GPIO_ALT_FN_1_OUT) 1431 1430 #define GPIO112_MMCCMD_MD (112 | GPIO_ALT_FN_1_OUT) 1431 + #define GPIO113_I2S_SYSCLK_MD (113 | GPIO_ALT_FN_1_OUT) 1432 1432 #define GPIO113_AC97_RESET_N_MD (113 | GPIO_ALT_FN_2_OUT) 1433 1433 #define GPIO117_I2CSCL_MD (117 | GPIO_ALT_FN_1_OUT) 1434 1434 #define GPIO118_I2CSDA_MD (118 | GPIO_ALT_FN_1_IN)
+2 -2
include/asm-arm/elf.h
··· 38 38 */ 39 39 #define ELF_CLASS ELFCLASS32 40 40 #ifdef __ARMEB__ 41 - #define ELF_DATA ELFDATA2MSB; 41 + #define ELF_DATA ELFDATA2MSB 42 42 #else 43 - #define ELF_DATA ELFDATA2LSB; 43 + #define ELF_DATA ELFDATA2LSB 44 44 #endif 45 45 #define ELF_ARCH EM_ARM 46 46
+1 -1
include/asm-arm26/elf.h
··· 36 36 * These are used to set parameters in the core dumps. 37 37 */ 38 38 #define ELF_CLASS ELFCLASS32 39 - #define ELF_DATA ELFDATA2LSB; 39 + #define ELF_DATA ELFDATA2LSB 40 40 #define ELF_ARCH EM_ARM 41 41 42 42 #define USE_ELF_CORE_DUMP
+4 -2
include/asm-h8300/kmap_types.h
··· 1 - #ifndef _ASM_KMAP_TYPES_H 2 - #define _ASM_KMAP_TYPES_H 1 + #ifndef _ASM_H8300_KMAP_TYPES_H 2 + #define _ASM_H8300_KMAP_TYPES_H 3 3 4 4 enum km_type { 5 5 KM_BOUNCE_READ, ··· 13 13 KM_PTE1, 14 14 KM_IRQ0, 15 15 KM_IRQ1, 16 + KM_SOFTIRQ0, 17 + KM_SOFTIRQ1, 16 18 KM_TYPE_NR 17 19 }; 18 20
+3
include/asm-h8300/mman.h
··· 4 4 #define PROT_READ 0x1 /* page can be read */ 5 5 #define PROT_WRITE 0x2 /* page can be written */ 6 6 #define PROT_EXEC 0x4 /* page can be executed */ 7 + #define PROT_SEM 0x8 /* page may be used for atomic ops */ 7 8 #define PROT_NONE 0x0 /* page can not be accessed */ 8 9 #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ 9 10 #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ ··· 20 19 #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 21 20 #define MAP_LOCKED 0x2000 /* pages are locked */ 22 21 #define MAP_NORESERVE 0x4000 /* don't check for reservations */ 22 + #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 23 + #define MAP_NONBLOCK 0x10000 /* do not block on IO */ 23 24 24 25 #define MS_ASYNC 1 /* sync memory asynchronously */ 25 26 #define MS_INVALIDATE 2 /* invalidate the caches */
+10
include/asm-i386/agp.h
··· 21 21 worth it. Would need a page for it. */ 22 22 #define flush_agp_cache() asm volatile("wbinvd":::"memory") 23 23 24 + /* Convert a physical address to an address suitable for the GART. */ 25 + #define phys_to_gart(x) (x) 26 + #define gart_to_phys(x) (x) 27 + 28 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 29 + #define alloc_gatt_pages(order) \ 30 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 31 + #define free_gatt_pages(table, order) \ 32 + free_pages((unsigned long)(table), (order)) 33 + 24 34 #endif
+1 -1
include/asm-i386/mach-numaq/mach_ipi.h
··· 1 1 #ifndef __ASM_MACH_IPI_H 2 2 #define __ASM_MACH_IPI_H 3 3 4 - inline void send_IPI_mask_sequence(cpumask_t, int vector); 4 + void send_IPI_mask_sequence(cpumask_t, int vector); 5 5 6 6 static inline void send_IPI_mask(cpumask_t mask, int vector) 7 7 {
+10
include/asm-ia64/agp.h
··· 18 18 #define flush_agp_mappings() /* nothing */ 19 19 #define flush_agp_cache() mb() 20 20 21 + /* Convert a physical address to an address suitable for the GART. */ 22 + #define phys_to_gart(x) (x) 23 + #define gart_to_phys(x) (x) 24 + 25 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 26 + #define alloc_gatt_pages(order) \ 27 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 28 + #define free_gatt_pages(table, order) \ 29 + free_pages((unsigned long)(table), (order)) 30 + 21 31 #endif /* _ASM_IA64_AGP_H */
+6 -2
include/asm-ia64/pgtable.h
··· 8 8 * This hopefully works with any (fixed) IA-64 page-size, as defined 9 9 * in <asm/page.h>. 10 10 * 11 - * Copyright (C) 1998-2004 Hewlett-Packard Co 11 + * Copyright (C) 1998-2005 Hewlett-Packard Co 12 12 * David Mosberger-Tang <davidm@hpl.hp.com> 13 13 */ 14 14 ··· 551 551 552 552 /* These tell get_user_pages() that the first gate page is accessible from user-level. */ 553 553 #define FIXADDR_USER_START GATE_ADDR 554 - #define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE) 554 + #ifdef HAVE_BUGGY_SEGREL 555 + # define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE) 556 + #else 557 + # define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE) 558 + #endif 555 559 556 560 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 557 561 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+8 -2
include/asm-ia64/processor.h
··· 403 403 * task_struct at this point. 404 404 */ 405 405 406 - /* Return TRUE if task T owns the fph partition of the CPU we're running on. */ 406 + /* 407 + * Return TRUE if task T owns the fph partition of the CPU we're running on. 408 + * Must be called from code that has preemption disabled. 409 + */ 407 410 #define ia64_is_local_fpu_owner(t) \ 408 411 ({ \ 409 412 struct task_struct *__ia64_islfo_task = (t); \ ··· 414 411 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \ 415 412 }) 416 413 417 - /* Mark task T as owning the fph partition of the CPU we're running on. */ 414 + /* 415 + * Mark task T as owning the fph partition of the CPU we're running on. 416 + * Must be called from code that has preemption disabled. 417 + */ 418 418 #define ia64_set_local_fpu_owner(t) do { \ 419 419 struct task_struct *__ia64_slfo_task = (t); \ 420 420 __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
+10
include/asm-ppc/agp.h
··· 10 10 #define flush_agp_mappings() 11 11 #define flush_agp_cache() mb() 12 12 13 + /* Convert a physical address to an address suitable for the GART. */ 14 + #define phys_to_gart(x) (x) 15 + #define gart_to_phys(x) (x) 16 + 17 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 18 + #define alloc_gatt_pages(order) \ 19 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 20 + #define free_gatt_pages(table, order) \ 21 + free_pages((unsigned long)(table), (order)) 22 + 13 23 #endif
+1 -1
include/asm-ppc/sigcontext.h
··· 2 2 #define _ASM_PPC_SIGCONTEXT_H 3 3 4 4 #include <asm/ptrace.h> 5 - 5 + #include <linux/compiler.h> 6 6 7 7 struct sigcontext { 8 8 unsigned long _unused[4];
+10
include/asm-ppc64/agp.h
··· 10 10 #define flush_agp_mappings() 11 11 #define flush_agp_cache() mb() 12 12 13 + /* Convert a physical address to an address suitable for the GART. */ 14 + #define phys_to_gart(x) (x) 15 + #define gart_to_phys(x) (x) 16 + 17 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 18 + #define alloc_gatt_pages(order) \ 19 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 20 + #define free_gatt_pages(table, order) \ 21 + free_pages((unsigned long)(table), (order)) 22 + 13 23 #endif
+1 -3
include/asm-ppc64/elf.h
··· 221 221 set_thread_flag(TIF_ABI_PENDING); \ 222 222 else \ 223 223 clear_thread_flag(TIF_ABI_PENDING); \ 224 - if (ibcs2) \ 225 - set_personality(PER_SVR4); \ 226 - else if (current->personality != PER_LINUX32) \ 224 + if (personality(current->personality) != PER_LINUX32) \ 227 225 set_personality(PER_LINUX); \ 228 226 } while (0) 229 227
+3 -2
include/asm-sparc/uaccess.h
··· 41 41 * No one can read/write anything from userland in the kernel space by setting 42 42 * large size and address near to PAGE_OFFSET - a fault will break his intentions. 43 43 */ 44 - #define __user_ok(addr,size) ((addr) < STACK_TOP) 44 + #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) 45 45 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 46 46 #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) 47 - #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) 47 + #define access_ok(type, addr, size) \ 48 + ({ (void)(type); __access_ok((unsigned long)(addr), size); }) 48 49 49 50 /* this function will go away soon - use access_ok() instead */ 50 51 static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
+10
include/asm-sparc64/agp.h
··· 8 8 #define flush_agp_mappings() 9 9 #define flush_agp_cache() mb() 10 10 11 + /* Convert a physical address to an address suitable for the GART. */ 12 + #define phys_to_gart(x) (x) 13 + #define gart_to_phys(x) (x) 14 + 15 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 16 + #define alloc_gatt_pages(order) \ 17 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 18 + #define free_gatt_pages(table, order) \ 19 + free_pages((unsigned long)(table), (order)) 20 + 11 21 #endif
+10
include/asm-x86_64/agp.h
··· 19 19 worth it. Would need a page for it. */ 20 20 #define flush_agp_cache() asm volatile("wbinvd":::"memory") 21 21 22 + /* Convert a physical address to an address suitable for the GART. */ 23 + #define phys_to_gart(x) (x) 24 + #define gart_to_phys(x) (x) 25 + 26 + /* GATT allocation. Returns/accepts GATT kernel virtual address. */ 27 + #define alloc_gatt_pages(order) \ 28 + ((char *)__get_free_pages(GFP_KERNEL, (order))) 29 + #define free_gatt_pages(table, order) \ 30 + free_pages((unsigned long)(table), (order)) 31 + 22 32 #endif
+2
include/linux/acpi.h
··· 25 25 #ifndef _LINUX_ACPI_H 26 26 #define _LINUX_ACPI_H 27 27 28 + #include <linux/config.h> 29 + 28 30 #ifdef CONFIG_ACPI 29 31 30 32 #ifndef _LINUX
+3
include/linux/pci_ids.h
··· 874 874 #define PCI_DEVICE_ID_APPLE_KL_USB_P 0x0026 875 875 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 876 876 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d 877 + #define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e 877 878 #define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030 878 879 #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 879 880 #define PCI_DEVIEC_ID_APPLE_UNI_N_ATA 0x0033 ··· 2383 2382 #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 2384 2383 #define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 2385 2384 #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 2385 + #define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 2386 + #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 2386 2387 #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 2387 2388 #define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 2388 2389 #define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
+30
include/linux/tc_ematch/tc_em_meta.h
··· 56 56 TCF_META_ID_TCCLASSID, 57 57 TCF_META_ID_RTCLASSID, 58 58 TCF_META_ID_RTIIF, 59 + TCF_META_ID_SK_FAMILY, 60 + TCF_META_ID_SK_STATE, 61 + TCF_META_ID_SK_REUSE, 62 + TCF_META_ID_SK_BOUND_IF, 63 + TCF_META_ID_SK_REFCNT, 64 + TCF_META_ID_SK_SHUTDOWN, 65 + TCF_META_ID_SK_PROTO, 66 + TCF_META_ID_SK_TYPE, 67 + TCF_META_ID_SK_RCVBUF, 68 + TCF_META_ID_SK_RMEM_ALLOC, 69 + TCF_META_ID_SK_WMEM_ALLOC, 70 + TCF_META_ID_SK_OMEM_ALLOC, 71 + TCF_META_ID_SK_WMEM_QUEUED, 72 + TCF_META_ID_SK_RCV_QLEN, 73 + TCF_META_ID_SK_SND_QLEN, 74 + TCF_META_ID_SK_ERR_QLEN, 75 + TCF_META_ID_SK_FORWARD_ALLOCS, 76 + TCF_META_ID_SK_SNDBUF, 77 + TCF_META_ID_SK_ALLOCS, 78 + TCF_META_ID_SK_ROUTE_CAPS, 79 + TCF_META_ID_SK_HASHENT, 80 + TCF_META_ID_SK_LINGERTIME, 81 + TCF_META_ID_SK_ACK_BACKLOG, 82 + TCF_META_ID_SK_MAX_ACK_BACKLOG, 83 + TCF_META_ID_SK_PRIO, 84 + TCF_META_ID_SK_RCVLOWAT, 85 + TCF_META_ID_SK_RCVTIMEO, 86 + TCF_META_ID_SK_SNDTIMEO, 87 + TCF_META_ID_SK_SENDMSG_OFF, 88 + TCF_META_ID_SK_WRITE_PENDING, 59 89 __TCF_META_ID_MAX 60 90 }; 61 91 #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1)
+7 -1
mm/filemap.c
··· 1968 1968 do { 1969 1969 unsigned long index; 1970 1970 unsigned long offset; 1971 + unsigned long maxlen; 1971 1972 size_t copied; 1972 1973 1973 1974 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ ··· 1983 1982 * same page as we're writing to, without it being marked 1984 1983 * up-to-date. 1985 1984 */ 1986 - fault_in_pages_readable(buf, bytes); 1985 + maxlen = cur_iov->iov_len - iov_base; 1986 + if (maxlen > bytes) 1987 + maxlen = bytes; 1988 + fault_in_pages_readable(buf, maxlen); 1987 1989 1988 1990 page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); 1989 1991 if (!page) { ··· 2028 2024 filemap_set_next_iovec(&cur_iov, 2029 2025 &iov_base, status); 2030 2026 buf = cur_iov->iov_base + iov_base; 2027 + } else { 2028 + iov_base += status; 2031 2029 } 2032 2030 } 2033 2031 }
+1
net/core/dev.c
··· 1744 1744 struct softnet_data *queue = &__get_cpu_var(softnet_data); 1745 1745 unsigned long start_time = jiffies; 1746 1746 1747 + backlog_dev->weight = weight_p; 1747 1748 for (;;) { 1748 1749 struct sk_buff *skb; 1749 1750 struct net_device *dev;
+1 -1
net/core/ethtool.c
··· 356 356 { 357 357 struct ethtool_coalesce coalesce; 358 358 359 - if (!dev->ethtool_ops->get_coalesce) 359 + if (!dev->ethtool_ops->set_coalesce) 360 360 return -EOPNOTSUPP; 361 361 362 362 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+17
net/core/net-sysfs.c
··· 185 185 static CLASS_DEVICE_ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, 186 186 store_tx_queue_len); 187 187 188 + NETDEVICE_SHOW(weight, fmt_dec); 189 + 190 + static int change_weight(struct net_device *net, unsigned long new_weight) 191 + { 192 + net->weight = new_weight; 193 + return 0; 194 + } 195 + 196 + static ssize_t store_weight(struct class_device *dev, const char *buf, size_t len) 197 + { 198 + return netdev_store(dev, buf, len, change_weight); 199 + } 200 + 201 + static CLASS_DEVICE_ATTR(weight, S_IRUGO | S_IWUSR, show_weight, 202 + store_weight); 203 + 188 204 189 205 static struct class_device_attribute *net_class_attributes[] = { 190 206 &class_device_attr_ifindex, ··· 210 194 &class_device_attr_features, 211 195 &class_device_attr_mtu, 212 196 &class_device_attr_flags, 197 + &class_device_attr_weight, 213 198 &class_device_attr_type, 214 199 &class_device_attr_address, 215 200 &class_device_attr_broadcast,
+1
net/ipv6/ip6_tunnel.c
··· 882 882 t->parms.hop_limit = p->hop_limit; 883 883 t->parms.encap_limit = p->encap_limit; 884 884 t->parms.flowinfo = p->flowinfo; 885 + t->parms.link = p->link; 885 886 ip6ip6_tnl_link_config(t); 886 887 return 0; 887 888 }
+1 -1
net/sched/Kconfig
··· 405 405 ---help--- 406 406 Size of the local stack variable used while evaluating the tree of 407 407 ematches. Limits the depth of the tree, i.e. the number of 408 - encapsulated precedences. Every level requires 4 bytes of addtional 408 + encapsulated precedences. Every level requires 4 bytes of additional 409 409 stack space. 410 410 411 411 config NET_EMATCH_CMP
+3
net/sched/cls_basic.c
··· 261 261 rta = (struct rtattr *) b; 262 262 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 263 263 264 + if (f->res.classid) 265 + RTA_PUT(skb, TCA_BASIC_CLASSID, sizeof(u32), &f->res.classid); 266 + 264 267 if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 || 265 268 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) 266 269 goto rtattr_failure;
+269 -26
net/sched/em_meta.c
··· 32 32 * +-----------+ +-----------+ 33 33 * | | 34 34 * ---> meta_ops[INT][INDEV](...) | 35 - * | | 35 + * | | 36 36 * ----------- | 37 37 * V V 38 38 * +-----------+ +-----------+ ··· 70 70 #include <net/dst.h> 71 71 #include <net/route.h> 72 72 #include <net/pkt_cls.h> 73 + #include <net/sock.h> 73 74 74 75 struct meta_obj 75 76 { ··· 285 284 } 286 285 287 286 /************************************************************************** 287 + * Socket Attributes 288 + **************************************************************************/ 289 + 290 + #define SKIP_NONLOCAL(skb) \ 291 + if (unlikely(skb->sk == NULL)) { \ 292 + *err = -1; \ 293 + return; \ 294 + } 295 + 296 + META_COLLECTOR(int_sk_family) 297 + { 298 + SKIP_NONLOCAL(skb); 299 + dst->value = skb->sk->sk_family; 300 + } 301 + 302 + META_COLLECTOR(int_sk_state) 303 + { 304 + SKIP_NONLOCAL(skb); 305 + dst->value = skb->sk->sk_state; 306 + } 307 + 308 + META_COLLECTOR(int_sk_reuse) 309 + { 310 + SKIP_NONLOCAL(skb); 311 + dst->value = skb->sk->sk_reuse; 312 + } 313 + 314 + META_COLLECTOR(int_sk_bound_if) 315 + { 316 + SKIP_NONLOCAL(skb); 317 + /* No error if bound_dev_if is 0, legal userspace check */ 318 + dst->value = skb->sk->sk_bound_dev_if; 319 + } 320 + 321 + META_COLLECTOR(var_sk_bound_if) 322 + { 323 + SKIP_NONLOCAL(skb); 324 + 325 + if (skb->sk->sk_bound_dev_if == 0) { 326 + dst->value = (unsigned long) "any"; 327 + dst->len = 3; 328 + } else { 329 + struct net_device *dev; 330 + 331 + dev = dev_get_by_index(skb->sk->sk_bound_dev_if); 332 + *err = var_dev(dev, dst); 333 + if (dev) 334 + dev_put(dev); 335 + } 336 + } 337 + 338 + META_COLLECTOR(int_sk_refcnt) 339 + { 340 + SKIP_NONLOCAL(skb); 341 + dst->value = atomic_read(&skb->sk->sk_refcnt); 342 + } 343 + 344 + META_COLLECTOR(int_sk_rcvbuf) 345 + { 346 + SKIP_NONLOCAL(skb); 347 + dst->value = skb->sk->sk_rcvbuf; 348 + } 349 + 350 + META_COLLECTOR(int_sk_shutdown) 351 + { 352 + SKIP_NONLOCAL(skb); 353 + dst->value = skb->sk->sk_shutdown; 
354 + } 355 + 356 + META_COLLECTOR(int_sk_proto) 357 + { 358 + SKIP_NONLOCAL(skb); 359 + dst->value = skb->sk->sk_protocol; 360 + } 361 + 362 + META_COLLECTOR(int_sk_type) 363 + { 364 + SKIP_NONLOCAL(skb); 365 + dst->value = skb->sk->sk_type; 366 + } 367 + 368 + META_COLLECTOR(int_sk_rmem_alloc) 369 + { 370 + SKIP_NONLOCAL(skb); 371 + dst->value = atomic_read(&skb->sk->sk_rmem_alloc); 372 + } 373 + 374 + META_COLLECTOR(int_sk_wmem_alloc) 375 + { 376 + SKIP_NONLOCAL(skb); 377 + dst->value = atomic_read(&skb->sk->sk_wmem_alloc); 378 + } 379 + 380 + META_COLLECTOR(int_sk_omem_alloc) 381 + { 382 + SKIP_NONLOCAL(skb); 383 + dst->value = atomic_read(&skb->sk->sk_omem_alloc); 384 + } 385 + 386 + META_COLLECTOR(int_sk_rcv_qlen) 387 + { 388 + SKIP_NONLOCAL(skb); 389 + dst->value = skb->sk->sk_receive_queue.qlen; 390 + } 391 + 392 + META_COLLECTOR(int_sk_snd_qlen) 393 + { 394 + SKIP_NONLOCAL(skb); 395 + dst->value = skb->sk->sk_write_queue.qlen; 396 + } 397 + 398 + META_COLLECTOR(int_sk_wmem_queued) 399 + { 400 + SKIP_NONLOCAL(skb); 401 + dst->value = skb->sk->sk_wmem_queued; 402 + } 403 + 404 + META_COLLECTOR(int_sk_fwd_alloc) 405 + { 406 + SKIP_NONLOCAL(skb); 407 + dst->value = skb->sk->sk_forward_alloc; 408 + } 409 + 410 + META_COLLECTOR(int_sk_sndbuf) 411 + { 412 + SKIP_NONLOCAL(skb); 413 + dst->value = skb->sk->sk_sndbuf; 414 + } 415 + 416 + META_COLLECTOR(int_sk_alloc) 417 + { 418 + SKIP_NONLOCAL(skb); 419 + dst->value = skb->sk->sk_allocation; 420 + } 421 + 422 + META_COLLECTOR(int_sk_route_caps) 423 + { 424 + SKIP_NONLOCAL(skb); 425 + dst->value = skb->sk->sk_route_caps; 426 + } 427 + 428 + META_COLLECTOR(int_sk_hashent) 429 + { 430 + SKIP_NONLOCAL(skb); 431 + dst->value = skb->sk->sk_hashent; 432 + } 433 + 434 + META_COLLECTOR(int_sk_lingertime) 435 + { 436 + SKIP_NONLOCAL(skb); 437 + dst->value = skb->sk->sk_lingertime / HZ; 438 + } 439 + 440 + META_COLLECTOR(int_sk_err_qlen) 441 + { 442 + SKIP_NONLOCAL(skb); 443 + dst->value = skb->sk->sk_error_queue.qlen; 444 + } 
445 + 446 + META_COLLECTOR(int_sk_ack_bl) 447 + { 448 + SKIP_NONLOCAL(skb); 449 + dst->value = skb->sk->sk_ack_backlog; 450 + } 451 + 452 + META_COLLECTOR(int_sk_max_ack_bl) 453 + { 454 + SKIP_NONLOCAL(skb); 455 + dst->value = skb->sk->sk_max_ack_backlog; 456 + } 457 + 458 + META_COLLECTOR(int_sk_prio) 459 + { 460 + SKIP_NONLOCAL(skb); 461 + dst->value = skb->sk->sk_priority; 462 + } 463 + 464 + META_COLLECTOR(int_sk_rcvlowat) 465 + { 466 + SKIP_NONLOCAL(skb); 467 + dst->value = skb->sk->sk_rcvlowat; 468 + } 469 + 470 + META_COLLECTOR(int_sk_rcvtimeo) 471 + { 472 + SKIP_NONLOCAL(skb); 473 + dst->value = skb->sk->sk_rcvtimeo / HZ; 474 + } 475 + 476 + META_COLLECTOR(int_sk_sndtimeo) 477 + { 478 + SKIP_NONLOCAL(skb); 479 + dst->value = skb->sk->sk_sndtimeo / HZ; 480 + } 481 + 482 + META_COLLECTOR(int_sk_sendmsg_off) 483 + { 484 + SKIP_NONLOCAL(skb); 485 + dst->value = skb->sk->sk_sndmsg_off; 486 + } 487 + 488 + META_COLLECTOR(int_sk_write_pend) 489 + { 490 + SKIP_NONLOCAL(skb); 491 + dst->value = skb->sk->sk_write_pending; 492 + } 493 + 494 + /************************************************************************** 288 495 * Meta value collectors assignment table 289 496 **************************************************************************/ 290 497 ··· 502 293 struct meta_value *, struct meta_obj *, int *); 503 294 }; 504 295 296 + #define META_ID(name) TCF_META_ID_##name 297 + #define META_FUNC(name) { .get = meta_##name } 298 + 505 299 /* Meta value operations table listing all meta value collectors and 506 300 * assigns them to a type and meta id. 
*/ 507 301 static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { 508 302 [TCF_META_TYPE_VAR] = { 509 - [TCF_META_ID_DEV] = { .get = meta_var_dev }, 510 - [TCF_META_ID_INDEV] = { .get = meta_var_indev }, 511 - [TCF_META_ID_REALDEV] = { .get = meta_var_realdev } 303 + [META_ID(DEV)] = META_FUNC(var_dev), 304 + [META_ID(INDEV)] = META_FUNC(var_indev), 305 + [META_ID(REALDEV)] = META_FUNC(var_realdev), 306 + [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if), 512 307 }, 513 308 [TCF_META_TYPE_INT] = { 514 - [TCF_META_ID_RANDOM] = { .get = meta_int_random }, 515 - [TCF_META_ID_LOADAVG_0] = { .get = meta_int_loadavg_0 }, 516 - [TCF_META_ID_LOADAVG_1] = { .get = meta_int_loadavg_1 }, 517 - [TCF_META_ID_LOADAVG_2] = { .get = meta_int_loadavg_2 }, 518 - [TCF_META_ID_DEV] = { .get = meta_int_dev }, 519 - [TCF_META_ID_INDEV] = { .get = meta_int_indev }, 520 - [TCF_META_ID_REALDEV] = { .get = meta_int_realdev }, 521 - [TCF_META_ID_PRIORITY] = { .get = meta_int_priority }, 522 - [TCF_META_ID_PROTOCOL] = { .get = meta_int_protocol }, 523 - [TCF_META_ID_SECURITY] = { .get = meta_int_security }, 524 - [TCF_META_ID_PKTTYPE] = { .get = meta_int_pkttype }, 525 - [TCF_META_ID_PKTLEN] = { .get = meta_int_pktlen }, 526 - [TCF_META_ID_DATALEN] = { .get = meta_int_datalen }, 527 - [TCF_META_ID_MACLEN] = { .get = meta_int_maclen }, 309 + [META_ID(RANDOM)] = META_FUNC(int_random), 310 + [META_ID(LOADAVG_0)] = META_FUNC(int_loadavg_0), 311 + [META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1), 312 + [META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2), 313 + [META_ID(DEV)] = META_FUNC(int_dev), 314 + [META_ID(INDEV)] = META_FUNC(int_indev), 315 + [META_ID(REALDEV)] = META_FUNC(int_realdev), 316 + [META_ID(PRIORITY)] = META_FUNC(int_priority), 317 + [META_ID(PROTOCOL)] = META_FUNC(int_protocol), 318 + [META_ID(SECURITY)] = META_FUNC(int_security), 319 + [META_ID(PKTTYPE)] = META_FUNC(int_pkttype), 320 + [META_ID(PKTLEN)] = META_FUNC(int_pktlen), 321 + [META_ID(DATALEN)] = 
META_FUNC(int_datalen), 322 + [META_ID(MACLEN)] = META_FUNC(int_maclen), 528 323 #ifdef CONFIG_NETFILTER 529 - [TCF_META_ID_NFMARK] = { .get = meta_int_nfmark }, 324 + [META_ID(NFMARK)] = META_FUNC(int_nfmark), 530 325 #endif 531 - [TCF_META_ID_TCINDEX] = { .get = meta_int_tcindex }, 326 + [META_ID(TCINDEX)] = META_FUNC(int_tcindex), 532 327 #ifdef CONFIG_NET_CLS_ACT 533 - [TCF_META_ID_TCVERDICT] = { .get = meta_int_tcverd }, 534 - [TCF_META_ID_TCCLASSID] = { .get = meta_int_tcclassid }, 328 + [META_ID(TCVERDICT)] = META_FUNC(int_tcverd), 329 + [META_ID(TCCLASSID)] = META_FUNC(int_tcclassid), 535 330 #endif 536 331 #ifdef CONFIG_NET_CLS_ROUTE 537 - [TCF_META_ID_RTCLASSID] = { .get = meta_int_rtclassid }, 332 + [META_ID(RTCLASSID)] = META_FUNC(int_rtclassid), 538 333 #endif 539 - [TCF_META_ID_RTIIF] = { .get = meta_int_rtiif } 334 + [META_ID(RTIIF)] = META_FUNC(int_rtiif), 335 + [META_ID(SK_FAMILY)] = META_FUNC(int_sk_family), 336 + [META_ID(SK_STATE)] = META_FUNC(int_sk_state), 337 + [META_ID(SK_REUSE)] = META_FUNC(int_sk_reuse), 338 + [META_ID(SK_BOUND_IF)] = META_FUNC(int_sk_bound_if), 339 + [META_ID(SK_REFCNT)] = META_FUNC(int_sk_refcnt), 340 + [META_ID(SK_RCVBUF)] = META_FUNC(int_sk_rcvbuf), 341 + [META_ID(SK_SNDBUF)] = META_FUNC(int_sk_sndbuf), 342 + [META_ID(SK_SHUTDOWN)] = META_FUNC(int_sk_shutdown), 343 + [META_ID(SK_PROTO)] = META_FUNC(int_sk_proto), 344 + [META_ID(SK_TYPE)] = META_FUNC(int_sk_type), 345 + [META_ID(SK_RMEM_ALLOC)] = META_FUNC(int_sk_rmem_alloc), 346 + [META_ID(SK_WMEM_ALLOC)] = META_FUNC(int_sk_wmem_alloc), 347 + [META_ID(SK_OMEM_ALLOC)] = META_FUNC(int_sk_omem_alloc), 348 + [META_ID(SK_WMEM_QUEUED)] = META_FUNC(int_sk_wmem_queued), 349 + [META_ID(SK_RCV_QLEN)] = META_FUNC(int_sk_rcv_qlen), 350 + [META_ID(SK_SND_QLEN)] = META_FUNC(int_sk_snd_qlen), 351 + [META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen), 352 + [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc), 353 + [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc), 354 + 
[META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps), 355 + [META_ID(SK_HASHENT)] = META_FUNC(int_sk_hashent), 356 + [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime), 357 + [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl), 358 + [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl), 359 + [META_ID(SK_PRIO)] = META_FUNC(int_sk_prio), 360 + [META_ID(SK_RCVLOWAT)] = META_FUNC(int_sk_rcvlowat), 361 + [META_ID(SK_RCVTIMEO)] = META_FUNC(int_sk_rcvtimeo), 362 + [META_ID(SK_SNDTIMEO)] = META_FUNC(int_sk_sndtimeo), 363 + [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off), 364 + [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend), 540 365 } 541 366 }; 542 367 ··· 639 396 /* Let gcc optimize it, the unlikely is not really based on 640 397 * some numbers but jump free code for mismatches seems 641 398 * more logical. */ 642 - if (unlikely(a == b)) 399 + if (unlikely(a->value == b->value)) 643 400 return 0; 644 - else if (a < b) 401 + else if (a->value < b->value) 645 402 return -1; 646 403 else 647 404 return 1;