Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge HEAD from master.kernel.org:/home/rmk/linux-2.6-arm

+143 -31
+2 -2
arch/arm/Kconfig
··· 365 365 366 366 Please note that dynamic tick may affect the accuracy of 367 367 timekeeping on some platforms depending on the implementation. 368 - Currently at least OMAP platform is known to have accurate 369 - timekeeping with dynamic tick. 368 + Currently at least OMAP, PXA2xx and SA11x0 platforms are known 369 + to have accurate timekeeping with dynamic tick. 370 370 371 371 config ARCH_DISCONTIGMEM_ENABLE 372 372 bool
+1 -1
arch/arm/kernel/calls.S
··· 284 284 .long sys_fstatfs64 285 285 .long sys_tgkill 286 286 .long sys_utimes 287 - /* 270 */ .long sys_fadvise64_64 287 + /* 270 */ .long sys_arm_fadvise64_64_wrapper 288 288 .long sys_pciconfig_iobase 289 289 .long sys_pciconfig_read 290 290 .long sys_pciconfig_write
+4
arch/arm/kernel/entry-common.S
··· 265 265 str r5, [sp, #4] @ push sixth arg 266 266 b sys_futex 267 267 268 + sys_arm_fadvise64_64_wrapper: 269 + str r5, [sp, #4] @ push r5 to stack 270 + b sys_arm_fadvise64_64 271 + 268 272 /* 269 273 * Note: off_4k (r5) is always units of 4K. If we can't do the requested 270 274 * offset, we return EINVAL.
+10
arch/arm/kernel/sys_arm.c
··· 311 311 return ret; 312 312 } 313 313 EXPORT_SYMBOL(execve); 314 + 315 + /* 316 + * Since loff_t is a 64 bit type we avoid a lot of ABI hassle 317 + * with a different argument ordering. 318 + */ 319 + asmlinkage long sys_arm_fadvise64_64(int fd, int advice, 320 + loff_t offset, loff_t len) 321 + { 322 + return sys_fadvise64_64(fd, offset, len, advice); 323 + }
+55 -3
arch/arm/mach-pxa/time.c
··· 70 70 return usec; 71 71 } 72 72 73 + #ifdef CONFIG_NO_IDLE_HZ 74 + static unsigned long initial_match; 75 + static int match_posponed; 76 + #endif 77 + 73 78 static irqreturn_t 74 79 pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) 75 80 { ··· 82 77 83 78 write_seqlock(&xtime_lock); 84 79 80 + #ifdef CONFIG_NO_IDLE_HZ 81 + if (match_posponed) { 82 + match_posponed = 0; 83 + OSMR0 = initial_match; 84 + } 85 + #endif 86 + 85 87 /* Loop until we get ahead of the free running timer. 86 88 * This ensures an exact clock tick count and time accuracy. 87 - * IRQs are disabled inside the loop to ensure coherence between 88 - * lost_ticks (updated in do_timer()) and the match reg value, so we 89 - * can use do_gettimeofday() from interrupt handlers. 89 + * Since IRQs are disabled at this point, coherence between 90 + * lost_ticks(updated in do_timer()) and the match reg value is 91 + * ensured, hence we can use do_gettimeofday() from interrupt 92 + * handlers. 90 93 * 91 94 * HACK ALERT: it seems that the PXA timer regs aren't updated right 92 95 * away in all cases when a write occurs. 
We therefore compare with ··· 139 126 OSCR = 0; /* initialize free-running timer, force first match */ 140 127 } 141 128 129 + #ifdef CONFIG_NO_IDLE_HZ 130 + static int pxa_dyn_tick_enable_disable(void) 131 + { 132 + /* nothing to do */ 133 + return 0; 134 + } 135 + 136 + static void pxa_dyn_tick_reprogram(unsigned long ticks) 137 + { 138 + if (ticks > 1) { 139 + initial_match = OSMR0; 140 + OSMR0 = initial_match + ticks * LATCH; 141 + match_posponed = 1; 142 + } 143 + } 144 + 145 + static irqreturn_t 146 + pxa_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs) 147 + { 148 + if (match_posponed) { 149 + match_posponed = 0; 150 + OSMR0 = initial_match; 151 + if ( (signed long)(initial_match - OSCR) <= 8 ) 152 + return pxa_timer_interrupt(irq, dev_id, regs); 153 + } 154 + return IRQ_NONE; 155 + } 156 + 157 + static struct dyn_tick_timer pxa_dyn_tick = { 158 + .enable = pxa_dyn_tick_enable_disable, 159 + .disable = pxa_dyn_tick_enable_disable, 160 + .reprogram = pxa_dyn_tick_reprogram, 161 + .handler = pxa_dyn_tick_handler, 162 + }; 163 + #endif 164 + 142 165 #ifdef CONFIG_PM 143 166 static unsigned long osmr[4], oier; 144 167 ··· 210 161 .suspend = pxa_timer_suspend, 211 162 .resume = pxa_timer_resume, 212 163 .offset = pxa_gettimeoffset, 164 + #ifdef CONFIG_NO_IDLE_HZ 165 + .dyn_tick = &pxa_dyn_tick, 166 + #endif 213 167 };
+59 -9
arch/arm/mach-sa1100/time.c
··· 70 70 return usec; 71 71 } 72 72 73 - /* 74 - * We will be entered with IRQs enabled. 75 - * 76 - * Loop until we get ahead of the free running timer. 77 - * This ensures an exact clock tick count and time accuracy. 78 - * IRQs are disabled inside the loop to ensure coherence between 79 - * lost_ticks (updated in do_timer()) and the match reg value, so we 80 - * can use do_gettimeofday() from interrupt handlers. 81 - */ 73 + #ifdef CONFIG_NO_IDLE_HZ 74 + static unsigned long initial_match; 75 + static int match_posponed; 76 + #endif 77 + 82 78 static irqreturn_t 83 79 sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) 84 80 { ··· 82 86 83 87 write_seqlock(&xtime_lock); 84 88 89 + #ifdef CONFIG_NO_IDLE_HZ 90 + if (match_posponed) { 91 + match_posponed = 0; 92 + OSMR0 = initial_match; 93 + } 94 + #endif 95 + 96 + /* 97 + * Loop until we get ahead of the free running timer. 98 + * This ensures an exact clock tick count and time accuracy. 99 + * Since IRQs are disabled at this point, coherence between 100 + * lost_ticks(updated in do_timer()) and the match reg value is 101 + * ensured, hence we can use do_gettimeofday() from interrupt 102 + * handlers. 
103 + */ 85 104 do { 86 105 timer_tick(regs); 87 106 OSSR = OSSR_M0; /* Clear match on timer 0 */ ··· 130 119 OIER |= OIER_E0; /* enable match on timer 0 to cause interrupts */ 131 120 OSCR = 0; /* initialize free-running timer, force first match */ 132 121 } 122 + 123 + #ifdef CONFIG_NO_IDLE_HZ 124 + static int sa1100_dyn_tick_enable_disable(void) 125 + { 126 + /* nothing to do */ 127 + return 0; 128 + } 129 + 130 + static void sa1100_dyn_tick_reprogram(unsigned long ticks) 131 + { 132 + if (ticks > 1) { 133 + initial_match = OSMR0; 134 + OSMR0 = initial_match + ticks * LATCH; 135 + match_posponed = 1; 136 + } 137 + } 138 + 139 + static irqreturn_t 140 + sa1100_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs) 141 + { 142 + if (match_posponed) { 143 + match_posponed = 0; 144 + OSMR0 = initial_match; 145 + if ((signed long)(initial_match - OSCR) <= 0) 146 + return sa1100_timer_interrupt(irq, dev_id, regs); 147 + } 148 + return IRQ_NONE; 149 + } 150 + 151 + static struct dyn_tick_timer sa1100_dyn_tick = { 152 + .enable = sa1100_dyn_tick_enable_disable, 153 + .disable = sa1100_dyn_tick_enable_disable, 154 + .reprogram = sa1100_dyn_tick_reprogram, 155 + .handler = sa1100_dyn_tick_handler, 156 + }; 157 + #endif 133 158 134 159 #ifdef CONFIG_PM 135 160 unsigned long osmr[4], oier; ··· 203 156 .suspend = sa1100_timer_suspend, 204 157 .resume = sa1100_timer_resume, 205 158 .offset = sa1100_gettimeoffset, 159 + #ifdef CONFIG_NO_IDLE_HZ 160 + .dyn_tick = &sa1100_dyn_tick, 161 + #endif 206 162 };
+11 -15
arch/arm/mm/mm-armv.c
··· 295 295 pte_t *ptep; 296 296 297 297 if (pmd_none(*pmdp)) { 298 - unsigned long pmdval; 299 298 ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * 300 299 sizeof(pte_t)); 301 300 302 - pmdval = __pa(ptep) | prot_l1; 303 - pmdp[0] = __pmd(pmdval); 304 - pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); 305 - flush_pmd_entry(pmdp); 301 + __pmd_populate(pmdp, __pa(ptep) | prot_l1); 306 302 } 307 303 ptep = pte_offset_kernel(pmdp, virt); 308 304 ··· 453 457 454 458 for (i = 0; i < 16; i++) { 455 459 unsigned long v = pgprot_val(protection_map[i]); 456 - v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot; 460 + v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot; 457 461 protection_map[i] = __pgprot(v); 458 462 } 459 463 ··· 577 581 */ 578 582 void setup_mm_for_reboot(char mode) 579 583 { 580 - unsigned long pmdval; 584 + unsigned long base_pmdval; 581 585 pgd_t *pgd; 582 - pmd_t *pmd; 583 586 int i; 584 - int cpu_arch = cpu_architecture(); 585 587 586 588 if (current->mm && current->mm->pgd) 587 589 pgd = current->mm->pgd; 588 590 else 589 591 pgd = init_mm.pgd; 590 592 591 - for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) { 592 - pmdval = (i << PGDIR_SHIFT) | 593 - PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | 594 - PMD_TYPE_SECT; 595 - if (cpu_arch <= CPU_ARCH_ARMv5TEJ) 596 - pmdval |= PMD_BIT4; 593 + base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; 594 + if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ) 595 + base_pmdval |= PMD_BIT4; 596 + 597 + for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { 598 + unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; 599 + pmd_t *pmd; 600 + 597 601 pmd = pmd_off(pgd, i << PGDIR_SHIFT); 598 602 pmd[0] = __pmd(pmdval); 599 603 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
+1 -1
include/asm-arm/unistd.h
··· 295 295 #define __NR_fstatfs64 (__NR_SYSCALL_BASE+267) 296 296 #define __NR_tgkill (__NR_SYSCALL_BASE+268) 297 297 #define __NR_utimes (__NR_SYSCALL_BASE+269) 298 - #define __NR_fadvise64_64 (__NR_SYSCALL_BASE+270) 298 + #define __NR_arm_fadvise64_64 (__NR_SYSCALL_BASE+270) 299 299 #define __NR_pciconfig_iobase (__NR_SYSCALL_BASE+271) 300 300 #define __NR_pciconfig_read (__NR_SYSCALL_BASE+272) 301 301 #define __NR_pciconfig_write (__NR_SYSCALL_BASE+273)