Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers-for-linus-urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-for-linus-urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
hrtimer: Fix /proc/timer_list regression
itimers: Fix racy writes to cpu_itimer fields
timekeeping: Fix clock_gettime vsyscall time warp

+23 -17
+2 -2
arch/ia64/kernel/time.c
···
473 473      {
474 474      }
475 475
476     -    void update_vsyscall(struct timespec *wall, struct clocksource *c)
    476 +   void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
477 477      {
478 478          unsigned long flags;
479 479
···
481 481
482 482          /* copy fsyscall clock data */
483 483          fsyscall_gtod_data.clk_mask = c->mask;
484     -        fsyscall_gtod_data.clk_mult = c->mult;
    484 +        fsyscall_gtod_data.clk_mult = mult;
485 485          fsyscall_gtod_data.clk_shift = c->shift;
486 486          fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
487 487          fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+3 -2
arch/powerpc/kernel/time.c
···
834 834          return (cycle_t)get_tb();
835 835      }
836 836
837     -    void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
    837 +   void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
    838 +                        u32 mult)
838 839      {
839 840          u64 t2x, stamp_xsec;
840 841
···
848 847
849 848          /* XXX this assumes clock->shift == 22 */
850 849          /* 4611686018 ~= 2^(20+64-22) / 1e9 */
851     -        t2x = (u64) clock->mult * 4611686018ULL;
    850 +        t2x = (u64) mult * 4611686018ULL;
852 851          stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
853 852          do_div(stamp_xsec, 1000000000);
854 853          stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+2 -1
arch/s390/kernel/time.c
···
214 214          return &clocksource_tod;
215 215      }
216 216
217     -    void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
    217 +   void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
    218 +                        u32 mult)
218 219      {
219 220          if (clock != &clocksource_tod)
220 221              return;
+3 -2
arch/x86/kernel/vsyscall_64.c
···
73 73            write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
74 74        }
75 75
76      -    void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
     76 +   void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
     77 +                        u32 mult)
77 78        {
78 79            unsigned long flags;
79 80
···
83 82            vsyscall_gtod_data.clock.vread = clock->vread;
84 83            vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
85 84            vsyscall_gtod_data.clock.mask = clock->mask;
86      -        vsyscall_gtod_data.clock.mult = clock->mult;
     85 +        vsyscall_gtod_data.clock.mult = mult;
87 86            vsyscall_gtod_data.clock.shift = clock->shift;
88 87            vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
89 88            vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
+4 -2
include/linux/clocksource.h
···
292 292      }
293 293
294 294      #ifdef CONFIG_GENERIC_TIME_VSYSCALL
295     -    extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
    295 +   extern void
    296 +   update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
296 297      extern void update_vsyscall_tz(void);
297 298      #else
298     -    static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
    299 +   static inline void
    300 +   update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
299 301      {
300 302      }
301 303
+1 -3
include/linux/hrtimer.h
···
446 446
447 447      static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
448 448      {
449     -        if (likely(!timer->start_site))
    449 +        if (likely(!timer_stats_active))
450 450              return;
451 451          timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
452 452                                   timer->function, timer->start_comm, 0);
···
457 457
458 458      static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
459 459      {
460     -        if (likely(!timer_stats_active))
461     -            return;
462 460          __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
463 461      }
464 462
+5 -2
kernel/itimer.c
···
146 146      {
147 147          cputime_t cval, nval, cinterval, ninterval;
148 148          s64 ns_ninterval, ns_nval;
    149 +        u32 error, incr_error;
149 150          struct cpu_itimer *it = &tsk->signal->it[clock_id];
150 151
151 152          nval = timeval_to_cputime(&value->it_value);
···
154 153          ninterval = timeval_to_cputime(&value->it_interval);
155 154          ns_ninterval = timeval_to_ns(&value->it_interval);
156 155
157     -        it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
158     -        it->error = cputime_sub_ns(nval, ns_nval);
    156 +        error = cputime_sub_ns(nval, ns_nval);
    157 +        incr_error = cputime_sub_ns(ninterval, ns_ninterval);
159 158
160 159          spin_lock_irq(&tsk->sighand->siglock);
161 160
···
169 168          }
170 169          it->expires = nval;
171 170          it->incr = ninterval;
    171 +        it->error = error;
    172 +        it->incr_error = incr_error;
172 173          trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
173 174                             ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
174 175
+3 -3
kernel/time/timekeeping.c
··· 170 170 { 171 171 xtime.tv_sec += leapsecond; 172 172 wall_to_monotonic.tv_sec -= leapsecond; 173 - update_vsyscall(&xtime, timekeeper.clock); 173 + update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); 174 174 } 175 175 176 176 #ifdef CONFIG_GENERIC_TIME ··· 328 328 timekeeper.ntp_error = 0; 329 329 ntp_clear(); 330 330 331 - update_vsyscall(&xtime, timekeeper.clock); 331 + update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); 332 332 333 333 write_sequnlock_irqrestore(&xtime_lock, flags); 334 334 ··· 840 840 timekeeper.ntp_error_shift; 841 841 842 842 /* check to see if there is a new clocksource to use */ 843 - update_vsyscall(&xtime, timekeeper.clock); 843 + update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); 844 844 } 845 845 846 846 /**