Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched_clock: fix cpu_clock()

 kernel/sched_clock.c | +34 -50
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -12,19 +12,17 @@
  *
  * Create a semi stable clock from a mixture of other events, including:
  *  - gtod
- *  - jiffies
  *  - sched_clock()
  *  - explicit idle events
  *
  * We use gtod as base and the unstable clock deltas. The deltas are filtered,
- * making it monotonic and keeping it within an expected window. This window
- * is set up using jiffies.
+ * making it monotonic and keeping it within an expected window.
  *
  * Furthermore, explicit sleep and wakeup hooks allow us to account for time
  * that is otherwise invisible (TSC gets stopped).
  *
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
- * consistent between cpus (never more than 1 jiffies difference).
+ * consistent between cpus (never more than 2 jiffies difference).
  */
 #include <linux/sched.h>
 #include <linux/percpu.h>
@@ -54,7 +52,6 @@
 	 */
 	raw_spinlock_t		lock;
 
-	unsigned long		tick_jiffies;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
@@ -75,14 +72,12 @@
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
-	unsigned long now_jiffies = jiffies;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		struct sched_clock_data *scd = cpu_sdc(cpu);
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-		scd->tick_jiffies = now_jiffies;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
@@ -92,46 +87,51 @@
 }
 
 /*
+ * min,max except they take wrapping into account
+ */
+
+static inline u64 wrap_min(u64 x, u64 y)
+{
+	return (s64)(x - y) < 0 ? x : y;
+}
+
+static inline u64 wrap_max(u64 x, u64 y)
+{
+	return (s64)(x - y) > 0 ? x : y;
+}
+
+/*
  * update the percpu scd from the raw @now value
  *
  * - filter out backward motion
- * - use jiffies to generate a min,max window to clip the raw values
+ * - use the GTOD tick value to create a window to filter crazy TSC values
  */
 static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
-	unsigned long now_jiffies = jiffies;
-	long delta_jiffies = now_jiffies - scd->tick_jiffies;
-	u64 clock = scd->clock;
-	u64 min_clock, max_clock;
 	s64 delta = now - scd->tick_raw;
+	u64 clock, min_clock, max_clock;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
 
-	if (unlikely(delta < 0)) {
-		clock++;
-		goto out;
-	}
+	if (unlikely(delta < 0))
+		delta = 0;
 
-	max_clock = min_clock + TICK_NSEC;
+	/*
+	 * scd->clock = clamp(scd->tick_gtod + delta,
+	 *		      max(scd->tick_gtod, scd->clock),
+	 *		      scd->tick_gtod + TICK_NSEC);
+	 */
 
-	if (unlikely(clock + delta > max_clock)) {
-		if (clock < max_clock)
-			clock = max_clock;
-		else
-			clock++;
-	} else {
-		clock += delta;
-	}
+	clock = scd->tick_gtod + delta;
+	min_clock = wrap_max(scd->tick_gtod, scd->clock);
+	max_clock = scd->tick_gtod + TICK_NSEC;
 
-out:
-	if (unlikely(clock < min_clock))
-		clock = min_clock;
+	clock = wrap_max(clock, min_clock);
+	clock = wrap_min(clock, max_clock);
 
-	scd->tick_jiffies = now_jiffies;
 	scd->clock = clock;
 
-	return clock;
+	return scd->clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
@@ -171,7 +171,7 @@
 		 * larger time as the latest time for both
 		 * runqueues. (this creates monotonic movement)
 		 */
-		if (likely(remote_clock < this_clock)) {
+		if (likely((s64)(remote_clock - this_clock) < 0)) {
 			clock = this_clock;
 			scd->clock = clock;
 		} else {
@@ -207,14 +207,9 @@
 	now = sched_clock();
 
 	__raw_spin_lock(&scd->lock);
-	__update_sched_clock(scd, now);
-	/*
-	 * update tick_gtod after __update_sched_clock() because that will
-	 * already observe 1 new jiffy; adding a new tick_gtod to that would
-	 * increase the clock 2 jiffies.
-	 */
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
+	__update_sched_clock(scd, now);
 	__raw_spin_unlock(&scd->lock);
 }
 
@@ -232,18 +227,7 @@
  */
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
-	struct sched_clock_data *scd = this_scd();
-
-	/*
-	 * Override the previous timestamp and ignore all
-	 * sched_clock() deltas that occured while we idled,
-	 * and use the PM-provided delta_ns to advance the
-	 * rq clock:
-	 */
-	__raw_spin_lock(&scd->lock);
-	scd->clock += delta_ns;
-	__raw_spin_unlock(&scd->lock);
-
+	sched_clock_tick();
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
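
The substance of the fix is the new clamping scheme in __update_sched_clock(): the raw sched_clock() delta is applied on top of the last GTOD tick value and then clamped into the window [max(tick_gtod, clock), tick_gtod + TICK_NSEC] using wrap-safe comparisons, while sched_clock_idle_wakeup_event() now simply resynchronizes via sched_clock_tick(). Below is a minimal userspace sketch of that clamping logic, for illustration only; the 1 ms TICK_NSEC, the struct and function names, and the test values are assumptions, not the kernel's code.

/*
 * Userspace sketch (an assumption, not kernel code) of the new clamping in
 * __update_sched_clock(): apply the raw sched_clock() delta on top of the
 * last GTOD tick and clamp the result into
 * [max(tick_gtod, clock), tick_gtod + TICK_NSEC].
 */
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL		/* assumed: 1 ms tick, i.e. HZ=1000 */

/* min/max that stay correct across u64 wrap-around (signed difference) */
static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

struct scd {
	uint64_t tick_raw;		/* sched_clock() value at the last tick */
	uint64_t tick_gtod;		/* GTOD time at the last tick */
	uint64_t clock;			/* filtered clock handed out to callers */
};

static uint64_t update_clock(struct scd *scd, uint64_t now)
{
	int64_t delta = now - scd->tick_raw;
	uint64_t clock, min_clock, max_clock;

	if (delta < 0)			/* filter out backward motion */
		delta = 0;

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, scd->clock);	/* stay monotonic */
	max_clock = scd->tick_gtod + TICK_NSEC;			/* at most one tick past gtod */

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	scd->clock = clock;
	return scd->clock;
}

int main(void)
{
	struct scd scd = {
		.tick_raw  = 100,
		.tick_gtod = 1000000000ULL,
		.clock     = 1000000000ULL,
	};

	/* a sane delta advances the clock normally */
	printf("%llu\n", (unsigned long long)update_clock(&scd, 100 + 500000));
	/* a crazy TSC jump is clipped to tick_gtod + TICK_NSEC */
	printf("%llu\n", (unsigned long long)update_clock(&scd, 100 + 50 * TICK_NSEC));
	/* backward motion never makes the clock go backwards */
	printf("%llu\n", (unsigned long long)update_clock(&scd, 0));
	return 0;
}

The wrap-safe comparisons matter because these clocks are free-running 64-bit counters; comparing via a signed difference keeps the min/max choice correct even across a wrap.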