···
  *
  * Create a semi stable clock from a mixture of other events, including:
  *  - gtod
- *  - jiffies
  *  - sched_clock()
  *  - explicit idle events
  *
  * We use gtod as base and the unstable clock deltas. The deltas are filtered,
- * making it monotonic and keeping it within an expected window. This window
- * is set up using jiffies.
+ * making it monotonic and keeping it within an expected window.
  *
  * Furthermore, explicit sleep and wakeup hooks allow us to account for time
  * that is otherwise invisible (TSC gets stopped).
  *
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
- * consistent between cpus (never more than 1 jiffies difference).
+ * consistent between cpus (never more than 2 jiffies difference).
  */
 #include <linux/sched.h>
 #include <linux/percpu.h>
···
 	 */
 	raw_spinlock_t		lock;
 
-	unsigned long		tick_jiffies;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
···
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
-	unsigned long now_jiffies = jiffies;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		struct sched_clock_data *scd = cpu_sdc(cpu);
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-		scd->tick_jiffies = now_jiffies;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
···
 }
 
 /*
+ * min,max except they take wrapping into account
+ */
+
+static inline u64 wrap_min(u64 x, u64 y)
+{
+	return (s64)(x - y) < 0 ? x : y;
+}
+
+static inline u64 wrap_max(u64 x, u64 y)
+{
+	return (s64)(x - y) > 0 ? x : y;
+}
+
+/*
  * update the percpu scd from the raw @now value
  *
  *  - filter out backward motion
- *  - use jiffies to generate a min,max window to clip the raw values
+ *  - use the GTOD tick value to create a window to filter crazy TSC values
  */
 static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
-	unsigned long now_jiffies = jiffies;
-	long delta_jiffies = now_jiffies - scd->tick_jiffies;
-	u64 clock = scd->clock;
-	u64 min_clock, max_clock;
 	s64 delta = now - scd->tick_raw;
+	u64 clock, min_clock, max_clock;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
-
-	if (unlikely(delta < 0)) {
-		clock++;
-		goto out;
-	}
 
-	max_clock = min_clock + TICK_NSEC;
+	if (unlikely(delta < 0))
+		delta = 0;
 
-	if (unlikely(clock + delta > max_clock)) {
-		if (clock < max_clock)
-			clock = max_clock;
-		else
-			clock++;
-	} else {
-		clock += delta;
-	}
+	/*
+	 * scd->clock = clamp(scd->tick_gtod + delta,
+	 *		      max(scd->tick_gtod, scd->clock),
+	 *		      scd->tick_gtod + TICK_NSEC);
+	 */
 
- out:
-	if (unlikely(clock < min_clock))
-		clock = min_clock;
+	clock = scd->tick_gtod + delta;
+	min_clock = wrap_max(scd->tick_gtod, scd->clock);
+	max_clock = scd->tick_gtod + TICK_NSEC;
 
-	scd->tick_jiffies = now_jiffies;
+	clock = wrap_max(clock, min_clock);
+	clock = wrap_min(clock, max_clock);
+
 	scd->clock = clock;
 
-	return clock;
+	return scd->clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
···
 		 *  larger time as the latest time for both
 		 *  runqueues. (this creates monotonic movement)
 		 */
-		if (likely(remote_clock < this_clock)) {
+		if (likely((s64)(remote_clock - this_clock) < 0)) {
 			clock = this_clock;
 			scd->clock = clock;
 		} else {
···
 	now = sched_clock();
 
 	__raw_spin_lock(&scd->lock);
-	__update_sched_clock(scd, now);
-	/*
-	 * update tick_gtod after __update_sched_clock() because that will
-	 * already observe 1 new jiffy; adding a new tick_gtod to that would
-	 * increase the clock 2 jiffies.
-	 */
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
+	__update_sched_clock(scd, now);
 	__raw_spin_unlock(&scd->lock);
 }
···
  */
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
-	struct sched_clock_data *scd = this_scd();
-
-	/*
-	 * Override the previous timestamp and ignore all
-	 * sched_clock() deltas that occured while we idled,
-	 * and use the PM-provided delta_ns to advance the
-	 * rq clock:
-	 */
-	__raw_spin_lock(&scd->lock);
-	scd->clock += delta_ns;
-	__raw_spin_unlock(&scd->lock);
-
+	sched_clock_tick();
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
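The core of the change is the new filter window: instead of deriving a window from jiffies, the per-cpu clock is clamped to [max(tick_gtod, clock), tick_gtod + TICK_NSEC] using wrap-aware min/max helpers. Below is a minimal userspace sketch of that clamp for illustration only; the u64/s64 typedefs, the TICK_NSEC value, the filter_clock() helper and the sample numbers are stand-ins and not part of the patch.

/*
 * Standalone illustration (not part of the kernel patch) of the clamp
 * performed by __update_sched_clock() above.  The typedefs, TICK_NSEC
 * and filter_clock() are stand-ins chosen for the demo.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

#define TICK_NSEC 1000000ULL	/* pretend 1ms ticks (HZ=1000) */

/* min/max that stay correct when the u64 values wrap around */
static inline u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }
static inline u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }

/* clamp tick_gtod + delta into [max(tick_gtod, prev_clock), tick_gtod + TICK_NSEC] */
static u64 filter_clock(u64 tick_gtod, u64 prev_clock, s64 delta)
{
	u64 clock, min_clock, max_clock;

	if (delta < 0)		/* filter out backward TSC motion */
		delta = 0;

	clock = tick_gtod + delta;
	min_clock = wrap_max(tick_gtod, prev_clock);	/* never go backwards */
	max_clock = tick_gtod + TICK_NSEC;	/* never run more than one tick past gtod */

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);
	return clock;
}

int main(void)
{
	u64 gtod = 5000000, prev = 5200000;

	/* sane delta: clock advances normally */
	printf("%llu\n", (unsigned long long)filter_clock(gtod, prev, 300000));
	/* crazy TSC jump: clipped to the top of the window (gtod + TICK_NSEC) */
	printf("%llu\n", (unsigned long long)filter_clock(gtod, prev, 90000000));
	/* backward TSC: clock stays at the previous value */
	printf("%llu\n", (unsigned long long)filter_clock(gtod, prev, -12345));
	return 0;
}

The (s64)(x - y) comparisons are what keep the helpers correct across u64 wrap-around; the same idiom replaces the plain remote_clock < this_clock comparison in sched_clock_cpu().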