Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timekeeping fix from Thomas Gleixner:
"A single fix for a regression caused by the generic VDSO
implementation where a math overflow causes CLOCK_BOOTTIME to become a
random number generator"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
timekeeping/vsyscall: Prevent math overflow in BOOTTIME update
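
The failure mode described above comes down to one shift: the old VDSO update shifted the full nanosecond boot-time offset (wall_to_monotonic.tv_nsec + ktime_to_ns(tk->offs_boot)) left by tk->tkr_mono.shift, and that product silently wraps a u64 once enough suspend time accumulates. Below is a minimal userspace sketch of the arithmetic; the shift value (24) and the five-hour offset are invented illustration values, not figures from the commit.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint32_t shift = 24;    /* hypothetical clocksource shift */
            /* ~5 hours of accumulated suspend time, plus half a second */
            uint64_t offs_boot_ns = 18000ULL * NSEC_PER_SEC + 500000000ULL;

            /* Old scheme: shift the whole offset. The offset alone needs
             * ~44 bits here, so << 24 exceeds 64 bits and wraps. */
            uint64_t old = offs_boot_ns << shift;

            /* New scheme: split into seconds and sub-second nanoseconds
             * first; only the sub-second part (< 10^9, ~30 bits) is
             * ever shifted. */
            uint64_t sec  = offs_boot_ns / NSEC_PER_SEC;
            uint64_t nsec = (offs_boot_ns % NSEC_PER_SEC) << shift;

            printf("old scheme recovers %llu ns (wrapped, expected %llu)\n",
                   (unsigned long long)(old >> shift),
                   (unsigned long long)offs_boot_ns);
            printf("new scheme recovers %llu s + %llu ns\n",
                   (unsigned long long)sec,
                   (unsigned long long)(nsec >> shift));
            return 0;
    }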

Changed files
+23 -9
include/linux/timekeeper_internal.h  +5
···
  * @cs_was_changed_seq:  The sequence number of clocksource change events
  * @next_leap_ktime:     CLOCK_MONOTONIC time value of a pending leap-second
  * @raw_sec:             CLOCK_MONOTONIC_RAW time in seconds
+ * @monotonic_to_boot:   CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
  * @cycle_interval:      Number of clock cycles in one NTP interval
  * @xtime_interval:      Number of clock shifted nano seconds in one NTP
  *                       interval.
···
  *
  * wall_to_monotonic is no longer the boot time, getboottime must be
  * used instead.
+ *
+ * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
+ * accelerate the VDSO update for CLOCK_BOOTTIME.
  */
 struct timekeeper {
         struct tk_read_base     tkr_mono;
···
         u8                      cs_was_changed_seq;
         ktime_t                 next_leap_ktime;
         u64                     raw_sec;
+        struct timespec64       monotonic_to_boot;
 
         /* The following members are for timekeeping internal use */
         u64                     cycle_interval;
kernel/time/timekeeping.c  +5
···
 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 {
         tk->offs_boot = ktime_add(tk->offs_boot, delta);
+        /*
+         * Timespec representation for VDSO update to avoid 64bit division
+         * on every update.
+         */
+        tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
 }
 
 /*
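
The new comment is about where the cost lands: converting a ktime_t (signed 64-bit nanoseconds) to a timespec64 takes a 64-bit division, and tk_update_sleep_time() runs only in the resume path, so caching the converted value here keeps that division out of every subsequent VDSO update. Conceptually the conversion is just the usual sec/nsec split; a rough self-contained sketch (the kernel uses the real ktime_to_timespec64() helper):

    #include <stdint.h>

    /* Illustrative stand-in for the kernel's struct timespec64 */
    struct ts64 {
            int64_t tv_sec;
            int64_t tv_nsec;
    };

    /* Rough equivalent of ktime_to_timespec64() for the non-negative
     * offsets that offs_boot can take */
    static struct ts64 ns_to_ts64(int64_t ns)
    {
            struct ts64 ts;

            ts.tv_sec  = ns / 1000000000;   /* the 64-bit division */
            ts.tv_nsec = ns % 1000000000;   /* sub-second remainder */
            return ts;
    }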
kernel/time/vsyscall.c  +13 -9
···
                                          struct timekeeper *tk)
 {
         struct vdso_timestamp *vdso_ts;
-        u64 nsec;
+        u64 nsec, sec;
 
         vdata[CS_HRES_COARSE].cycle_last        = tk->tkr_mono.cycle_last;
         vdata[CS_HRES_COARSE].mask              = tk->tkr_mono.mask;
···
         }
         vdso_ts->nsec   = nsec;
 
-        /* CLOCK_MONOTONIC_RAW */
-        vdso_ts         = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
-        vdso_ts->sec    = tk->raw_sec;
-        vdso_ts->nsec   = tk->tkr_raw.xtime_nsec;
+        /* Copy MONOTONIC time for BOOTTIME */
+        sec     = vdso_ts->sec;
+        /* Add the boot offset */
+        sec     += tk->monotonic_to_boot.tv_sec;
+        nsec    += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
 
         /* CLOCK_BOOTTIME */
         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
-        vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-        nsec = tk->tkr_mono.xtime_nsec;
-        nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
-                       ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
+        vdso_ts->sec    = sec;
+
         while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
                 vdso_ts->sec++;
         }
         vdso_ts->nsec   = nsec;
+
+        /* CLOCK_MONOTONIC_RAW */
+        vdso_ts         = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
+        vdso_ts->sec    = tk->raw_sec;
+        vdso_ts->nsec   = tk->tkr_raw.xtime_nsec;
 
         /* CLOCK_TAI */
         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
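
Taken together, the rework is overflow-safe by construction: the only term shifted left by tk->tkr_mono.shift is monotonic_to_boot.tv_nsec, which is below NSEC_PER_SEC (under 2^30) by definition, so the shifted nanosecond sum stays well inside a u64; the whole seconds of the boot offset are added to the seconds value directly, and the existing normalization loop carries any remaining full seconds into vdso_ts->sec. Moving the CLOCK_MONOTONIC_RAW block after CLOCK_BOOTTIME lets the BOOTTIME path reuse the just-computed CLOCK_MONOTONIC sec/nsec values before vdso_ts is repointed.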