Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

time: Convert x86_64 to using new update_vsyscall

Switch x86_64 to using the new, sub-ns precise vsyscall update path.

Cc: Tony Luck <tony.luck@intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Turner <pjt@google.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Stultz <john.stultz@linaro.org>
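What "sub-ns precise" means here: the timekeeping core now hands update_vsyscall() its internal shifted-nanosecond state (tk->xtime_nsec holds nanoseconds pre-shifted left by tk->shift) instead of a tv_nsec value rounded to whole nanoseconds. The writer stores the shifted value as-is; readers accumulate in shifted units and shift down exactly once, so the sub-nanosecond remainder survives between updates. A minimal userspace sketch of that arithmetic, not kernel code; the SHIFT and MULT values are illustrative assumptions, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define SHIFT 24          /* assumed clocksource shift */
#define MULT  (1u << 24)  /* cycles->ns multiplier: 1 ns per cycle, shifted */

int main(void)
{
	uint64_t wall_time_snsec = 500ULL << SHIFT; /* base time: 500 ns, kept shifted */
	uint64_t delta_cycles = 1000;               /* cycles since cycle_last */

	/* Reader side: accumulate in shifted units, shift down exactly once. */
	uint64_t ns = wall_time_snsec + delta_cycles * MULT;
	ns >>= SHIFT;

	printf("%llu ns\n", (unsigned long long)ns); /* prints: 1500 ns */
	return 0;
}

With MULT chosen as exactly 1 ns per cycle, 500 shifted-ns plus 1000 cycles comes out to 1500 ns; with a non-integral ns-per-cycle rate, the fractional part stays in the shifted value instead of being dropped on every update.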

4 files changed, 44 insertions(+), 29 deletions(-)
arch/x86/Kconfig | +1 -1
@@ -93,7 +93,7 @@
 	select GENERIC_CLOCKEVENTS
 	select ARCH_CLOCKSOURCE_DATA if X86_64
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
-	select GENERIC_TIME_VSYSCALL_OLD if X86_64
+	select GENERIC_TIME_VSYSCALL if X86_64
 	select KTIME_SCALAR if X86_32
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
arch/x86/include/asm/vgtod.h | +2 -2
@@ -17,8 +17,8 @@

 	/* open coded 'struct timespec' */
 	time_t	wall_time_sec;
-	u32	wall_time_nsec;
-	u32	monotonic_time_nsec;
+	u64	wall_time_snsec;
+	u64	monotonic_time_snsec;
 	time_t	monotonic_time_sec;

 	struct timezone sys_tz;
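Why the two fields widen from u32 to u64: they now hold nanoseconds shifted left by the clocksource shift, so values can approach NSEC_PER_SEC << shift, which overflows 32 bits for any nonzero shift (10^9 alone already needs 30 bits). A quick illustrative check, assuming shift = 24:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t NSEC_PER_SEC = 1000000000ULL;
	const unsigned shift = 24;  /* assumed clocksource shift */
	uint64_t max_snsec = NSEC_PER_SEC << shift;

	printf("max shifted-ns value: %llu (%s)\n",
	       (unsigned long long)max_snsec,
	       max_snsec > UINT32_MAX ? "needs 64 bits" : "fits in 32 bits");
	return 0;
}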
arch/x86/kernel/vsyscall_64.c | +27 -18
@@ -82,32 +82,41 @@
 	vsyscall_gtod_data.sys_tz = sys_tz;
 }

-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec monotonic;
+	struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;

-	write_seqcount_begin(&vsyscall_gtod_data.seq);
+	write_seqcount_begin(&vdata->seq);

 	/* copy vsyscall data */
-	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
-	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
-	vsyscall_gtod_data.clock.mask = clock->mask;
-	vsyscall_gtod_data.clock.mult = mult;
-	vsyscall_gtod_data.clock.shift = clock->shift;
+	vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
+	vdata->clock.cycle_last = tk->clock->cycle_last;
+	vdata->clock.mask = tk->clock->mask;
+	vdata->clock.mult = tk->mult;
+	vdata->clock.shift = tk->shift;

-	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
-	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
+	vdata->wall_time_sec = tk->xtime_sec;
+	vdata->wall_time_snsec = tk->xtime_nsec;

-	monotonic = timespec_add(*wall_time, *wtm);
-	vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
-	vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;
+	vdata->monotonic_time_sec = tk->xtime_sec
+					+ tk->wall_to_monotonic.tv_sec;
+	vdata->monotonic_time_snsec = tk->xtime_nsec
+					+ (tk->wall_to_monotonic.tv_nsec
+						<< tk->shift);
+	while (vdata->monotonic_time_snsec >=
+					(((u64)NSEC_PER_SEC) << tk->shift)) {
+		vdata->monotonic_time_snsec -=
+					((u64)NSEC_PER_SEC) << tk->shift;
+		vdata->monotonic_time_sec++;
+	}

-	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
-	vsyscall_gtod_data.monotonic_time_coarse =
-		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
+	vdata->wall_time_coarse.tv_sec = tk->xtime_sec;
+	vdata->wall_time_coarse.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);

-	write_seqcount_end(&vsyscall_gtod_data.seq);
+	vdata->monotonic_time_coarse = timespec_add(vdata->wall_time_coarse,
+						tk->wall_to_monotonic);
+
+	write_seqcount_end(&vdata->seq);
 }

 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
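The new while loop is the subtle part of the writer side: wall_to_monotonic.tv_nsec is shifted up and added to xtime_nsec, which can leave one or more whole seconds sitting in the shifted-ns field, and the loop carries the excess into the seconds field. With both inputs normally kept below one second in their own units, a single pass suffices, but the loop is written defensively. A standalone sketch of that normalization, with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	const unsigned shift = 24;          /* assumed clocksource shift */
	int64_t  mono_sec   = 1000;         /* xtime_sec + wall_to_monotonic.tv_sec */
	uint64_t mono_snsec = (900000000ULL << shift)  /* xtime_nsec (0.9 s, shifted) */
	                    + (700000000ULL << shift); /* wtm.tv_nsec << shift (0.7 s) */

	/* Same carry loop as the patch: fold whole seconds out of snsec. */
	while (mono_snsec >= (NSEC_PER_SEC << shift)) {
		mono_snsec -= NSEC_PER_SEC << shift;
		mono_sec++;
	}

	printf("%lld s + %llu ns\n", (long long)mono_sec,
	       (unsigned long long)(mono_snsec >> shift)); /* 1001 s + 600000000 ns */
	return 0;
}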
arch/x86/vdso/vclock_gettime.c | +14 -8
@@ -80,7 +80,7 @@
 }


-notrace static inline long vgetns(void)
+notrace static inline u64 vgetsns(void)
 {
 	long v;
 	cycles_t cycles;
@@ -91,21 +91,24 @@
 	else
 		return 0;
 	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
-	return (v * gtod->clock.mult) >> gtod->clock.shift;
+	return v * gtod->clock.mult;
 }

 /* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
 notrace static int __always_inline do_realtime(struct timespec *ts)
 {
-	unsigned long seq, ns;
+	unsigned long seq;
+	u64 ns;
 	int mode;

+	ts->tv_nsec = 0;
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
-		ts->tv_nsec = gtod->wall_time_nsec;
-		ns = vgetns();
+		ns = gtod->wall_time_snsec;
+		ns += vgetsns();
+		ns >>= gtod->clock.shift;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

 	timespec_add_ns(ts, ns);
@@ -114,14 +117,17 @@

 notrace static int do_monotonic(struct timespec *ts)
 {
-	unsigned long seq, ns;
+	unsigned long seq;
+	u64 ns;
 	int mode;

+	ts->tv_nsec = 0;
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->monotonic_time_sec;
-		ts->tv_nsec = gtod->monotonic_time_nsec;
-		ns = vgetns();
+		ns = gtod->monotonic_time_snsec;
+		ns += vgetsns();
+		ns >>= gtod->clock.shift;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 	timespec_add_ns(ts, ns);
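On the reader side, vgetsns() now returns the raw (cycles * mult) product without shifting, and do_realtime()/do_monotonic() add it to the shifted base before shifting down exactly once; ts->tv_nsec is zeroed up front because the whole nanosecond part now arrives via timespec_add_ns(), which adds to whatever is already there. A userspace model of that read path, with the seqlock retry loop and vclock selection stripped out; all names and values here are illustrative, not the vdso's actual globals:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL
#define GTOD_SHIFT   24            /* assumed clocksource shift */
#define GTOD_MULT    (1u << 24)    /* 1 ns per cycle, shifted */

/* Stand-ins for vsyscall_gtod_data fields (illustrative values). */
static const uint64_t gtod_wall_snsec = 123456789ULL << GTOD_SHIFT;
static const time_t   gtod_wall_sec   = 1350000000;

/* Like vgetsns(): return the (cycles * mult) product un-shifted. */
static uint64_t vgetsns_model(uint64_t delta_cycles)
{
	return delta_cycles * GTOD_MULT;
}

int main(void)
{
	/* Like do_realtime(): tv_nsec starts at 0, whole ns arrive below. */
	struct timespec ts = { .tv_sec = gtod_wall_sec, .tv_nsec = 0 };

	uint64_t ns = gtod_wall_snsec + vgetsns_model(500);
	ns >>= GTOD_SHIFT;                /* shift down exactly once */

	ts.tv_sec += ns / NSEC_PER_SEC;   /* the job timespec_add_ns() does */
	ts.tv_nsec = ns % NSEC_PER_SEC;

	printf("%lld.%09ld\n", (long long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}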