Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Generic userspace implementations of gettimeofday() and similar.
4 */
5#include <vdso/auxclock.h>
6#include <vdso/clocksource.h>
7#include <vdso/datapage.h>
8#include <vdso/helpers.h>
9#include <vdso/ktime.h>
10#include <vdso/limits.h>
11#include <vdso/math64.h>
12#include <vdso/time32.h>
13#include <vdso/time64.h>
14
15/*
16 * The generic vDSO implementation requires that gettimeofday.h
17 * provides:
18 * - __arch_get_hw_counter(): to get the hw counter based on the
19 * clock_mode.
20 * - gettimeofday_fallback(): fallback for gettimeofday.
21 * - clock_gettime_fallback(): fallback for clock_gettime.
22 * - clock_getres_fallback(): fallback for clock_getres.
23 */
24#include <asm/vdso/gettimeofday.h>
25
26/* Bring in default accessors */
27#include <vdso/vsyscall.h>
28
#ifndef vdso_calc_ns

/*
 * Cycle delta mask applied in vdso_calc_ns(). When the architecture
 * defines VDSO_DELTA_NOMASK the delta is not masked at all (all-ones
 * mask), otherwise the clocksource mask from the vdso_clock data is
 * applied.
 */
#ifdef VDSO_DELTA_NOMASK
# define VDSO_DELTA_MASK(vd)	ULLONG_MAX
#else
# define VDSO_DELTA_MASK(vd)	(vd->mask)
#endif
36
#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
/*
 * Check whether the cycle delta is below the precomputed limit, i.e.
 * whether the mult/shift conversion in vdso_calc_ns() is free of
 * multiplication overflow. Callers take a slower wide-math path when
 * this returns false.
 */
static __always_inline bool vdso_delta_ok(const struct vdso_clock *vc, u64 delta)
{
	return delta < vc->max_cycles;
}
#else
/* Overflow protection disabled: every delta is treated as convertible. */
static __always_inline bool vdso_delta_ok(const struct vdso_clock *vc, u64 delta)
{
	return true;
}
#endif
48
49#ifndef vdso_shift_ns
50static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
51{
52 return ns >> shift;
53}
54#endif
55
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 *
 * Converts the masked cycle delta since the last update to nanoseconds
 * and adds it to @base.
 */
static __always_inline u64 vdso_calc_ns(const struct vdso_clock *vc, u64 cycles, u64 base)
{
	/* Masked delta since the last clocksource readout (cycle_last) */
	u64 delta = (cycles - vc->cycle_last) & VDSO_DELTA_MASK(vc);

	/* Fast path: the 64bit multiply + shift cannot overflow */
	if (likely(vdso_delta_ok(vc, delta)))
		return vdso_shift_ns((delta * vc->mult) + base, vc->shift);

	/* Slow path: wide (u64 x u32) math avoids multiplication overflow */
	return mul_u64_u32_add_u64_shr(delta, vc->mult, base, vc->shift);
}
#endif /* vdso_calc_ns */
70
71#ifndef __arch_vdso_hres_capable
72static inline bool __arch_vdso_hres_capable(void)
73{
74 return true;
75}
76#endif
77
78#ifndef vdso_clocksource_ok
79static inline bool vdso_clocksource_ok(const struct vdso_clock *vc)
80{
81 return vc->clock_mode != VDSO_CLOCKMODE_NONE;
82}
83#endif
84
85#ifndef vdso_cycles_ok
86static inline bool vdso_cycles_ok(u64 cycles)
87{
88 return true;
89}
90#endif
91
92static __always_inline bool vdso_clockid_valid(clockid_t clock)
93{
94 /* Check for negative values or invalid clocks */
95 return likely((u32) clock <= CLOCK_AUX_LAST);
96}
97
98/*
99 * Must not be invoked within the sequence read section as a race inside
100 * that loop could result in __iter_div_u64_rem() being extremely slow.
101 */
102static __always_inline void vdso_set_timespec(struct __kernel_timespec *ts, u64 sec, u64 ns)
103{
104 ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
105 ts->tv_nsec = ns;
106}
107
/*
 * Read the hardware counter and produce the (sec, ns) timestamp for
 * the base time at @clkidx. Returns false when the clocksource is not
 * usable from the VDSO or the counter value is rejected by the
 * architecture hook; callers then fall back to the syscall.
 *
 * NOTE(review): this runs inside the callers' seqcount retry loops, so
 * the values read here are only valid once the caller's
 * vdso_read_retry() check succeeded.
 */
static __always_inline
bool vdso_get_timestamp(const struct vdso_time_data *vd, const struct vdso_clock *vc,
			unsigned int clkidx, u64 *sec, u64 *ns)
{
	const struct vdso_timestamp *vdso_ts = &vc->basetime[clkidx];
	u64 cycles;

	if (unlikely(!vdso_clocksource_ok(vc)))
		return false;

	cycles = __arch_get_hw_counter(vc->clock_mode, vd);
	if (unlikely(!vdso_cycles_ok(cycles)))
		return false;

	/* Base time plus the scaled counter delta since the last update */
	*ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
	*sec = vdso_ts->sec;

	return true;
}
127
128static __always_inline
129const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_time_data *vd)
130{
131 return (void *)vd + PAGE_SIZE;
132}
133
134static __always_inline
135bool do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
136 clockid_t clk, struct __kernel_timespec *ts)
137{
138 const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
139 const struct timens_offset *offs = &vcns->offset[clk];
140 const struct vdso_clock *vc = vd->clock_data;
141 u32 seq;
142 s64 sec;
143 u64 ns;
144
145 if (clk != CLOCK_MONOTONIC_RAW)
146 vc = &vc[CS_HRES_COARSE];
147 else
148 vc = &vc[CS_RAW];
149
150 do {
151 seq = vdso_read_begin(vc);
152
153 if (!vdso_get_timestamp(vd, vc, clk, &sec, &ns))
154 return false;
155 } while (vdso_read_retry(vc, seq));
156
157 /* Add the namespace offset */
158 sec += offs->sec;
159 ns += offs->nsec;
160
161 vdso_set_timespec(ts, sec, ns);
162
163 return true;
164}
165
/*
 * High resolution clock read: loops under the seqcount until a
 * consistent timestamp was read, then converts it to a timespec.
 * Returns false when the VDSO cannot serve this clock, which makes
 * the callers take the syscall fallback.
 */
static __always_inline
bool do_hres(const struct vdso_time_data *vd, const struct vdso_clock *vc,
	     clockid_t clk, struct __kernel_timespec *ts)
{
	u64 sec, ns;
	u32 seq;

	/* Allows to compile the high resolution parts out */
	if (!__arch_vdso_hres_capable())
		return false;

	do {
		/* Tasks in a time namespace are diverted to the timens variant */
		if (vdso_read_begin_timens(vc, &seq))
			return do_hres_timens(vd, vc, clk, ts);

		if (!vdso_get_timestamp(vd, vc, clk, &sec, &ns))
			return false;
	} while (vdso_read_retry(vc, seq));

	/* Normalizes ns overflow; must stay outside the seqcount section */
	vdso_set_timespec(ts, sec, ns);

	return true;
}
189
/*
 * Coarse clock read for tasks inside a time namespace: read the coarse
 * base time from the real time data page under the seqcount and apply
 * the per-namespace (sec, nsec) offset.
 *
 * NOTE(review): @seq is s32 here while the other readers use u32 —
 * harmless for the comparison in vdso_read_retry(), but inconsistent.
 */
static __always_inline
bool do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
		      clockid_t clk, struct __kernel_timespec *ts)
{
	const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
	const struct timens_offset *offs = &vcns->offset[clk];
	const struct vdso_clock *vc = vd->clock_data;
	const struct vdso_timestamp *vdso_ts;
	u64 nsec;
	s64 sec;
	s32 seq;

	vdso_ts = &vc->basetime[clk];

	do {
		seq = vdso_read_begin(vc);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (vdso_read_retry(vc, seq));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/* Normalizes nsec overflow; must stay outside the seqcount section */
	vdso_set_timespec(ts, sec, nsec);

	return true;
}
218
/*
 * Coarse clock read: copies the coarse base time directly into the
 * timespec under the seqcount loop — no counter read or scaling is
 * involved. Always succeeds.
 */
static __always_inline
bool do_coarse(const struct vdso_time_data *vd, const struct vdso_clock *vc,
	       clockid_t clk, struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vc->basetime[clk];
	u32 seq;

	do {
		/* Tasks in a time namespace are diverted to the timens variant */
		if (vdso_read_begin_timens(vc, &seq))
			return do_coarse_timens(vd, vc, clk, ts);

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (vdso_read_retry(vc, seq));

	return true;
}
236
/*
 * Read one of the auxiliary clocks (clock ids starting at CLOCK_AUX).
 * Returns false when auxiliary clocks are not compiled in, the
 * requested aux clock is disabled, or the timestamp read fails —
 * callers then take the syscall fallback.
 */
static __always_inline
bool do_aux(const struct vdso_time_data *vd, clockid_t clock, struct __kernel_timespec *ts)
{
	const struct vdso_clock *vc;
	u32 seq, idx;
	u64 sec, ns;

	if (!IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS))
		return false;

	/* Index into the per-clock aux array */
	idx = clock - CLOCK_AUX;
	vc = &vd->aux_clock_data[idx];

	do {
		/*
		 * Tasks in a time namespace initially point to the timens
		 * page; switch to the real time data page and retry.
		 */
		if (vdso_read_begin_timens(vc, &seq)) {
			vd = __arch_get_vdso_u_timens_data(vd);
			vc = &vd->aux_clock_data[idx];
			/* Re-read from the real time data page */
			continue;
		}

		/* Auxclock disabled? */
		if (vc->clock_mode == VDSO_CLOCKMODE_NONE)
			return false;

		if (!vdso_get_timestamp(vd, vc, VDSO_BASE_AUX, &sec, &ns))
			return false;
	} while (vdso_read_retry(vc, seq));

	/* Normalizes ns overflow; must stay outside the seqcount section */
	vdso_set_timespec(ts, sec, ns);

	return true;
}
270
/*
 * Dispatch a clock_gettime() request to the matching VDSO
 * implementation. Returns false when the clock id is invalid or not
 * handled in the VDSO; callers then take the syscall fallback.
 */
static __always_inline bool
__cvdso_clock_gettime_common(const struct vdso_time_data *vd, clockid_t clock,
			     struct __kernel_timespec *ts)
{
	const struct vdso_clock *vc = vd->clock_data;
	u32 msk;

	if (!vdso_clockid_valid(clock))
		return false;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vc = &vc[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(vd, &vc[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vc = &vc[CS_RAW];
	else if (msk & VDSO_AUX)
		return do_aux(vd, clock, ts);
	else
		return false;

	/* VDSO_HRES and VDSO_RAW share the high resolution read path */
	return do_hres(vd, vc, clock, ts);
}
299
300static int
301__cvdso_clock_gettime_data(const struct vdso_time_data *vd, clockid_t clock,
302 struct __kernel_timespec *ts)
303{
304 bool ok;
305
306 ok = __cvdso_clock_gettime_common(vd, clock, ts);
307
308 if (unlikely(!ok))
309 return clock_gettime_fallback(clock, ts);
310 return 0;
311}
312
313static __maybe_unused int
314__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
315{
316 return __cvdso_clock_gettime_data(__arch_get_vdso_u_time_data(), clock, ts);
317}
318
319#ifdef BUILD_VDSO32
320static int
321__cvdso_clock_gettime32_data(const struct vdso_time_data *vd, clockid_t clock,
322 struct old_timespec32 *res)
323{
324 struct __kernel_timespec ts;
325 bool ok;
326
327 ok = __cvdso_clock_gettime_common(vd, clock, &ts);
328
329 if (unlikely(!ok))
330 return clock_gettime32_fallback(clock, res);
331
332 /* For ok == true */
333 res->tv_sec = ts.tv_sec;
334 res->tv_nsec = ts.tv_nsec;
335
336 return 0;
337}
338
339static __maybe_unused int
340__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
341{
342 return __cvdso_clock_gettime32_data(__arch_get_vdso_u_time_data(), clock, res);
343}
344#endif /* BUILD_VDSO32 */
345
/*
 * gettimeofday(): high resolution CLOCK_REALTIME for @tv plus the
 * timezone data for @tz. Falls back to the syscall when the high
 * resolution read is not possible.
 */
static int
__cvdso_gettimeofday_data(const struct vdso_time_data *vd,
			  struct __kernel_old_timeval *tv, struct timezone *tz)
{
	const struct vdso_clock *vc = vd->clock_data;

	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (!do_hres(vd, &vc[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		/* Convert nanoseconds to microseconds */
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		/* Timezone data lives in the real time data page */
		if (vdso_is_timens_clock(vc))
			vd = __arch_get_vdso_u_timens_data(vd);

		/*
		 * NOTE(review): vd[CS_HRES_COARSE] indexes the time data
		 * struct itself, not a clock array; this is only correct
		 * if CS_HRES_COARSE == 0 — verify; plain vd-> would be
		 * clearer.
		 */
		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}
372
373static __maybe_unused int
374__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
375{
376 return __cvdso_gettimeofday_data(__arch_get_vdso_u_time_data(), tv, tz);
377}
378
379#ifdef VDSO_HAS_TIME
/*
 * time(): returns the coarse CLOCK_REALTIME seconds value and, when
 * @time is non-NULL, stores it there as well.
 */
static __kernel_old_time_t
__cvdso_time_data(const struct vdso_time_data *vd, __kernel_old_time_t *time)
{
	const struct vdso_clock *vc = vd->clock_data;
	__kernel_old_time_t t;

	/* Tasks in a time namespace read from the real time data page */
	if (vdso_is_timens_clock(vc)) {
		vd = __arch_get_vdso_u_timens_data(vd);
		vc = vd->clock_data;
	}

	/*
	 * No seqcount retry loop: a single READ_ONCE() of the seconds
	 * value. Presumably acceptable because time() only has second
	 * resolution — NOTE(review): confirm against the timekeeper side.
	 */
	t = READ_ONCE(vc[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
398
399static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
400{
401 return __cvdso_time_data(__arch_get_vdso_u_time_data(), time);
402}
403#endif /* VDSO_HAS_TIME */
404
405#ifdef VDSO_HAS_CLOCK_GETRES
/*
 * clock_getres(): resolution of the clocks handled by the VDSO.
 * Returns false for clock ids the VDSO does not handle, so callers
 * can take the syscall fallback.
 */
static __always_inline
bool __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
				 struct __kernel_timespec *res)
{
	const struct vdso_clock *vc = vd->clock_data;
	u32 msk;
	u64 ns;

	if (!vdso_clockid_valid(clock))
		return false;

	/* Tasks in a time namespace read from the real time data page */
	if (vdso_is_timens_clock(vc))
		vd = __arch_get_vdso_u_timens_data(vd);

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd->hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else if (msk & VDSO_AUX) {
		ns = aux_clock_resolution_ns();
	} else {
		return false;
	}

	/* @res is optional: a NULL pointer just validates the clock id */
	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return true;
}
447
448static
449int __cvdso_clock_getres_data(const struct vdso_time_data *vd, clockid_t clock,
450 struct __kernel_timespec *res)
451{
452 bool ok;
453
454 ok = __cvdso_clock_getres_common(vd, clock, res);
455
456 if (unlikely(!ok))
457 return clock_getres_fallback(clock, res);
458 return 0;
459}
460
461static __maybe_unused
462int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
463{
464 return __cvdso_clock_getres_data(__arch_get_vdso_u_time_data(), clock, res);
465}
466
467#ifdef BUILD_VDSO32
468static int
469__cvdso_clock_getres_time32_data(const struct vdso_time_data *vd, clockid_t clock,
470 struct old_timespec32 *res)
471{
472 struct __kernel_timespec ts;
473 bool ok;
474
475 ok = __cvdso_clock_getres_common(vd, clock, &ts);
476
477 if (unlikely(!ok))
478 return clock_getres32_fallback(clock, res);
479
480 if (likely(res)) {
481 res->tv_sec = ts.tv_sec;
482 res->tv_nsec = ts.tv_nsec;
483 }
484 return 0;
485}
486
487static __maybe_unused int
488__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
489{
490 return __cvdso_clock_getres_time32_data(__arch_get_vdso_u_time_data(),
491 clock, res);
492}
493#endif /* BUILD_VDSO32 */
494#endif /* VDSO_HAS_CLOCK_GETRES */