Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'timers-core-2025-01-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer and timekeeping updates from Thomas Gleixner:

- Just boring cleanups, typo and comment fixes and trivial optimizations

* tag 'timers-core-2025-01-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
timers/migration: Simplify top level detection on group setup
timers: Optimize get_timer_[this_]cpu_base()
timekeeping: Remove unused ktime_get_fast_timestamps()
timer/migration: Fix kernel-doc warnings for union tmigr_state
tick/broadcast: Add kernel-doc for function parameters
hrtimers: Update the return type of enqueue_hrtimer()
clocksource/wdtest: Print time values for short udelay(1)
posix-timers: Fix typo in __lock_timer()
vdso: Correct typo in PAGE_SHIFT comment

+38 -121
-15
include/linux/timekeeping.h
··· 264 264 extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); 265 265 266 266 /** 267 - * struct ktime_timestamps - Simultaneous mono/boot/real timestamps 268 - * @mono: Monotonic timestamp 269 - * @boot: Boottime timestamp 270 - * @real: Realtime timestamp 271 - */ 272 - struct ktime_timestamps { 273 - u64 mono; 274 - u64 boot; 275 - u64 real; 276 - }; 277 - 278 - /** 279 267 * struct system_time_snapshot - simultaneous raw/real time capture with 280 268 * counter value 281 269 * @cycles: Clocksource counter value to produce the system times ··· 332 344 * Simultaneously snapshot realtime and monotonic raw clocks 333 345 */ 334 346 extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); 335 - 336 - /* NMI safe mono/boot/realtime timestamps */ 337 - extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap); 338 347 339 348 /* 340 349 * Persistent clock related interfaces
+1 -1
include/vdso/page.h
··· 8 8 * PAGE_SHIFT determines the page size. 9 9 * 10 10 * Note: This definition is required because PAGE_SHIFT is used 11 - * in several places throuout the codebase. 11 + * in several places throughout the codebase. 12 12 */ 13 13 #define PAGE_SHIFT CONFIG_PAGE_SHIFT 14 14
+2 -1
kernel/time/clocksource-wdtest.c
··· 137 137 udelay(1); 138 138 j2 = clocksource_wdtest_ktime.read(&clocksource_wdtest_ktime); 139 139 pr_info("--- tsc-like times: %lu - %lu = %lu.\n", j2, j1, j2 - j1); 140 - WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC)); 140 + WARN_ONCE(time_before(j2, j1 + NSEC_PER_USEC), 141 + "Expected at least 1000ns, got %lu.\n", j2 - j1); 141 142 142 143 /* Verify tsc-like stability with various numbers of errors injected. */ 143 144 max_retries = clocksource_get_max_watchdog_retry();
+3 -4
kernel/time/hrtimer.c
··· 1067 1067 * The timer is inserted in expiry order. Insertion into the 1068 1068 * red black tree is O(log(n)). Must hold the base lock. 1069 1069 * 1070 - * Returns 1 when the new timer is the leftmost timer in the tree. 1070 + * Returns true when the new timer is the leftmost timer in the tree. 1071 1071 */ 1072 - static int enqueue_hrtimer(struct hrtimer *timer, 1073 - struct hrtimer_clock_base *base, 1074 - enum hrtimer_mode mode) 1072 + static bool enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, 1073 + enum hrtimer_mode mode) 1075 1074 { 1076 1075 debug_activate(timer, mode); 1077 1076 WARN_ON_ONCE(!base->cpu_base->online);
+1 -1
kernel/time/posix-timers.c
··· 538 538 * When the reference count reaches zero, the timer is scheduled 539 539 * for RCU removal after the grace period. 540 540 * 541 - * Holding rcu_read_lock() accross the lookup ensures that 541 + * Holding rcu_read_lock() across the lookup ensures that 542 542 * the timer cannot be freed. 543 543 * 544 544 * The lookup validates locklessly that timr::it_signal ==
+2
kernel/time/tick-broadcast.c
··· 1020 1020 1021 1021 /** 1022 1022 * tick_broadcast_setup_oneshot - setup the broadcast device 1023 + * @bc: the broadcast device 1024 + * @from_periodic: true if called from periodic mode 1023 1025 */ 1024 1026 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc, 1025 1027 bool from_periodic)
+13 -74
kernel/time/timekeeping.c
··· 485 485 } 486 486 EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns); 487 487 488 - static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono) 489 - { 490 - struct tk_read_base *tkr; 491 - u64 basem, baser, delta; 492 - unsigned int seq; 493 - 494 - do { 495 - seq = raw_read_seqcount_latch(&tkf->seq); 496 - tkr = tkf->base + (seq & 0x01); 497 - basem = ktime_to_ns(tkr->base); 498 - baser = ktime_to_ns(tkr->base_real); 499 - delta = timekeeping_get_ns(tkr); 500 - } while (raw_read_seqcount_latch_retry(&tkf->seq, seq)); 501 - 502 - if (mono) 503 - *mono = basem + delta; 504 - return baser + delta; 505 - } 506 - 507 488 /** 508 489 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime. 509 490 * ··· 492 511 */ 493 512 u64 ktime_get_real_fast_ns(void) 494 513 { 495 - return __ktime_get_real_fast(&tk_fast_mono, NULL); 514 + struct tk_fast *tkf = &tk_fast_mono; 515 + struct tk_read_base *tkr; 516 + u64 baser, delta; 517 + unsigned int seq; 518 + 519 + do { 520 + seq = raw_read_seqcount_latch(&tkf->seq); 521 + tkr = tkf->base + (seq & 0x01); 522 + baser = ktime_to_ns(tkr->base_real); 523 + delta = timekeeping_get_ns(tkr); 524 + } while (raw_read_seqcount_latch_retry(&tkf->seq, seq)); 525 + 526 + return baser + delta; 496 527 } 497 528 EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns); 498 - 499 - /** 500 - * ktime_get_fast_timestamps: - NMI safe timestamps 501 - * @snapshot: Pointer to timestamp storage 502 - * 503 - * Stores clock monotonic, boottime and realtime timestamps. 504 - * 505 - * Boot time is a racy access on 32bit systems if the sleep time injection 506 - * happens late during resume and not in timekeeping_resume(). That could 507 - * be avoided by expanding struct tk_read_base with boot offset for 32bit 508 - * and adding more overhead to the update. As this is a hard to observe 509 - * once per resume event which can be filtered with reasonable effort using 510 - * the accurate mono/real timestamps, it's probably not worth the trouble. 511 - * 512 - * Aside of that it might be possible on 32 and 64 bit to observe the 513 - * following when the sleep time injection happens late: 514 - * 515 - * CPU 0 CPU 1 516 - * timekeeping_resume() 517 - * ktime_get_fast_timestamps() 518 - * mono, real = __ktime_get_real_fast() 519 - * inject_sleep_time() 520 - * update boot offset 521 - * boot = mono + bootoffset; 522 - * 523 - * That means that boot time already has the sleep time adjustment, but 524 - * real time does not. On the next readout both are in sync again. 525 - * 526 - * Preventing this for 64bit is not really feasible without destroying the 527 - * careful cache layout of the timekeeper because the sequence count and 528 - * struct tk_read_base would then need two cache lines instead of one. 529 - * 530 - * Access to the time keeper clock source is disabled across the innermost 531 - * steps of suspend/resume. The accessors still work, but the timestamps 532 - * are frozen until time keeping is resumed which happens very early. 533 - * 534 - * For regular suspend/resume there is no observable difference vs. sched 535 - * clock, but it might affect some of the nasty low level debug printks. 536 - * 537 - * OTOH, access to sched clock is not guaranteed across suspend/resume on 538 - * all systems either so it depends on the hardware in use. 539 - * 540 - * If that turns out to be a real problem then this could be mitigated by 541 - * using sched clock in a similar way as during early boot. But it's not as 542 - * trivial as on early boot because it needs some careful protection 543 - * against the clock monotonic timestamp jumping backwards on resume. 544 - */ 545 - void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot) 546 - { 547 - struct timekeeper *tk = &tk_core.timekeeper; 548 - 549 - snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono); 550 - snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot)); 551 - } 552 529 553 530 /** 554 531 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
+6 -10
kernel/time/timer.c
··· 956 956 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) 957 957 { 958 958 int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL; 959 - struct timer_base *base; 960 - 961 - base = per_cpu_ptr(&timer_bases[index], cpu); 962 959 963 960 /* 964 961 * If the timer is deferrable and NO_HZ_COMMON is set then we need 965 962 * to use the deferrable base. 966 963 */ 967 964 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 968 - base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); 969 - return base; 965 + index = BASE_DEF; 966 + 967 + return per_cpu_ptr(&timer_bases[index], cpu); 970 968 } 971 969 972 970 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) 973 971 { 974 972 int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL; 975 - struct timer_base *base; 976 - 977 - base = this_cpu_ptr(&timer_bases[index]); 978 973 979 974 /* 980 975 * If the timer is deferrable and NO_HZ_COMMON is set then we need 981 976 * to use the deferrable base. 982 977 */ 983 978 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 984 - base = this_cpu_ptr(&timer_bases[BASE_DEF]); 985 - return base; 979 + index = BASE_DEF; 980 + 981 + return this_cpu_ptr(&timer_bases[index]); 986 982 } 987 983 988 984 static inline struct timer_base *get_timer_base(u32 tflags)
+1 -3
kernel/time/timer_migration.c
··· 1670 1670 * be different from tmigr_hierarchy_levels, contains only a 1671 1671 * single group. 1672 1672 */ 1673 - if (group->parent || i == tmigr_hierarchy_levels || 1674 - (list_empty(&tmigr_level_list[i]) && 1675 - list_is_singular(&tmigr_level_list[i - 1]))) 1673 + if (group->parent || list_is_singular(&tmigr_level_list[i - 1])) 1676 1674 break; 1677 1675 1678 1676 } while (i < tmigr_hierarchy_levels);
+9 -12
kernel/time/timer_migration.h
··· 110 110 * union tmigr_state - state of tmigr_group 111 111 * @state: Combined version of the state - only used for atomic 112 112 * read/cmpxchg function 113 - * @struct: Split version of the state - only use the struct members to 113 + * &anon struct: Split version of the state - only use the struct members to 114 114 * update information to stay independent of endianness 115 + * @active: Contains each mask bit of the active children 116 + * @migrator: Contains mask of the child which is migrator 117 + * @seq: Sequence counter needs to be increased when an update 118 + * to the tmigr_state is done. It prevents a race when 119 + * updates in the child groups are propagated in changed 120 + * order. Detailed information about the scenario is 121 + * given in the documentation at the begin of 122 + * timer_migration.c. 115 123 */ 116 124 union tmigr_state { 117 125 u32 state; 118 - /** 119 - * struct - split state of tmigr_group 120 - * @active: Contains each mask bit of the active children 121 - * @migrator: Contains mask of the child which is migrator 122 - * @seq: Sequence counter needs to be increased when an update 123 - * to the tmigr_state is done. It prevents a race when 124 - * updates in the child groups are propagated in changed 125 - * order. Detailed information about the scenario is 126 - * given in the documentation at the begin of 127 - * timer_migration.c. 128 - */ 129 126 struct { 130 127 u8 active; 131 128 u8 migrator;