Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'fortglx/4.19/time-part2' of https://git.linaro.org/people/john.stultz/linux into timers/core

Pull the second set of timekeeping things for 4.19 from John Stultz

* NTP argument cleanups and constification from Ondrej Mosnacek
* Fix to avoid RTC injecting sleeptime when suspend fails from
Mukesh Ojha
* Broadening suspend-timing to include non-stop clocksources that
aren't currently used for timekeeping from Baolin Wang

+218 -44
+3
include/linux/clocksource.h
··· 194 194 extern void clocksource_resume(void); 195 195 extern struct clocksource * __init clocksource_default_clock(void); 196 196 extern void clocksource_mark_unstable(struct clocksource *cs); 197 + extern void 198 + clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles); 199 + extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now); 197 200 198 201 extern u64 199 202 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
+1 -1
include/linux/timekeeping.h
··· 177 177 extern bool timekeeping_rtc_skipsuspend(void); 178 178 extern bool timekeeping_rtc_skipresume(void); 179 179 180 - extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); 180 + extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); 181 181 182 182 /* 183 183 * struct system_time_snapshot - simultaneous raw/real time capture with
+149
kernel/time/clocksource.c
··· 94 94 /*[Clocksource internal variables]--------- 95 95 * curr_clocksource: 96 96 * currently selected clocksource. 97 + * suspend_clocksource: 98 + * used to calculate the suspend time. 97 99 * clocksource_list: 98 100 * linked list with the registered clocksources 99 101 * clocksource_mutex: ··· 104 102 * Name of the user-specified clocksource. 105 103 */ 106 104 static struct clocksource *curr_clocksource; 105 + static struct clocksource *suspend_clocksource; 107 106 static LIST_HEAD(clocksource_list); 108 107 static DEFINE_MUTEX(clocksource_mutex); 109 108 static char override_name[CS_NAME_LEN]; 110 109 static int finished_booting; 110 + static u64 suspend_start; 111 111 112 112 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG 113 113 static void clocksource_watchdog_work(struct work_struct *work); ··· 450 446 static inline void clocksource_watchdog_unlock(unsigned long *flags) { } 451 447 452 448 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ 449 + 450 + static bool clocksource_is_suspend(struct clocksource *cs) 451 + { 452 + return cs == suspend_clocksource; 453 + } 454 + 455 + static void __clocksource_suspend_select(struct clocksource *cs) 456 + { 457 + /* 458 + * Skip the clocksource which will be stopped in suspend state. 459 + */ 460 + if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP)) 461 + return; 462 + 463 + /* 464 + * The nonstop clocksource can be selected as the suspend clocksource to 465 + * calculate the suspend time, so it should not supply suspend/resume 466 + * interfaces to suspend the nonstop clocksource when system suspends. 467 + */ 468 + if (cs->suspend || cs->resume) { 469 + pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n", 470 + cs->name); 471 + } 472 + 473 + /* Pick the best rating. 
*/ 474 + if (!suspend_clocksource || cs->rating > suspend_clocksource->rating) 475 + suspend_clocksource = cs; 476 + } 477 + 478 + /** 479 + * clocksource_suspend_select - Select the best clocksource for suspend timing 480 + * @fallback: if select a fallback clocksource 481 + */ 482 + static void clocksource_suspend_select(bool fallback) 483 + { 484 + struct clocksource *cs, *old_suspend; 485 + 486 + old_suspend = suspend_clocksource; 487 + if (fallback) 488 + suspend_clocksource = NULL; 489 + 490 + list_for_each_entry(cs, &clocksource_list, list) { 491 + /* Skip current if we were requested for a fallback. */ 492 + if (fallback && cs == old_suspend) 493 + continue; 494 + 495 + __clocksource_suspend_select(cs); 496 + } 497 + } 498 + 499 + /** 500 + * clocksource_start_suspend_timing - Start measuring the suspend timing 501 + * @cs: current clocksource from timekeeping 502 + * @start_cycles: current cycles from timekeeping 503 + * 504 + * This function will save the start cycle values of suspend timer to calculate 505 + * the suspend time when resuming system. 506 + * 507 + * This function is called late in the suspend process from timekeeping_suspend(), 508 + * that means processes are freezed, non-boot cpus and interrupts are disabled 509 + * now. It is therefore possible to start the suspend timer without taking the 510 + * clocksource mutex. 511 + */ 512 + void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles) 513 + { 514 + if (!suspend_clocksource) 515 + return; 516 + 517 + /* 518 + * If current clocksource is the suspend timer, we should use the 519 + * tkr_mono.cycle_last value as suspend_start to avoid same reading 520 + * from suspend timer. 
521 + */ 522 + if (clocksource_is_suspend(cs)) { 523 + suspend_start = start_cycles; 524 + return; 525 + } 526 + 527 + if (suspend_clocksource->enable && 528 + suspend_clocksource->enable(suspend_clocksource)) { 529 + pr_warn_once("Failed to enable the non-suspend-able clocksource.\n"); 530 + return; 531 + } 532 + 533 + suspend_start = suspend_clocksource->read(suspend_clocksource); 534 + } 535 + 536 + /** 537 + * clocksource_stop_suspend_timing - Stop measuring the suspend timing 538 + * @cs: current clocksource from timekeeping 539 + * @cycle_now: current cycles from timekeeping 540 + * 541 + * This function will calculate the suspend time from suspend timer. 542 + * 543 + * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource. 544 + * 545 + * This function is called early in the resume process from timekeeping_resume(), 546 + * that means there is only one cpu, no processes are running and the interrupts 547 + * are disabled. It is therefore possible to stop the suspend timer without 548 + * taking the clocksource mutex. 549 + */ 550 + u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) 551 + { 552 + u64 now, delta, nsec = 0; 553 + 554 + if (!suspend_clocksource) 555 + return 0; 556 + 557 + /* 558 + * If current clocksource is the suspend timer, we should use the 559 + * tkr_mono.cycle_last value from timekeeping as current cycle to 560 + * avoid same reading from suspend timer. 561 + */ 562 + if (clocksource_is_suspend(cs)) 563 + now = cycle_now; 564 + else 565 + now = suspend_clocksource->read(suspend_clocksource); 566 + 567 + if (now > suspend_start) { 568 + delta = clocksource_delta(now, suspend_start, 569 + suspend_clocksource->mask); 570 + nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult, 571 + suspend_clocksource->shift); 572 + } 573 + 574 + /* 575 + * Disable the suspend timer to save power if current clocksource is 576 + * not the suspend timer. 
577 + */ 578 + if (!clocksource_is_suspend(cs) && suspend_clocksource->disable) 579 + suspend_clocksource->disable(suspend_clocksource); 580 + 581 + return nsec; 582 + } 453 583 454 584 /** 455 585 * clocksource_suspend - suspend the clocksource(s) ··· 930 792 931 793 clocksource_select(); 932 794 clocksource_select_watchdog(false); 795 + __clocksource_suspend_select(cs); 933 796 mutex_unlock(&clocksource_mutex); 934 797 return 0; 935 798 } ··· 959 820 960 821 clocksource_select(); 961 822 clocksource_select_watchdog(false); 823 + clocksource_suspend_select(false); 962 824 mutex_unlock(&clocksource_mutex); 963 825 } 964 826 EXPORT_SYMBOL(clocksource_change_rating); ··· 983 843 clocksource_select_fallback(); 984 844 if (curr_clocksource == cs) 985 845 return -EBUSY; 846 + } 847 + 848 + if (clocksource_is_suspend(cs)) { 849 + /* 850 + * Select and try to install a replacement suspend clocksource. 851 + * If no replacement suspend clocksource, we will just let the 852 + * clocksource go and have no suspend clocksource. 853 + */ 854 + clocksource_suspend_select(true); 986 855 } 987 856 988 857 clocksource_watchdog_lock(&flags);
+7 -10
kernel/time/ntp.c
··· 642 642 /* 643 643 * Propagate a new txc->status value into the NTP state: 644 644 */ 645 - static inline void process_adj_status(struct timex *txc, struct timespec64 *ts) 645 + static inline void process_adj_status(const struct timex *txc) 646 646 { 647 647 if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { 648 648 time_state = TIME_OK; ··· 665 665 } 666 666 667 667 668 - static inline void process_adjtimex_modes(struct timex *txc, 669 - struct timespec64 *ts, 670 - s32 *time_tai) 668 + static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai) 671 669 { 672 670 if (txc->modes & ADJ_STATUS) 673 - process_adj_status(txc, ts); 671 + process_adj_status(txc); 674 672 675 673 if (txc->modes & ADJ_NANO) 676 674 time_status |= STA_NANO; ··· 716 718 * adjtimex mainly allows reading (and writing, if superuser) of 717 719 * kernel time-keeping variables. used by xntpd. 718 720 */ 719 - int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai) 721 + int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai) 720 722 { 721 723 int result; 722 724 ··· 733 735 734 736 /* If there are input parameters, then process them: */ 735 737 if (txc->modes) 736 - process_adjtimex_modes(txc, ts, time_tai); 738 + process_adjtimex_modes(txc, time_tai); 737 739 738 740 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, 739 741 NTP_SCALE_SHIFT); ··· 1020 1022 1021 1023 static int __init ntp_tick_adj_setup(char *str) 1022 1024 { 1023 - int rc = kstrtol(str, 0, (long *)&ntp_tick_adj); 1024 - 1025 + int rc = kstrtos64(str, 0, &ntp_tick_adj); 1025 1026 if (rc) 1026 1027 return rc; 1027 - ntp_tick_adj <<= NTP_SCALE_SHIFT; 1028 1028 1029 + ntp_tick_adj <<= NTP_SCALE_SHIFT; 1029 1030 return 1; 1030 1031 } 1031 1032
+2 -2
kernel/time/ntp_internal.h
··· 8 8 extern u64 ntp_tick_length(void); 9 9 extern ktime_t ntp_get_next_leap(void); 10 10 extern int second_overflow(time64_t secs); 11 - extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); 12 - extern void __hardpps(const struct timespec64 *, const struct timespec64 *); 11 + extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai); 12 + extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts); 13 13 #endif /* _LINUX_NTP_INTERNAL_H */
+54 -29
kernel/time/timekeeping.c
··· 105 105 } 106 106 } 107 107 108 - static inline struct timespec64 tk_xtime(struct timekeeper *tk) 108 + static inline struct timespec64 tk_xtime(const struct timekeeper *tk) 109 109 { 110 110 struct timespec64 ts; 111 111 ··· 162 162 * a read of the fast-timekeeper tkrs (which is protected by its own locking 163 163 * and update logic). 164 164 */ 165 - static inline u64 tk_clock_read(struct tk_read_base *tkr) 165 + static inline u64 tk_clock_read(const struct tk_read_base *tkr) 166 166 { 167 167 struct clocksource *clock = READ_ONCE(tkr->clock); 168 168 ··· 211 211 } 212 212 } 213 213 214 - static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) 214 + static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr) 215 215 { 216 216 struct timekeeper *tk = &tk_core.timekeeper; 217 217 u64 now, last, mask, max, delta; ··· 255 255 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset) 256 256 { 257 257 } 258 - static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) 258 + static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr) 259 259 { 260 260 u64 cycle_now, delta; 261 261 ··· 352 352 static inline u32 arch_gettimeoffset(void) { return 0; } 353 353 #endif 354 354 355 - static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta) 355 + static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta) 356 356 { 357 357 u64 nsec; 358 358 ··· 363 363 return nsec + arch_gettimeoffset(); 364 364 } 365 365 366 - static inline u64 timekeeping_get_ns(struct tk_read_base *tkr) 366 + static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr) 367 367 { 368 368 u64 delta; 369 369 ··· 371 371 return timekeeping_delta_to_ns(tkr, delta); 372 372 } 373 373 374 - static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles) 374 + static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles) 375 375 { 376 376 u64 delta; 377 
377 ··· 394 394 * slightly wrong timestamp (a few nanoseconds). See 395 395 * @ktime_get_mono_fast_ns. 396 396 */ 397 - static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf) 397 + static void update_fast_timekeeper(const struct tk_read_base *tkr, 398 + struct tk_fast *tkf) 398 399 { 399 400 struct tk_read_base *base = tkf->base; 400 401 ··· 550 549 * number of cycles every time until timekeeping is resumed at which time the 551 550 * proper readout base for the fast timekeeper will be restored automatically. 552 551 */ 553 - static void halt_fast_timekeeper(struct timekeeper *tk) 552 + static void halt_fast_timekeeper(const struct timekeeper *tk) 554 553 { 555 554 static struct tk_read_base tkr_dummy; 556 - struct tk_read_base *tkr = &tk->tkr_mono; 555 + const struct tk_read_base *tkr = &tk->tkr_mono; 557 556 558 557 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 559 558 cycles_at_suspend = tk_clock_read(tkr); ··· 1278 1277 * 1279 1278 * Adds or subtracts an offset value from the current time. 1280 1279 */ 1281 - static int timekeeping_inject_offset(struct timespec64 *ts) 1280 + static int timekeeping_inject_offset(const struct timespec64 *ts) 1282 1281 { 1283 1282 struct timekeeper *tk = &tk_core.timekeeper; 1284 1283 unsigned long flags; ··· 1519 1518 ts->tv_nsec = 0; 1520 1519 } 1521 1520 1522 - /* Flag for if timekeeping_resume() has injected sleeptime */ 1523 - static bool sleeptime_injected; 1521 + /* 1522 + * Flag reflecting whether timekeeping_resume() has injected sleeptime. 1523 + * 1524 + * The flag starts of false and is only set when a suspend reaches 1525 + * timekeeping_suspend(), timekeeping_resume() sets it to false when the 1526 + * timekeeper clocksource is not stopping across suspend and has been 1527 + * used to update sleep time. 
If the timekeeper clocksource has stopped 1528 + * then the flag stays true and is used by the RTC resume code to decide 1529 + * whether sleeptime must be injected and if so the flag gets false then. 1530 + * 1531 + * If a suspend fails before reaching timekeeping_resume() then the flag 1532 + * stays false and prevents erroneous sleeptime injection. 1533 + */ 1534 + static bool suspend_timing_needed; 1524 1535 1525 1536 /* Flag for if there is a persistent clock on this platform */ 1526 1537 static bool persistent_clock_exists; ··· 1598 1585 * adds the sleep offset to the timekeeping variables. 1599 1586 */ 1600 1587 static void __timekeeping_inject_sleeptime(struct timekeeper *tk, 1601 - struct timespec64 *delta) 1588 + const struct timespec64 *delta) 1602 1589 { 1603 1590 if (!timespec64_valid_strict(delta)) { 1604 1591 printk_deferred(KERN_WARNING ··· 1631 1618 */ 1632 1619 bool timekeeping_rtc_skipresume(void) 1633 1620 { 1634 - return sleeptime_injected; 1621 + return !suspend_timing_needed; 1635 1622 } 1636 1623 1637 1624 /** ··· 1659 1646 * This function should only be called by rtc_resume(), and allows 1660 1647 * a suspend offset to be injected into the timekeeping values. 
1661 1648 */ 1662 - void timekeeping_inject_sleeptime64(struct timespec64 *delta) 1649 + void timekeeping_inject_sleeptime64(const struct timespec64 *delta) 1663 1650 { 1664 1651 struct timekeeper *tk = &tk_core.timekeeper; 1665 1652 unsigned long flags; 1666 1653 1667 1654 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1668 1655 write_seqcount_begin(&tk_core.seq); 1656 + 1657 + suspend_timing_needed = false; 1669 1658 1670 1659 timekeeping_forward_now(tk); 1671 1660 ··· 1692 1677 struct clocksource *clock = tk->tkr_mono.clock; 1693 1678 unsigned long flags; 1694 1679 struct timespec64 ts_new, ts_delta; 1695 - u64 cycle_now; 1680 + u64 cycle_now, nsec; 1681 + bool inject_sleeptime = false; 1696 1682 1697 - sleeptime_injected = false; 1698 1683 read_persistent_clock64(&ts_new); 1699 1684 1700 1685 clockevents_resume(); ··· 1716 1701 * usable source. The rtc part is handled separately in rtc core code. 1717 1702 */ 1718 1703 cycle_now = tk_clock_read(&tk->tkr_mono); 1719 - if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && 1720 - cycle_now > tk->tkr_mono.cycle_last) { 1721 - u64 nsec, cyc_delta; 1722 - 1723 - cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, 1724 - tk->tkr_mono.mask); 1725 - nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift); 1704 + nsec = clocksource_stop_suspend_timing(clock, cycle_now); 1705 + if (nsec > 0) { 1726 1706 ts_delta = ns_to_timespec64(nsec); 1727 - sleeptime_injected = true; 1707 + inject_sleeptime = true; 1728 1708 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) { 1729 1709 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time); 1730 - sleeptime_injected = true; 1710 + inject_sleeptime = true; 1731 1711 } 1732 1712 1733 - if (sleeptime_injected) 1713 + if (inject_sleeptime) { 1714 + suspend_timing_needed = false; 1734 1715 __timekeeping_inject_sleeptime(tk, &ts_delta); 1716 + } 1735 1717 1736 1718 /* Re-base the last cycle value */ 1737 1719 tk->tkr_mono.cycle_last = cycle_now; 
··· 1752 1740 unsigned long flags; 1753 1741 struct timespec64 delta, delta_delta; 1754 1742 static struct timespec64 old_delta; 1743 + struct clocksource *curr_clock; 1744 + u64 cycle_now; 1755 1745 1756 1746 read_persistent_clock64(&timekeeping_suspend_time); 1757 1747 ··· 1765 1751 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec) 1766 1752 persistent_clock_exists = true; 1767 1753 1754 + suspend_timing_needed = true; 1755 + 1768 1756 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1769 1757 write_seqcount_begin(&tk_core.seq); 1770 1758 timekeeping_forward_now(tk); 1771 1759 timekeeping_suspended = 1; 1760 + 1761 + /* 1762 + * Since we've called forward_now, cycle_last stores the value 1763 + * just read from the current clocksource. Save this to potentially 1764 + * use in suspend timing. 1765 + */ 1766 + curr_clock = tk->tkr_mono.clock; 1767 + cycle_now = tk->tkr_mono.cycle_last; 1768 + clocksource_start_suspend_timing(curr_clock, cycle_now); 1772 1769 1773 1770 if (persistent_clock_exists) { 1774 1771 /* ··· 2265 2240 /** 2266 2241 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex 2267 2242 */ 2268 - static int timekeeping_validate_timex(struct timex *txc) 2243 + static int timekeeping_validate_timex(const struct timex *txc) 2269 2244 { 2270 2245 if (txc->modes & ADJ_ADJTIME) { 2271 2246 /* singleshot must not be used with any other mode bits */
+1 -1
kernel/time/timekeeping_debug.c
··· 70 70 } 71 71 late_initcall(tk_debug_sleep_time_init); 72 72 73 - void tk_debug_account_sleep_time(struct timespec64 *t) 73 + void tk_debug_account_sleep_time(const struct timespec64 *t) 74 74 { 75 75 /* Cap bin index so we don't overflow the array */ 76 76 int bin = min(fls(t->tv_sec), NUM_BINS-1);
+1 -1
kernel/time/timekeeping_internal.h
··· 8 8 #include <linux/time.h> 9 9 10 10 #ifdef CONFIG_DEBUG_FS 11 - extern void tk_debug_account_sleep_time(struct timespec64 *t); 11 + extern void tk_debug_account_sleep_time(const struct timespec64 *t); 12 12 #else 13 13 #define tk_debug_account_sleep_time(x) 14 14 #endif