Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
"The timer department provides:

- More y2038 work in the area of ntp and pps.

- Optimization of posix cpu timers

- New time related selftests

- Some new clocksource drivers

- The usual pile of fixes, cleanups and improvements"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
timeconst: Update path in comment
timers/x86/hpet: Type adjustments
clocksource/drivers/armada-370-xp: Implement ARM delay timer
clocksource/drivers/tango_xtal: Add new timer for Tango SoCs
clocksource/drivers/imx: Allow timer irq affinity change
clocksource/drivers/exynos_mct: Use container_of() instead of this_cpu_ptr()
clocksource/drivers/h8300_*: Remove unneeded memset()s
clocksource/drivers/sh_cmt: Remove unneeded memset() in sh_cmt_setup()
clocksource/drivers/em_sti: Remove unneeded memset()s
clocksource/drivers/mediatek: Use GPT as sched clock source
clockevents/drivers/mtk: Fix spurious interrupt leading to crash
posix_cpu_timer: Reduce unnecessary sighand lock contention
posix_cpu_timer: Convert cputimer->running to bool
posix_cpu_timer: Check thread timers only when there are active thread timers
posix_cpu_timer: Optimize fastpath_timer_check()
timers, kselftest: Add 'adjtick' test to validate adjtimex() tick adjustments
timers: Use __fls in apply_slack()
clocksource: Remove return statement from void functions
net: sfc: avoid using timespec
ntp/pps: use y2038 safe types in pps_event_time
...

+468 -120
+3 -3
arch/x86/include/asm/hpet.h
··· 63 63 /* hpet memory map physical address */ 64 64 extern unsigned long hpet_address; 65 65 extern unsigned long force_hpet_address; 66 - extern int boot_hpet_disable; 66 + extern bool boot_hpet_disable; 67 67 extern u8 hpet_blockid; 68 - extern int hpet_force_user; 69 - extern u8 hpet_msi_disable; 68 + extern bool hpet_force_user; 69 + extern bool hpet_msi_disable; 70 70 extern int is_hpet_enabled(void); 71 71 extern int hpet_enable(void); 72 72 extern void hpet_disable(void);
+1 -1
arch/x86/kernel/early-quirks.c
··· 584 584 static void __init force_disable_hpet(int num, int slot, int func) 585 585 { 586 586 #ifdef CONFIG_HPET_TIMER 587 - boot_hpet_disable = 1; 587 + boot_hpet_disable = true; 588 588 pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n"); 589 589 #endif 590 590 }
+14 -15
arch/x86/kernel/hpet.c
··· 37 37 */ 38 38 unsigned long hpet_address; 39 39 u8 hpet_blockid; /* OS timer block num */ 40 - u8 hpet_msi_disable; 40 + bool hpet_msi_disable; 41 41 42 42 #ifdef CONFIG_PCI_MSI 43 - static unsigned long hpet_num_timers; 43 + static unsigned int hpet_num_timers; 44 44 #endif 45 45 static void __iomem *hpet_virt_address; 46 46 ··· 86 86 /* 87 87 * HPET command line enable / disable 88 88 */ 89 - int boot_hpet_disable; 90 - int hpet_force_user; 91 - static int hpet_verbose; 89 + bool boot_hpet_disable; 90 + bool hpet_force_user; 91 + static bool hpet_verbose; 92 92 93 93 static int __init hpet_setup(char *str) 94 94 { ··· 98 98 if (next) 99 99 *next++ = 0; 100 100 if (!strncmp("disable", str, 7)) 101 - boot_hpet_disable = 1; 101 + boot_hpet_disable = true; 102 102 if (!strncmp("force", str, 5)) 103 - hpet_force_user = 1; 103 + hpet_force_user = true; 104 104 if (!strncmp("verbose", str, 7)) 105 - hpet_verbose = 1; 105 + hpet_verbose = true; 106 106 str = next; 107 107 } 108 108 return 1; ··· 111 111 112 112 static int __init disable_hpet(char *str) 113 113 { 114 - boot_hpet_disable = 1; 114 + boot_hpet_disable = true; 115 115 return 1; 116 116 } 117 117 __setup("nohpet", disable_hpet); ··· 124 124 /* 125 125 * HPET timer interrupt enable / disable 126 126 */ 127 - static int hpet_legacy_int_enabled; 127 + static bool hpet_legacy_int_enabled; 128 128 129 129 /** 130 130 * is_hpet_enabled - check whether the hpet timer interrupt is enabled ··· 230 230 231 231 static void hpet_stop_counter(void) 232 232 { 233 - unsigned long cfg = hpet_readl(HPET_CFG); 233 + u32 cfg = hpet_readl(HPET_CFG); 234 234 cfg &= ~HPET_CFG_ENABLE; 235 235 hpet_writel(cfg, HPET_CFG); 236 236 } ··· 272 272 273 273 cfg |= HPET_CFG_LEGACY; 274 274 hpet_writel(cfg, HPET_CFG); 275 - hpet_legacy_int_enabled = 1; 275 + hpet_legacy_int_enabled = true; 276 276 } 277 277 278 278 static void hpet_legacy_clockevent_register(void) ··· 983 983 cfg = *hpet_boot_cfg; 984 984 else if 
(hpet_legacy_int_enabled) { 985 985 cfg &= ~HPET_CFG_LEGACY; 986 - hpet_legacy_int_enabled = 0; 986 + hpet_legacy_int_enabled = false; 987 987 } 988 988 cfg &= ~HPET_CFG_ENABLE; 989 989 hpet_writel(cfg, HPET_CFG); ··· 1121 1121 1122 1122 static void hpet_disable_rtc_channel(void) 1123 1123 { 1124 - unsigned long cfg; 1125 - cfg = hpet_readl(HPET_T1_CFG); 1124 + u32 cfg = hpet_readl(HPET_T1_CFG); 1126 1125 cfg &= ~HPET_TN_ENABLE; 1127 1126 hpet_writel(cfg, HPET_T1_CFG); 1128 1127 }
+1 -1
arch/x86/kernel/quirks.c
··· 524 524 */ 525 525 static void force_disable_hpet_msi(struct pci_dev *unused) 526 526 { 527 - hpet_msi_disable = 1; 527 + hpet_msi_disable = true; 528 528 } 529 529 530 530 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
+4
drivers/clocksource/Kconfig
··· 279 279 depends on MIPS_GIC 280 280 select CLKSRC_OF 281 281 282 + config CLKSRC_TANGO_XTAL 283 + bool 284 + select CLKSRC_OF 285 + 282 286 config CLKSRC_PXA 283 287 def_bool y if ARCH_PXA || ARCH_SA1100 284 288 select CLKSRC_OF if OF
+1
drivers/clocksource/Makefile
··· 56 56 obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o 57 57 obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o 58 58 obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o 59 + obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o 59 60 obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o 60 61 obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o 61 62 obj-$(CONFIG_H8300) += h8300_timer8.o
-2
drivers/clocksource/em_sti.c
··· 228 228 { 229 229 struct clocksource *cs = &p->cs; 230 230 231 - memset(cs, 0, sizeof(*cs)); 232 231 cs->name = dev_name(&p->pdev->dev); 233 232 cs->rating = 200; 234 233 cs->read = em_sti_clocksource_read; ··· 284 285 { 285 286 struct clock_event_device *ced = &p->ced; 286 287 287 - memset(ced, 0, sizeof(*ced)); 288 288 ced->name = dev_name(&p->pdev->dev); 289 289 ced->features = CLOCK_EVT_FEAT_ONESHOT; 290 290 ced->rating = 200;
+8 -4
drivers/clocksource/exynos_mct.c
··· 382 382 static int exynos4_tick_set_next_event(unsigned long cycles, 383 383 struct clock_event_device *evt) 384 384 { 385 - struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); 385 + struct mct_clock_event_device *mevt; 386 386 387 + mevt = container_of(evt, struct mct_clock_event_device, evt); 387 388 exynos4_mct_tick_start(cycles, mevt); 388 - 389 389 return 0; 390 390 } 391 391 392 392 static int set_state_shutdown(struct clock_event_device *evt) 393 393 { 394 - exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick)); 394 + struct mct_clock_event_device *mevt; 395 + 396 + mevt = container_of(evt, struct mct_clock_event_device, evt); 397 + exynos4_mct_tick_stop(mevt); 395 398 return 0; 396 399 } 397 400 398 401 static int set_state_periodic(struct clock_event_device *evt) 399 402 { 400 - struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); 403 + struct mct_clock_event_device *mevt; 401 404 unsigned long cycles_per_jiffy; 402 405 406 + mevt = container_of(evt, struct mct_clock_event_device, evt); 403 407 cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult) 404 408 >> evt->shift); 405 409 exynos4_mct_tick_stop(mevt);
-1
drivers/clocksource/h8300_timer16.c
··· 153 153 int ret, irq; 154 154 unsigned int ch; 155 155 156 - memset(p, 0, sizeof(*p)); 157 156 p->pdev = pdev; 158 157 159 158 res[REG_CH] = platform_get_resource(p->pdev,
-1
drivers/clocksource/h8300_timer8.c
··· 215 215 int irq; 216 216 int ret; 217 217 218 - memset(p, 0, sizeof(*p)); 219 218 p->pdev = pdev; 220 219 221 220 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
-1
drivers/clocksource/h8300_tpu.c
··· 123 123 { 124 124 struct resource *res[2]; 125 125 126 - memset(p, 0, sizeof(*p)); 127 126 p->pdev = pdev; 128 127 129 128 res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L);
+16 -10
drivers/clocksource/mtk_timer.c
··· 24 24 #include <linux/of.h> 25 25 #include <linux/of_address.h> 26 26 #include <linux/of_irq.h> 27 + #include <linux/sched_clock.h> 27 28 #include <linux/slab.h> 28 29 29 30 #define GPT_IRQ_EN_REG 0x00 ··· 59 58 u32 ticks_per_jiffy; 60 59 struct clock_event_device dev; 61 60 }; 61 + 62 + static void __iomem *gpt_sched_reg __read_mostly; 63 + 64 + static u64 notrace mtk_read_sched_clock(void) 65 + { 66 + return readl_relaxed(gpt_sched_reg); 67 + } 62 68 63 69 static inline struct mtk_clock_event_device *to_mtk_clk( 64 70 struct clock_event_device *c) ··· 149 141 return IRQ_HANDLED; 150 142 } 151 143 152 - static void mtk_timer_global_reset(struct mtk_clock_event_device *evt) 153 - { 154 - /* Disable all interrupts */ 155 - writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG); 156 - /* Acknowledge all interrupts */ 157 - writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG); 158 - } 159 - 160 144 static void 161 145 mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option) 162 146 { ··· 167 167 static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer) 168 168 { 169 169 u32 val; 170 + 171 + /* Disable all interrupts */ 172 + writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG); 173 + 174 + /* Acknowledge all spurious pending interrupts */ 175 + writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG); 170 176 171 177 val = readl(evt->gpt_base + GPT_IRQ_EN_REG); 172 178 writel(val | GPT_IRQ_ENABLE(timer), ··· 226 220 } 227 221 rate = clk_get_rate(clk); 228 222 229 - mtk_timer_global_reset(evt); 230 - 231 223 if (request_irq(evt->dev.irq, mtk_timer_interrupt, 232 224 IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { 233 225 pr_warn("failed to setup irq %d\n", evt->dev.irq); ··· 238 234 mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); 239 235 clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), 240 236 node->name, rate, 300, 32, clocksource_mmio_readl_up); 237 + gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC); 238 + 
sched_clock_register(mtk_read_sched_clock, 32, rate); 241 239 242 240 /* Configure clock event */ 243 241 mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
-1
drivers/clocksource/sh_cmt.c
··· 962 962 unsigned int i; 963 963 int ret; 964 964 965 - memset(cmt, 0, sizeof(*cmt)); 966 965 cmt->pdev = pdev; 967 966 raw_spin_lock_init(&cmt->lock); 968 967
+66
drivers/clocksource/tango_xtal.c
··· 1 + #include <linux/clocksource.h> 2 + #include <linux/sched_clock.h> 3 + #include <linux/of_address.h> 4 + #include <linux/printk.h> 5 + #include <linux/delay.h> 6 + #include <linux/init.h> 7 + #include <linux/clk.h> 8 + 9 + static void __iomem *xtal_in_cnt; 10 + static struct delay_timer delay_timer; 11 + 12 + static unsigned long notrace read_xtal_counter(void) 13 + { 14 + return readl_relaxed(xtal_in_cnt); 15 + } 16 + 17 + static u64 notrace read_sched_clock(void) 18 + { 19 + return read_xtal_counter(); 20 + } 21 + 22 + static cycle_t read_clocksource(struct clocksource *cs) 23 + { 24 + return read_xtal_counter(); 25 + } 26 + 27 + static struct clocksource tango_xtal = { 28 + .name = "tango-xtal", 29 + .rating = 350, 30 + .read = read_clocksource, 31 + .mask = CLOCKSOURCE_MASK(32), 32 + .flags = CLOCK_SOURCE_IS_CONTINUOUS, 33 + }; 34 + 35 + static void __init tango_clocksource_init(struct device_node *np) 36 + { 37 + struct clk *clk; 38 + int xtal_freq, ret; 39 + 40 + xtal_in_cnt = of_iomap(np, 0); 41 + if (xtal_in_cnt == NULL) { 42 + pr_err("%s: invalid address\n", np->full_name); 43 + return; 44 + } 45 + 46 + clk = of_clk_get(np, 0); 47 + if (IS_ERR(clk)) { 48 + pr_err("%s: invalid clock\n", np->full_name); 49 + return; 50 + } 51 + 52 + xtal_freq = clk_get_rate(clk); 53 + delay_timer.freq = xtal_freq; 54 + delay_timer.read_current_timer = read_xtal_counter; 55 + 56 + ret = clocksource_register_hz(&tango_xtal, xtal_freq); 57 + if (ret != 0) { 58 + pr_err("%s: registration failed\n", np->full_name); 59 + return; 60 + } 61 + 62 + sched_clock_register(read_sched_clock, 32, xtal_freq); 63 + register_current_timer_delay(&delay_timer); 64 + } 65 + 66 + CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
+14
drivers/clocksource/time-armada-370-xp.c
··· 45 45 #include <linux/percpu.h> 46 46 #include <linux/syscore_ops.h> 47 47 48 + #include <asm/delay.h> 49 + 48 50 /* 49 51 * Timer block registers. 50 52 */ ··· 251 249 .resume = armada_370_xp_timer_resume, 252 250 }; 253 251 252 + static unsigned long armada_370_delay_timer_read(void) 253 + { 254 + return ~readl(timer_base + TIMER0_VAL_OFF); 255 + } 256 + 257 + static struct delay_timer armada_370_delay_timer = { 258 + .read_current_timer = armada_370_delay_timer_read, 259 + }; 260 + 254 261 static void __init armada_370_xp_timer_common_init(struct device_node *np) 255 262 { 256 263 u32 clr = 0, set = 0; ··· 297 286 atomic_io_modify(timer_base + TIMER_CTRL_OFF, 298 287 TIMER0_RELOAD_EN | enable_mask, 299 288 TIMER0_RELOAD_EN | enable_mask); 289 + 290 + armada_370_delay_timer.freq = timer_clk; 291 + register_current_timer_delay(&armada_370_delay_timer); 300 292 301 293 /* 302 294 * Set scale and timer for sched_clock.
+2 -1
drivers/clocksource/timer-imx-gpt.c
··· 305 305 struct irqaction *act = &imxtm->act; 306 306 307 307 ced->name = "mxc_timer1"; 308 - ced->features = CLOCK_EVT_FEAT_ONESHOT; 308 + ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; 309 309 ced->set_state_shutdown = mxc_shutdown; 310 310 ced->set_state_oneshot = mxc_set_oneshot; 311 311 ced->tick_resume = mxc_shutdown; 312 312 ced->set_next_event = imxtm->gpt->set_next_event; 313 313 ced->rating = 200; 314 314 ced->cpumask = cpumask_of(0); 315 + ced->irq = imxtm->irq; 315 316 clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per), 316 317 0xff, 0xfffffffe); 317 318
+15 -15
drivers/net/ethernet/sfc/ptp.c
··· 401 401 /* For Siena platforms NIC time is s and ns */ 402 402 static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) 403 403 { 404 - struct timespec ts = ns_to_timespec(ns); 405 - *nic_major = ts.tv_sec; 404 + struct timespec64 ts = ns_to_timespec64(ns); 405 + *nic_major = (u32)ts.tv_sec; 406 406 *nic_minor = ts.tv_nsec; 407 407 } 408 408 ··· 431 431 */ 432 432 static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) 433 433 { 434 - struct timespec ts = ns_to_timespec(ns); 435 - u32 maj = ts.tv_sec; 434 + struct timespec64 ts = ns_to_timespec64(ns); 435 + u32 maj = (u32)ts.tv_sec; 436 436 u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + 437 437 (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); 438 438 ··· 646 646 struct pps_event_time *last_time) 647 647 { 648 648 struct pps_event_time now; 649 - struct timespec limit; 649 + struct timespec64 limit; 650 650 struct efx_ptp_data *ptp = efx->ptp_data; 651 - struct timespec start; 651 + struct timespec64 start; 652 652 int *mc_running = ptp->start.addr; 653 653 654 654 pps_get_ts(&now); 655 655 start = now.ts_real; 656 656 limit = now.ts_real; 657 - timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); 657 + timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS); 658 658 659 659 /* Write host time for specified period or until MC is done */ 660 - while ((timespec_compare(&now.ts_real, &limit) < 0) && 660 + while ((timespec64_compare(&now.ts_real, &limit) < 0) && 661 661 ACCESS_ONCE(*mc_running)) { 662 - struct timespec update_time; 662 + struct timespec64 update_time; 663 663 unsigned int host_time; 664 664 665 665 /* Don't update continuously to avoid saturating the PCIe bus */ 666 666 update_time = now.ts_real; 667 - timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); 667 + timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); 668 668 do { 669 669 pps_get_ts(&now); 670 - } while ((timespec_compare(&now.ts_real, &update_time) < 0) && 670 + } while 
((timespec64_compare(&now.ts_real, &update_time) < 0) && 671 671 ACCESS_ONCE(*mc_running)); 672 672 673 673 /* Synchronise NIC with single word of time only */ ··· 723 723 struct efx_ptp_data *ptp = efx->ptp_data; 724 724 u32 last_sec; 725 725 u32 start_sec; 726 - struct timespec delta; 726 + struct timespec64 delta; 727 727 ktime_t mc_time; 728 728 729 729 if (number_readings == 0) ··· 737 737 */ 738 738 for (i = 0; i < number_readings; i++) { 739 739 s32 window, corrected; 740 - struct timespec wait; 740 + struct timespec64 wait; 741 741 742 742 efx_ptp_read_timeset( 743 743 MCDI_ARRAY_STRUCT_PTR(synch_buf, 744 744 PTP_OUT_SYNCHRONIZE_TIMESET, i), 745 745 &ptp->timeset[i]); 746 746 747 - wait = ktime_to_timespec( 747 + wait = ktime_to_timespec64( 748 748 ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0)); 749 749 window = ptp->timeset[i].window; 750 750 corrected = window - wait.tv_nsec; ··· 803 803 ptp->timeset[last_good].minor, 0); 804 804 805 805 /* Calculate delay from NIC top of second to last_time */ 806 - delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec; 806 + delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec; 807 807 808 808 /* Set PPS timestamp to match NIC top of second */ 809 809 ptp->host_time_pps = *last_time;
+2 -2
drivers/pps/kapi.c
··· 179 179 /* check event type */ 180 180 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); 181 181 182 - dev_dbg(pps->dev, "PPS event at %ld.%09ld\n", 183 - ts->ts_real.tv_sec, ts->ts_real.tv_nsec); 182 + dev_dbg(pps->dev, "PPS event at %lld.%09ld\n", 183 + (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec); 184 184 185 185 timespec_to_pps_ktime(&ts_real, ts->ts_real); 186 186
+2 -1
include/linux/init_task.h
··· 59 59 .rlim = INIT_RLIMITS, \ 60 60 .cputimer = { \ 61 61 .cputime_atomic = INIT_CPUTIME_ATOMIC, \ 62 - .running = 0, \ 62 + .running = false, \ 63 + .checking_timer = false, \ 63 64 }, \ 64 65 INIT_PREV_CPUTIME(sig) \ 65 66 .cred_guard_mutex = \
+8 -8
include/linux/pps_kernel.h
··· 48 48 49 49 struct pps_event_time { 50 50 #ifdef CONFIG_NTP_PPS 51 - struct timespec ts_raw; 51 + struct timespec64 ts_raw; 52 52 #endif /* CONFIG_NTP_PPS */ 53 - struct timespec ts_real; 53 + struct timespec64 ts_real; 54 54 }; 55 55 56 56 /* The main struct */ ··· 105 105 struct pps_device *pps_lookup_dev(void const *cookie); 106 106 107 107 static inline void timespec_to_pps_ktime(struct pps_ktime *kt, 108 - struct timespec ts) 108 + struct timespec64 ts) 109 109 { 110 110 kt->sec = ts.tv_sec; 111 111 kt->nsec = ts.tv_nsec; ··· 115 115 116 116 static inline void pps_get_ts(struct pps_event_time *ts) 117 117 { 118 - getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); 118 + ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); 119 119 } 120 120 121 121 #else /* CONFIG_NTP_PPS */ 122 122 123 123 static inline void pps_get_ts(struct pps_event_time *ts) 124 124 { 125 - getnstimeofday(&ts->ts_real); 125 + ktime_get_real_ts64(&ts->ts_real); 126 126 } 127 127 128 128 #endif /* CONFIG_NTP_PPS */ 129 129 130 130 /* Subtract known time delay from PPS event time(s) */ 131 - static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta) 131 + static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) 132 132 { 133 - ts->ts_real = timespec_sub(ts->ts_real, delta); 133 + ts->ts_real = timespec64_sub(ts->ts_real, delta); 134 134 #ifdef CONFIG_NTP_PPS 135 - ts->ts_raw = timespec_sub(ts->ts_raw, delta); 135 + ts->ts_raw = timespec64_sub(ts->ts_raw, delta); 136 136 #endif 137 137 } 138 138
+6 -3
include/linux/sched.h
··· 617 617 /** 618 618 * struct thread_group_cputimer - thread group interval timer counts 619 619 * @cputime_atomic: atomic thread group interval timers. 620 - * @running: non-zero when there are timers running and 621 - * @cputime receives updates. 620 + * @running: true when there are timers running and 621 + * @cputime_atomic receives updates. 622 + * @checking_timer: true when a thread in the group is in the 623 + * process of checking for thread group timers. 622 624 * 623 625 * This structure contains the version of task_cputime, above, that is 624 626 * used for thread group CPU timer calculations. 625 627 */ 626 628 struct thread_group_cputimer { 627 629 struct task_cputime_atomic cputime_atomic; 628 - int running; 630 + bool running; 631 + bool checking_timer; 629 632 }; 630 633 631 634 #include <linux/rwsem.h>
+2 -2
include/linux/timekeeping.h
··· 263 263 /* 264 264 * PPS accessor 265 265 */ 266 - extern void getnstime_raw_and_real(struct timespec *ts_raw, 267 - struct timespec *ts_real); 266 + extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, 267 + struct timespec64 *ts_real); 268 268 269 269 /* 270 270 * Persistent clock related interfaces
+1 -1
include/linux/timex.h
··· 152 152 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) 153 153 154 154 extern int do_adjtimex(struct timex *); 155 - extern void hardpps(const struct timespec *, const struct timespec *); 155 + extern void hardpps(const struct timespec64 *, const struct timespec64 *); 156 156 157 157 int read_current_timer(unsigned long *timer_val); 158 158 void ntp_notify_cmos_timer(void);
+1 -1
kernel/fork.c
··· 1101 1101 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); 1102 1102 if (cpu_limit != RLIM_INFINITY) { 1103 1103 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); 1104 - sig->cputimer.running = 1; 1104 + sig->cputimer.running = true; 1105 1105 } 1106 1106 1107 1107 /* The timer lists. */
+3 -4
kernel/time/clocksource.c
··· 479 479 * return half the number of nanoseconds the hardware counter can technically 480 480 * cover. This is done so that we can potentially detect problems caused by 481 481 * delayed timers or bad hardware, which might result in time intervals that 482 - * are larger then what the math used can handle without overflows. 482 + * are larger than what the math used can handle without overflows. 483 483 */ 484 484 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc) 485 485 { ··· 595 595 */ 596 596 static void clocksource_select(void) 597 597 { 598 - return __clocksource_select(false); 598 + __clocksource_select(false); 599 599 } 600 600 601 601 static void clocksource_select_fallback(void) 602 602 { 603 - return __clocksource_select(true); 603 + __clocksource_select(true); 604 604 } 605 605 606 606 #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ 607 - 608 607 static inline void clocksource_select(void) { } 609 608 static inline void clocksource_select_fallback(void) { } 610 609
+1 -1
kernel/time/hrtimer.c
··· 59 59 /* 60 60 * The timer bases: 61 61 * 62 - * There are more clockids then hrtimer bases. Thus, we index 62 + * There are more clockids than hrtimer bases. Thus, we index 63 63 * into the timer bases by the hrtimer_base_type enum. When trying 64 64 * to reach a base using a clockid, hrtimer_clockid_to_base() 65 65 * is used to convert from clockid to the proper hrtimer_base_type.
+8 -8
kernel/time/ntp.c
··· 99 99 static int pps_valid; /* signal watchdog counter */ 100 100 static long pps_tf[3]; /* phase median filter */ 101 101 static long pps_jitter; /* current jitter (ns) */ 102 - static struct timespec pps_fbase; /* beginning of the last freq interval */ 102 + static struct timespec64 pps_fbase; /* beginning of the last freq interval */ 103 103 static int pps_shift; /* current interval duration (s) (shift) */ 104 104 static int pps_intcnt; /* interval counter */ 105 105 static s64 pps_freq; /* frequency offset (scaled ns/s) */ ··· 509 509 static void sync_cmos_clock(struct work_struct *work) 510 510 { 511 511 struct timespec64 now; 512 - struct timespec next; 512 + struct timespec64 next; 513 513 int fail = 1; 514 514 515 515 /* ··· 559 559 next.tv_nsec -= NSEC_PER_SEC; 560 560 } 561 561 queue_delayed_work(system_power_efficient_wq, 562 - &sync_cmos_work, timespec_to_jiffies(&next)); 562 + &sync_cmos_work, timespec64_to_jiffies(&next)); 563 563 } 564 564 565 565 void ntp_notify_cmos_timer(void) ··· 773 773 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] 774 774 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */ 775 775 struct pps_normtime { 776 - __kernel_time_t sec; /* seconds */ 776 + s64 sec; /* seconds */ 777 777 long nsec; /* nanoseconds */ 778 778 }; 779 779 780 780 /* normalize the timestamp so that nsec is in the 781 781 ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */ 782 - static inline struct pps_normtime pps_normalize_ts(struct timespec ts) 782 + static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts) 783 783 { 784 784 struct pps_normtime norm = { 785 785 .sec = ts.tv_sec, ··· 861 861 pps_errcnt++; 862 862 pps_dec_freq_interval(); 863 863 printk_deferred(KERN_ERR 864 - "hardpps: PPSERROR: interval too long - %ld s\n", 864 + "hardpps: PPSERROR: interval too long - %lld s\n", 865 865 freq_norm.sec); 866 866 return 0; 867 867 } ··· 948 948 * This code is based on David Mills's reference 
nanokernel 949 949 * implementation. It was mostly rewritten but keeps the same idea. 950 950 */ 951 - void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) 951 + void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts) 952 952 { 953 953 struct pps_normtime pts_norm, freq_norm; 954 954 ··· 969 969 } 970 970 971 971 /* ok, now we have a base for frequency calculation */ 972 - freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase)); 972 + freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase)); 973 973 974 974 /* check that the signal is in the range 975 975 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
+1 -1
kernel/time/ntp_internal.h
··· 9 9 extern int second_overflow(unsigned long secs); 10 10 extern int ntp_validate_timex(struct timex *); 11 11 extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); 12 - extern void __hardpps(const struct timespec *, const struct timespec *); 12 + extern void __hardpps(const struct timespec64 *, const struct timespec64 *); 13 13 #endif /* _LINUX_NTP_INTERNAL_H */
+45 -18
kernel/time/posix-cpu-timers.c
··· 249 249 * but barriers are not required because update_gt_cputime() 250 250 * can handle concurrent updates. 251 251 */ 252 - WRITE_ONCE(cputimer->running, 1); 252 + WRITE_ONCE(cputimer->running, true); 253 253 } 254 254 sample_cputime_atomic(times, &cputimer->cputime_atomic); 255 255 } ··· 864 864 unsigned long long expires; 865 865 unsigned long soft; 866 866 867 + /* 868 + * If cputime_expires is zero, then there are no active 869 + * per thread CPU timers. 870 + */ 871 + if (task_cputime_zero(&tsk->cputime_expires)) 872 + return; 873 + 867 874 expires = check_timers_list(timers, firing, prof_ticks(tsk)); 868 875 tsk_expires->prof_exp = expires_to_cputime(expires); 869 876 ··· 918 911 struct thread_group_cputimer *cputimer = &sig->cputimer; 919 912 920 913 /* Turn off cputimer->running. This is done without locking. */ 921 - WRITE_ONCE(cputimer->running, 0); 914 + WRITE_ONCE(cputimer->running, false); 922 915 } 923 916 924 917 static u32 onecputick; ··· 967 960 struct list_head *timers = sig->cpu_timers; 968 961 struct task_cputime cputime; 969 962 unsigned long soft; 963 + 964 + /* 965 + * If cputimer is not running, then there are no active 966 + * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU). 967 + */ 968 + if (!READ_ONCE(tsk->signal->cputimer.running)) 969 + return; 970 + 971 + /* 972 + * Signify that a thread is checking for process timers. 973 + * Write access to this field is protected by the sighand lock. 974 + */ 975 + sig->cputimer.checking_timer = true; 970 976 971 977 /* 972 978 * Collect the current process totals. 
··· 1035 1015 sig->cputime_expires.sched_exp = sched_expires; 1036 1016 if (task_cputime_zero(&sig->cputime_expires)) 1037 1017 stop_process_timers(sig); 1018 + 1019 + sig->cputimer.checking_timer = false; 1038 1020 } 1039 1021 1040 1022 /* ··· 1139 1117 static inline int fastpath_timer_check(struct task_struct *tsk) 1140 1118 { 1141 1119 struct signal_struct *sig; 1142 - cputime_t utime, stime; 1143 - 1144 - task_cputime(tsk, &utime, &stime); 1145 1120 1146 1121 if (!task_cputime_zero(&tsk->cputime_expires)) { 1147 - struct task_cputime task_sample = { 1148 - .utime = utime, 1149 - .stime = stime, 1150 - .sum_exec_runtime = tsk->se.sum_exec_runtime 1151 - }; 1122 + struct task_cputime task_sample; 1152 1123 1124 + task_cputime(tsk, &task_sample.utime, &task_sample.stime); 1125 + task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; 1153 1126 if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) 1154 1127 return 1; 1155 1128 } 1156 1129 1157 1130 sig = tsk->signal; 1158 - /* Check if cputimer is running. This is accessed without locking. */ 1159 - if (READ_ONCE(sig->cputimer.running)) { 1131 + /* 1132 + * Check if thread group timers expired when the cputimer is 1133 + * running and no other thread in the group is already checking 1134 + * for thread group cputimers. These fields are read without the 1135 + * sighand lock. However, this is fine because this is meant to 1136 + * be a fastpath heuristic to determine whether we should try to 1137 + * acquire the sighand lock to check/handle timers. 1138 + * 1139 + * In the worst case scenario, if 'running' or 'checking_timer' gets 1140 + * set but the current thread doesn't see the change yet, we'll wait 1141 + * until the next thread in the group gets a scheduler interrupt to 1142 + * handle the timer. This isn't an issue in practice because these 1143 + * types of delays with signals actually getting sent are expected. 
1144 + */ 1145 + if (READ_ONCE(sig->cputimer.running) && 1146 + !READ_ONCE(sig->cputimer.checking_timer)) { 1160 1147 struct task_cputime group_sample; 1161 1148 1162 1149 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic); ··· 1205 1174 * put them on the firing list. 1206 1175 */ 1207 1176 check_thread_timers(tsk, &firing); 1208 - /* 1209 - * If there are any active process wide timers (POSIX 1.b, itimers, 1210 - * RLIMIT_CPU) cputimer must be running. 1211 - */ 1212 - if (READ_ONCE(tsk->signal->cputimer.running)) 1213 - check_process_timers(tsk, &firing); 1177 + 1178 + check_process_timers(tsk, &firing); 1214 1179 1215 1180 /* 1216 1181 * We must release these locks before taking any timer's lock.
+1 -1
kernel/time/timeconst.bc
··· 39 39 } 40 40 41 41 define timeconst(hz) { 42 - print "/* Automatically generated by kernel/timeconst.bc */\n" 42 + print "/* Automatically generated by kernel/time/timeconst.bc */\n" 43 43 print "/* Time conversion constants for HZ == ", hz, " */\n" 44 44 print "\n" 45 45
+9 -9
kernel/time/timekeeping.c
··· 849 849 #ifdef CONFIG_NTP_PPS 850 850 851 851 /** 852 - * getnstime_raw_and_real - get day and raw monotonic time in timespec format 852 + * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format 853 853 * @ts_raw: pointer to the timespec to be set to raw monotonic time 854 854 * @ts_real: pointer to the timespec to be set to the time of day 855 855 * ··· 857 857 * same time atomically and stores the resulting timestamps in timespec 858 858 * format. 859 859 */ 860 - void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) 860 + void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real) 861 861 { 862 862 struct timekeeper *tk = &tk_core.timekeeper; 863 863 unsigned long seq; ··· 868 868 do { 869 869 seq = read_seqcount_begin(&tk_core.seq); 870 870 871 - *ts_raw = timespec64_to_timespec(tk->raw_time); 871 + *ts_raw = tk->raw_time; 872 872 ts_real->tv_sec = tk->xtime_sec; 873 873 ts_real->tv_nsec = 0; 874 874 ··· 877 877 878 878 } while (read_seqcount_retry(&tk_core.seq, seq)); 879 879 880 - timespec_add_ns(ts_raw, nsecs_raw); 881 - timespec_add_ns(ts_real, nsecs_real); 880 + timespec64_add_ns(ts_raw, nsecs_raw); 881 + timespec64_add_ns(ts_real, nsecs_real); 882 882 } 883 - EXPORT_SYMBOL(getnstime_raw_and_real); 883 + EXPORT_SYMBOL(ktime_get_raw_and_real_ts64); 884 884 885 885 #endif /* CONFIG_NTP_PPS */ 886 886 ··· 1674 1674 /** 1675 1675 * accumulate_nsecs_to_secs - Accumulates nsecs into secs 1676 1676 * 1677 - * Helper function that accumulates a the nsecs greater then a second 1677 + * Helper function that accumulates the nsecs greater than a second 1678 1678 * from the xtime_nsec field to the xtime_secs field. 1679 1679 * It also calls into the NTP code to handle leapsecond processing. 
1680 1680 * ··· 1726 1726 cycle_t interval = tk->cycle_interval << shift; 1727 1727 u64 raw_nsecs; 1728 1728 1729 - /* If the offset is smaller then a shifted interval, do nothing */ 1729 + /* If the offset is smaller than a shifted interval, do nothing */ 1730 1730 if (offset < interval) 1731 1731 return offset; 1732 1732 ··· 2025 2025 /** 2026 2026 * hardpps() - Accessor function to NTP __hardpps function 2027 2027 */ 2028 - void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) 2028 + void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts) 2029 2029 { 2030 2030 unsigned long flags; 2031 2031
+10 -3
kernel/time/timer.c
··· 461 461 462 462 static void timer_stats_account_timer(struct timer_list *timer) 463 463 { 464 - if (likely(!timer->start_site)) 464 + void *site; 465 + 466 + /* 467 + * start_site can be concurrently reset by 468 + * timer_stats_timer_clear_start_info() 469 + */ 470 + site = READ_ONCE(timer->start_site); 471 + if (likely(!site)) 465 472 return; 466 473 467 - timer_stats_update_stats(timer, timer->start_pid, timer->start_site, 474 + timer_stats_update_stats(timer, timer->start_pid, site, 468 475 timer->function, timer->start_comm, 469 476 timer->flags); 470 477 } ··· 874 867 if (mask == 0) 875 868 return expires; 876 869 877 - bit = find_last_bit(&mask, BITS_PER_LONG); 870 + bit = __fls(mask); 878 871 879 872 mask = (1UL << bit) - 1; 880 873
+2 -1
tools/testing/selftests/timers/Makefile
··· 8 8 TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ 9 9 inconsistency-check raw_skew threadtest rtctest 10 10 11 - TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex change_skew \ 11 + TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \ 12 12 skew_consistency clocksource-switch leap-a-day \ 13 13 leapcrash set-tai set-2038 14 14 ··· 24 24 run_destructive_tests: run_tests 25 25 ./alarmtimer-suspend 26 26 ./valid-adjtimex 27 + ./adjtick 27 28 ./change_skew 28 29 ./skew_consistency 29 30 ./clocksource-switch
+221
tools/testing/selftests/timers/adjtick.c
··· 1 + /* adjtimex() tick adjustment test 2 + * by: John Stultz <john.stultz@linaro.org> 3 + * (C) Copyright Linaro Limited 2015 4 + * Licensed under the GPLv2 5 + * 6 + * To build: 7 + * $ gcc adjtick.c -o adjtick -lrt 8 + * 9 + * This program is free software: you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation, either version 2 of the License, or 12 + * (at your option) any later version. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + * GNU General Public License for more details. 18 + */ 19 + #include <stdio.h> 20 + #include <unistd.h> 21 + #include <stdlib.h> 22 + #include <sys/time.h> 23 + #include <sys/timex.h> 24 + #include <time.h> 25 + 26 + #ifdef KTEST 27 + #include "../kselftest.h" 28 + #else 29 + static inline int ksft_exit_pass(void) 30 + { 31 + exit(0); 32 + } 33 + static inline int ksft_exit_fail(void) 34 + { 35 + exit(1); 36 + } 37 + #endif 38 + 39 + #define CLOCK_MONOTONIC_RAW 4 40 + 41 + #define NSEC_PER_SEC 1000000000LL 42 + #define USEC_PER_SEC 1000000 43 + 44 + #define MILLION 1000000 45 + 46 + long systick; 47 + 48 + long long llabs(long long val) 49 + { 50 + if (val < 0) 51 + val = -val; 52 + return val; 53 + } 54 + 55 + unsigned long long ts_to_nsec(struct timespec ts) 56 + { 57 + return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; 58 + } 59 + 60 + struct timespec nsec_to_ts(long long ns) 61 + { 62 + struct timespec ts; 63 + 64 + ts.tv_sec = ns/NSEC_PER_SEC; 65 + ts.tv_nsec = ns%NSEC_PER_SEC; 66 + 67 + return ts; 68 + } 69 + 70 + long long diff_timespec(struct timespec start, struct timespec end) 71 + { 72 + long long start_ns, end_ns; 73 + 74 + start_ns = ts_to_nsec(start); 75 + end_ns = ts_to_nsec(end); 76 + 77 + return end_ns - start_ns; 78 + } 79 + 80 + void 
get_monotonic_and_raw(struct timespec *mon, struct timespec *raw) 81 + { 82 + struct timespec start, mid, end; 83 + long long diff = 0, tmp; 84 + int i; 85 + 86 + clock_gettime(CLOCK_MONOTONIC, mon); 87 + clock_gettime(CLOCK_MONOTONIC_RAW, raw); 88 + 89 + /* Try to get a more tightly bound pairing */ 90 + for (i = 0; i < 3; i++) { 91 + long long newdiff; 92 + 93 + clock_gettime(CLOCK_MONOTONIC, &start); 94 + clock_gettime(CLOCK_MONOTONIC_RAW, &mid); 95 + clock_gettime(CLOCK_MONOTONIC, &end); 96 + 97 + newdiff = diff_timespec(start, end); 98 + if (diff == 0 || newdiff < diff) { 99 + diff = newdiff; 100 + *raw = mid; 101 + tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2; 102 + *mon = nsec_to_ts(tmp); 103 + } 104 + } 105 + } 106 + 107 + long long get_ppm_drift(void) 108 + { 109 + struct timespec mon_start, raw_start, mon_end, raw_end; 110 + long long delta1, delta2, eppm; 111 + 112 + get_monotonic_and_raw(&mon_start, &raw_start); 113 + 114 + sleep(15); 115 + 116 + get_monotonic_and_raw(&mon_end, &raw_end); 117 + 118 + delta1 = diff_timespec(mon_start, mon_end); 119 + delta2 = diff_timespec(raw_start, raw_end); 120 + 121 + eppm = (delta1*MILLION)/delta2 - MILLION; 122 + 123 + return eppm; 124 + } 125 + 126 + int check_tick_adj(long tickval) 127 + { 128 + long long eppm, ppm; 129 + struct timex tx1; 130 + 131 + tx1.modes = ADJ_TICK; 132 + tx1.modes |= ADJ_OFFSET; 133 + tx1.modes |= ADJ_FREQUENCY; 134 + tx1.modes |= ADJ_STATUS; 135 + 136 + tx1.status = STA_PLL; 137 + tx1.offset = 0; 138 + tx1.freq = 0; 139 + tx1.tick = tickval; 140 + 141 + adjtimex(&tx1); 142 + 143 + sleep(1); 144 + 145 + ppm = ((long long)tickval * MILLION)/systick - MILLION; 146 + printf("Estimating tick (act: %ld usec, %lld ppm): ", tickval, ppm); 147 + 148 + eppm = get_ppm_drift(); 149 + printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm); 150 + 151 + tx1.modes = 0; 152 + adjtimex(&tx1); 153 + 154 + if (tx1.offset || tx1.freq || tx1.tick != tickval) { 155 + printf(" [ERROR]\n"); 
156 + printf("\tUnexpected adjtimex return values, make sure ntpd is not running.\n"); 157 + return -1; 158 + } 159 + 160 + /* 161 + * Here we use 100ppm difference as an error bound. 162 + * We likely should see better, but some coarse clocksources 163 + * cannot match the HZ tick size accurately, so we have a 164 + * internal correction factor that doesn't scale exactly 165 + * with the adjustment, resulting in > 10ppm error during 166 + * a 10% adjustment. 100ppm also gives us more breathing 167 + * room for interruptions during the measurement. 168 + */ 169 + if (llabs(eppm - ppm) > 100) { 170 + printf(" [FAILED]\n"); 171 + return -1; 172 + } 173 + printf(" [OK]\n"); 174 + 175 + return 0; 176 + } 177 + 178 + int main(int argv, char **argc) 179 + { 180 + struct timespec raw; 181 + long tick, max, interval, err; 182 + struct timex tx1; 183 + 184 + err = 0; 185 + setbuf(stdout, NULL); 186 + 187 + if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) { 188 + printf("ERR: NO CLOCK_MONOTONIC_RAW\n"); 189 + return -1; 190 + } 191 + 192 + printf("Each iteration takes about 15 seconds\n"); 193 + 194 + systick = sysconf(_SC_CLK_TCK); 195 + systick = USEC_PER_SEC/sysconf(_SC_CLK_TCK); 196 + max = systick/10; /* +/- 10% */ 197 + interval = max/4; /* in 4 steps each side */ 198 + 199 + for (tick = (systick - max); tick < (systick + max); tick += interval) { 200 + if (check_tick_adj(tick)) { 201 + err = 1; 202 + break; 203 + } 204 + } 205 + 206 + /* Reset things to zero */ 207 + tx1.modes = ADJ_TICK; 208 + tx1.modes |= ADJ_OFFSET; 209 + tx1.modes |= ADJ_FREQUENCY; 210 + 211 + tx1.offset = 0; 212 + tx1.freq = 0; 213 + tx1.tick = systick; 214 + 215 + adjtimex(&tx1); 216 + 217 + if (err) 218 + return ksft_exit_fail(); 219 + 220 + return ksft_exit_pass(); 221 + }