Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
"The timer department delivers:

- a regression fix for the NTP code along with a proper selftest
- prevent a spurious timer interrupt in the NOHZ lowres code
- a fix for user space interfaces returning the remaining time on
architectures with CONFIG_TIME_LOW_RES=y
- a few patches to fix COMPILE_TEST fallout"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
tick/nohz: Set the correct expiry when switching to nohz/lowres mode
clocksource: Fix dependencies for archs w/o HAS_IOMEM
clocksource: Select CLKSRC_MMIO where needed
tick/sched: Hide unused oneshot timer code
kselftests: timers: Add adjtimex SETOFFSET validity tests
ntp: Fix ADJ_SETOFFSET being used w/ ADJ_NANO
itimers: Handle relative timers with CONFIG_TIME_LOW_RES proper
posix-timers: Handle relative timers with CONFIG_TIME_LOW_RES proper
timerfd: Handle relative timers with CONFIG_TIME_LOW_RES proper
hrtimer: Handle remaining time proper for TIME_LOW_RES
clockevents/tcb_clksrc: Prevent disabling an already disabled clock

+245 -36
+12
drivers/clocksource/Kconfig
··· 30 30 config DIGICOLOR_TIMER 31 31 bool "Digicolor timer driver" if COMPILE_TEST 32 32 depends on GENERIC_CLOCKEVENTS 33 + select CLKSRC_MMIO 34 + depends on HAS_IOMEM 33 35 help 34 36 Enables the support for the digicolor timer driver. 35 37 ··· 57 55 bool "Armada 370 and XP timer driver" if COMPILE_TEST 58 56 depends on ARM 59 57 select CLKSRC_OF 58 + select CLKSRC_MMIO 60 59 help 61 60 Enables the support for the Armada 370 and XP timer driver. 62 61 ··· 79 76 config SUN4I_TIMER 80 77 bool "Sun4i timer driver" if COMPILE_TEST 81 78 depends on GENERIC_CLOCKEVENTS 79 + depends on HAS_IOMEM 82 80 select CLKSRC_MMIO 83 81 help 84 82 Enables support for the Sun4i timer. ··· 93 89 94 90 config TEGRA_TIMER 95 91 bool "Tegra timer driver" if COMPILE_TEST 92 + select CLKSRC_MMIO 96 93 depends on ARM 97 94 help 98 95 Enables support for the Tegra driver. ··· 101 96 config VT8500_TIMER 102 97 bool "VT8500 timer driver" if COMPILE_TEST 103 98 depends on GENERIC_CLOCKEVENTS 99 + depends on HAS_IOMEM 104 100 help 105 101 Enables support for the VT8500 driver. 106 102 ··· 137 131 config CLKSRC_DBX500_PRCMU 138 132 bool "Clocksource PRCMU Timer" if COMPILE_TEST 139 133 depends on GENERIC_CLOCKEVENTS 134 + depends on HAS_IOMEM 140 135 help 141 136 Use the always on PRCMU Timer as clocksource 142 137 ··· 255 248 config CLKSRC_SAMSUNG_PWM 256 249 bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST 257 250 depends on GENERIC_CLOCKEVENTS 251 + depends on HAS_IOMEM 258 252 help 259 253 This is a new clocksource driver for the PWM timer found in 260 254 Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver ··· 265 257 config FSL_FTM_TIMER 266 258 bool "Freescale FlexTimer Module driver" if COMPILE_TEST 267 259 depends on GENERIC_CLOCKEVENTS 260 + depends on HAS_IOMEM 268 261 select CLKSRC_MMIO 269 262 help 270 263 Support for Freescale FlexTimer Module (FTM) timer. 
271 264 272 265 config VF_PIT_TIMER 273 266 bool 267 + select CLKSRC_MMIO 274 268 help 275 269 Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. 276 270 ··· 370 360 config CLKSRC_PXA 371 361 bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST 372 362 depends on GENERIC_CLOCKEVENTS 363 + depends on HAS_IOMEM 373 364 select CLKSRC_MMIO 374 365 help 375 366 This enables OST0 support available on PXA and SA-11x0 ··· 405 394 bool "Low power clocksource found in the LPC" if COMPILE_TEST 406 395 select CLKSRC_OF if OF 407 396 depends on HAS_IOMEM 397 + select CLKSRC_MMIO 408 398 help 409 399 Enable this option to use the Low Power controller timer 410 400 as clocksource.
+2 -1
drivers/clocksource/tcb_clksrc.c
··· 98 98 99 99 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); 100 100 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); 101 - clk_disable(tcd->clk); 101 + if (!clockevent_state_detached(d)) 102 + clk_disable(tcd->clk); 102 103 103 104 return 0; 104 105 }
+1 -1
fs/timerfd.c
··· 153 153 if (isalarm(ctx)) 154 154 remaining = alarm_expires_remaining(&ctx->t.alarm); 155 155 else 156 - remaining = hrtimer_expires_remaining(&ctx->t.tmr); 156 + remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr); 157 157 158 158 return remaining.tv64 < 0 ? ktime_set(0, 0): remaining; 159 159 }
+31 -3
include/linux/hrtimer.h
··· 87 87 * @function: timer expiry callback function 88 88 * @base: pointer to the timer base (per cpu and per clock) 89 89 * @state: state information (See bit values above) 90 - * @start_pid: timer statistics field to store the pid of the task which 90 + * @is_rel: Set if the timer was armed relative 91 + * @start_pid: timer statistics field to store the pid of the task which 91 92 * started the timer 92 93 * @start_site: timer statistics field to store the site where the timer 93 94 * was started ··· 102 101 ktime_t _softexpires; 103 102 enum hrtimer_restart (*function)(struct hrtimer *); 104 103 struct hrtimer_clock_base *base; 105 - unsigned long state; 104 + u8 state; 105 + u8 is_rel; 106 106 #ifdef CONFIG_TIMER_STATS 107 107 int start_pid; 108 108 void *start_site; ··· 323 321 324 322 #endif 325 323 324 + static inline ktime_t 325 + __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now) 326 + { 327 + ktime_t rem = ktime_sub(timer->node.expires, now); 328 + 329 + /* 330 + * Adjust relative timers for the extra we added in 331 + * hrtimer_start_range_ns() to prevent short timeouts. 
332 + */ 333 + if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) 334 + rem.tv64 -= hrtimer_resolution; 335 + return rem; 336 + } 337 + 338 + static inline ktime_t 339 + hrtimer_expires_remaining_adjusted(const struct hrtimer *timer) 340 + { 341 + return __hrtimer_expires_remaining_adjusted(timer, 342 + timer->base->get_time()); 343 + } 344 + 326 345 extern void clock_was_set(void); 327 346 #ifdef CONFIG_TIMERFD 328 347 extern void timerfd_clock_was_set(void); ··· 413 390 } 414 391 415 392 /* Query timers: */ 416 - extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); 393 + extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); 394 + 395 + static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer) 396 + { 397 + return __hrtimer_get_remaining(timer, false); 398 + } 417 399 418 400 extern u64 hrtimer_get_next_event(void); 419 401
+37 -18
kernel/time/hrtimer.c
··· 897 897 */ 898 898 static void __remove_hrtimer(struct hrtimer *timer, 899 899 struct hrtimer_clock_base *base, 900 - unsigned long newstate, int reprogram) 900 + u8 newstate, int reprogram) 901 901 { 902 902 struct hrtimer_cpu_base *cpu_base = base->cpu_base; 903 - unsigned int state = timer->state; 903 + u8 state = timer->state; 904 904 905 905 timer->state = newstate; 906 906 if (!(state & HRTIMER_STATE_ENQUEUED)) ··· 930 930 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart) 931 931 { 932 932 if (hrtimer_is_queued(timer)) { 933 - unsigned long state = timer->state; 933 + u8 state = timer->state; 934 934 int reprogram; 935 935 936 936 /* ··· 954 954 return 0; 955 955 } 956 956 957 + static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim, 958 + const enum hrtimer_mode mode) 959 + { 960 + #ifdef CONFIG_TIME_LOW_RES 961 + /* 962 + * CONFIG_TIME_LOW_RES indicates that the system has no way to return 963 + * granular time values. For relative timers we add hrtimer_resolution 964 + * (i.e. one jiffie) to prevent short timeouts. 965 + */ 966 + timer->is_rel = mode & HRTIMER_MODE_REL; 967 + if (timer->is_rel) 968 + tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution)); 969 + #endif 970 + return tim; 971 + } 972 + 957 973 /** 958 974 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU 959 975 * @timer: the timer to be added ··· 990 974 /* Remove an active timer from the queue: */ 991 975 remove_hrtimer(timer, base, true); 992 976 993 - if (mode & HRTIMER_MODE_REL) { 977 + if (mode & HRTIMER_MODE_REL) 994 978 tim = ktime_add_safe(tim, base->get_time()); 995 - /* 996 - * CONFIG_TIME_LOW_RES is a temporary way for architectures 997 - * to signal that they simply return xtime in 998 - * do_gettimeoffset(). In this case we want to round up by 999 - * resolution when starting a relative timer, to avoid short 1000 - * timeouts. This will go away with the GTOD framework. 
1001 - */ 1002 - #ifdef CONFIG_TIME_LOW_RES 1003 - tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution)); 1004 - #endif 1005 - } 979 + 980 + tim = hrtimer_update_lowres(timer, tim, mode); 1006 981 1007 982 hrtimer_set_expires_range_ns(timer, tim, delta_ns); 1008 983 ··· 1081 1074 /** 1082 1075 * hrtimer_get_remaining - get remaining time for the timer 1083 1076 * @timer: the timer to read 1077 + * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y 1084 1078 */ 1085 - ktime_t hrtimer_get_remaining(const struct hrtimer *timer) 1079 + ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust) 1086 1080 { 1087 1081 unsigned long flags; 1088 1082 ktime_t rem; 1089 1083 1090 1084 lock_hrtimer_base(timer, &flags); 1091 - rem = hrtimer_expires_remaining(timer); 1085 + if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust) 1086 + rem = hrtimer_expires_remaining_adjusted(timer); 1087 + else 1088 + rem = hrtimer_expires_remaining(timer); 1092 1089 unlock_hrtimer_base(timer, &flags); 1093 1090 1094 1091 return rem; 1095 1092 } 1096 - EXPORT_SYMBOL_GPL(hrtimer_get_remaining); 1093 + EXPORT_SYMBOL_GPL(__hrtimer_get_remaining); 1097 1094 1098 1095 #ifdef CONFIG_NO_HZ_COMMON 1099 1096 /** ··· 1229 1218 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); 1230 1219 timer_stats_account_hrtimer(timer); 1231 1220 fn = timer->function; 1221 + 1222 + /* 1223 + * Clear the 'is relative' flag for the TIME_LOW_RES case. If the 1224 + * timer is restarted with a period then it becomes an absolute 1225 + * timer. If its not restarted it does not matter. 1226 + */ 1227 + if (IS_ENABLED(CONFIG_TIME_LOW_RES)) 1228 + timer->is_rel = false; 1232 1229 1233 1230 /* 1234 1231 * Because we run timers from hardirq context, there is no chance
+1 -1
kernel/time/itimer.c
··· 26 26 */ 27 27 static struct timeval itimer_get_remtime(struct hrtimer *timer) 28 28 { 29 - ktime_t rem = hrtimer_get_remaining(timer); 29 + ktime_t rem = __hrtimer_get_remaining(timer, true); 30 30 31 31 /* 32 32 * Racy but safe: if the itimer expires after the above
+12 -2
kernel/time/ntp.c
··· 685 685 if (!capable(CAP_SYS_TIME)) 686 686 return -EPERM; 687 687 688 - if (!timeval_inject_offset_valid(&txc->time)) 689 - return -EINVAL; 688 + if (txc->modes & ADJ_NANO) { 689 + struct timespec ts; 690 + 691 + ts.tv_sec = txc->time.tv_sec; 692 + ts.tv_nsec = txc->time.tv_usec; 693 + if (!timespec_inject_offset_valid(&ts)) 694 + return -EINVAL; 695 + 696 + } else { 697 + if (!timeval_inject_offset_valid(&txc->time)) 698 + return -EINVAL; 699 + } 690 700 } 691 701 692 702 /*
+1 -1
kernel/time/posix-timers.c
··· 760 760 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) 761 761 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); 762 762 763 - remaining = ktime_sub(hrtimer_get_expires(timer), now); 763 + remaining = __hrtimer_expires_remaining_adjusted(timer, now); 764 764 /* Return 0 only, when the timer is expired and not pending */ 765 765 if (remaining.tv64 <= 0) { 766 766 /*
+9 -7
kernel/time/tick-sched.c
··· 36 36 */ 37 37 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); 38 38 39 - /* 40 - * The time, when the last jiffy update happened. Protected by jiffies_lock. 41 - */ 42 - static ktime_t last_jiffies_update; 43 - 44 39 struct tick_sched *tick_get_tick_sched(int cpu) 45 40 { 46 41 return &per_cpu(tick_cpu_sched, cpu); 47 42 } 43 + 44 + #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS) 45 + /* 46 + * The time, when the last jiffy update happened. Protected by jiffies_lock. 47 + */ 48 + static ktime_t last_jiffies_update; 48 49 49 50 /* 50 51 * Must be called with interrupts disabled ! ··· 152 151 update_process_times(user_mode(regs)); 153 152 profile_tick(CPU_PROFILING); 154 153 } 154 + #endif 155 155 156 156 #ifdef CONFIG_NO_HZ_FULL 157 157 cpumask_var_t tick_nohz_full_mask; ··· 995 993 /* Get the next period */ 996 994 next = tick_init_jiffy_update(); 997 995 998 - hrtimer_forward_now(&ts->sched_timer, tick_period); 999 996 hrtimer_set_expires(&ts->sched_timer, next); 1000 - tick_program_event(next, 1); 997 + hrtimer_forward_now(&ts->sched_timer, tick_period); 998 + tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); 1001 999 tick_nohz_activate(ts, NOHZ_MODE_LOWRES); 1002 1000 } 1003 1001
+1 -1
kernel/time/timer_list.c
··· 69 69 print_name_offset(m, taddr); 70 70 SEQ_printf(m, ", "); 71 71 print_name_offset(m, timer->function); 72 - SEQ_printf(m, ", S:%02lx", timer->state); 72 + SEQ_printf(m, ", S:%02x", timer->state); 73 73 #ifdef CONFIG_TIMER_STATS 74 74 SEQ_printf(m, ", "); 75 75 print_name_offset(m, timer->start_site);
+138 -1
tools/testing/selftests/timers/valid-adjtimex.c
··· 45 45 } 46 46 #endif 47 47 48 - #define NSEC_PER_SEC 1000000000L 48 + #define NSEC_PER_SEC 1000000000LL 49 + #define USEC_PER_SEC 1000000LL 50 + 51 + #define ADJ_SETOFFSET 0x0100 52 + 53 + #include <sys/syscall.h> 54 + static int clock_adjtime(clockid_t id, struct timex *tx) 55 + { 56 + return syscall(__NR_clock_adjtime, id, tx); 57 + } 58 + 49 59 50 60 /* clear NTP time_status & time_state */ 51 61 int clear_time_state(void) ··· 203 193 } 204 194 205 195 196 + int set_offset(long long offset, int use_nano) 197 + { 198 + struct timex tmx = {}; 199 + int ret; 200 + 201 + tmx.modes = ADJ_SETOFFSET; 202 + if (use_nano) { 203 + tmx.modes |= ADJ_NANO; 204 + 205 + tmx.time.tv_sec = offset / NSEC_PER_SEC; 206 + tmx.time.tv_usec = offset % NSEC_PER_SEC; 207 + 208 + if (offset < 0 && tmx.time.tv_usec) { 209 + tmx.time.tv_sec -= 1; 210 + tmx.time.tv_usec += NSEC_PER_SEC; 211 + } 212 + } else { 213 + tmx.time.tv_sec = offset / USEC_PER_SEC; 214 + tmx.time.tv_usec = offset % USEC_PER_SEC; 215 + 216 + if (offset < 0 && tmx.time.tv_usec) { 217 + tmx.time.tv_sec -= 1; 218 + tmx.time.tv_usec += USEC_PER_SEC; 219 + } 220 + } 221 + 222 + ret = clock_adjtime(CLOCK_REALTIME, &tmx); 223 + if (ret < 0) { 224 + printf("(sec: %ld usec: %ld) ", tmx.time.tv_sec, tmx.time.tv_usec); 225 + printf("[FAIL]\n"); 226 + return -1; 227 + } 228 + return 0; 229 + } 230 + 231 + int set_bad_offset(long sec, long usec, int use_nano) 232 + { 233 + struct timex tmx = {}; 234 + int ret; 235 + 236 + tmx.modes = ADJ_SETOFFSET; 237 + if (use_nano) 238 + tmx.modes |= ADJ_NANO; 239 + 240 + tmx.time.tv_sec = sec; 241 + tmx.time.tv_usec = usec; 242 + ret = clock_adjtime(CLOCK_REALTIME, &tmx); 243 + if (ret >= 0) { 244 + printf("Invalid (sec: %ld usec: %ld) did not fail! ", tmx.time.tv_sec, tmx.time.tv_usec); 245 + printf("[FAIL]\n"); 246 + return -1; 247 + } 248 + return 0; 249 + } 250 + 251 + int validate_set_offset(void) 252 + { 253 + printf("Testing ADJ_SETOFFSET... 
"); 254 + 255 + /* Test valid values */ 256 + if (set_offset(NSEC_PER_SEC - 1, 1)) 257 + return -1; 258 + 259 + if (set_offset(-NSEC_PER_SEC + 1, 1)) 260 + return -1; 261 + 262 + if (set_offset(-NSEC_PER_SEC - 1, 1)) 263 + return -1; 264 + 265 + if (set_offset(5 * NSEC_PER_SEC, 1)) 266 + return -1; 267 + 268 + if (set_offset(-5 * NSEC_PER_SEC, 1)) 269 + return -1; 270 + 271 + if (set_offset(5 * NSEC_PER_SEC + NSEC_PER_SEC / 2, 1)) 272 + return -1; 273 + 274 + if (set_offset(-5 * NSEC_PER_SEC - NSEC_PER_SEC / 2, 1)) 275 + return -1; 276 + 277 + if (set_offset(USEC_PER_SEC - 1, 0)) 278 + return -1; 279 + 280 + if (set_offset(-USEC_PER_SEC + 1, 0)) 281 + return -1; 282 + 283 + if (set_offset(-USEC_PER_SEC - 1, 0)) 284 + return -1; 285 + 286 + if (set_offset(5 * USEC_PER_SEC, 0)) 287 + return -1; 288 + 289 + if (set_offset(-5 * USEC_PER_SEC, 0)) 290 + return -1; 291 + 292 + if (set_offset(5 * USEC_PER_SEC + USEC_PER_SEC / 2, 0)) 293 + return -1; 294 + 295 + if (set_offset(-5 * USEC_PER_SEC - USEC_PER_SEC / 2, 0)) 296 + return -1; 297 + 298 + /* Test invalid values */ 299 + if (set_bad_offset(0, -1, 1)) 300 + return -1; 301 + if (set_bad_offset(0, -1, 0)) 302 + return -1; 303 + if (set_bad_offset(0, 2 * NSEC_PER_SEC, 1)) 304 + return -1; 305 + if (set_bad_offset(0, 2 * USEC_PER_SEC, 0)) 306 + return -1; 307 + if (set_bad_offset(0, NSEC_PER_SEC, 1)) 308 + return -1; 309 + if (set_bad_offset(0, USEC_PER_SEC, 0)) 310 + return -1; 311 + if (set_bad_offset(0, -NSEC_PER_SEC, 1)) 312 + return -1; 313 + if (set_bad_offset(0, -USEC_PER_SEC, 0)) 314 + return -1; 315 + 316 + printf("[OK]\n"); 317 + return 0; 318 + } 319 + 206 320 int main(int argc, char **argv) 207 321 { 208 322 if (validate_freq()) 323 + return ksft_exit_fail(); 324 + 325 + if (validate_set_offset()) 209 326 return ksft_exit_fail(); 210 327 211 328 return ksft_exit_pass();