Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

time, drivers/rtc: Don't bother with rtc_resume() for the nonstop clocksource

If a system does not provide a persistent_clock(), the time
will be updated on resume by rtc_resume(). With the addition
of the non-stop clocksources for suspend timing, those systems
set the time on resume in timekeeping_resume(), but may not
provide a valid persistent_clock().

This results in the rtc_resume() logic thinking no one has set
the time, so it will then overwrite the suspend time again,
which is not necessary and only increases clock error.

So, fix this for rtc_resume().

This patch also renames persistent_clock_exist to
persistent_clock_exists, making the name grammatical.

Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1427945681-29972-19-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Xunlei Pang and committed by
Ingo Molnar
0fa88cb4 264bb3f7

+54 -25
+2 -2
drivers/rtc/class.c
··· 55 55 struct timespec64 delta, delta_delta; 56 56 int err; 57 57 58 - if (has_persistent_clock()) 58 + if (timekeeping_rtc_skipsuspend()) 59 59 return 0; 60 60 61 61 if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) ··· 102 102 struct timespec64 sleep_time; 103 103 int err; 104 104 105 - if (has_persistent_clock()) 105 + if (timekeeping_rtc_skipresume()) 106 106 return 0; 107 107 108 108 rtc_hctosys_ret = -ENODEV;
+3 -6
include/linux/timekeeping.h
··· 248 248 /* 249 249 * RTC specific 250 250 */ 251 + extern bool timekeeping_rtc_skipsuspend(void); 252 + extern bool timekeeping_rtc_skipresume(void); 253 + 251 254 extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); 252 255 253 256 /* ··· 262 259 /* 263 260 * Persistent clock related interfaces 264 261 */ 265 - extern bool persistent_clock_exist; 266 262 extern int persistent_clock_is_local; 267 - 268 - static inline bool has_persistent_clock(void) 269 - { 270 - return persistent_clock_exist; 271 - } 272 263 273 264 extern void read_persistent_clock(struct timespec *ts); 274 265 extern void read_persistent_clock64(struct timespec64 *ts);
+49 -17
kernel/time/timekeeping.c
··· 64 64 /* flag for if timekeeping is suspended */ 65 65 int __read_mostly timekeeping_suspended; 66 66 67 - /* Flag for if there is a persistent clock on this platform */ 68 - bool __read_mostly persistent_clock_exist = false; 69 - 70 67 static inline void tk_normalize_xtime(struct timekeeper *tk) 71 68 { 72 69 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { ··· 1201 1204 *ts64 = timespec_to_timespec64(ts); 1202 1205 } 1203 1206 1207 + /* Flag for if timekeeping_resume() has injected sleeptime */ 1208 + static bool sleeptime_injected; 1209 + 1210 + /* Flag for if there is a persistent clock on this platform */ 1211 + static bool persistent_clock_exists; 1212 + 1204 1213 /* 1205 1214 * timekeeping_init - Initializes the clocksource and common timekeeping values 1206 1215 */ ··· 1224 1221 now.tv_sec = 0; 1225 1222 now.tv_nsec = 0; 1226 1223 } else if (now.tv_sec || now.tv_nsec) 1227 - persistent_clock_exist = true; 1224 + persistent_clock_exists = true; 1228 1225 1229 1226 read_boot_clock64(&boot); 1230 1227 if (!timespec64_valid_strict(&boot)) { ··· 1285 1282 1286 1283 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE) 1287 1284 /** 1285 + * We have three kinds of time sources to use for sleep time 1286 + * injection, the preference order is: 1287 + * 1) non-stop clocksource 1288 + * 2) persistent clock (ie: RTC accessible when irqs are off) 1289 + * 3) RTC 1290 + * 1291 + * 1) and 2) are used by timekeeping, 3) by RTC subsystem. 1292 + * If system has neither 1) nor 2), 3) will be used finally. 1293 + * 1294 + * 1295 + * If timekeeping has injected sleeptime via either 1) or 2), 1296 + * 3) becomes needless, so in this case we don't need to call 1297 + * rtc_resume(), and this is what timekeeping_rtc_skipresume() 1298 + * means. 
1299 + */ 1300 + bool timekeeping_rtc_skipresume(void) 1301 + { 1302 + return sleeptime_injected; 1303 + } 1304 + 1305 + /** 1306 + * 1) can be determined whether to use or not only when doing 1307 + * timekeeping_resume() which is invoked after rtc_suspend(), 1308 + * so we can't skip rtc_suspend() surely if system has 1). 1309 + * 1310 + * But if system has 2), 2) will definitely be used, so in this 1311 + * case we don't need to call rtc_suspend(), and this is what 1312 + * timekeeping_rtc_skipsuspend() means. 1313 + */ 1314 + bool timekeeping_rtc_skipsuspend(void) 1315 + { 1316 + return persistent_clock_exists; 1317 + } 1318 + 1319 + /** 1288 1320 * timekeeping_inject_sleeptime64 - Adds suspend interval to timeekeeping values 1289 1321 * @delta: pointer to a timespec64 delta value 1290 1322 * 1291 1323 * This hook is for architectures that cannot support read_persistent_clock64 1292 1324 * because their RTC/persistent clock is only accessible when irqs are enabled. 1325 + * and also don't have an effective nonstop clocksource. 1293 1326 * 1294 1327 * This function should only be called by rtc_resume(), and allows 1295 1328 * a suspend offset to be injected into the timekeeping values. 
··· 1334 1295 { 1335 1296 struct timekeeper *tk = &tk_core.timekeeper; 1336 1297 unsigned long flags; 1337 - 1338 - /* 1339 - * Make sure we don't set the clock twice, as timekeeping_resume() 1340 - * already did it 1341 - */ 1342 - if (has_persistent_clock()) 1343 - return; 1344 1298 1345 1299 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1346 1300 write_seqcount_begin(&tk_core.seq); ··· 1366 1334 unsigned long flags; 1367 1335 struct timespec64 ts_new, ts_delta; 1368 1336 cycle_t cycle_now, cycle_delta; 1369 - bool suspendtime_found = false; 1370 1337 1338 + sleeptime_injected = false; 1371 1339 read_persistent_clock64(&ts_new); 1372 1340 1373 1341 clockevents_resume(); ··· 1413 1381 nsec += ((u64) cycle_delta * mult) >> shift; 1414 1382 1415 1383 ts_delta = ns_to_timespec64(nsec); 1416 - suspendtime_found = true; 1384 + sleeptime_injected = true; 1417 1385 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) { 1418 1386 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time); 1419 - suspendtime_found = true; 1387 + sleeptime_injected = true; 1420 1388 } 1421 1389 1422 - if (suspendtime_found) 1390 + if (sleeptime_injected) 1423 1391 __timekeeping_inject_sleeptime(tk, &ts_delta); 1424 1392 1425 1393 /* Re-base the last cycle value */ ··· 1453 1421 * value returned, update the persistent_clock_exists flag. 1454 1422 */ 1455 1423 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec) 1456 - persistent_clock_exist = true; 1424 + persistent_clock_exists = true; 1457 1425 1458 1426 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1459 1427 write_seqcount_begin(&tk_core.seq); 1460 1428 timekeeping_forward_now(tk); 1461 1429 timekeeping_suspended = 1; 1462 1430 1463 - if (has_persistent_clock()) { 1431 + if (persistent_clock_exists) { 1464 1432 /* 1465 1433 * To avoid drift caused by repeated suspend/resumes, 1466 1434 * which each can add ~1 second drift error,