Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] NTP: ntp-helper functions

This patch cleans up a commonly repeated set of changes to the NTP state
variables by adding two helper inline functions:

ntp_clear(): Clears the ntp state variables

ntp_synced(): Returns 1 if the system is synced with a time server.

This was compile tested for alpha, arm, i386, x86-64, ppc64, s390, sparc,
sparc64.

Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by John Stultz and committed by Linus Torvalds.
b149ee22 6c231b7b

+65 -111
+2 -5
arch/alpha/kernel/time.c
··· 149 149 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 150 150 * called as close as possible to 500 ms before the new second starts. 151 151 */ 152 - if ((time_status & STA_UNSYNC) == 0 152 + if (ntp_synced() 153 153 && xtime.tv_sec > state.last_rtc_update + 660 154 154 && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2 155 155 && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) { ··· 502 502 set_normalized_timespec(&xtime, sec, nsec); 503 503 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 504 504 505 - time_adjust = 0; /* stop active adjtime() */ 506 - time_status |= STA_UNSYNC; 507 - time_maxerror = NTP_PHASE_LIMIT; 508 - time_esterror = NTP_PHASE_LIMIT; 505 + ntp_clear(); 509 506 510 507 write_sequnlock_irq(&xtime_lock); 511 508 clock_was_set();
+2 -5
arch/arm/kernel/time.c
··· 102 102 */ 103 103 static inline void do_set_rtc(void) 104 104 { 105 - if (time_status & STA_UNSYNC || set_rtc == NULL) 105 + if (!ntp_synced() || set_rtc == NULL) 106 106 return; 107 107 108 108 if (next_rtc_update && ··· 292 292 set_normalized_timespec(&xtime, sec, nsec); 293 293 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 294 294 295 - time_adjust = 0; /* stop active adjtime() */ 296 - time_status |= STA_UNSYNC; 297 - time_maxerror = NTP_PHASE_LIMIT; 298 - time_esterror = NTP_PHASE_LIMIT; 295 + ntp_clear(); 299 296 write_sequnlock_irq(&xtime_lock); 300 297 clock_was_set(); 301 298 return 0;
+2 -5
arch/arm26/kernel/time.c
··· 114 114 */ 115 115 static inline void do_set_rtc(void) 116 116 { 117 - if (time_status & STA_UNSYNC || set_rtc == NULL) 117 + if (!ntp_synced() || set_rtc == NULL) 118 118 return; 119 119 120 120 //FIXME - timespec.tv_sec is a time_t not unsigned long ··· 189 189 190 190 xtime.tv_sec = tv->tv_sec; 191 191 xtime.tv_nsec = tv->tv_nsec; 192 - time_adjust = 0; /* stop active adjtime() */ 193 - time_status |= STA_UNSYNC; 194 - time_maxerror = NTP_PHASE_LIMIT; 195 - time_esterror = NTP_PHASE_LIMIT; 192 + ntp_clear(); 196 193 write_sequnlock_irq(&xtime_lock); 197 194 clock_was_set(); 198 195 return 0;
+1 -1
arch/cris/arch-v10/kernel/time.c
··· 240 240 * The division here is not time critical since it will run once in 241 241 * 11 minutes 242 242 */ 243 - if ((time_status & STA_UNSYNC) == 0 && 243 + if (ntp_synced() && 244 244 xtime.tv_sec > last_rtc_update + 660 && 245 245 (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 && 246 246 (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
+1 -4
arch/cris/kernel/time.c
··· 114 114 set_normalized_timespec(&xtime, sec, nsec); 115 115 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 116 116 117 - time_adjust = 0; /* stop active adjtime() */ 118 - time_status |= STA_UNSYNC; 119 - time_maxerror = NTP_PHASE_LIMIT; 120 - time_esterror = NTP_PHASE_LIMIT; 117 + ntp_clear(); 121 118 write_sequnlock_irq(&xtime_lock); 122 119 clock_was_set(); 123 120 return 0;
+2 -5
arch/frv/kernel/time.c
··· 85 85 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 86 86 * called as close as possible to 500 ms before the new second starts. 87 87 */ 88 - if ((time_status & STA_UNSYNC) == 0 && 88 + if (ntp_synced() && 89 89 xtime.tv_sec > last_rtc_update + 660 && 90 90 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 91 91 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2 ··· 216 216 set_normalized_timespec(&xtime, sec, nsec); 217 217 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 218 218 219 - time_adjust = 0; /* stop active adjtime() */ 220 - time_status |= STA_UNSYNC; 221 - time_maxerror = NTP_PHASE_LIMIT; 222 - time_esterror = NTP_PHASE_LIMIT; 219 + ntp_clear(); 223 220 write_sequnlock_irq(&xtime_lock); 224 221 clock_was_set(); 225 222 return 0;
+1 -4
arch/h8300/kernel/time.c
··· 116 116 117 117 xtime.tv_sec = tv->tv_sec; 118 118 xtime.tv_nsec = tv->tv_nsec; 119 - time_adjust = 0; /* stop active adjtime() */ 120 - time_status |= STA_UNSYNC; 121 - time_maxerror = NTP_PHASE_LIMIT; 122 - time_esterror = NTP_PHASE_LIMIT; 119 + ntp_clear(); 123 120 write_sequnlock_irq(&xtime_lock); 124 121 clock_was_set(); 125 122 return 0;
+2 -5
arch/i386/kernel/time.c
··· 194 194 set_normalized_timespec(&xtime, sec, nsec); 195 195 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 196 196 197 - time_adjust = 0; /* stop active adjtime() */ 198 - time_status |= STA_UNSYNC; 199 - time_maxerror = NTP_PHASE_LIMIT; 200 - time_esterror = NTP_PHASE_LIMIT; 197 + ntp_clear(); 201 198 write_sequnlock_irq(&xtime_lock); 202 199 clock_was_set(); 203 200 return 0; ··· 344 347 * This code is run on a timer. If the clock is set, that timer 345 348 * may not expire at the correct time. Thus, we adjust... 346 349 */ 347 - if ((time_status & STA_UNSYNC) != 0) 350 + if (!ntp_synced()) 348 351 /* 349 352 * Not synced, exit, do not restart a timer (if one is 350 353 * running, let it run out).
+2 -5
arch/m32r/kernel/time.c
··· 171 171 set_normalized_timespec(&xtime, sec, nsec); 172 172 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 173 173 174 - time_adjust = 0; /* stop active adjtime() */ 175 - time_status |= STA_UNSYNC; 176 - time_maxerror = NTP_PHASE_LIMIT; 177 - time_esterror = NTP_PHASE_LIMIT; 174 + ntp_clear(); 178 175 write_sequnlock_irq(&xtime_lock); 179 176 clock_was_set(); 180 177 ··· 218 221 * called as close as possible to 500 ms before the new second starts. 219 222 */ 220 223 write_seqlock(&xtime_lock); 221 - if ((time_status & STA_UNSYNC) == 0 224 + if (ntp_synced() 222 225 && xtime.tv_sec > last_rtc_update + 660 223 226 && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2 224 227 && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
+1 -4
arch/m68k/kernel/time.c
··· 166 166 set_normalized_timespec(&xtime, sec, nsec); 167 167 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 168 168 169 - time_adjust = 0; /* stop active adjtime() */ 170 - time_status |= STA_UNSYNC; 171 - time_maxerror = NTP_PHASE_LIMIT; 172 - time_esterror = NTP_PHASE_LIMIT; 169 + ntp_clear(); 173 170 write_sequnlock_irq(&xtime_lock); 174 171 clock_was_set(); 175 172 return 0;
+2 -5
arch/m68knommu/kernel/time.c
··· 68 68 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 69 69 * called as close as possible to 500 ms before the new second starts. 70 70 */ 71 - if ((time_status & STA_UNSYNC) == 0 && 71 + if (ntp_synced() && 72 72 xtime.tv_sec > last_rtc_update + 660 && 73 73 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 74 74 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { ··· 178 178 set_normalized_timespec(&xtime, sec, nsec); 179 179 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 180 180 181 - time_adjust = 0; /* stop active adjtime() */ 182 - time_status |= STA_UNSYNC; 183 - time_maxerror = NTP_PHASE_LIMIT; 184 - time_esterror = NTP_PHASE_LIMIT; 181 + ntp_clear(); 185 182 write_sequnlock_irq(&xtime_lock); 186 183 clock_was_set(); 187 184 return 0;
+1 -4
arch/mips/kernel/sysirix.c
··· 632 632 write_seqlock_irq(&xtime_lock); 633 633 xtime.tv_sec = value; 634 634 xtime.tv_nsec = 0; 635 - time_adjust = 0; /* stop active adjtime() */ 636 - time_status |= STA_UNSYNC; 637 - time_maxerror = NTP_PHASE_LIMIT; 638 - time_esterror = NTP_PHASE_LIMIT; 635 + ntp_clear(); 639 636 write_sequnlock_irq(&xtime_lock); 640 637 641 638 return 0;
+2 -5
arch/mips/kernel/time.c
··· 223 223 set_normalized_timespec(&xtime, sec, nsec); 224 224 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 225 225 226 - time_adjust = 0; /* stop active adjtime() */ 227 - time_status |= STA_UNSYNC; 228 - time_maxerror = NTP_PHASE_LIMIT; 229 - time_esterror = NTP_PHASE_LIMIT; 226 + ntp_clear(); 230 227 231 228 write_sequnlock_irq(&xtime_lock); 232 229 clock_was_set(); ··· 439 442 * called as close as possible to 500 ms before the new second starts. 440 443 */ 441 444 write_seqlock(&xtime_lock); 442 - if ((time_status & STA_UNSYNC) == 0 && 445 + if (ntp_synced() && 443 446 xtime.tv_sec > last_rtc_update + 660 && 444 447 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 445 448 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+1 -1
arch/mips/sgi-ip27/ip27-timer.c
··· 118 118 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 119 119 * called as close as possible to when a second starts. 120 120 */ 121 - if ((time_status & STA_UNSYNC) == 0 && 121 + if (ntp_synced() && 122 122 xtime.tv_sec > last_rtc_update + 660 && 123 123 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 124 124 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+1 -4
arch/parisc/kernel/time.c
··· 188 188 set_normalized_timespec(&xtime, sec, nsec); 189 189 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 190 190 191 - time_adjust = 0; /* stop active adjtime() */ 192 - time_status |= STA_UNSYNC; 193 - time_maxerror = NTP_PHASE_LIMIT; 194 - time_esterror = NTP_PHASE_LIMIT; 191 + ntp_clear(); 195 192 } 196 193 write_sequnlock_irq(&xtime_lock); 197 194 clock_was_set();
+2 -5
arch/ppc/kernel/time.c
··· 169 169 * We should have an rtc call that only sets the minutes and 170 170 * seconds like on Intel to avoid problems with non UTC clocks. 171 171 */ 172 - if ( ppc_md.set_rtc_time && (time_status & STA_UNSYNC) == 0 && 172 + if ( ppc_md.set_rtc_time && ntp_synced() && 173 173 xtime.tv_sec - last_rtc_update >= 659 && 174 174 abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ && 175 175 jiffies - wall_jiffies == 1) { ··· 271 271 */ 272 272 last_rtc_update = new_sec - 658; 273 273 274 - time_adjust = 0; /* stop active adjtime() */ 275 - time_status |= STA_UNSYNC; 276 - time_maxerror = NTP_PHASE_LIMIT; 277 - time_esterror = NTP_PHASE_LIMIT; 274 + ntp_clear(); 278 275 write_sequnlock_irqrestore(&xtime_lock, flags); 279 276 clock_was_set(); 280 277 return 0;
+2 -5
arch/ppc64/kernel/time.c
··· 128 128 * We should have an rtc call that only sets the minutes and 129 129 * seconds like on Intel to avoid problems with non UTC clocks. 130 130 */ 131 - if ( (time_status & STA_UNSYNC) == 0 && 131 + if (ntp_synced() && 132 132 xtime.tv_sec - last_rtc_update >= 659 && 133 133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 134 134 jiffies - wall_jiffies == 1) { ··· 435 435 */ 436 436 last_rtc_update = new_sec - 658; 437 437 438 - time_adjust = 0; /* stop active adjtime() */ 439 - time_status |= STA_UNSYNC; 440 - time_maxerror = NTP_PHASE_LIMIT; 441 - time_esterror = NTP_PHASE_LIMIT; 438 + ntp_clear(); 442 439 443 440 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp), 444 441 do_gtod.varp->tb_to_xs );
+1 -4
arch/s390/kernel/time.c
··· 139 139 set_normalized_timespec(&xtime, sec, nsec); 140 140 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 141 141 142 - time_adjust = 0; /* stop active adjtime() */ 143 - time_status |= STA_UNSYNC; 144 - time_maxerror = NTP_PHASE_LIMIT; 145 - time_esterror = NTP_PHASE_LIMIT; 142 + ntp_clear(); 146 143 write_sequnlock_irq(&xtime_lock); 147 144 clock_was_set(); 148 145 return 0;
+2 -5
arch/sh/kernel/time.c
··· 215 215 set_normalized_timespec(&xtime, sec, nsec); 216 216 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 217 217 218 - time_adjust = 0; /* stop active adjtime() */ 219 - time_status |= STA_UNSYNC; 220 - time_maxerror = NTP_PHASE_LIMIT; 221 - time_esterror = NTP_PHASE_LIMIT; 218 + ntp_clear(); 222 219 write_sequnlock_irq(&xtime_lock); 223 220 clock_was_set(); 224 221 ··· 249 252 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 250 253 * called as close as possible to 500 ms before the new second starts. 251 254 */ 252 - if ((time_status & STA_UNSYNC) == 0 && 255 + if (ntp_synced() && 253 256 xtime.tv_sec > last_rtc_update + 660 && 254 257 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 255 258 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+2 -5
arch/sh64/kernel/time.c
··· 247 247 set_normalized_timespec(&xtime, sec, nsec); 248 248 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 249 249 250 - time_adjust = 0; /* stop active adjtime() */ 251 - time_status |= STA_UNSYNC; 252 - time_maxerror = NTP_PHASE_LIMIT; 253 - time_esterror = NTP_PHASE_LIMIT; 250 + ntp_clear(); 254 251 write_sequnlock_irq(&xtime_lock); 255 252 clock_was_set(); 256 253 ··· 325 328 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 326 329 * called as close as possible to 500 ms before the new second starts. 327 330 */ 328 - if ((time_status & STA_UNSYNC) == 0 && 331 + if (ntp_synced() && 329 332 xtime.tv_sec > last_rtc_update + 660 && 330 333 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 331 334 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+1 -4
arch/sparc/kernel/pcic.c
··· 840 840 841 841 xtime.tv_sec = tv->tv_sec; 842 842 xtime.tv_nsec = tv->tv_nsec; 843 - time_adjust = 0; /* stop active adjtime() */ 844 - time_status |= STA_UNSYNC; 845 - time_maxerror = NTP_PHASE_LIMIT; 846 - time_esterror = NTP_PHASE_LIMIT; 843 + ntp_clear(); 847 844 return 0; 848 845 } 849 846
+2 -5
arch/sparc/kernel/time.c
··· 139 139 140 140 141 141 /* Determine when to update the Mostek clock. */ 142 - if ((time_status & STA_UNSYNC) == 0 && 142 + if (ntp_synced() && 143 143 xtime.tv_sec > last_rtc_update + 660 && 144 144 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 145 145 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { ··· 554 554 set_normalized_timespec(&xtime, sec, nsec); 555 555 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 556 556 557 - time_adjust = 0; /* stop active adjtime() */ 558 - time_status |= STA_UNSYNC; 559 - time_maxerror = NTP_PHASE_LIMIT; 560 - time_esterror = NTP_PHASE_LIMIT; 557 + ntp_clear(); 561 558 return 0; 562 559 } 563 560
+1 -1
arch/sparc64/kernel/time.c
··· 449 449 static long last_rtc_update; 450 450 451 451 /* Determine when to update the Mostek clock. */ 452 - if ((time_status & STA_UNSYNC) == 0 && 452 + if (ntp_synced() && 453 453 xtime.tv_sec > last_rtc_update + 660 && 454 454 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 455 455 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+2 -5
arch/v850/kernel/time.c
··· 66 66 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 67 67 * called as close as possible to 500 ms before the new second starts. 68 68 */ 69 - if ((time_status & STA_UNSYNC) == 0 && 69 + if (ntp_synced() && 70 70 xtime.tv_sec > last_rtc_update + 660 && 71 71 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 72 72 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { ··· 169 169 xtime.tv_sec = tv->tv_sec; 170 170 xtime.tv_nsec = tv->tv_nsec; 171 171 172 - time_adjust = 0; /* stop active adjtime () */ 173 - time_status |= STA_UNSYNC; 174 - time_maxerror = NTP_PHASE_LIMIT; 175 - time_esterror = NTP_PHASE_LIMIT; 172 + ntp_clear(); 176 173 177 174 write_sequnlock_irq (&xtime_lock); 178 175 clock_was_set();
+2 -5
arch/x86_64/kernel/time.c
··· 176 176 set_normalized_timespec(&xtime, sec, nsec); 177 177 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 178 178 179 - time_adjust = 0; /* stop active adjtime() */ 180 - time_status |= STA_UNSYNC; 181 - time_maxerror = NTP_PHASE_LIMIT; 182 - time_esterror = NTP_PHASE_LIMIT; 179 + ntp_clear(); 183 180 184 181 write_sequnlock_irq(&xtime_lock); 185 182 clock_was_set(); ··· 468 471 * off) isn't likely to go away much sooner anyway. 469 472 */ 470 473 471 - if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update && 474 + if (ntp_synced() && xtime.tv_sec > rtc_update && 472 475 abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) { 473 476 set_rtc_mmss(xtime.tv_sec); 474 477 rtc_update = xtime.tv_sec + 660;
+2 -5
arch/xtensa/kernel/time.c
··· 122 122 set_normalized_timespec(&xtime, sec, nsec); 123 123 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 124 124 125 - time_adjust = 0; /* stop active adjtime() */ 126 - time_status |= STA_UNSYNC; 127 - time_maxerror = NTP_PHASE_LIMIT; 128 - time_esterror = NTP_PHASE_LIMIT; 125 + ntp_clear(); 129 126 write_sequnlock_irq(&xtime_lock); 130 127 return 0; 131 128 } ··· 181 184 next += CCOUNT_PER_JIFFY; 182 185 do_timer (regs); /* Linux handler in kernel/timer.c */ 183 186 184 - if ((time_status & STA_UNSYNC) == 0 && 187 + if (ntp_synced() && 185 188 xtime.tv_sec - last_rtc_update >= 659 && 186 189 abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ && 187 190 jiffies - wall_jiffies == 1) {
+23
include/linux/timex.h
··· 260 260 extern long pps_errcnt; /* calibration errors */ 261 261 extern long pps_stbcnt; /* stability limit exceeded */ 262 262 263 + /** 264 + * ntp_clear - Clears the NTP state variables 265 + * 266 + * Must be called while holding a write on the xtime_lock 267 + */ 268 + static inline void ntp_clear(void) 269 + { 270 + time_adjust = 0; /* stop active adjtime() */ 271 + time_status |= STA_UNSYNC; 272 + time_maxerror = NTP_PHASE_LIMIT; 273 + time_esterror = NTP_PHASE_LIMIT; 274 + } 275 + 276 + /** 277 + * ntp_synced - Returns 1 if the NTP status is not UNSYNC 278 + * 279 + */ 280 + static inline int ntp_synced(void) 281 + { 282 + return !(time_status & STA_UNSYNC); 283 + } 284 + 285 + 263 286 #ifdef CONFIG_TIME_INTERPOLATION 264 287 265 288 #define TIME_SOURCE_CPU 0