/*
 * linux/kernel/time.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file contains the interface functions for the various
 * time related system calls: time, stime, gettimeofday, settimeofday,
 * adjtime
 */
/*
 * Modification history kernel/time.c
 *
 * 1993-09-02	Philip Gladstone
 *	Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08	Torsten Duwe
 *	adjtime interface update and CMOS clock write code
 * 1995-08-13	Torsten Duwe
 *	kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16	Ulrich Windl
 *	Introduced error checking for many cases in adjtimex().
 *	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *	(Even though the technical memorandum forbids it)
 * 2004-07-14	Christoph Lameter
 *	Added getnstimeofday to allow the posix timer functions to return
 *	with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "timeconst.h"

/*
 * The timezone where the local system is located. Used as a default by some
 * programs that obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday(). Is this for backwards compatibility? If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
	time_t i = get_seconds();

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday(). Is this for backwards compatibility? If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */

SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
		struct timezone __user *, tz)
{
	if (likely(tv != NULL)) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (copy_to_user(tv, &ktv, sizeof(ktv)))
			return -EFAULT;
	}
	if (unlikely(tz != NULL)) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}
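/*
 * Example: a minimal userspace sketch of how the syscall above is
 * typically reached through the C library wrapper (glibc assumed); the
 * timezone argument is largely historical and may be passed as NULL.
 *
 *	#include <stdio.h>
 *	#include <sys/time.h>
 *
 *	int main(void)
 *	{
 *		struct timeval tv;
 *
 *		if (gettimeofday(&tv, NULL) != 0)
 *			return 1;
 *		printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
 *		return 0;
 *	}
 */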
/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives. Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours) or
 * compile in the timezone information into the kernel. Bad, bad....
 *
 *			- TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
static inline void warp_clock(void)
{
	if (sys_tz.tz_minuteswest != 0) {
		struct timespec adjust;

		persistent_clock_is_local = 1;
		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
		adjust.tv_nsec = 0;
		timekeeping_inject_offset(&adjust);
	}
}

/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */

int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
{
	static int firsttime = 1;
	int error = 0;

	if (tv && !timespec_valid(tv))
		return -EINVAL;

	error = security_settime(tv, tz);
	if (error)
		return error;

	if (tz) {
		sys_tz = *tz;
		update_vsyscall_tz();
		if (firsttime) {
			firsttime = 0;
			if (!tv)
				warp_clock();
		}
	}
	if (tv)
		return do_settimeofday(tv);
	return 0;
}

SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
		struct timezone __user *, tz)
{
	struct timeval user_tv;
	struct timespec new_ts;
	struct timezone new_tz;

	if (tv) {
		if (copy_from_user(&user_tv, tv, sizeof(*tv)))
			return -EFAULT;
		new_ts.tv_sec = user_tv.tv_sec;
		new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
	}
	if (tz) {
		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
	struct timex txc;	/* Local copy of parameter */
	int ret;

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
		return -EFAULT;
	ret = do_adjtimex(&txc);
	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}
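/*
 * Example: a minimal userspace sketch of a read-only query through the
 * path above; with modes == 0, do_adjtimex() changes nothing and the
 * return value is the clock state (TIME_OK, TIME_INS, ...), which is how
 * callers of adjtimex(2)/ntp_adjtime() typically poll the NTP status.
 *
 *	#include <stdio.h>
 *	#include <sys/timex.h>
 *
 *	int main(void)
 *	{
 *		struct timex tx = { .modes = 0 };	/* query only */
 *		int state = adjtimex(&tx);
 *
 *		printf("state=%d freq=%ld offset=%ld\n",
 *		       state, tx.freq, tx.offset);
 *		return state < 0;
 *	}
 */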
/**
 * current_fs_time - Return FS time
 * @sb: Superblock.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 */
struct timespec current_fs_time(struct super_block *sb)
{
	struct timespec now = current_kernel_time();
	return timespec_trunc(now, sb->s_time_gran);
}
EXPORT_SYMBOL(current_fs_time);

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
	return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);

unsigned int jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return (j + (HZ / USEC_PER_SEC) - 1) / (HZ / USEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
	return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. gran must be smaller than a second.
 * Always rounds down.
 *
 * This function should only be used for timestamps returned by
 * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
 * it doesn't handle the better resolution of the latter.
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
	/*
	 * Division is pretty slow so avoid it for common cases.
	 * Currently current_kernel_time() never returns better than
	 * jiffies resolution. Exploit that.
	 */
	if (gran <= jiffies_to_usecs(1) * 1000) {
		/* nothing */
	} else if (gran == 1000000000) {
		t.tv_nsec = 0;
	} else {
		t.tv_nsec -= t.tv_nsec % gran;
	}
	return t;
}
EXPORT_SYMBOL(timespec_trunc);
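/*
 * Example: for a filesystem whose s_time_gran is one second
 * (gran == 1000000000), timespec_trunc() turns { .tv_sec = 5,
 * .tv_nsec = 123456789 } into { 5, 0 }; with gran == 1 (nanosecond
 * resolution) the value is returned unchanged, since it can never be
 * finer than the jiffies resolution tested for in the first branch.
 */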
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08)
 */
unsigned long
mktime(const unsigned int year0, const unsigned int mon0,
       const unsigned int day, const unsigned int hour,
       const unsigned int min, const unsigned int sec)
{
	unsigned int mon = mon0, year = year0;

	/* 1..12 -> 11,12,1..10 */
	if (0 >= (int) (mon -= 2)) {
		mon += 12;	/* Puts Feb last since it has leap day */
		year -= 1;
	}

	return ((((unsigned long)
		  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
		  year*365 - 719499
	    )*24 + hour /* now have hours */
	  )*60 + min /* now have minutes */
	)*60 + sec; /* finally seconds */
}

EXPORT_SYMBOL(mktime);

/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative!
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation. See
		 * also __iter_div_u64_rem() in include/linux/time.h
		 */
		asm("" : "+rm"(nsec));
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		asm("" : "+rm"(nsec));
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
	struct timespec ts;
	s32 rem;

	if (!nsec)
		return (struct timespec) {0, 0};

	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
	if (unlikely(rem < 0)) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;

	return ts;
}
EXPORT_SYMBOL(ns_to_timespec);

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
	struct timespec ts = ns_to_timespec(nsec);
	struct timeval tv;

	tv.tv_sec = ts.tv_sec;
	tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

	return tv;
}
EXPORT_SYMBOL(ns_to_timeval);
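/*
 * Example: like set_normalized_timespec(), ns_to_timespec() keeps
 * tv_nsec in [0, NSEC_PER_SEC); for nsec == -1 the truncating division
 * leaves a remainder of -1, which is folded into
 * { .tv_sec = -1, .tv_nsec = 999999999 }, i.e. one nanosecond before
 * the epoch. ns_to_timeval(-1) correspondingly yields
 * { .tv_sec = -1, .tv_usec = 999999 }.
 */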
/*
 * When we convert to jiffies then we interpret incoming values
 * the following way:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor
 *
 * We must also be careful about 32-bit overflows.
 */
unsigned long msecs_to_jiffies(const unsigned int m)
{
	/*
	 * Negative value means infinite timeout:
	 */
	if ((int)m < 0)
		return MAX_JIFFY_OFFSET;

#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	/*
	 * HZ is equal to or smaller than 1000, and 1000 is a nice
	 * round multiple of HZ, divide with the factor between them,
	 * but round upwards:
	 */
	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	/*
	 * HZ is larger than 1000, and HZ is a nice round multiple of
	 * 1000 - simply multiply with the factor between them.
	 *
	 * But first make sure the multiplication result cannot
	 * overflow:
	 */
	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;

	return m * (HZ / MSEC_PER_SEC);
#else
	/*
	 * Generic case - multiply, round and divide. But first
	 * check that, if we are doing a net multiplication,
	 * we wouldn't overflow:
	 */
	if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;

	return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
		>> MSEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(msecs_to_jiffies);

unsigned long usecs_to_jiffies(const unsigned int u)
{
	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return u * (HZ / USEC_PER_SEC);
#else
	return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
		>> USEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries. I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
unsigned long
timespec_to_jiffies(const struct timespec *value)
{
	unsigned long sec = value->tv_sec;
	long nsec = value->tv_nsec + TICK_NSEC - 1;

	if (sec >= MAX_SEC_IN_JIFFIES) {
		sec = MAX_SEC_IN_JIFFIES;
		nsec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)nsec * NSEC_CONVERSION) >>
		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;

}
EXPORT_SYMBOL(timespec_to_jiffies);

void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u32 rem;
	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
				    NSEC_PER_SEC, &rem);
	value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec);
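/*
 * Example (assuming HZ == 1000, so TICK_NSEC == 1000000): a timespec of
 * { 0, 1 } converts to 1 jiffy, because the TICK_NSEC - 1 term above
 * rounds any partial tick up; converting that jiffy back with
 * jiffies_to_timespec() gives { 0, 1000000 }, so a round trip can grow
 * a value by up to one tick but never truncates a pending timeout to
 * zero.
 */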
/* Same for "timeval"
 *
 * Well, almost. The problem here is that the real system resolution is
 * in nanoseconds and the value being converted is in microseconds.
 * Also for some machines (those that use HZ = 1024, in particular),
 * there is a LARGE error in the tick size in microseconds.
 *
 * The solution we use is to do the rounding AFTER we convert the
 * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
 * Instruction-wise, this should cost only an additional add-with-carry
 * instruction over the way it was done above.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
	unsigned long sec = value->tv_sec;
	long usec = value->tv_usec;

	if (sec >= MAX_SEC_IN_JIFFIES) {
		sec = MAX_SEC_IN_JIFFIES;
		usec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
		 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u32 rem;

	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
				    NSEC_PER_SEC, &rem);
	value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	return x * (USER_HZ / HZ);
# else
	return x / (HZ / USER_HZ);
# endif
#else
	return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
	if (x >= ~0UL / (HZ / USER_HZ))
		return ~0UL;
	return x * (HZ / USER_HZ);
#else
	/* Don't worry about loss of precision here .. */
	if (x >= ~0UL / HZ * USER_HZ)
		return ~0UL;

	/* .. but do try to contain it here */
	return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
	x = div_u64(x, HZ / USER_HZ);
# else
	/* Nothing to do */
# endif
#else
	/*
	 * There are better ways that don't overflow early,
	 * but even this doesn't overflow in hundreds of years
	 * in 64 bits, so..
	 */
	x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
	return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
	return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
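/*
 * Example: USER_HZ is 100 on the common architectures, and since
 * NSEC_PER_SEC % 100 == 0 the first branch applies: nsec_to_clock_t()
 * is then a plain division by 10000000, so one second's worth of
 * nanoseconds reports as 100 clock ticks, independent of HZ.
 */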
/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for the scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
	return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
	/* overflow after 292 years if HZ = 1024 */
	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * Generic case - optimized for cases where HZ is a multiple of 3.
	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
	 */
	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for the scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
	return (unsigned long)nsecs_to_jiffies64(n);
}

/*
 * Add two timespec values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0)
 */
struct timespec timespec_add_safe(const struct timespec lhs,
				  const struct timespec rhs)
{
	struct timespec res;

	set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
				lhs.tv_nsec + rhs.tv_nsec);

	if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
		res.tv_sec = TIME_T_MAX;

	return res;
}
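/*
 * Example: with a 32-bit time_t, adding { 0x7fffffff, 0 } and { 1, 0 }
 * makes the summed tv_sec wrap negative; the comparison above notices
 * that the result is smaller than an operand and clamps res.tv_sec to
 * TIME_T_MAX instead of returning a time in the distant past.
 */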