Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

clocksource: Extract max nsec calculation into separate function

We need to calculate the same number in the clocksource code and
the sched_clock code, so extract this code into its own function.
We also drop the min_t and just use min() because the two types
are the same.

Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>

Authored by Stephen Boyd; committed by John Stultz.
87d8b9eb ad81f054

+32 -15
+2
include/linux/clocksource.h
··· 292 292 extern struct clocksource * __init __weak clocksource_default_clock(void); 293 293 extern void clocksource_mark_unstable(struct clocksource *cs); 294 294 295 + extern u64 296 + clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask); 295 297 extern void 296 298 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); 297 299
+30 -15
kernel/time/clocksource.c
··· 537 537 } 538 538 539 539 /** 540 - * clocksource_max_deferment - Returns max time the clocksource can be deferred 541 - * @cs: Pointer to clocksource 542 - * 540 + * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted 541 + * @mult: cycle to nanosecond multiplier 542 + * @shift: cycle to nanosecond divisor (power of two) 543 + * @maxadj: maximum adjustment value to mult (~11%) 544 + * @mask: bitmask for two's complement subtraction of non 64 bit counters 543 545 */ 544 - static u64 clocksource_max_deferment(struct clocksource *cs) 546 + u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask) 545 547 { 546 548 u64 max_nsecs, max_cycles; 547 549 548 550 /* 549 551 * Calculate the maximum number of cycles that we can pass to the 550 552 * cyc2ns function without overflowing a 64-bit signed result. The 551 - * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj) 553 + * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj) 552 554 * which is equivalent to the below. 553 - * max_cycles < (2^63)/(cs->mult + cs->maxadj) 554 - * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj))) 555 - * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj)) 556 - * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj)) 557 - * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj)) 555 + * max_cycles < (2^63)/(mult + maxadj) 556 + * max_cycles < 2^(log2((2^63)/(mult + maxadj))) 557 + * max_cycles < 2^(log2(2^63) - log2(mult + maxadj)) 558 + * max_cycles < 2^(63 - log2(mult + maxadj)) 559 + * max_cycles < 1 << (63 - log2(mult + maxadj)) 558 560 * Please note that we add 1 to the result of the log2 to account for 559 561 * any rounding errors, ensure the above inequality is satisfied and 560 562 * no overflow will occur. 
561 563 */ 562 - max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1)); 564 + max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1)); 563 565 564 566 /* 565 567 * The actual maximum number of cycles we can defer the clocksource is 566 - * determined by the minimum of max_cycles and cs->mask. 568 + * determined by the minimum of max_cycles and mask. 567 569 * Note: Here we subtract the maxadj to make sure we don't sleep for 568 570 * too long if there's a large negative adjustment. 569 571 */ 570 - max_cycles = min_t(u64, max_cycles, (u64) cs->mask); 571 - max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj, 572 - cs->shift); 572 + max_cycles = min(max_cycles, mask); 573 + max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift); 573 574 575 + return max_nsecs; 576 + } 577 + 578 + /** 579 + * clocksource_max_deferment - Returns max time the clocksource can be deferred 580 + * @cs: Pointer to clocksource 581 + * 582 + */ 583 + static u64 clocksource_max_deferment(struct clocksource *cs) 584 + { 585 + u64 max_nsecs; 586 + 587 + max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj, 588 + cs->mask); 574 589 /* 575 590 * To ensure that the clocksource does not wrap whilst we are idle, 576 591 * limit the time the clocksource can be deferred by 12.5%. Please