arch/tile: Use separate, better minsec values for clocksource and sched_clock.

We were using the same 5-sec minsec for the clocksource and sched_clock
that we were using for the clock_event_device. For the clock_event_device
that's exactly right since it has a short maximum countdown time.
But for sched_clock we want to avoid wraparound when converting from
ticks to nsec over a much longer window, so we force a shift of 10.
And for clocksource it seems dodgy to use a 5-sec minsec as well, so we
copy some other platforms and force a shift of 22.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+19 -14
arch/tile/kernel/time.c
···
 36  36  /* How many cycles per second we are running at. */
 37  37  static cycles_t cycles_per_sec __write_once;
 38  38
 39     - /*
 40     -  * We set up shift and multiply values with a minsec of five seconds,
 41     -  * since our timer counter counts down 31 bits at a frequency of
 42     -  * no less than 500 MHz.  See @minsec for clocks_calc_mult_shift().
 43     -  * We could use a different value for the 64-bit free-running
 44     -  * cycle counter, but we use the same one for consistency, and since
 45     -  * we will be reasonably precise with this value anyway.
 46     -  */
 47     - #define TILE_MINSEC 5
 48     -
 49  39  cycles_t get_clock_rate(void)
 50  40  {
 51  41  	return cycles_per_sec;
···
 58  68  }
 59  69  #endif
 60  70
     71 + /*
     72 +  * We use a relatively small shift value so that sched_clock()
     73 +  * won't wrap around very often.
     74 +  */
     75 + #define SCHED_CLOCK_SHIFT 10
     76 +
     77 + static unsigned long sched_clock_mult __write_once;
     78 +
 61  79  static cycles_t clocksource_get_cycles(struct clocksource *cs)
 62  80  {
 63  81  	return get_cycles();
···
 76  78  	.rating = 300,
 77  79  	.read = clocksource_get_cycles,
 78  80  	.mask = CLOCKSOURCE_MASK(64),
     81 + 	.shift = 22,   /* typical value, e.g. x86 tsc uses this */
 79  82  	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 80  83  };
···
 87  88  void __init setup_clock(void)
 88  89  {
 89  90  	cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
 90     - 	clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec,
 91     - 				    TILE_MINSEC);
     91 + 	sched_clock_mult =
     92 + 		clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
     93 + 	cycle_counter_cs.mult =
     94 + 		clocksource_hz2mult(cycles_per_sec, cycle_counter_cs.shift);
 92  95  }
 93  96
 94  97  void __init calibrate_delay(void)
···
118 117   * counter, plus bit 31, which signifies that the counter has wrapped
119 118   * from zero to (2**31) - 1.  The INT_TILE_TIMER interrupt will be
120 119   * raised as long as bit 31 is set.
    120 +  *
    121 +  * The TILE_MINSEC value represents the largest range of real-time
    122 +  * we can possibly cover with the timer, based on MAX_TICK combined
    123 +  * with the slowest reasonable clock rate we might run at.
121 124   */
122 125
123 126  #define MAX_TICK 0x7fffffff   /* we have 31 bits of countdown timer */
    127 + #define TILE_MINSEC 5         /* timer covers no more than 5 seconds */
124 128
125 129  static int tile_timer_set_next_event(unsigned long ticks,
126 130  				     struct clock_event_device *evt)
···
217 211  unsigned long long sched_clock(void)
218 212  {
219 213  	return clocksource_cyc2ns(get_cycles(),
220     - 				  cycle_counter_cs.mult,
221     - 				  cycle_counter_cs.shift);
    214 + 				  sched_clock_mult, SCHED_CLOCK_SHIFT);
222 215  }
223 216
224 217  int setup_profiling_timer(unsigned int multiplier)