/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};

/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);
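
/*
 * Illustrative sketch, not part of the original header: how a driver
 * might wrap a free running hardware counter in a cyclecounter and a
 * timecounter.  my_hw_counter_reg, MY_HW_MULT and MY_HW_SHIFT are
 * hypothetical driver-specific placeholders.
 */
#if 0	/* example only, never compiled */
static void __iomem *my_hw_counter_reg;	/* hypothetical ioremapped register */

static cycle_t my_hw_read_counter(const struct cyclecounter *cc)
{
	return (cycle_t)readl(my_hw_counter_reg);
}

static struct cyclecounter my_cc = {
	.read	= my_hw_read_counter,
	.mask	= CLOCKSOURCE_MASK(32),	/* 32 bit counter, see macro below */
	.mult	= MY_HW_MULT,		/* e.g. from clocks_calc_mult_shift() */
	.shift	= MY_HW_SHIFT,
};

static struct timecounter my_tc;

static void my_hw_start_timecounter(void)
{
	/* count nanoseconds starting from the current wall clock time */
	timecounter_init(&my_tc, &my_cc, ktime_to_ns(ktime_get_real()));
}
#endif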

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);
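
/*
 * Illustrative sketch, not part of the original header: converting a
 * raw counter value latched by hardware (e.g. a packet time stamp)
 * into the nanosecond time base of the my_tc timecounter from the
 * sketch above, and keeping the timecounter from missing a counter
 * wrap by reading it periodically.
 */
#if 0	/* example only, never compiled */
static u64 my_hw_stamp_to_ns(cycle_t hw_stamp)
{
	return timecounter_cyc2time(&my_tc, hw_stamp);
}

/* must be called more often than the hardware counter wraps around */
static void my_hw_overflow_check(void)
{
	timecounter_read(&my_tc);
}
#endif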

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @cycle_last:		cycle value at the last update, maintained by the
 *			timekeeping core
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
 */
struct clocksource {
	/*
	 * Hotpath data, fits in a single cache line when the
	 * clocksource itself is cacheline aligned.
	 */
	cycle_t (*read)(struct clocksource *cs);
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;

#ifdef CONFIG_IA64
	void *fsys_mmio;	/* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	do { } while (0)
#endif
	const char *name;
	struct list_head list;
	int rating;
	cycle_t (*vread)(void);
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	unsigned long flags;
	void (*suspend)(struct clocksource *cs);
	void (*resume)(struct clocksource *cs);

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t cs_last;
	cycle_t wd_last;
#endif
} ____cacheline_aligned;

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}
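
/*
 * Worked example, not part of the original header: a hypothetical
 * 10 MHz counter with a shift of 22 gives
 *
 *	mult = (1000000000 << 22) / 10000000 = 419430400
 *
 * so one second worth of cycles converts back exactly:
 *
 *	(10000000 * 419430400) >> 22 = 1000000000 ns
 */
#if 0	/* example only, never compiled */
static void my_clocksource_set_mult_shift(struct clocksource *cs)
{
	cs->shift = 22;
	cs->mult = clocksource_hz2mult(10000000, cs->shift);	/* 419430400 */
}
#endif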

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 * @cycles:	Cycles
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}


extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);

static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}

static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_updatefreq_scale(cs, 1, hz);
}

static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_updatefreq_scale(cs, 1000, khz);
}

static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
	return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				      NSEC_PER_SEC, minsec);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
		struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
		struct clocksource *c, u32 mult)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

extern void timekeeping_notify(struct clocksource *clock);

extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
extern cycle_t clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
	unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));

extern int clocksource_i8253_init(void);

#endif /* _LINUX_CLOCKSOURCE_H */
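
/*
 * Illustrative sketch, not part of the original header and placed after
 * the include guard only for readability; it would normally live in a
 * driver.  It registers a hypothetical free running 32 bit, 24 MHz MMIO
 * counter; my_counter_base is a placeholder for an ioremapped address.
 */
#if 0	/* example only, never compiled */
static void __iomem *my_counter_base;

static cycle_t my_read_counter(struct clocksource *cs)
{
	return (cycle_t)readl(my_counter_base);
}

static struct clocksource my_clocksource = {
	.name	= "my-counter",
	.rating	= 300,		/* reasonably fast and accurate */
	.read	= my_read_counter,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_clocksource_init(void)
{
	/* the core computes mult/shift from the frequency */
	return clocksource_register_hz(&my_clocksource, 24000000);
}
#endif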