/* Linux kernel header as of v3.2-rc4 (web-viewer residue removed) */
/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
#include <asm/clocksource.h>
#endif

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};

/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * Applies the counter's (mult, shift) scaling to an elapsed cycle count:
 * ns = (cycles * mult) >> shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *			plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *			time base as values returned by
 *			timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
 * @maxadj:		maximum adjustment value to mult (~11%)
 * @flags:		flags describing special properties
 * @archdata:		arch-specific data
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
 */
struct clocksource {
	/*
	 * Hotpath data, fits in a single cache line when the
	 * clocksource itself is cacheline aligned.
	 */
	cycle_t (*read)(struct clocksource *cs);
	/* NOTE(review): cycle_last appears to be maintained by the
	 * timekeeping core rather than by drivers — confirm before use. */
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;
	u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
	struct arch_clocksource_data archdata;
#endif

	const char *name;
	struct list_head list;
	int rating;
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	unsigned long flags;
	void (*suspend)(struct clocksource *cs);
	void (*resume)(struct clocksource *cs);

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t cs_last;
	cycle_t wd_last;
#endif
} ____cacheline_aligned;

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a
 * clocksource multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/* khz = cyc/(Million ns)
	 * mult/2^shift  = ns/cyc
	 * mult = ns/cyc * 2^shift
	 * mult = 1Million/khz * 2^shift
	 * mult = 1000000 * 2^shift / khz
	 * mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/* hz = cyc/(Billion ns)
	 * mult/2^shift  = ns/cyc
	 * mult = ns/cyc * 2^shift
	 * mult = 1Billion/hz * 2^shift
	 * mult = 1000000000 * 2^shift / hz
	 * mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 * @cycles:	elapsed cycle count
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}


extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);

/* Register a clocksource whose frequency is given in Hz (scale 1). */
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

/* Register a clocksource whose frequency is given in kHz (scale 1000). */
static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}

/* Update an already-registered clocksource's frequency, given in Hz. */
static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_updatefreq_scale(cs, 1, hz);
}

/* Update an already-registered clocksource's frequency, given in kHz. */
static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_updatefreq_scale(cs, 1000, khz);
}

/*
 * Fill in cs->mult and cs->shift for a counter running at @freq Hz,
 * conversion valid for intervals up to @minsec seconds.
 */
static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
	return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				      NSEC_PER_SEC, minsec);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
/* No vsyscall time page on this configuration: both hooks are no-ops. */
static inline void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

extern void timekeeping_notify(struct clocksource *clock);

/* Generic read callbacks for memory-mapped counters (up-/down-counting). */
extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
extern cycle_t clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
	unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));

extern int clocksource_i8253_init(void);

#endif /* _LINUX_CLOCKSOURCE_H */