include/linux/sched/clock.h at v4.11
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs; using it (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_init_late(void)
{
}

static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern void sched_clock_init_late(void);
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

/*
 * As outlined in kernel/sched/clock.c, these provide a fast, high-resolution
 * nanosecond time source that is monotonic per CPU argument and has bounded
 * drift between CPUs.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in to IRQ time accounting based on
 * sched_clock.  The explicit opt-in avoids a performance penalty on
 * systems with slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

#endif /* _LINUX_SCHED_CLOCK_H */
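
/*
 * Hypothetical usage sketch, not part of this header: one way kernel code
 * might time a short, non-sleeping operation with local_clock().  The
 * helper name measure_op_ns() and its callback parameters are illustrative
 * only.  Preemption is disabled so both readings come from the same CPU;
 * per the BIG FAT WARNING above, a delta taken across different CPUs is
 * not meaningful, since cpu_clock(i) vs cpu_clock(j) can appear to go
 * backwards.
 */
#include <linux/preempt.h>
#include <linux/sched/clock.h>
#include <linux/types.h>

static u64 measure_op_ns(void (*op)(void *data), void *data)
{
	u64 start, delta;

	preempt_disable();		/* keep both readings on one CPU */
	start = local_clock();
	op(data);
	delta = local_clock() - start;
	preempt_enable();

	return delta;			/* elapsed nanoseconds on this CPU */
}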
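
/*
 * Hypothetical sketch, not part of this header: an architecture whose
 * sched_clock() is fast enough may opt in to IRQ time accounting from its
 * timer setup code.  my_arch_timer_init() is an illustrative name; the
 * stub above makes the call a no-op when CONFIG_IRQ_TIME_ACCOUNTING is
 * not set.
 */
#include <linux/init.h>
#include <linux/sched/clock.h>

void __init my_arch_timer_init(void)
{
	/* ... clocksource / sched_clock setup would go here ... */
	enable_sched_clock_irqtime();
}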