/* include/linux/vtime.h — Linux v4.10 */
/*
 * Virtual CPU time ("vtime") accounting API.
 *
 * Declares the hooks the scheduler and the IRQ entry/exit paths use to
 * charge cputime precisely instead of relying only on the periodic tick.
 * The implementation behind each hook is selected at build time:
 *
 *   CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: arch code accounts at every
 *       kernel/user boundary; vtime is unconditionally enabled.
 *   CONFIG_VIRT_CPU_ACCOUNTING_GEN: generic implementation driven by
 *       context tracking; vtime may only be active on some CPUs.
 *   Neither: every hook compiles down to an empty inline stub.
 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>	/* arch-provided overrides for native accounting */
#endif


/* Forward declaration: this header only ever handles pointers to it. */
struct task_struct;

/*
 * vtime_accounting_cpu_enabled() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
/* Native accounting hooks every boundary, so vtime is always on. */
static inline bool vtime_accounting_cpu_enabled(void) { return true; }
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_is_enabled();
}

/*
 * True only when vtime is enabled globally and the per-CPU context
 * tracking state is enabled as well (presumably for the local CPU —
 * confirm against context_tracking_cpu_is_enabled()'s definition).
 */
static inline bool vtime_accounting_cpu_enabled(void)
{
	if (vtime_accounting_enabled()) {
		if (context_tracking_cpu_is_enabled())
			return true;
	}

	return false;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline bool vtime_accounting_cpu_enabled(void) { return false; }
#endif


/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
/* The architecture supplies its own context-switch accounting hook. */
extern void vtime_task_switch(struct task_struct *prev);
#else
extern void vtime_common_task_switch(struct task_struct *prev);
/*
 * Context-switch hook: defer to the common implementation, but only
 * when vtime accounting is actually active on this CPU.
 */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_cpu_enabled())
		vtime_common_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
extern void vtime_account_user(struct task_struct *tsk);

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

/* No vtime accounting configured: the hooks collapse to no-ops. */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_user(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);

/* Leaving user mode: flush the user cputime accumulated so far. */
static inline void vtime_user_exit(struct task_struct *tsk)
{
	vtime_account_user(tsk);
}
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_system(tsk);
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

/*
 * Combined IRQ-entry hook: feed both the vtime engine and the IRQ time
 * accounting engine; whichever is not configured is a no-op stub.
 */
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}

/* Combined IRQ-exit hook, mirror of account_irq_enter_time(). */
static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}

#endif /* _LINUX_KERNEL_VTIME_H */