/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#include <linux/sched.h>

/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
extern void vtime_account_softirq(struct task_struct *tsk);
extern void vtime_account_hardirq(struct task_struct *tsk);
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
static inline void vtime_account_softirq(struct task_struct *tsk) { }
static inline void vtime_account_hardirq(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)

static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

static __always_inline void vtime_account_guest_enter(void)
{
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
}

#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}

static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	return context_tracking_enabled_cpu(cpu);
}

static inline bool vtime_accounting_enabled_this_cpu(void)
{
	return context_tracking_enabled_this_cpu();
}

extern void vtime_task_switch_generic(struct task_struct *prev);

static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_task_switch_generic(prev);
}

static __always_inline void vtime_account_guest_enter(void)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

static __always_inline void vtime_account_guest_enter(void)
{
	current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
	current->flags &= ~PF_VCPU;
}

#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif

static inline void account_softirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, SOFTIRQ_OFFSET);
	irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
}

static inline void account_softirq_exit(struct task_struct *tsk)
{
	vtime_account_softirq(tsk);
	irqtime_account_irq(tsk, 0);
}

static inline void account_hardirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, HARDIRQ_OFFSET);
	irqtime_account_irq(tsk, HARDIRQ_OFFSET);
}

static inline void account_hardirq_exit(struct task_struct *tsk)
{
	vtime_account_hardirq(tsk);
	irqtime_account_irq(tsk, 0);
}

#endif /* _LINUX_KERNEL_VTIME_H */
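
For context, a minimal sketch of how a virtualization host would bracket guest execution with the helpers declared above. Only the vtime_account_guest_enter()/vtime_account_guest_exit() calls come from this header; the surrounding function and run_guest_hw() are hypothetical placeholders.

/* Illustrative sketch only -- not part of vtime.h. */
static void example_vcpu_run(void)
{
	/*
	 * Account the time spent so far and mark the task as running
	 * guest code: depending on the accounting config this either
	 * sets PF_VCPU directly or goes through vtime_guest_enter().
	 */
	vtime_account_guest_enter();

	run_guest_hw();		/* hypothetical: actually execute the vCPU */

	/*
	 * Account the elapsed interval as guest time and clear the
	 * PF_VCPU marker again.
	 */
	vtime_account_guest_exit();
}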