/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif


struct task_struct;

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)

static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}

static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	return context_tracking_enabled_cpu(cpu);
}

static inline bool vtime_accounting_enabled_this_cpu(void)
{
	return context_tracking_enabled_this_cpu();
}

extern void vtime_task_switch_generic(struct task_struct *prev);

static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_task_switch_generic(prev);
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline bool vtime_accounting_enabled_cpu(int cpu) { return false; }
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

#endif

/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline void vtime_account_kernel(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_kernel(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

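/*
 * Helpers for the irq entry/exit paths. They fan out to vtime-based
 * accounting (CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) and to IRQ time accounting
 * (CONFIG_IRQ_TIME_ACCOUNTING); each callee above compiles to a no-op when
 * its option is disabled.
 */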
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}

static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}

#endif /* _LINUX_KERNEL_VTIME_H */
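
/*
 * Illustrative usage sketch (not part of the header above): a rough,
 * simplified picture of how the irq entry/exit code is expected to drive
 * account_irq_enter_time()/account_irq_exit_time(). The function names
 * example_irq_enter()/example_irq_exit() are hypothetical stand-ins for the
 * real irq_enter()/irq_exit() paths in kernel/softirq.c; the bodies are
 * assumptions for illustration, not the actual implementation.
 */
#include <linux/sched.h>
#include <linux/vtime.h>

static void example_irq_enter(void)
{
	/*
	 * Before the handler runs, close out the time accrued so far by the
	 * interrupted context (task, idle, ...) so it is charged to that
	 * context rather than to the irq.
	 */
	account_irq_enter_time(current);
	/* ... preempt_count/HARDIRQ bookkeeping would follow here ... */
}

static void example_irq_exit(void)
{
	/*
	 * On the way out, charge the time spent in the handler to
	 * hardirq/softirq cputime (see vtime_account_irq_exit() above).
	 */
	account_irq_exit_time(current);
	/* ... softirq processing and preempt_count bookkeeping ... */
}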