/* include/linux/context_tracking.h — snapshot as of Linux v6.11 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING_USER
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled. */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);

extern void user_enter_callable(void);
extern void user_exit_callable(void);

/*
 * Tell context tracking that this CPU is about to enter userspace.
 * No-op unless the context-tracking static key is enabled.
 */
static inline void user_enter(void)
{
	if (context_tracking_enabled())
		ct_user_enter(CONTEXT_USER);

}
/* Counterpart of user_enter(): the CPU is re-entering the kernel from userspace. */
static inline void user_exit(void)
{
	if (context_tracking_enabled())
		ct_user_exit(CONTEXT_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_USER);

}
static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_USER);
}

/*
 * On kernel entry from an exception, record and leave the previous
 * context-tracking state (exiting userspace tracking if we were in
 * CONTEXT_USER or CONTEXT_GUEST).  Returns the previous state so the
 * caller can hand it back to exception_exit().  Returns 0 (and does
 * nothing) when context tracking is off, or when the architecture
 * tracks the state off-stack (CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
 * and this save/restore dance is unnecessary.
 */
static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = __ct_state();
	if (prev_ctx != CONTEXT_KERNEL)
		ct_user_exit(prev_ctx);

	return prev_ctx;
}

/*
 * Restore the context-tracking state saved by the pairing
 * exception_enter() call on exception return.
 */
static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			ct_user_enter(prev_ctx);
	}
}

/*
 * Enter guest context for context tracking purposes.  The return value
 * reports whether context tracking is active on this CPU — presumably
 * so the caller (KVM) can decide whether to do its own vtime handling;
 * verify against callers.
 */
static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_GUEST);

	return context_tracking_enabled_this_cpu();
}

/* Counterpart of context_tracking_guest_enter(). */
static __always_inline bool context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_GUEST);

	return context_tracking_enabled_this_cpu();
}

/* Warn on @cond, but only when context tracking is enabled at all. */
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#else
/* !CONFIG_CONTEXT_TRACKING_USER: everything degrades to a no-op. */
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static __always_inline bool context_tracking_guest_exit(void) { return false; }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}

/*
 * Increment the current CPU's context_tracking structure's ->state field
 * with ordering. Return the new value.
 */
static __always_inline unsigned long ct_state_inc(int incby)
{
	return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}

/*
 * If this CPU is in an RCU extended quiescent state, temporarily pull it
 * out so that warning/reporting machinery (which itself relies on RCU)
 * can run.  Returns true when it did so; the caller must pass that value
 * to warn_rcu_exit().  Leaves preemption disabled (notrace) either way.
 */
static __always_inline bool warn_rcu_enter(void)
{
	bool ret = false;

	/*
	 * Horrible hack to shut up recursive RCU isn't watching fail since
	 * lots of the actual reporting also relies on RCU.
	 */
	preempt_disable_notrace();
	if (rcu_dynticks_curr_cpu_in_eqs()) {
		ret = true;
		ct_state_inc(RCU_DYNTICKS_IDX);
	}

	return ret;
}

/*
 * Pair of warn_rcu_enter().  The second RCU_DYNTICKS_IDX increment
 * completes the enter/exit pair, putting the CPU back into the EQS it
 * was in before warn_rcu_enter() (only when @rcu is true, i.e. the
 * enter side actually changed the state).
 */
static __always_inline void warn_rcu_exit(bool rcu)
{
	if (rcu)
		ct_state_inc(RCU_DYNTICKS_IDX);
	preempt_enable_notrace();
}

#else
/* !CONFIG_CONTEXT_TRACKING_IDLE: idle/EQS hooks become no-ops. */
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }

static __always_inline bool warn_rcu_enter(void) { return false; }
static __always_inline void warn_rcu_exit(bool rcu) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */

#endif