Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

context_tracking: avoid irq_save/irq_restore on guest entry and exit

guest_enter and guest_exit must be called with interrupts disabled,
since they take the vtime_seqlock with write_seq{lock,unlock}.
Therefore, it is not necessary to check for exceptions, nor to
save/restore the IRQ state, when context tracking functions are
called by guest_enter and guest_exit.

Split the body of context_tracking_enter and context_tracking_exit
out to __-prefixed functions, and use them from KVM.

Rik van Riel has measured this to speed up a tight vmentry/vmexit
loop by about 2%.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Tested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

+44 -28
+6 -2
include/linux/context_tracking.h
··· 10 10 #ifdef CONFIG_CONTEXT_TRACKING 11 11 extern void context_tracking_cpu_set(int cpu); 12 12 13 + /* Called with interrupts disabled. */ 14 + extern void __context_tracking_enter(enum ctx_state state); 15 + extern void __context_tracking_exit(enum ctx_state state); 16 + 13 17 extern void context_tracking_enter(enum ctx_state state); 14 18 extern void context_tracking_exit(enum ctx_state state); 15 19 extern void context_tracking_user_enter(void); ··· 92 88 current->flags |= PF_VCPU; 93 89 94 90 if (context_tracking_is_enabled()) 95 - context_tracking_enter(CONTEXT_GUEST); 91 + __context_tracking_enter(CONTEXT_GUEST); 96 92 } 97 93 98 94 static inline void guest_exit(void) 99 95 { 100 96 if (context_tracking_is_enabled()) 101 - context_tracking_exit(CONTEXT_GUEST); 97 + __context_tracking_exit(CONTEXT_GUEST); 102 98 103 99 if (vtime_accounting_enabled()) 104 100 vtime_guest_exit(current);
+38 -26
kernel/context_tracking.c
··· 58 58 * instructions to execute won't use any RCU read side critical section 59 59 * because this function sets RCU in extended quiescent state. 60 60 */ 61 - void context_tracking_enter(enum ctx_state state) 61 + void __context_tracking_enter(enum ctx_state state) 62 62 { 63 - unsigned long flags; 64 - 65 - /* 66 - * Some contexts may involve an exception occuring in an irq, 67 - * leading to that nesting: 68 - * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() 69 - * This would mess up the dyntick_nesting count though. And rcu_irq_*() 70 - * helpers are enough to protect RCU uses inside the exception. So 71 - * just return immediately if we detect we are in an IRQ. 72 - */ 73 - if (in_interrupt()) 74 - return; 75 - 76 63 /* Kernel threads aren't supposed to go to userspace */ 77 64 WARN_ON_ONCE(!current->mm); 78 65 79 - local_irq_save(flags); 80 66 if (!context_tracking_recursion_enter()) 81 - goto out_irq_restore; 67 + return; 82 68 83 69 if ( __this_cpu_read(context_tracking.state) != state) { 84 70 if (__this_cpu_read(context_tracking.active)) { ··· 97 111 __this_cpu_write(context_tracking.state, state); 98 112 } 99 113 context_tracking_recursion_exit(); 100 - out_irq_restore: 114 + } 115 + NOKPROBE_SYMBOL(__context_tracking_enter); 116 + EXPORT_SYMBOL_GPL(__context_tracking_enter); 117 + 118 + void context_tracking_enter(enum ctx_state state) 119 + { 120 + unsigned long flags; 121 + 122 + /* 123 + * Some contexts may involve an exception occuring in an irq, 124 + * leading to that nesting: 125 + * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() 126 + * This would mess up the dyntick_nesting count though. And rcu_irq_*() 127 + * helpers are enough to protect RCU uses inside the exception. So 128 + * just return immediately if we detect we are in an IRQ. 
129 + */ 130 + if (in_interrupt()) 131 + return; 132 + 133 + local_irq_save(flags); 134 + __context_tracking_enter(state); 101 135 local_irq_restore(flags); 102 136 } 103 137 NOKPROBE_SYMBOL(context_tracking_enter); ··· 141 135 * This call supports re-entrancy. This way it can be called from any exception 142 136 * handler without needing to know if we came from userspace or not. 143 137 */ 144 - void context_tracking_exit(enum ctx_state state) 138 + void __context_tracking_exit(enum ctx_state state) 145 139 { 146 - unsigned long flags; 147 - 148 - if (in_interrupt()) 149 - return; 150 - 151 - local_irq_save(flags); 152 140 if (!context_tracking_recursion_enter()) 153 - goto out_irq_restore; 141 + return; 154 142 155 143 if (__this_cpu_read(context_tracking.state) == state) { 156 144 if (__this_cpu_read(context_tracking.active)) { ··· 161 161 __this_cpu_write(context_tracking.state, CONTEXT_KERNEL); 162 162 } 163 163 context_tracking_recursion_exit(); 164 - out_irq_restore: 164 + } 165 + NOKPROBE_SYMBOL(__context_tracking_exit); 166 + EXPORT_SYMBOL_GPL(__context_tracking_exit); 167 + 168 + void context_tracking_exit(enum ctx_state state) 169 + { 170 + unsigned long flags; 171 + 172 + if (in_interrupt()) 173 + return; 174 + 175 + local_irq_save(flags); 176 + __context_tracking_exit(state); 165 177 local_irq_restore(flags); 166 178 } 167 179 NOKPROBE_SYMBOL(context_tracking_exit);