Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/core: Stop setting PREEMPT_ACTIVE

Now that nothing tests for PREEMPT_ACTIVE anymore, stop setting it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra and committed by Ingo Molnar.
3d8f74dd c73464b1

+6 -25
-12
include/linux/preempt.h
··· 146 146 #define preempt_count_inc() preempt_count_add(1) 147 147 #define preempt_count_dec() preempt_count_sub(1) 148 148 149 - #define preempt_active_enter() \ 150 - do { \ 151 - preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ 152 - barrier(); \ 153 - } while (0) 154 - 155 - #define preempt_active_exit() \ 156 - do { \ 157 - barrier(); \ 158 - preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ 159 - } while (0) 160 - 161 149 #ifdef CONFIG_PREEMPT_COUNT 162 150 163 151 #define preempt_disable() \
+6 -13
kernel/sched/core.c
··· 3201 3201 static void __sched notrace preempt_schedule_common(void) 3202 3202 { 3203 3203 do { 3204 - preempt_active_enter(); 3204 + preempt_disable(); 3205 3205 __schedule(true); 3206 - preempt_active_exit(); 3206 + sched_preempt_enable_no_resched(); 3207 3207 3208 3208 /* 3209 3209 * Check again in case we missed a preemption opportunity ··· 3254 3254 return; 3255 3255 3256 3256 do { 3257 - /* 3258 - * Use raw __prempt_count() ops that don't call function. 3259 - * We can't call functions before disabling preemption which 3260 - * disarm preemption tracing recursions. 3261 - */ 3262 - __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); 3263 - barrier(); 3257 + preempt_disable_notrace(); 3264 3258 /* 3265 3259 * Needs preempt disabled in case user_exit() is traced 3266 3260 * and the tracer calls preempt_enable_notrace() causing ··· 3264 3270 __schedule(true); 3265 3271 exception_exit(prev_ctx); 3266 3272 3267 - barrier(); 3268 - __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); 3273 + preempt_enable_no_resched_notrace(); 3269 3274 } while (need_resched()); 3270 3275 } 3271 3276 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); ··· 3287 3294 prev_state = exception_enter(); 3288 3295 3289 3296 do { 3290 - preempt_active_enter(); 3297 + preempt_disable(); 3291 3298 local_irq_enable(); 3292 3299 __schedule(true); 3293 3300 local_irq_disable(); 3294 - preempt_active_exit(); 3301 + sched_preempt_enable_no_resched(); 3295 3302 } while (need_resched()); 3296 3303 3297 3304 exception_exit(prev_state);