Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/core, sched/x86: Kill thread_info::saved_preempt_count

With the introduction of the context switch preempt_count invariant,
and the demise of PREEMPT_ACTIVE, it's pointless to save/restore the
per-cpu preemption count; it must always be 2.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Peter Zijlstra and committed by
Ingo Molnar
d87b7a33 da7142e2

+1 -22
+1 -4
arch/x86/include/asm/preempt.h
··· 30 30 /* 31 31 * must be macros to avoid header recursion hell 32 32 */ 33 - #define init_task_preempt_count(p) do { \ 34 - task_thread_info(p)->saved_preempt_count = FORK_PREEMPT_COUNT; \ 35 - } while (0) 33 + #define init_task_preempt_count(p) do { } while (0) 36 34 37 35 #define init_idle_preempt_count(p, cpu) do { \ 38 - task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \ 39 36 per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \ 40 37 } while (0) 41 38
-2
arch/x86/include/asm/thread_info.h
··· 57 57 __u32 flags; /* low level flags */ 58 58 __u32 status; /* thread synchronous flags */ 59 59 __u32 cpu; /* current CPU */ 60 - int saved_preempt_count; 61 60 mm_segment_t addr_limit; 62 61 void __user *sysenter_return; 63 62 unsigned int sig_on_uaccess_error:1; ··· 68 69 .task = &tsk, \ 69 70 .flags = 0, \ 70 71 .cpu = 0, \ 71 - .saved_preempt_count = INIT_PREEMPT_COUNT, \ 72 72 .addr_limit = KERNEL_DS, \ 73 73 } 74 74
-8
arch/x86/kernel/process_32.c
··· 280 280 set_iopl_mask(next->iopl); 281 281 282 282 /* 283 - * If it were not for PREEMPT_ACTIVE we could guarantee that the 284 - * preempt_count of all tasks was equal here and this would not be 285 - * needed. 286 - */ 287 - task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count); 288 - this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count); 289 - 290 - /* 291 283 * Now maybe handle debug registers and/or IO bitmaps 292 284 */ 293 285 if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
-8
arch/x86/kernel/process_64.c
··· 401 401 */ 402 402 this_cpu_write(current_task, next_p); 403 403 404 - /* 405 - * If it were not for PREEMPT_ACTIVE we could guarantee that the 406 - * preempt_count of all tasks was equal here and this would not be 407 - * needed. 408 - */ 409 - task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count); 410 - this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count); 411 - 412 404 /* Reload esp0 and ss1. This changes current_thread_info(). */ 413 405 load_sp0(tss, next); 414 406