Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/smp: Move cpu number to percpu hot section

No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-5-brgerst@gmail.com

Authored by Brian Gerst; committed by Ingo Molnar.
01c7bc51 46e8fff6

+10 -7
-1
arch/x86/include/asm/current.h
···
 14  14
 15  15   struct pcpu_hot {
 16  16   	struct task_struct	*current_task;
 17   -  -	int			cpu_number;
 18  17   #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 19  18   	u64			call_depth;
 20  19   #endif
+4 -3
arch/x86/include/asm/smp.h
···
  6   6   #include <linux/thread_info.h>
  7   7
  8   8   #include <asm/cpumask.h>
  9   -  -#include <asm/current.h>
      9  +
     10  +DECLARE_PER_CPU_CACHE_HOT(int, cpu_number);
 10  11
 11  12   DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 12  13   DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
···
133 132    * This function is needed by all SMP systems. It must _always_ be valid
134 133    * from the initial startup.
135 134    */
136   -  -#define raw_smp_processor_id()  this_cpu_read(pcpu_hot.cpu_number)
137   -  -#define __smp_processor_id()    __this_cpu_read(pcpu_hot.cpu_number)
    135  +#define raw_smp_processor_id()  this_cpu_read(cpu_number)
    136  +#define __smp_processor_id()    __this_cpu_read(cpu_number)
138 137
139 138   static inline struct cpumask *cpu_llc_shared_mask(int cpu)
140 139   {
+4 -1
arch/x86/kernel/setup_percpu.c
···
 23  23   #include <asm/cpumask.h>
 24  24   #include <asm/cpu.h>
 25  25
     26  +DEFINE_PER_CPU_CACHE_HOT(int, cpu_number);
     27  +EXPORT_PER_CPU_SYMBOL(cpu_number);
     28  +
 26  29   DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 27  30   EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 28  31
···
164 161   	for_each_possible_cpu(cpu) {
165 162   		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
166 163   		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
167   -  -		per_cpu(pcpu_hot.cpu_number, cpu) = cpu;
    164  +		per_cpu(cpu_number, cpu) = cpu;
168 165   		setup_percpu_segment(cpu);
169 166   		/*
170 167   		 * Copy data used in early init routines from the
+2 -2
kernel/bpf/verifier.c
···
21702 21702   		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
21703 21703   		    verifier_inlines_helper_call(env, insn->imm)) {
21704 21704   			/* BPF_FUNC_get_smp_processor_id inlining is an
21705     -  -			 * optimization, so if pcpu_hot.cpu_number is ever
      21705  +			 * optimization, so if cpu_number is ever
21706 21706   			 * changed in some incompatible and hard to support
21707 21707   			 * way, it's fine to back out this inlining logic
21708 21708   			 */
21709 21709   #ifdef CONFIG_SMP
21710     -  -			insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
      21710  +			insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&cpu_number);
21711 21711   			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
21712 21712   			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
21713 21713   			cnt = 3;