Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

alpha: switch to dynamic percpu allocator

Alpha implements custom SHIFT_PERCPU_PTR for modules because percpu
area can be located far away from the 4G area where the module text is
located. The custom SHIFT_PERCPU_PTR forces GOT usage using ldq
instruction with literal relocation; however, the relocation can't be
used with dynamically allocated percpu variables. Fortunately,
a similar result can be achieved by using weak percpu variable
definitions.

This patch makes alpha use weak definitions and switch to dynamic
percpu allocator.

asm/tlbflush.h was getting linux/sched.h via asm/percpu.h which no
longer needs it. Include linux/sched.h directly in asm/tlbflush.h.

Compile tested. Generation of literal relocation verified.

This patch is based on Ivan Kokshaysky's alpha percpu patch.

[ Impact: use dynamic percpu allocator ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Richard Henderson <rth@twiddle.net>

Tejun Heo 9b7dbc7d 6088464c

+9 -90
-3
arch/alpha/Kconfig
··· 70 70 depends on SMP 71 71 default y 72 72 73 - config HAVE_LEGACY_PER_CPU_AREA 74 - def_bool y 75 - 76 73 source "init/Kconfig" 77 74 source "kernel/Kconfig.freezer" 78 75
+8 -87
arch/alpha/include/asm/percpu.h
··· 1 1 #ifndef __ALPHA_PERCPU_H 2 2 #define __ALPHA_PERCPU_H 3 3 4 - #include <linux/compiler.h> 5 - #include <linux/threads.h> 6 - #include <linux/percpu-defs.h> 7 - 8 4 /* 9 - * Determine the real variable name from the name visible in the 10 - * kernel sources. 11 - */ 12 - #define per_cpu_var(var) per_cpu__##var 13 - 14 - #ifdef CONFIG_SMP 15 - 16 - /* 17 - * per_cpu_offset() is the offset that has to be added to a 18 - * percpu variable to get to the instance for a certain processor. 19 - */ 20 - extern unsigned long __per_cpu_offset[NR_CPUS]; 21 - 22 - #define per_cpu_offset(x) (__per_cpu_offset[x]) 23 - 24 - #define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) 25 - #ifdef CONFIG_DEBUG_PREEMPT 26 - #define my_cpu_offset per_cpu_offset(smp_processor_id()) 27 - #else 28 - #define my_cpu_offset __my_cpu_offset 29 - #endif 30 - 31 - #ifndef MODULE 32 - #define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) 33 - #else 34 - /* 35 - * To calculate addresses of locally defined variables, GCC uses 32-bit 36 - * displacement from the GP. Which doesn't work for per cpu variables in 37 - * modules, as an offset to the kernel per cpu area is way above 4G. 5 + * To calculate addresses of locally defined variables, GCC uses 6 + * 32-bit displacement from the GP. Which doesn't work for per cpu 7 + * variables in modules, as an offset to the kernel per cpu area is 8 + * way above 4G. 38 9 * 39 - * This forces allocation of a GOT entry for per cpu variable using 40 - * ldq instruction with a 'literal' relocation. 10 + * Always use weak definitions for percpu variables in modules. 
41 11 */ 42 - #define SHIFT_PERCPU_PTR(var, offset) ({ \ 43 - extern int simple_identifier_##var(void); \ 44 - unsigned long __ptr, tmp_gp; \ 45 - asm ( "br %1, 1f \n\ 46 - 1: ldgp %1, 0(%1) \n\ 47 - ldq %0, per_cpu__" #var"(%1)\t!literal" \ 48 - : "=&r"(__ptr), "=&r"(tmp_gp)); \ 49 - (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) 50 - 51 - #endif /* MODULE */ 52 - 53 - /* 54 - * A percpu variable may point to a discarded regions. The following are 55 - * established ways to produce a usable pointer from the percpu variable 56 - * offset. 57 - */ 58 - #define per_cpu(var, cpu) \ 59 - (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu))) 60 - #define __get_cpu_var(var) \ 61 - (*SHIFT_PERCPU_PTR(var, my_cpu_offset)) 62 - #define __raw_get_cpu_var(var) \ 63 - (*SHIFT_PERCPU_PTR(var, __my_cpu_offset)) 64 - 65 - #else /* ! SMP */ 66 - 67 - #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) 68 - #define __get_cpu_var(var) per_cpu_var(var) 69 - #define __raw_get_cpu_var(var) per_cpu_var(var) 70 - 71 - #endif /* SMP */ 72 - 73 - #ifdef CONFIG_SMP 74 - #define PER_CPU_BASE_SECTION ".data.percpu" 75 - #else 76 - #define PER_CPU_BASE_SECTION ".data" 12 + #if defined(MODULE) && defined(CONFIG_SMP) 13 + #define ARCH_NEEDS_WEAK_PER_CPU 77 14 #endif 78 15 79 - #ifdef CONFIG_SMP 80 - 81 - #ifdef MODULE 82 - #define PER_CPU_SHARED_ALIGNED_SECTION "" 83 - #else 84 - #define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" 85 - #endif 86 - #define PER_CPU_FIRST_SECTION ".first" 87 - 88 - #else 89 - 90 - #define PER_CPU_SHARED_ALIGNED_SECTION "" 91 - #define PER_CPU_FIRST_SECTION "" 92 - 93 - #endif 94 - 95 - #define PER_CPU_ATTRIBUTES 16 + #include <asm-generic/percpu.h> 96 17 97 18 #endif /* __ALPHA_PERCPU_H */
+1
arch/alpha/include/asm/tlbflush.h
··· 2 2 #define _ALPHA_TLBFLUSH_H 3 3 4 4 #include <linux/mm.h> 5 + #include <linux/sched.h> 5 6 #include <asm/compiler.h> 6 7 #include <asm/pgalloc.h> 7 8