/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU computes the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
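
/*
 * Illustrative sketch (the per-cpu symbol "hypot_var" is hypothetical):
 * on x86-64 SMP,
 *
 *	movq	PER_CPU_VAR(hypot_var), %rax
 *
 * assembles to "movq %gs:hypot_var, %rax", a single segment-relative
 * load; on UP the same line becomes a plain "movq hypot_var, %rax",
 * since each variable then has only one instance.
 */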

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define __this_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	__verify_pcpu_ptr(ptr);				\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
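
/*
 * Illustrative usage sketch (the variable name is hypothetical):
 *
 *	DEFINE_PER_CPU(struct stats, hypot_stats);
 *	struct stats *p = __this_cpu_ptr(&hypot_stats);
 *
 * The asm folds the per-cpu base into the pointer with one
 * "add %gs:this_cpu_off, reg" instruction, instead of first loading
 * this_cpu_off into a scratch register and then adding it.
 */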
#else
#define __percpu_arg(x)		"%P" #x
#endif

/*
 * Initialized pointers to per-cpu variables that are needed for the
 * boot processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */
extern void __bad_percpu_size(void);
#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
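
/*
 * Illustrative expansion (hypothetical 4-byte per-cpu variable):
 *
 *	percpu_to_op("mov", hypot_count, 1);
 *
 * selects the sizeof == 4 case and emits "movl $1, %gs:hypot_count"
 * on x86-64 SMP.  The dead "if (0)" block never generates code; it
 * exists only so the compiler type-checks "val" against the
 * variable's type.
 */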

/*
 * Generate a per-cpu add-to-memory instruction, and optimize the
 * generated code when the constant 1 or -1 is added or subtracted
 * (using inc/dec instead of add).
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
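
/*
 * Illustrative expansions (hypothetical 4-byte per-cpu variable);
 * pao_ID__ is a compile-time constant, so only one branch survives:
 *
 *	percpu_add_op(hypot_count, 1);	->  incl %gs:hypot_count
 *	percpu_add_op(hypot_count, -1);	->  decl %gs:hypot_count
 *	percpu_add_op(hypot_count, 2);	->  addl $2, %gs:hypot_count
 */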

#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__)			\
		    : constraint);			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})

/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed, while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_add_op(var, val)
#define percpu_sub(var, val)		percpu_add_op(var, -(val))
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
#define percpu_inc(var)			percpu_unary_op("inc", var)
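
/*
 * Illustrative usage sketch (the counter is hypothetical):
 *
 *	DEFINE_PER_CPU(unsigned long, hypot_events);
 *
 *	percpu_write(hypot_events, 0);
 *	percpu_add(hypot_events, 16);
 *	percpu_inc(hypot_events);
 *	n = percpu_read(hypot_events);
 *
 * Each operation is a single %gs/%fs-relative instruction, so it
 * cannot be torn by an interrupt on the local CPU, but none of these
 * are atomic against other CPUs; callers must prevent migration
 * (e.g. run with preemption disabled) between dependent accesses.
 */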

#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

/*
 * Per-cpu atomic 64-bit operations are only available under 64-bit.
 * 32-bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)			\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
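
/*
 * Illustrative usage (hypothetical flag word), with preemption off:
 *
 *	DEFINE_PER_CPU(unsigned long, hypot_flags);
 *
 *	if (x86_test_and_clear_bit_percpu(3, hypot_flags))
 *		handle_it();	(bit 3 was set and is now clear)
 *
 * btr copies the old bit value into the carry flag, and "sbbl %0,%0"
 * turns that carry into 0 or -1, so old__ is non-zero exactly when
 * the bit was previously set.
 */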

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu
 * areas are allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
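
/*
 * Illustrative usage sketch (hypothetical variable): a value needed
 * before the per-cpu areas exist might be set up as
 *
 *	DEFINE_EARLY_PER_CPU(u16, hypot_id, 0);
 *
 * Early in boot, early_per_cpu(hypot_id, cpu) reads from the static
 * hypot_id_early_map[] array; once the real per-cpu areas are set up
 * and hypot_id_early_ptr is cleared to NULL, the same expression
 * falls through to the normal per_cpu() access.
 */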

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */