/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg gs
#define __percpu_rel (%rip)
#else
#define __percpu_seg fs
#define __percpu_rel
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define __percpu %__percpu_seg:
#else
#define __percpu
#endif

#define PER_CPU_VAR(var) __percpu(var)__percpu_rel

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var) var
#endif

#else /* ...!ASSEMBLY */

#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
#define __seg_gs __attribute__((address_space(__seg_gs)))
#define __seg_fs __attribute__((address_space(__seg_fs)))
#endif

#ifdef CONFIG_X86_64
#define __percpu_seg_override __seg_gs
#else
#define __percpu_seg_override __seg_fs
#endif

#define __percpu_prefix ""

#else /* CONFIG_CC_HAS_NAMED_AS */

#define __percpu_seg_override
#define __percpu_prefix "%%"__stringify(__percpu_seg)":"

#endif /* CONFIG_CC_HAS_NAMED_AS */

#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 *
 * arch_raw_cpu_ptr should not be used in the 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr(_ptr) \
({ \
	unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \
	tcp_ptr__ += (__force unsigned long)(_ptr); \
	(typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \
})
#else
#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
#endif
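/*
 * Illustrative sketch (not part of the original header): arch_raw_cpu_ptr()
 * backs the generic this_cpu_ptr()/raw_cpu_ptr() helpers by adding the
 * current CPU's per-CPU offset (cached in this_cpu_off) to the per-CPU
 * symbol address, e.g.
 *
 *	DEFINE_PER_CPU(int, hypothetical_counter);
 *
 *	int *p = this_cpu_ptr(&hypothetical_counter);
 *	// roughly: p = (int *)((unsigned long)&hypothetical_counter +
 *	//                      raw_cpu_read_long(this_cpu_off));
 *
 * ("hypothetical_counter" is only an example name.)
 */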

#define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel

#else /* CONFIG_SMP */
#define __percpu_seg_override
#define __percpu_prefix ""
#define __force_percpu_prefix ""

#define PER_CPU_VAR(var) (var)__percpu_rel

#endif /* CONFIG_SMP */

#define __my_cpu_type(var) typeof(var) __percpu_seg_override
#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
#define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
#define __percpu_arg(x) __percpu_prefix "%" #x
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed by the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be a matching entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var) init_per_cpu__##var
#else
#define init_per_cpu_var(var) var
#endif
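/*
 * Illustrative usage (not part of the original header; names are examples):
 *
 *	DECLARE_INIT_PER_CPU(hypothetical_var);
 *
 *	// On x86-64 SMP this refers to the init_per_cpu__hypothetical_var
 *	// alias that the linker script must provide, so the variable can be
 *	// addressed before the per-CPU areas are set up:
 *	unsigned long addr = (unsigned long)&init_per_cpu_var(hypothetical_var);
 */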

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op1_1(op, dst) op "b " dst
#define __pcpu_op1_2(op, dst) op "w " dst
#define __pcpu_op1_4(op, dst) op "l " dst
#define __pcpu_op1_8(op, dst) op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)

#define percpu_to_op(size, qual, op, _var, _val) \
do { \
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
	if (0) { \
		typeof(_var) pto_tmp__; \
		pto_tmp__ = (_val); \
		(void)pto_tmp__; \
	} \
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \
	    : [var] "+m" (__my_cpu_var(_var)) \
	    : [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)

#define percpu_unary_op(size, qual, op, _var) \
({ \
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \
	    : [var] "+m" (__my_cpu_var(_var))); \
})

/*
 * Generate a per-CPU add-to-memory instruction, and optimize the generated
 * code when a constant 1 or -1 is added or subtracted.
 */
#define percpu_add_op(size, qual, var, val) \
do { \
	const int pao_ID__ = (__builtin_constant_p(val) && \
			      ((val) == 1 || (val) == -1)) ? \
			     (int)(val) : 0; \
	if (0) { \
		typeof(var) pao_tmp__; \
		pao_tmp__ = (val); \
		(void)pao_tmp__; \
	} \
	if (pao_ID__ == 1) \
		percpu_unary_op(size, qual, "inc", var); \
	else if (pao_ID__ == -1) \
		percpu_unary_op(size, qual, "dec", var); \
	else \
		percpu_to_op(size, qual, "add", var, val); \
} while (0)
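/*
 * Illustrative expansion (not part of the original header): with a constant
 * +1/-1 the add collapses to a single inc/dec, e.g.
 *
 *	DEFINE_PER_CPU(unsigned int, hypothetical_events);
 *
 *	this_cpu_inc(hypothetical_events);	// roughly: incl %gs:hypothetical_events(%rip)
 *	this_cpu_add(hypothetical_events, 7);	// roughly: addl $7, %gs:hypothetical_events(%rip)
 *
 * (64-bit SMP sketch; the variable name is only an example and the exact
 * operand syntax depends on CONFIG_CC_HAS_NAMED_AS.)
 */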

#define percpu_from_op(size, qual, op, _var) \
({ \
	__pcpu_type_##size pfo_val__; \
	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__) \
	    : [var] "m" (__my_cpu_var(_var))); \
	(typeof(_var))(unsigned long) pfo_val__; \
})

#define percpu_stable_op(size, op, _var) \
({ \
	__pcpu_type_##size pfo_val__; \
	asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__) \
	    : [var] "i" (&(_var))); \
	(typeof(_var))(unsigned long) pfo_val__; \
})

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val) \
({ \
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \
				    __percpu_arg([var])) \
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__), \
		    [var] "+m" (__my_cpu_var(_var)) \
		  : : "memory"); \
	(typeof(_var))(unsigned long) (paro_tmp__ + _val); \
})

/*
 * raw_cpu_xchg() can use a load-store since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op(_var, _nval) \
({ \
	typeof(_var) pxo_old__ = raw_cpu_read(_var); \
	raw_cpu_write(_var, _nval); \
	pxo_old__; \
})

/*
 * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
 * xchg is expensive due to the implied lock prefix. The processor
 * cannot prefetch cachelines if xchg is used.
 */
#define this_percpu_xchg_op(_var, _nval) \
({ \
	typeof(_var) pxo_old__ = this_cpu_read(_var); \
	do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \
	pxo_old__; \
})

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
({ \
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
				    __percpu_arg([var])) \
		  : [oval] "+a" (pco_old__), \
		    [var] "+m" (__my_cpu_var(_var)) \
		  : [nval] __pcpu_reg_##size(, pco_new__) \
		  : "memory"); \
	(typeof(_var))(unsigned long) pco_old__; \
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval) \
({ \
	bool success; \
	__pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
	__pcpu_type_##size pco_old__ = *pco_oval__; \
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
				    __percpu_arg([var])) \
		  CC_SET(z) \
		  : CC_OUT(z) (success), \
		    [oval] "+a" (pco_old__), \
		    [var] "+m" (__my_cpu_var(_var)) \
		  : [nval] __pcpu_reg_##size(, pco_new__) \
		  : "memory"); \
	if (unlikely(!success)) \
		*pco_oval__ = pco_old__; \
	likely(success); \
})
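/*
 * Illustrative usage (not part of the original header): the try_cmpxchg
 * form returns a bool and refreshes the expected value on failure, which
 * keeps retry loops simple, e.g.
 *
 *	DEFINE_PER_CPU(unsigned long, hypothetical_state);
 *
 *	unsigned long old = this_cpu_read(hypothetical_state), new_val;
 *
 *	do {
 *		new_val = compute_next(old);	// hypothetical helper
 *	} while (!this_cpu_try_cmpxchg(hypothetical_state, &old, new_val));
 *
 * (Names are examples only.)
 */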

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \
({ \
	union { \
		u64 var; \
		struct { \
			u32 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = _oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  : [var] "+m" (__my_cpu_var(_var)), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	\
	old__.var; \
})

#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval) \
({ \
	bool success; \
	u64 *_oval = (u64 *)(_ovalp); \
	union { \
		u64 var; \
		struct { \
			u32 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = *_oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  CC_SET(z) \
		  : CC_OUT(z) (success), \
		    [var] "+m" (__my_cpu_var(_var)), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	if (unlikely(!success)) \
		*_oval = old__.var; \
	likely(success); \
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
#endif

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval) \
({ \
	union { \
		u128 var; \
		struct { \
			u64 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = _oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  : [var] "+m" (__my_cpu_var(_var)), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	\
	old__.var; \
})

#define raw_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval) \
({ \
	bool success; \
	u128 *_oval = (u128 *)(_ovalp); \
	union { \
		u128 var; \
		struct { \
			u64 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = *_oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  CC_SET(z) \
		  : CC_OUT(z) (success), \
		    [var] "+m" (__my_cpu_var(_var)), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	if (unlikely(!success)) \
		*_oval = old__.var; \
	likely(success); \
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
#endif
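/*
 * Illustrative usage (not part of the original header): the 128-bit forms
 * atomically replace a 16-byte, naturally aligned per-CPU value, e.g.
 *
 *	DEFINE_PER_CPU(u128, hypothetical_slot);
 *
 *	u128 old = ...;		// caller's snapshot of the current value
 *	while (!this_cpu_try_cmpxchg128(hypothetical_slot, &old, new_val))
 *		;		// on failure 'old' is refreshed; recompute new_val
 *
 * (Names are examples only; the ALTERNATIVE above falls back to the
 *  cmpxchg16b emulation on CPUs without X86_FEATURE_CX16.)
 */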

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus. The current users include
 * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
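/*
 * Illustrative contrast (not part of the original header), using the
 * pcpu_hot.current_task user mentioned in the comment above:
 *
 *	// Re-reads memory on every use -- required when the value can change
 *	// under the caller (e.g. after preemption migrates the task):
 *	struct task_struct *a = this_cpu_read(pcpu_hot.current_task);
 *
 *	// May be cached/CSEd by the compiler -- safe here because the task
 *	// pointer is per-thread state, identical on whichever CPU the task
 *	// happens to run on:
 *	struct task_struct *b = this_cpu_read_stable(pcpu_hot.current_task);
 */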

#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read(qual, pcp) \
({ \
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)); \
})

#define __raw_cpu_write(qual, pcp, val) \
do { \
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \
} while (0)

#define raw_cpu_read_1(pcp) __raw_cpu_read(, pcp)
#define raw_cpu_read_2(pcp) __raw_cpu_read(, pcp)
#define raw_cpu_read_4(pcp) __raw_cpu_read(, pcp)
#define raw_cpu_write_1(pcp, val) __raw_cpu_write(, pcp, val)
#define raw_cpu_write_2(pcp, val) __raw_cpu_write(, pcp, val)
#define raw_cpu_write_4(pcp, val) __raw_cpu_write(, pcp, val)

#define this_cpu_read_1(pcp) __raw_cpu_read(volatile, pcp)
#define this_cpu_read_2(pcp) __raw_cpu_read(volatile, pcp)
#define this_cpu_read_4(pcp) __raw_cpu_read(volatile, pcp)
#define this_cpu_write_1(pcp, val) __raw_cpu_write(volatile, pcp, val)
#define this_cpu_write_2(pcp, val) __raw_cpu_write(volatile, pcp, val)
#define this_cpu_write_4(pcp, val) __raw_cpu_write(volatile, pcp, val)

#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp) __raw_cpu_read(, pcp)
#define raw_cpu_write_8(pcp, val) __raw_cpu_write(, pcp, val)

#define this_cpu_read_8(pcp) __raw_cpu_read(volatile, pcp)
#define this_cpu_write_8(pcp, val) __raw_cpu_write(volatile, pcp, val)
#endif

#define this_cpu_read_const(pcp) __raw_cpu_read(, pcp)
#else /* CONFIG_USE_X86_SEG_SUPPORT */

#define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp)
#define raw_cpu_read_4(pcp) percpu_from_op(4, , "mov", pcp)
#define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val)
#define raw_cpu_write_4(pcp, val) percpu_to_op(4, , "mov", (pcp), val)

#define this_cpu_read_1(pcp) percpu_from_op(1, volatile, "mov", pcp)
#define this_cpu_read_2(pcp) percpu_from_op(2, volatile, "mov", pcp)
#define this_cpu_read_4(pcp) percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val)

#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val)

#define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val)
#endif

/*
 * The generic per-cpu infrastructure is not suitable for
 * reading const-qualified variables.
 */
#define this_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })
#endif /* CONFIG_USE_X86_SEG_SUPPORT */

#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)

#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val) percpu_to_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val) percpu_to_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val) percpu_to_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val) percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val) percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val) percpu_to_op(4, , "or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val)

#define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val) percpu_to_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val) percpu_to_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val) percpu_to_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) this_percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)

#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval) this_percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define raw_cpu_read_long(pcp) raw_cpu_read_8(pcp)
#else
/* There is no generic 64 bit read stable operation for 32 bit targets. */
#define this_cpu_read_stable_8(pcp) ({ BUILD_BUG(); (typeof(pcp))0; })

#define raw_cpu_read_long(pcp) raw_cpu_read_4(pcp)
#endif

#define x86_this_cpu_constant_test_bit(_nr, _var) \
({ \
	unsigned long __percpu *addr__ = \
		(unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \
	!!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__)); \
})

#define x86_this_cpu_variable_test_bit(_nr, _var) \
({ \
	bool oldbit; \
	\
	asm volatile("btl %[nr], " __percpu_arg([var]) \
		     CC_SET(c) \
		     : CC_OUT(c) (oldbit) \
		     : [var] "m" (__my_cpu_var(_var)), \
		       [nr] "rI" (_nr)); \
	oldbit; \
})

#define x86_this_cpu_test_bit(_nr, _var) \
	(__builtin_constant_p(_nr) \
	 ? x86_this_cpu_constant_test_bit(_nr, _var) \
	 : x86_this_cpu_variable_test_bit(_nr, _var))
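/*
 * Illustrative usage (not part of the original header): the wrapper picks
 * the constant-folded form when the bit number is a compile-time constant
 * and the btl form otherwise, e.g.
 *
 *	DEFINE_PER_CPU(unsigned long, hypothetical_flags);
 *
 *	if (x86_this_cpu_test_bit(3, hypothetical_flags))
 *		;	// constant path: mask + raw_cpu_read()
 *	if (x86_this_cpu_test_bit(nr, hypothetical_flags))
 *		;	// variable path: btl on the per-CPU word
 *
 * ("hypothetical_flags" is an example name.)
 */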

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
	DEFINE_PER_CPU(_type, _name) = _initvalue; \
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
		{ [0 ... NR_CPUS-1] = _initvalue }; \
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
		{ [0 ... NR_CPUS-1] = _initvalue }; \
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
	DECLARE_PER_CPU(_type, _name); \
	extern __typeof__(_type) *_name##_early_ptr; \
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
	extern __typeof__(_type) *_name##_early_ptr; \
	extern __typeof__(_type) _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
	*(early_per_cpu_ptr(_name) ? \
	  &early_per_cpu_ptr(_name)[_cpu] : \
	  &per_cpu(_name, _cpu))
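/*
 * Illustrative usage (not part of the original header; names are examples):
 *
 *	DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, hypothetical_hw_id, 0);
 *
 *	// Works both before and after the per-CPU areas are set up:
 *	u32 id = early_per_cpu(hypothetical_hw_id, cpu);
 *
 *	// Before per-CPU areas exist, the _early_map[] array backs the value;
 *	// once the early pointer is cleared during per-CPU setup, the plain
 *	// per_cpu() path is used instead.
 */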

#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */