
local_t: Remove cpu_local_xx macros

These macros have not been used for a while now.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>

Authored by Christoph Lameter, committed by Tejun Heo
38b7827f 32032df6

6 files changed, 148 deletions(-)
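
For orientation before the per-file diffs: a minimal sketch, based on the asm-generic definitions removed below, of how a caller would have used this interface. The per-cpu counter evt_count and the function count_event() are hypothetical names, not taken from the patch.

	#include <linux/percpu.h>
	#include <asm/local.h>

	static DEFINE_PER_CPU(local_t, evt_count);	/* hypothetical counter */

	static void count_event(void)
	{
		/*
		 * Before this patch a caller could write:
		 *	cpu_local_inc(evt_count);
		 * which asm-generic/local.h expanded to:
		 */
		local_inc(&__get_cpu_var(evt_count));
	}

Note that the macros took the variable itself (evt_count), not its address; the expansion supplied the & internally.
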
arch/alpha/include/asm/local.h (-17 lines)
@@ -98,21 +98,4 @@
 #define __local_add(i,l)	((l)->a.counter+=(i))
 #define __local_sub(i,l)	((l)->a.counter-=(i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ALPHA_LOCAL_H */

arch/m32r/include/asm/local.h (-25 lines)
@@ -338,29 +338,4 @@
  * a variable, not an address.
  */
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* __M32R_LOCAL_H */

arch/mips/include/asm/local.h (-25 lines)
@@ -193,29 +193,4 @@
 #define __local_add(i, l)	((l)->a.counter+=(i))
 #define __local_sub(i, l)	((l)->a.counter-=(i))
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ARCH_MIPS_LOCAL_H */

arch/powerpc/include/asm/local.h (-25 lines)
@@ -172,29 +172,4 @@
 #define __local_add(i,l)	((l)->a.counter+=(i))
 #define __local_sub(i,l)	((l)->a.counter-=(i))
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ARCH_POWERPC_LOCAL_H */
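
The preempt_disable()/preempt_enable() pairs removed above existed because, as the deleted comments say, a preemptible task could otherwise touch a previous CPU's counter in a non-atomic way. A hedged sketch of the race the wrappers prevented, reusing the hypothetical evt_count from earlier:

	static void unsafe_count_event(void)
	{
		local_t *p = &__get_cpu_var(evt_count);	/* say, CPU 0's copy */
		/* preemption and migration to CPU 1 can happen here */
		local_inc(p);	/* updates CPU 0's counter from CPU 1 */
	}

Because local_t operations are only guaranteed atomic with respect to the CPU that owns the variable, the cross-CPU increment above can race with CPU 0's own updates.
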

arch/x86/include/asm/local.h (-37 lines)
@@ -195,41 +195,4 @@
 #define __local_add(i, l)	local_add((i), (l))
 #define __local_sub(i, l)	local_sub((i), (l))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-({					\
-	local_t res__;			\
-	preempt_disable();		\
-	res__ = (l);			\
-	preempt_enable();		\
-	res__;				\
-})
-#define cpu_local_wrap(l)		\
-({					\
-	preempt_disable();		\
-	(l);				\
-	preempt_enable();		\
-})					\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l)	cpu_local_inc((l))
-#define __cpu_local_dec(l)	cpu_local_dec((l))
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ASM_X86_LOCAL_H */
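
The deleted X86_64 comment above gestures at the design that made these wrappers obsolete: with per-cpu data addressed through the GS segment base, a per-cpu counter update is a single instruction and needs no preemption guard at all. A sketch assuming the this_cpu_*() operations, which are not part of this diff; evt_count remains a hypothetical name:

	static DEFINE_PER_CPU(long, evt_count);	/* hypothetical counter */

	static void count_event(void)
	{
		/* on x86 this compiles to roughly: incq %gs:evt_count */
		this_cpu_inc(evt_count);
	}

Since the whole read-modify-write is one instruction, an interrupt or a migration can only land before or after it, never in the middle.
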

include/asm-generic/local.h (-19 lines)
@@ -52,23 +52,4 @@
 #define __local_add(i,l)	local_set((l), local_read(l) + (i))
 #define __local_sub(i,l)	local_set((l), local_read(l) - (i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc. Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ASM_GENERIC_LOCAL_H */
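
Any code still using the generic macros migrates mechanically. A hedged before/after sketch; hits and record_hit() are hypothetical names:

	static DEFINE_PER_CPU(local_t, hits);	/* hypothetical counter */

	static void record_hit(long n)
	{
		/* formerly: cpu_local_add(n, hits); open-coded equivalent: */
		preempt_disable();
		local_add(n, &__get_cpu_var(hits));
		preempt_enable();
	}

Where a plain per-cpu long is enough, this_cpu_add(hits, n) does the same job without the explicit preemption bracket.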