Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

percpu: remove per_cpu__ prefix.

Now that the return from alloc_percpu is compatible with the address
of per-cpu vars, it makes sense to hand around the address of per-cpu
variables. To make this sane, we remove the per_cpu__ prefix we
created to stop people accidentally using these vars directly.

Now that we have sparse, we can use that (next patch).

tj: * Updated to convert stuff which were missed by or added after the
original patch.

* Kill per_cpu_var() macro.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>

authored by

Rusty Russell and committed by
Tejun Heo
dd17c8f7 390dfd95

+80 -90
+2 -2
arch/blackfin/mach-common/entry.S
··· 835 835 836 836 ENTRY(_ret_from_exception) 837 837 #ifdef CONFIG_IPIPE 838 - p2.l = _per_cpu__ipipe_percpu_domain; 839 - p2.h = _per_cpu__ipipe_percpu_domain; 838 + p2.l = _ipipe_percpu_domain; 839 + p2.h = _ipipe_percpu_domain; 840 840 r0.l = _ipipe_root; 841 841 r0.h = _ipipe_root; 842 842 r2 = [p2];
+1 -1
arch/cris/arch-v10/kernel/entry.S
··· 358 358 1: btstq 12, $r1 ; Refill? 359 359 bpl 2f 360 360 lsrq 24, $r1 ; Get PGD index (bit 24-31) 361 - move.d [per_cpu__current_pgd], $r0 ; PGD for the current process 361 + move.d [current_pgd], $r0 ; PGD for the current process 362 362 move.d [$r0+$r1.d], $r0 ; Get PMD 363 363 beq 2f 364 364 nop
+1 -1
arch/cris/arch-v32/mm/mmu.S
··· 115 115 #ifdef CONFIG_SMP 116 116 move $s7, $acr ; PGD 117 117 #else 118 - move.d per_cpu__current_pgd, $acr ; PGD 118 + move.d current_pgd, $acr ; PGD 119 119 #endif 120 120 ; Look up PMD in PGD 121 121 lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
+2 -2
arch/ia64/include/asm/percpu.h
··· 9 9 #define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE 10 10 11 11 #ifdef __ASSEMBLY__ 12 - # define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */ 12 + # define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */ 13 13 #else /* !__ASSEMBLY__ */ 14 14 15 15 ··· 39 39 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly 40 40 * more efficient. 41 41 */ 42 - #define __ia64_per_cpu_var(var) per_cpu__##var 42 + #define __ia64_per_cpu_var(var) var 43 43 44 44 #include <asm-generic/percpu.h> 45 45
+2 -2
arch/ia64/kernel/ia64_ksyms.c
··· 30 30 #endif 31 31 32 32 #include <asm/processor.h> 33 - EXPORT_SYMBOL(per_cpu__ia64_cpu_info); 33 + EXPORT_SYMBOL(ia64_cpu_info); 34 34 #ifdef CONFIG_SMP 35 - EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); 35 + EXPORT_SYMBOL(local_per_cpu_offset); 36 36 #endif 37 37 38 38 #include <asm/uaccess.h>
+1 -1
arch/ia64/mm/discontig.c
··· 459 459 cpu = 0; 460 460 node = node_cpuid[cpu].nid; 461 461 cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + 462 - ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start)); 462 + ((char *)&ia64_cpu_info - __per_cpu_start)); 463 463 cpu0_cpu_info->node_data = mem_data[node].node_data; 464 464 } 465 465 #endif /* CONFIG_SMP */
+1 -1
arch/microblaze/include/asm/entry.h
··· 21 21 * places 22 22 */ 23 23 24 - #define PER_CPU(var) per_cpu__##var 24 + #define PER_CPU(var) var 25 25 26 26 # ifndef __ASSEMBLY__ 27 27 DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
+4 -4
arch/parisc/lib/fixup.S
··· 36 36 #endif 37 37 /* t2 = &__per_cpu_offset[smp_processor_id()]; */ 38 38 LDREGX \t2(\t1),\t2 39 - addil LT%per_cpu__exception_data,%r27 40 - LDREG RT%per_cpu__exception_data(%r1),\t1 39 + addil LT%exception_data,%r27 40 + LDREG RT%exception_data(%r1),\t1 41 41 /* t1 = &__get_cpu_var(exception_data) */ 42 42 add,l \t1,\t2,\t1 43 43 /* t1 = t1->fault_ip */ ··· 46 46 #else 47 47 .macro get_fault_ip t1 t2 48 48 /* t1 = &__get_cpu_var(exception_data) */ 49 - addil LT%per_cpu__exception_data,%r27 50 - LDREG RT%per_cpu__exception_data(%r1),\t2 49 + addil LT%exception_data,%r27 50 + LDREG RT%exception_data(%r1),\t2 51 51 /* t1 = t2->fault_ip */ 52 52 LDREG EXCDATA_IP(\t2), \t1 53 53 .endm
+1 -1
arch/powerpc/platforms/pseries/hvCall.S
··· 55 55 /* calculate address of stat structure r4 = opcode */ \ 56 56 srdi r4,r4,2; /* index into array */ \ 57 57 mulli r4,r4,HCALL_STAT_SIZE; \ 58 - LOAD_REG_ADDR(r7, per_cpu__hcall_stats); \ 58 + LOAD_REG_ADDR(r7, hcall_stats); \ 59 59 add r4,r4,r7; \ 60 60 ld r7,PACA_DATA_OFFSET(r13); /* per cpu offset */ \ 61 61 add r4,r4,r7; \
+3 -3
arch/sparc/kernel/nmi.c
··· 112 112 touched = 1; 113 113 } 114 114 if (!touched && __get_cpu_var(last_irq_sum) == sum) { 115 - __this_cpu_inc(per_cpu_var(alert_counter)); 116 - if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz) 115 + __this_cpu_inc(alert_counter); 116 + if (__this_cpu_read(alert_counter) == 30 * nmi_hz) 117 117 die_nmi("BUG: NMI Watchdog detected LOCKUP", 118 118 regs, panic_on_timeout); 119 119 } else { 120 120 __get_cpu_var(last_irq_sum) = sum; 121 - __this_cpu_write(per_cpu_var(alert_counter), 0); 121 + __this_cpu_write(alert_counter, 0); 122 122 } 123 123 if (__get_cpu_var(wd_enabled)) { 124 124 write_pic(picl_value(nmi_hz));
+4 -4
arch/sparc/kernel/rtrap_64.S
··· 149 149 rtrap_irq: 150 150 rtrap: 151 151 #ifndef CONFIG_SMP 152 - sethi %hi(per_cpu____cpu_data), %l0 153 - lduw [%l0 + %lo(per_cpu____cpu_data)], %l1 152 + sethi %hi(__cpu_data), %l0 153 + lduw [%l0 + %lo(__cpu_data)], %l1 154 154 #else 155 - sethi %hi(per_cpu____cpu_data), %l0 156 - or %l0, %lo(per_cpu____cpu_data), %l0 155 + sethi %hi(__cpu_data), %l0 156 + or %l0, %lo(__cpu_data), %l0 157 157 lduw [%l0 + %g5], %l1 158 158 #endif 159 159 cmp %l1, 0
+17 -20
arch/x86/include/asm/percpu.h
··· 25 25 */ 26 26 #ifdef CONFIG_SMP 27 27 #define PER_CPU(var, reg) \ 28 - __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \ 29 - lea per_cpu__##var(reg), reg 30 - #define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var 28 + __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \ 29 + lea var(reg), reg 30 + #define PER_CPU_VAR(var) %__percpu_seg:var 31 31 #else /* ! SMP */ 32 - #define PER_CPU(var, reg) \ 33 - __percpu_mov_op $per_cpu__##var, reg 34 - #define PER_CPU_VAR(var) per_cpu__##var 32 + #define PER_CPU(var, reg) __percpu_mov_op $var, reg 33 + #define PER_CPU_VAR(var) var 35 34 #endif /* SMP */ 36 35 37 36 #ifdef CONFIG_X86_64_SMP 38 37 #define INIT_PER_CPU_VAR(var) init_per_cpu__##var 39 38 #else 40 - #define INIT_PER_CPU_VAR(var) per_cpu__##var 39 + #define INIT_PER_CPU_VAR(var) var 41 40 #endif 42 41 43 42 #else /* ...!ASSEMBLY */ ··· 59 60 * There also must be an entry in vmlinux_64.lds.S 60 61 */ 61 62 #define DECLARE_INIT_PER_CPU(var) \ 62 - extern typeof(per_cpu_var(var)) init_per_cpu_var(var) 63 + extern typeof(var) init_per_cpu_var(var) 63 64 64 65 #ifdef CONFIG_X86_64_SMP 65 66 #define init_per_cpu_var(var) init_per_cpu__##var 66 67 #else 67 - #define init_per_cpu_var(var) per_cpu_var(var) 68 + #define init_per_cpu_var(var) var 68 69 #endif 69 70 70 71 /* For arch-specific code, we can use direct single-insn ops (they ··· 141 142 * per-thread variables implemented as per-cpu variables and thus 142 143 * stable for the duration of the respective task. 
143 144 */ 144 - #define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \ 145 - "m" (per_cpu__##var)) 146 - #define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \ 147 - "p" (&per_cpu__##var)) 148 - #define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) 149 - #define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) 150 - #define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) 151 - #define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val) 152 - #define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val) 153 - #define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val) 145 + #define percpu_read(var) percpu_from_op("mov", var, "m" (var)) 146 + #define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var))) 147 + #define percpu_write(var, val) percpu_to_op("mov", var, val) 148 + #define percpu_add(var, val) percpu_to_op("add", var, val) 149 + #define percpu_sub(var, val) percpu_to_op("sub", var, val) 150 + #define percpu_and(var, val) percpu_to_op("and", var, val) 151 + #define percpu_or(var, val) percpu_to_op("or", var, val) 152 + #define percpu_xor(var, val) percpu_to_op("xor", var, val) 154 153 155 154 #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) 156 155 #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) ··· 233 236 ({ \ 234 237 int old__; \ 235 238 asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ 236 - : "=r" (old__), "+m" (per_cpu__##var) \ 239 + : "=r" (old__), "+m" (var) \ 237 240 : "dIr" (bit)); \ 238 241 old__; \ 239 242 })
+4 -4
arch/x86/include/asm/system.h
··· 31 31 "movl %P[task_canary](%[next]), %%ebx\n\t" \ 32 32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 33 33 #define __switch_canary_oparam \ 34 - , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) 34 + , [stack_canary] "=m" (stack_canary.canary) 35 35 #define __switch_canary_iparam \ 36 36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 37 37 #else /* CC_STACKPROTECTOR */ ··· 113 113 "movq %P[task_canary](%%rsi),%%r8\n\t" \ 114 114 "movq %%r8,"__percpu_arg([gs_canary])"\n\t" 115 115 #define __switch_canary_oparam \ 116 - , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary)) 116 + , [gs_canary] "=m" (irq_stack_union.stack_canary) 117 117 #define __switch_canary_iparam \ 118 118 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 119 119 #else /* CC_STACKPROTECTOR */ ··· 134 134 __switch_canary \ 135 135 "movq %P[thread_info](%%rsi),%%r8\n\t" \ 136 136 "movq %%rax,%%rdi\n\t" \ 137 - "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ 137 + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ 138 138 "jnz ret_from_fork\n\t" \ 139 139 RESTORE_CONTEXT \ 140 140 : "=a" (last) \ ··· 144 144 [ti_flags] "i" (offsetof(struct thread_info, flags)), \ 145 145 [_tif_fork] "i" (_TIF_FORK), \ 146 146 [thread_info] "i" (offsetof(struct task_struct, stack)), \ 147 - [current_task] "m" (per_cpu_var(current_task)) \ 147 + [current_task] "m" (current_task) \ 148 148 __switch_canary_iparam \ 149 149 : "memory", "cc" __EXTRA_CLOBBER) 150 150 #endif
+3 -3
arch/x86/kernel/apic/nmi.c
··· 437 437 * Ayiee, looks like this CPU is stuck ... 438 438 * wait a few IRQs (5 seconds) before doing the oops ... 439 439 */ 440 - __this_cpu_inc(per_cpu_var(alert_counter)); 441 - if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz) 440 + __this_cpu_inc(alert_counter); 441 + if (__this_cpu_read(alert_counter) == 5 * nmi_hz) 442 442 /* 443 443 * die_nmi will return ONLY if NOTIFY_STOP happens.. 444 444 */ ··· 446 446 regs, panic_on_timeout); 447 447 } else { 448 448 __get_cpu_var(last_irq_sum) = sum; 449 - __this_cpu_write(per_cpu_var(alert_counter), 0); 449 + __this_cpu_write(alert_counter, 0); 450 450 } 451 451 452 452 /* see if the nmi watchdog went off */
+3 -3
arch/x86/kernel/head_32.S
··· 438 438 */ 439 439 cmpb $0,ready 440 440 jne 1f 441 - movl $per_cpu__gdt_page,%eax 442 - movl $per_cpu__stack_canary,%ecx 441 + movl $gdt_page,%eax 442 + movl $stack_canary,%ecx 443 443 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) 444 444 shrl $16, %ecx 445 445 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) ··· 702 702 .word 0 # 32 bit align gdt_desc.address 703 703 ENTRY(early_gdt_descr) 704 704 .word GDT_ENTRIES*8-1 705 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */ 705 + .long gdt_page /* Overwritten for secondary CPUs */ 706 706 707 707 /* 708 708 * The boot_gdt must mirror the equivalent in setup.S and is
+2 -2
arch/x86/kernel/vmlinux.lds.S
··· 312 312 * Per-cpu symbols which need to be offset from __per_cpu_load 313 313 * for the boot processor. 314 314 */ 315 - #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load 315 + #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load 316 316 INIT_PER_CPU(gdt_page); 317 317 INIT_PER_CPU(irq_stack_union); 318 318 ··· 323 323 "kernel image bigger than KERNEL_IMAGE_SIZE"); 324 324 325 325 #ifdef CONFIG_SMP 326 - . = ASSERT((per_cpu__irq_stack_union == 0), 326 + . = ASSERT((irq_stack_union == 0), 327 327 "irq_stack_union is not at start of per-cpu area"); 328 328 #endif 329 329
+2 -2
arch/x86/xen/xen-asm_32.S
··· 90 90 GET_THREAD_INFO(%eax) 91 91 movl TI_cpu(%eax), %eax 92 92 movl __per_cpu_offset(,%eax,4), %eax 93 - mov per_cpu__xen_vcpu(%eax), %eax 93 + mov xen_vcpu(%eax), %eax 94 94 #else 95 - movl per_cpu__xen_vcpu, %eax 95 + movl xen_vcpu, %eax 96 96 #endif 97 97 98 98 /* check IF state we're restoring */
+6 -6
include/asm-generic/percpu.h
··· 50 50 * offset. 51 51 */ 52 52 #define per_cpu(var, cpu) \ 53 - (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu))) 53 + (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu))) 54 54 #define __get_cpu_var(var) \ 55 - (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset)) 55 + (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset)) 56 56 #define __raw_get_cpu_var(var) \ 57 - (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) 57 + (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset)) 58 58 59 59 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) 60 60 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) ··· 66 66 67 67 #else /* ! SMP */ 68 68 69 - #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) 70 - #define __get_cpu_var(var) per_cpu_var(var) 71 - #define __raw_get_cpu_var(var) per_cpu_var(var) 69 + #define per_cpu(var, cpu) (*((void)(cpu), &(var))) 70 + #define __get_cpu_var(var) (var) 71 + #define __raw_get_cpu_var(var) (var) 72 72 #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) 73 73 #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) 74 74
+6 -12
include/linux/percpu-defs.h
··· 2 2 #define _LINUX_PERCPU_DEFS_H 3 3 4 4 /* 5 - * Determine the real variable name from the name visible in the 6 - * kernel sources. 7 - */ 8 - #define per_cpu_var(var) per_cpu__##var 9 - 10 - /* 11 5 * Base implementations of per-CPU variable declarations and definitions, where 12 6 * the section in which the variable is to be placed is provided by the 13 7 * 'sec' argument. This may be used to affect the parameters governing the ··· 50 56 */ 51 57 #define DECLARE_PER_CPU_SECTION(type, name, sec) \ 52 58 extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 53 - extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name 59 + extern __PCPU_ATTRS(sec) __typeof__(type) name 54 60 55 61 #define DEFINE_PER_CPU_SECTION(type, name, sec) \ 56 62 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 57 63 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 58 64 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 59 65 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ 60 - __typeof__(type) per_cpu__##name 66 + __typeof__(type) name 61 67 #else 62 68 /* 63 69 * Normal declaration and definition macros. 64 70 */ 65 71 #define DECLARE_PER_CPU_SECTION(type, name, sec) \ 66 - extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name 72 + extern __PCPU_ATTRS(sec) __typeof__(type) name 67 73 68 74 #define DEFINE_PER_CPU_SECTION(type, name, sec) \ 69 75 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ 70 - __typeof__(type) per_cpu__##name 76 + __typeof__(type) name 71 77 #endif 72 78 73 79 /* ··· 131 137 /* 132 138 * Intermodule exports for per-CPU variables. 133 139 */ 134 - #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) 135 - #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) 140 + #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var) 141 + #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var) 136 142 137 143 138 144 #endif /* _LINUX_PERCPU_DEFS_H */
+2 -3
include/linux/percpu.h
··· 182 182 #ifndef percpu_read 183 183 # define percpu_read(var) \ 184 184 ({ \ 185 - typeof(per_cpu_var(var)) __tmp_var__; \ 185 + typeof(var) __tmp_var__; \ 186 186 __tmp_var__ = get_cpu_var(var); \ 187 187 put_cpu_var(var); \ 188 188 __tmp_var__; \ ··· 253 253 254 254 /* 255 255 * Optimized manipulation for memory allocated through the per cpu 256 - * allocator or for addresses of per cpu variables (can be determined 257 - * using per_cpu_var(xx). 256 + * allocator or for addresses of per cpu variables. 258 257 * 259 258 * These operation guarantee exclusivity of access for other operations 260 259 * on the *same* processor. The assumption is that per cpu data is only
+4 -4
include/linux/vmstat.h
··· 76 76 77 77 static inline void __count_vm_event(enum vm_event_item item) 78 78 { 79 - __this_cpu_inc(per_cpu_var(vm_event_states).event[item]); 79 + __this_cpu_inc(vm_event_states.event[item]); 80 80 } 81 81 82 82 static inline void count_vm_event(enum vm_event_item item) 83 83 { 84 - this_cpu_inc(per_cpu_var(vm_event_states).event[item]); 84 + this_cpu_inc(vm_event_states.event[item]); 85 85 } 86 86 87 87 static inline void __count_vm_events(enum vm_event_item item, long delta) 88 88 { 89 - __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); 89 + __this_cpu_add(vm_event_states.event[item], delta); 90 90 } 91 91 92 92 static inline void count_vm_events(enum vm_event_item item, long delta) 93 93 { 94 - this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); 94 + this_cpu_add(vm_event_states.event[item], delta); 95 95 } 96 96 97 97 extern void all_vm_events(unsigned long *);
+4 -4
kernel/rcutorture.c
··· 731 731 /* Should not happen, but... */ 732 732 pipe_count = RCU_TORTURE_PIPE_LEN; 733 733 } 734 - __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]); 734 + __this_cpu_inc(rcu_torture_count[pipe_count]); 735 735 completed = cur_ops->completed() - completed; 736 736 if (completed > RCU_TORTURE_PIPE_LEN) { 737 737 /* Should not happen, but... */ 738 738 completed = RCU_TORTURE_PIPE_LEN; 739 739 } 740 - __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]); 740 + __this_cpu_inc(rcu_torture_batch[completed]); 741 741 preempt_enable(); 742 742 cur_ops->readunlock(idx); 743 743 } ··· 786 786 /* Should not happen, but... */ 787 787 pipe_count = RCU_TORTURE_PIPE_LEN; 788 788 } 789 - __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]); 789 + __this_cpu_inc(rcu_torture_count[pipe_count]); 790 790 completed = cur_ops->completed() - completed; 791 791 if (completed > RCU_TORTURE_PIPE_LEN) { 792 792 /* Should not happen, but... */ 793 793 completed = RCU_TORTURE_PIPE_LEN; 794 794 } 795 - __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]); 795 + __this_cpu_inc(rcu_torture_batch[completed]); 796 796 preempt_enable(); 797 797 cur_ops->readunlock(idx); 798 798 schedule();
+3 -3
kernel/trace/trace.c
··· 91 91 static inline void ftrace_disable_cpu(void) 92 92 { 93 93 preempt_disable(); 94 - __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled)); 94 + __this_cpu_inc(ftrace_cpu_disabled); 95 95 } 96 96 97 97 static inline void ftrace_enable_cpu(void) 98 98 { 99 - __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled)); 99 + __this_cpu_dec(ftrace_cpu_disabled); 100 100 preempt_enable(); 101 101 } 102 102 ··· 1085 1085 struct ftrace_entry *entry; 1086 1086 1087 1087 /* If we are reading the ring buffer, don't trace */ 1088 - if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) 1088 + if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) 1089 1089 return; 1090 1090 1091 1091 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
+2 -2
kernel/trace/trace_functions_graph.c
··· 176 176 struct ring_buffer *buffer = tr->buffer; 177 177 struct ftrace_graph_ent_entry *entry; 178 178 179 - if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) 179 + if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) 180 180 return 0; 181 181 182 182 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, ··· 240 240 struct ring_buffer *buffer = tr->buffer; 241 241 struct ftrace_graph_ret_entry *entry; 242 242 243 - if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) 243 + if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) 244 244 return; 245 245 246 246 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,