Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Replace __get_cpu_var uses

__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.

Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.

__get_cpu_var() is defined as:

#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() only ever performs an address calculation. However, store
and retrieve operations could use a segment prefix (or a global register on
other platforms) to avoid the address calculation.

this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.

This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer
registers are used when code is generated.

Transformations done to __get_cpu_var()

1. Determine the address of the percpu instance of the current processor.

DEFINE_PER_CPU(int, y);
int *x = &__get_cpu_var(y);

Converts to

int *x = this_cpu_ptr(&y);

2. Same as #1 but this time an array structure is involved.

DEFINE_PER_CPU(int, y[20]);
int *x = __get_cpu_var(y);

Converts to

int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per cpu
variable.

DEFINE_PER_CPU(int, y);
int x = __get_cpu_var(y);

Converts to

int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct

DEFINE_PER_CPU(struct mystruct, y);
struct mystruct x = __get_cpu_var(y);

Converts to

memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per cpu variable

DEFINE_PER_CPU(int, y)
__get_cpu_var(y) = x;

Converts to

__this_cpu_write(y, x);

6. Increment/Decrement etc of a per cpu variable

DEFINE_PER_CPU(int, y);
__get_cpu_var(y)++

Converts to

__this_cpu_inc(y)

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

authored by

Christoph Lameter and committed by
Tejun Heo
89cbc767 532d0d06

+147 -147
+2 -2
arch/x86/include/asm/debugreg.h
··· 97 97 DECLARE_PER_CPU(int, debug_stack_usage); 98 98 static inline void debug_stack_usage_inc(void) 99 99 { 100 - __get_cpu_var(debug_stack_usage)++; 100 + __this_cpu_inc(debug_stack_usage); 101 101 } 102 102 static inline void debug_stack_usage_dec(void) 103 103 { 104 - __get_cpu_var(debug_stack_usage)--; 104 + __this_cpu_dec(debug_stack_usage); 105 105 } 106 106 int is_debug_stack(unsigned long addr); 107 107 void debug_stack_set_zero(void);
+1 -1
arch/x86/include/asm/uv/uv_hub.h
··· 164 164 }; 165 165 166 166 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); 167 - #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) 167 + #define uv_hub_info this_cpu_ptr(&__uv_hub_info) 168 168 #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) 169 169 170 170 /*
+2 -2
arch/x86/kernel/apb_timer.c
··· 146 146 static int __init apbt_clockevent_register(void) 147 147 { 148 148 struct sfi_timer_table_entry *mtmr; 149 - struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev); 149 + struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev); 150 150 151 151 mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); 152 152 if (mtmr == NULL) { ··· 200 200 if (!cpu) 201 201 return; 202 202 203 - adev = &__get_cpu_var(cpu_apbt_dev); 203 + adev = this_cpu_ptr(&cpu_apbt_dev); 204 204 if (!adev->timer) { 205 205 adev->timer = dw_apb_clockevent_init(cpu, adev->name, 206 206 APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
+2 -2
arch/x86/kernel/apic/apic.c
··· 561 561 */ 562 562 static void setup_APIC_timer(void) 563 563 { 564 - struct clock_event_device *levt = &__get_cpu_var(lapic_events); 564 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); 565 565 566 566 if (this_cpu_has(X86_FEATURE_ARAT)) { 567 567 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; ··· 696 696 697 697 static int __init calibrate_APIC_clock(void) 698 698 { 699 - struct clock_event_device *levt = &__get_cpu_var(lapic_events); 699 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); 700 700 void (*real_handler)(struct clock_event_device *dev); 701 701 unsigned long deltaj; 702 702 long delta, deltatsc;
+3 -3
arch/x86/kernel/cpu/common.c
··· 1198 1198 1199 1199 int is_debug_stack(unsigned long addr) 1200 1200 { 1201 - return __get_cpu_var(debug_stack_usage) || 1202 - (addr <= __get_cpu_var(debug_stack_addr) && 1203 - addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); 1201 + return __this_cpu_read(debug_stack_usage) || 1202 + (addr <= __this_cpu_read(debug_stack_addr) && 1203 + addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); 1204 1204 } 1205 1205 NOKPROBE_SYMBOL(is_debug_stack); 1206 1206
+3 -3
arch/x86/kernel/cpu/mcheck/mce-inject.c
··· 83 83 static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) 84 84 { 85 85 int cpu = smp_processor_id(); 86 - struct mce *m = &__get_cpu_var(injectm); 86 + struct mce *m = this_cpu_ptr(&injectm); 87 87 if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) 88 88 return NMI_DONE; 89 89 cpumask_clear_cpu(cpu, mce_inject_cpumask); ··· 97 97 static void mce_irq_ipi(void *info) 98 98 { 99 99 int cpu = smp_processor_id(); 100 - struct mce *m = &__get_cpu_var(injectm); 100 + struct mce *m = this_cpu_ptr(&injectm); 101 101 102 102 if (cpumask_test_cpu(cpu, mce_inject_cpumask) && 103 103 m->inject_flags & MCJ_EXCEPTION) { ··· 109 109 /* Inject mce on current CPU */ 110 110 static int raise_local(void) 111 111 { 112 - struct mce *m = &__get_cpu_var(injectm); 112 + struct mce *m = this_cpu_ptr(&injectm); 113 113 int context = MCJ_CTX(m->inject_flags); 114 114 int ret = 0; 115 115 int cpu = m->extcpu;
+23 -23
arch/x86/kernel/cpu/mcheck/mce.c
··· 400 400 401 401 if (offset < 0) 402 402 return 0; 403 - return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); 403 + return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); 404 404 } 405 405 406 406 if (rdmsrl_safe(msr, &v)) { ··· 422 422 int offset = msr_to_offset(msr); 423 423 424 424 if (offset >= 0) 425 - *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; 425 + *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; 426 426 return; 427 427 } 428 428 wrmsrl(msr, v); ··· 478 478 /* Runs with CPU affinity in workqueue */ 479 479 static int mce_ring_empty(void) 480 480 { 481 - struct mce_ring *r = &__get_cpu_var(mce_ring); 481 + struct mce_ring *r = this_cpu_ptr(&mce_ring); 482 482 483 483 return r->start == r->end; 484 484 } ··· 490 490 491 491 *pfn = 0; 492 492 get_cpu(); 493 - r = &__get_cpu_var(mce_ring); 493 + r = this_cpu_ptr(&mce_ring); 494 494 if (r->start == r->end) 495 495 goto out; 496 496 *pfn = r->ring[r->start]; ··· 504 504 /* Always runs in MCE context with preempt off */ 505 505 static int mce_ring_add(unsigned long pfn) 506 506 { 507 - struct mce_ring *r = &__get_cpu_var(mce_ring); 507 + struct mce_ring *r = this_cpu_ptr(&mce_ring); 508 508 unsigned next; 509 509 510 510 next = (r->end + 1) % MCE_RING_SIZE; ··· 526 526 static void mce_schedule_work(void) 527 527 { 528 528 if (!mce_ring_empty()) 529 - schedule_work(&__get_cpu_var(mce_work)); 529 + schedule_work(this_cpu_ptr(&mce_work)); 530 530 } 531 531 532 532 DEFINE_PER_CPU(struct irq_work, mce_irq_work); ··· 551 551 return; 552 552 } 553 553 554 - irq_work_queue(&__get_cpu_var(mce_irq_work)); 554 + irq_work_queue(this_cpu_ptr(&mce_irq_work)); 555 555 } 556 556 557 557 /* ··· 1045 1045 1046 1046 mce_gather_info(&m, regs); 1047 1047 1048 - final = &__get_cpu_var(mces_seen); 1048 + final = this_cpu_ptr(&mces_seen); 1049 1049 *final = m; 1050 1050 1051 1051 memset(valid_banks, 0, sizeof(valid_banks)); ··· 1278 1278 1279 1279 static int cmc_error_seen(void) 1280 1280 { 1281 - unsigned 
long *v = &__get_cpu_var(mce_polled_error); 1281 + unsigned long *v = this_cpu_ptr(&mce_polled_error); 1282 1282 1283 1283 return test_and_clear_bit(0, v); 1284 1284 } 1285 1285 1286 1286 static void mce_timer_fn(unsigned long data) 1287 1287 { 1288 - struct timer_list *t = &__get_cpu_var(mce_timer); 1288 + struct timer_list *t = this_cpu_ptr(&mce_timer); 1289 1289 unsigned long iv; 1290 1290 int notify; 1291 1291 1292 1292 WARN_ON(smp_processor_id() != data); 1293 1293 1294 - if (mce_available(__this_cpu_ptr(&cpu_info))) { 1294 + if (mce_available(this_cpu_ptr(&cpu_info))) { 1295 1295 machine_check_poll(MCP_TIMESTAMP, 1296 - &__get_cpu_var(mce_poll_banks)); 1296 + this_cpu_ptr(&mce_poll_banks)); 1297 1297 mce_intel_cmci_poll(); 1298 1298 } 1299 1299 ··· 1323 1323 */ 1324 1324 void mce_timer_kick(unsigned long interval) 1325 1325 { 1326 - struct timer_list *t = &__get_cpu_var(mce_timer); 1326 + struct timer_list *t = this_cpu_ptr(&mce_timer); 1327 1327 unsigned long when = jiffies + interval; 1328 1328 unsigned long iv = __this_cpu_read(mce_next_interval); 1329 1329 ··· 1659 1659 1660 1660 static void __mcheck_cpu_init_timer(void) 1661 1661 { 1662 - struct timer_list *t = &__get_cpu_var(mce_timer); 1662 + struct timer_list *t = this_cpu_ptr(&mce_timer); 1663 1663 unsigned int cpu = smp_processor_id(); 1664 1664 1665 1665 setup_timer(t, mce_timer_fn, cpu); ··· 1702 1702 __mcheck_cpu_init_generic(); 1703 1703 __mcheck_cpu_init_vendor(c); 1704 1704 __mcheck_cpu_init_timer(); 1705 - INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); 1706 - init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb); 1705 + INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work); 1706 + init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb); 1707 1707 } 1708 1708 1709 1709 /* ··· 1955 1955 static void __mce_disable_bank(void *arg) 1956 1956 { 1957 1957 int bank = *((int *)arg); 1958 - __clear_bit(bank, __get_cpu_var(mce_poll_banks)); 1958 + __clear_bit(bank, 
this_cpu_ptr(mce_poll_banks)); 1959 1959 cmci_disable_bank(bank); 1960 1960 } 1961 1961 ··· 2065 2065 static void mce_syscore_resume(void) 2066 2066 { 2067 2067 __mcheck_cpu_init_generic(); 2068 - __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); 2068 + __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); 2069 2069 } 2070 2070 2071 2071 static struct syscore_ops mce_syscore_ops = { ··· 2080 2080 2081 2081 static void mce_cpu_restart(void *data) 2082 2082 { 2083 - if (!mce_available(__this_cpu_ptr(&cpu_info))) 2083 + if (!mce_available(raw_cpu_ptr(&cpu_info))) 2084 2084 return; 2085 2085 __mcheck_cpu_init_generic(); 2086 2086 __mcheck_cpu_init_timer(); ··· 2096 2096 /* Toggle features for corrected errors */ 2097 2097 static void mce_disable_cmci(void *data) 2098 2098 { 2099 - if (!mce_available(__this_cpu_ptr(&cpu_info))) 2099 + if (!mce_available(raw_cpu_ptr(&cpu_info))) 2100 2100 return; 2101 2101 cmci_clear(); 2102 2102 } 2103 2103 2104 2104 static void mce_enable_ce(void *all) 2105 2105 { 2106 - if (!mce_available(__this_cpu_ptr(&cpu_info))) 2106 + if (!mce_available(raw_cpu_ptr(&cpu_info))) 2107 2107 return; 2108 2108 cmci_reenable(); 2109 2109 cmci_recheck(); ··· 2336 2336 unsigned long action = *(unsigned long *)h; 2337 2337 int i; 2338 2338 2339 - if (!mce_available(__this_cpu_ptr(&cpu_info))) 2339 + if (!mce_available(raw_cpu_ptr(&cpu_info))) 2340 2340 return; 2341 2341 2342 2342 if (!(action & CPU_TASKS_FROZEN)) ··· 2354 2354 unsigned long action = *(unsigned long *)h; 2355 2355 int i; 2356 2356 2357 - if (!mce_available(__this_cpu_ptr(&cpu_info))) 2357 + if (!mce_available(raw_cpu_ptr(&cpu_info))) 2358 2358 return; 2359 2359 2360 2360 if (!(action & CPU_TASKS_FROZEN))
+1 -1
arch/x86/kernel/cpu/mcheck/mce_amd.c
··· 310 310 * event. 311 311 */ 312 312 machine_check_poll(MCP_TIMESTAMP, 313 - &__get_cpu_var(mce_poll_banks)); 313 + this_cpu_ptr(&mce_poll_banks)); 314 314 315 315 if (high & MASK_OVERFLOW_HI) { 316 316 rdmsrl(address, m.misc);
+11 -11
arch/x86/kernel/cpu/mcheck/mce_intel.c
··· 86 86 { 87 87 if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) 88 88 return; 89 - machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); 89 + machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); 90 90 } 91 91 92 92 void mce_intel_hcpu_update(unsigned long cpu) ··· 145 145 u64 val; 146 146 147 147 raw_spin_lock_irqsave(&cmci_discover_lock, flags); 148 - owned = __get_cpu_var(mce_banks_owned); 148 + owned = this_cpu_ptr(mce_banks_owned); 149 149 for_each_set_bit(bank, owned, MAX_NR_BANKS) { 150 150 rdmsrl(MSR_IA32_MCx_CTL2(bank), val); 151 151 val &= ~MCI_CTL2_CMCI_EN; ··· 195 195 { 196 196 if (cmci_storm_detect()) 197 197 return; 198 - machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); 198 + machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); 199 199 mce_notify_irq(); 200 200 } 201 201 ··· 206 206 */ 207 207 static void cmci_discover(int banks) 208 208 { 209 - unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); 209 + unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned); 210 210 unsigned long flags; 211 211 int i; 212 212 int bios_wrong_thresh = 0; ··· 228 228 /* Already owned by someone else? */ 229 229 if (val & MCI_CTL2_CMCI_EN) { 230 230 clear_bit(i, owned); 231 - __clear_bit(i, __get_cpu_var(mce_poll_banks)); 231 + __clear_bit(i, this_cpu_ptr(mce_poll_banks)); 232 232 continue; 233 233 } 234 234 ··· 252 252 /* Did the enable bit stick? -- the bank supports CMCI */ 253 253 if (val & MCI_CTL2_CMCI_EN) { 254 254 set_bit(i, owned); 255 - __clear_bit(i, __get_cpu_var(mce_poll_banks)); 255 + __clear_bit(i, this_cpu_ptr(mce_poll_banks)); 256 256 /* 257 257 * We are able to set thresholds for some banks that 258 258 * had a threshold of 0. 
This means the BIOS has not ··· 263 263 (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) 264 264 bios_wrong_thresh = 1; 265 265 } else { 266 - WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); 266 + WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks))); 267 267 } 268 268 } 269 269 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); ··· 284 284 unsigned long flags; 285 285 int banks; 286 286 287 - if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) 287 + if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) 288 288 return; 289 289 local_irq_save(flags); 290 - machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); 290 + machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); 291 291 local_irq_restore(flags); 292 292 } 293 293 ··· 296 296 { 297 297 u64 val; 298 298 299 - if (!test_bit(bank, __get_cpu_var(mce_banks_owned))) 299 + if (!test_bit(bank, this_cpu_ptr(mce_banks_owned))) 300 300 return; 301 301 rdmsrl(MSR_IA32_MCx_CTL2(bank), val); 302 302 val &= ~MCI_CTL2_CMCI_EN; 303 303 wrmsrl(MSR_IA32_MCx_CTL2(bank), val); 304 - __clear_bit(bank, __get_cpu_var(mce_banks_owned)); 304 + __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); 305 305 } 306 306 307 307 /*
+11 -11
arch/x86/kernel/cpu/perf_event.c
··· 487 487 488 488 void x86_pmu_disable_all(void) 489 489 { 490 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 490 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 491 491 int idx; 492 492 493 493 for (idx = 0; idx < x86_pmu.num_counters; idx++) { ··· 505 505 506 506 static void x86_pmu_disable(struct pmu *pmu) 507 507 { 508 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 508 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 509 509 510 510 if (!x86_pmu_initialized()) 511 511 return; ··· 522 522 523 523 void x86_pmu_enable_all(int added) 524 524 { 525 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 525 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 526 526 int idx; 527 527 528 528 for (idx = 0; idx < x86_pmu.num_counters; idx++) { ··· 869 869 870 870 static void x86_pmu_enable(struct pmu *pmu) 871 871 { 872 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 872 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 873 873 struct perf_event *event; 874 874 struct hw_perf_event *hwc; 875 875 int i, added = cpuc->n_added; ··· 1020 1020 */ 1021 1021 static int x86_pmu_add(struct perf_event *event, int flags) 1022 1022 { 1023 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1023 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1024 1024 struct hw_perf_event *hwc; 1025 1025 int assign[X86_PMC_IDX_MAX]; 1026 1026 int n, n0, ret; ··· 1071 1071 1072 1072 static void x86_pmu_start(struct perf_event *event, int flags) 1073 1073 { 1074 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1074 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1075 1075 int idx = event->hw.idx; 1076 1076 1077 1077 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) ··· 1150 1150 1151 1151 void x86_pmu_stop(struct perf_event *event, int flags) 1152 1152 { 1153 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1153 + struct cpu_hw_events 
*cpuc = this_cpu_ptr(&cpu_hw_events); 1154 1154 struct hw_perf_event *hwc = &event->hw; 1155 1155 1156 1156 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { ··· 1172 1172 1173 1173 static void x86_pmu_del(struct perf_event *event, int flags) 1174 1174 { 1175 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1175 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1176 1176 int i; 1177 1177 1178 1178 /* ··· 1227 1227 int idx, handled = 0; 1228 1228 u64 val; 1229 1229 1230 - cpuc = &__get_cpu_var(cpu_hw_events); 1230 + cpuc = this_cpu_ptr(&cpu_hw_events); 1231 1231 1232 1232 /* 1233 1233 * Some chipsets need to unmask the LVTPC in a particular spot ··· 1636 1636 */ 1637 1637 static int x86_pmu_commit_txn(struct pmu *pmu) 1638 1638 { 1639 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1639 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1640 1640 int assign[X86_PMC_IDX_MAX]; 1641 1641 int n, ret; 1642 1642 ··· 1995 1995 if (idx > GDT_ENTRIES) 1996 1996 return 0; 1997 1997 1998 - desc = __this_cpu_ptr(&gdt_page.gdt[0]); 1998 + desc = raw_cpu_ptr(gdt_page.gdt); 1999 1999 } 2000 2000 2001 2001 return get_desc_base(desc + idx);
+2 -2
arch/x86/kernel/cpu/perf_event_amd.c
··· 699 699 700 700 void amd_pmu_enable_virt(void) 701 701 { 702 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 702 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 703 703 704 704 cpuc->perf_ctr_virt_mask = 0; 705 705 ··· 711 711 712 712 void amd_pmu_disable_virt(void) 713 713 { 714 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 714 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 715 715 716 716 /* 717 717 * We only mask out the Host-only bit so that host-only counting works
+9 -9
arch/x86/kernel/cpu/perf_event_intel.c
··· 1045 1045 1046 1046 static void intel_pmu_disable_all(void) 1047 1047 { 1048 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1048 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1049 1049 1050 1050 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 1051 1051 ··· 1058 1058 1059 1059 static void intel_pmu_enable_all(int added) 1060 1060 { 1061 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1061 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1062 1062 1063 1063 intel_pmu_pebs_enable_all(); 1064 1064 intel_pmu_lbr_enable_all(); ··· 1092 1092 */ 1093 1093 static void intel_pmu_nhm_workaround(void) 1094 1094 { 1095 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1095 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1096 1096 static const unsigned long nhm_magic[4] = { 1097 1097 0x4300B5, 1098 1098 0x4300D2, ··· 1191 1191 static void intel_pmu_disable_event(struct perf_event *event) 1192 1192 { 1193 1193 struct hw_perf_event *hwc = &event->hw; 1194 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1194 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1195 1195 1196 1196 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { 1197 1197 intel_pmu_disable_bts(); ··· 1255 1255 static void intel_pmu_enable_event(struct perf_event *event) 1256 1256 { 1257 1257 struct hw_perf_event *hwc = &event->hw; 1258 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1258 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1259 1259 1260 1260 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { 1261 1261 if (!__this_cpu_read(cpu_hw_events.enabled)) ··· 1349 1349 u64 status; 1350 1350 int handled; 1351 1351 1352 - cpuc = &__get_cpu_var(cpu_hw_events); 1352 + cpuc = this_cpu_ptr(&cpu_hw_events); 1353 1353 1354 1354 /* 1355 1355 * No known reason to not always do late ACK, ··· 1781 1781 1782 1782 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) 1783 1783 { 1784 - 
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1784 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1785 1785 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 1786 1786 1787 1787 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; ··· 1802 1802 1803 1803 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) 1804 1804 { 1805 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1805 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1806 1806 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 1807 1807 int idx; 1808 1808 ··· 1836 1836 1837 1837 static void core_pmu_enable_all(int added) 1838 1838 { 1839 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1839 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1840 1840 int idx; 1841 1841 1842 1842 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+10 -10
arch/x86/kernel/cpu/perf_event_intel_ds.c
··· 475 475 476 476 void intel_pmu_disable_bts(void) 477 477 { 478 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 478 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 479 479 unsigned long debugctlmsr; 480 480 481 481 if (!cpuc->ds) ··· 492 492 493 493 int intel_pmu_drain_bts_buffer(void) 494 494 { 495 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 495 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 496 496 struct debug_store *ds = cpuc->ds; 497 497 struct bts_record { 498 498 u64 from; ··· 712 712 713 713 void intel_pmu_pebs_enable(struct perf_event *event) 714 714 { 715 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 715 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 716 716 struct hw_perf_event *hwc = &event->hw; 717 717 718 718 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; ··· 727 727 728 728 void intel_pmu_pebs_disable(struct perf_event *event) 729 729 { 730 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 730 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 731 731 struct hw_perf_event *hwc = &event->hw; 732 732 733 733 cpuc->pebs_enabled &= ~(1ULL << hwc->idx); ··· 745 745 746 746 void intel_pmu_pebs_enable_all(void) 747 747 { 748 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 748 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 749 749 750 750 if (cpuc->pebs_enabled) 751 751 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); ··· 753 753 754 754 void intel_pmu_pebs_disable_all(void) 755 755 { 756 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 756 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 757 757 758 758 if (cpuc->pebs_enabled) 759 759 wrmsrl(MSR_IA32_PEBS_ENABLE, 0); ··· 761 761 762 762 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) 763 763 { 764 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 764 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 765 765 
unsigned long from = cpuc->lbr_entries[0].from; 766 766 unsigned long old_to, to = cpuc->lbr_entries[0].to; 767 767 unsigned long ip = regs->ip; ··· 868 868 * We cast to the biggest pebs_record but are careful not to 869 869 * unconditionally access the 'extra' entries. 870 870 */ 871 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 871 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 872 872 struct pebs_record_hsw *pebs = __pebs; 873 873 struct perf_sample_data data; 874 874 struct pt_regs regs; ··· 957 957 958 958 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) 959 959 { 960 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 960 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 961 961 struct debug_store *ds = cpuc->ds; 962 962 struct perf_event *event = cpuc->events[0]; /* PMC0 only */ 963 963 struct pebs_record_core *at, *top; ··· 998 998 999 999 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) 1000 1000 { 1001 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1001 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1002 1002 struct debug_store *ds = cpuc->ds; 1003 1003 struct perf_event *event = NULL; 1004 1004 void *at, *top;
+6 -6
arch/x86/kernel/cpu/perf_event_intel_lbr.c
··· 133 133 static void __intel_pmu_lbr_enable(void) 134 134 { 135 135 u64 debugctl; 136 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 136 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 137 137 138 138 if (cpuc->lbr_sel) 139 139 wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); ··· 183 183 184 184 void intel_pmu_lbr_enable(struct perf_event *event) 185 185 { 186 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 186 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 187 187 188 188 if (!x86_pmu.lbr_nr) 189 189 return; ··· 203 203 204 204 void intel_pmu_lbr_disable(struct perf_event *event) 205 205 { 206 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 206 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 207 207 208 208 if (!x86_pmu.lbr_nr) 209 209 return; ··· 220 220 221 221 void intel_pmu_lbr_enable_all(void) 222 222 { 223 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 223 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 224 224 225 225 if (cpuc->lbr_users) 226 226 __intel_pmu_lbr_enable(); ··· 228 228 229 229 void intel_pmu_lbr_disable_all(void) 230 230 { 231 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 231 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 232 232 233 233 if (cpuc->lbr_users) 234 234 __intel_pmu_lbr_disable(); ··· 332 332 333 333 void intel_pmu_lbr_read(void) 334 334 { 335 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 335 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 336 336 337 337 if (!cpuc->lbr_users) 338 338 return;
+6 -6
arch/x86/kernel/cpu/perf_event_intel_rapl.c
··· 135 135 * or use ldexp(count, -32). 136 136 * Watts = Joules/Time delta 137 137 */ 138 - return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); 138 + return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); 139 139 } 140 140 141 141 static u64 rapl_event_update(struct perf_event *event) ··· 187 187 188 188 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) 189 189 { 190 - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); 190 + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); 191 191 struct perf_event *event; 192 192 unsigned long flags; 193 193 ··· 234 234 235 235 static void rapl_pmu_event_start(struct perf_event *event, int mode) 236 236 { 237 - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); 237 + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); 238 238 unsigned long flags; 239 239 240 240 spin_lock_irqsave(&pmu->lock, flags); ··· 244 244 245 245 static void rapl_pmu_event_stop(struct perf_event *event, int mode) 246 246 { 247 - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); 247 + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); 248 248 struct hw_perf_event *hwc = &event->hw; 249 249 unsigned long flags; 250 250 ··· 278 278 279 279 static int rapl_pmu_event_add(struct perf_event *event, int mode) 280 280 { 281 - struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); 281 + struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); 282 282 struct hw_perf_event *hwc = &event->hw; 283 283 unsigned long flags; 284 284 ··· 696 696 return -1; 697 697 } 698 698 699 - pmu = __get_cpu_var(rapl_pmu); 699 + pmu = __this_cpu_read(rapl_pmu); 700 700 701 701 pr_info("RAPL PMU detected, hw unit 2^-%d Joules," 702 702 " API unit is 2^-32 Joules,"
+1 -1
arch/x86/kernel/cpu/perf_event_knc.c
··· 217 217 int bit, loops; 218 218 u64 status; 219 219 220 - cpuc = &__get_cpu_var(cpu_hw_events); 220 + cpuc = this_cpu_ptr(&cpu_hw_events); 221 221 222 222 knc_pmu_disable_all(); 223 223
+3 -3
arch/x86/kernel/cpu/perf_event_p4.c
··· 915 915 916 916 static void p4_pmu_disable_all(void) 917 917 { 918 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 918 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 919 919 int idx; 920 920 921 921 for (idx = 0; idx < x86_pmu.num_counters; idx++) { ··· 984 984 985 985 static void p4_pmu_enable_all(int added) 986 986 { 987 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 987 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 988 988 int idx; 989 989 990 990 for (idx = 0; idx < x86_pmu.num_counters; idx++) { ··· 1004 1004 int idx, handled = 0; 1005 1005 u64 val; 1006 1006 1007 - cpuc = &__get_cpu_var(cpu_hw_events); 1007 + cpuc = this_cpu_ptr(&cpu_hw_events); 1008 1008 1009 1009 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1010 1010 int overflow;
+4 -4
arch/x86/kernel/hw_breakpoint.c
··· 108 108 int i; 109 109 110 110 for (i = 0; i < HBP_NUM; i++) { 111 - struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); 111 + struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); 112 112 113 113 if (!*slot) { 114 114 *slot = bp; ··· 122 122 set_debugreg(info->address, i); 123 123 __this_cpu_write(cpu_debugreg[i], info->address); 124 124 125 - dr7 = &__get_cpu_var(cpu_dr7); 125 + dr7 = this_cpu_ptr(&cpu_dr7); 126 126 *dr7 |= encode_dr7(i, info->len, info->type); 127 127 128 128 set_debugreg(*dr7, 7); ··· 146 146 int i; 147 147 148 148 for (i = 0; i < HBP_NUM; i++) { 149 - struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); 149 + struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); 150 150 151 151 if (*slot == bp) { 152 152 *slot = NULL; ··· 157 157 if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) 158 158 return; 159 159 160 - dr7 = &__get_cpu_var(cpu_dr7); 160 + dr7 = this_cpu_ptr(&cpu_dr7); 161 161 *dr7 &= ~__encode_dr7(i, info->len, info->type); 162 162 163 163 set_debugreg(*dr7, 7);
+3 -3
arch/x86/kernel/irq_64.c
··· 52 52 regs->sp <= curbase + THREAD_SIZE) 53 53 return; 54 54 55 - irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) + 55 + irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) + 56 56 STACK_TOP_MARGIN; 57 - irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr); 57 + irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); 58 58 if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) 59 59 return; 60 60 61 - oist = &__get_cpu_var(orig_ist); 61 + oist = this_cpu_ptr(&orig_ist); 62 62 estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; 63 63 estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; 64 64 if (regs->sp >= estack_top && regs->sp <= estack_bottom)
+11 -11
arch/x86/kernel/kvm.c
··· 243 243 { 244 244 u32 reason = 0; 245 245 246 - if (__get_cpu_var(apf_reason).enabled) { 247 - reason = __get_cpu_var(apf_reason).reason; 248 - __get_cpu_var(apf_reason).reason = 0; 246 + if (__this_cpu_read(apf_reason.enabled)) { 247 + reason = __this_cpu_read(apf_reason.reason); 248 + __this_cpu_write(apf_reason.reason, 0); 249 249 } 250 250 251 251 return reason; ··· 318 318 * there's no need for lock or memory barriers. 319 319 * An optimization barrier is implied in apic write. 320 320 */ 321 - if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) 321 + if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) 322 322 return; 323 323 apic_write(APIC_EOI, APIC_EOI_ACK); 324 324 } ··· 329 329 return; 330 330 331 331 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { 332 - u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason)); 332 + u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); 333 333 334 334 #ifdef CONFIG_PREEMPT 335 335 pa |= KVM_ASYNC_PF_SEND_ALWAYS; 336 336 #endif 337 337 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); 338 - __get_cpu_var(apf_reason).enabled = 1; 338 + __this_cpu_write(apf_reason.enabled, 1); 339 339 printk(KERN_INFO"KVM setup async PF for cpu %d\n", 340 340 smp_processor_id()); 341 341 } ··· 344 344 unsigned long pa; 345 345 /* Size alignment is implied but just to make it explicit. 
*/ 346 346 BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); 347 - __get_cpu_var(kvm_apic_eoi) = 0; 348 - pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi)) 347 + __this_cpu_write(kvm_apic_eoi, 0); 348 + pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) 349 349 | KVM_MSR_ENABLED; 350 350 wrmsrl(MSR_KVM_PV_EOI_EN, pa); 351 351 } ··· 356 356 357 357 static void kvm_pv_disable_apf(void) 358 358 { 359 - if (!__get_cpu_var(apf_reason).enabled) 359 + if (!__this_cpu_read(apf_reason.enabled)) 360 360 return; 361 361 362 362 wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); 363 - __get_cpu_var(apf_reason).enabled = 0; 363 + __this_cpu_write(apf_reason.enabled, 0); 364 364 365 365 printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", 366 366 smp_processor_id()); ··· 716 716 if (in_nmi()) 717 717 return; 718 718 719 - w = &__get_cpu_var(klock_waiting); 719 + w = this_cpu_ptr(&klock_waiting); 720 720 cpu = smp_processor_id(); 721 721 start = spin_time_start(); 722 722
+3 -3
arch/x86/kvm/svm.c
··· 670 670 671 671 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { 672 672 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); 673 - __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT; 673 + __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); 674 674 } 675 675 676 676 ··· 1312 1312 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); 1313 1313 1314 1314 if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && 1315 - svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) { 1316 - __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio; 1315 + svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) { 1316 + __this_cpu_write(current_tsc_ratio, svm->tsc_ratio); 1317 1317 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); 1318 1318 } 1319 1319 }
+5 -5
arch/x86/kvm/vmx.c
··· 1601 1601 /* 1602 1602 * VT restores TR but not its size. Useless. 1603 1603 */ 1604 - struct desc_ptr *gdt = &__get_cpu_var(host_gdt); 1604 + struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); 1605 1605 struct desc_struct *descs; 1606 1606 1607 1607 descs = (void *)gdt->address; ··· 1647 1647 1648 1648 static unsigned long segment_base(u16 selector) 1649 1649 { 1650 - struct desc_ptr *gdt = &__get_cpu_var(host_gdt); 1650 + struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); 1651 1651 struct desc_struct *d; 1652 1652 unsigned long table_base; 1653 1653 unsigned long v; ··· 1777 1777 */ 1778 1778 if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) 1779 1779 stts(); 1780 - load_gdt(&__get_cpu_var(host_gdt)); 1780 + load_gdt(this_cpu_ptr(&host_gdt)); 1781 1781 } 1782 1782 1783 1783 static void vmx_load_host_state(struct vcpu_vmx *vmx) ··· 1807 1807 } 1808 1808 1809 1809 if (vmx->loaded_vmcs->cpu != cpu) { 1810 - struct desc_ptr *gdt = &__get_cpu_var(host_gdt); 1810 + struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); 1811 1811 unsigned long sysenter_esp; 1812 1812 1813 1813 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); ··· 2744 2744 ept_sync_global(); 2745 2745 } 2746 2746 2747 - native_store_gdt(&__get_cpu_var(host_gdt)); 2747 + native_store_gdt(this_cpu_ptr(&host_gdt)); 2748 2748 2749 2749 return 0; 2750 2750 }
+1 -1
arch/x86/kvm/x86.c
··· 1556 1556 1557 1557 /* Keep irq disabled to prevent changes to the clock */ 1558 1558 local_irq_save(flags); 1559 - this_tsc_khz = __get_cpu_var(cpu_tsc_khz); 1559 + this_tsc_khz = __this_cpu_read(cpu_tsc_khz); 1560 1560 if (unlikely(this_tsc_khz == 0)) { 1561 1561 local_irq_restore(flags); 1562 1562 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+7 -7
arch/x86/mm/kmemcheck/kmemcheck.c
··· 140 140 141 141 bool kmemcheck_active(struct pt_regs *regs) 142 142 { 143 - struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 143 + struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); 144 144 145 145 return data->balance > 0; 146 146 } ··· 148 148 /* Save an address that needs to be shown/hidden */ 149 149 static void kmemcheck_save_addr(unsigned long addr) 150 150 { 151 - struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 151 + struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); 152 152 153 153 BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); 154 154 data->addr[data->n_addrs++] = addr; ··· 156 156 157 157 static unsigned int kmemcheck_show_all(void) 158 158 { 159 - struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 159 + struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); 160 160 unsigned int i; 161 161 unsigned int n; 162 162 ··· 169 169 170 170 static unsigned int kmemcheck_hide_all(void) 171 171 { 172 - struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 172 + struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); 173 173 unsigned int i; 174 174 unsigned int n; 175 175 ··· 185 185 */ 186 186 void kmemcheck_show(struct pt_regs *regs) 187 187 { 188 - struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 188 + struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); 189 189 190 190 BUG_ON(!irqs_disabled()); 191 191 ··· 226 226 */ 227 227 void kmemcheck_hide(struct pt_regs *regs) 228 228 { 229 - struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 229 + struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); 230 230 int n; 231 231 232 232 BUG_ON(!irqs_disabled()); ··· 528 528 const uint8_t *insn_primary; 529 529 unsigned int size; 530 530 531 - struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); 531 + struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); 532 
532 533 533 /* Recursive fault -- ouch. */ 534 534 if (data->busy) {
+4 -4
arch/x86/oprofile/nmi_int.c
··· 64 64 static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) 65 65 { 66 66 if (ctr_running) 67 - model->check_ctrs(regs, &__get_cpu_var(cpu_msrs)); 67 + model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs)); 68 68 else if (!nmi_enabled) 69 69 return NMI_DONE; 70 70 else 71 - model->stop(&__get_cpu_var(cpu_msrs)); 71 + model->stop(this_cpu_ptr(&cpu_msrs)); 72 72 return NMI_HANDLED; 73 73 } 74 74 ··· 91 91 92 92 static void nmi_cpu_start(void *dummy) 93 93 { 94 - struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); 94 + struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); 95 95 if (!msrs->controls) 96 96 WARN_ON_ONCE(1); 97 97 else ··· 111 111 112 112 static void nmi_cpu_stop(void *dummy) 113 113 { 114 - struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); 114 + struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); 115 115 if (!msrs->controls) 116 116 WARN_ON_ONCE(1); 117 117 else
+1 -1
arch/x86/platform/uv/uv_time.c
··· 365 365 366 366 static __init void uv_rtc_register_clockevents(struct work_struct *dummy) 367 367 { 368 - struct clock_event_device *ced = &__get_cpu_var(cpu_ced); 368 + struct clock_event_device *ced = this_cpu_ptr(&cpu_ced); 369 369 370 370 *ced = clock_event_device_uv; 371 371 ced->cpumask = cpumask_of(smp_processor_id());
+2 -2
arch/x86/xen/enlighten.c
··· 821 821 822 822 void xen_copy_trap_info(struct trap_info *traps) 823 823 { 824 - const struct desc_ptr *desc = &__get_cpu_var(idt_desc); 824 + const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); 825 825 826 826 xen_convert_trap_info(desc, traps); 827 827 } ··· 838 838 839 839 spin_lock(&lock); 840 840 841 - __get_cpu_var(idt_desc) = *desc; 841 + memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); 842 842 843 843 xen_convert_trap_info(desc, traps); 844 844
+4 -4
arch/x86/xen/multicalls.c
··· 54 54 55 55 void xen_mc_flush(void) 56 56 { 57 - struct mc_buffer *b = &__get_cpu_var(mc_buffer); 57 + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); 58 58 struct multicall_entry *mc; 59 59 int ret = 0; 60 60 unsigned long flags; ··· 131 131 132 132 struct multicall_space __xen_mc_entry(size_t args) 133 133 { 134 - struct mc_buffer *b = &__get_cpu_var(mc_buffer); 134 + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); 135 135 struct multicall_space ret; 136 136 unsigned argidx = roundup(b->argidx, sizeof(u64)); 137 137 ··· 162 162 163 163 struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) 164 164 { 165 - struct mc_buffer *b = &__get_cpu_var(mc_buffer); 165 + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); 166 166 struct multicall_space ret = { NULL, NULL }; 167 167 168 168 BUG_ON(preemptible()); ··· 192 192 193 193 void xen_mc_callback(void (*fn)(void *), void *data) 194 194 { 195 - struct mc_buffer *b = &__get_cpu_var(mc_buffer); 195 + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); 196 196 struct callback *cb; 197 197 198 198 if (b->cbidx == MC_BATCH) {
+1 -1
arch/x86/xen/spinlock.c
··· 109 109 __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) 110 110 { 111 111 int irq = __this_cpu_read(lock_kicker_irq); 112 - struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting); 112 + struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); 113 113 int cpu = smp_processor_id(); 114 114 u64 start; 115 115 unsigned long flags;
+5 -5
arch/x86/xen/time.c
··· 80 80 81 81 BUG_ON(preemptible()); 82 82 83 - state = &__get_cpu_var(xen_runstate); 83 + state = this_cpu_ptr(&xen_runstate); 84 84 85 85 /* 86 86 * The runstate info is always updated by the hypervisor on ··· 123 123 124 124 WARN_ON(state.state != RUNSTATE_running); 125 125 126 - snap = &__get_cpu_var(xen_runstate_snapshot); 126 + snap = this_cpu_ptr(&xen_runstate_snapshot); 127 127 128 128 /* work out how much time the VCPU has not been runn*ing* */ 129 129 runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; ··· 158 158 cycle_t ret; 159 159 160 160 preempt_disable_notrace(); 161 - src = &__get_cpu_var(xen_vcpu)->time; 161 + src = this_cpu_ptr(&xen_vcpu->time); 162 162 ret = pvclock_clocksource_read(src); 163 163 preempt_enable_notrace(); 164 164 return ret; ··· 397 397 398 398 static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) 399 399 { 400 - struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt; 400 + struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt); 401 401 irqreturn_t ret; 402 402 403 403 ret = IRQ_NONE; ··· 460 460 { 461 461 BUG_ON(preemptible()); 462 462 463 - clockevents_register_device(&__get_cpu_var(xen_clock_events).evt); 463 + clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); 464 464 } 465 465 466 466 void xen_timer_resume(void)