···208208 * We don't need to disable preemption here because any CPU can209209 * safely use any IOMMU pool.210210 */211211- pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);211211+ pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);212212213213 if (largealloc)214214 pool = &(tbl->large_pool);
+2-2
arch/powerpc/kernel/irq.c
···114114static inline notrace int decrementer_check_overflow(void)115115{116116 u64 now = get_tb_or_rtc();117117- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);117117+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);118118119119 return now >= *next_tb;120120}···499499500500 /* And finally process it */501501 if (unlikely(irq == NO_IRQ))502502- __get_cpu_var(irq_stat).spurious_irqs++;502502+ __this_cpu_inc(irq_stat.spurious_irqs);503503 else504504 generic_handle_irq(irq);505505
···7373 uint64_t nip, uint64_t addr)7474{7575 uint64_t srr1;7676- int index = __get_cpu_var(mce_nest_count)++;7777- struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);7676+ int index = __this_cpu_inc_return(mce_nest_count) - 1;7777+ struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);78787979 /*8080 * Return if we don't have enough space to log mce event.···143143 */144144int get_mce_event(struct machine_check_event *mce, bool release)145145{146146- int index = __get_cpu_var(mce_nest_count) - 1;146146+ int index = __this_cpu_read(mce_nest_count) - 1;147147 struct machine_check_event *mc_evt;148148 int ret = 0;149149···153153154154 /* Check if we have MCE info to process. */155155 if (index < MAX_MC_EVT) {156156- mc_evt = &__get_cpu_var(mce_event[index]);156156+ mc_evt = this_cpu_ptr(&mce_event[index]);157157 /* Copy the event structure and release the original */158158 if (mce)159159 *mce = *mc_evt;···163163 }164164 /* Decrement the count to free the slot. */165165 if (release)166166- __get_cpu_var(mce_nest_count)--;166166+ __this_cpu_dec(mce_nest_count);167167168168 return ret;169169}···184184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))185185 return;186186187187- index = __get_cpu_var(mce_queue_count)++;187187+ index = __this_cpu_inc_return(mce_queue_count) - 1;188188 /* If queue is full, just return for now. */189189 if (index >= MAX_MC_EVT) {190190- __get_cpu_var(mce_queue_count)--;190190+ __this_cpu_dec(mce_queue_count);191191 return;192192 }193193- __get_cpu_var(mce_event_queue[index]) = evt;193193+ memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));194194195195 /* Queue irq work to process this event later. 
*/196196 irq_work_queue(&mce_event_process_work);···208208 * For now just print it to console.209209 * TODO: log this error event to FSP or nvram.210210 */211211- while (__get_cpu_var(mce_queue_count) > 0) {212212- index = __get_cpu_var(mce_queue_count) - 1;211211+ while (__this_cpu_read(mce_queue_count) > 0) {212212+ index = __this_cpu_read(mce_queue_count) - 1;213213 machine_check_print_event_info(214214- &__get_cpu_var(mce_event_queue[index]));215215- __get_cpu_var(mce_queue_count)--;214214+ this_cpu_ptr(&mce_event_queue[index]));215215+ __this_cpu_dec(mce_queue_count);216216 }217217}218218
···243243244244irqreturn_t smp_ipi_demux(void)245245{246246- struct cpu_messages *info = &__get_cpu_var(ipi_message);246246+ struct cpu_messages *info = this_cpu_ptr(&ipi_message);247247 unsigned int all;248248249249 mb(); /* order any irq clear */···442442 idle_task_exit();443443 cpu = smp_processor_id();444444 printk(KERN_DEBUG "CPU%d offline\n", cpu);445445- __get_cpu_var(cpu_state) = CPU_DEAD;445445+ __this_cpu_write(cpu_state, CPU_DEAD);446446 smp_wmb();447447- while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)447447+ while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)448448 cpu_relax();449449}450450
+2-2
arch/powerpc/kernel/sysfs.c
···394394 ppc_set_pmu_inuse(1);395395396396 /* Only need to enable them once */397397- if (__get_cpu_var(pmcs_enabled))397397+ if (__this_cpu_read(pmcs_enabled))398398 return;399399400400- __get_cpu_var(pmcs_enabled) = 1;400400+ __this_cpu_write(pmcs_enabled, 1);401401402402 if (ppc_md.enable_pmcs)403403 ppc_md.enable_pmcs();
+11-11
arch/powerpc/kernel/time.c
···458458459459DEFINE_PER_CPU(u8, irq_work_pending);460460461461-#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1462462-#define test_irq_work_pending() __get_cpu_var(irq_work_pending)463463-#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0461461+#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)462462+#define test_irq_work_pending() __this_cpu_read(irq_work_pending)463463+#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)464464465465#endif /* 32 vs 64 bit */466466···482482static void __timer_interrupt(void)483483{484484 struct pt_regs *regs = get_irq_regs();485485- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);486486- struct clock_event_device *evt = &__get_cpu_var(decrementers);485485+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);486486+ struct clock_event_device *evt = this_cpu_ptr(&decrementers);487487 u64 now;488488489489 trace_timer_interrupt_entry(regs);···498498 *next_tb = ~(u64)0;499499 if (evt->event_handler)500500 evt->event_handler(evt);501501- __get_cpu_var(irq_stat).timer_irqs_event++;501501+ __this_cpu_inc(irq_stat.timer_irqs_event);502502 } else {503503 now = *next_tb - now;504504 if (now <= DECREMENTER_MAX)···506506 /* We may have raced with new irq work */507507 if (test_irq_work_pending())508508 set_dec(1);509509- __get_cpu_var(irq_stat).timer_irqs_others++;509509+ __this_cpu_inc(irq_stat.timer_irqs_others);510510 }511511512512#ifdef CONFIG_PPC64513513 /* collect purr register values often, for accurate calculations */514514 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {515515- struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);515515+ struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);516516 cu->current_tb = mfspr(SPRN_PURR);517517 }518518#endif···527527void timer_interrupt(struct pt_regs * regs)528528{529529 struct pt_regs *old_regs;530530- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);530530+ u64 *next_tb = 
this_cpu_ptr(&decrementers_next_tb);531531532532 /* Ensure a positive value is written to the decrementer, or else533533 * some CPUs will continue to take decrementer exceptions.···813813static int decrementer_set_next_event(unsigned long evt,814814 struct clock_event_device *dev)815815{816816- __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;816816+ __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);817817 set_dec(evt);818818819819 /* We may have raced with new irq work */···833833/* Interrupt handler for the timer broadcast IPI */834834void tick_broadcast_ipi_handler(void)835835{836836- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);836836+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);837837838838 *next_tb = get_tb_or_rtc();839839 __timer_interrupt();
+4-4
arch/powerpc/kernel/traps.c
···295295{296296 long handled = 0;297297298298- __get_cpu_var(irq_stat).mce_exceptions++;298298+ __this_cpu_inc(irq_stat.mce_exceptions);299299300300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)301301 handled = cur_cpu_spec->machine_check_early(regs);···304304305305long hmi_exception_realmode(struct pt_regs *regs)306306{307307- __get_cpu_var(irq_stat).hmi_exceptions++;307307+ __this_cpu_inc(irq_stat.hmi_exceptions);308308309309 if (ppc_md.hmi_exception_early)310310 ppc_md.hmi_exception_early(regs);···700700 enum ctx_state prev_state = exception_enter();701701 int recover = 0;702702703703- __get_cpu_var(irq_stat).mce_exceptions++;703703+ __this_cpu_inc(irq_stat.mce_exceptions);704704705705 /* See if any machine dependent calls. In theory, we would want706706 * to call the CPU first, and call the ppc_md. one if the CPU···1519151915201520void performance_monitor_exception(struct pt_regs *regs)15211521{15221522- __get_cpu_var(irq_stat).pmu_irqs++;15221522+ __this_cpu_inc(irq_stat.pmu_irqs);1523152315241524 perf_irq(regs);15251525}
+7-7
arch/powerpc/kvm/e500.c
···7676 unsigned long sid;7777 int ret = -1;78787979- sid = ++(__get_cpu_var(pcpu_last_used_sid));7979+ sid = __this_cpu_inc_return(pcpu_last_used_sid);8080 if (sid < NUM_TIDS) {8181- __get_cpu_var(pcpu_sids).entry[sid] = entry;8181+ __this_cpu_write(pcpu_sids.entry[sid], entry);8282 entry->val = sid;8383- entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];8383+ entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);8484 ret = sid;8585 }8686···108108static inline int local_sid_lookup(struct id *entry)109109{110110 if (entry && entry->val != 0 &&111111- __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&112112- entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])111111+ __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&112112+ entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))113113 return entry->val;114114 return -1;115115}···117117/* Invalidate all id mappings on local core -- call with preempt disabled */118118static inline void local_sid_destroy_all(void)119119{120120- __get_cpu_var(pcpu_last_used_sid) = 0;121121- memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));120120+ __this_cpu_write(pcpu_last_used_sid, 0);121121+ memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));122122}123123124124static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
···629629 unsigned long want_v;630630 unsigned long flags;631631 real_pte_t pte;632632- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);632632+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);633633 unsigned long psize = batch->psize;634634 int ssize = batch->ssize;635635 int i;
+1-1
arch/powerpc/mm/hash_utils_64.c
···13221322 else {13231323 int i;13241324 struct ppc64_tlb_batch *batch =13251325- &__get_cpu_var(ppc64_tlb_batch);13251325+ this_cpu_ptr(&ppc64_tlb_batch);1326132613271327 for (i = 0; i < number; i++)13281328 flush_hash_page(batch->vpn[i], batch->pte[i],
+3-3
arch/powerpc/mm/hugetlbpage-book3e.c
···33333434 ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;35353636- index = __get_cpu_var(next_tlbcam_idx);3636+ index = __this_cpu_read(next_tlbcam_idx);37373838 /* Just round-robin the entries and wrap when we hit the end */3939 if (unlikely(index == ncams - 1))4040- __get_cpu_var(next_tlbcam_idx) = tlbcam_index;4040+ __this_cpu_write(next_tlbcam_idx, tlbcam_index);4141 else4242- __get_cpu_var(next_tlbcam_idx)++;4242+ __this_cpu_inc(next_tlbcam_idx);43434444 return index;4545}
···110110 if (opcode > MAX_HCALL_OPCODE)111111 return;112112113113- h = &__get_cpu_var(hcall_stats)[opcode / 4];113113+ h = this_cpu_ptr(&hcall_stats[opcode / 4]);114114 h->tb_start = mftb();115115 h->purr_start = mfspr(SPRN_PURR);116116}···123123 if (opcode > MAX_HCALL_OPCODE)124124 return;125125126126- h = &__get_cpu_var(hcall_stats)[opcode / 4];126126+ h = this_cpu_ptr(&hcall_stats[opcode / 4]);127127 h->num_calls++;128128 h->tb_total += mftb() - h->tb_start;129129 h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
+4-4
arch/powerpc/platforms/pseries/iommu.c
···199199200200 local_irq_save(flags); /* to protect tcep and the page behind it */201201202202- tcep = __get_cpu_var(tce_page);202202+ tcep = __this_cpu_read(tce_page);203203204204 /* This is safe to do since interrupts are off when we're called205205 * from iommu_alloc{,_sg}()···212212 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,213213 direction, attrs);214214 }215215- __get_cpu_var(tce_page) = tcep;215215+ __this_cpu_write(tce_page, tcep);216216 }217217218218 rpn = __pa(uaddr) >> TCE_SHIFT;···398398 long l, limit;399399400400 local_irq_disable(); /* to protect tcep and the page behind it */401401- tcep = __get_cpu_var(tce_page);401401+ tcep = __this_cpu_read(tce_page);402402403403 if (!tcep) {404404 tcep = (__be64 *)__get_free_page(GFP_ATOMIC);···406406 local_irq_enable();407407 return -ENOMEM;408408 }409409- __get_cpu_var(tce_page) = tcep;409409+ __this_cpu_write(tce_page, tcep);410410 }411411412412 proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
+3-3
arch/powerpc/platforms/pseries/lpar.c
···515515 unsigned long vpn;516516 unsigned long i, pix, rc;517517 unsigned long flags = 0;518518- struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);518518+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);519519 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);520520 unsigned long param[9];521521 unsigned long hash, index, shift, hidx, slot;···705705706706 local_irq_save(flags);707707708708- depth = &__get_cpu_var(hcall_trace_depth);708708+ depth = this_cpu_ptr(&hcall_trace_depth);709709710710 if (*depth)711711 goto out;···730730731731 local_irq_save(flags);732732733733- depth = &__get_cpu_var(hcall_trace_depth);733733+ depth = this_cpu_ptr(&hcall_trace_depth);734734735735 if (*depth)736736 goto out;
+2-2
arch/powerpc/platforms/pseries/ras.c
···302302 /* If it isn't an extended log we can use the per cpu 64bit buffer */303303 h = (struct rtas_error_log *)&savep[1];304304 if (!rtas_error_extended(h)) {305305- memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));306306- errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);305305+ memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));306306+ errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);307307 } else {308308 int len, error_log_length;309309
+1-1
arch/powerpc/sysdev/xics/xics-common.c
···155155156156void xics_teardown_cpu(void)157157{158158- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);158158+ struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);159159160160 /*161161 * we have to reset the cppr index to 0 because we're