Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

time: Replace __get_cpu_var uses

Convert uses of __get_cpu_var for creating an address from a percpu
offset to this_cpu_ptr.

The two cases where get_cpu_var is used to actually access a percpu
variable are changed to use this_cpu_read/raw_cpu_read.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

Authored by Christoph Lameter
Committed by Tejun Heo
22127e93 bb964a92

+29 -29
+1 -1
drivers/clocksource/dummy_timer.c
··· 28 28 static void dummy_timer_setup(void) 29 29 { 30 30 int cpu = smp_processor_id(); 31 - struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt); 31 + struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt); 32 32 33 33 evt->name = "dummy_timer"; 34 34 evt->features = CLOCK_EVT_FEAT_PERIODIC |
+6 -6
kernel/irq_work.c
··· 95 95 96 96 /* If the work is "lazy", handle it from next tick if any */ 97 97 if (work->flags & IRQ_WORK_LAZY) { 98 - if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) && 98 + if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && 99 99 tick_nohz_tick_stopped()) 100 100 arch_irq_work_raise(); 101 101 } else { 102 - if (llist_add(&work->llnode, &__get_cpu_var(raised_list))) 102 + if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) 103 103 arch_irq_work_raise(); 104 104 } 105 105 ··· 113 113 { 114 114 struct llist_head *raised, *lazy; 115 115 116 - raised = &__get_cpu_var(raised_list); 117 - lazy = &__get_cpu_var(lazy_list); 116 + raised = this_cpu_ptr(&raised_list); 117 + lazy = this_cpu_ptr(&lazy_list); 118 118 if (llist_empty(raised) && llist_empty(lazy)) 119 119 return false; 120 120 ··· 166 166 */ 167 167 void irq_work_run(void) 168 168 { 169 - irq_work_run_list(&__get_cpu_var(raised_list)); 170 - irq_work_run_list(&__get_cpu_var(lazy_list)); 169 + irq_work_run_list(this_cpu_ptr(&raised_list)); 170 + irq_work_run_list(this_cpu_ptr(&lazy_list)); 171 171 } 172 172 EXPORT_SYMBOL_GPL(irq_work_run); 173 173
+1 -1
kernel/sched/clock.c
··· 134 134 135 135 static inline struct sched_clock_data *this_scd(void) 136 136 { 137 - return &__get_cpu_var(sched_clock_data); 137 + return this_cpu_ptr(&sched_clock_data); 138 138 } 139 139 140 140 static inline struct sched_clock_data *cpu_sdc(int cpu)
+2 -2
kernel/softirq.c
··· 485 485 local_irq_disable(); 486 486 list = __this_cpu_read(tasklet_vec.head); 487 487 __this_cpu_write(tasklet_vec.head, NULL); 488 - __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); 488 + __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); 489 489 local_irq_enable(); 490 490 491 491 while (list) { ··· 521 521 local_irq_disable(); 522 522 list = __this_cpu_read(tasklet_hi_vec.head); 523 523 __this_cpu_write(tasklet_hi_vec.head, NULL); 524 - __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); 524 + __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); 525 525 local_irq_enable(); 526 526 527 527 while (list) {
+3 -3
kernel/time/hrtimer.c
··· 1144 1144 1145 1145 memset(timer, 0, sizeof(struct hrtimer)); 1146 1146 1147 - cpu_base = &__raw_get_cpu_var(hrtimer_bases); 1147 + cpu_base = raw_cpu_ptr(&hrtimer_bases); 1148 1148 1149 1149 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) 1150 1150 clock_id = CLOCK_MONOTONIC; ··· 1187 1187 struct hrtimer_cpu_base *cpu_base; 1188 1188 int base = hrtimer_clockid_to_base(which_clock); 1189 1189 1190 - cpu_base = &__raw_get_cpu_var(hrtimer_bases); 1190 + cpu_base = raw_cpu_ptr(&hrtimer_bases); 1191 1191 *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution); 1192 1192 1193 1193 return 0; ··· 1376 1376 if (!hrtimer_hres_active()) 1377 1377 return; 1378 1378 1379 - td = &__get_cpu_var(tick_cpu_device); 1379 + td = this_cpu_ptr(&tick_cpu_device); 1380 1380 if (td && td->evtdev) 1381 1381 hrtimer_interrupt(td->evtdev); 1382 1382 }
+1 -1
kernel/time/tick-broadcast.c
··· 554 554 void tick_check_oneshot_broadcast_this_cpu(void) 555 555 { 556 556 if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) { 557 - struct tick_device *td = &__get_cpu_var(tick_cpu_device); 557 + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 558 558 559 559 /* 560 560 * We might be in the middle of switching over from
+3 -3
kernel/time/tick-common.c
··· 224 224 225 225 void tick_install_replacement(struct clock_event_device *newdev) 226 226 { 227 - struct tick_device *td = &__get_cpu_var(tick_cpu_device); 227 + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 228 228 int cpu = smp_processor_id(); 229 229 230 230 clockevents_exchange_device(td->evtdev, newdev); ··· 374 374 375 375 void tick_suspend(void) 376 376 { 377 - struct tick_device *td = &__get_cpu_var(tick_cpu_device); 377 + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 378 378 379 379 clockevents_shutdown(td->evtdev); 380 380 } 381 381 382 382 void tick_resume(void) 383 383 { 384 - struct tick_device *td = &__get_cpu_var(tick_cpu_device); 384 + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 385 385 int broadcast = tick_resume_broadcast(); 386 386 387 387 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
+1 -1
kernel/time/tick-oneshot.c
··· 59 59 */ 60 60 int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) 61 61 { 62 - struct tick_device *td = &__get_cpu_var(tick_cpu_device); 62 + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 63 63 struct clock_event_device *dev = td->evtdev; 64 64 65 65 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
+10 -10
kernel/time/tick-sched.c
··· 205 205 */ 206 206 void __tick_nohz_full_check(void) 207 207 { 208 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 208 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 209 209 210 210 if (tick_nohz_full_cpu(smp_processor_id())) { 211 211 if (ts->tick_stopped && !is_idle_task(current)) { ··· 545 545 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; 546 546 ktime_t last_update, expires, ret = { .tv64 = 0 }; 547 547 unsigned long rcu_delta_jiffies; 548 - struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; 548 + struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); 549 549 u64 time_delta; 550 550 551 551 time_delta = timekeeping_max_deferment(); ··· 813 813 814 814 local_irq_disable(); 815 815 816 - ts = &__get_cpu_var(tick_cpu_sched); 816 + ts = this_cpu_ptr(&tick_cpu_sched); 817 817 ts->inidle = 1; 818 818 __tick_nohz_idle_enter(ts); 819 819 ··· 831 831 */ 832 832 void tick_nohz_irq_exit(void) 833 833 { 834 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 834 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 835 835 836 836 if (ts->inidle) 837 837 __tick_nohz_idle_enter(ts); ··· 846 846 */ 847 847 ktime_t tick_nohz_get_sleep_length(void) 848 848 { 849 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 849 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 850 850 851 851 return ts->sleep_length; 852 852 } ··· 959 959 */ 960 960 static void tick_nohz_handler(struct clock_event_device *dev) 961 961 { 962 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 962 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 963 963 struct pt_regs *regs = get_irq_regs(); 964 964 ktime_t now = ktime_get(); 965 965 ··· 979 979 */ 980 980 static void tick_nohz_switch_to_nohz(void) 981 981 { 982 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 982 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 983 983 ktime_t next; 984 984 985 985 if (!tick_nohz_enabled) 
··· 1115 1115 */ 1116 1116 void tick_setup_sched_timer(void) 1117 1117 { 1118 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 1118 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 1119 1119 ktime_t now = ktime_get(); 1120 1120 1121 1121 /* ··· 1184 1184 */ 1185 1185 void tick_oneshot_notify(void) 1186 1186 { 1187 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 1187 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 1188 1188 1189 1189 set_bit(0, &ts->check_clocks); 1190 1190 } ··· 1199 1199 */ 1200 1200 int tick_check_oneshot_change(int allow_nohz) 1201 1201 { 1202 - struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 1202 + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); 1203 1203 1204 1204 if (!test_and_clear_bit(0, &ts->check_clocks)) 1205 1205 return 0;
+1 -1
kernel/time/timer.c
··· 655 655 static void do_init_timer(struct timer_list *timer, unsigned int flags, 656 656 const char *name, struct lock_class_key *key) 657 657 { 658 - struct tvec_base *base = __raw_get_cpu_var(tvec_bases); 658 + struct tvec_base *base = raw_cpu_read(tvec_bases); 659 659 660 660 timer->entry.next = NULL; 661 661 timer->base = (void *)((unsigned long)base | flags);