Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/apic: Wrap IPI calls into helper functions

Move them to one place so the static call conversion gets simpler.

No functional change.

[ dhansen: merge against recent x86/apic changes ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Juergen Gross <jgross@suse.com> # Xen PV (dom0 and unpriv. guest)

+51 -20
+1 -1
arch/x86/hyperv/hv_spinlock.c
@@ -20,7 +20,7 @@
 
 static void hv_qlock_kick(int cpu)
 {
-	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
+	__apic_send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
 }
 
 static void hv_qlock_wait(u8 *byte, u8 val)
+30
arch/x86/include/asm/apic.h
@@ -401,6 +401,36 @@
 	apic->icr_write(low, high);
 }
 
+static __always_inline void __apic_send_IPI(int cpu, int vector)
+{
+	apic->send_IPI(cpu, vector);
+}
+
+static __always_inline void __apic_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+	apic->send_IPI_mask(mask, vector);
+}
+
+static __always_inline void __apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+	apic->send_IPI_mask_allbutself(mask, vector);
+}
+
+static __always_inline void __apic_send_IPI_allbutself(int vector)
+{
+	apic->send_IPI_allbutself(vector);
+}
+
+static __always_inline void __apic_send_IPI_all(int vector)
+{
+	apic->send_IPI_all(vector);
+}
+
+static __always_inline void __apic_send_IPI_self(int vector)
+{
+	apic->send_IPI_self(vector);
+}
+
 static __always_inline void apic_wait_icr_idle(void)
 {
 	if (apic->wait_icr_idle)
+1 -1
arch/x86/kernel/apic/apic.c
@@ -502,7 +502,7 @@
 static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+	__apic_send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
+3 -1
arch/x86/kernel/apic/hw_nmi.c
@@ -21,6 +21,8 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 
+#include "local.h"
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
 u64 hw_nmi_get_sample_period(int watchdog_thresh)
 {
@@ -31,7 +33,7 @@
 #ifdef arch_trigger_cpumask_backtrace
 static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 {
-	apic->send_IPI_mask(mask, NMI_VECTOR);
+	__apic_send_IPI_mask(mask, NMI_VECTOR);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+8 -8
arch/x86/kernel/apic/ipi.c
@@ -54,9 +54,9 @@
 		return;
 
 	if (static_branch_likely(&apic_use_ipi_shorthand))
-		apic->send_IPI_allbutself(vector);
+		__apic_send_IPI_allbutself(vector);
 	else
-		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+		__apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 /*
@@ -70,12 +70,12 @@
 		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
 		return;
 	}
-	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
+	__apic_send_IPI(cpu, RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
+	__apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 void native_send_call_func_ipi(const struct cpumask *mask)
@@ -87,14 +87,14 @@
 			goto sendmask;
 
 		if (cpumask_test_cpu(cpu, mask))
-			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
+			__apic_send_IPI_all(CALL_FUNCTION_VECTOR);
 		else if (num_online_cpus() > 1)
-			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+			__apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 		return;
 	}
 
 sendmask:
-	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+	__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 }
 
 #endif /* CONFIG_SMP */
@@ -221,7 +221,7 @@
  */
 void default_send_IPI_single(int cpu, int vector)
 {
-	apic->send_IPI_mask(cpumask_of(cpu), vector);
+	__apic_send_IPI_mask(cpumask_of(cpu), vector);
 }
 
 void default_send_IPI_allbutself(int vector)
+1 -1
arch/x86/kernel/apic/vector.c
@@ -898,7 +898,7 @@
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	apic->send_IPI(apicd->cpu, apicd->vector);
+	__apic_send_IPI(apicd->cpu, apicd->vector);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
+1 -2
arch/x86/kernel/cpu/mce/inject.c
@@ -270,8 +270,7 @@
 					mce_irq_ipi, NULL, 0);
 		preempt_enable();
 	} else if (m->inject_flags & MCJ_NMI_BROADCAST)
-		apic->send_IPI_mask(mce_inject_cpumask,
-				NMI_VECTOR);
+		__apic_send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
 	}
 	start = jiffies;
 	while (!cpumask_empty(mce_inject_cpumask)) {
+1 -1
arch/x86/kernel/irq_work.c
@@ -28,7 +28,7 @@
 	if (!arch_irq_work_has_interrupt())
 		return;
 
-	apic->send_IPI_self(IRQ_WORK_VECTOR);
+	__apic_send_IPI_self(IRQ_WORK_VECTOR);
 	apic_wait_icr_idle();
 }
 #endif
+1 -1
arch/x86/kernel/nmi_selftest.c
@@ -75,7 +75,7 @@
 	/* sync above data before sending NMI */
 	wmb();
 
-	apic->send_IPI_mask(mask, NMI_VECTOR);
+	__apic_send_IPI_mask(mask, NMI_VECTOR);
 
 	/* Don't wait longer than a second */
 	timeout = USEC_PER_SEC;
+1 -1
arch/x86/kernel/smp.c
@@ -237,7 +237,7 @@
 		pr_emerg("Shutting down cpus with NMI\n");
 
 		for_each_cpu(cpu, &cpus_stop_mask)
-			apic->send_IPI(cpu, NMI_VECTOR);
+			__apic_send_IPI(cpu, NMI_VECTOR);
 	}
 	/*
 	 * Don't wait longer than 10 ms if the caller didn't
+1 -1
arch/x86/kvm/vmx/posted_intr.c
@@ -175,7 +175,7 @@
 	 * scheduled out).
 	 */
 	if (pi_test_on(&new))
-		apic->send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
+		__apic_send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
 
 	local_irq_restore(flags);
 }
+1 -1
arch/x86/kvm/vmx/vmx.c
@@ -4179,7 +4179,7 @@
 		 */
 
 		if (vcpu != kvm_get_running_vcpu())
-			apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
+			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
 		return;
 	}
 #endif
+1 -1
arch/x86/platform/uv/uv_nmi.c
@@ -601,7 +601,7 @@
 	for_each_cpu(cpu, uv_nmi_cpu_mask)
 		uv_cpu_nmi_per(cpu).pinging = 1;
 
-	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
+	__apic_send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
 /* Clean up flags for CPU's that ignored both NMI and ping */