Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] i386: Add smp_ops interface

Add a smp_ops interface. This abstracts the API defined by
<linux/smp.h> for use within arch/i386. The primary intent is that it
be used by a paravirtualizing hypervisor to implement SMP, but it
could also be used by non-APIC-using sub-architectures.

This is related to CONFIG_PARAVIRT, but is implemented unconditionally
since it is simpler that way and not a highly performance-sensitive
interface.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>

Authored by Jeremy Fitzhardinge; committed by Andi Kleen.
01a2f435 4fbb5968

+73 -9
+16 -5
arch/i386/kernel/smp.c
@@ -483,7 +483,7 @@
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void native_smp_send_reschedule(int cpu)
 {
 	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -560,9 +560,9 @@
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+				  void (*func)(void *), void *info,
+				  int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
@@ -681,7 +681,7 @@
  * this function calls the 'stop' function on all other CPUs in the system.
  */

-void smp_send_stop(void)
+void native_smp_send_stop(void)
 {
 	/* Don't deadlock on the call lock in panic */
 	int nolock = !spin_trylock(&call_lock);
@@ -757,3 +757,14 @@

 	return cpuid >= 0 ? cpuid : 0;
 }
+
+struct smp_ops smp_ops = {
+	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = native_smp_prepare_cpus,
+	.cpu_up = native_cpu_up,
+	.smp_cpus_done = native_smp_cpus_done,
+
+	.smp_send_stop = native_smp_send_stop,
+	.smp_send_reschedule = native_smp_send_reschedule,
+	.smp_call_function_mask = native_smp_call_function_mask,
+};
+4 -4
arch/i386/kernel/smpboot.c
@@ -1171,7 +1171,7 @@

 /* These are wrappers to interface to the new boot process. Someone
    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
 	smp_commenced_mask = cpumask_of_cpu(0);
 	cpu_callin_map = cpumask_of_cpu(0);
@@ -1191,7 +1191,7 @@
 	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory");
 }

-void __init smp_prepare_boot_cpu(void)
+void __init native_smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();

@@ -1292,7 +1292,7 @@
 }
 #endif /* CONFIG_HOTPLUG_CPU */

-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu)
 {
 	unsigned long flags;
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1337,7 +1337,7 @@
 	return 0;
 }

-void __init smp_cpus_done(unsigned int max_cpus)
+void __init native_smp_cpus_done(unsigned int max_cpus)
 {
 #ifdef CONFIG_X86_IO_APIC
 	setup_ioapic_dest();
+53
include/asm-i386/smp.h
@@ -49,6 +49,59 @@
 extern void cpu_uninit(void);
 #endif

+struct smp_ops
+{
+	void (*smp_prepare_boot_cpu)(void);
+	void (*smp_prepare_cpus)(unsigned max_cpus);
+	int (*cpu_up)(unsigned cpu);
+	void (*smp_cpus_done)(unsigned max_cpus);
+
+	void (*smp_send_stop)(void);
+	void (*smp_send_reschedule)(int cpu);
+	int (*smp_call_function_mask)(cpumask_t mask,
+				      void (*func)(void *info), void *info,
+				      int wait);
+};
+
+extern struct smp_ops smp_ops;
+
+static inline void smp_prepare_boot_cpu(void)
+{
+	smp_ops.smp_prepare_boot_cpu();
+}
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+	smp_ops.smp_prepare_cpus(max_cpus);
+}
+static inline int __cpu_up(unsigned int cpu)
+{
+	return smp_ops.cpu_up(cpu);
+}
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+	smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline void smp_send_stop(void)
+{
+	smp_ops.smp_send_stop();
+}
+static inline void smp_send_reschedule(int cpu)
+{
+	smp_ops.smp_send_reschedule(cpu);
+}
+static inline int smp_call_function_mask(cpumask_t mask,
+					 void (*func) (void *info), void *info,
+					 int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
+
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+int native_cpu_up(unsigned int cpunum);
+void native_smp_cpus_done(unsigned int max_cpus);
+
 #ifndef CONFIG_PARAVIRT
 #define startup_ipi_hook(phys_apicid, start_eip, start_esp) \
 do { } while (0)