#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
	struct list_head list;
	smp_call_func_t func;
	void *info;
	u16 flags;
	u16 priv;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

void __smp_call_function_single(int cpuid, struct call_single_data *data,
				int wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

/*
 * Generic and arch helpers
 */
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
#endif

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);
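
/*
 * Illustrative usage sketch (editorial note, not part of the original
 * header).  The cross-CPU call interfaces above take a callback that is
 * run in interrupt context on the target CPU(s), so it must not sleep.
 * The names flush_foo() and foo_arg below are hypothetical:
 *
 *	static void flush_foo(void *info)
 *	{
 *		struct foo_arg *arg = info;	(typically runs with IRQs off)
 *		...
 *	}
 *
 *	smp_call_function_single(cpu, flush_foo, &arg, 1);	(one CPU, wait)
 *	on_each_cpu(flush_foo, &arg, 1);	(every online CPU, incl. caller)
 */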

#define MSG_ALL_BUT_SELF	0x8000	/* Assume <32768 CPU's */
#define MSG_ALL			0x8001

#define MSG_INVALIDATE_TLB	0x0001	/* Remote processor TLB invalidate */
#define MSG_STOP_CPU		0x0002	/* Sent to shut down slave CPU's
					 * when rebooting
					 */
#define MSG_RESCHEDULE		0x0003	/* Reschedule request from master CPU*/
#define MSG_CALL_FUNCTION	0x0004	/* Call function on all other CPUs */

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))
#define on_each_cpu(func,info,wait)		\
	({					\
		local_irq_disable();		\
		func(info);			\
		local_irq_enable();		\
		0;				\
	})
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()			1
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void init_call_single_data(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */
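
/*
 * Illustrative usage sketch (editorial note, not part of the original
 * header).  get_cpu()/put_cpu() bracket a region in which the CPU number
 * must stay stable: preemption is disabled, so the task cannot migrate.
 *
 *	int cpu = get_cpu();		(disables preemption)
 *	... use per-CPU data for 'cpu' ...
 *	put_cpu();			(re-enables preemption)
 *
 * A bare smp_processor_id() is only safe where migration is already
 * impossible (preemption off, interrupts off, or a thread bound to one
 * CPU); otherwise CONFIG_DEBUG_PREEMPT will flag the call.
 */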