include/linux/smp.h at v5.8-rc2
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/llist.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

enum {
	CSD_FLAG_LOCK		= 0x01,

	/* IRQ_WORK flags */

	CSD_TYPE_ASYNC		= 0x00,
	CSD_TYPE_SYNC		= 0x10,
	CSD_TYPE_IRQ_WORK	= 0x20,
	CSD_TYPE_TTWU		= 0x30,
	CSD_FLAG_TYPE_MASK	= 0xF0,
};

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	struct llist_node llist;
	unsigned int flags;
	smp_call_func_t func;
	void *info;
};

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

/*
 * Call a function on all processors
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait);

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait);

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, call_single_data_t *csd);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handle INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);
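
/*
 * Illustrative sketch, not part of the original header: a typical caller
 * of the cross-call helpers declared above. The handler runs from the IPI
 * in interrupt context with IRQs disabled, so it must not sleep. The
 * names bump_counter and hit_count are made up for this example (and the
 * caller would need <linux/atomic.h>):
 *
 *	static atomic_t hit_count = ATOMIC_INIT(0);
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	Run on CPU 1 and wait until the handler has finished there:
 *		smp_call_function_single(1, bump_counter, &hit_count, 1);
 *
 *	Run on every online CPU, including the local one:
 *		on_each_cpu(bump_counter, &hit_count, 1);
 */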

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify this assumption and
 * WARN when smp_processor_id() is used while the CPU id is not stable.
 */
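
/*
 * Illustrative sketch, not part of the original header: per the rules
 * above, a preemptible caller has to pin itself before the CPU id can
 * be trusted, e.g. by disabling preemption around the use:
 *
 *	int cpu;
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();
 *	...use per-cpu state of 'cpu'; the id stays stable here...
 *	preempt_enable();
 *
 * With CONFIG_DEBUG_PREEMPT, calling smp_processor_id() outside such a
 * region triggers the WARN mentioned above; raw_smp_processor_id()
 * skips the check but may return an id that is already stale by the
 * time it is used.
 */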

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */
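
/*
 * Illustrative sketch, not part of the original header: unlike the
 * IPI-based cross calls above, smp_call_on_cpu() runs the callback in
 * task context on the chosen CPU and hands back its integer return
 * value, so the callback may sleep. read_board_sensor() and its buffer
 * are made up for this example; phys=false is the common case where
 * pinning to the physical CPU is not required:
 *
 *	static int read_board_sensor(void *par)
 *	{
 *		struct sensor_buf *buf = par;
 *
 *		...sleepable work that must run on the target CPU...
 *		return 0;
 *	}
 *
 *	err = smp_call_on_cpu(3, read_board_sensor, &buf, false);
 */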