Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

smp: Change function signatures to use call_single_data_t

call_single_data_t is a size-aligned typedef of struct __call_single_data.

This alignment is desirable in order to have smp_call_function*() avoid
bouncing an extra cacheline in case of an unaligned csd, given this
would hurt performance.

Since the removal of struct request->csd in commit 660e802c76c8
("blk-mq: use percpu csd to remote complete instead of per-rq csd") there
are no current users of smp_call_function*() with unaligned csd.

Change every 'struct __call_single_data' function parameter to
'call_single_data_t', so we have warnings if any new code tries to
introduce an smp_call_function*() call with unaligned csd.

Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230831063129.335425-1-leobras@redhat.com

Authored by Leonardo Bras and committed by Ingo Molnar.
d090ec0d e0a99a83

+19 -19
+1 -1
include/linux/smp.h
@@ -53,7 +53,7 @@
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, const struct cpumask *mask);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
 /*
  * Cpus stopping functions in panic. All have default weak definitions.
+4 -4
include/trace/events/csd.h
@@ -12,7 +12,7 @@
 	TP_PROTO(const unsigned int cpu,
 		 unsigned long callsite,
 		 smp_call_func_t func,
-		 struct __call_single_data *csd),
+		 call_single_data_t *csd),
 
 	TP_ARGS(cpu, callsite, func, csd),
 
@@ -39,7 +39,7 @@
  */
 DECLARE_EVENT_CLASS(csd_function,
 
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 
 	TP_ARGS(func, csd),
 
@@ -57,12 +57,12 @@
 );
 
 DEFINE_EVENT(csd_function, csd_function_entry,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
 DEFINE_EVENT(csd_function, csd_function_exit,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
+13 -13
kernel/smp.c
@@ -125,7 +125,7 @@
 }
 
 static __always_inline void
-csd_do_func(smp_call_func_t func, void *info, struct __call_single_data *csd)
+csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
 {
 	trace_csd_function_entry(func, csd);
 	func(info);
@@ -172,7 +172,7 @@
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void __csd_lock_record(struct __call_single_data *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -187,13 +187,13 @@
 	/* Or before unlock, as the case may be. */
 }
 
-static __always_inline void csd_lock_record(struct __call_single_data *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled))
 		__csd_lock_record(csd);
 }
 
-static int csd_lock_wait_getcpu(struct __call_single_data *csd)
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
 	unsigned int csd_type;
 
@@ -208,7 +208,7 @@
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -272,7 +272,7 @@
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
 */
-static void __csd_lock_wait(struct __call_single_data *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -286,7 +286,7 @@
 	smp_acquire__after_ctrl_dep();
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled)) {
 		__csd_lock_wait(csd);
@@ -296,17 +296,17 @@
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #else
-static void csd_lock_record(struct __call_single_data *csd)
+static void csd_lock_record(call_single_data_t *csd)
 {
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif
 
-static __always_inline void csd_lock(struct __call_single_data *csd)
+static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);
 	csd->node.u_flags |= CSD_FLAG_LOCK;
@@ -319,7 +319,7 @@
 	smp_wmb();
 }
 
-static __always_inline void csd_unlock(struct __call_single_data *csd)
+static __always_inline void csd_unlock(call_single_data_t *csd)
 {
 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 
@@ -372,7 +372,7 @@
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
 */
-static int generic_exec_single(int cpu, struct __call_single_data *csd)
+static int generic_exec_single(int cpu, call_single_data_t *csd)
 {
 	if (cpu == smp_processor_id()) {
 		smp_call_func_t func = csd->func;
@@ -658,7 +658,7 @@
 *
 * Return: %0 on success or negative errno value on error
 */
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	int err = 0;
 
+1 -1
kernel/up.c
··· 25 25 } 26 26 EXPORT_SYMBOL(smp_call_function_single); 27 27 28 - int smp_call_function_single_async(int cpu, struct __call_single_data *csd) 28 + int smp_call_function_single_async(int cpu, call_single_data_t *csd) 29 29 { 30 30 unsigned long flags; 31 31