Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

kernel/smp.c: cleanups

We sometimes use "struct call_single_data *data" and sometimes "struct
call_single_data *csd". Use "csd" consistently.

We sometimes use "struct call_function_data *data" and sometimes "struct
call_function_data *cfd". Use "cfd" consistently.

Also, avoid some 80-col layout tricks.
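As a concrete illustration of the conventions above, here is a minimal
userspace sketch (not part of the patch; the struct layouts are simplified
stand-ins for the kernel's): "csd" always names a call_single_data pointer,
"cfd" always names a call_function_data pointer, and a declare-then-assign
replaces the line-wrapped initializer, which is the kind of 80-col trick
the patch removes.

#include <stdio.h>

/* simplified stand-ins for the kernel structures */
struct call_single_data {
        unsigned int flags;
};

struct call_function_data {
        struct call_single_data csd[4];         /* one slot per "cpu" */
};

int main(void)
{
        static struct call_function_data cfd_data;
        struct call_function_data *cfd = &cfd_data;     /* always "cfd" */
        int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
                struct call_single_data *csd;           /* always "csd" */

                /* declare, then assign: no wrapped initializer needed */
                csd = &cfd->csd[cpu];
                printf("cpu %d: flags=%u\n", cpu, csd->flags);
        }
        return 0;
}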

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Shaohua Li <shli@fusionio.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

commit e1d12f32, parent 74e3d1e1; authored by Andrew Morton, committed by Linus Torvalds

kernel/smp.c: +46 -45
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -100,16 +100,16 @@
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void csd_lock_wait(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *csd)
 {
-        while (data->flags & CSD_FLAG_LOCK)
+        while (csd->flags & CSD_FLAG_LOCK)
                 cpu_relax();
 }
 
-static void csd_lock(struct call_single_data *data)
+static void csd_lock(struct call_single_data *csd)
 {
-        csd_lock_wait(data);
-        data->flags |= CSD_FLAG_LOCK;
+        csd_lock_wait(csd);
+        csd->flags |= CSD_FLAG_LOCK;
 
         /*
          * prevent CPU from reordering the above assignment
@@ -119,16 +119,16 @@
         smp_mb();
 }
 
-static void csd_unlock(struct call_single_data *data)
+static void csd_unlock(struct call_single_data *csd)
 {
-        WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
 
         /*
          * ensure we're all done before releasing data:
          */
         smp_mb();
 
-        data->flags &= ~CSD_FLAG_LOCK;
+        csd->flags &= ~CSD_FLAG_LOCK;
 }
 
 /*
@@ -137,7 +137,7 @@
  * ->func, ->info, and ->flags set.
  */
 static
-void generic_exec_single(int cpu, struct call_single_data *data, int wait)
+void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
         struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
         unsigned long flags;
@@ -145,7 +145,7 @@
 
         raw_spin_lock_irqsave(&dst->lock, flags);
         ipi = list_empty(&dst->list);
-        list_add_tail(&data->list, &dst->list);
+        list_add_tail(&csd->list, &dst->list);
         raw_spin_unlock_irqrestore(&dst->lock, flags);
 
         /*
@@ -163,7 +163,7 @@
         arch_send_call_function_single_ipi(cpu);
 
         if (wait)
-                csd_lock_wait(data);
+                csd_lock_wait(csd);
 }
 
 /*
@@ -173,7 +173,6 @@
 void generic_smp_call_function_single_interrupt(void)
 {
         struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-        unsigned int data_flags;
         LIST_HEAD(list);
 
         /*
@@ -185,24 +186,25 @@
         raw_spin_unlock(&q->lock);
 
         while (!list_empty(&list)) {
-                struct call_single_data *data;
+                struct call_single_data *csd;
+                unsigned int csd_flags;
 
-                data = list_entry(list.next, struct call_single_data, list);
-                list_del(&data->list);
+                csd = list_entry(list.next, struct call_single_data, list);
+                list_del(&csd->list);
 
                 /*
-                 * 'data' can be invalid after this call if flags == 0
+                 * 'csd' can be invalid after this call if flags == 0
                  * (when called through generic_exec_single()),
                  * so save them away before making the call:
                  */
-                data_flags = data->flags;
+                csd_flags = csd->flags;
 
-                data->func(data->info);
+                csd->func(csd->info);
 
                 /*
                  * Unlocked CSDs are valid through generic_exec_single():
                  */
-                if (data_flags & CSD_FLAG_LOCK)
-                        csd_unlock(data);
+                if (csd_flags & CSD_FLAG_LOCK)
+                        csd_unlock(csd);
         }
 }
@@ -249,16 +249,16 @@
                 local_irq_restore(flags);
         } else {
                 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-                        struct call_single_data *data = &d;
+                        struct call_single_data *csd = &d;
 
                         if (!wait)
-                                data = &__get_cpu_var(csd_data);
+                                csd = &__get_cpu_var(csd_data);
 
-                        csd_lock(data);
+                        csd_lock(csd);
 
-                        data->func = func;
-                        data->info = info;
-                        generic_exec_single(cpu, data, wait);
+                        csd->func = func;
+                        csd->info = info;
+                        generic_exec_single(cpu, csd, wait);
                 } else {
                         err = -ENXIO;   /* CPU not online */
                 }
@@ -325,7 +325,7 @@
  * pre-allocated data structure. Useful for embedding @data inside
  * other structures, for instance.
  */
-void __smp_call_function_single(int cpu, struct call_single_data *data,
+void __smp_call_function_single(int cpu, struct call_single_data *csd,
                                 int wait)
 {
         unsigned int this_cpu;
@@ -343,11 +343,11 @@
 
         if (cpu == this_cpu) {
                 local_irq_save(flags);
-                data->func(data->info);
+                csd->func(csd->info);
                 local_irq_restore(flags);
         } else {
-                csd_lock(data);
-                generic_exec_single(cpu, data, wait);
+                csd_lock(csd);
+                generic_exec_single(cpu, csd, wait);
         }
         put_cpu();
 }
@@ -369,7 +369,7 @@
 void smp_call_function_many(const struct cpumask *mask,
                             smp_call_func_t func, void *info, bool wait)
 {
-        struct call_function_data *data;
+        struct call_function_data *cfd;
         int cpu, next_cpu, this_cpu = smp_processor_id();
 
         /*
@@ -401,24 +401,24 @@
                 return;
         }
 
-        data = &__get_cpu_var(cfd_data);
+        cfd = &__get_cpu_var(cfd_data);
 
-        cpumask_and(data->cpumask, mask, cpu_online_mask);
-        cpumask_clear_cpu(this_cpu, data->cpumask);
+        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
+        cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
         /* Some callers race with other cpus changing the passed mask */
-        if (unlikely(!cpumask_weight(data->cpumask)))
+        if (unlikely(!cpumask_weight(cfd->cpumask)))
                 return;
 
         /*
-         * After we put an entry into the list, data->cpumask
-         * may be cleared again when another CPU sends another IPI for
-         * a SMP function call, so data->cpumask will be zero.
+         * After we put an entry into the list, cfd->cpumask may be cleared
+         * again when another CPU sends another IPI for a SMP function call, so
+         * cfd->cpumask will be zero.
          */
-        cpumask_copy(data->cpumask_ipi, data->cpumask);
+        cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
 
-        for_each_cpu(cpu, data->cpumask) {
-                struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+        for_each_cpu(cpu, cfd->cpumask) {
+                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
                 struct call_single_queue *dst =
                                         &per_cpu(call_single_queue, cpu);
                 unsigned long flags;
@@ -433,12 +433,13 @@
         }
 
         /* Send a message to all CPUs in the map */
-        arch_send_call_function_ipi_mask(data->cpumask_ipi);
+        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
         if (wait) {
-                for_each_cpu(cpu, data->cpumask) {
-                        struct call_single_data *csd =
-                                        per_cpu_ptr(data->csd, cpu);
+                for_each_cpu(cpu, cfd->cpumask) {
+                        struct call_single_data *csd;
+
+                        csd = per_cpu_ptr(cfd->csd, cpu);
                         csd_lock_wait(csd);
                 }
         }
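For context on what the renamed csd_lock()/csd_lock_wait()/csd_unlock()
helpers protect: they implement an ownership handshake on CSD_FLAG_LOCK.
Below is a standalone userspace sketch of that handshake, rebuilt with C11
atomics and threads; the atomics, the simplified struct, and the thread
standing in for the remote CPU's IPI handler are assumptions of this model,
not the kernel's smp_mb()/cpu_relax() implementation.

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

#define CSD_FLAG_LOCK 0x01

/* simplified stand-in for the kernel's call_single_data */
struct call_single_data {
        atomic_uint flags;
        void (*func)(void *info);
        void *info;
};

static void csd_lock_wait(struct call_single_data *csd)
{
        /* spin until the previous owner drops CSD_FLAG_LOCK */
        while (atomic_load_explicit(&csd->flags, memory_order_acquire) &
               CSD_FLAG_LOCK)
                ;
}

static void csd_lock(struct call_single_data *csd)
{
        csd_lock_wait(csd);
        /* seq_cst RMW stands in for the smp_mb() in the kernel version */
        atomic_fetch_or(&csd->flags, CSD_FLAG_LOCK);
}

static void csd_unlock(struct call_single_data *csd)
{
        /* release ordering: ->func/->info writes become visible first */
        atomic_fetch_and_explicit(&csd->flags, ~(unsigned)CSD_FLAG_LOCK,
                                  memory_order_release);
}

static void say_hello(void *info)
{
        printf("called with info=%s\n", (const char *)info);
}

static struct call_single_data csd_data;

static int remote_cpu(void *arg)
{
        struct call_single_data *csd = arg;

        /* the "interrupt handler": run the function, then unlock */
        csd->func(csd->info);
        csd_unlock(csd);
        return 0;
}

int main(void)
{
        struct call_single_data *csd = &csd_data;
        thrd_t t;

        csd_lock(csd);
        csd->func = say_hello;
        csd->info = "hello";
        thrd_create(&t, remote_cpu, csd);

        csd_lock_wait(csd);     /* the wait case: block until done */
        thrd_join(t, NULL);
        return 0;
}

This handshake is why smp_call_function_single() in the diff can reuse a
single per-cpu csd_data slot in the !wait case: csd_lock() spins in
csd_lock_wait() until the previous call's handler has dropped
CSD_FLAG_LOCK, so the slot is never overwritten while still in flight.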