Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

smp_call_function: get rid of the unused nonatomic/retry argument

It's never used and the comments refer to nonatomic and retry
interchangeably. So get rid of it.

Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

+95 -108
+1 -1
arch/alpha/kernel/core_marvel.c
··· 662 662 if (smp_processor_id() != boot_cpuid) 663 663 smp_call_function_single(boot_cpuid, 664 664 __marvel_access_rtc, 665 - &rtc_access, 1, 1); 665 + &rtc_access, 1); 666 666 else 667 667 __marvel_access_rtc(&rtc_access); 668 668 #else
+3 -3
arch/alpha/kernel/smp.c
··· 710 710 } 711 711 } 712 712 713 - if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) { 713 + if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) { 714 714 printk(KERN_CRIT "flush_tlb_mm: timed out\n"); 715 715 } 716 716 ··· 763 763 data.mm = mm; 764 764 data.addr = addr; 765 765 766 - if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) { 766 + if (smp_call_function(ipi_flush_tlb_page, &data, 1)) { 767 767 printk(KERN_CRIT "flush_tlb_page: timed out\n"); 768 768 } 769 769 ··· 815 815 } 816 816 } 817 817 818 - if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) { 818 + if (smp_call_function(ipi_flush_icache_page, mm, 1)) { 819 819 printk(KERN_CRIT "flush_icache_page: timed out\n"); 820 820 } 821 821
+3 -3
arch/alpha/oprofile/common.c
··· 65 65 model->reg_setup(&reg, ctr, &sys); 66 66 67 67 /* Configure the registers on all cpus. */ 68 - (void)smp_call_function(model->cpu_setup, &reg, 0, 1); 68 + (void)smp_call_function(model->cpu_setup, &reg, 1); 69 69 model->cpu_setup(&reg); 70 70 return 0; 71 71 } ··· 86 86 static int 87 87 op_axp_start(void) 88 88 { 89 - (void)smp_call_function(op_axp_cpu_start, NULL, 0, 1); 89 + (void)smp_call_function(op_axp_cpu_start, NULL, 1); 90 90 op_axp_cpu_start(NULL); 91 91 return 0; 92 92 } ··· 101 101 static void 102 102 op_axp_stop(void) 103 103 { 104 - (void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1); 104 + (void)smp_call_function(op_axp_cpu_stop, NULL, 1); 105 105 op_axp_cpu_stop(NULL); 106 106 } 107 107
+1 -1
arch/arm/oprofile/op_model_mpcore.c
··· 201 201 data.ret = 0; 202 202 203 203 preempt_disable(); 204 - smp_call_function(em_func, &data, 1, 1); 204 + smp_call_function(em_func, &data, 1); 205 205 em_func(&data); 206 206 preempt_enable(); 207 207
+1 -1
arch/arm/vfp/vfpmodule.c
··· 352 352 else if (vfpsid & FPSID_NODOUBLE) { 353 353 printk("no double precision support\n"); 354 354 } else { 355 - smp_call_function(vfp_enable, NULL, 1, 1); 355 + smp_call_function(vfp_enable, NULL, 1); 356 356 357 357 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ 358 358 printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+2 -3
arch/cris/arch-v32/kernel/smp.c
··· 194 194 /* Other calls */ 195 195 void smp_send_stop(void) 196 196 { 197 - smp_call_function(stop_this_cpu, NULL, 1, 0); 197 + smp_call_function(stop_this_cpu, NULL, 0); 198 198 } 199 199 200 200 int setup_profiling_timer(unsigned int multiplier) ··· 316 316 * You must not call this function with disabled interrupts or from a 317 317 * hardware interrupt handler or from a bottom half handler. 318 318 */ 319 - int smp_call_function(void (*func)(void *info), void *info, 320 - int nonatomic, int wait) 319 + int smp_call_function(void (*func)(void *info), void *info, int wait) 321 320 { 322 321 cpumask_t cpu_mask = CPU_MASK_ALL; 323 322 struct call_data_struct data;
+1 -1
arch/ia64/kernel/mca.c
··· 1881 1881 case CPU_ONLINE: 1882 1882 case CPU_ONLINE_FROZEN: 1883 1883 smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, 1884 - NULL, 1, 0); 1884 + NULL, 0); 1885 1885 break; 1886 1886 } 1887 1887 return NOTIFY_OK;
+1 -1
arch/ia64/kernel/palinfo.c
··· 921 921 922 922 923 923 /* will send IPI to other CPU and wait for completion of remote call */ 924 - if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) { 924 + if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) { 925 925 printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: " 926 926 "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret); 927 927 return 0;
+1 -1
arch/ia64/kernel/perfmon.c
··· 1820 1820 int ret; 1821 1821 1822 1822 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); 1823 - ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1); 1823 + ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1); 1824 1824 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); 1825 1825 } 1826 1826 #endif /* CONFIG_SMP */
+1 -1
arch/ia64/kernel/process.c
··· 286 286 { 287 287 smp_mb(); 288 288 /* kick all the CPUs so that they exit out of pm_idle */ 289 - smp_call_function(do_nothing, NULL, 0, 1); 289 + smp_call_function(do_nothing, NULL, 1); 290 290 } 291 291 EXPORT_SYMBOL_GPL(cpu_idle_wait); 292 292
+1 -1
arch/ia64/kernel/smpboot.c
··· 317 317 318 318 go[MASTER] = 1; 319 319 320 - if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) { 320 + if (smp_call_function_single(master, sync_master, NULL, 0) < 0) { 321 321 printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); 322 322 return; 323 323 }
+2 -3
arch/ia64/kernel/uncached.c
··· 123 123 status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); 124 124 if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { 125 125 atomic_set(&uc_pool->status, 0); 126 - status = smp_call_function(uncached_ipi_visibility, uc_pool, 127 - 0, 1); 126 + status = smp_call_function(uncached_ipi_visibility, uc_pool, 1); 128 127 if (status || atomic_read(&uc_pool->status)) 129 128 goto failed; 130 129 } else if (status != PAL_VISIBILITY_OK) ··· 145 146 if (status != PAL_STATUS_SUCCESS) 146 147 goto failed; 147 148 atomic_set(&uc_pool->status, 0); 148 - status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1); 149 + status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1); 149 150 if (status || atomic_read(&uc_pool->status)) 150 151 goto failed; 151 152
+1 -1
arch/ia64/sn/kernel/sn2/sn_hwperf.c
··· 629 629 if (use_ipi) { 630 630 /* use an interprocessor interrupt to call SAL */ 631 631 smp_call_function_single(cpu, sn_hwperf_call_sal, 632 - op_info, 1, 1); 632 + op_info, 1); 633 633 } 634 634 else { 635 635 /* migrate the task before calling SAL */
+2 -2
arch/m32r/kernel/smp.c
··· 212 212 local_irq_save(flags); 213 213 __flush_tlb_all(); 214 214 local_irq_restore(flags); 215 - smp_call_function(flush_tlb_all_ipi, NULL, 1, 1); 215 + smp_call_function(flush_tlb_all_ipi, NULL, 1); 216 216 preempt_enable(); 217 217 } 218 218 ··· 505 505 *==========================================================================*/ 506 506 void smp_send_stop(void) 507 507 { 508 - smp_call_function(stop_this_cpu, NULL, 1, 0); 508 + smp_call_function(stop_this_cpu, NULL, 0); 509 509 } 510 510 511 511 /*==========================================================================*
+2 -2
arch/mips/kernel/smp.c
··· 167 167 168 168 void smp_send_stop(void) 169 169 { 170 - smp_call_function(stop_this_cpu, NULL, 1, 0); 170 + smp_call_function(stop_this_cpu, NULL, 0); 171 171 } 172 172 173 173 void __init smp_cpus_done(unsigned int max_cpus) ··· 266 266 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) 267 267 { 268 268 #ifndef CONFIG_MIPS_MT_SMTC 269 - smp_call_function(func, info, 1, 1); 269 + smp_call_function(func, info, 1); 270 270 #endif 271 271 } 272 272
+9 -9
arch/mips/mm/c-r4k.c
··· 43 43 * primary cache. 44 44 */ 45 45 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, 46 - int retry, int wait) 46 + int wait) 47 47 { 48 48 preempt_disable(); 49 49 50 50 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 51 - smp_call_function(func, info, retry, wait); 51 + smp_call_function(func, info, wait); 52 52 #endif 53 53 func(info); 54 54 preempt_enable(); ··· 350 350 351 351 static void r4k___flush_cache_all(void) 352 352 { 353 - r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); 353 + r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1); 354 354 } 355 355 356 356 static inline int has_valid_asid(const struct mm_struct *mm) ··· 397 397 int exec = vma->vm_flags & VM_EXEC; 398 398 399 399 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) 400 - r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); 400 + r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1); 401 401 } 402 402 403 403 static inline void local_r4k_flush_cache_mm(void * args) ··· 429 429 if (!cpu_has_dc_aliases) 430 430 return; 431 431 432 - r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); 432 + r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1); 433 433 } 434 434 435 435 struct flush_cache_page_args { ··· 521 521 args.addr = addr; 522 522 args.pfn = pfn; 523 523 524 - r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); 524 + r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1); 525 525 } 526 526 527 527 static inline void local_r4k_flush_data_cache_page(void * addr) ··· 535 535 local_r4k_flush_data_cache_page((void *)addr); 536 536 else 537 537 r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 538 - 1, 1); 538 + 1); 539 539 } 540 540 541 541 struct flush_icache_range_args { ··· 571 571 args.start = start; 572 572 args.end = end; 573 573 574 - r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); 574 + r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1); 575 575 instruction_hazard(); 576 576 } 577 
577 ··· 672 672 673 673 static void r4k_flush_cache_sigtramp(unsigned long addr) 674 674 { 675 - r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); 675 + r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1); 676 676 } 677 677 678 678 static void r4k_flush_icache_all(void)
+1 -1
arch/mips/pmc-sierra/yosemite/prom.c
··· 64 64 #ifdef CONFIG_SMP 65 65 if (smp_processor_id()) 66 66 /* CPU 1 */ 67 - smp_call_function(prom_cpu0_exit, NULL, 1, 1); 67 + smp_call_function(prom_cpu0_exit, NULL, 1); 68 68 #endif 69 69 prom_cpu0_exit(NULL); 70 70 }
+1 -1
arch/mips/sibyte/cfe/setup.c
··· 74 74 if (!reboot_smp) { 75 75 /* Get CPU 0 to do the cfe_exit */ 76 76 reboot_smp = 1; 77 - smp_call_function(cfe_linux_exit, arg, 1, 0); 77 + smp_call_function(cfe_linux_exit, arg, 0); 78 78 } 79 79 } else { 80 80 printk("Passing control back to CFE...\n");
+1 -1
arch/mips/sibyte/sb1250/prom.c
··· 66 66 { 67 67 #ifdef CONFIG_SMP 68 68 if (smp_processor_id()) { 69 - smp_call_function(prom_cpu0_exit, NULL, 1, 1); 69 + smp_call_function(prom_cpu0_exit, NULL, 1); 70 70 } 71 71 #endif 72 72 while(1);
+1 -1
arch/powerpc/kernel/smp.c
··· 168 168 169 169 void smp_send_stop(void) 170 170 { 171 - smp_call_function(stop_this_cpu, NULL, 0, 0); 171 + smp_call_function(stop_this_cpu, NULL, 0); 172 172 } 173 173 174 174 extern struct gettimeofday_struct do_gtod;
+2 -2
arch/s390/appldata/appldata_base.c
··· 209 209 per_cpu(appldata_timer, i).expires = per_cpu_interval; 210 210 smp_call_function_single(i, add_virt_timer_periodic, 211 211 &per_cpu(appldata_timer, i), 212 - 0, 1); 212 + 1); 213 213 } 214 214 appldata_timer_active = 1; 215 215 P_INFO("Monitoring timer started.\n"); ··· 236 236 args.timer = &per_cpu(appldata_timer, i); 237 237 args.expires = per_cpu_interval; 238 238 smp_call_function_single(i, __appldata_mod_vtimer_wrap, 239 - &args, 0, 1); 239 + &args, 1); 240 240 } 241 241 } 242 242 }
+6 -10
arch/s390/kernel/smp.c
··· 109 109 } 110 110 111 111 static void __smp_call_function_map(void (*func) (void *info), void *info, 112 - int nonatomic, int wait, cpumask_t map) 112 + int wait, cpumask_t map) 113 113 { 114 114 struct call_data_struct data; 115 115 int cpu, local = 0; ··· 162 162 * smp_call_function: 163 163 * @func: the function to run; this must be fast and non-blocking 164 164 * @info: an arbitrary pointer to pass to the function 165 - * @nonatomic: unused 166 165 * @wait: if true, wait (atomically) until function has completed on other CPUs 167 166 * 168 167 * Run a function on all other CPUs. ··· 169 170 * You must not call this function with disabled interrupts, from a 170 171 * hardware interrupt handler or from a bottom half. 171 172 */ 172 - int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 173 - int wait) 173 + int smp_call_function(void (*func) (void *info), void *info, int wait) 174 174 { 175 175 cpumask_t map; 176 176 177 177 spin_lock(&call_lock); 178 178 map = cpu_online_map; 179 179 cpu_clear(smp_processor_id(), map); 180 - __smp_call_function_map(func, info, nonatomic, wait, map); 180 + __smp_call_function_map(func, info, wait, map); 181 181 spin_unlock(&call_lock); 182 182 return 0; 183 183 } ··· 187 189 * @cpu: the CPU where func should run 188 190 * @func: the function to run; this must be fast and non-blocking 189 191 * @info: an arbitrary pointer to pass to the function 190 - * @nonatomic: unused 191 192 * @wait: if true, wait (atomically) until function has completed on other CPUs 192 193 * 193 194 * Run a function on one processor. ··· 195 198 * hardware interrupt handler or from a bottom half. 
196 199 */ 197 200 int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 198 - int nonatomic, int wait) 201 + int wait) 199 202 { 200 203 spin_lock(&call_lock); 201 - __smp_call_function_map(func, info, nonatomic, wait, 202 - cpumask_of_cpu(cpu)); 204 + __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); 203 205 spin_unlock(&call_lock); 204 206 return 0; 205 207 } ··· 224 228 { 225 229 spin_lock(&call_lock); 226 230 cpu_clear(smp_processor_id(), mask); 227 - __smp_call_function_map(func, info, 0, wait, mask); 231 + __smp_call_function_map(func, info, wait, mask); 228 232 spin_unlock(&call_lock); 229 233 return 0; 230 234 }
+2 -2
arch/s390/kernel/time.c
··· 690 690 */ 691 691 memset(&etr_sync, 0, sizeof(etr_sync)); 692 692 preempt_disable(); 693 - smp_call_function(etr_sync_cpu_start, NULL, 0, 0); 693 + smp_call_function(etr_sync_cpu_start, NULL, 0); 694 694 local_irq_disable(); 695 695 etr_enable_sync_clock(); 696 696 ··· 729 729 rc = -EAGAIN; 730 730 } 731 731 local_irq_enable(); 732 - smp_call_function(etr_sync_cpu_end,NULL,0,0); 732 + smp_call_function(etr_sync_cpu_end,NULL,0); 733 733 preempt_enable(); 734 734 return rc; 735 735 }
+5 -5
arch/sh/kernel/smp.c
··· 168 168 169 169 void smp_send_stop(void) 170 170 { 171 - smp_call_function(stop_this_cpu, 0, 1, 0); 171 + smp_call_function(stop_this_cpu, 0, 0); 172 172 } 173 173 174 174 void arch_send_call_function_ipi(cpumask_t mask) ··· 223 223 preempt_disable(); 224 224 225 225 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 226 - smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1); 226 + smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); 227 227 } else { 228 228 int i; 229 229 for (i = 0; i < num_online_cpus(); i++) ··· 260 260 fd.vma = vma; 261 261 fd.addr1 = start; 262 262 fd.addr2 = end; 263 - smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1); 263 + smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); 264 264 } else { 265 265 int i; 266 266 for (i = 0; i < num_online_cpus(); i++) ··· 303 303 304 304 fd.vma = vma; 305 305 fd.addr1 = page; 306 - smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1); 306 + smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); 307 307 } else { 308 308 int i; 309 309 for (i = 0; i < num_online_cpus(); i++) ··· 327 327 fd.addr1 = asid; 328 328 fd.addr2 = vaddr; 329 329 330 - smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1); 330 + smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); 331 331 local_flush_tlb_one(asid, vaddr); 332 332 }
+4 -8
arch/sparc64/kernel/smp.c
··· 807 807 * smp_call_function(): Run a function on all other CPUs. 808 808 * @func: The function to run. This must be fast and non-blocking. 809 809 * @info: An arbitrary pointer to pass to the function. 810 - * @nonatomic: currently unused. 811 810 * @wait: If true, wait (atomically) until function has completed on other CPUs. 812 811 * 813 812 * Returns 0 on success, else a negative status code. Does not return until ··· 816 817 * hardware interrupt handler or from a bottom half handler. 817 818 */ 818 819 static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, 819 - int nonatomic, int wait, 820 - cpumask_t mask) 820 + int wait, cpumask_t mask) 821 821 { 822 822 struct call_data_struct data; 823 823 int cpus; ··· 851 853 return 0; 852 854 } 853 855 854 - int smp_call_function(void (*func)(void *info), void *info, 855 - int nonatomic, int wait) 856 + int smp_call_function(void (*func)(void *info), void *info, int wait) 856 857 { 857 - return sparc64_smp_call_function_mask(func, info, nonatomic, wait, 858 - cpu_online_map); 858 + return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map); 859 859 } 860 860 861 861 void smp_call_function_client(int irq, struct pt_regs *regs) ··· 890 894 891 895 void smp_tsb_sync(struct mm_struct *mm) 892 896 { 893 - sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); 897 + sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask); 894 898 } 895 899 896 900 extern unsigned long xcall_flush_tlb_mm;
+1 -2
arch/um/kernel/smp.c
··· 214 214 atomic_inc(&scf_finished); 215 215 } 216 216 217 - int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, 218 - int wait) 217 + int smp_call_function(void (*_func)(void *info), void *_info, int wait) 219 218 { 220 219 int cpus = num_online_cpus() - 1; 221 220 int i;
+2 -2
arch/x86/kernel/cpu/mtrr/main.c
··· 222 222 atomic_set(&data.gate,0); 223 223 224 224 /* Start the ball rolling on other CPUs */ 225 - if (smp_call_function(ipi_handler, &data, 1, 0) != 0) 225 + if (smp_call_function(ipi_handler, &data, 0) != 0) 226 226 panic("mtrr: timed out waiting for other CPUs\n"); 227 227 228 228 local_irq_save(flags); ··· 822 822 */ 823 823 void mtrr_save_state(void) 824 824 { 825 - smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); 825 + smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); 826 826 } 827 827 828 828 static int __init mtrr_init_finialize(void)
+1 -1
arch/x86/kernel/cpuid.c
··· 95 95 for (; count; count -= 16) { 96 96 cmd.eax = pos; 97 97 cmd.ecx = pos >> 32; 98 - smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); 98 + smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); 99 99 if (copy_to_user(tmp, &cmd, 16)) 100 100 return -EFAULT; 101 101 tmp += 16;
+1 -1
arch/x86/kernel/ldt.c
··· 68 68 load_LDT(pc); 69 69 mask = cpumask_of_cpu(smp_processor_id()); 70 70 if (!cpus_equal(current->mm->cpu_vm_mask, mask)) 71 - smp_call_function(flush_ldt, NULL, 1, 1); 71 + smp_call_function(flush_ldt, NULL, 1); 72 72 preempt_enable(); 73 73 #else 74 74 load_LDT(pc);
+1 -1
arch/x86/kernel/nmi_32.c
··· 87 87 88 88 #ifdef CONFIG_SMP 89 89 if (nmi_watchdog == NMI_LOCAL_APIC) 90 - smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); 90 + smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); 91 91 #endif 92 92 93 93 for_each_possible_cpu(cpu)
+1 -1
arch/x86/kernel/nmi_64.c
··· 96 96 97 97 #ifdef CONFIG_SMP 98 98 if (nmi_watchdog == NMI_LOCAL_APIC) 99 - smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); 99 + smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); 100 100 #endif 101 101 102 102 for (cpu = 0; cpu < NR_CPUS; cpu++)
+1 -1
arch/x86/kernel/smp.c
··· 164 164 if (reboot_force) 165 165 return; 166 166 167 - smp_call_function(stop_this_cpu, NULL, 0, 0); 167 + smp_call_function(stop_this_cpu, NULL, 0); 168 168 local_irq_save(flags); 169 169 disable_local_APIC(); 170 170 local_irq_restore(flags);
+1 -1
arch/x86/kernel/vsyscall_64.c
··· 278 278 { 279 279 long cpu = (long)arg; 280 280 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) 281 - smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); 281 + smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); 282 282 return NOTIFY_DONE; 283 283 } 284 284
+1 -1
arch/x86/kvm/vmx.c
··· 335 335 { 336 336 if (vmx->vcpu.cpu == -1) 337 337 return; 338 - smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); 338 + smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); 339 339 vmx->launched = 0; 340 340 } 341 341
+1 -1
arch/x86/kvm/x86.c
··· 4044 4044 * So need not to call smp_call_function_single() in that case. 4045 4045 */ 4046 4046 if (vcpu->guest_mode && vcpu->cpu != cpu) 4047 - smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); 4047 + smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0); 4048 4048 put_cpu(); 4049 4049 }
+4 -4
arch/x86/lib/msr-on-cpu.c
··· 30 30 31 31 rv.msr_no = msr_no; 32 32 if (safe) { 33 - smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1); 33 + smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); 34 34 err = rv.err; 35 35 } else { 36 - smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); 36 + smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); 37 37 } 38 38 *l = rv.l; 39 39 *h = rv.h; ··· 64 64 rv.l = l; 65 65 rv.h = h; 66 66 if (safe) { 67 - smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1); 67 + smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); 68 68 err = rv.err; 69 69 } else { 70 - smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); 70 + smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); 71 71 } 72 72 73 73 return err;
+1 -1
arch/x86/mach-voyager/voyager_smp.c
··· 1113 1113 /* broadcast a halt to all other CPUs */ 1114 1114 static void voyager_smp_send_stop(void) 1115 1115 { 1116 - smp_call_function(smp_stop_cpu_function, NULL, 1, 1); 1116 + smp_call_function(smp_stop_cpu_function, NULL, 1); 1117 1117 } 1118 1118 1119 1119 /* this function is triggered in time.c when a clock tick fires
+1 -1
arch/x86/xen/smp.c
··· 331 331 332 332 void xen_smp_send_stop(void) 333 333 { 334 - smp_call_function(stop_self, NULL, 0, 0); 334 + smp_call_function(stop_self, NULL, 0); 335 335 } 336 336 337 337 void xen_smp_send_reschedule(int cpu)
+1 -1
drivers/acpi/processor_idle.c
··· 1339 1339 static int acpi_processor_latency_notify(struct notifier_block *b, 1340 1340 unsigned long l, void *v) 1341 1341 { 1342 - smp_call_function(smp_callback, NULL, 0, 1); 1342 + smp_call_function(smp_callback, NULL, 1); 1343 1343 return NOTIFY_OK; 1344 1344 } 1345 1345
+1 -1
drivers/cpuidle/cpuidle.c
··· 340 340 static int cpuidle_latency_notify(struct notifier_block *b, 341 341 unsigned long l, void *v) 342 342 { 343 - smp_call_function(smp_callback, NULL, 0, 1); 343 + smp_call_function(smp_callback, NULL, 1); 344 344 return NOTIFY_OK; 345 345 } 346 346
+1 -1
include/asm-alpha/smp.h
··· 53 53 #else /* CONFIG_SMP */ 54 54 55 55 #define hard_smp_processor_id() 0 56 - #define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; }) 56 + #define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; }) 57 57 58 58 #endif /* CONFIG_SMP */ 59 59
+1 -1
include/asm-sparc/smp.h
··· 72 72 unsigned long arg3, unsigned long arg4, unsigned long arg5) 73 73 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } 74 74 75 - static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) 75 + static inline int smp_call_function(void (*func)(void *info), void *info, int wait) 76 76 { 77 77 xc1((smpfunc_t)func, (unsigned long)info); 78 78 return 0;
+4 -4
include/linux/smp.h
··· 62 62 /* 63 63 * Call a function on all other processors 64 64 */ 65 - int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); 65 + int smp_call_function(void(*func)(void *info), void *info, int wait); 66 66 int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 67 67 int wait); 68 68 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, 69 - int retry, int wait); 69 + int wait); 70 70 void __smp_call_function_single(int cpuid, struct call_single_data *data); 71 71 72 72 /* ··· 119 119 { 120 120 return 0; 121 121 } 122 - #define smp_call_function(func, info, retry, wait) \ 122 + #define smp_call_function(func, info, wait) \ 123 123 (up_smp_call_function(func, info)) 124 124 #define on_each_cpu(func,info,retry,wait) \ 125 125 ({ \ ··· 131 131 static inline void smp_send_reschedule(int cpu) { } 132 132 #define num_booting_cpus() 1 133 133 #define smp_prepare_boot_cpu() do {} while (0) 134 - #define smp_call_function_single(cpuid, func, info, retry, wait) \ 134 + #define smp_call_function_single(cpuid, func, info, wait) \ 135 135 ({ \ 136 136 WARN_ON(cpuid != 0); \ 137 137 local_irq_disable(); \
+2 -4
kernel/smp.c
··· 195 195 * smp_call_function_single - Run a function on a specific CPU 196 196 * @func: The function to run. This must be fast and non-blocking. 197 197 * @info: An arbitrary pointer to pass to the function. 198 - * @retry: Unused 199 198 * @wait: If true, wait until function has completed on other CPUs. 200 199 * 201 200 * Returns 0 on success, else a negative status code. Note that @wait ··· 202 203 * we fall back to on-stack allocation. 203 204 */ 204 205 int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 205 - int retry, int wait) 206 + int wait) 206 207 { 207 208 struct call_single_data d; 208 209 unsigned long flags; ··· 338 339 * smp_call_function(): Run a function on all other CPUs. 339 340 * @func: The function to run. This must be fast and non-blocking. 340 341 * @info: An arbitrary pointer to pass to the function. 341 - * @natomic: Unused 342 342 * @wait: If true, wait (atomically) until function has completed on other CPUs. 343 343 * 344 344 * Returns 0 on success, else a negative status code. ··· 349 351 * You must not call this function with disabled interrupts or from a 350 352 * hardware interrupt handler or from a bottom half handler. 351 353 */ 352 - int smp_call_function(void (*func)(void *), void *info, int natomic, int wait) 354 + int smp_call_function(void (*func)(void *), void *info, int wait) 353 355 { 354 356 int ret; 355 357
+1 -1
kernel/softirq.c
··· 679 679 int ret = 0; 680 680 681 681 preempt_disable(); 682 - ret = smp_call_function(func, info, retry, wait); 682 + ret = smp_call_function(func, info, wait); 683 683 local_irq_disable(); 684 684 func(info); 685 685 local_irq_enable();
+1 -1
kernel/time/tick-broadcast.c
··· 266 266 "offline CPU #%d\n", *oncpu); 267 267 else 268 268 smp_call_function_single(*oncpu, tick_do_broadcast_on_off, 269 - &reason, 1, 1); 269 + &reason, 1); 270 270 } 271 271 272 272 /*
+1 -1
net/core/flow.c
··· 298 298 init_completion(&info.completion); 299 299 300 300 local_bh_disable(); 301 - smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0); 301 + smp_call_function(flow_cache_flush_per_cpu, &info, 0); 302 302 flow_cache_flush_tasklet((unsigned long)&info); 303 303 local_bh_enable(); 304 304
+7 -7
net/iucv/iucv.c
··· 480 480 if (cpu_isset(cpu, iucv_buffer_cpumask) && 481 481 !cpu_isset(cpu, iucv_irq_cpumask)) 482 482 smp_call_function_single(cpu, iucv_allow_cpu, 483 - NULL, 0, 1); 483 + NULL, 1); 484 484 preempt_enable(); 485 485 } 486 486 ··· 498 498 cpumask = iucv_irq_cpumask; 499 499 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); 500 500 for_each_cpu_mask(cpu, cpumask) 501 - smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1); 501 + smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); 502 502 } 503 503 504 504 /** ··· 523 523 rc = -EIO; 524 524 preempt_disable(); 525 525 for_each_online_cpu(cpu) 526 - smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); 526 + smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 527 527 preempt_enable(); 528 528 if (cpus_empty(iucv_buffer_cpumask)) 529 529 /* No cpu could declare an iucv buffer. */ ··· 580 580 case CPU_ONLINE_FROZEN: 581 581 case CPU_DOWN_FAILED: 582 582 case CPU_DOWN_FAILED_FROZEN: 583 - smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); 583 + smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 584 584 break; 585 585 case CPU_DOWN_PREPARE: 586 586 case CPU_DOWN_PREPARE_FROZEN: ··· 589 589 if (cpus_empty(cpumask)) 590 590 /* Can't offline last IUCV enabled cpu. */ 591 591 return NOTIFY_BAD; 592 - smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1); 592 + smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); 593 593 if (cpus_empty(iucv_irq_cpumask)) 594 594 smp_call_function_single(first_cpu(iucv_buffer_cpumask), 595 - iucv_allow_cpu, NULL, 0, 1); 595 + iucv_allow_cpu, NULL, 1); 596 596 break; 597 597 } 598 598 return NOTIFY_OK; ··· 652 652 * pending interrupts force them to the work queue by calling 653 653 * an empty function on all cpus. 
654 654 */ 655 - smp_call_function(__iucv_cleanup_queue, NULL, 0, 1); 655 + smp_call_function(__iucv_cleanup_queue, NULL, 1); 656 656 spin_lock_irq(&iucv_queue_lock); 657 657 list_for_each_entry_safe(p, n, &iucv_task_queue, list) { 658 658 /* Remove stale work items from the task queue. */
+3 -3
virt/kvm/kvm_main.c
··· 1266 1266 case CPU_UP_CANCELED: 1267 1267 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 1268 1268 cpu); 1269 - smp_call_function_single(cpu, hardware_disable, NULL, 0, 1); 1269 + smp_call_function_single(cpu, hardware_disable, NULL, 1); 1270 1270 break; 1271 1271 case CPU_ONLINE: 1272 1272 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 1273 1273 cpu); 1274 - smp_call_function_single(cpu, hardware_enable, NULL, 0, 1); 1274 + smp_call_function_single(cpu, hardware_enable, NULL, 1); 1275 1275 break; 1276 1276 } 1277 1277 return NOTIFY_OK; ··· 1474 1474 for_each_online_cpu(cpu) { 1475 1475 smp_call_function_single(cpu, 1476 1476 kvm_arch_check_processor_compat, 1477 - &r, 0, 1); 1477 + &r, 1); 1478 1478 if (r < 0) 1479 1479 goto out_free_1; 1480 1480 }