Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
stop_machine: fix up ftrace.c
stop_machine: Wean existing callers off stop_machine_run()
stop_machine(): stop_machine_run() changed to use cpu mask
Hotplug CPU: don't check cpu_online after take_cpu_down
Simplify stop_machine
stop_machine: add ALL_CPUS option
module: fix build warning with !CONFIG_KALLSYMS

 9 files changed, 211 insertions(+), 226 deletions(-)

arch/s390/kernel/kprobes.c | +3 -3
···
 	args.new = BREAKPOINT_INSTRUCTION;
 
 	kcb->kprobe_status = KPROBE_SWAP_INST;
-	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	stop_machine(swap_instruction, &args, NULL);
 	kcb->kprobe_status = status;
 }
···
 	args.new = p->opcode;
 
 	kcb->kprobe_status = KPROBE_SWAP_INST;
-	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	stop_machine(swap_instruction, &args, NULL);
 	kcb->kprobe_status = status;
 }
···
 		 * No kprobe at this address. The fault has not been
 		 * caused by a kprobe breakpoint. The race of breakpoint
 		 * vs. kprobe remove does not exist because on s390 we
-		 * use stop_machine_run to arm/disarm the breakpoints.
+		 * use stop_machine to arm/disarm the breakpoints.
 		 */
 		goto no_kprobe;

drivers/char/hw_random/intel-rng.c | +3 -3
···
 	struct intel_rng_hw *intel_rng_hw = _intel_rng_hw;
 	u8 mfc, dvc;
 
-	/* interrupts disabled in stop_machine_run call */
+	/* interrupts disabled in stop_machine call */
 
 	if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
 		pci_write_config_byte(intel_rng_hw->dev,
···
 	 * location with the Read ID command, all activity on the system
 	 * must be stopped until the state is back to normal.
 	 *
-	 * Use stop_machine_run because IPIs can be blocked by disabling
+	 * Use stop_machine because IPIs can be blocked by disabling
 	 * interrupts.
 	 */
-	err = stop_machine_run(intel_rng_hw_init, intel_rng_hw, NR_CPUS);
+	err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL);
 	pci_dev_put(dev);
 	iounmap(intel_rng_hw->mem);
 	kfree(intel_rng_hw);

include/linux/stop_machine.h | +33 -17
···
    (and more). So the "read" side to such a lock is anything which
    diables preeempt. */
 #include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <asm/system.h>
 
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+
+/* Deprecated, but useful for transition. */
+#define ALL_CPUS ~0U
+
 /**
- * stop_machine_run: freeze the machine on all CPUs and run this function
+ * stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn()
- * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  *
- * Description: This causes a thread to be scheduled on every other cpu,
- * each of which disables interrupts, and finally interrupts are disabled
- * on the current CPU.  The result is that noone is holding a spinlock
- * or inside any other preempt-disabled region when @fn() runs.
+ * Description: This causes a thread to be scheduled on every cpu,
+ * each of which disables interrupts.  The result is that noone is
+ * holding a spinlock or inside any other preempt-disabled region when
+ * @fn() runs.
  *
  * This can be thought of as a very heavy write lock, equivalent to
  * grabbing every spinlock in the kernel. */
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 
 /**
- * __stop_machine_run: freeze the machine on all CPUs and run this function
+ * __stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn
- * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  *
- * Description: This is a special version of the above, which returns the
- * thread which has run @fn(): kthread_stop will return the return value
- * of @fn().  Used by hotplug cpu.
+ * Description: This is a special version of the above, which assumes cpus
+ * won't come or go while it's being called.  Used by hotplug cpu.
  */
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-				       unsigned int cpu);
-
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 #else
 
-static inline int stop_machine_run(int (*fn)(void *), void *data,
-				   unsigned int cpu)
+static inline int stop_machine(int (*fn)(void *), void *data,
+			       const cpumask_t *cpus)
 {
 	int ret;
 	local_irq_disable();
···
 	return ret;
 }
 #endif /* CONFIG_SMP */
+
+static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data,
+						unsigned int cpu)
+{
+	/* If they don't care which cpu fn runs on, just pick one. */
+	if (cpu == NR_CPUS)
+		return stop_machine(fn, data, NULL);
+	else if (cpu == ~0U)
+		return stop_machine(fn, data, &cpu_possible_map);
+	else {
+		cpumask_t cpus = cpumask_of_cpu(cpu);
+		return stop_machine(fn, data, &cpus);
+	}
+}
 #endif /* _LINUX_STOP_MACHINE */
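
The compat wrapper above spells out the conversion rule for callers: the old cpu-number argument becomes a cpumask pointer, with NULL meaning "any online cpu". A rough sketch of a converted call site follows; do_flip() and flip_on_cpu3() are made up here purely for illustration and are not part of the patch.

static int do_flip(void *data)
{
	/* Runs while every other online cpu spins with interrupts disabled. */
	return 0;
}

static int flip_on_cpu3(void)
{
	cpumask_t mask = cpumask_of_cpu(3);
	int err;

	/* Old API: err = stop_machine_run(do_flip, NULL, NR_CPUS); */
	err = stop_machine(do_flip, NULL, NULL);	/* any online cpu */
	if (err)
		return err;

	/* Old API: err = stop_machine_run(do_flip, NULL, 3); */
	return stop_machine(do_flip, NULL, &mask);	/* pin @fn to cpu 3 */
}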

kernel/cpu.c | +5 -11
···
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
···
 	cpus_setall(tmp);
 	cpu_clear(cpu, tmp);
 	set_cpus_allowed_ptr(current, &tmp);
+	tmp = cpumask_of_cpu(cpu);
 
-	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
-
-	if (IS_ERR(p) || cpu_online(cpu)) {
+	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
 			BUG();
 
-		if (IS_ERR(p)) {
-			err = PTR_ERR(p);
-			goto out_allowed;
-		}
-		goto out_thread;
+		goto out_allowed;
 	}
+	BUG_ON(cpu_online(cpu));
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
···
 
 	check_for_tasks(cpu);
 
-out_thread:
-	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
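
The hotplug conversion above is the pattern for targeting one particular cpu under the new API: build a single-cpu mask and read success straight from the int return, instead of juggling the task_struct from __stop_machine_run() and a later kthread_stop(). A minimal sketch under that assumption; quiesce_cpu() and its do_quiesce() callback are hypothetical.

static int do_quiesce(void *unused)
{
	/* Runs on the chosen cpu; everything else is frozen with irqs off. */
	return 0;
}

static int quiesce_cpu(unsigned int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	/* __stop_machine() assumes cpus can't come or go, so the caller
	 * must already hold off hotplug (e.g. via get_online_cpus()). */
	return __stop_machine(do_quiesce, NULL, &mask);
}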

kernel/module.c | +17 -16
···
 		return -ENOENT;
 }
 
-/* lookup symbol in given range of kernel_symbols */
-static const struct kernel_symbol *lookup_symbol(const char *name,
-	const struct kernel_symbol *start,
-	const struct kernel_symbol *stop)
-{
-	const struct kernel_symbol *ks = start;
-	for (; ks < stop; ks++)
-		if (strcmp(ks->name, name) == 0)
-			return ks;
-	return NULL;
-}
-
 /* Search for module by name: must hold module_mutex. */
 static struct module *find_module(const char *name)
 {
···
 	if (flags & O_NONBLOCK) {
 		struct stopref sref = { mod, flags, forced };
 
-		return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
+		return stop_machine(__try_stop_module, &sref, NULL);
 	} else {
 		/* We don't need to stop the machine for this. */
 		mod->state = MODULE_STATE_GOING;
···
 static void free_module(struct module *mod)
 {
 	/* Delete from various lists */
-	stop_machine_run(__unlink_module, mod, NR_CPUS);
+	stop_machine(__unlink_module, mod, NULL);
 	remove_notes_attrs(mod);
 	remove_sect_attrs(mod);
 	mod_kobject_remove(mod);
···
 }
 
 #ifdef CONFIG_KALLSYMS
+
+/* lookup symbol in given range of kernel_symbols */
+static const struct kernel_symbol *lookup_symbol(const char *name,
+	const struct kernel_symbol *start,
+	const struct kernel_symbol *stop)
+{
+	const struct kernel_symbol *ks = start;
+	for (; ks < stop; ks++)
+		if (strcmp(ks->name, name) == 0)
+			return ks;
+	return NULL;
+}
+
 static int is_exported(const char *name, const struct module *mod)
 {
 	if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
···
 	/* Now sew it into the lists so we can get lockdep and oops
 	 * info during argument parsing.  Noone should access us, since
 	 * strong_try_module_get() will fail. */
-	stop_machine_run(__link_module, mod, NR_CPUS);
+	stop_machine(__link_module, mod, NULL);
 
 	/* Size of section 0 is 0, so this works well if no params */
 	err = parse_args(mod->name, mod->args,
···
 	return mod;
 
 unlink:
-	stop_machine_run(__unlink_module, mod, NR_CPUS);
+	stop_machine(__unlink_module, mod, NULL);
 	module_arch_cleanup(mod);
 cleanup:
 	kobject_del(&mod->mkobj.kobj);

kernel/rcuclassic.c | +2 -2
···
 	 * rdp->cpu is the current cpu.
 	 *
 	 * cpu_online_map is updated by the _cpu_down()
-	 * using stop_machine_run(). Since we're in irqs disabled
-	 * section, stop_machine_run() is not exectuting, hence
+	 * using __stop_machine(). Since we're in irqs disabled
+	 * section, __stop_machine() is not exectuting, hence
 	 * the cpu_online_map is stable.
 	 *
 	 * However, a cpu might have been offlined _just_ before

kernel/stop_machine.c | +143 -169
···
-/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
+/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
  * GPL v2 and any later version.
  */
 #include <linux/cpu.h>
···
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
-/* Since we effect priority and affinity (both of which are visible
- * to, and settable by outside processes) we do indirection via a
- * kthread. */
-
-/* Thread to stop each CPU in user context. */
+/* This controls the threads on each CPU. */
 enum stopmachine_state {
-	STOPMACHINE_WAIT,
+	/* Dummy starting state for thread. */
+	STOPMACHINE_NONE,
+	/* Awaiting everyone to be scheduled. */
 	STOPMACHINE_PREPARE,
+	/* Disable interrupts. */
 	STOPMACHINE_DISABLE_IRQ,
+	/* Run the function */
+	STOPMACHINE_RUN,
+	/* Exit */
 	STOPMACHINE_EXIT,
 };
-
-static enum stopmachine_state stopmachine_state;
-static unsigned int stopmachine_num_threads;
-static atomic_t stopmachine_thread_ack;
-
-static int stopmachine(void *cpu)
-{
-	int irqs_disabled = 0;
-	int prepared = 0;
-	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
-
-	set_cpus_allowed_ptr(current, cpumask);
-
-	/* Ack: we are alive */
-	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
-	atomic_inc(&stopmachine_thread_ack);
-
-	/* Simple state machine */
-	while (stopmachine_state != STOPMACHINE_EXIT) {
-		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
-		    && !irqs_disabled) {
-			local_irq_disable();
-			hard_irq_disable();
-			irqs_disabled = 1;
-			/* Ack: irqs disabled. */
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		} else if (stopmachine_state == STOPMACHINE_PREPARE
-			   && !prepared) {
-			/* Everyone is in place, hold CPU. */
-			preempt_disable();
-			prepared = 1;
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		}
-		/* Yield in first stage: migration threads need to
-		 * help our sisters onto their CPUs. */
-		if (!prepared && !irqs_disabled)
-			yield();
-		cpu_relax();
-	}
-
-	/* Ack: we are exiting. */
-	smp_mb(); /* Must read state first. */
-	atomic_inc(&stopmachine_thread_ack);
-
-	if (irqs_disabled)
-		local_irq_enable();
-	if (prepared)
-		preempt_enable();
-
-	return 0;
-}
-
-/* Change the thread state */
-static void stopmachine_set_state(enum stopmachine_state state)
-{
-	atomic_set(&stopmachine_thread_ack, 0);
-	smp_wmb();
-	stopmachine_state = state;
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
-		cpu_relax();
-}
-
-static int stop_machine(void)
-{
-	int i, ret = 0;
-
-	atomic_set(&stopmachine_thread_ack, 0);
-	stopmachine_num_threads = 0;
-	stopmachine_state = STOPMACHINE_WAIT;
-
-	for_each_online_cpu(i) {
-		if (i == raw_smp_processor_id())
-			continue;
-		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
-		if (ret < 0)
-			break;
-		stopmachine_num_threads++;
-	}
-
-	/* Wait for them all to come to life. */
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
-		yield();
-		cpu_relax();
-	}
-
-	/* If some failed, kill them all. */
-	if (ret < 0) {
-		stopmachine_set_state(STOPMACHINE_EXIT);
-		return ret;
-	}
-
-	/* Now they are all started, make them hold the CPUs, ready. */
-	preempt_disable();
-	stopmachine_set_state(STOPMACHINE_PREPARE);
-
-	/* Make them disable irqs. */
-	local_irq_disable();
-	hard_irq_disable();
-	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
-
-	return 0;
-}
-
-static void restart_machine(void)
-{
-	stopmachine_set_state(STOPMACHINE_EXIT);
-	local_irq_enable();
-	preempt_enable_no_resched();
-}
+static enum stopmachine_state state;
 
 struct stop_machine_data {
 	int (*fn)(void *);
 	void *data;
-	struct completion done;
+	int fnret;
 };
 
-static int do_stop(void *_smdata)
+/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+static unsigned int num_threads;
+static atomic_t thread_ack;
+static struct completion finished;
+static DEFINE_MUTEX(lock);
+
+static void set_state(enum stopmachine_state newstate)
 {
-	struct stop_machine_data *smdata = _smdata;
-	int ret;
-
-	ret = stop_machine();
-	if (ret == 0) {
-		ret = smdata->fn(smdata->data);
-		restart_machine();
-	}
-
-	/* We're done: you can kthread_stop us now */
-	complete(&smdata->done);
-
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
-	return ret;
+	/* Reset ack counter. */
+	atomic_set(&thread_ack, num_threads);
+	smp_wmb();
+	state = newstate;
 }
 
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-				       unsigned int cpu)
+/* Last one to ack a state moves to the next state. */
+static void ack_state(void)
 {
-	static DEFINE_MUTEX(stopmachine_mutex);
-	struct stop_machine_data smdata;
-	struct task_struct *p;
+	if (atomic_dec_and_test(&thread_ack)) {
+		/* If we're the last one to ack the EXIT, we're finished. */
+		if (state == STOPMACHINE_EXIT)
+			complete(&finished);
+		else
+			set_state(state + 1);
+	}
+}
 
-	smdata.fn = fn;
-	smdata.data = data;
-	init_completion(&smdata.done);
+/* This is the actual thread which stops the CPU.  It exits by itself rather
+ * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
+static int stop_cpu(struct stop_machine_data *smdata)
+{
+	enum stopmachine_state curstate = STOPMACHINE_NONE;
+	int uninitialized_var(ret);
 
-	mutex_lock(&stopmachine_mutex);
+	/* Simple state machine */
+	do {
+		/* Chill out and ensure we re-read stopmachine_state. */
+		cpu_relax();
+		if (state != curstate) {
+			curstate = state;
+			switch (curstate) {
+			case STOPMACHINE_DISABLE_IRQ:
+				local_irq_disable();
+				hard_irq_disable();
+				break;
+			case STOPMACHINE_RUN:
+				/* |= allows error detection if functions on
+				 * multiple CPUs. */
+				smdata->fnret |= smdata->fn(smdata->data);
+				break;
+			default:
+				break;
+			}
+			ack_state();
+		}
+	} while (curstate != STOPMACHINE_EXIT);
 
-	/* If they don't care which CPU fn runs on, bind to any online one. */
-	if (cpu == NR_CPUS)
-		cpu = raw_smp_processor_id();
+	local_irq_enable();
+	do_exit(0);
+}
 
-	p = kthread_create(do_stop, &smdata, "kstopmachine");
-	if (!IS_ERR(p)) {
+/* Callback for CPUs which aren't supposed to do anything. */
+static int chill(void *unused)
+{
+	return 0;
+}
+
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
+{
+	int i, err;
+	struct stop_machine_data active, idle;
+	struct task_struct **threads;
+
+	active.fn = fn;
+	active.data = data;
+	active.fnret = 0;
+	idle.fn = chill;
+	idle.data = NULL;
+
+	/* This could be too big for stack on large machines. */
+	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
+	if (!threads)
+		return -ENOMEM;
+
+	/* Set up initial state. */
+	mutex_lock(&lock);
+	init_completion(&finished);
+	num_threads = num_online_cpus();
+	set_state(STOPMACHINE_PREPARE);
+
+	for_each_online_cpu(i) {
+		struct stop_machine_data *smdata = &idle;
 		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
-		/* One high-prio thread per cpu.  We'll do this one. */
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-		kthread_bind(p, cpu);
-		wake_up_process(p);
-		wait_for_completion(&smdata.done);
+		if (!cpus) {
+			if (i == first_cpu(cpu_online_map))
+				smdata = &active;
+		} else {
+			if (cpu_isset(i, *cpus))
+				smdata = &active;
+		}
+
+		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
+					    i);
+		if (IS_ERR(threads[i])) {
+			err = PTR_ERR(threads[i]);
+			threads[i] = NULL;
+			goto kill_threads;
+		}
+
+		/* Place it onto correct cpu. */
+		kthread_bind(threads[i], i);
+
+		/* Make it highest prio. */
+		if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
+			BUG();
 	}
-	mutex_unlock(&stopmachine_mutex);
-	return p;
+
+	/* We've created all the threads.  Wake them all: hold this CPU so one
+	 * doesn't hit this CPU until we're ready. */
+	get_cpu();
+	for_each_online_cpu(i)
+		wake_up_process(threads[i]);
+
+	/* This will release the thread on our CPU. */
+	put_cpu();
+	wait_for_completion(&finished);
+	mutex_unlock(&lock);
+
+	kfree(threads);
+
+	return active.fnret;
+
+kill_threads:
+	for_each_online_cpu(i)
+		if (threads[i])
+			kthread_stop(threads[i]);
+	mutex_unlock(&lock);
+
+	kfree(threads);
+	return err;
 }
 
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-	struct task_struct *p;
 	int ret;
 
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
-	p = __stop_machine_run(fn, data, cpu);
-	if (!IS_ERR(p))
-		ret = kthread_stop(p);
-	else
-		ret = PTR_ERR(p);
+	ret = __stop_machine(fn, data, cpus);
 	put_online_cpus();
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine);
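
A detail of the rewrite that callers may care about: when the mask names several cpus, the callback runs on each of them during STOPMACHINE_RUN and the individual return values are OR'd into fnret, so stop_machine() returns 0 only if every invocation succeeded. A small sketch under that assumption; count_me() and poke_all_cpus() are invented for illustration.

static atomic_t visited = ATOMIC_INIT(0);

/* Runs once on every cpu in the mask while the rest of the machine spins. */
static int count_me(void *unused)
{
	atomic_inc(&visited);
	return 0;
}

static int poke_all_cpus(void)
{
	int err = stop_machine(count_me, NULL, &cpu_online_map);

	/* err is the OR of the per-cpu returns; 0 means count_me() succeeded
	 * everywhere, and visited now equals num_online_cpus(). */
	return err;
}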

kernel/trace/ftrace.c | +3 -3
···
 
 static void ftrace_run_update_code(int command)
 {
-	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
+	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
 void ftrace_disable_daemon(void)
···
 	    !ftrace_enabled || !ftraced_trigger)
 		return 0;
 
-	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
+	stop_machine(__ftrace_update_code, NULL, NULL);
 
 	return 1;
 }
···
 
 	addr = (unsigned long)ftrace_record_ip;
 
-	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
+	stop_machine(ftrace_dyn_arch_init, &addr, NULL);
 
 	/* ftrace_dyn_arch_init places the return code in addr */
 	if (addr) {

mm/page_alloc.c | +2 -2
···
 
 #endif /* CONFIG_NUMA */
 
-/* return values int ....just for stop_machine_run() */
+/* return values int ....just for stop_machine() */
 static int __build_all_zonelists(void *dummy)
 {
 	int nid;
···
 	} else {
 		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
-		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
+		stop_machine(__build_all_zonelists, NULL, NULL);
 		/* cpuset refresh routine should be here */
 	}
 	vm_total_pages = nr_free_pagecache_pages();