stop_machine: introduce stop_machine_create/destroy.

Introduce stop_machine_create/destroy. With this interface, subsystems
that need a non-failing stop_machine environment can create the
stop_machine threads before actually calling stop_machine. When the
threads are no longer needed, they can be killed again with
stop_machine_destroy.

When stop_machine gets called and the threads aren't present, they
will be created and destroyed automatically around the call. This
restores the old behaviour of stop_machine.
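
As an illustration, here is a minimal sketch of how a subsystem might
use the new interface. Everything prefixed my_ is hypothetical; only
stop_machine_create(), stop_machine() and stop_machine_destroy() come
from this patch:

#include <linux/stop_machine.h>

static int my_stopped_fn(void *data)
{
        /* Runs with all CPUs stopped; must be short and non-sleeping. */
        return 0;
}

static int my_setup(void)
{
        /* Take a reference early, while an -ENOMEM can still be
         * handled; this creates the kstop threads if we are the
         * first user. */
        return stop_machine_create();
}

static int my_critical_path(void)
{
        /* Cannot fail due to missing threads anymore: they already
         * exist. A caller without a reference would get them created
         * and torn down automatically around this call instead. */
        return stop_machine(my_stopped_fn, NULL, NULL);
}

static void my_teardown(void)
{
        /* Drop the reference; the last user destroys the threads. */
        stop_machine_destroy();
}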

This patch also converts cpu hotplug to the new interface, since it
is a special case: cpu_down calls __stop_machine instead of
stop_machine, and the kstop threads are only created automatically
when stop_machine itself gets called. cpu_down therefore has to
create the threads explicitly up front.

Changing the code so that the threads would be created automatically
on __stop_machine is currently not possible: when __stop_machine gets
called, we already hold cpu_add_remove_lock, the same lock that
create_rt_workqueue would need to take. So the workqueue needs to be
created before the cpu hotplug code takes cpu_add_remove_lock.
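
To make the ordering concrete, here is a sketch of the call chain
that would deadlock; it is reconstructed for illustration and is not
part of the patch:

/*
 * If __stop_machine() tried to create the threads itself during a
 * CPU hotunplug, the chain would be roughly:
 *
 *   cpu_down()
 *     cpu_maps_update_begin()      takes cpu_add_remove_lock
 *     _cpu_down()
 *       __stop_machine()
 *         create_rt_workqueue()    also needs cpu_add_remove_lock
 *                                  -> deadlock on the same lock
 *
 * Hence cpu_down() takes its stop_machine_create() reference before
 * cpu_maps_update_begin(), as the kernel/cpu.c hunk below shows.
 */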

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

Authored by Heiko Carstens, committed by Rusty Russell
9ea09af3 c298be74

+72 -11 (total)

include/linux/stop_machine.h  (+22 -0)
···
  * won't come or go while it's being called. Used by hotplug cpu.
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+
+/**
+ * stop_machine_create: create all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads to be created before
+ * stop_machine actually gets called. This can be used by subsystems that
+ * need a non failing stop_machine infrastructure.
+ */
+int stop_machine_create(void);
+
+/**
+ * stop_machine_destroy: destroy all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads which were created with
+ * stop_machine_create to be destroyed again.
+ */
+void stop_machine_destroy(void);
+
 #else
 
 static inline int stop_machine(int (*fn)(void *), void *data,
···
        local_irq_enable();
        return ret;
 }
+
+static inline int stop_machine_create(void) { return 0; }
+static inline void stop_machine_destroy(void) { }
+
 #endif /* CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */

kernel/cpu.c  (+5 -1)
···
 
 int __ref cpu_down(unsigned int cpu)
 {
-       int err = 0;
+       int err;
 
+       err = stop_machine_create();
+       if (err)
+               return err;
        cpu_maps_update_begin();
 
        if (cpu_hotplug_disabled) {
···
 
 out:
        cpu_maps_update_done();
+       stop_machine_destroy();
        return err;
 }
 EXPORT_SYMBOL(cpu_down);

kernel/stop_machine.c  (+45 -10)
···
 static unsigned int num_threads;
 static atomic_t thread_ack;
 static DEFINE_MUTEX(lock);
-
+/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
+static DEFINE_MUTEX(setup_lock);
+/* Users of stop_machine. */
+static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const cpumask_t *active_cpus;
···
        return 0;
 }
 
+int stop_machine_create(void)
+{
+       mutex_lock(&setup_lock);
+       if (refcount)
+               goto done;
+       stop_machine_wq = create_rt_workqueue("kstop");
+       if (!stop_machine_wq)
+               goto err_out;
+       stop_machine_work = alloc_percpu(struct work_struct);
+       if (!stop_machine_work)
+               goto err_out;
+done:
+       refcount++;
+       mutex_unlock(&setup_lock);
+       return 0;
+
+err_out:
+       if (stop_machine_wq)
+               destroy_workqueue(stop_machine_wq);
+       mutex_unlock(&setup_lock);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(stop_machine_create);
+
+void stop_machine_destroy(void)
+{
+       mutex_lock(&setup_lock);
+       refcount--;
+       if (refcount)
+               goto done;
+       destroy_workqueue(stop_machine_wq);
+       free_percpu(stop_machine_work);
+done:
+       mutex_unlock(&setup_lock);
+}
+EXPORT_SYMBOL_GPL(stop_machine_destroy);
+
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
        struct work_struct *sm_work;
···
 {
        int ret;
 
+       ret = stop_machine_create();
+       if (ret)
+               return ret;
        /* No CPUs can come up or down during this. */
        get_online_cpus();
        ret = __stop_machine(fn, data, cpus);
        put_online_cpus();
-
+       stop_machine_destroy();
        return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
-
-static int __init stop_machine_init(void)
-{
-       stop_machine_wq = create_rt_workqueue("kstop");
-       stop_machine_work = alloc_percpu(struct work_struct);
-       return 0;
-}
-core_initcall(stop_machine_init);