stop_machine: introduce stop_machine_create/destroy.

Introduce stop_machine_create/destroy. With this interface, subsystems
that need a non-failing stop_machine environment can create the
stop_machine threads before actually calling stop_machine. When the
threads are no longer needed, they can be killed again with
stop_machine_destroy.
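
A minimal sketch of the intended calling pattern (the subsystem and
its functions are hypothetical; only stop_machine_create, stop_machine
and stop_machine_destroy come from this patch):

	/* Runs on one CPU while all others spin with interrupts disabled. */
	static int my_subsys_do_update(void *data)
	{
		return 0;
	}

	static int my_subsys_update(void *data)
	{
		int err;

		/* May sleep and may fail with -ENOMEM; nothing has been
		 * committed yet, so the error can still be handled. */
		err = stop_machine_create();
		if (err)
			return err;
		/* Cannot fail with -ENOMEM anymore: the kstop threads
		 * already exist. */
		err = stop_machine(my_subsys_do_update, data, NULL);
		stop_machine_destroy();
		return err;
	}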

When stop_machine gets called and the threads aren't present, they
will be created and destroyed automatically. This preserves the old
behaviour of stop_machine for all existing callers.

This patch also converts cpu hotplug to the new interface, since it
is special: cpu_down calls __stop_machine instead of stop_machine,
and the kstop threads are only created automatically when stop_machine
itself gets called.

Changing the code so that the threads are created automatically on
__stop_machine is currently not possible: when __stop_machine gets
called, we already hold cpu_add_remove_lock, which is the same lock
that create_rt_workqueue would take. So the workqueue needs to be
created before the cpu hotplug code takes cpu_add_remove_lock.
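
For reference, the lock recursion that rules this out, sketched from
the call chains involved (a simplification, not verbatim code):

	/*
	 * cpu_down()
	 *   cpu_maps_update_begin()           <- takes cpu_add_remove_lock
	 *     _cpu_down()
	 *       __stop_machine()
	 *         stop_machine_create()        <- if it were called here ...
	 *           create_rt_workqueue()
	 *             cpu_maps_update_begin()  <- ... would take
	 *                                         cpu_add_remove_lock again
	 */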

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

---
 include/linux/stop_machine.h |   22 ++++++++++++++
 kernel/cpu.c                 |    6 +++++-
 kernel/stop_machine.c        |   55 +++++++++++++++++++++++++----------
 3 files changed, 72 insertions(+), 11 deletions(-)

diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -35,6 +35,24 @@
  * won't come or go while it's being called. Used by hotplug cpu.
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+
+/**
+ * stop_machine_create: create all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads to be created before
+ * stop_machine actually gets called. This can be used by subsystems that
+ * need a non failing stop_machine infrastructure.
+ */
+int stop_machine_create(void);
+
+/**
+ * stop_machine_destroy: destroy all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads which were created with
+ * stop_machine_create to be destroyed again.
+ */
+void stop_machine_destroy(void);
+
 #else
 
 static inline int stop_machine(int (*fn)(void *), void *data,
@@ -46,5 +64,9 @@
 	local_irq_enable();
 	return ret;
 }
+
+static inline int stop_machine_create(void) { return 0; }
+static inline void stop_machine_destroy(void) { }
+
 #endif /* CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */

diff --git a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -269,8 +269,11 @@
 
 int __ref cpu_down(unsigned int cpu)
 {
-	int err = 0;
+	int err;
 
+	err = stop_machine_create();
+	if (err)
+		return err;
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {
@@ -297,6 +300,7 @@
 
 out:
 	cpu_maps_update_done();
+	stop_machine_destroy();
 	return err;
 }
 EXPORT_SYMBOL(cpu_down);

diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -38,7 +38,10 @@
 static unsigned int num_threads;
 static atomic_t thread_ack;
 static DEFINE_MUTEX(lock);
-
+/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
+static DEFINE_MUTEX(setup_lock);
+/* Users of stop_machine. */
+static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const cpumask_t *active_cpus;
@@ -109,6 +112,43 @@
 	return 0;
 }
 
+int stop_machine_create(void)
+{
+	mutex_lock(&setup_lock);
+	if (refcount)
+		goto done;
+	stop_machine_wq = create_rt_workqueue("kstop");
+	if (!stop_machine_wq)
+		goto err_out;
+	stop_machine_work = alloc_percpu(struct work_struct);
+	if (!stop_machine_work)
+		goto err_out;
+done:
+	refcount++;
+	mutex_unlock(&setup_lock);
+	return 0;
+
+err_out:
+	if (stop_machine_wq)
+		destroy_workqueue(stop_machine_wq);
+	mutex_unlock(&setup_lock);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(stop_machine_create);
+
+void stop_machine_destroy(void)
+{
+	mutex_lock(&setup_lock);
+	refcount--;
+	if (refcount)
+		goto done;
+	destroy_workqueue(stop_machine_wq);
+	free_percpu(stop_machine_work);
+done:
+	mutex_unlock(&setup_lock);
+}
+EXPORT_SYMBOL_GPL(stop_machine_destroy);
+
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
 	struct work_struct *sm_work;
@@ -146,19 +186,14 @@
 {
 	int ret;
 
+	ret = stop_machine_create();
+	if (ret)
+		return ret;
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
 	ret = __stop_machine(fn, data, cpus);
 	put_online_cpus();
-
+	stop_machine_destroy();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
-
-static int __init stop_machine_init(void)
-{
-	stop_machine_wq = create_rt_workqueue("kstop");
-	stop_machine_work = alloc_percpu(struct work_struct);
-	return 0;
-}
-core_initcall(stop_machine_init);