Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: Introduce kthread_run_worker[_on_cpu]()

kthread_create() creates a kthread without running it yet. kthread_run()
creates a kthread and runs it.

On the other hand, kthread_create_worker() creates a kthread worker and
runs it.

This difference in behaviours is confusing. Also, there is no way to
create a kthread worker and set its affinity using kthread_bind_mask() or
kthread_affine_preferred() before starting it.

Consolidate the behaviours and introduce kthread_run_worker[_on_cpu]()
that behaves just like kthread_run(). kthread_create_worker[_on_cpu]()
will now only create a kthread worker without starting it.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>

+83 -66
+1 -1
arch/x86/kvm/i8254.c
··· 681 681 pid_nr = pid_vnr(pid); 682 682 put_pid(pid); 683 683 684 - pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr); 684 + pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr); 685 685 if (IS_ERR(pit->worker)) 686 686 goto fail_kthread; 687 687
+1 -1
crypto/crypto_engine.c
··· 517 517 crypto_init_queue(&engine->queue, qlen); 518 518 spin_lock_init(&engine->queue_lock); 519 519 520 - engine->kworker = kthread_create_worker(0, "%s", engine->name); 520 + engine->kworker = kthread_run_worker(0, "%s", engine->name); 521 521 if (IS_ERR(engine->kworker)) { 522 522 dev_err(dev, "failed to create crypto request pump task\n"); 523 523 return NULL;
+1 -1
drivers/cpufreq/cppc_cpufreq.c
··· 225 225 if (fie_disabled) 226 226 return; 227 227 228 - kworker_fie = kthread_create_worker(0, "cppc_fie"); 228 + kworker_fie = kthread_run_worker(0, "cppc_fie"); 229 229 if (IS_ERR(kworker_fie)) { 230 230 pr_warn("%s: failed to create kworker_fie: %ld\n", __func__, 231 231 PTR_ERR(kworker_fie));
+1 -1
drivers/gpu/drm/drm_vblank_work.c
··· 277 277 278 278 INIT_LIST_HEAD(&vblank->pending_work); 279 279 init_waitqueue_head(&vblank->work_wait_queue); 280 - worker = kthread_create_worker(0, "card%d-crtc%d", 280 + worker = kthread_run_worker(0, "card%d-crtc%d", 281 281 vblank->dev->primary->index, 282 282 vblank->pipe); 283 283 if (IS_ERR(worker))
+1 -1
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
··· 369 369 if (!data[n].ce[0]) 370 370 continue; 371 371 372 - worker = kthread_create_worker(0, "igt/parallel:%s", 372 + worker = kthread_run_worker(0, "igt/parallel:%s", 373 373 data[n].ce[0]->engine->name); 374 374 if (IS_ERR(worker)) { 375 375 err = PTR_ERR(worker);
+1 -1
drivers/gpu/drm/i915/gt/selftest_execlists.c
··· 3574 3574 arg[id].batch = NULL; 3575 3575 arg[id].count = 0; 3576 3576 3577 - worker[id] = kthread_create_worker(0, "igt/smoke:%d", id); 3577 + worker[id] = kthread_run_worker(0, "igt/smoke:%d", id); 3578 3578 if (IS_ERR(worker[id])) { 3579 3579 err = PTR_ERR(worker[id]); 3580 3580 break;
+1 -1
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
··· 1025 1025 threads[tmp].engine = other; 1026 1026 threads[tmp].flags = flags; 1027 1027 1028 - worker = kthread_create_worker(0, "igt/%s", 1028 + worker = kthread_run_worker(0, "igt/%s", 1029 1029 other->name); 1030 1030 if (IS_ERR(worker)) { 1031 1031 err = PTR_ERR(worker);
+1 -1
drivers/gpu/drm/i915/gt/selftest_slpc.c
··· 489 489 return -ENOMEM; 490 490 491 491 for_each_gt(gt, i915, i) { 492 - threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id); 492 + threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id); 493 493 494 494 if (IS_ERR(threads[i].worker)) { 495 495 ret = PTR_ERR(threads[i].worker);
+4 -4
drivers/gpu/drm/i915/selftests/i915_request.c
··· 492 492 for (n = 0; n < ncpus; n++) { 493 493 struct kthread_worker *worker; 494 494 495 - worker = kthread_create_worker(0, "igt/%d", n); 495 + worker = kthread_run_worker(0, "igt/%d", n); 496 496 if (IS_ERR(worker)) { 497 497 ret = PTR_ERR(worker); 498 498 ncpus = n; ··· 1645 1645 for_each_uabi_engine(engine, i915) { 1646 1646 struct kthread_worker *worker; 1647 1647 1648 - worker = kthread_create_worker(0, "igt/parallel:%s", 1648 + worker = kthread_run_worker(0, "igt/parallel:%s", 1649 1649 engine->name); 1650 1650 if (IS_ERR(worker)) { 1651 1651 err = PTR_ERR(worker); ··· 1806 1806 unsigned int i = idx * ncpus + n; 1807 1807 struct kthread_worker *worker; 1808 1808 1809 - worker = kthread_create_worker(0, "igt/%d.%d", idx, n); 1809 + worker = kthread_run_worker(0, "igt/%d.%d", idx, n); 1810 1810 if (IS_ERR(worker)) { 1811 1811 ret = PTR_ERR(worker); 1812 1812 goto out_flush; ··· 3219 3219 3220 3220 memset(&engines[idx].p, 0, sizeof(engines[idx].p)); 3221 3221 3222 - worker = kthread_create_worker(0, "igt:%s", 3222 + worker = kthread_run_worker(0, "igt:%s", 3223 3223 engine->name); 3224 3224 if (IS_ERR(worker)) { 3225 3225 err = PTR_ERR(worker);
+1 -1
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
··· 109 109 110 110 mutex_init(&kms->dump_mutex); 111 111 112 - kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot"); 112 + kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot"); 113 113 if (IS_ERR(kms->dump_worker)) 114 114 DRM_ERROR("failed to create disp state task\n"); 115 115
+1 -1
drivers/gpu/drm/msm/msm_atomic.c
··· 115 115 timer->kms = kms; 116 116 timer->crtc_idx = crtc_idx; 117 117 118 - timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx); 118 + timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx); 119 119 if (IS_ERR(timer->worker)) { 120 120 int ret = PTR_ERR(timer->worker); 121 121 timer->worker = NULL;
+1 -1
drivers/gpu/drm/msm/msm_gpu.c
··· 859 859 gpu->funcs = funcs; 860 860 gpu->name = name; 861 861 862 - gpu->worker = kthread_create_worker(0, "gpu-worker"); 862 + gpu->worker = kthread_run_worker(0, "gpu-worker"); 863 863 if (IS_ERR(gpu->worker)) { 864 864 ret = PTR_ERR(gpu->worker); 865 865 gpu->worker = NULL;
+1 -1
drivers/gpu/drm/msm/msm_kms.c
··· 269 269 /* initialize event thread */ 270 270 ev_thread = &priv->event_thread[drm_crtc_index(crtc)]; 271 271 ev_thread->dev = ddev; 272 - ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id); 272 + ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id); 273 273 if (IS_ERR(ev_thread->worker)) { 274 274 ret = PTR_ERR(ev_thread->worker); 275 275 DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+1 -1
drivers/media/platform/chips-media/wave5/wave5-vpu.c
··· 271 271 dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n"); 272 272 hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 273 273 dev->hrtimer.function = &wave5_vpu_timer_callback; 274 - dev->worker = kthread_create_worker(0, "vpu_irq_thread"); 274 + dev->worker = kthread_run_worker(0, "vpu_irq_thread"); 275 275 if (IS_ERR(dev->worker)) { 276 276 dev_err(&pdev->dev, "failed to create vpu irq worker\n"); 277 277 ret = PTR_ERR(dev->worker);
+1 -1
drivers/net/dsa/mv88e6xxx/chip.c
··· 394 394 kthread_init_delayed_work(&chip->irq_poll_work, 395 395 mv88e6xxx_irq_poll); 396 396 397 - chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev)); 397 + chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev)); 398 398 if (IS_ERR(chip->kworker)) 399 399 return PTR_ERR(chip->kworker); 400 400
+1 -1
drivers/net/ethernet/intel/ice/ice_dpll.c
··· 2053 2053 struct kthread_worker *kworker; 2054 2054 2055 2055 kthread_init_delayed_work(&d->work, ice_dpll_periodic_work); 2056 - kworker = kthread_create_worker(0, "ice-dplls-%s", 2056 + kworker = kthread_run_worker(0, "ice-dplls-%s", 2057 2057 dev_name(ice_pf_to_dev(pf))); 2058 2058 if (IS_ERR(kworker)) 2059 2059 return PTR_ERR(kworker);
+1 -1
drivers/net/ethernet/intel/ice/ice_gnss.c
··· 182 182 pf->gnss_serial = gnss; 183 183 184 184 kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); 185 - kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); 185 + kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev)); 186 186 if (IS_ERR(kworker)) { 187 187 kfree(gnss); 188 188 return NULL;
+1 -1
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 3080 3080 /* Allocate a kworker for handling work required for the ports 3081 3081 * connected to the PTP hardware clock. 3082 3082 */ 3083 - kworker = kthread_create_worker(0, "ice-ptp-%s", 3083 + kworker = kthread_run_worker(0, "ice-ptp-%s", 3084 3084 dev_name(ice_pf_to_dev(pf))); 3085 3085 if (IS_ERR(kworker)) 3086 3086 return PTR_ERR(kworker);
+1 -1
drivers/platform/chrome/cros_ec_spi.c
··· 715 715 int err; 716 716 717 717 ec_spi->high_pri_worker = 718 - kthread_create_worker(0, "cros_ec_spi_high_pri"); 718 + kthread_run_worker(0, "cros_ec_spi_high_pri"); 719 719 720 720 if (IS_ERR(ec_spi->high_pri_worker)) { 721 721 err = PTR_ERR(ec_spi->high_pri_worker);
+1 -1
drivers/ptp/ptp_clock.c
··· 296 296 297 297 if (ptp->info->do_aux_work) { 298 298 kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); 299 - ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index); 299 + ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index); 300 300 if (IS_ERR(ptp->kworker)) { 301 301 err = PTR_ERR(ptp->kworker); 302 302 pr_err("failed to create ptp aux_worker %d\n", err);
+1 -1
drivers/spi/spi.c
··· 2060 2060 ctlr->busy = false; 2061 2061 ctlr->queue_empty = true; 2062 2062 2063 - ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); 2063 + ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev)); 2064 2064 if (IS_ERR(ctlr->kworker)) { 2065 2065 dev_err(&ctlr->dev, "failed to create message pump kworker\n"); 2066 2066 return PTR_ERR(ctlr->kworker);
+1 -1
drivers/usb/typec/tcpm/tcpm.c
··· 7635 7635 mutex_init(&port->lock); 7636 7636 mutex_init(&port->swap_lock); 7637 7637 7638 - port->wq = kthread_create_worker(0, dev_name(dev)); 7638 + port->wq = kthread_run_worker(0, dev_name(dev)); 7639 7639 if (IS_ERR(port->wq)) 7640 7640 return ERR_CAST(port->wq); 7641 7641 sched_set_fifo(port->wq->task);
+1 -1
drivers/vdpa/vdpa_sim/vdpa_sim.c
··· 229 229 dev = &vdpasim->vdpa.dev; 230 230 231 231 kthread_init_work(&vdpasim->work, vdpasim_work_fn); 232 - vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s", 232 + vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s", 233 233 dev_attr->name); 234 234 if (IS_ERR(vdpasim->worker)) 235 235 goto err_iommu;
+1 -1
drivers/watchdog/watchdog_dev.c
··· 1229 1229 { 1230 1230 int err; 1231 1231 1232 - watchdog_kworker = kthread_create_worker(0, "watchdogd"); 1232 + watchdog_kworker = kthread_run_worker(0, "watchdogd"); 1233 1233 if (IS_ERR(watchdog_kworker)) { 1234 1234 pr_err("Failed to create watchdog kworker\n"); 1235 1235 return PTR_ERR(watchdog_kworker);
+1 -1
fs/erofs/zdata.c
··· 320 320 static struct kthread_worker *erofs_init_percpu_worker(int cpu) 321 321 { 322 322 struct kthread_worker *worker = 323 - kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u"); 323 + kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u"); 324 324 325 325 if (IS_ERR(worker)) 326 326 return worker;
+41 -7
include/linux/kthread.h
··· 193 193 const char namefmt[], ...); 194 194 195 195 #define kthread_create_worker(flags, namefmt, ...) \ 196 - ({ \ 197 - struct kthread_worker *__kw \ 198 - = kthread_create_worker_on_node(flags, NUMA_NO_NODE, \ 199 - namefmt, ## __VA_ARGS__); \ 200 - if (!IS_ERR(__kw)) \ 201 - wake_up_process(__kw->task); \ 202 - __kw; \ 196 + kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__); 197 + 198 + /** 199 + * kthread_run_worker - create and wake a kthread worker. 200 + * @flags: flags modifying the default behavior of the worker 201 + * @namefmt: printf-style name for the thread. 202 + * 203 + * Description: Convenient wrapper for kthread_create_worker() followed by 204 + * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM). 205 + */ 206 + #define kthread_run_worker(flags, namefmt, ...) \ 207 + ({ \ 208 + struct kthread_worker *__kw \ 209 + = kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \ 210 + if (!IS_ERR(__kw)) \ 211 + wake_up_process(__kw->task); \ 212 + __kw; \ 203 213 }) 204 214 205 215 struct kthread_worker * 206 216 kthread_create_worker_on_cpu(int cpu, unsigned int flags, 207 217 const char namefmt[]); 218 + 219 + /** 220 + * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker. 221 + * @cpu: CPU number 222 + * @flags: flags modifying the default behavior of the worker 223 + * @namefmt: printf-style name for the thread. Format is restricted 224 + * to "name.*%u". Code fills in cpu number. 225 + * 226 + * Description: Convenient wrapper for kthread_create_worker_on_cpu() 227 + * followed by wake_up_process(). Returns the kthread_worker or 228 + * ERR_PTR(-ENOMEM). 
229 + */ 230 + static inline struct kthread_worker * 231 + kthread_run_worker_on_cpu(int cpu, unsigned int flags, 232 + const char namefmt[]) 233 + { 234 + struct kthread_worker *kw; 235 + 236 + kw = kthread_create_worker_on_cpu(cpu, flags, namefmt); 237 + if (!IS_ERR(kw)) 238 + wake_up_process(kw->task); 239 + 240 + return kw; 241 + } 208 242 209 243 bool kthread_queue_work(struct kthread_worker *worker, 210 244 struct kthread_work *work);
+7 -24
kernel/kthread.c
··· 1077 1077 worker = __kthread_create_worker_on_node(flags, node, namefmt, args); 1078 1078 va_end(args); 1079 1079 1080 - if (worker) 1081 - wake_up_process(worker->task); 1082 - 1083 1080 return worker; 1084 1081 } 1085 1082 EXPORT_SYMBOL(kthread_create_worker_on_node); 1086 - 1087 - static __printf(3, 4) struct kthread_worker * 1088 - __kthread_create_worker_on_cpu(int cpu, unsigned int flags, 1089 - const char namefmt[], ...) 1090 - { 1091 - struct kthread_worker *worker; 1092 - va_list args; 1093 - 1094 - va_start(args, namefmt); 1095 - worker = __kthread_create_worker_on_node(flags, cpu_to_node(cpu), 1096 - namefmt, args); 1097 - va_end(args); 1098 - 1099 - if (worker) { 1100 - kthread_bind(worker->task, cpu); 1101 - wake_up_process(worker->task); 1102 - } 1103 - 1104 - return worker; 1105 - } 1106 1083 1107 1084 /** 1108 1085 * kthread_create_worker_on_cpu - create a kthread worker and bind it ··· 1121 1144 kthread_create_worker_on_cpu(int cpu, unsigned int flags, 1122 1145 const char namefmt[]) 1123 1146 { 1124 - return __kthread_create_worker_on_cpu(cpu, flags, namefmt, cpu); 1147 + struct kthread_worker *worker; 1148 + 1149 + worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu); 1150 + if (!IS_ERR(worker)) 1151 + kthread_bind(worker->task, cpu); 1152 + 1153 + return worker; 1125 1154 } 1126 1155 EXPORT_SYMBOL(kthread_create_worker_on_cpu); 1127 1156
+2 -2
kernel/rcu/tree.c
··· 4906 4906 if (rnp->exp_kworker) 4907 4907 return; 4908 4908 4909 - kworker = kthread_create_worker(0, name, rnp_index); 4909 + kworker = kthread_run_worker(0, name, rnp_index); 4910 4910 if (IS_ERR_OR_NULL(kworker)) { 4911 4911 pr_err("Failed to create par gp kworker on %d/%d\n", 4912 4912 rnp->grplo, rnp->grphi); ··· 4933 4933 const char *name = "rcu_exp_gp_kthread_worker"; 4934 4934 struct sched_param param = { .sched_priority = kthread_prio }; 4935 4935 4936 - rcu_exp_gp_kworker = kthread_create_worker(0, name); 4936 + rcu_exp_gp_kworker = kthread_run_worker(0, name); 4937 4937 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { 4938 4938 pr_err("Failed to create %s!\n", name); 4939 4939 rcu_exp_gp_kworker = NULL;
+1 -1
kernel/sched/ext.c
··· 5352 5352 { 5353 5353 struct kthread_worker *helper; 5354 5354 5355 - helper = kthread_create_worker(0, name); 5355 + helper = kthread_run_worker(0, name); 5356 5356 if (helper) 5357 5357 sched_set_fifo(helper->task); 5358 5358 return helper;
+1 -1
kernel/workqueue.c
··· 7828 7828 unsigned long thresh; 7829 7829 unsigned long bogo; 7830 7830 7831 - pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 7831 + pwq_release_worker = kthread_run_worker(0, "pool_workqueue_release"); 7832 7832 BUG_ON(IS_ERR(pwq_release_worker)); 7833 7833 7834 7834 /* if the user set it to a specific value, keep it */
+1 -1
net/dsa/tag_ksz.c
··· 66 66 if (!priv) 67 67 return -ENOMEM; 68 68 69 - xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit", 69 + xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit", 70 70 ds->dst->index, ds->index); 71 71 if (IS_ERR(xmit_worker)) { 72 72 ret = PTR_ERR(xmit_worker);
+1 -1
net/dsa/tag_ocelot_8021q.c
··· 110 110 if (!priv) 111 111 return -ENOMEM; 112 112 113 - priv->xmit_worker = kthread_create_worker(0, "felix_xmit"); 113 + priv->xmit_worker = kthread_run_worker(0, "felix_xmit"); 114 114 if (IS_ERR(priv->xmit_worker)) { 115 115 err = PTR_ERR(priv->xmit_worker); 116 116 kfree(priv);
+1 -1
net/dsa/tag_sja1105.c
··· 707 707 708 708 spin_lock_init(&priv->meta_lock); 709 709 710 - xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit", 710 + xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit", 711 711 ds->dst->index, ds->index); 712 712 if (IS_ERR(xmit_worker)) { 713 713 err = PTR_ERR(xmit_worker);