Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wq-for-6.5-cleanup-ordered' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull ordered workqueue creation updates from Tejun Heo:
"For historical reasons, unbound workqueues with max concurrency limit
of 1 are considered ordered, even though the concurrency limit hasn't
been system-wide for a long time.

This creates ambiguity around whether ordered execution is actually
required for correctness, which was confusing for e.g. btrfs
(btrfs updates are being routed through the btrfs tree).

There aren't that many users in the tree which use the combination and
there are pending improvements to unbound workqueue affinity handling
which will make inadvertent use of ordered workqueue a bigger loss.

This clarifies the situation for most of them by updating the ones
which require ordered execution to use alloc_ordered_workqueue().

There are some conversions being routed through subsystem-specific
trees and likely a few stragglers. Once they're all converted,
workqueue can trigger a warning on unbound + @max_active==1 usages and
eventually drop the implicit ordered behavior"

* tag 'wq-for-6.5-cleanup-ordered' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
rxrpc: Use alloc_ordered_workqueue() to create ordered workqueues
net: qrtr: Use alloc_ordered_workqueue() to create ordered workqueues
net: wwan: t7xx: Use alloc_ordered_workqueue() to create ordered workqueues
dm integrity: Use alloc_ordered_workqueue() to create ordered workqueues
media: amphion: Use alloc_ordered_workqueue() to create ordered workqueues
scsi: NCR5380: Use default @max_active for hostdata->work_q
media: coda: Use alloc_ordered_workqueue() to create ordered workqueues
crypto: octeontx2: Use alloc_ordered_workqueue() to create ordered workqueues
wifi: ath10/11/12k: Use alloc_ordered_workqueue() to create ordered workqueues
wifi: mwifiex: Use default @max_active for workqueues
wifi: iwlwifi: Use default @max_active for trans_pcie->rba.alloc_wq
xen/pvcalls: Use alloc_ordered_workqueue() to create ordered workqueues
virt: acrn: Use alloc_ordered_workqueue() to create ordered workqueues
net: octeontx2: Use alloc_ordered_workqueue() to create ordered workqueues
net: thunderx: Use alloc_ordered_workqueue() to create ordered workqueues
greybus: Use alloc_ordered_workqueue() to create ordered workqueues
powerpc, workqueue: Use alloc_ordered_workqueue() to create ordered workqueues

+57 -65
+1 -1
arch/powerpc/kernel/tau_6xx.c
··· 200 200 tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) && 201 201 !strcmp(cur_cpu_spec->platform, "ppc750"); 202 202 203 - tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1); 203 + tau_workq = alloc_ordered_workqueue("tau", 0); 204 204 if (!tau_workq) 205 205 return -ENOMEM; 206 206
+1 -2
arch/powerpc/platforms/pseries/dlpar.c
··· 564 564 if (pseries_hp_wq) 565 565 return 0; 566 566 567 - pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", 568 - WQ_UNBOUND, 1); 567 + pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0); 569 568 570 569 return pseries_hp_wq ? 0 : -ENOMEM; 571 570 }
+6 -6
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
··· 357 357 u64 vfpf_mbox_base; 358 358 int err, i; 359 359 360 - cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox", 361 - WQ_UNBOUND | WQ_HIGHPRI | 362 - WQ_MEM_RECLAIM, 1); 360 + cptpf->vfpf_mbox_wq = 361 + alloc_ordered_workqueue("cpt_vfpf_mailbox", 362 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 363 363 if (!cptpf->vfpf_mbox_wq) 364 364 return -ENOMEM; 365 365 ··· 453 453 resource_size_t offset; 454 454 int err; 455 455 456 - cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox", 457 - WQ_UNBOUND | WQ_HIGHPRI | 458 - WQ_MEM_RECLAIM, 1); 456 + cptpf->afpf_mbox_wq = 457 + alloc_ordered_workqueue("cpt_afpf_mailbox", 458 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 459 459 if (!cptpf->afpf_mbox_wq) 460 460 return -ENOMEM; 461 461
+3 -3
drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
··· 75 75 resource_size_t offset, size; 76 76 int ret; 77 77 78 - cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox", 79 - WQ_UNBOUND | WQ_HIGHPRI | 80 - WQ_MEM_RECLAIM, 1); 78 + cptvf->pfvf_mbox_wq = 79 + alloc_ordered_workqueue("cpt_pfvf_mailbox", 80 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 81 81 if (!cptvf->pfvf_mbox_wq) 82 82 return -ENOMEM; 83 83
+2 -2
drivers/greybus/connection.c
··· 187 187 spin_lock_init(&connection->lock); 188 188 INIT_LIST_HEAD(&connection->operations); 189 189 190 - connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1, 191 - dev_name(&hd->dev), hd_cport_id); 190 + connection->wq = alloc_ordered_workqueue("%s:%d", 0, dev_name(&hd->dev), 191 + hd_cport_id); 192 192 if (!connection->wq) { 193 193 ret = -ENOMEM; 194 194 goto err_free_connection;
+1 -1
drivers/greybus/svc.c
··· 1318 1318 if (!svc) 1319 1319 return NULL; 1320 1320 1321 - svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev)); 1321 + svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev)); 1322 1322 if (!svc->wq) { 1323 1323 kfree(svc); 1324 1324 return NULL;
+2 -2
drivers/md/dm-integrity.c
··· 4268 4268 } 4269 4269 4270 4270 /* 4271 - * If this workqueue were percpu, it would cause bio reordering 4271 + * If this workqueue weren't ordered, it would cause bio reordering 4272 4272 * and reduced performance. 4273 4273 */ 4274 - ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 4274 + ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM); 4275 4275 if (!ic->wait_wq) { 4276 4276 ti->error = "Cannot allocate workqueue"; 4277 4277 r = -ENOMEM;
+1 -1
drivers/md/dm.c
··· 207 207 if (r) 208 208 return r; 209 209 210 - deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 210 + deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0); 211 211 if (!deferred_remove_workqueue) { 212 212 r = -ENOMEM; 213 213 goto out_uevent_exit;
+1 -1
drivers/media/platform/amphion/vpu_core.c
··· 254 254 if (vpu_core_is_exist(vpu, core)) 255 255 return 0; 256 256 257 - core->workqueue = alloc_workqueue("vpu", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 257 + core->workqueue = alloc_ordered_workqueue("vpu", WQ_MEM_RECLAIM); 258 258 if (!core->workqueue) { 259 259 dev_err(core->dev, "fail to alloc workqueue\n"); 260 260 return -ENOMEM;
+1 -1
drivers/media/platform/amphion/vpu_v4l2.c
··· 740 740 inst->fh.ctrl_handler = &inst->ctrl_handler; 741 741 file->private_data = &inst->fh; 742 742 inst->state = VPU_CODEC_STATE_DEINIT; 743 - inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 743 + inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM); 744 744 if (inst->workqueue) { 745 745 INIT_WORK(&inst->msg_work, vpu_inst_run_work); 746 746 ret = kfifo_init(&inst->msg_fifo,
+1 -1
drivers/media/platform/chips-media/coda-common.c
··· 3268 3268 &dev->iram.blob); 3269 3269 } 3270 3270 3271 - dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1); 3271 + dev->workqueue = alloc_ordered_workqueue("coda", WQ_MEM_RECLAIM); 3272 3272 if (!dev->workqueue) { 3273 3273 dev_err(&pdev->dev, "unable to alloc workqueue\n"); 3274 3274 ret = -ENOMEM;
+1 -2
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 1126 1126 } 1127 1127 1128 1128 poll: 1129 - lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | 1130 - WQ_MEM_RECLAIM, 1); 1129 + lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM); 1131 1130 if (!lmac->check_link) 1132 1131 return -ENOMEM; 1133 1132 INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+2 -3
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
··· 3044 3044 cfg | BIT_ULL(22)); 3045 3045 } 3046 3046 3047 - rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", 3048 - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 3049 - 1); 3047 + rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr", 3048 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 3050 3049 if (!rvu->flr_wq) 3051 3050 return -ENOMEM; 3052 3051
+5 -8
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 271 271 { 272 272 int vf; 273 273 274 - pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq", 275 - WQ_UNBOUND | WQ_HIGHPRI, 1); 274 + pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI); 276 275 if (!pf->flr_wq) 277 276 return -ENOMEM; 278 277 ··· 592 593 if (!pf->mbox_pfvf) 593 594 return -ENOMEM; 594 595 595 - pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", 596 - WQ_UNBOUND | WQ_HIGHPRI | 597 - WQ_MEM_RECLAIM, 1); 596 + pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox", 597 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 598 598 if (!pf->mbox_pfvf_wq) 599 599 return -ENOMEM; 600 600 ··· 1061 1063 int err; 1062 1064 1063 1065 mbox->pfvf = pf; 1064 - pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox", 1065 - WQ_UNBOUND | WQ_HIGHPRI | 1066 - WQ_MEM_RECLAIM, 1); 1066 + pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox", 1067 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 1067 1068 if (!pf->mbox_wq) 1068 1069 return -ENOMEM; 1069 1070
+2 -3
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
··· 297 297 int err; 298 298 299 299 mbox->pfvf = vf; 300 - vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox", 301 - WQ_UNBOUND | WQ_HIGHPRI | 302 - WQ_MEM_RECLAIM, 1); 300 + vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox", 301 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 303 302 if (!vf->mbox_wq) 304 303 return -ENOMEM; 305 304
+1 -2
drivers/net/wireless/ath/ath10k/qmi.c
··· 1082 1082 if (ret) 1083 1083 goto err; 1084 1084 1085 - qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event", 1086 - WQ_UNBOUND, 1); 1085 + qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0); 1087 1086 if (!qmi->event_wq) { 1088 1087 ath10k_err(ar, "failed to allocate workqueue\n"); 1089 1088 ret = -EFAULT;
+1 -2
drivers/net/wireless/ath/ath11k/qmi.c
··· 3256 3256 return ret; 3257 3257 } 3258 3258 3259 - ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event", 3260 - WQ_UNBOUND, 1); 3259 + ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0); 3261 3260 if (!ab->qmi.event_wq) { 3262 3261 ath11k_err(ab, "failed to allocate workqueue\n"); 3263 3262 return -EFAULT;
+1 -2
drivers/net/wireless/ath/ath12k/qmi.c
··· 3056 3056 return ret; 3057 3057 } 3058 3058 3059 - ab->qmi.event_wq = alloc_workqueue("ath12k_qmi_driver_event", 3060 - WQ_UNBOUND, 1); 3059 + ab->qmi.event_wq = alloc_ordered_workqueue("ath12k_qmi_driver_event", 0); 3061 3060 if (!ab->qmi.event_wq) { 3062 3061 ath12k_err(ab, "failed to allocate workqueue\n"); 3063 3062 return -EFAULT;
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 3576 3576 init_waitqueue_head(&trans_pcie->imr_waitq); 3577 3577 3578 3578 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", 3579 - WQ_HIGHPRI | WQ_UNBOUND, 1); 3579 + WQ_HIGHPRI | WQ_UNBOUND, 0); 3580 3580 if (!trans_pcie->rba.alloc_wq) { 3581 3581 ret = -ENOMEM; 3582 3582 goto out_free_trans;
+2 -2
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 3127 3127 priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s", 3128 3128 WQ_HIGHPRI | 3129 3129 WQ_MEM_RECLAIM | 3130 - WQ_UNBOUND, 1, name); 3130 + WQ_UNBOUND, 0, name); 3131 3131 if (!priv->dfs_cac_workqueue) { 3132 3132 mwifiex_dbg(adapter, ERROR, "cannot alloc DFS CAC queue\n"); 3133 3133 ret = -ENOMEM; ··· 3138 3138 3139 3139 priv->dfs_chan_sw_workqueue = alloc_workqueue("MWIFIEX_DFS_CHSW%s", 3140 3140 WQ_HIGHPRI | WQ_UNBOUND | 3141 - WQ_MEM_RECLAIM, 1, name); 3141 + WQ_MEM_RECLAIM, 0, name); 3142 3142 if (!priv->dfs_chan_sw_workqueue) { 3143 3143 mwifiex_dbg(adapter, ERROR, "cannot alloc DFS channel sw queue\n"); 3144 3144 ret = -ENOMEM;
+4 -4
drivers/net/wireless/marvell/mwifiex/main.c
··· 1547 1547 1548 1548 adapter->workqueue = 1549 1549 alloc_workqueue("MWIFIEX_WORK_QUEUE", 1550 - WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 1550 + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); 1551 1551 if (!adapter->workqueue) 1552 1552 goto err_kmalloc; 1553 1553 ··· 1557 1557 adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE", 1558 1558 WQ_HIGHPRI | 1559 1559 WQ_MEM_RECLAIM | 1560 - WQ_UNBOUND, 1); 1560 + WQ_UNBOUND, 0); 1561 1561 if (!adapter->rx_workqueue) 1562 1562 goto err_kmalloc; 1563 1563 INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); ··· 1702 1702 1703 1703 adapter->workqueue = 1704 1704 alloc_workqueue("MWIFIEX_WORK_QUEUE", 1705 - WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 1705 + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); 1706 1706 if (!adapter->workqueue) 1707 1707 goto err_kmalloc; 1708 1708 ··· 1712 1712 adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE", 1713 1713 WQ_HIGHPRI | 1714 1714 WQ_MEM_RECLAIM | 1715 - WQ_UNBOUND, 1); 1715 + WQ_UNBOUND, 0); 1716 1716 if (!adapter->rx_workqueue) 1717 1717 goto err_kmalloc; 1718 1718
+7 -6
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
··· 1293 1293 for (i = 0; i < CLDMA_TXQ_NUM; i++) { 1294 1294 md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); 1295 1295 md_ctrl->txq[i].worker = 1296 - alloc_workqueue("md_hif%d_tx%d_worker", 1297 - WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), 1298 - 1, md_ctrl->hif_id, i); 1296 + alloc_ordered_workqueue("md_hif%d_tx%d_worker", 1297 + WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), 1298 + md_ctrl->hif_id, i); 1299 1299 if (!md_ctrl->txq[i].worker) 1300 1300 goto err_workqueue; 1301 1301 ··· 1306 1306 md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); 1307 1307 INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); 1308 1308 1309 - md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", 1310 - WQ_UNBOUND | WQ_MEM_RECLAIM, 1311 - 1, md_ctrl->hif_id, i); 1309 + md_ctrl->rxq[i].worker = 1310 + alloc_ordered_workqueue("md_hif%d_rx%d_worker", 1311 + WQ_MEM_RECLAIM, 1312 + md_ctrl->hif_id, i); 1312 1313 if (!md_ctrl->rxq[i].worker) 1313 1314 goto err_workqueue; 1314 1315 }
+3 -2
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
··· 618 618 return ret; 619 619 } 620 620 621 - txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | 622 - (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); 621 + txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker", 622 + WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI), 623 + txq->index); 623 624 if (!txq->worker) 624 625 return -ENOMEM; 625 626
+1 -1
drivers/scsi/NCR5380.c
··· 417 417 INIT_WORK(&hostdata->main_task, NCR5380_main); 418 418 hostdata->work_q = alloc_workqueue("ncr5380_%d", 419 419 WQ_UNBOUND | WQ_MEM_RECLAIM, 420 - 1, instance->host_no); 420 + 0, instance->host_no); 421 421 if (!hostdata->work_q) 422 422 return -ENOMEM; 423 423
+2 -2
drivers/virt/acrn/ioreq.c
··· 576 576 int acrn_ioreq_intr_setup(void) 577 577 { 578 578 acrn_setup_intr_handler(ioreq_intr_handler); 579 - ioreq_wq = alloc_workqueue("ioreq_wq", 580 - WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 579 + ioreq_wq = alloc_ordered_workqueue("ioreq_wq", 580 + WQ_HIGHPRI | WQ_MEM_RECLAIM); 581 581 if (!ioreq_wq) { 582 582 dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n"); 583 583 acrn_remove_intr_handler();
+2 -2
drivers/xen/pvcalls-back.c
··· 363 363 map->data.in = map->bytes; 364 364 map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order); 365 365 366 - map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); 366 + map->ioworker.wq = alloc_ordered_workqueue("pvcalls_io", 0); 367 367 if (!map->ioworker.wq) 368 368 goto out; 369 369 atomic_set(&map->io, 1); ··· 636 636 637 637 INIT_WORK(&map->register_work, __pvcalls_back_accept); 638 638 spin_lock_init(&map->copy_lock); 639 - map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1); 639 + map->wq = alloc_ordered_workqueue("pvcalls_wq", 0); 640 640 if (!map->wq) { 641 641 ret = -ENOMEM; 642 642 goto out;
+1 -1
net/qrtr/ns.c
··· 783 783 goto err_sock; 784 784 } 785 785 786 - qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); 786 + qrtr_ns.workqueue = alloc_ordered_workqueue("qrtr_ns_handler", 0); 787 787 if (!qrtr_ns.workqueue) { 788 788 ret = -ENOMEM; 789 789 goto err_sock;
+1 -1
net/rxrpc/af_rxrpc.c
··· 989 989 goto error_call_jar; 990 990 } 991 991 992 - rxrpc_workqueue = alloc_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 992 + rxrpc_workqueue = alloc_ordered_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM); 993 993 if (!rxrpc_workqueue) { 994 994 pr_notice("Failed to allocate work queue\n"); 995 995 goto error_work_queue;