Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs: replace use of system_unbound_wq with system_dfl_wq

Currently, if a user enqueues a work item using schedule_delayed_work(), the
wq used is "system_wq" (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again makes
use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it's not required.

Adding system_dfl_wq to encourage its use when unbound work should be used.

The old system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-2-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>

authored by

Marco Crivellari and committed by
Christian Brauner
7a4f92d3 8f5ae30d

+22 -22
+2 -2
fs/afs/callback.c
··· 42 42 list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) { 43 43 if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) { 44 44 afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb); 45 - queue_work(system_unbound_wq, &vnode->cb_work); 45 + queue_work(system_dfl_wq, &vnode->cb_work); 46 46 } 47 47 } 48 48 ··· 90 90 if (reason != afs_cb_break_for_deleted && 91 91 vnode->status.type == AFS_FTYPE_FILE && 92 92 atomic_read(&vnode->cb_nr_mmap)) 93 - queue_work(system_unbound_wq, &vnode->cb_work); 93 + queue_work(system_dfl_wq, &vnode->cb_work); 94 94 95 95 trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true); 96 96 } else {
+1 -1
fs/afs/write.c
··· 172 172 void afs_issue_write(struct netfs_io_subrequest *subreq) 173 173 { 174 174 subreq->work.func = afs_issue_write_worker; 175 - if (!queue_work(system_unbound_wq, &subreq->work)) 175 + if (!queue_work(system_dfl_wq, &subreq->work)) 176 176 WARN_ON_ONCE(1); 177 177 } 178 178
+1 -1
fs/bcachefs/btree_write_buffer.c
··· 827 827 828 828 if (bch2_btree_write_buffer_should_flush(c) && 829 829 __enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_write_buffer) && 830 - !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work)) 830 + !queue_work(system_dfl_wq, &c->btree_write_buffer.flush_work)) 831 831 enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer); 832 832 833 833 if (dst->wb == &wb->flushing)
+4 -4
fs/bcachefs/io_read.c
··· 684 684 685 685 if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) { 686 686 bch2_rbio_punt(rbio, bch2_rbio_retry, 687 - RBIO_CONTEXT_UNBOUND, system_unbound_wq); 687 + RBIO_CONTEXT_UNBOUND, system_dfl_wq); 688 688 } else { 689 689 rbio = bch2_rbio_free(rbio); 690 690 ··· 921 921 bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR); 922 922 goto out; 923 923 decompression_err: 924 - bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); 924 + bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq); 925 925 goto out; 926 926 decrypt_err: 927 - bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); 927 + bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq); 928 928 goto out; 929 929 } 930 930 ··· 963 963 rbio->promote || 964 964 crc_is_compressed(rbio->pick.crc) || 965 965 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type)) 966 - context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq; 966 + context = RBIO_CONTEXT_UNBOUND, wq = system_dfl_wq; 967 967 else if (rbio->pick.crc.csum_type) 968 968 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq; 969 969
+1 -1
fs/bcachefs/journal_io.c
··· 1362 1362 BCH_DEV_READ_REF_journal_read)) 1363 1363 closure_call(&ca->journal.read, 1364 1364 bch2_journal_read_device, 1365 - system_unbound_wq, 1365 + system_dfl_wq, 1366 1366 &jlist.cl); 1367 1367 else 1368 1368 degraded = true;
+1 -1
fs/btrfs/block-group.c
··· 2031 2031 btrfs_reclaim_sweep(fs_info); 2032 2032 spin_lock(&fs_info->unused_bgs_lock); 2033 2033 if (!list_empty(&fs_info->reclaim_bgs)) 2034 - queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 2034 + queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work); 2035 2035 spin_unlock(&fs_info->unused_bgs_lock); 2036 2036 } 2037 2037
+1 -1
fs/btrfs/extent_map.c
··· 1372 1372 if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0) 1373 1373 return; 1374 1374 1375 - queue_work(system_unbound_wq, &fs_info->em_shrinker_work); 1375 + queue_work(system_dfl_wq, &fs_info->em_shrinker_work); 1376 1376 } 1377 1377 1378 1378 void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
+2 -2
fs/btrfs/space-info.c
··· 1830 1830 space_info->flags, 1831 1831 orig_bytes, flush, 1832 1832 "enospc"); 1833 - queue_work(system_unbound_wq, async_work); 1833 + queue_work(system_dfl_wq, async_work); 1834 1834 } 1835 1835 } else { 1836 1836 list_add_tail(&ticket.list, ··· 1847 1847 need_preemptive_reclaim(fs_info, space_info)) { 1848 1848 trace_btrfs_trigger_flush(fs_info, space_info->flags, 1849 1849 orig_bytes, flush, "preempt"); 1850 - queue_work(system_unbound_wq, 1850 + queue_work(system_dfl_wq, 1851 1851 &fs_info->preempt_reclaim_work); 1852 1852 } 1853 1853 }
+1 -1
fs/btrfs/zoned.c
··· 2488 2488 refcount_inc(&eb->refs); 2489 2489 bg->last_eb = eb; 2490 2490 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn); 2491 - queue_work(system_unbound_wq, &bg->zone_finish_work); 2491 + queue_work(system_dfl_wq, &bg->zone_finish_work); 2492 2492 } 2493 2493 2494 2494 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
+1 -1
fs/coredump.c
··· 635 635 636 636 /* 637 637 * Usermode helpers are childen of either 638 - * system_unbound_wq or of kthreadd. So we know that 638 + * system_dfl_wq or of kthreadd. So we know that 639 639 * we're starting off with a clean file descriptor 640 640 * table. So we should always be able to use 641 641 * COREDUMP_PIDFD_NUMBER as our file descriptor value.
+1 -1
fs/ext4/mballoc.c
··· 3995 3995 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 3996 3996 spin_unlock(&sbi->s_md_lock); 3997 3997 if (wake) 3998 - queue_work(system_unbound_wq, &sbi->s_discard_work); 3998 + queue_work(system_dfl_wq, &sbi->s_discard_work); 3999 3999 } else { 4000 4000 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 4001 4001 kmem_cache_free(ext4_free_data_cachep, entry);
+1 -1
fs/netfs/misc.c
··· 321 321 { 322 322 if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) && 323 323 !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) { 324 - queue_work(system_unbound_wq, &rreq->work); 324 + queue_work(system_dfl_wq, &rreq->work); 325 325 } else { 326 326 trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue); 327 327 wake_up(&rreq->waitq);
+1 -1
fs/netfs/objects.c
··· 163 163 dead = __refcount_dec_and_test(&rreq->ref, &r); 164 164 trace_netfs_rreq_ref(debug_id, r - 1, what); 165 165 if (dead) 166 - WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work)); 166 + WARN_ON(!queue_work(system_dfl_wq, &rreq->cleanup_work)); 167 167 } 168 168 } 169 169
+1 -1
fs/nfsd/filecache.c
··· 113 113 nfsd_file_schedule_laundrette(void) 114 114 { 115 115 if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags)) 116 - queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette, 116 + queue_delayed_work(system_dfl_wq, &nfsd_filecache_laundrette, 117 117 NFSD_LAUNDRETTE_DELAY); 118 118 } 119 119
+2 -2
fs/notify/mark.c
··· 428 428 conn->destroy_next = connector_destroy_list; 429 429 connector_destroy_list = conn; 430 430 spin_unlock(&destroy_lock); 431 - queue_work(system_unbound_wq, &connector_reaper_work); 431 + queue_work(system_dfl_wq, &connector_reaper_work); 432 432 } 433 433 /* 434 434 * Note that we didn't update flags telling whether inode cares about ··· 439 439 spin_lock(&destroy_lock); 440 440 list_add(&mark->g_list, &destroy_list); 441 441 spin_unlock(&destroy_lock); 442 - queue_delayed_work(system_unbound_wq, &reaper_work, 442 + queue_delayed_work(system_dfl_wq, &reaper_work, 443 443 FSNOTIFY_REAPER_DELAY); 444 444 } 445 445 EXPORT_SYMBOL_GPL(fsnotify_put_mark);
+1 -1
fs/quota/dquot.c
··· 881 881 put_releasing_dquots(dquot); 882 882 atomic_dec(&dquot->dq_count); 883 883 spin_unlock(&dq_list_lock); 884 - queue_delayed_work(system_unbound_wq, &quota_release_work, 1); 884 + queue_delayed_work(system_dfl_wq, &quota_release_work, 1); 885 885 } 886 886 EXPORT_SYMBOL(dqput); 887 887