Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
"Nothing too interesting. Just two trivial patches."

* 'for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: Mark up unlocked access to wq->first_flusher
workqueue: Make workqueue_init*() return void

+6 -10
+2 -2
include/linux/workqueue.h
··· 665 665 int workqueue_offline_cpu(unsigned int cpu); 666 666 #endif 667 667 668 - int __init workqueue_init_early(void); 669 - int __init workqueue_init(void); 668 + void __init workqueue_init_early(void); 669 + void __init workqueue_init(void); 670 670 671 671 #endif
+4 -8
kernel/workqueue.c
··· 2834 2834 * First flushers are responsible for cascading flushes and 2835 2835 * handling overflow. Non-first flushers can simply return. 2836 2836 */ 2837 - if (wq->first_flusher != &this_flusher) 2837 + if (READ_ONCE(wq->first_flusher) != &this_flusher) 2838 2838 return; 2839 2839 2840 2840 mutex_lock(&wq->mutex); ··· 2843 2843 if (wq->first_flusher != &this_flusher) 2844 2844 goto out_unlock; 2845 2845 2846 - wq->first_flusher = NULL; 2846 + WRITE_ONCE(wq->first_flusher, NULL); 2847 2847 2848 2848 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 2849 2849 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); ··· 5898 5898 * items. Actual work item execution starts only after kthreads can be 5899 5899 * created and scheduled right before early initcalls. 5900 5900 */ 5901 - int __init workqueue_init_early(void) 5901 + void __init workqueue_init_early(void) 5902 5902 { 5903 5903 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 5904 5904 int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ; ··· 5965 5965 !system_unbound_wq || !system_freezable_wq || 5966 5966 !system_power_efficient_wq || 5967 5967 !system_freezable_power_efficient_wq); 5968 - 5969 - return 0; 5970 5968 } 5971 5969 5972 5970 /** ··· 5976 5978 * are no kworkers executing the work items yet. Populate the worker pools 5977 5979 * with the initial workers and enable future kworker creations. 5978 5980 */ 5979 - int __init workqueue_init(void) 5981 + void __init workqueue_init(void) 5980 5982 { 5981 5983 struct workqueue_struct *wq; 5982 5984 struct worker_pool *pool; ··· 6023 6025 6024 6026 wq_online = true; 6025 6027 wq_watchdog_init(); 6026 - 6027 - return 0; 6028 6028 }