include/linux/workqueue.h (+57 -9)
···
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
  *
  * Allocate an ordered workqueue. An ordered workqueue executes at
  * most one work item at any given time in the queued order. They are
···
 					  struct delayed_work *dwork, unsigned long delay);
 extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
 
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 
 extern int schedule_on_each_cpu(work_func_t func);
···
 	return queue_work(system_wq, work);
 }
 
+/*
+ * Detect attempt to flush system-wide workqueues at compile time when possible.
+ *
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+	__compiletime_warning("Please avoid flushing system-wide workqueues.");
+
 /**
  * flush_scheduled_work - ensure that any scheduled work has run to completion.
  *
  * Forces execution of the kernel-global workqueue and blocks until its
  * completion.
  *
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
  *
  * One of the work items currently on the workqueue needs to acquire
  * a lock held by your code or its caller.
···
  * need to know that a particular work item isn't queued and isn't running.
  * In such cases you should use cancel_delayed_work_sync() or
  * cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed after all in-tree
+ * users stopped calling this function.
  */
-static inline void flush_scheduled_work(void)
-{
-	flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit warning while
+ * there are in-tree users.
+ */
+#define flush_scheduled_work()						\
+({									\
+	if (0)								\
+		__warn_flushing_systemwide_wq();			\
+	__flush_workqueue(system_wq);					\
+})
+
+/*
+ * Although there is no longer in-tree caller, for now just emit warning
+ * in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq)						\
+({									\
+	struct workqueue_struct *_wq = (wq);				\
+									\
+	if ((__builtin_constant_p(_wq == system_wq) &&			\
+	     _wq == system_wq) ||					\
+	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
+	     _wq == system_highpri_wq) ||				\
+	    (__builtin_constant_p(_wq == system_long_wq) &&		\
+	     _wq == system_long_wq) ||					\
+	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
+	     _wq == system_unbound_wq) ||				\
+	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
+	     _wq == system_freezable_wq) ||				\
+	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
+	     _wq == system_power_efficient_wq) ||			\
+	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+	     _wq == system_freezable_power_efficient_wq))		\
+		__warn_flushing_systemwide_wq();			\
+	__flush_workqueue(_wq);						\
+})
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
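
How the compile-time detection above works: __compiletime_warning() expands to the GCC warning attribute, which fires only if a call to the annotated function survives optimization. The __builtin_constant_p() guard keeps the call alive only when the optimizer can prove the flushed queue is one of the system-wide ones, while the `if (0)` in flush_scheduled_work() lets the call be dead-code-eliminated (no warning yet, since in-tree users remain) while still type-checking it. A minimal user-space sketch of the same pattern, assuming GCC at -O2; `struct queue`, `system_queue`, `warn_system_flush()` and `__do_flush()` are illustrative stand-ins, not kernel API:

	/* Build with: gcc -O2 -c sketch.c — the warning fires only when optimizing. */
	struct queue { int id; };

	extern struct queue *system_queue;		/* stand-in for system_wq */
	extern void __do_flush(struct queue *q);	/* stand-in for __flush_workqueue() */

	/* Never called at run time; carries a diagnostic that fires only if a
	 * call to it survives dead-code elimination. */
	extern void warn_system_flush(void)
		__attribute__((warning("Please avoid flushing the system-wide queue.")));

	#define flush_queue(q)						\
	({								\
		struct queue *_q = (q);					\
		if (__builtin_constant_p(_q == system_queue) &&		\
		    _q == system_queue)					\
			warn_system_flush();				\
		__do_flush(_q);						\
	})

	void good(struct queue *local) { flush_queue(local); }	/* no warning      */
	void bad(void) { flush_queue(system_queue); }		/* warns at -O2    */

In good(), the comparison is not provably constant, so __builtin_constant_p() evaluates to 0 and the warning call is eliminated; in bad(), constant propagation folds the comparison to true and the surviving call triggers the diagnostic.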
include/trace/events/workqueue.h (+4 -4)
···
  */
 TRACE_EVENT(workqueue_queue_work,
 
-	TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
+	TP_PROTO(int req_cpu, struct pool_workqueue *pwq,
 		 struct work_struct *work),
 
 	TP_ARGS(req_cpu, pwq, work),
···
 		__field( void *,	work	)
 		__field( void *,	function)
 		__string( workqueue,	pwq->wq->name)
-		__field( unsigned int,	req_cpu	)
-		__field( unsigned int,	cpu	)
+		__field( int,		req_cpu	)
+		__field( int,		cpu	)
 	),
 
 	TP_fast_assign(
···
 		__entry->cpu = pwq->pool->cpu;
 	),
 
-	TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u",
+	TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
 		  __entry->work, __entry->function, __get_str(workqueue),
 		  __entry->req_cpu, __entry->cpu)
 );
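
The switch from unsigned to signed here appears motivated by sentinel values: pwq->pool->cpu is an int and holds -1 for unbound pools, which the old %u format rendered as a huge unsigned number in the trace output. A tiny user-space illustration of the failure mode (not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int cpu = -1;	/* the sentinel an unbound pool stores in pool->cpu */

		printf("cpu=%u\n", (unsigned int)cpu);	/* old format: cpu=4294967295 */
		printf("cpu=%d\n", cpu);		/* new format: cpu=-1 */
		return 0;
	}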
kernel/workqueue.c (+12 -4)
···
 }
 
 /**
- * flush_workqueue - ensure that any scheduled work has run to completion.
+ * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
  *
  * This function sleeps until all work items which were queued on entry
  * have finished execution, but it is not livelocked by new incoming ones.
  */
-void flush_workqueue(struct workqueue_struct *wq)
+void __flush_workqueue(struct workqueue_struct *wq)
 {
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
···
 out_unlock:
 	mutex_unlock(&wq->mutex);
 }
-EXPORT_SYMBOL(flush_workqueue);
+EXPORT_SYMBOL(__flush_workqueue);
 
 /**
  * drain_workqueue - drain a workqueue
···
 	wq->flags |= __WQ_DRAINING;
 	mutex_unlock(&wq->mutex);
 reflush:
-	flush_workqueue(wq);
+	__flush_workqueue(wq);
 
 	mutex_lock(&wq->mutex);
 
···
 	wq_online = true;
 	wq_watchdog_init();
 }
+
+/*
+ * Despite the naming, this is a no-op function which is here only for avoiding
+ * link error. Since compile-time warning may fail to catch, we will need to
+ * emit run-time warning from __flush_workqueue().
+ */
+void __warn_flushing_systemwide_wq(void) { }
+EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
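
For callers that now trip the warning, the conversion the comments point to is to queue work on a driver-local workqueue and flush (or destroy) that instead of the system-wide one, so the flush waits only on the driver's own work items. A sketch of the before/after pattern; the `mydrv_*` names are illustrative, not from this patch:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *mydrv_wq;
	static struct work_struct mydrv_work;

	static void mydrv_work_fn(struct work_struct *work)
	{
		/* ... the deferred driver work ... */
	}

	static int __init mydrv_init(void)
	{
		/* Private queue: flushing it waits only for this driver's items. */
		mydrv_wq = alloc_workqueue("mydrv", 0, 0);
		if (!mydrv_wq)
			return -ENOMEM;
		INIT_WORK(&mydrv_work, mydrv_work_fn);
		queue_work(mydrv_wq, &mydrv_work);	/* was: schedule_work(&mydrv_work) */
		return 0;
	}

	static void __exit mydrv_exit(void)
	{
		/* was: flush_scheduled_work(), which waits on everyone's items;
		 * destroy_workqueue() drains only this driver's queue, then frees it. */
		destroy_workqueue(mydrv_wq);
	}

	module_init(mydrv_init);
	module_exit(mydrv_exit);
	MODULE_LICENSE("GPL");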