Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

workqueue: Introduce show_one_worker_pool and show_one_workqueue.

Currently show_workqueue_state shows the state of all workqueues and of
all worker pools. In certain cases we may need to dump state of only a
specific workqueue or worker pool. For example in destroy_workqueue we
only need to show state of the workqueue which is getting destroyed.

So rename show_workqueue_state to show_all_workqueues (to signify that it
dumps the state of all busy workqueues) and divide it into more granular
functions (show_one_workqueue and show_one_worker_pool) that show the
states of individual workqueues and worker pools and can be used in
cases such as the one mentioned above.

Also, as mentioned earlier, make destroy_workqueue dump data pertaining
to only the workqueue that is being destroyed, and make user(s) of the
earlier interface (show_workqueue_state) use the new interface
(show_all_workqueues).

Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

authored by

Imran Khan and committed by
Tejun Heo
55df0933 d25302e4

+102 -81
+1 -1
drivers/tty/sysrq.c
··· 296 296 static void sysrq_handle_showstate(int key) 297 297 { 298 298 show_state(); 299 - show_workqueue_state(); 299 + show_all_workqueues(); 300 300 } 301 301 static const struct sysrq_key_op sysrq_showstate_op = { 302 302 .handler = sysrq_handle_showstate,
+2 -1
include/linux/workqueue.h
··· 469 469 extern unsigned int work_busy(struct work_struct *work); 470 470 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); 471 471 extern void print_worker_info(const char *log_lvl, struct task_struct *task); 472 - extern void show_workqueue_state(void); 472 + extern void show_all_workqueues(void); 473 + extern void show_one_workqueue(struct workqueue_struct *wq); 473 474 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); 474 475 475 476 /**
+1 -1
kernel/power/process.c
··· 94 94 todo - wq_busy, wq_busy); 95 95 96 96 if (wq_busy) 97 - show_workqueue_state(); 97 + show_all_workqueues(); 98 98 99 99 if (!wakeup || pm_debug_messages_on) { 100 100 read_lock(&tasklist_lock);
+98 -78
kernel/workqueue.c
··· 375 375 static int worker_thread(void *__worker); 376 376 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 377 377 static void show_pwq(struct pool_workqueue *pwq); 378 + static void show_one_worker_pool(struct worker_pool *pool); 378 379 379 380 #define CREATE_TRACE_POINTS 380 381 #include <trace/events/workqueue.h> ··· 4448 4447 raw_spin_unlock_irq(&pwq->pool->lock); 4449 4448 mutex_unlock(&wq->mutex); 4450 4449 mutex_unlock(&wq_pool_mutex); 4451 - show_workqueue_state(); 4450 + show_one_workqueue(wq); 4452 4451 return; 4453 4452 } 4454 4453 raw_spin_unlock_irq(&pwq->pool->lock); ··· 4798 4797 } 4799 4798 4800 4799 /** 4801 - * show_workqueue_state - dump workqueue state 4800 + * show_one_workqueue - dump state of specified workqueue 4801 + * @wq: workqueue whose state will be printed 4802 + */ 4803 + void show_one_workqueue(struct workqueue_struct *wq) 4804 + { 4805 + struct pool_workqueue *pwq; 4806 + bool idle = true; 4807 + unsigned long flags; 4808 + 4809 + for_each_pwq(pwq, wq) { 4810 + if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 4811 + idle = false; 4812 + break; 4813 + } 4814 + } 4815 + if (idle) /* Nothing to print for idle workqueue */ 4816 + return; 4817 + 4818 + pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 4819 + 4820 + for_each_pwq(pwq, wq) { 4821 + raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4822 + if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 4823 + /* 4824 + * Defer printing to avoid deadlocks in console 4825 + * drivers that queue work while holding locks 4826 + * also taken in their write paths. 4827 + */ 4828 + printk_deferred_enter(); 4829 + show_pwq(pwq); 4830 + printk_deferred_exit(); 4831 + } 4832 + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 4833 + /* 4834 + * We could be printing a lot from atomic context, e.g. 4835 + * sysrq-t -> show_all_workqueues(). Avoid triggering 4836 + * hard lockup. 
4837 + */ 4838 + touch_nmi_watchdog(); 4839 + } 4840 + 4841 + } 4842 + 4843 + /** 4844 + * show_one_worker_pool - dump state of specified worker pool 4845 + * @pool: worker pool whose state will be printed 4846 + */ 4847 + static void show_one_worker_pool(struct worker_pool *pool) 4848 + { 4849 + struct worker *worker; 4850 + bool first = true; 4851 + unsigned long flags; 4852 + 4853 + raw_spin_lock_irqsave(&pool->lock, flags); 4854 + if (pool->nr_workers == pool->nr_idle) 4855 + goto next_pool; 4856 + /* 4857 + * Defer printing to avoid deadlocks in console drivers that 4858 + * queue work while holding locks also taken in their write 4859 + * paths. 4860 + */ 4861 + printk_deferred_enter(); 4862 + pr_info("pool %d:", pool->id); 4863 + pr_cont_pool_info(pool); 4864 + pr_cont(" hung=%us workers=%d", 4865 + jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, 4866 + pool->nr_workers); 4867 + if (pool->manager) 4868 + pr_cont(" manager: %d", 4869 + task_pid_nr(pool->manager->task)); 4870 + list_for_each_entry(worker, &pool->idle_list, entry) { 4871 + pr_cont(" %s%d", first ? "idle: " : "", 4872 + task_pid_nr(worker->task)); 4873 + first = false; 4874 + } 4875 + pr_cont("\n"); 4876 + printk_deferred_exit(); 4877 + next_pool: 4878 + raw_spin_unlock_irqrestore(&pool->lock, flags); 4879 + /* 4880 + * We could be printing a lot from atomic context, e.g. 4881 + * sysrq-t -> show_all_workqueues(). Avoid triggering 4882 + * hard lockup. 4883 + */ 4884 + touch_nmi_watchdog(); 4885 + 4886 + } 4887 + 4888 + /** 4889 + * show_all_workqueues - dump workqueue state 4802 4890 * 4803 4891 * Called from a sysrq handler or try_to_freeze_tasks() and prints out 4804 4892 * all busy workqueues and pools. 
4805 4893 */ 4806 - void show_workqueue_state(void) 4894 + void show_all_workqueues(void) 4807 4895 { 4808 4896 struct workqueue_struct *wq; 4809 4897 struct worker_pool *pool; 4810 - unsigned long flags; 4811 4898 int pi; 4812 4899 4813 4900 rcu_read_lock(); 4814 4901 4815 4902 pr_info("Showing busy workqueues and worker pools:\n"); 4816 4903 4817 - list_for_each_entry_rcu(wq, &workqueues, list) { 4818 - struct pool_workqueue *pwq; 4819 - bool idle = true; 4904 + list_for_each_entry_rcu(wq, &workqueues, list) 4905 + show_one_workqueue(wq); 4820 4906 4821 - for_each_pwq(pwq, wq) { 4822 - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 4823 - idle = false; 4824 - break; 4825 - } 4826 - } 4827 - if (idle) 4828 - continue; 4829 - 4830 - pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 4831 - 4832 - for_each_pwq(pwq, wq) { 4833 - raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4834 - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 4835 - /* 4836 - * Defer printing to avoid deadlocks in console 4837 - * drivers that queue work while holding locks 4838 - * also taken in their write paths. 4839 - */ 4840 - printk_deferred_enter(); 4841 - show_pwq(pwq); 4842 - printk_deferred_exit(); 4843 - } 4844 - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 4845 - /* 4846 - * We could be printing a lot from atomic context, e.g. 4847 - * sysrq-t -> show_workqueue_state(). Avoid triggering 4848 - * hard lockup. 4849 - */ 4850 - touch_nmi_watchdog(); 4851 - } 4852 - } 4853 - 4854 - for_each_pool(pool, pi) { 4855 - struct worker *worker; 4856 - bool first = true; 4857 - 4858 - raw_spin_lock_irqsave(&pool->lock, flags); 4859 - if (pool->nr_workers == pool->nr_idle) 4860 - goto next_pool; 4861 - /* 4862 - * Defer printing to avoid deadlocks in console drivers that 4863 - * queue work while holding locks also taken in their write 4864 - * paths. 
4865 - */ 4866 - printk_deferred_enter(); 4867 - pr_info("pool %d:", pool->id); 4868 - pr_cont_pool_info(pool); 4869 - pr_cont(" hung=%us workers=%d", 4870 - jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, 4871 - pool->nr_workers); 4872 - if (pool->manager) 4873 - pr_cont(" manager: %d", 4874 - task_pid_nr(pool->manager->task)); 4875 - list_for_each_entry(worker, &pool->idle_list, entry) { 4876 - pr_cont(" %s%d", first ? "idle: " : "", 4877 - task_pid_nr(worker->task)); 4878 - first = false; 4879 - } 4880 - pr_cont("\n"); 4881 - printk_deferred_exit(); 4882 - next_pool: 4883 - raw_spin_unlock_irqrestore(&pool->lock, flags); 4884 - /* 4885 - * We could be printing a lot from atomic context, e.g. 4886 - * sysrq-t -> show_workqueue_state(). Avoid triggering 4887 - * hard lockup. 4888 - */ 4889 - touch_nmi_watchdog(); 4890 - } 4907 + for_each_pool(pool, pi) 4908 + show_one_worker_pool(pool); 4891 4909 4892 4910 rcu_read_unlock(); 4893 4911 } ··· 5896 5876 rcu_read_unlock(); 5897 5877 5898 5878 if (lockup_detected) 5899 - show_workqueue_state(); 5879 + show_all_workqueues(); 5900 5880 5901 5881 wq_watchdog_reset_touched(); 5902 5882 mod_timer(&wq_watchdog_timer, jiffies + thresh);