Merge branch 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: make sure MAYDAY_INITIAL_TIMEOUT is at least 2 jiffies long
  workqueue, freezer: unify spelling of 'freeze' + 'able' to 'freezable'
  workqueue: wake up a worker when a rescuer is leaving a gcwq

+47 -36
+2 -2
Documentation/workqueue.txt
@@ -190,9 +190,9 @@
         * Long running CPU intensive workloads which can be better
           managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-        A freezeable wq participates in the freeze phase of the system
+        A freezable wq participates in the freeze phase of the system
         suspend operations.  Work items on the wq are drained and no
         new work item starts execution until thawed.
 
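As the documentation hunk above describes, a freezable workqueue is drained during the freeze phase of suspend, and work queued after that point is held until thaw. A minimal sketch of allocating such a queue with alloc_workqueue(), mirroring the pm_wq call site changed later in this merge (the "demo" name and the module boilerplate are illustrative, not part of this merge):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;	/* hypothetical example queue */

	static int __init demo_init(void)
	{
		/* WQ_FREEZABLE: the queue participates in suspend's freeze/thaw */
		demo_wq = alloc_workqueue("demo", WQ_FREEZABLE, 0);
		if (!demo_wq)
			return -ENOMEM;
		return 0;
	}

	static void __exit demo_exit(void)
	{
		destroy_workqueue(demo_wq);
	}

	module_init(demo_init);
	module_exit(demo_exit);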
+1 -1
drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("kmemstick");
+	workqueue = create_freezable_workqueue("kmemstick");
 	if (!workqueue)
 		return -ENOMEM;
 
+1 -1
drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("tifm");
+	workqueue = create_freezable_workqueue("tifm");
 	if (!workqueue)
 		return -ENOMEM;
 
+1 -1
drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@
 	if (x86_hyper != &x86_hyper_vmware)
 		return -ENODEV;
 
-	vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+	vmballoon_wq = create_freezable_workqueue("vmmemctl");
 	if (!vmballoon_wq) {
 		pr_err("failed to create workqueue\n");
 		return -ENOMEM;
+1 -1
drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@
 
 	init_completion(&dev->dma_done);
 
-	dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
 	if (!dev->card_workqueue)
 		goto error9;
+1 -1
drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@
 static __init int sm_module_init(void)
 {
 	int error = 0;
-	cache_flush_workqueue = create_freezeable_workqueue("smflush");
+	cache_flush_workqueue = create_freezable_workqueue("smflush");
 
 	if (IS_ERR(cache_flush_workqueue))
 		return PTR_ERR(cache_flush_workqueue);
+1 -1
drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@
 		goto open_unlock;
 	}
 
-	priv->wq = create_freezeable_workqueue("mcp251x_wq");
+	priv->wq = create_freezable_workqueue("mcp251x_wq");
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
+1 -1
drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@
 	s->rts = 0;
 
 	sprintf(b, "max3100-%d", s->minor);
-	s->workqueue = create_freezeable_workqueue(b);
+	s->workqueue = create_freezable_workqueue(b);
 	if (!s->workqueue) {
 		dev_warn(&s->spi->dev, "cannot create workqueue\n");
 		return -EBUSY;
+1 -1
drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@
 	struct max3107_port *s = container_of(port, struct max3107_port, port);
 
 	/* Initialize work queue */
-	s->workqueue = create_freezeable_workqueue("max3107");
+	s->workqueue = create_freezable_workqueue("max3107");
 	if (!s->workqueue) {
 		dev_err(&s->spi->dev, "Workqueue creation failed\n");
 		return -EBUSY;
+2 -2
fs/gfs2/glock.c
@@ -1779,11 +1779,11 @@
 #endif
 
 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+						WQ_MEM_RECLAIM | WQ_FREEZABLE,
 						0);
 	if (IS_ERR(gfs2_delete_workqueue)) {
 		destroy_workqueue(glock_workqueue);
+1 -1
fs/gfs2/main.c
@@ -144,7 +144,7 @@
 
 	error = -ENOMEM;
 	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-					  WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
 	if (!gfs_recovery_wq)
 		goto fail_wq;
 
+1 -1
include/linux/freezer.h
@@ -109,7 +109,7 @@
 }
 
 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
+1 -1
include/linux/sched.h
@@ -1744,7 +1744,7 @@
 #define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
+4 -4
include/linux/workqueue.h
@@ -250,7 +250,7 @@
 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
-	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
+	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
@@ -318,7 +318,7 @@
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@
 
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)			\
+	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
 	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
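For out-of-tree code tracking this rename, the conversion is purely mechanical: the macro still expands to the same alloc_workqueue() call, only spelled correctly. A sketch of a converted call site (the "example" queue name and helper are illustrative, not from this merge):

	#include <linux/workqueue.h>

	static struct workqueue_struct *wq;	/* hypothetical */

	static int example_setup(void)
	{
		/* Before the rename:
		 *	wq = create_freezeable_workqueue("example");
		 * After, with identical behaviour -- the macro expands to:
		 *	alloc_workqueue("example",
		 *			WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
		 */
		wq = create_freezable_workqueue("example");
		return wq ? 0 : -ENOMEM;
	}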
+1 -1
kernel/power/main.c
@@ -326,7 +326,7 @@
 
 static int __init pm_start_workqueue(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
 	return pm_wq ? 0 : -ENOMEM;
 }
+3 -3
kernel/power/process.c
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
 	if ((p == current) ||
 	    (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezeable(p))
+			if (frozen(p) || !freezable(p))
 				continue;
 
 			if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		if (!freezeable(p))
+		if (!freezable(p))
 			continue;
 
 		if (nosig_only && should_send_signal(p))
+24 -13
kernel/workqueue.c
@@ -79,7 +79,9 @@
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
@@ -2049,6 +2047,15 @@
 		move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
 
@@ -2967,7 +2956,7 @@
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
@@ -3079,7 +3068,7 @@
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3329,7 +3318,7 @@
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3587,9 +3576,9 @@
 /**
  * freeze_workqueues_begin - begin freezing workqueues
 *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3615,7 +3604,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3626,7 +3615,7 @@
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3635,8 +3624,8 @@
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
 */
 bool freeze_workqueues_busy(void)
 {
@@ -3656,7 +3645,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3701,7 +3690,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */
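The clamp in the first hunk matters because MAYDAY_INITIAL_TIMEOUT is integer arithmetic on HZ: with HZ=100, HZ / 100 evaluates to a single jiffy, and a 1-jiffy timer may fire on the very next tick, i.e. after almost no wall-clock delay, summoning rescuers before normal worker creation has had a chance. Requiring at least 2 ticks guarantees a real delay. A userspace sketch of the arithmetic (the HZ values are just common kernel configurations, not part of this merge):

	#include <stdio.h>

	/* Mirrors the kernel's clamped expression, parameterized over HZ. */
	static int mayday_initial_timeout(int hz)
	{
		return hz / 100 >= 2 ? hz / 100 : 2;
	}

	int main(void)
	{
		const int hz_values[] = { 100, 250, 300, 1000 };

		for (int i = 0; i < 4; i++) {
			int hz = hz_values[i];
			printf("HZ=%4d: HZ/100 = %2d jiffies -> clamped to %2d\n",
			       hz, hz / 100, mayday_initial_timeout(hz));
		}
		return 0;
	}

Only HZ=100 is affected (1 jiffy becomes 2); configurations with HZ >= 200 already yield at least 2 jiffies and pass through unchanged.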