Merge branch 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: make sure MAYDAY_INITIAL_TIMEOUT is at least 2 jiffies long
  workqueue, freezer: unify spelling of 'freeze' + 'able' to 'freezable'
  workqueue: wake up a worker when a rescuer is leaving a gcwq

17 files changed, 47 insertions(+), 36 deletions(-)
Documentation/workqueue.txt | +2 -2
@@ -190,9 +190,9 @@
 	* Long running CPU intensive workloads which can be better
 	  managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-	A freezeable wq participates in the freeze phase of the system
+	A freezable wq participates in the freeze phase of the system
 	suspend operations.  Work items on the wq are drained and no
 	new work item starts execution until thawed.
 
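For reference, a driver opts into this suspend-time draining by passing the
renamed flag at allocation time. A minimal sketch (the module, queue, and
work names here are hypothetical, not from this merge):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;

    static void demo_work_fn(struct work_struct *work)
    {
            /* While the system is frozen for suspend, instances queued
             * on demo_wq are held back and only run after thaw. */
    }
    static DECLARE_WORK(demo_work, demo_work_fn);

    static int __init demo_init(void)
    {
            /* WQ_FREEZABLE (formerly WQ_FREEZEABLE) opts this wq into
             * the freeze phase described above. */
            demo_wq = alloc_workqueue("demo", WQ_FREEZABLE, 0);
            if (!demo_wq)
                    return -ENOMEM;
            queue_work(demo_wq, &demo_work);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            destroy_workqueue(demo_wq);     /* drains remaining work */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");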
drivers/memstick/core/memstick.c | +1 -1
@@ -621,7 +621,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("kmemstick");
+	workqueue = create_freezable_workqueue("kmemstick");
 	if (!workqueue)
 		return -ENOMEM;
 
drivers/misc/tifm_core.c | +1 -1
@@ -329,7 +329,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("tifm");
+	workqueue = create_freezable_workqueue("tifm");
 	if (!workqueue)
 		return -ENOMEM;
 
drivers/misc/vmw_balloon.c | +1 -1
@@ -785,7 +785,7 @@
 	if (x86_hyper != &x86_hyper_vmware)
 		return -ENODEV;
 
-	vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+	vmballoon_wq = create_freezable_workqueue("vmmemctl");
 	if (!vmballoon_wq) {
 		pr_err("failed to create workqueue\n");
 		return -ENOMEM;
drivers/mtd/nand/r852.c | +1 -1
@@ -930,7 +930,7 @@
 
 	init_completion(&dev->dma_done);
 
-	dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
 	if (!dev->card_workqueue)
 		goto error9;
drivers/mtd/sm_ftl.c | +1 -1
@@ -1258,7 +1258,7 @@
 static __init int sm_module_init(void)
 {
 	int error = 0;
-	cache_flush_workqueue = create_freezeable_workqueue("smflush");
+	cache_flush_workqueue = create_freezable_workqueue("smflush");
 
 	if (IS_ERR(cache_flush_workqueue))
 		return PTR_ERR(cache_flush_workqueue);
drivers/net/can/mcp251x.c | +1 -1
@@ -940,7 +940,7 @@
 		goto open_unlock;
 	}
 
-	priv->wq = create_freezeable_workqueue("mcp251x_wq");
+	priv->wq = create_freezable_workqueue("mcp251x_wq");
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
drivers/tty/serial/max3100.c | +1 -1
@@ -601,7 +601,7 @@
 	s->rts = 0;
 
 	sprintf(b, "max3100-%d", s->minor);
-	s->workqueue = create_freezeable_workqueue(b);
+	s->workqueue = create_freezable_workqueue(b);
 	if (!s->workqueue) {
 		dev_warn(&s->spi->dev, "cannot create workqueue\n");
 		return -EBUSY;
drivers/tty/serial/max3107.c | +1 -1
@@ -833,7 +833,7 @@
 	struct max3107_port *s = container_of(port, struct max3107_port, port);
 
 	/* Initialize work queue */
-	s->workqueue = create_freezeable_workqueue("max3107");
+	s->workqueue = create_freezable_workqueue("max3107");
 	if (!s->workqueue) {
 		dev_err(&s->spi->dev, "Workqueue creation failed\n");
 		return -EBUSY;
fs/gfs2/glock.c | +2 -2
@@ -1779,11 +1779,11 @@
 #endif
 
 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+						WQ_MEM_RECLAIM | WQ_FREEZABLE,
 						0);
 	if (IS_ERR(gfs2_delete_workqueue)) {
 		destroy_workqueue(glock_workqueue);
fs/gfs2/main.c | +1 -1
@@ -144,7 +144,7 @@
 
 	error = -ENOMEM;
 	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-					  WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
 	if (!gfs_recovery_wq)
 		goto fail_wq;
 
include/linux/freezer.h | +1 -1
@@ -109,7 +109,7 @@
 }
 
 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
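The predicate whose comment is fixed here pairs with the PF_FREEZER_SKIP
rename in include/linux/sched.h below. Its body is elided by the hunk
context; assuming the 2.6.38 definition, it is simply a flag test:

    static inline int freezer_should_skip(struct task_struct *p)
    {
            /* Tasks flagged PF_FREEZER_SKIP are not counted as freezable. */
            return !!(p->flags & PF_FREEZER_SKIP);
    }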
include/linux/sched.h | +1 -1
@@ -1744,7 +1744,7 @@
 #define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
include/linux/workqueue.h | +4 -4
@@ -250,7 +250,7 @@
 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
-	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
+	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
@@ -318,7 +318,7 @@
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@
 
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)			\
+	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
 	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
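As a usage note, callers migrate by spelling alone: the macro's expansion is
unchanged apart from the flag name, as the driver hunks above show. Following
the alloc_ordered_workqueue() comment in this hunk, a freezable ordered queue
would be allocated as below (the queue name is hypothetical, not from this
merge):

    struct workqueue_struct *ordered_wq;

    /* Executes at most one work item at a time, in queued order;
     * WQ_FREEZABLE additionally drains it during system suspend. */
    ordered_wq = alloc_ordered_workqueue("demo_ordered", WQ_FREEZABLE);
    if (!ordered_wq)
            return -ENOMEM;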
kernel/power/main.c | +1 -1
@@ -326,7 +326,7 @@
 
 static int __init pm_start_workqueue(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
 	return pm_wq ? 0 : -ENOMEM;
 }
kernel/power/process.c | +3 -3
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
 	if ((p == current) ||
 	    (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezeable(p))
+			if (frozen(p) || !freezable(p))
 				continue;
 
 			if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		if (!freezeable(p))
+		if (!freezable(p))
 			continue;
 
 		if (nosig_only && should_send_signal(p))
kernel/workqueue.c | +24 -13
@@ -79,7 +79,9 @@
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
@@ -2049,5 +2047,14 @@
 			move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
@@ -2967,7 +2956,7 @@
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
@@ -3079,7 +3068,7 @@
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3329,7 +3318,7 @@
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3587,9 +3576,9 @@
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3615,7 +3604,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3626,7 +3615,7 @@
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3635,8 +3624,8 @@
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3656,7 +3645,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3701,7 +3690,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */
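A quick worked check of the MAYDAY_INITIAL_TIMEOUT clamp, assuming the common
CONFIG_HZ choices (HZ / 100 is integer division):

    HZ = 100 : HZ / 100 = 1 jiffy   -> clamped to 2 jiffies (up to 20ms)
    HZ = 250 : HZ / 100 = 2 jiffies -> unchanged (8ms)
    HZ = 300 : HZ / 100 = 3 jiffies -> unchanged (10ms)
    HZ = 1000: HZ / 100 = 10 jiffies -> unchanged (10ms)

Since a 1-jiffy timer can expire on the very next tick, i.e. after almost no
wall-clock time, HZ=100 could previously summon rescuers nearly immediately;
the 2-jiffy floor guarantees at least one full tick of grace before the
mayday timer fires.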