Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

workqueue, freezer: unify spelling of 'freeze' + 'able' to 'freezable'

There are two spellings in use for 'freeze' + 'able' - 'freezable' and
'freezeable'. The former is the more prominent one. The latter is
mostly used by workqueue and in a few other odd places. Unify the
spelling to 'freezable'.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Alan Stern <stern@rowland.harvard.edu>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Acked-by: Dmitry Torokhov <dtor@mail.ru>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Steven Whitehouse <swhiteho@redhat.com>

Tejun Heo 58a69cb4 7576958a

+35 -35
+2 -2
Documentation/workqueue.txt
··· 190 190 * Long running CPU intensive workloads which can be better 191 191 managed by the system scheduler. 192 192 193 - WQ_FREEZEABLE 193 + WQ_FREEZABLE 194 194 195 - A freezeable wq participates in the freeze phase of the system 195 + A freezable wq participates in the freeze phase of the system 196 196 suspend operations. Work items on the wq are drained and no 197 197 new work item starts execution until thawed. 198 198
+1 -1
drivers/memstick/core/memstick.c
··· 621 621 { 622 622 int rc; 623 623 624 - workqueue = create_freezeable_workqueue("kmemstick"); 624 + workqueue = create_freezable_workqueue("kmemstick"); 625 625 if (!workqueue) 626 626 return -ENOMEM; 627 627
+1 -1
drivers/misc/tifm_core.c
··· 329 329 { 330 330 int rc; 331 331 332 - workqueue = create_freezeable_workqueue("tifm"); 332 + workqueue = create_freezable_workqueue("tifm"); 333 333 if (!workqueue) 334 334 return -ENOMEM; 335 335
+1 -1
drivers/misc/vmw_balloon.c
··· 785 785 if (x86_hyper != &x86_hyper_vmware) 786 786 return -ENODEV; 787 787 788 - vmballoon_wq = create_freezeable_workqueue("vmmemctl"); 788 + vmballoon_wq = create_freezable_workqueue("vmmemctl"); 789 789 if (!vmballoon_wq) { 790 790 pr_err("failed to create workqueue\n"); 791 791 return -ENOMEM;
+1 -1
drivers/mtd/nand/r852.c
··· 930 930 931 931 init_completion(&dev->dma_done); 932 932 933 - dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); 933 + dev->card_workqueue = create_freezable_workqueue(DRV_NAME); 934 934 935 935 if (!dev->card_workqueue) 936 936 goto error9;
+1 -1
drivers/mtd/sm_ftl.c
··· 1258 1258 static __init int sm_module_init(void) 1259 1259 { 1260 1260 int error = 0; 1261 - cache_flush_workqueue = create_freezeable_workqueue("smflush"); 1261 + cache_flush_workqueue = create_freezable_workqueue("smflush"); 1262 1262 1263 1263 if (IS_ERR(cache_flush_workqueue)) 1264 1264 return PTR_ERR(cache_flush_workqueue);
+1 -1
drivers/net/can/mcp251x.c
··· 940 940 goto open_unlock; 941 941 } 942 942 943 - priv->wq = create_freezeable_workqueue("mcp251x_wq"); 943 + priv->wq = create_freezable_workqueue("mcp251x_wq"); 944 944 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); 945 945 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); 946 946
+1 -1
drivers/tty/serial/max3100.c
··· 601 601 s->rts = 0; 602 602 603 603 sprintf(b, "max3100-%d", s->minor); 604 - s->workqueue = create_freezeable_workqueue(b); 604 + s->workqueue = create_freezable_workqueue(b); 605 605 if (!s->workqueue) { 606 606 dev_warn(&s->spi->dev, "cannot create workqueue\n"); 607 607 return -EBUSY;
+1 -1
drivers/tty/serial/max3107.c
··· 833 833 struct max3107_port *s = container_of(port, struct max3107_port, port); 834 834 835 835 /* Initialize work queue */ 836 - s->workqueue = create_freezeable_workqueue("max3107"); 836 + s->workqueue = create_freezable_workqueue("max3107"); 837 837 if (!s->workqueue) { 838 838 dev_err(&s->spi->dev, "Workqueue creation failed\n"); 839 839 return -EBUSY;
+2 -2
fs/gfs2/glock.c
··· 1779 1779 #endif 1780 1780 1781 1781 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 1782 - WQ_HIGHPRI | WQ_FREEZEABLE, 0); 1782 + WQ_HIGHPRI | WQ_FREEZABLE, 0); 1783 1783 if (IS_ERR(glock_workqueue)) 1784 1784 return PTR_ERR(glock_workqueue); 1785 1785 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 1786 - WQ_MEM_RECLAIM | WQ_FREEZEABLE, 1786 + WQ_MEM_RECLAIM | WQ_FREEZABLE, 1787 1787 0); 1788 1788 if (IS_ERR(gfs2_delete_workqueue)) { 1789 1789 destroy_workqueue(glock_workqueue);
+1 -1
fs/gfs2/main.c
··· 144 144 145 145 error = -ENOMEM; 146 146 gfs_recovery_wq = alloc_workqueue("gfs_recovery", 147 - WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); 147 + WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); 148 148 if (!gfs_recovery_wq) 149 149 goto fail_wq; 150 150
+1 -1
include/linux/freezer.h
··· 109 109 } 110 110 111 111 /* 112 - * Check if the task should be counted as freezeable by the freezer 112 + * Check if the task should be counted as freezable by the freezer 113 113 */ 114 114 static inline int freezer_should_skip(struct task_struct *p) 115 115 {
+1 -1
include/linux/sched.h
··· 1744 1744 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1745 1745 #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1746 1746 #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1747 - #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ 1747 + #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 1748 1748 #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ 1749 1749 1750 1750 /*
+4 -4
include/linux/workqueue.h
··· 250 250 enum { 251 251 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ 252 252 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 253 - WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ 253 + WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ 254 254 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ 255 255 WQ_HIGHPRI = 1 << 4, /* high priority */ 256 256 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ ··· 318 318 /** 319 319 * alloc_ordered_workqueue - allocate an ordered workqueue 320 320 * @name: name of the workqueue 321 - * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) 321 + * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) 322 322 * 323 323 * Allocate an ordered workqueue. An ordered workqueue executes at 324 324 * most one work item at any given time in the queued order. They are ··· 335 335 336 336 #define create_workqueue(name) \ 337 337 alloc_workqueue((name), WQ_MEM_RECLAIM, 1) 338 - #define create_freezeable_workqueue(name) \ 339 - alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 338 + #define create_freezable_workqueue(name) \ 339 + alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 340 340 #define create_singlethread_workqueue(name) \ 341 341 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 342 342
+1 -1
kernel/power/main.c
··· 326 326 327 327 static int __init pm_start_workqueue(void) 328 328 { 329 - pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); 329 + pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); 330 330 331 331 return pm_wq ? 0 : -ENOMEM; 332 332 }
+3 -3
kernel/power/process.c
··· 22 22 */ 23 23 #define TIMEOUT (20 * HZ) 24 24 25 - static inline int freezeable(struct task_struct * p) 25 + static inline int freezable(struct task_struct * p) 26 26 { 27 27 if ((p == current) || 28 28 (p->flags & PF_NOFREEZE) || ··· 53 53 todo = 0; 54 54 read_lock(&tasklist_lock); 55 55 do_each_thread(g, p) { 56 - if (frozen(p) || !freezeable(p)) 56 + if (frozen(p) || !freezable(p)) 57 57 continue; 58 58 59 59 if (!freeze_task(p, sig_only)) ··· 167 167 168 168 read_lock(&tasklist_lock); 169 169 do_each_thread(g, p) { 170 - if (!freezeable(p)) 170 + if (!freezable(p)) 171 171 continue; 172 172 173 173 if (nosig_only && should_send_signal(p))
+12 -12
kernel/workqueue.c
··· 2965 2965 */ 2966 2966 spin_lock(&workqueue_lock); 2967 2967 2968 - if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) 2968 + if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 2969 2969 for_each_cwq_cpu(cpu, wq) 2970 2970 get_cwq(cpu, wq)->max_active = 0; 2971 2971 ··· 3077 3077 3078 3078 spin_lock_irq(&gcwq->lock); 3079 3079 3080 - if (!(wq->flags & WQ_FREEZEABLE) || 3080 + if (!(wq->flags & WQ_FREEZABLE) || 3081 3081 !(gcwq->flags & GCWQ_FREEZING)) 3082 3082 get_cwq(gcwq->cpu, wq)->max_active = max_active; 3083 3083 ··· 3327 3327 * want to get it over with ASAP - spam rescuers, wake up as 3328 3328 * many idlers as necessary and create new ones till the 3329 3329 * worklist is empty. Note that if the gcwq is frozen, there 3330 - * may be frozen works in freezeable cwqs. Don't declare 3330 + * may be frozen works in freezable cwqs. Don't declare 3331 3331 * completion while frozen. 3332 3332 */ 3333 3333 while (gcwq->nr_workers != gcwq->nr_idle || ··· 3585 3585 /** 3586 3586 * freeze_workqueues_begin - begin freezing workqueues 3587 3587 * 3588 - * Start freezing workqueues. After this function returns, all 3589 - * freezeable workqueues will queue new works to their frozen_works 3590 - * list instead of gcwq->worklist. 3588 + * Start freezing workqueues. After this function returns, all freezable 3589 + * workqueues will queue new works to their frozen_works list instead of 3590 + * gcwq->worklist. 3591 3591 * 3592 3592 * CONTEXT: 3593 3593 * Grabs and releases workqueue_lock and gcwq->lock's. ··· 3613 3613 list_for_each_entry(wq, &workqueues, list) { 3614 3614 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3615 3615 3616 - if (cwq && wq->flags & WQ_FREEZEABLE) 3616 + if (cwq && wq->flags & WQ_FREEZABLE) 3617 3617 cwq->max_active = 0; 3618 3618 } 3619 3619 ··· 3624 3624 } 3625 3625 3626 3626 /** 3627 - * freeze_workqueues_busy - are freezeable workqueues still busy? 3627 + * freeze_workqueues_busy - are freezable workqueues still busy?
3628 3628 * 3629 3629 * Check whether freezing is complete. This function must be called 3630 3630 * between freeze_workqueues_begin() and thaw_workqueues(). ··· 3633 3633 * Grabs and releases workqueue_lock. 3634 3634 * 3635 3635 * RETURNS: 3636 - * %true if some freezeable workqueues are still busy. %false if 3637 - * freezing is complete. 3636 + * %true if some freezable workqueues are still busy. %false if freezing 3637 + * is complete. 3638 3638 */ 3639 3639 bool freeze_workqueues_busy(void) 3640 3640 { ··· 3654 3654 list_for_each_entry(wq, &workqueues, list) { 3655 3655 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3656 3656 3657 - if (!cwq || !(wq->flags & WQ_FREEZEABLE)) 3657 + if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3658 3658 continue; 3659 3659 3660 3660 BUG_ON(cwq->nr_active < 0); ··· 3699 3699 list_for_each_entry(wq, &workqueues, list) { 3700 3700 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3701 3701 3702 - if (!cwq || !(wq->flags & WQ_FREEZEABLE)) 3702 + if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3703 3703 continue; 3704 3704 3705 3705 /* restore max_active and repopulate worklist */