Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

workqueue: rename cpu_workqueue to pool_workqueue

workqueue has moved away from global_cwqs to worker_pools and with the
scheduled custom worker pools, workqueues will be associated with
pools which don't have anything to do with CPUs. The workqueue code
went through a significant amount of change recently and mass renaming
isn't likely to hurt much additionally. Let's replace 'cpu' with
'pool' so that it reflects the current design.

* s/struct cpu_workqueue_struct/struct pool_workqueue/
* s/cpu_wq/pool_wq/
* s/cwq/pwq/

This patch is purely cosmetic.

Signed-off-by: Tejun Heo <tj@kernel.org>

Tejun Heo 112202d9 8d03ecfe

+228 -229
+6 -6
include/linux/workqueue.h
··· 27 27 enum { 28 28 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ 29 29 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ 30 - WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ 30 + WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ 31 31 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ 32 32 #ifdef CONFIG_DEBUG_OBJECTS_WORK 33 33 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ ··· 40 40 41 41 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, 42 42 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, 43 - WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, 43 + WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, 44 44 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, 45 45 #ifdef CONFIG_DEBUG_OBJECTS_WORK 46 46 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, ··· 60 60 WORK_CPU_END = NR_CPUS + 1, 61 61 62 62 /* 63 - * Reserve 7 bits off of cwq pointer w/ debugobjects turned 64 - * off. This makes cwqs aligned to 256 bytes and allows 15 65 - * workqueue flush colors. 63 + * Reserve 7 bits off of pwq pointer w/ debugobjects turned off. 64 + * This makes pwqs aligned to 256 bytes and allows 15 workqueue 65 + * flush colors. 66 66 */ 67 67 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + 68 68 WORK_STRUCT_COLOR_BITS, 69 69 70 - /* data contains off-queue information when !WORK_STRUCT_CWQ */ 70 + /* data contains off-queue information when !WORK_STRUCT_PWQ */ 71 71 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS, 72 72 73 73 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
+5 -5
include/trace/events/workqueue.h
··· 27 27 /** 28 28 * workqueue_queue_work - called when a work gets queued 29 29 * @req_cpu: the requested cpu 30 - * @cwq: pointer to struct cpu_workqueue_struct 30 + * @pwq: pointer to struct pool_workqueue 31 31 * @work: pointer to struct work_struct 32 32 * 33 33 * This event occurs when a work is queued immediately or once a ··· 36 36 */ 37 37 TRACE_EVENT(workqueue_queue_work, 38 38 39 - TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, 39 + TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq, 40 40 struct work_struct *work), 41 41 42 - TP_ARGS(req_cpu, cwq, work), 42 + TP_ARGS(req_cpu, pwq, work), 43 43 44 44 TP_STRUCT__entry( 45 45 __field( void *, work ) ··· 52 52 TP_fast_assign( 53 53 __entry->work = work; 54 54 __entry->function = work->func; 55 - __entry->workqueue = cwq->wq; 55 + __entry->workqueue = pwq->wq; 56 56 __entry->req_cpu = req_cpu; 57 - __entry->cpu = cwq->pool->cpu; 57 + __entry->cpu = pwq->pool->cpu; 58 58 ), 59 59 60 60 TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
+216 -217
kernel/workqueue.c
··· 154 154 } ____cacheline_aligned_in_smp; 155 155 156 156 /* 157 - * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of 158 - * work_struct->data are used for flags and thus cwqs need to be 159 - * aligned at two's power of the number of flag bits. 157 + * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS 158 + * of work_struct->data are used for flags and the remaining high bits 159 + * point to the pwq; thus, pwqs need to be aligned at two's power of the 160 + * number of flag bits. 160 161 */ 161 - struct cpu_workqueue_struct { 162 + struct pool_workqueue { 162 163 struct worker_pool *pool; /* I: the associated pool */ 163 164 struct workqueue_struct *wq; /* I: the owning workqueue */ 164 165 int work_color; /* L: current color */ ··· 208 207 struct workqueue_struct { 209 208 unsigned int flags; /* W: WQ_* flags */ 210 209 union { 211 - struct cpu_workqueue_struct __percpu *pcpu; 212 - struct cpu_workqueue_struct *single; 210 + struct pool_workqueue __percpu *pcpu; 211 + struct pool_workqueue *single; 213 212 unsigned long v; 214 - } cpu_wq; /* I: cwq's */ 213 + } pool_wq; /* I: pwq's */ 215 214 struct list_head list; /* W: list of all workqueues */ 216 215 217 216 struct mutex flush_mutex; /* protects wq flushing */ 218 217 int work_color; /* F: current work color */ 219 218 int flush_color; /* F: current flush color */ 220 - atomic_t nr_cwqs_to_flush; /* flush in progress */ 219 + atomic_t nr_pwqs_to_flush; /* flush in progress */ 221 220 struct wq_flusher *first_flusher; /* F: first flusher */ 222 221 struct list_head flusher_queue; /* F: flush waiters */ 223 222 struct list_head flusher_overflow; /* F: flush overflow list */ ··· 226 225 struct worker *rescuer; /* I: rescue worker */ 227 226 228 227 int nr_drainers; /* W: drain in progress */ 229 - int saved_max_active; /* W: saved cwq max_active */ 228 + int saved_max_active; /* W: saved pwq max_active */ 230 229 #ifdef CONFIG_LOCKDEP 231 230 struct lockdep_map lockdep_map; 232 
231 #endif ··· 269 268 return WORK_CPU_END; 270 269 } 271 270 272 - static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask, 271 + static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask, 273 272 struct workqueue_struct *wq) 274 273 { 275 274 return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); ··· 285 284 * 286 285 * for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND 287 286 * for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND 288 - * for_each_cwq_cpu() : possible CPUs for bound workqueues, 287 + * for_each_pwq_cpu() : possible CPUs for bound workqueues, 289 288 * WORK_CPU_UNBOUND for unbound workqueues 290 289 */ 291 290 #define for_each_wq_cpu(cpu) \ ··· 298 297 (cpu) < WORK_CPU_END; \ 299 298 (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3)) 300 299 301 - #define for_each_cwq_cpu(cpu, wq) \ 302 - for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq)); \ 300 + #define for_each_pwq_cpu(cpu, wq) \ 301 + for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \ 303 302 (cpu) < WORK_CPU_END; \ 304 - (cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq))) 303 + (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq))) 305 304 306 305 #ifdef CONFIG_DEBUG_OBJECTS_WORK 307 306 ··· 480 479 return &pools[highpri]; 481 480 } 482 481 483 - static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, 484 - struct workqueue_struct *wq) 482 + static struct pool_workqueue *get_pwq(unsigned int cpu, 483 + struct workqueue_struct *wq) 485 484 { 486 485 if (!(wq->flags & WQ_UNBOUND)) { 487 486 if (likely(cpu < nr_cpu_ids)) 488 - return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); 487 + return per_cpu_ptr(wq->pool_wq.pcpu, cpu); 489 488 } else if (likely(cpu == WORK_CPU_UNBOUND)) 490 - return wq->cpu_wq.single; 489 + return wq->pool_wq.single; 491 490 return NULL; 492 491 } 493 492 ··· 508 507 } 509 508 510 509 /* 511 - * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data 512 - * contain the pointer to the queued cwq. 
Once execution starts, the flag 510 + * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data 511 + * contain the pointer to the queued pwq. Once execution starts, the flag 513 512 * is cleared and the high bits contain OFFQ flags and pool ID. 514 513 * 515 - * set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 516 - * and clear_work_data() can be used to set the cwq, pool or clear 514 + * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 515 + * and clear_work_data() can be used to set the pwq, pool or clear 517 516 * work->data. These functions should only be called while the work is 518 517 * owned - ie. while the PENDING bit is set. 519 518 * 520 - * get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq 519 + * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq 521 520 * corresponding to a work. Pool is available once the work has been 522 - * queued anywhere after initialization until it is sync canceled. cwq is 521 + * queued anywhere after initialization until it is sync canceled. pwq is 523 522 * available only while the work item is queued. 
524 523 * 525 524 * %WORK_OFFQ_CANCELING is used to mark a work item which is being ··· 534 533 atomic_long_set(&work->data, data | flags | work_static(work)); 535 534 } 536 535 537 - static void set_work_cwq(struct work_struct *work, 538 - struct cpu_workqueue_struct *cwq, 536 + static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, 539 537 unsigned long extra_flags) 540 538 { 541 - set_work_data(work, (unsigned long)cwq, 542 - WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); 539 + set_work_data(work, (unsigned long)pwq, 540 + WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); 543 541 } 544 542 545 543 static void set_work_pool_and_keep_pending(struct work_struct *work, ··· 567 567 set_work_data(work, WORK_STRUCT_NO_POOL, 0); 568 568 } 569 569 570 - static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) 570 + static struct pool_workqueue *get_work_pwq(struct work_struct *work) 571 571 { 572 572 unsigned long data = atomic_long_read(&work->data); 573 573 574 - if (data & WORK_STRUCT_CWQ) 574 + if (data & WORK_STRUCT_PWQ) 575 575 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 576 576 else 577 577 return NULL; ··· 589 589 struct worker_pool *pool; 590 590 int pool_id; 591 591 592 - if (data & WORK_STRUCT_CWQ) 593 - return ((struct cpu_workqueue_struct *) 592 + if (data & WORK_STRUCT_PWQ) 593 + return ((struct pool_workqueue *) 594 594 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; 595 595 596 596 pool_id = data >> WORK_OFFQ_POOL_SHIFT; ··· 613 613 { 614 614 unsigned long data = atomic_long_read(&work->data); 615 615 616 - if (data & WORK_STRUCT_CWQ) 617 - return ((struct cpu_workqueue_struct *) 616 + if (data & WORK_STRUCT_PWQ) 617 + return ((struct pool_workqueue *) 618 618 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; 619 619 620 620 return data >> WORK_OFFQ_POOL_SHIFT; ··· 632 632 { 633 633 unsigned long data = atomic_long_read(&work->data); 634 634 635 - return !(data & WORK_STRUCT_CWQ) && (data & 
WORK_OFFQ_CANCELING); 635 + return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 636 636 } 637 637 638 638 /* ··· 961 961 *nextp = n; 962 962 } 963 963 964 - static void cwq_activate_delayed_work(struct work_struct *work) 964 + static void pwq_activate_delayed_work(struct work_struct *work) 965 965 { 966 - struct cpu_workqueue_struct *cwq = get_work_cwq(work); 966 + struct pool_workqueue *pwq = get_work_pwq(work); 967 967 968 968 trace_workqueue_activate_work(work); 969 - move_linked_works(work, &cwq->pool->worklist, NULL); 969 + move_linked_works(work, &pwq->pool->worklist, NULL); 970 970 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 971 - cwq->nr_active++; 971 + pwq->nr_active++; 972 972 } 973 973 974 - static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) 974 + static void pwq_activate_first_delayed(struct pool_workqueue *pwq) 975 975 { 976 - struct work_struct *work = list_first_entry(&cwq->delayed_works, 976 + struct work_struct *work = list_first_entry(&pwq->delayed_works, 977 977 struct work_struct, entry); 978 978 979 - cwq_activate_delayed_work(work); 979 + pwq_activate_delayed_work(work); 980 980 } 981 981 982 982 /** 983 - * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight 984 - * @cwq: cwq of interest 983 + * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 984 + * @pwq: pwq of interest 985 985 * @color: color of work which left the queue 986 986 * 987 987 * A work either has completed or is removed from pending queue, 988 - * decrement nr_in_flight of its cwq and handle workqueue flushing. 988 + * decrement nr_in_flight of its pwq and handle workqueue flushing. 989 989 * 990 990 * CONTEXT: 991 991 * spin_lock_irq(pool->lock). 
992 992 */ 993 - static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) 993 + static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) 994 994 { 995 995 /* ignore uncolored works */ 996 996 if (color == WORK_NO_COLOR) 997 997 return; 998 998 999 - cwq->nr_in_flight[color]--; 999 + pwq->nr_in_flight[color]--; 1000 1000 1001 - cwq->nr_active--; 1002 - if (!list_empty(&cwq->delayed_works)) { 1001 + pwq->nr_active--; 1002 + if (!list_empty(&pwq->delayed_works)) { 1003 1003 /* one down, submit a delayed one */ 1004 - if (cwq->nr_active < cwq->max_active) 1005 - cwq_activate_first_delayed(cwq); 1004 + if (pwq->nr_active < pwq->max_active) 1005 + pwq_activate_first_delayed(pwq); 1006 1006 } 1007 1007 1008 1008 /* is flush in progress and are we at the flushing tip? */ 1009 - if (likely(cwq->flush_color != color)) 1009 + if (likely(pwq->flush_color != color)) 1010 1010 return; 1011 1011 1012 1012 /* are there still in-flight works? */ 1013 - if (cwq->nr_in_flight[color]) 1013 + if (pwq->nr_in_flight[color]) 1014 1014 return; 1015 1015 1016 - /* this cwq is done, clear flush_color */ 1017 - cwq->flush_color = -1; 1016 + /* this pwq is done, clear flush_color */ 1017 + pwq->flush_color = -1; 1018 1018 1019 1019 /* 1020 - * If this was the last cwq, wake up the first flusher. It 1020 + * If this was the last pwq, wake up the first flusher. It 1021 1021 * will handle the rest. 
1022 1022 */ 1023 - if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) 1024 - complete(&cwq->wq->first_flusher->done); 1023 + if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1024 + complete(&pwq->wq->first_flusher->done); 1025 1025 } 1026 1026 1027 1027 /** ··· 1053 1053 unsigned long *flags) 1054 1054 { 1055 1055 struct worker_pool *pool; 1056 - struct cpu_workqueue_struct *cwq; 1056 + struct pool_workqueue *pwq; 1057 1057 1058 1058 local_irq_save(*flags); 1059 1059 ··· 1084 1084 1085 1085 spin_lock(&pool->lock); 1086 1086 /* 1087 - * work->data is guaranteed to point to cwq only while the work 1088 - * item is queued on cwq->wq, and both updating work->data to point 1089 - * to cwq on queueing and to pool on dequeueing are done under 1090 - * cwq->pool->lock. This in turn guarantees that, if work->data 1091 - * points to cwq which is associated with a locked pool, the work 1087 + * work->data is guaranteed to point to pwq only while the work 1088 + * item is queued on pwq->wq, and both updating work->data to point 1089 + * to pwq on queueing and to pool on dequeueing are done under 1090 + * pwq->pool->lock. This in turn guarantees that, if work->data 1091 + * points to pwq which is associated with a locked pool, the work 1092 1092 * item is currently queued on that pool. 1093 1093 */ 1094 - cwq = get_work_cwq(work); 1095 - if (cwq && cwq->pool == pool) { 1094 + pwq = get_work_pwq(work); 1095 + if (pwq && pwq->pool == pool) { 1096 1096 debug_work_deactivate(work); 1097 1097 1098 1098 /* 1099 1099 * A delayed work item cannot be grabbed directly because 1100 1100 * it might have linked NO_COLOR work items which, if left 1101 - * on the delayed_list, will confuse cwq->nr_active 1101 + * on the delayed_list, will confuse pwq->nr_active 1102 1102 * management later on and cause stall. Make sure the work 1103 1103 * item is activated before grabbing. 
1104 1104 */ 1105 1105 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 1106 - cwq_activate_delayed_work(work); 1106 + pwq_activate_delayed_work(work); 1107 1107 1108 1108 list_del_init(&work->entry); 1109 - cwq_dec_nr_in_flight(get_work_cwq(work), get_work_color(work)); 1109 + pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work)); 1110 1110 1111 - /* work->data points to cwq iff queued, point to pool */ 1111 + /* work->data points to pwq iff queued, point to pool */ 1112 1112 set_work_pool_and_keep_pending(work, pool->id); 1113 1113 1114 1114 spin_unlock(&pool->lock); ··· 1125 1125 1126 1126 /** 1127 1127 * insert_work - insert a work into a pool 1128 - * @cwq: cwq @work belongs to 1128 + * @pwq: pwq @work belongs to 1129 1129 * @work: work to insert 1130 1130 * @head: insertion point 1131 1131 * @extra_flags: extra WORK_STRUCT_* flags to set 1132 1132 * 1133 - * Insert @work which belongs to @cwq after @head. @extra_flags is or'd to 1133 + * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1134 1134 * work_struct flags. 1135 1135 * 1136 1136 * CONTEXT: 1137 1137 * spin_lock_irq(pool->lock). 1138 1138 */ 1139 - static void insert_work(struct cpu_workqueue_struct *cwq, 1140 - struct work_struct *work, struct list_head *head, 1141 - unsigned int extra_flags) 1139 + static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1140 + struct list_head *head, unsigned int extra_flags) 1142 1141 { 1143 - struct worker_pool *pool = cwq->pool; 1142 + struct worker_pool *pool = pwq->pool; 1144 1143 1145 1144 /* we own @work, set data and link */ 1146 - set_work_cwq(work, cwq, extra_flags); 1145 + set_work_pwq(work, pwq, extra_flags); 1147 1146 list_add_tail(&work->entry, head); 1148 1147 1149 1148 /* ··· 1169 1170 * Return %true iff I'm a worker execuing a work item on @wq. If 1170 1171 * I'm @worker, it's safe to dereference it without locking. 
1171 1172 */ 1172 - return worker && worker->current_cwq->wq == wq; 1173 + return worker && worker->current_pwq->wq == wq; 1173 1174 } 1174 1175 1175 1176 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, 1176 1177 struct work_struct *work) 1177 1178 { 1178 - struct cpu_workqueue_struct *cwq; 1179 + struct pool_workqueue *pwq; 1179 1180 struct list_head *worklist; 1180 1181 unsigned int work_flags; 1181 1182 unsigned int req_cpu = cpu; ··· 1195 1196 WARN_ON_ONCE(!is_chained_work(wq))) 1196 1197 return; 1197 1198 1198 - /* determine the cwq to use */ 1199 + /* determine the pwq to use */ 1199 1200 if (!(wq->flags & WQ_UNBOUND)) { 1200 1201 struct worker_pool *last_pool; 1201 1202 ··· 1208 1209 * work needs to be queued on that cpu to guarantee 1209 1210 * non-reentrancy. 1210 1211 */ 1211 - cwq = get_cwq(cpu, wq); 1212 + pwq = get_pwq(cpu, wq); 1212 1213 last_pool = get_work_pool(work); 1213 1214 1214 - if (last_pool && last_pool != cwq->pool) { 1215 + if (last_pool && last_pool != pwq->pool) { 1215 1216 struct worker *worker; 1216 1217 1217 1218 spin_lock(&last_pool->lock); 1218 1219 1219 1220 worker = find_worker_executing_work(last_pool, work); 1220 1221 1221 - if (worker && worker->current_cwq->wq == wq) { 1222 - cwq = get_cwq(last_pool->cpu, wq); 1222 + if (worker && worker->current_pwq->wq == wq) { 1223 + pwq = get_pwq(last_pool->cpu, wq); 1223 1224 } else { 1224 1225 /* meh... 
not running there, queue here */ 1225 1226 spin_unlock(&last_pool->lock); 1226 - spin_lock(&cwq->pool->lock); 1227 + spin_lock(&pwq->pool->lock); 1227 1228 } 1228 1229 } else { 1229 - spin_lock(&cwq->pool->lock); 1230 + spin_lock(&pwq->pool->lock); 1230 1231 } 1231 1232 } else { 1232 - cwq = get_cwq(WORK_CPU_UNBOUND, wq); 1233 - spin_lock(&cwq->pool->lock); 1233 + pwq = get_pwq(WORK_CPU_UNBOUND, wq); 1234 + spin_lock(&pwq->pool->lock); 1234 1235 } 1235 1236 1236 - /* cwq determined, queue */ 1237 - trace_workqueue_queue_work(req_cpu, cwq, work); 1237 + /* pwq determined, queue */ 1238 + trace_workqueue_queue_work(req_cpu, pwq, work); 1238 1239 1239 1240 if (WARN_ON(!list_empty(&work->entry))) { 1240 - spin_unlock(&cwq->pool->lock); 1241 + spin_unlock(&pwq->pool->lock); 1241 1242 return; 1242 1243 } 1243 1244 1244 - cwq->nr_in_flight[cwq->work_color]++; 1245 - work_flags = work_color_to_flags(cwq->work_color); 1245 + pwq->nr_in_flight[pwq->work_color]++; 1246 + work_flags = work_color_to_flags(pwq->work_color); 1246 1247 1247 - if (likely(cwq->nr_active < cwq->max_active)) { 1248 + if (likely(pwq->nr_active < pwq->max_active)) { 1248 1249 trace_workqueue_activate_work(work); 1249 - cwq->nr_active++; 1250 - worklist = &cwq->pool->worklist; 1250 + pwq->nr_active++; 1251 + worklist = &pwq->pool->worklist; 1251 1252 } else { 1252 1253 work_flags |= WORK_STRUCT_DELAYED; 1253 - worklist = &cwq->delayed_works; 1254 + worklist = &pwq->delayed_works; 1254 1255 } 1255 1256 1256 - insert_work(cwq, work, worklist, work_flags); 1257 + insert_work(pwq, work, worklist, work_flags); 1257 1258 1258 - spin_unlock(&cwq->pool->lock); 1259 + spin_unlock(&pwq->pool->lock); 1259 1260 } 1260 1261 1261 1262 /** ··· 1660 1661 1661 1662 /* 1662 1663 * wq doesn't really matter but let's keep @worker->pool 1663 - * and @cwq->pool consistent for sanity. 1664 + * and @pwq->pool consistent for sanity. 
1664 1665 */ 1665 1666 if (std_worker_pool_pri(worker->pool)) 1666 1667 wq = system_highpri_wq; 1667 1668 else 1668 1669 wq = system_wq; 1669 1670 1670 - insert_work(get_cwq(pool->cpu, wq), rebind_work, 1671 + insert_work(get_pwq(pool->cpu, wq), rebind_work, 1671 1672 worker->scheduled.next, 1672 1673 work_color_to_flags(WORK_NO_COLOR)); 1673 1674 } ··· 1844 1845 1845 1846 static bool send_mayday(struct work_struct *work) 1846 1847 { 1847 - struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1848 - struct workqueue_struct *wq = cwq->wq; 1848 + struct pool_workqueue *pwq = get_work_pwq(work); 1849 + struct workqueue_struct *wq = pwq->wq; 1849 1850 unsigned int cpu; 1850 1851 1851 1852 if (!(wq->flags & WQ_RESCUER)) 1852 1853 return false; 1853 1854 1854 1855 /* mayday mayday mayday */ 1855 - cpu = cwq->pool->cpu; 1856 + cpu = pwq->pool->cpu; 1856 1857 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ 1857 1858 if (cpu == WORK_CPU_UNBOUND) 1858 1859 cpu = 0; ··· 2081 2082 __releases(&pool->lock) 2082 2083 __acquires(&pool->lock) 2083 2084 { 2084 - struct cpu_workqueue_struct *cwq = get_work_cwq(work); 2085 + struct pool_workqueue *pwq = get_work_pwq(work); 2085 2086 struct worker_pool *pool = worker->pool; 2086 - bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; 2087 + bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 2087 2088 int work_color; 2088 2089 struct worker *collision; 2089 2090 #ifdef CONFIG_LOCKDEP ··· 2124 2125 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2125 2126 worker->current_work = work; 2126 2127 worker->current_func = work->func; 2127 - worker->current_cwq = cwq; 2128 + worker->current_pwq = pwq; 2128 2129 work_color = get_work_color(work); 2129 2130 2130 2131 list_del_init(&work->entry); ··· 2153 2154 2154 2155 spin_unlock_irq(&pool->lock); 2155 2156 2156 - lock_map_acquire_read(&cwq->wq->lockdep_map); 2157 + lock_map_acquire_read(&pwq->wq->lockdep_map); 2157 2158 lock_map_acquire(&lockdep_map); 
2158 2159 trace_workqueue_execute_start(work); 2159 2160 worker->current_func(work); ··· 2163 2164 */ 2164 2165 trace_workqueue_execute_end(work); 2165 2166 lock_map_release(&lockdep_map); 2166 - lock_map_release(&cwq->wq->lockdep_map); 2167 + lock_map_release(&pwq->wq->lockdep_map); 2167 2168 2168 2169 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2169 2170 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" ··· 2184 2185 hash_del(&worker->hentry); 2185 2186 worker->current_work = NULL; 2186 2187 worker->current_func = NULL; 2187 - worker->current_cwq = NULL; 2188 - cwq_dec_nr_in_flight(cwq, work_color); 2188 + worker->current_pwq = NULL; 2189 + pwq_dec_nr_in_flight(pwq, work_color); 2189 2190 } 2190 2191 2191 2192 /** ··· 2352 2353 */ 2353 2354 for_each_mayday_cpu(cpu, wq->mayday_mask) { 2354 2355 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; 2355 - struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); 2356 - struct worker_pool *pool = cwq->pool; 2356 + struct pool_workqueue *pwq = get_pwq(tcpu, wq); 2357 + struct worker_pool *pool = pwq->pool; 2357 2358 struct work_struct *work, *n; 2358 2359 2359 2360 __set_current_state(TASK_RUNNING); ··· 2369 2370 */ 2370 2371 BUG_ON(!list_empty(&rescuer->scheduled)); 2371 2372 list_for_each_entry_safe(work, n, &pool->worklist, entry) 2372 - if (get_work_cwq(work) == cwq) 2373 + if (get_work_pwq(work) == pwq) 2373 2374 move_linked_works(work, scheduled, &n); 2374 2375 2375 2376 process_scheduled_works(rescuer); ··· 2404 2405 2405 2406 /** 2406 2407 * insert_wq_barrier - insert a barrier work 2407 - * @cwq: cwq to insert barrier into 2408 + * @pwq: pwq to insert barrier into 2408 2409 * @barr: wq_barrier to insert 2409 2410 * @target: target work to attach @barr to 2410 2411 * @worker: worker currently executing @target, NULL if @target is not executing ··· 2421 2422 * after a work with LINKED flag set. 
2422 2423 * 2423 2424 * Note that when @worker is non-NULL, @target may be modified 2424 - * underneath us, so we can't reliably determine cwq from @target. 2425 + * underneath us, so we can't reliably determine pwq from @target. 2425 2426 * 2426 2427 * CONTEXT: 2427 2428 * spin_lock_irq(pool->lock). 2428 2429 */ 2429 - static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 2430 + static void insert_wq_barrier(struct pool_workqueue *pwq, 2430 2431 struct wq_barrier *barr, 2431 2432 struct work_struct *target, struct worker *worker) 2432 2433 { ··· 2459 2460 } 2460 2461 2461 2462 debug_work_activate(&barr->work); 2462 - insert_work(cwq, &barr->work, head, 2463 + insert_work(pwq, &barr->work, head, 2463 2464 work_color_to_flags(WORK_NO_COLOR) | linked); 2464 2465 } 2465 2466 2466 2467 /** 2467 - * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing 2468 + * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 2468 2469 * @wq: workqueue being flushed 2469 2470 * @flush_color: new flush color, < 0 for no-op 2470 2471 * @work_color: new work color, < 0 for no-op 2471 2472 * 2472 - * Prepare cwqs for workqueue flushing. 2473 + * Prepare pwqs for workqueue flushing. 2473 2474 * 2474 - * If @flush_color is non-negative, flush_color on all cwqs should be 2475 - * -1. If no cwq has in-flight commands at the specified color, all 2476 - * cwq->flush_color's stay at -1 and %false is returned. If any cwq 2477 - * has in flight commands, its cwq->flush_color is set to 2478 - * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq 2475 + * If @flush_color is non-negative, flush_color on all pwqs should be 2476 + * -1. If no pwq has in-flight commands at the specified color, all 2477 + * pwq->flush_color's stay at -1 and %false is returned. 
If any pwq 2478 + * has in flight commands, its pwq->flush_color is set to 2479 + * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 2479 2480 * wakeup logic is armed and %true is returned. 2480 2481 * 2481 2482 * The caller should have initialized @wq->first_flusher prior to ··· 2483 2484 * @flush_color is negative, no flush color update is done and %false 2484 2485 * is returned. 2485 2486 * 2486 - * If @work_color is non-negative, all cwqs should have the same 2487 + * If @work_color is non-negative, all pwqs should have the same 2487 2488 * work_color which is previous to @work_color and all will be 2488 2489 * advanced to @work_color. 2489 2490 * ··· 2494 2495 * %true if @flush_color >= 0 and there's something to flush. %false 2495 2496 * otherwise. 2496 2497 */ 2497 - static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, 2498 + static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 2498 2499 int flush_color, int work_color) 2499 2500 { 2500 2501 bool wait = false; 2501 2502 unsigned int cpu; 2502 2503 2503 2504 if (flush_color >= 0) { 2504 - BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); 2505 - atomic_set(&wq->nr_cwqs_to_flush, 1); 2505 + BUG_ON(atomic_read(&wq->nr_pwqs_to_flush)); 2506 + atomic_set(&wq->nr_pwqs_to_flush, 1); 2506 2507 } 2507 2508 2508 - for_each_cwq_cpu(cpu, wq) { 2509 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2510 - struct worker_pool *pool = cwq->pool; 2509 + for_each_pwq_cpu(cpu, wq) { 2510 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 2511 + struct worker_pool *pool = pwq->pool; 2511 2512 2512 2513 spin_lock_irq(&pool->lock); 2513 2514 2514 2515 if (flush_color >= 0) { 2515 - BUG_ON(cwq->flush_color != -1); 2516 + BUG_ON(pwq->flush_color != -1); 2516 2517 2517 - if (cwq->nr_in_flight[flush_color]) { 2518 - cwq->flush_color = flush_color; 2519 - atomic_inc(&wq->nr_cwqs_to_flush); 2518 + if (pwq->nr_in_flight[flush_color]) { 2519 + pwq->flush_color = flush_color; 2520 + 
atomic_inc(&wq->nr_pwqs_to_flush); 2520 2521 wait = true; 2521 2522 } 2522 2523 } 2523 2524 2524 2525 if (work_color >= 0) { 2525 - BUG_ON(work_color != work_next_color(cwq->work_color)); 2526 - cwq->work_color = work_color; 2526 + BUG_ON(work_color != work_next_color(pwq->work_color)); 2527 + pwq->work_color = work_color; 2527 2528 } 2528 2529 2529 2530 spin_unlock_irq(&pool->lock); 2530 2531 } 2531 2532 2532 - if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) 2533 + if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 2533 2534 complete(&wq->first_flusher->done); 2534 2535 2535 2536 return wait; ··· 2580 2581 2581 2582 wq->first_flusher = &this_flusher; 2582 2583 2583 - if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, 2584 + if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 2584 2585 wq->work_color)) { 2585 2586 /* nothing to flush, done */ 2586 2587 wq->flush_color = next_color; ··· 2591 2592 /* wait in queue */ 2592 2593 BUG_ON(wq->flush_color == this_flusher.flush_color); 2593 2594 list_add_tail(&this_flusher.list, &wq->flusher_queue); 2594 - flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2595 + flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 2595 2596 } 2596 2597 } else { 2597 2598 /* ··· 2658 2659 2659 2660 list_splice_tail_init(&wq->flusher_overflow, 2660 2661 &wq->flusher_queue); 2661 - flush_workqueue_prep_cwqs(wq, -1, wq->work_color); 2662 + flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 2662 2663 } 2663 2664 2664 2665 if (list_empty(&wq->flusher_queue)) { ··· 2668 2669 2669 2670 /* 2670 2671 * Need to flush more colors. Make the next flusher 2671 - * the new first flusher and arm cwqs. 2672 + * the new first flusher and arm pwqs. 
2672 2673 */ 2673 2674 BUG_ON(wq->flush_color == wq->work_color); 2674 2675 BUG_ON(wq->flush_color != next->flush_color); ··· 2676 2677 list_del_init(&next->list); 2677 2678 wq->first_flusher = next; 2678 2679 2679 - if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) 2680 + if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 2680 2681 break; 2681 2682 2682 2683 /* ··· 2719 2720 reflush: 2720 2721 flush_workqueue(wq); 2721 2722 2722 - for_each_cwq_cpu(cpu, wq) { 2723 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2723 + for_each_pwq_cpu(cpu, wq) { 2724 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 2724 2725 bool drained; 2725 2726 2726 - spin_lock_irq(&cwq->pool->lock); 2727 - drained = !cwq->nr_active && list_empty(&cwq->delayed_works); 2728 - spin_unlock_irq(&cwq->pool->lock); 2727 + spin_lock_irq(&pwq->pool->lock); 2728 + drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 2729 + spin_unlock_irq(&pwq->pool->lock); 2729 2730 2730 2731 if (drained) 2731 2732 continue; ··· 2748 2749 { 2749 2750 struct worker *worker = NULL; 2750 2751 struct worker_pool *pool; 2751 - struct cpu_workqueue_struct *cwq; 2752 + struct pool_workqueue *pwq; 2752 2753 2753 2754 might_sleep(); 2754 2755 pool = get_work_pool(work); ··· 2757 2758 2758 2759 spin_lock_irq(&pool->lock); 2759 2760 /* see the comment in try_to_grab_pending() with the same code */ 2760 - cwq = get_work_cwq(work); 2761 - if (cwq) { 2762 - if (unlikely(cwq->pool != pool)) 2761 + pwq = get_work_pwq(work); 2762 + if (pwq) { 2763 + if (unlikely(pwq->pool != pool)) 2763 2764 goto already_gone; 2764 2765 } else { 2765 2766 worker = find_worker_executing_work(pool, work); 2766 2767 if (!worker) 2767 2768 goto already_gone; 2768 - cwq = worker->current_cwq; 2769 + pwq = worker->current_pwq; 2769 2770 } 2770 2771 2771 - insert_wq_barrier(cwq, barr, work, worker); 2772 + insert_wq_barrier(pwq, barr, work, worker); 2772 2773 spin_unlock_irq(&pool->lock); 2773 2774 2774 2775 /* ··· 2777 2778 * 
flusher is not running on the same workqueue by verifying write 2778 2779 * access. 2779 2780 */ 2780 - if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER) 2781 - lock_map_acquire(&cwq->wq->lockdep_map); 2781 + if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER) 2782 + lock_map_acquire(&pwq->wq->lockdep_map); 2782 2783 else 2783 - lock_map_acquire_read(&cwq->wq->lockdep_map); 2784 - lock_map_release(&cwq->wq->lockdep_map); 2784 + lock_map_acquire_read(&pwq->wq->lockdep_map); 2785 + lock_map_release(&pwq->wq->lockdep_map); 2785 2786 2786 2787 return true; 2787 2788 already_gone: ··· 3091 3092 return system_wq != NULL; 3092 3093 } 3093 3094 3094 - static int alloc_cwqs(struct workqueue_struct *wq) 3095 + static int alloc_pwqs(struct workqueue_struct *wq) 3095 3096 { 3096 3097 /* 3097 - * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 3098 + * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 3098 3099 * Make sure that the alignment isn't lower than that of 3099 3100 * unsigned long long. 3100 3101 */ 3101 - const size_t size = sizeof(struct cpu_workqueue_struct); 3102 + const size_t size = sizeof(struct pool_workqueue); 3102 3103 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, 3103 3104 __alignof__(unsigned long long)); 3104 3105 3105 3106 if (!(wq->flags & WQ_UNBOUND)) 3106 - wq->cpu_wq.pcpu = __alloc_percpu(size, align); 3107 + wq->pool_wq.pcpu = __alloc_percpu(size, align); 3107 3108 else { 3108 3109 void *ptr; 3109 3110 3110 3111 /* 3111 - * Allocate enough room to align cwq and put an extra 3112 + * Allocate enough room to align pwq and put an extra 3112 3113 * pointer at the end pointing back to the originally 3113 3114 * allocated pointer which will be used for free. 
3114 3115 */ 3115 3116 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); 3116 3117 if (ptr) { 3117 - wq->cpu_wq.single = PTR_ALIGN(ptr, align); 3118 - *(void **)(wq->cpu_wq.single + 1) = ptr; 3118 + wq->pool_wq.single = PTR_ALIGN(ptr, align); 3119 + *(void **)(wq->pool_wq.single + 1) = ptr; 3119 3120 } 3120 3121 } 3121 3122 3122 3123 /* just in case, make sure it's actually aligned */ 3123 - BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); 3124 - return wq->cpu_wq.v ? 0 : -ENOMEM; 3124 + BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align)); 3125 + return wq->pool_wq.v ? 0 : -ENOMEM; 3125 3126 } 3126 3127 3127 - static void free_cwqs(struct workqueue_struct *wq) 3128 + static void free_pwqs(struct workqueue_struct *wq) 3128 3129 { 3129 3130 if (!(wq->flags & WQ_UNBOUND)) 3130 - free_percpu(wq->cpu_wq.pcpu); 3131 - else if (wq->cpu_wq.single) { 3132 - /* the pointer to free is stored right after the cwq */ 3133 - kfree(*(void **)(wq->cpu_wq.single + 1)); 3131 + free_percpu(wq->pool_wq.pcpu); 3132 + else if (wq->pool_wq.single) { 3133 + /* the pointer to free is stored right after the pwq */ 3134 + kfree(*(void **)(wq->pool_wq.single + 1)); 3134 3135 } 3135 3136 } 3136 3137 ··· 3184 3185 wq->flags = flags; 3185 3186 wq->saved_max_active = max_active; 3186 3187 mutex_init(&wq->flush_mutex); 3187 - atomic_set(&wq->nr_cwqs_to_flush, 0); 3188 + atomic_set(&wq->nr_pwqs_to_flush, 0); 3188 3189 INIT_LIST_HEAD(&wq->flusher_queue); 3189 3190 INIT_LIST_HEAD(&wq->flusher_overflow); 3190 3191 3191 3192 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 3192 3193 INIT_LIST_HEAD(&wq->list); 3193 3194 3194 - if (alloc_cwqs(wq) < 0) 3195 + if (alloc_pwqs(wq) < 0) 3195 3196 goto err; 3196 3197 3197 - for_each_cwq_cpu(cpu, wq) { 3198 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3198 + for_each_pwq_cpu(cpu, wq) { 3199 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 3199 3200 3200 - BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); 3201 - cwq->pool = get_std_worker_pool(cpu, flags 
& WQ_HIGHPRI); 3202 - cwq->wq = wq; 3203 - cwq->flush_color = -1; 3204 - cwq->max_active = max_active; 3205 - INIT_LIST_HEAD(&cwq->delayed_works); 3201 + BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 3202 + pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI); 3203 + pwq->wq = wq; 3204 + pwq->flush_color = -1; 3205 + pwq->max_active = max_active; 3206 + INIT_LIST_HEAD(&pwq->delayed_works); 3206 3207 } 3207 3208 3208 3209 if (flags & WQ_RESCUER) { ··· 3233 3234 spin_lock(&workqueue_lock); 3234 3235 3235 3236 if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 3236 - for_each_cwq_cpu(cpu, wq) 3237 - get_cwq(cpu, wq)->max_active = 0; 3237 + for_each_pwq_cpu(cpu, wq) 3238 + get_pwq(cpu, wq)->max_active = 0; 3238 3239 3239 3240 list_add(&wq->list, &workqueues); 3240 3241 ··· 3243 3244 return wq; 3244 3245 err: 3245 3246 if (wq) { 3246 - free_cwqs(wq); 3247 + free_pwqs(wq); 3247 3248 free_mayday_mask(wq->mayday_mask); 3248 3249 kfree(wq->rescuer); 3249 3250 kfree(wq); ··· 3274 3275 spin_unlock(&workqueue_lock); 3275 3276 3276 3277 /* sanity check */ 3277 - for_each_cwq_cpu(cpu, wq) { 3278 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3278 + for_each_pwq_cpu(cpu, wq) { 3279 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 3279 3280 int i; 3280 3281 3281 3282 for (i = 0; i < WORK_NR_COLORS; i++) 3282 - BUG_ON(cwq->nr_in_flight[i]); 3283 - BUG_ON(cwq->nr_active); 3284 - BUG_ON(!list_empty(&cwq->delayed_works)); 3283 + BUG_ON(pwq->nr_in_flight[i]); 3284 + BUG_ON(pwq->nr_active); 3285 + BUG_ON(!list_empty(&pwq->delayed_works)); 3285 3286 } 3286 3287 3287 3288 if (wq->flags & WQ_RESCUER) { ··· 3290 3291 kfree(wq->rescuer); 3291 3292 } 3292 3293 3293 - free_cwqs(wq); 3294 + free_pwqs(wq); 3294 3295 kfree(wq); 3295 3296 } 3296 3297 EXPORT_SYMBOL_GPL(destroy_workqueue); 3297 3298 3298 3299 /** 3299 - * cwq_set_max_active - adjust max_active of a cwq 3300 - * @cwq: target cpu_workqueue_struct 3300 + * pwq_set_max_active - adjust max_active of a pwq 3301 + * 
@pwq: target pool_workqueue 3301 3302 * @max_active: new max_active value. 3302 3303 * 3303 - * Set @cwq->max_active to @max_active and activate delayed works if 3304 + * Set @pwq->max_active to @max_active and activate delayed works if 3304 3305 * increased. 3305 3306 * 3306 3307 * CONTEXT: 3307 3308 * spin_lock_irq(pool->lock). 3308 3309 */ 3309 - static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active) 3310 + static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active) 3310 3311 { 3311 - cwq->max_active = max_active; 3312 + pwq->max_active = max_active; 3312 3313 3313 - while (!list_empty(&cwq->delayed_works) && 3314 - cwq->nr_active < cwq->max_active) 3315 - cwq_activate_first_delayed(cwq); 3314 + while (!list_empty(&pwq->delayed_works) && 3315 + pwq->nr_active < pwq->max_active) 3316 + pwq_activate_first_delayed(pwq); 3316 3317 } 3317 3318 3318 3319 /** ··· 3335 3336 3336 3337 wq->saved_max_active = max_active; 3337 3338 3338 - for_each_cwq_cpu(cpu, wq) { 3339 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3340 - struct worker_pool *pool = cwq->pool; 3339 + for_each_pwq_cpu(cpu, wq) { 3340 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 3341 + struct worker_pool *pool = pwq->pool; 3341 3342 3342 3343 spin_lock_irq(&pool->lock); 3343 3344 3344 3345 if (!(wq->flags & WQ_FREEZABLE) || 3345 3346 !(pool->flags & POOL_FREEZING)) 3346 - cwq_set_max_active(cwq, max_active); 3347 + pwq_set_max_active(pwq, max_active); 3347 3348 3348 3349 spin_unlock_irq(&pool->lock); 3349 3350 } ··· 3366 3367 */ 3367 3368 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) 3368 3369 { 3369 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3370 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 3370 3371 3371 - return !list_empty(&cwq->delayed_works); 3372 + return !list_empty(&pwq->delayed_works); 3372 3373 } 3373 3374 EXPORT_SYMBOL_GPL(workqueue_congested); 3374 3375 ··· 3407 3408 * CPU hotplug. 
3408 3409 * 3409 3410 * There are two challenges in supporting CPU hotplug. Firstly, there 3410 - * are a lot of assumptions on strong associations among work, cwq and 3411 + * are a lot of assumptions on strong associations among work, pwq and 3411 3412 * pool which make migrating pending and scheduled works very 3412 3413 * difficult to implement without impacting hot paths. Secondly, 3413 3414 * worker pools serve mix of short, long and very long running works making ··· 3611 3612 pool->flags |= POOL_FREEZING; 3612 3613 3613 3614 list_for_each_entry(wq, &workqueues, list) { 3614 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3615 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 3615 3616 3616 - if (cwq && cwq->pool == pool && 3617 + if (pwq && pwq->pool == pool && 3617 3618 (wq->flags & WQ_FREEZABLE)) 3618 - cwq->max_active = 0; 3619 + pwq->max_active = 0; 3619 3620 } 3620 3621 3621 3622 spin_unlock_irq(&pool->lock); ··· 3654 3655 * to peek without lock. 3655 3656 */ 3656 3657 list_for_each_entry(wq, &workqueues, list) { 3657 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3658 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 3658 3659 3659 - if (!cwq || !(wq->flags & WQ_FREEZABLE)) 3660 + if (!pwq || !(wq->flags & WQ_FREEZABLE)) 3660 3661 continue; 3661 3662 3662 - BUG_ON(cwq->nr_active < 0); 3663 - if (cwq->nr_active) { 3663 + BUG_ON(pwq->nr_active < 0); 3664 + if (pwq->nr_active) { 3664 3665 busy = true; 3665 3666 goto out_unlock; 3666 3667 } ··· 3700 3701 pool->flags &= ~POOL_FREEZING; 3701 3702 3702 3703 list_for_each_entry(wq, &workqueues, list) { 3703 - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3704 + struct pool_workqueue *pwq = get_pwq(cpu, wq); 3704 3705 3705 - if (!cwq || cwq->pool != pool || 3706 + if (!pwq || pwq->pool != pool || 3706 3707 !(wq->flags & WQ_FREEZABLE)) 3707 3708 continue; 3708 3709 3709 3710 /* restore max_active and repopulate worklist */ 3710 - cwq_set_max_active(cwq, wq->saved_max_active); 3711 + 
pwq_set_max_active(pwq, wq->saved_max_active); 3711 3712 } 3712 3713 3713 3714 wake_up_worker(pool);
+1 -1
kernel/workqueue_internal.h
··· 28 28 29 29 struct work_struct *current_work; /* L: work being processed */ 30 30 work_func_t current_func; /* L: current_work's fn */ 31 - struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ 31 + struct pool_workqueue *current_pwq; /* L: current_work's pwq */ 32 32 struct list_head scheduled; /* L: scheduled works */ 33 33 struct task_struct *task; /* I: worker task */ 34 34 struct worker_pool *pool; /* I: the associated pool */