/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)						\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

struct btrfs_fs_info *
btrfs_workqueue_owner(struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(struct btrfs_work *work)
{
	return work->wq->fs_info;
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);
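/*
 * Illustrative note (not part of the original file): each
 * BTRFS_WORK_HELPER(name) line above expands into a small wrapper.
 * For example, BTRFS_WORK_HELPER(endio_helper) becomes:
 *
 *	void btrfs_endio_helper(struct work_struct *arg)
 *	{
 *		struct btrfs_work *work = container_of(arg,
 *				struct btrfs_work, normal_work);
 *		normal_work_helper(work);
 *	}
 *
 * so each work type gets a distinct work function address even though
 * all of them funnel into normal_work_helper().
 */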
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
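/*
 * Illustrative sketch (not part of the original file; the variable
 * name, queue name and values are examples only). A caller would
 * typically allocate a queue pair roughly like this:
 *
 *	struct btrfs_workqueue *workers;
 *
 *	workers = btrfs_alloc_workqueue(fs_info, "worker",
 *					WQ_FREEZABLE | WQ_HIGHPRI,
 *					max_active, 16);
 *	if (!workers)
 *		return -ENOMEM;
 *
 * With WQ_HIGHPRI set, both a normal and a high-priority
 * __btrfs_workqueue are created. A thresh of 16 is below DFT_THRESHOLD,
 * so the concurrency auto-scaling below stays disabled for this queue.
 */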
/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * is called from here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but that's OK since we don't need a
	 * perfectly accurate value to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
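/*
 * Worked example (not part of the original file), assuming thresh == 32
 * and limit_active == 8: wq->count cycles modulo thresh / 4 == 8, and
 * the recalculation above is skipped whenever the counter wraps to
 * zero. Otherwise, pending > 32 raises current_active by one and
 * pending < 16 lowers it by one; the result is clamped to [1, 8], and
 * workqueue_set_max_active() runs only when the value actually changed.
 */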
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
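/*
 * Illustrative end-to-end sketch (not part of the original file; the
 * callback names and the embedding struct are hypothetical). A
 * submitter embeds a struct btrfs_work, initializes it with one of the
 * helpers generated above, and queues it:
 *
 *	btrfs_init_work(&async->work, btrfs_worker_helper, my_func,
 *			my_ordered_func, my_ordered_free);
 *	btrfs_set_work_high_priority(&async->work);
 *	btrfs_queue_work(fs_info->workers, &async->work);
 *
 * my_func() then runs concurrently with other work items, while
 * my_ordered_func() is invoked in queueing order by run_ordered_work()
 * and my_ordered_free() releases the item once its turn has passed.
 */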