Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.8-rc6 (1139 lines, 28 kB)
// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>
#include <linux/task_work.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_EXITING	= 8,	/* worker exiting */
	IO_WORKER_F_FIXED	= 16,	/* static idle worker */
	IO_WORKER_F_BOUND	= 32,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct fs_struct *restore_fs;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	refcount_t use_refs;
};

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}

/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	return dropped_lock;
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
	unsigned nr_workers;

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
	spin_unlock_irq(&wqe->lock);

	/* all workers gone, wq exit can proceed */
	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);

	kfree_rcu(worker, rcu);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}

/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}

static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}
	if (!work->mm)
		return;

	if (mmget_not_zero(work->mm)) {
		kthread_use_mm(work->mm);
		worker->mm = work->mm;
		/* hang on to this mm */
		work->mm = NULL;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}

static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->creds);

	worker->cur_creds = work->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}

static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if (work->files && current->files != work->files) {
		task_lock(current);
		current->files = work->files;
		task_unlock(current);
	}
	if (work->fs && current->fs != work->fs)
		current->fs = work->fs;
	if (work->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if (worker->cur_creds != work->creds)
		io_wq_switch_creds(worker, work);
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		/* flush pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);
		cond_resched();
	}

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
		unsigned int hash;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work, *next_hashed, *linked;

			next_hashed = wq_next_work(work);
			io_impersonate_work(worker, work);
			/*
			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
			 * work, the worker function will do the right thing.
			 */
			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
				work->flags |= IO_WQ_WORK_CANCEL;

			hash = io_get_work_hash(work);
			linked = old_work = work;
			wq->do_work(&linked);
			linked = (old_work == linked) ? NULL : linked;

			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* dependent work is not hashed */
				hash = -1U;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		spin_lock_irq(&wqe->lock);
	} while (1);
}

static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	spin_unlock_irq(&wqe->lock);
}

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}

	spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	wake_up_process(worker->task);
	return true;
}

static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int workers_to_create = num_possible_nodes();
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, workers_to_create);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			goto err;
		workers_to_create--;
	}

	while (workers_to_create--)
		refcount_dec(&wq->refs);

	complete(&wq->done);

	while (!kthread_should_stop()) {
		if (current->task_works)
			task_work_run();

		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (current->task_works)
		task_work_run();

	return 0;
err:
	set_bit(IO_WQ_BIT_ERROR, &wq->state);
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (refcount_sub_and_test(workers_to_create, &wq->refs))
		complete(&wq->done);
	return 0;
}

static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(&work);
		work = (work == old_work) ? NULL : work;
		wq->free_work(old_work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}

static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;

		wq_list_del(&wqe->work_list, node, prev);
		spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
{
	return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes) {
		kfree(wq);
		return ERR_PTR(-ENOMEM);
	}

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
	return ERR_PTR(ret);
}

bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work || data->do_work != wq->do_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}

static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}

struct task_struct *io_wq_get_task(struct io_wq *wq)
{
	return wq->manager;
}
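
For orientation, here is a minimal sketch of how a caller might drive this pool through the exported entry points above (io_wq_create, io_wq_enqueue, io_wq_hash_work, io_wq_destroy). It is illustrative only: the my_* identifiers and the worker counts are made up, and the io_wq_data / io_wq_work callback shapes are assumed from how this file invokes do_work and free_work. The real consumer at this kernel version is io_uring.

/*
 * Hypothetical io-wq consumer, for illustration only. All my_* names are
 * invented; struct io_wq_data fields and callback signatures are inferred
 * from io-wq.h and from how wq->do_work()/wq->free_work() are called above.
 */
#include <linux/fs.h>
#include "io-wq.h"

/* do_work: runs on an io_wqe worker thread, possibly with IO_WQ_WORK_CANCEL set */
static void my_do_work(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;

	if (work->flags & IO_WQ_WORK_CANCEL) {
		/* cancelled before or while running: complete with an error */
		return;
	}
	/* ... perform the blocking operation that was punted to the pool ... */
	/* leaving *workptr unchanged means "no dependent work to queue next" */
}

/* free_work: drop whatever reference was taken when the work was queued */
static void my_free_work(struct io_wq_work *work)
{
}

static struct io_wq *my_create_pool(struct user_struct *user)
{
	struct io_wq_data data = {
		.user		= user,	/* NULL would leave no unbound worker budget */
		.do_work	= my_do_work,
		.free_work	= my_free_work,
	};

	/* allow up to 4 bounded workers per NUMA node; check IS_ERR() on return */
	return io_wq_create(4, &data);
}

static void my_queue_write(struct io_wq *wq, struct io_wq_work *work,
			   struct inode *inode)
{
	/* serialize with other writes hashed to the same inode */
	io_wq_hash_work(work, inode);
	io_wq_enqueue(wq, work);
}

Queueing unhashed work simply skips the io_wq_hash_work() call; tearing the pool down is a single io_wq_destroy(wq), which drops the last use_refs reference and waits for the workers to exit.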