/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
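
/*
 * For reference: a jiffy is 1/HZ seconds, so the HZ-based defaults above
 * are HZ-independent in wall-clock terms: cfq_slice_sync = HZ/10 = 100ms,
 * cfq_slice_async = HZ/25 = 40ms, cfq_slice_idle = HZ/125 = 8ms, and
 * cfq_fifo_expire = { HZ/4 = 250ms (async), HZ/8 = 125ms (sync) }.
 */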

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;

	int rq_in_driver;
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
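
/*
 * For example, CFQ_CFQQ_FNS(on_rr) below expands to three inline helpers:
 * cfq_mark_cfqq_on_rr() sets the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags,
 * cfq_clear_cfqq_on_rr() clears it, and cfq_cfqq_on_rr() tests it.
 */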
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
					     struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
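
/*
 * Rough worked example (assuming the default 100ms sync base slice): with
 * CFQ_SLICE_SCALE = 5, each priority step is worth base_slice/5 = 20ms,
 * centred on prio 4. So a prio 0 sync queue gets 100 + 20*4 = 180ms,
 * prio 4 gets exactly 100ms, and prio 7 gets 100 - 20*3 = 40ms.
 */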

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01	/* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02	/* request 2 wraps */
	unsigned wrap = 0;	/* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
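
/*
 * Rough worked example of the distance bias above: with the head at sector
 * 1000, a request at sector 1200 scores d = 200, while a request at sector
 * 900 scores d = (1000 - 900) * cfq_back_penalty = 200 with the default
 * penalty of 2. The two tie, so the forward request wins the s1 >= s2
 * tie-break. Anything more than back_max behind the head "wraps" and is
 * only chosen if the alternative also wraps.
 */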

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
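
/*
 * Rough worked example (100ms sync base slice again): a prio 0 sync queue
 * has cfq_prio_slice() equal to cfq_prio_slice(cfqd, 1, 0), so its offset
 * is 0 and it sorts near the front regardless of load. A prio 4 sync queue
 * with 4 busy queues is pushed (4 - 1) * (180 - 100) = 240ms further into
 * the future; lower priority thus means service-tree keys further from
 * "now".
 */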

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				 struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}
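
/*
 * Note on the rb_key choices above: add_front is only set from the preempt
 * path, and a key of 0 sorts a preempting queue ahead of every key derived
 * from jiffies, so it is picked next. Idle-class queues get the last key in
 * the tree plus CFQ_IDLE_DELAY (200ms worth of jiffies), parking them at
 * the very back.
 */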

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}
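
/*
 * An "alias" above is a request whose sort key (starting sector) collides
 * with one already in the rbtree; elv_rb_add() as used here hands back the
 * already-queued request instead of inserting a duplicate key, so the loop
 * sends the old one straight to the dispatch list and retries.
 */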

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
		     cfqd->rq_in_driver);

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
		     cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}
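
/*
 * Note: cfq_find_rq_fmerge() looks up a queued request that starts exactly
 * at the sector where the bio ends (bi_sector + bio_sectors), i.e. the
 * candidate for a front merge, where the bio is glued onto the front of
 * that request.
 */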

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Look up the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active");
		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, e.g.
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
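
/*
 * Rough numbers for the two heuristics above: seek_mean is kept in sectors,
 * so a process counts as "seeky" once its mean seek distance exceeds
 * 8*1024 sectors = 4 MiB. sample_valid() wants the fixed-point sample
 * count to exceed 80; with the (7*old + 256)/8 update used further down,
 * that takes roughly three observed requests.
 */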

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * still requests with the driver, don't idle
	 */
	if (cfqd->rq_in_driver)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for it to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_log(cfqd, "arm_idle: %lu", sl);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		rq = NULL;

	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
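
/*
 * Rough worked example with the default cfq_slice_async_rq of 2 and
 * CFQ_PRIO_LISTS = 8: a prio 0 async queue may dispatch up to
 * 2 * (2 + 2*7) = 32 requests per slice, prio 4 gets 2 * (2 + 2*3) = 16,
 * and prio 7 gets 2 * (2 + 0) = 4.
 */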

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		rq = cfq_check_fifo(cfqq);
		if (rq == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queues always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
	return dispatched;
}

static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		if (cfqq->dispatched >= max_dispatch) {
			if (cfqd->busy_queues > 1)
				break;
			if (cfqq->dispatched >= 4 * max_dispatch)
				break;
		}

		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
			break;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	cfq_log(cfqd, "dispatched=%d", dispatched);
	return dispatched;
}
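
/*
 * Dispatch depth in the loop above: the default quantum allows 4 requests
 * per round per queue, idle-class queues are capped at 1, and a queue that
 * already has max_dispatch requests outstanding is only pushed further
 * (up to 4 * max_dispatch) when it is the sole busy queue.
 */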

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}

/*
 * Must always be called with the rcu_read_lock() held
 */
static void
__call_for_each_cic(struct io_context *ioc,
		    void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	rcu_read_lock();
	__call_for_each_cic(ioc, func);
	rcu_read_unlock();
}

static void cfq_cic_free_rcu(struct rcu_head *head)
{
	struct cfq_io_context *cic;

	cic = container_of(head, struct cfq_io_context, rcu_head);

	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;

	BUG_ON(!cic->dead_key);

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
 * and ->trim() which is called with the task lock held
 */
static void cfq_free_io_context(struct io_context *ioc)
{
	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc. So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
	__call_for_each_cic(ioc, cic_free_func);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure key == NULL is seen for dead queues
	 */
	smp_wmb();
	cic->dead_key = (unsigned long) cic->key;
	cic->key = NULL;

	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
						  cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}
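
/*
 * A sketch of the resulting mapping, assuming the usual helpers: tasks with
 * no explicit io priority inherit one from their CPU nice level via
 * task_nice_ioprio(), roughly (nice + 20) / 5, so nice 0 lands on the
 * best-effort default of prio 4, while idle-class queues are pinned to the
 * lowest prio (7) with slice idling disabled.
 */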

static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_ioprio);
	ioc->ioprio_changed = 0;
}

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;

retry:
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);

		cfq_init_prio_data(cfqq, ioc);

		if (is_sync) {
			if (!cfq_class_idle(cfqq))
				cfq_mark_cfqq_idle_window(cfqq);
			cfq_mark_cfqq_sync(cfqq);
		}
		cfqq->pid = current->pid;
		cfq_log_cfqq(cfqd, cfqq, "alloced");
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq) {
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
		if (!cfqq)
			return NULL;
	}

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		atomic_inc(&cfqq->ref);
		*async_cfqq = cfqq;
	}

	atomic_inc(&cfqq->ref);
	return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
		  struct cfq_io_context *cic)
{
	unsigned long flags;

	WARN_ON(!list_empty(&cic->queue_list));

	spin_lock_irqsave(&ioc->lock, flags);

	BUG_ON(ioc->ioc_data == cic);

	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

static struct cfq_io_context *
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct cfq_io_context *cic;
	unsigned long flags;
	void *k;

	if (unlikely(!ioc))
		return NULL;

	rcu_read_lock();

	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = rcu_dereference(ioc->ioc_data);
	if (cic && cic->key == cfqd) {
		rcu_read_unlock();
		return cic;
	}

	do {
		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
		rcu_read_unlock();
		if (!cic)
			break;
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(cfqd, ioc, cic);
			rcu_read_lock();
			continue;
		}

		spin_lock_irqsave(&ioc->lock, flags);
		rcu_assign_pointer(ioc->ioc_data, cic);
		spin_unlock_irqrestore(&ioc->lock, flags);
		break;
	} while (1);

	return cic;
}
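
/*
 * Note on the lookup above: each io_context keeps its cfq_io_contexts in a
 * radix tree keyed by the cfq_data pointer cast to unsigned long, so a
 * process doing io to several cfq-managed devices holds one cic per device,
 * with ioc->ioc_data acting as a single-entry last-hit cache in front of
 * the tree.
 */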

/*
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->key = cfqd;

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->radix_root,
					(unsigned long) cfqd, cic);
		if (!ret)
			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
	}

	if (ret)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
		goto err_free;

out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err_free:
	cfq_cic_free(cic);
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60)	/* second & third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}
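
/*
 * The two updates above are fixed-point exponentially weighted moving
 * averages with weight 7/8 on the old value and a scale factor of 256.
 * Rough example for the thinktime one: after a long run of 4-jiffy think
 * times, ttime_samples converges towards 256 and ttime_total towards
 * 256*4, so ttime_mean converges to 4 (the +128 term just rounds the
 * division to nearest).
 */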

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no (or if we aren't sure); returning 1 causes a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}
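
/*
 * Tying the heuristics together: with the default 8ms slice_idle, a sync
 * queue keeps its idle window only while its mean thinktime stays at or
 * below 8ms; on a queueing (hw_tag) device a seeky process also loses it.
 * Losing the idle window means cfq_arm_slice_timer() will not wait on that
 * queue, so the disk moves on as soon as the queue drains.
 */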

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
		cfqd->rq_in_driver_peak = cfqd->rq_in_driver;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;

	cfqd->hw_tag_samples = 0;
	cfqd->rq_in_driver_peak = 0;
}

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete");

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}
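
/*
 * In numbers, with CFQ_HW_QUEUE_MIN = 5: samples are only counted while
 * more than 5 requests are queued or in the driver, and after 50 such
 * samples the device is flagged as command-queueing (hw_tag = 1) if the
 * observed peak driver depth reached 5 or more; the window then resets.
 */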

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->ioc);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);

		if (!cfqq)
			goto queue_fail;

		cic_set_cfqq(cic, cfqq, is_sync);
	}

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	cfq_log(cfqd, "set_request fail");
	return 1;
}

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	kblockd_flush_work(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd);
}

static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->last_end_request = jiffies;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->hw_tag = 1;

	return cfqd;
}
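
/*
 * Worked example, not in the original source: the slice defaults copied
 * by cfq_init_queue() above are stored in jiffies, so their wall-clock
 * length depends on CONFIG_HZ.  With HZ == 1000:
 *
 *	cfq_slice_sync  = HZ / 10   ->  100 jiffies = 100 ms
 *	cfq_slice_async = HZ / 25   ->   40 jiffies =  40 ms
 *	cfq_slice_idle  = HZ / 125  ->    8 jiffies =   8 ms
 *
 * The sysfs handlers further below convert with jiffies_to_msecs() and
 * msecs_to_jiffies(), so userspace reads and writes milliseconds
 * regardless of HZ.
 */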

static void cfq_slab_kill(void)
{
	/*
	 * Caller already ensured that pending RCU callbacks are completed,
	 * so we should have no busy allocations at this point.
	 */
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(elevator_t *e, char *page) \
{ \
	struct cfq_data *cfqd = e->elevator_data; \
	unsigned int __data = __VAR; \
	if (__CONV) \
		__data = jiffies_to_msecs(__data); \
	return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
{ \
	struct cfq_data *cfqd = e->elevator_data; \
	unsigned int __data; \
	int ret = cfq_var_store(&__data, (page), count); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	if (__CONV) \
		*(__PTR) = msecs_to_jiffies(__data); \
	else \
		*(__PTR) = __data; \
	return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};
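
/*
 * Usage note, not in the original source: once this elevator is active
 * on a queue, the attributes above are exposed by the elevator core
 * under /sys/block/<disk>/queue/iosched/, e.g. (device name assumed):
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	8
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Attributes declared with __CONV == 1 are shown in milliseconds; the
 * others (quantum, back_seek_max, back_seek_penalty, slice_async_rq)
 * are raw values.
 */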

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_allow_merge_fn = cfq_allow_merge,
		.elevator_dispatch_fn = cfq_dispatch_requests,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_activate_req_fn = cfq_activate_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_queue_empty_fn = cfq_queue_empty,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
		.trim = cfq_free_io_context,
	},
	.elevator_attrs = cfq_attrs,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};

static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
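
/*
 * Illustrative sketch, not part of the original file: the release side
 * of the ioc_gone handshake that cfq_exit() above waits on.  Earlier in
 * this file, the cfq_io_context free path decrements ioc_count and, if
 * a module unload is in progress, completes ioc_gone once the last
 * context is gone; ioc_gone_lock keeps two concurrent frees from
 * completing the same completion twice.  Roughly:
 */
static inline void example_ioc_gone_release(void)
{
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ is unloading: if this was the last io context,
		 * wake the waiter in cfq_exit().
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}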