Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.24-rc5 (2320 lines, 55 kB)
1/* 2 * CFQ, or complete fairness queueing, disk scheduler. 3 * 4 * Based on ideas from a previously unfinished io 5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. 6 * 7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> 8 */ 9#include <linux/module.h> 10#include <linux/blkdev.h> 11#include <linux/elevator.h> 12#include <linux/rbtree.h> 13#include <linux/ioprio.h> 14 15/* 16 * tunables 17 */ 18static const int cfq_quantum = 4; /* max queue in one round of service */ 19static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; 20static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ 21static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ 22 23static const int cfq_slice_sync = HZ / 10; 24static int cfq_slice_async = HZ / 25; 25static const int cfq_slice_async_rq = 2; 26static int cfq_slice_idle = HZ / 125; 27 28/* 29 * grace period before allowing idle class to get disk access 30 */ 31#define CFQ_IDLE_GRACE (HZ / 10) 32 33/* 34 * below this threshold, we consider thinktime immediate 35 */ 36#define CFQ_MIN_TT (2) 37 38#define CFQ_SLICE_SCALE (5) 39 40#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) 41#define RQ_CFQQ(rq) ((rq)->elevator_private2) 42 43static struct kmem_cache *cfq_pool; 44static struct kmem_cache *cfq_ioc_pool; 45 46static DEFINE_PER_CPU(unsigned long, ioc_count); 47static struct completion *ioc_gone; 48 49#define CFQ_PRIO_LISTS IOPRIO_BE_NR 50#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) 51#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) 52 53#define ASYNC (0) 54#define SYNC (1) 55 56#define sample_valid(samples) ((samples) > 80) 57 58/* 59 * Most of our rbtree usage is for sorting with min extraction, so 60 * if we cache the leftmost node we don't have to walk down the tree 61 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should 62 * move this into the elevator for the rq sorting as well. 
63 */ 64struct cfq_rb_root { 65 struct rb_root rb; 66 struct rb_node *left; 67}; 68#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, } 69 70/* 71 * Per block device queue structure 72 */ 73struct cfq_data { 74 struct request_queue *queue; 75 76 /* 77 * rr list of queues with requests and the count of them 78 */ 79 struct cfq_rb_root service_tree; 80 unsigned int busy_queues; 81 82 int rq_in_driver; 83 int sync_flight; 84 int hw_tag; 85 86 /* 87 * idle window management 88 */ 89 struct timer_list idle_slice_timer; 90 struct work_struct unplug_work; 91 92 struct cfq_queue *active_queue; 93 struct cfq_io_context *active_cic; 94 95 /* 96 * async queue for each priority case 97 */ 98 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; 99 struct cfq_queue *async_idle_cfqq; 100 101 struct timer_list idle_class_timer; 102 103 sector_t last_position; 104 unsigned long last_end_request; 105 106 /* 107 * tunables, see top of file 108 */ 109 unsigned int cfq_quantum; 110 unsigned int cfq_fifo_expire[2]; 111 unsigned int cfq_back_penalty; 112 unsigned int cfq_back_max; 113 unsigned int cfq_slice[2]; 114 unsigned int cfq_slice_async_rq; 115 unsigned int cfq_slice_idle; 116 117 struct list_head cic_list; 118}; 119 120/* 121 * Per process-grouping structure 122 */ 123struct cfq_queue { 124 /* reference count */ 125 atomic_t ref; 126 /* parent cfq_data */ 127 struct cfq_data *cfqd; 128 /* service_tree member */ 129 struct rb_node rb_node; 130 /* service_tree key */ 131 unsigned long rb_key; 132 /* sorted list of pending requests */ 133 struct rb_root sort_list; 134 /* if fifo isn't expired, next request to serve */ 135 struct request *next_rq; 136 /* requests queued in sort_list */ 137 int queued[2]; 138 /* currently allocated requests */ 139 int allocated[2]; 140 /* pending metadata requests */ 141 int meta_pending; 142 /* fifo list of requests in sort_list */ 143 struct list_head fifo; 144 145 unsigned long slice_end; 146 long slice_resid; 147 148 /* number of requests that are on the dispatch list or inside driver */ 149 int dispatched; 150 151 /* io prio of this group */ 152 unsigned short ioprio, org_ioprio; 153 unsigned short ioprio_class, org_ioprio_class; 154 155 /* various state flags, see below */ 156 unsigned int flags; 157}; 158 159enum cfqq_state_flags { 160 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ 161 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ 162 CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ 163 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ 164 CFQ_CFQQ_FLAG_must_dispatch, /* must dispatch, even if expired */ 165 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ 166 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ 167 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ 168 CFQ_CFQQ_FLAG_queue_new, /* queue never been serviced */ 169 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 170 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 171}; 172 173#define CFQ_CFQQ_FNS(name) \ 174static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ 175{ \ 176 cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ 177} \ 178static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ 179{ \ 180 cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ 181} \ 182static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ 183{ \ 184 return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ 185} 186 187CFQ_CFQQ_FNS(on_rr); 188CFQ_CFQQ_FNS(wait_request); 189CFQ_CFQQ_FNS(must_alloc); 190CFQ_CFQQ_FNS(must_alloc_slice); 
191CFQ_CFQQ_FNS(must_dispatch); 192CFQ_CFQQ_FNS(fifo_expire); 193CFQ_CFQQ_FNS(idle_window); 194CFQ_CFQQ_FNS(prio_changed); 195CFQ_CFQQ_FNS(queue_new); 196CFQ_CFQQ_FNS(slice_new); 197CFQ_CFQQ_FNS(sync); 198#undef CFQ_CFQQ_FNS 199 200static void cfq_dispatch_insert(struct request_queue *, struct request *); 201static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, 202 struct task_struct *, gfp_t); 203static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *, 204 struct io_context *); 205 206static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, 207 int is_sync) 208{ 209 return cic->cfqq[!!is_sync]; 210} 211 212static inline void cic_set_cfqq(struct cfq_io_context *cic, 213 struct cfq_queue *cfqq, int is_sync) 214{ 215 cic->cfqq[!!is_sync] = cfqq; 216} 217 218/* 219 * We regard a request as SYNC, if it's either a read or has the SYNC bit 220 * set (in which case it could also be direct WRITE). 221 */ 222static inline int cfq_bio_sync(struct bio *bio) 223{ 224 if (bio_data_dir(bio) == READ || bio_sync(bio)) 225 return 1; 226 227 return 0; 228} 229 230/* 231 * scheduler run of queue, if there are requests pending and no one in the 232 * driver that will restart queueing 233 */ 234static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 235{ 236 if (cfqd->busy_queues) 237 kblockd_schedule_work(&cfqd->unplug_work); 238} 239 240static int cfq_queue_empty(struct request_queue *q) 241{ 242 struct cfq_data *cfqd = q->elevator->elevator_data; 243 244 return !cfqd->busy_queues; 245} 246 247/* 248 * Scale schedule slice based on io priority. Use the sync time slice only 249 * if a queue is marked sync and has sync io queued. A sync queue with async 250 * io only, should not get full sync slice length. 251 */ 252static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync, 253 unsigned short prio) 254{ 255 const int base_slice = cfqd->cfq_slice[sync]; 256 257 WARN_ON(prio >= IOPRIO_BE_NR); 258 259 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)); 260} 261 262static inline int 263cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 264{ 265 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); 266} 267 268static inline void 269cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 270{ 271 cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; 272} 273 274/* 275 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end 276 * isn't valid until the first request from the dispatch is activated 277 * and the slice time set. 278 */ 279static inline int cfq_slice_used(struct cfq_queue *cfqq) 280{ 281 if (cfq_cfqq_slice_new(cfqq)) 282 return 0; 283 if (time_before(jiffies, cfqq->slice_end)) 284 return 0; 285 286 return 1; 287} 288 289/* 290 * Lifted from AS - choose which of rq1 and rq2 that is best served now. 291 * We choose the request that is closest to the head right now. Distance 292 * behind the head is penalized and only allowed to a certain extent. 293 */ 294static struct request * 295cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) 296{ 297 sector_t last, s1, s2, d1 = 0, d2 = 0; 298 unsigned long back_max; 299#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ 300#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ 301 unsigned wrap = 0; /* bit mask: requests behind the disk head? 
*/ 302 303 if (rq1 == NULL || rq1 == rq2) 304 return rq2; 305 if (rq2 == NULL) 306 return rq1; 307 308 if (rq_is_sync(rq1) && !rq_is_sync(rq2)) 309 return rq1; 310 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 311 return rq2; 312 if (rq_is_meta(rq1) && !rq_is_meta(rq2)) 313 return rq1; 314 else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 315 return rq2; 316 317 s1 = rq1->sector; 318 s2 = rq2->sector; 319 320 last = cfqd->last_position; 321 322 /* 323 * by definition, 1KiB is 2 sectors 324 */ 325 back_max = cfqd->cfq_back_max * 2; 326 327 /* 328 * Strict one way elevator _except_ in the case where we allow 329 * short backward seeks which are biased as twice the cost of a 330 * similar forward seek. 331 */ 332 if (s1 >= last) 333 d1 = s1 - last; 334 else if (s1 + back_max >= last) 335 d1 = (last - s1) * cfqd->cfq_back_penalty; 336 else 337 wrap |= CFQ_RQ1_WRAP; 338 339 if (s2 >= last) 340 d2 = s2 - last; 341 else if (s2 + back_max >= last) 342 d2 = (last - s2) * cfqd->cfq_back_penalty; 343 else 344 wrap |= CFQ_RQ2_WRAP; 345 346 /* Found required data */ 347 348 /* 349 * By doing switch() on the bit mask "wrap" we avoid having to 350 * check two variables for all permutations: --> faster! 351 */ 352 switch (wrap) { 353 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ 354 if (d1 < d2) 355 return rq1; 356 else if (d2 < d1) 357 return rq2; 358 else { 359 if (s1 >= s2) 360 return rq1; 361 else 362 return rq2; 363 } 364 365 case CFQ_RQ2_WRAP: 366 return rq1; 367 case CFQ_RQ1_WRAP: 368 return rq2; 369 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ 370 default: 371 /* 372 * Since both rqs are wrapped, 373 * start with the one that's further behind head 374 * (--> only *one* back seek required), 375 * since back seek takes more time than forward. 376 */ 377 if (s1 <= s2) 378 return rq1; 379 else 380 return rq2; 381 } 382} 383 384/* 385 * The below is leftmost cache rbtree addon 386 */ 387static struct rb_node *cfq_rb_first(struct cfq_rb_root *root) 388{ 389 if (!root->left) 390 root->left = rb_first(&root->rb); 391 392 return root->left; 393} 394 395static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) 396{ 397 if (root->left == n) 398 root->left = NULL; 399 400 rb_erase(n, &root->rb); 401 RB_CLEAR_NODE(n); 402} 403 404/* 405 * would be nice to take fifo expire time into account as well 406 */ 407static struct request * 408cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 409 struct request *last) 410{ 411 struct rb_node *rbnext = rb_next(&last->rb_node); 412 struct rb_node *rbprev = rb_prev(&last->rb_node); 413 struct request *next = NULL, *prev = NULL; 414 415 BUG_ON(RB_EMPTY_NODE(&last->rb_node)); 416 417 if (rbprev) 418 prev = rb_entry_rq(rbprev); 419 420 if (rbnext) 421 next = rb_entry_rq(rbnext); 422 else { 423 rbnext = rb_first(&cfqq->sort_list); 424 if (rbnext && rbnext != &last->rb_node) 425 next = rb_entry_rq(rbnext); 426 } 427 428 return cfq_choose_req(cfqd, next, prev); 429} 430 431static unsigned long cfq_slice_offset(struct cfq_data *cfqd, 432 struct cfq_queue *cfqq) 433{ 434 /* 435 * just an approximation, should be ok. 436 */ 437 return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) - 438 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); 439} 440 441/* 442 * The cfqd->service_tree holds all pending cfq_queue's that have 443 * requests waiting to be processed. It is sorted in the order that 444 * we will service the queues. 
445 */ 446static void cfq_service_tree_add(struct cfq_data *cfqd, 447 struct cfq_queue *cfqq, int add_front) 448{ 449 struct rb_node **p = &cfqd->service_tree.rb.rb_node; 450 struct rb_node *parent = NULL; 451 unsigned long rb_key; 452 int left; 453 454 if (!add_front) { 455 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; 456 rb_key += cfqq->slice_resid; 457 cfqq->slice_resid = 0; 458 } else 459 rb_key = 0; 460 461 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 462 /* 463 * same position, nothing more to do 464 */ 465 if (rb_key == cfqq->rb_key) 466 return; 467 468 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 469 } 470 471 left = 1; 472 while (*p) { 473 struct cfq_queue *__cfqq; 474 struct rb_node **n; 475 476 parent = *p; 477 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 478 479 /* 480 * sort RT queues first, we always want to give 481 * preference to them. IDLE queues goes to the back. 482 * after that, sort on the next service time. 483 */ 484 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq)) 485 n = &(*p)->rb_left; 486 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq)) 487 n = &(*p)->rb_right; 488 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq)) 489 n = &(*p)->rb_left; 490 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq)) 491 n = &(*p)->rb_right; 492 else if (rb_key < __cfqq->rb_key) 493 n = &(*p)->rb_left; 494 else 495 n = &(*p)->rb_right; 496 497 if (n == &(*p)->rb_right) 498 left = 0; 499 500 p = n; 501 } 502 503 if (left) 504 cfqd->service_tree.left = &cfqq->rb_node; 505 506 cfqq->rb_key = rb_key; 507 rb_link_node(&cfqq->rb_node, parent, p); 508 rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb); 509} 510 511/* 512 * Update cfqq's position in the service tree. 513 */ 514static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) 515{ 516 /* 517 * Resorting requires the cfqq to be on the RR list already. 518 */ 519 if (cfq_cfqq_on_rr(cfqq)) 520 cfq_service_tree_add(cfqd, cfqq, 0); 521} 522 523/* 524 * add to busy list of queues for service, trying to be fair in ordering 525 * the pending list according to last request service 526 */ 527static inline void 528cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 529{ 530 BUG_ON(cfq_cfqq_on_rr(cfqq)); 531 cfq_mark_cfqq_on_rr(cfqq); 532 cfqd->busy_queues++; 533 534 cfq_resort_rr_list(cfqd, cfqq); 535} 536 537/* 538 * Called when the cfqq no longer has requests pending, remove it from 539 * the service tree. 540 */ 541static inline void 542cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 543{ 544 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 545 cfq_clear_cfqq_on_rr(cfqq); 546 547 if (!RB_EMPTY_NODE(&cfqq->rb_node)) 548 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 549 550 BUG_ON(!cfqd->busy_queues); 551 cfqd->busy_queues--; 552} 553 554/* 555 * rb tree support functions 556 */ 557static inline void cfq_del_rq_rb(struct request *rq) 558{ 559 struct cfq_queue *cfqq = RQ_CFQQ(rq); 560 struct cfq_data *cfqd = cfqq->cfqd; 561 const int sync = rq_is_sync(rq); 562 563 BUG_ON(!cfqq->queued[sync]); 564 cfqq->queued[sync]--; 565 566 elv_rb_del(&cfqq->sort_list, rq); 567 568 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) 569 cfq_del_cfqq_rr(cfqd, cfqq); 570} 571 572static void cfq_add_rq_rb(struct request *rq) 573{ 574 struct cfq_queue *cfqq = RQ_CFQQ(rq); 575 struct cfq_data *cfqd = cfqq->cfqd; 576 struct request *__alias; 577 578 cfqq->queued[rq_is_sync(rq)]++; 579 580 /* 581 * looks a little odd, but the first insert might return an alias. 
582 * if that happens, put the alias on the dispatch list 583 */ 584 while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL) 585 cfq_dispatch_insert(cfqd->queue, __alias); 586 587 if (!cfq_cfqq_on_rr(cfqq)) 588 cfq_add_cfqq_rr(cfqd, cfqq); 589 590 /* 591 * check if this request is a better next-serve candidate 592 */ 593 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq); 594 BUG_ON(!cfqq->next_rq); 595} 596 597static inline void 598cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) 599{ 600 elv_rb_del(&cfqq->sort_list, rq); 601 cfqq->queued[rq_is_sync(rq)]--; 602 cfq_add_rq_rb(rq); 603} 604 605static struct request * 606cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) 607{ 608 struct task_struct *tsk = current; 609 struct cfq_io_context *cic; 610 struct cfq_queue *cfqq; 611 612 cic = cfq_cic_rb_lookup(cfqd, tsk->io_context); 613 if (!cic) 614 return NULL; 615 616 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 617 if (cfqq) { 618 sector_t sector = bio->bi_sector + bio_sectors(bio); 619 620 return elv_rb_find(&cfqq->sort_list, sector); 621 } 622 623 return NULL; 624} 625 626static void cfq_activate_request(struct request_queue *q, struct request *rq) 627{ 628 struct cfq_data *cfqd = q->elevator->elevator_data; 629 630 cfqd->rq_in_driver++; 631 632 /* 633 * If the depth is larger 1, it really could be queueing. But lets 634 * make the mark a little higher - idling could still be good for 635 * low queueing, and a low queueing number could also just indicate 636 * a SCSI mid layer like behaviour where limit+1 is often seen. 637 */ 638 if (!cfqd->hw_tag && cfqd->rq_in_driver > 4) 639 cfqd->hw_tag = 1; 640 641 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 642} 643 644static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 645{ 646 struct cfq_data *cfqd = q->elevator->elevator_data; 647 648 WARN_ON(!cfqd->rq_in_driver); 649 cfqd->rq_in_driver--; 650} 651 652static void cfq_remove_request(struct request *rq) 653{ 654 struct cfq_queue *cfqq = RQ_CFQQ(rq); 655 656 if (cfqq->next_rq == rq) 657 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); 658 659 list_del_init(&rq->queuelist); 660 cfq_del_rq_rb(rq); 661 662 if (rq_is_meta(rq)) { 663 WARN_ON(!cfqq->meta_pending); 664 cfqq->meta_pending--; 665 } 666} 667 668static int cfq_merge(struct request_queue *q, struct request **req, 669 struct bio *bio) 670{ 671 struct cfq_data *cfqd = q->elevator->elevator_data; 672 struct request *__rq; 673 674 __rq = cfq_find_rq_fmerge(cfqd, bio); 675 if (__rq && elv_rq_merge_ok(__rq, bio)) { 676 *req = __rq; 677 return ELEVATOR_FRONT_MERGE; 678 } 679 680 return ELEVATOR_NO_MERGE; 681} 682 683static void cfq_merged_request(struct request_queue *q, struct request *req, 684 int type) 685{ 686 if (type == ELEVATOR_FRONT_MERGE) { 687 struct cfq_queue *cfqq = RQ_CFQQ(req); 688 689 cfq_reposition_rq_rb(cfqq, req); 690 } 691} 692 693static void 694cfq_merged_requests(struct request_queue *q, struct request *rq, 695 struct request *next) 696{ 697 /* 698 * reposition in fifo if next is older than rq 699 */ 700 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 701 time_before(next->start_time, rq->start_time)) 702 list_move(&rq->queuelist, &next->queuelist); 703 704 cfq_remove_request(next); 705} 706 707static int cfq_allow_merge(struct request_queue *q, struct request *rq, 708 struct bio *bio) 709{ 710 struct cfq_data *cfqd = q->elevator->elevator_data; 711 struct cfq_io_context *cic; 712 struct cfq_queue *cfqq; 713 714 /* 715 * 
Disallow merge of a sync bio into an async request. 716 */ 717 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) 718 return 0; 719 720 /* 721 * Lookup the cfqq that this bio will be queued with. Allow 722 * merge only if rq is queued there. 723 */ 724 cic = cfq_cic_rb_lookup(cfqd, current->io_context); 725 if (!cic) 726 return 0; 727 728 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 729 if (cfqq == RQ_CFQQ(rq)) 730 return 1; 731 732 return 0; 733} 734 735static inline void 736__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 737{ 738 if (cfqq) { 739 /* 740 * stop potential idle class queues waiting service 741 */ 742 del_timer(&cfqd->idle_class_timer); 743 744 cfqq->slice_end = 0; 745 cfq_clear_cfqq_must_alloc_slice(cfqq); 746 cfq_clear_cfqq_fifo_expire(cfqq); 747 cfq_mark_cfqq_slice_new(cfqq); 748 cfq_clear_cfqq_queue_new(cfqq); 749 } 750 751 cfqd->active_queue = cfqq; 752} 753 754/* 755 * current cfqq expired its slice (or was too idle), select new one 756 */ 757static void 758__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 759 int timed_out) 760{ 761 if (cfq_cfqq_wait_request(cfqq)) 762 del_timer(&cfqd->idle_slice_timer); 763 764 cfq_clear_cfqq_must_dispatch(cfqq); 765 cfq_clear_cfqq_wait_request(cfqq); 766 767 /* 768 * store what was left of this slice, if the queue idled/timed out 769 */ 770 if (timed_out && !cfq_cfqq_slice_new(cfqq)) 771 cfqq->slice_resid = cfqq->slice_end - jiffies; 772 773 cfq_resort_rr_list(cfqd, cfqq); 774 775 if (cfqq == cfqd->active_queue) 776 cfqd->active_queue = NULL; 777 778 if (cfqd->active_cic) { 779 put_io_context(cfqd->active_cic->ioc); 780 cfqd->active_cic = NULL; 781 } 782} 783 784static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out) 785{ 786 struct cfq_queue *cfqq = cfqd->active_queue; 787 788 if (cfqq) 789 __cfq_slice_expired(cfqd, cfqq, timed_out); 790} 791 792static int start_idle_class_timer(struct cfq_data *cfqd) 793{ 794 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE; 795 unsigned long now = jiffies; 796 797 if (time_before(now, end) && 798 time_after_eq(now, cfqd->last_end_request)) { 799 mod_timer(&cfqd->idle_class_timer, end); 800 return 1; 801 } 802 803 return 0; 804} 805 806/* 807 * Get next queue for service. Unless we have a queue preemption, 808 * we'll simply select the first cfqq in the service tree. 809 */ 810static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) 811{ 812 struct cfq_queue *cfqq; 813 struct rb_node *n; 814 815 if (RB_EMPTY_ROOT(&cfqd->service_tree.rb)) 816 return NULL; 817 818 n = cfq_rb_first(&cfqd->service_tree); 819 cfqq = rb_entry(n, struct cfq_queue, rb_node); 820 821 if (cfq_class_idle(cfqq)) { 822 /* 823 * if we have idle queues and no rt or be queues had 824 * pending requests, either allow immediate service if 825 * the grace period has passed or arm the idle grace 826 * timer 827 */ 828 if (start_idle_class_timer(cfqd)) 829 cfqq = NULL; 830 } 831 832 return cfqq; 833} 834 835/* 836 * Get and set a new active queue for service. 
837 */ 838static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) 839{ 840 struct cfq_queue *cfqq; 841 842 cfqq = cfq_get_next_queue(cfqd); 843 __cfq_set_active_queue(cfqd, cfqq); 844 return cfqq; 845} 846 847static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 848 struct request *rq) 849{ 850 if (rq->sector >= cfqd->last_position) 851 return rq->sector - cfqd->last_position; 852 else 853 return cfqd->last_position - rq->sector; 854} 855 856static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq) 857{ 858 struct cfq_io_context *cic = cfqd->active_cic; 859 860 if (!sample_valid(cic->seek_samples)) 861 return 0; 862 863 return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean; 864} 865 866static int cfq_close_cooperator(struct cfq_data *cfq_data, 867 struct cfq_queue *cfqq) 868{ 869 /* 870 * We should notice if some of the queues are cooperating, eg 871 * working closely on the same area of the disk. In that case, 872 * we can group them together and don't waste time idling. 873 */ 874 return 0; 875} 876 877#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024)) 878 879static void cfq_arm_slice_timer(struct cfq_data *cfqd) 880{ 881 struct cfq_queue *cfqq = cfqd->active_queue; 882 struct cfq_io_context *cic; 883 unsigned long sl; 884 885 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); 886 WARN_ON(cfq_cfqq_slice_new(cfqq)); 887 888 /* 889 * idle is disabled, either manually or by past process history 890 */ 891 if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq)) 892 return; 893 894 /* 895 * task has exited, don't wait 896 */ 897 cic = cfqd->active_cic; 898 if (!cic || !cic->ioc->task) 899 return; 900 901 /* 902 * See if this prio level has a good candidate 903 */ 904 if (cfq_close_cooperator(cfqd, cfqq) && 905 (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2)) 906 return; 907 908 cfq_mark_cfqq_must_dispatch(cfqq); 909 cfq_mark_cfqq_wait_request(cfqq); 910 911 /* 912 * we don't want to idle for seeks, but we do want to allow 913 * fair distribution of slice time for a process doing back-to-back 914 * seeks. so allow a little bit of time for him to submit a new rq 915 */ 916 sl = cfqd->cfq_slice_idle; 917 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 918 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT)); 919 920 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 921} 922 923/* 924 * Move request from internal lists to the request queue dispatch list. 
925 */ 926static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) 927{ 928 struct cfq_data *cfqd = q->elevator->elevator_data; 929 struct cfq_queue *cfqq = RQ_CFQQ(rq); 930 931 cfq_remove_request(rq); 932 cfqq->dispatched++; 933 elv_dispatch_sort(q, rq); 934 935 if (cfq_cfqq_sync(cfqq)) 936 cfqd->sync_flight++; 937} 938 939/* 940 * return expired entry, or NULL to just start from scratch in rbtree 941 */ 942static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq) 943{ 944 struct cfq_data *cfqd = cfqq->cfqd; 945 struct request *rq; 946 int fifo; 947 948 if (cfq_cfqq_fifo_expire(cfqq)) 949 return NULL; 950 951 cfq_mark_cfqq_fifo_expire(cfqq); 952 953 if (list_empty(&cfqq->fifo)) 954 return NULL; 955 956 fifo = cfq_cfqq_sync(cfqq); 957 rq = rq_entry_fifo(cfqq->fifo.next); 958 959 if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) 960 return NULL; 961 962 return rq; 963} 964 965static inline int 966cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 967{ 968 const int base_rq = cfqd->cfq_slice_async_rq; 969 970 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 971 972 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); 973} 974 975/* 976 * Select a queue for service. If we have a current active queue, 977 * check whether to continue servicing it, or retrieve and set a new one. 978 */ 979static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 980{ 981 struct cfq_queue *cfqq; 982 983 cfqq = cfqd->active_queue; 984 if (!cfqq) 985 goto new_queue; 986 987 /* 988 * The active queue has run out of time, expire it and select new. 989 */ 990 if (cfq_slice_used(cfqq)) 991 goto expire; 992 993 /* 994 * The active queue has requests and isn't expired, allow it to 995 * dispatch. 996 */ 997 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 998 goto keep_queue; 999 1000 /* 1001 * No requests pending. If the active queue still has requests in 1002 * flight or is idling for a new request, allow either of these 1003 * conditions to happen (or time out) before selecting a new queue. 1004 */ 1005 if (timer_pending(&cfqd->idle_slice_timer) || 1006 (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) { 1007 cfqq = NULL; 1008 goto keep_queue; 1009 } 1010 1011expire: 1012 cfq_slice_expired(cfqd, 0); 1013new_queue: 1014 cfqq = cfq_set_active_queue(cfqd); 1015keep_queue: 1016 return cfqq; 1017} 1018 1019/* 1020 * Dispatch some requests from cfqq, moving them to the request queue 1021 * dispatch list. 1022 */ 1023static int 1024__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1025 int max_dispatch) 1026{ 1027 int dispatched = 0; 1028 1029 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 1030 1031 do { 1032 struct request *rq; 1033 1034 /* 1035 * follow expired path, else get first next available 1036 */ 1037 if ((rq = cfq_check_fifo(cfqq)) == NULL) 1038 rq = cfqq->next_rq; 1039 1040 /* 1041 * finally, insert request into driver dispatch list 1042 */ 1043 cfq_dispatch_insert(cfqd->queue, rq); 1044 1045 dispatched++; 1046 1047 if (!cfqd->active_cic) { 1048 atomic_inc(&RQ_CIC(rq)->ioc->refcount); 1049 cfqd->active_cic = RQ_CIC(rq); 1050 } 1051 1052 if (RB_EMPTY_ROOT(&cfqq->sort_list)) 1053 break; 1054 1055 } while (dispatched < max_dispatch); 1056 1057 /* 1058 * expire an async queue immediately if it has used up its slice. idle 1059 * queue always expire after 1 dispatch round. 
1060 */ 1061 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && 1062 dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) || 1063 cfq_class_idle(cfqq))) { 1064 cfqq->slice_end = jiffies + 1; 1065 cfq_slice_expired(cfqd, 0); 1066 } 1067 1068 return dispatched; 1069} 1070 1071static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) 1072{ 1073 int dispatched = 0; 1074 1075 while (cfqq->next_rq) { 1076 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); 1077 dispatched++; 1078 } 1079 1080 BUG_ON(!list_empty(&cfqq->fifo)); 1081 return dispatched; 1082} 1083 1084/* 1085 * Drain our current requests. Used for barriers and when switching 1086 * io schedulers on-the-fly. 1087 */ 1088static int cfq_forced_dispatch(struct cfq_data *cfqd) 1089{ 1090 int dispatched = 0; 1091 struct rb_node *n; 1092 1093 while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) { 1094 struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node); 1095 1096 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 1097 } 1098 1099 cfq_slice_expired(cfqd, 0); 1100 1101 BUG_ON(cfqd->busy_queues); 1102 1103 return dispatched; 1104} 1105 1106static int cfq_dispatch_requests(struct request_queue *q, int force) 1107{ 1108 struct cfq_data *cfqd = q->elevator->elevator_data; 1109 struct cfq_queue *cfqq; 1110 int dispatched; 1111 1112 if (!cfqd->busy_queues) 1113 return 0; 1114 1115 if (unlikely(force)) 1116 return cfq_forced_dispatch(cfqd); 1117 1118 dispatched = 0; 1119 while ((cfqq = cfq_select_queue(cfqd)) != NULL) { 1120 int max_dispatch; 1121 1122 max_dispatch = cfqd->cfq_quantum; 1123 if (cfq_class_idle(cfqq)) 1124 max_dispatch = 1; 1125 1126 if (cfqq->dispatched >= max_dispatch) { 1127 if (cfqd->busy_queues > 1) 1128 break; 1129 if (cfqq->dispatched >= 4 * max_dispatch) 1130 break; 1131 } 1132 1133 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) 1134 break; 1135 1136 cfq_clear_cfqq_must_dispatch(cfqq); 1137 cfq_clear_cfqq_wait_request(cfqq); 1138 del_timer(&cfqd->idle_slice_timer); 1139 1140 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); 1141 } 1142 1143 return dispatched; 1144} 1145 1146/* 1147 * task holds one reference to the queue, dropped when task exits. each rq 1148 * in-flight on this queue also holds a reference, dropped when rq is freed. 1149 * 1150 * queue lock must be held here. 
1151 */ 1152static void cfq_put_queue(struct cfq_queue *cfqq) 1153{ 1154 struct cfq_data *cfqd = cfqq->cfqd; 1155 1156 BUG_ON(atomic_read(&cfqq->ref) <= 0); 1157 1158 if (!atomic_dec_and_test(&cfqq->ref)) 1159 return; 1160 1161 BUG_ON(rb_first(&cfqq->sort_list)); 1162 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); 1163 BUG_ON(cfq_cfqq_on_rr(cfqq)); 1164 1165 if (unlikely(cfqd->active_queue == cfqq)) { 1166 __cfq_slice_expired(cfqd, cfqq, 0); 1167 cfq_schedule_dispatch(cfqd); 1168 } 1169 1170 kmem_cache_free(cfq_pool, cfqq); 1171} 1172 1173static void cfq_free_io_context(struct io_context *ioc) 1174{ 1175 struct cfq_io_context *__cic; 1176 struct rb_node *n; 1177 int freed = 0; 1178 1179 ioc->ioc_data = NULL; 1180 1181 while ((n = rb_first(&ioc->cic_root)) != NULL) { 1182 __cic = rb_entry(n, struct cfq_io_context, rb_node); 1183 rb_erase(&__cic->rb_node, &ioc->cic_root); 1184 kmem_cache_free(cfq_ioc_pool, __cic); 1185 freed++; 1186 } 1187 1188 elv_ioc_count_mod(ioc_count, -freed); 1189 1190 if (ioc_gone && !elv_ioc_count_read(ioc_count)) 1191 complete(ioc_gone); 1192} 1193 1194static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1195{ 1196 if (unlikely(cfqq == cfqd->active_queue)) { 1197 __cfq_slice_expired(cfqd, cfqq, 0); 1198 cfq_schedule_dispatch(cfqd); 1199 } 1200 1201 cfq_put_queue(cfqq); 1202} 1203 1204static void __cfq_exit_single_io_context(struct cfq_data *cfqd, 1205 struct cfq_io_context *cic) 1206{ 1207 list_del_init(&cic->queue_list); 1208 smp_wmb(); 1209 cic->key = NULL; 1210 1211 if (cic->cfqq[ASYNC]) { 1212 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]); 1213 cic->cfqq[ASYNC] = NULL; 1214 } 1215 1216 if (cic->cfqq[SYNC]) { 1217 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]); 1218 cic->cfqq[SYNC] = NULL; 1219 } 1220} 1221 1222static void cfq_exit_single_io_context(struct cfq_io_context *cic) 1223{ 1224 struct cfq_data *cfqd = cic->key; 1225 1226 if (cfqd) { 1227 struct request_queue *q = cfqd->queue; 1228 1229 spin_lock_irq(q->queue_lock); 1230 __cfq_exit_single_io_context(cfqd, cic); 1231 spin_unlock_irq(q->queue_lock); 1232 } 1233} 1234 1235/* 1236 * The process that ioc belongs to has exited, we need to clean up 1237 * and put the internal structures we have that belongs to that process. 
1238 */ 1239static void cfq_exit_io_context(struct io_context *ioc) 1240{ 1241 struct cfq_io_context *__cic; 1242 struct rb_node *n; 1243 1244 ioc->ioc_data = NULL; 1245 1246 /* 1247 * put the reference this task is holding to the various queues 1248 */ 1249 n = rb_first(&ioc->cic_root); 1250 while (n != NULL) { 1251 __cic = rb_entry(n, struct cfq_io_context, rb_node); 1252 1253 cfq_exit_single_io_context(__cic); 1254 n = rb_next(n); 1255 } 1256} 1257 1258static struct cfq_io_context * 1259cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 1260{ 1261 struct cfq_io_context *cic; 1262 1263 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO, 1264 cfqd->queue->node); 1265 if (cic) { 1266 cic->last_end_request = jiffies; 1267 INIT_LIST_HEAD(&cic->queue_list); 1268 cic->dtor = cfq_free_io_context; 1269 cic->exit = cfq_exit_io_context; 1270 elv_ioc_count_inc(ioc_count); 1271 } 1272 1273 return cic; 1274} 1275 1276static void cfq_init_prio_data(struct cfq_queue *cfqq) 1277{ 1278 struct task_struct *tsk = current; 1279 int ioprio_class; 1280 1281 if (!cfq_cfqq_prio_changed(cfqq)) 1282 return; 1283 1284 ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); 1285 switch (ioprio_class) { 1286 default: 1287 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1288 case IOPRIO_CLASS_NONE: 1289 /* 1290 * no prio set, place us in the middle of the BE classes 1291 */ 1292 cfqq->ioprio = task_nice_ioprio(tsk); 1293 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1294 break; 1295 case IOPRIO_CLASS_RT: 1296 cfqq->ioprio = task_ioprio(tsk); 1297 cfqq->ioprio_class = IOPRIO_CLASS_RT; 1298 break; 1299 case IOPRIO_CLASS_BE: 1300 cfqq->ioprio = task_ioprio(tsk); 1301 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1302 break; 1303 case IOPRIO_CLASS_IDLE: 1304 cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1305 cfqq->ioprio = 7; 1306 cfq_clear_cfqq_idle_window(cfqq); 1307 break; 1308 } 1309 1310 /* 1311 * keep track of original prio settings in case we have to temporarily 1312 * elevate the priority of this queue 1313 */ 1314 cfqq->org_ioprio = cfqq->ioprio; 1315 cfqq->org_ioprio_class = cfqq->ioprio_class; 1316 cfq_clear_cfqq_prio_changed(cfqq); 1317} 1318 1319static inline void changed_ioprio(struct cfq_io_context *cic) 1320{ 1321 struct cfq_data *cfqd = cic->key; 1322 struct cfq_queue *cfqq; 1323 unsigned long flags; 1324 1325 if (unlikely(!cfqd)) 1326 return; 1327 1328 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1329 1330 cfqq = cic->cfqq[ASYNC]; 1331 if (cfqq) { 1332 struct cfq_queue *new_cfqq; 1333 new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task, 1334 GFP_ATOMIC); 1335 if (new_cfqq) { 1336 cic->cfqq[ASYNC] = new_cfqq; 1337 cfq_put_queue(cfqq); 1338 } 1339 } 1340 1341 cfqq = cic->cfqq[SYNC]; 1342 if (cfqq) 1343 cfq_mark_cfqq_prio_changed(cfqq); 1344 1345 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1346} 1347 1348static void cfq_ioc_set_ioprio(struct io_context *ioc) 1349{ 1350 struct cfq_io_context *cic; 1351 struct rb_node *n; 1352 1353 ioc->ioprio_changed = 0; 1354 1355 n = rb_first(&ioc->cic_root); 1356 while (n != NULL) { 1357 cic = rb_entry(n, struct cfq_io_context, rb_node); 1358 1359 changed_ioprio(cic); 1360 n = rb_next(n); 1361 } 1362} 1363 1364static struct cfq_queue * 1365cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, 1366 struct task_struct *tsk, gfp_t gfp_mask) 1367{ 1368 struct cfq_queue *cfqq, *new_cfqq = NULL; 1369 struct cfq_io_context *cic; 1370 1371retry: 1372 cic = cfq_cic_rb_lookup(cfqd, tsk->io_context); 1373 /* cic always exists here */ 1374 cfqq = cic_to_cfqq(cic, 
is_sync); 1375 1376 if (!cfqq) { 1377 if (new_cfqq) { 1378 cfqq = new_cfqq; 1379 new_cfqq = NULL; 1380 } else if (gfp_mask & __GFP_WAIT) { 1381 /* 1382 * Inform the allocator of the fact that we will 1383 * just repeat this allocation if it fails, to allow 1384 * the allocator to do whatever it needs to attempt to 1385 * free memory. 1386 */ 1387 spin_unlock_irq(cfqd->queue->queue_lock); 1388 new_cfqq = kmem_cache_alloc_node(cfq_pool, 1389 gfp_mask | __GFP_NOFAIL | __GFP_ZERO, 1390 cfqd->queue->node); 1391 spin_lock_irq(cfqd->queue->queue_lock); 1392 goto retry; 1393 } else { 1394 cfqq = kmem_cache_alloc_node(cfq_pool, 1395 gfp_mask | __GFP_ZERO, 1396 cfqd->queue->node); 1397 if (!cfqq) 1398 goto out; 1399 } 1400 1401 RB_CLEAR_NODE(&cfqq->rb_node); 1402 INIT_LIST_HEAD(&cfqq->fifo); 1403 1404 atomic_set(&cfqq->ref, 0); 1405 cfqq->cfqd = cfqd; 1406 1407 if (is_sync) { 1408 cfq_mark_cfqq_idle_window(cfqq); 1409 cfq_mark_cfqq_sync(cfqq); 1410 } 1411 1412 cfq_mark_cfqq_prio_changed(cfqq); 1413 cfq_mark_cfqq_queue_new(cfqq); 1414 1415 cfq_init_prio_data(cfqq); 1416 } 1417 1418 if (new_cfqq) 1419 kmem_cache_free(cfq_pool, new_cfqq); 1420 1421out: 1422 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); 1423 return cfqq; 1424} 1425 1426static struct cfq_queue ** 1427cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 1428{ 1429 switch(ioprio_class) { 1430 case IOPRIO_CLASS_RT: 1431 return &cfqd->async_cfqq[0][ioprio]; 1432 case IOPRIO_CLASS_BE: 1433 return &cfqd->async_cfqq[1][ioprio]; 1434 case IOPRIO_CLASS_IDLE: 1435 return &cfqd->async_idle_cfqq; 1436 default: 1437 BUG(); 1438 } 1439} 1440 1441static struct cfq_queue * 1442cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, 1443 gfp_t gfp_mask) 1444{ 1445 const int ioprio = task_ioprio(tsk); 1446 const int ioprio_class = task_ioprio_class(tsk); 1447 struct cfq_queue **async_cfqq = NULL; 1448 struct cfq_queue *cfqq = NULL; 1449 1450 if (!is_sync) { 1451 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); 1452 cfqq = *async_cfqq; 1453 } 1454 1455 if (!cfqq) { 1456 cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask); 1457 if (!cfqq) 1458 return NULL; 1459 } 1460 1461 /* 1462 * pin the queue now that it's allocated, scheduler exit will prune it 1463 */ 1464 if (!is_sync && !(*async_cfqq)) { 1465 atomic_inc(&cfqq->ref); 1466 *async_cfqq = cfqq; 1467 } 1468 1469 atomic_inc(&cfqq->ref); 1470 return cfqq; 1471} 1472 1473/* 1474 * We drop cfq io contexts lazily, so we may find a dead one. 
1475 */ 1476static void 1477cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic) 1478{ 1479 WARN_ON(!list_empty(&cic->queue_list)); 1480 1481 if (ioc->ioc_data == cic) 1482 ioc->ioc_data = NULL; 1483 1484 rb_erase(&cic->rb_node, &ioc->cic_root); 1485 kmem_cache_free(cfq_ioc_pool, cic); 1486 elv_ioc_count_dec(ioc_count); 1487} 1488 1489static struct cfq_io_context * 1490cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc) 1491{ 1492 struct rb_node *n; 1493 struct cfq_io_context *cic; 1494 void *k, *key = cfqd; 1495 1496 if (unlikely(!ioc)) 1497 return NULL; 1498 1499 /* 1500 * we maintain a last-hit cache, to avoid browsing over the tree 1501 */ 1502 cic = ioc->ioc_data; 1503 if (cic && cic->key == cfqd) 1504 return cic; 1505 1506restart: 1507 n = ioc->cic_root.rb_node; 1508 while (n) { 1509 cic = rb_entry(n, struct cfq_io_context, rb_node); 1510 /* ->key must be copied to avoid race with cfq_exit_queue() */ 1511 k = cic->key; 1512 if (unlikely(!k)) { 1513 cfq_drop_dead_cic(ioc, cic); 1514 goto restart; 1515 } 1516 1517 if (key < k) 1518 n = n->rb_left; 1519 else if (key > k) 1520 n = n->rb_right; 1521 else { 1522 ioc->ioc_data = cic; 1523 return cic; 1524 } 1525 } 1526 1527 return NULL; 1528} 1529 1530static inline void 1531cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, 1532 struct cfq_io_context *cic) 1533{ 1534 struct rb_node **p; 1535 struct rb_node *parent; 1536 struct cfq_io_context *__cic; 1537 unsigned long flags; 1538 void *k; 1539 1540 cic->ioc = ioc; 1541 cic->key = cfqd; 1542 1543restart: 1544 parent = NULL; 1545 p = &ioc->cic_root.rb_node; 1546 while (*p) { 1547 parent = *p; 1548 __cic = rb_entry(parent, struct cfq_io_context, rb_node); 1549 /* ->key must be copied to avoid race with cfq_exit_queue() */ 1550 k = __cic->key; 1551 if (unlikely(!k)) { 1552 cfq_drop_dead_cic(ioc, __cic); 1553 goto restart; 1554 } 1555 1556 if (cic->key < k) 1557 p = &(*p)->rb_left; 1558 else if (cic->key > k) 1559 p = &(*p)->rb_right; 1560 else 1561 BUG(); 1562 } 1563 1564 rb_link_node(&cic->rb_node, parent, p); 1565 rb_insert_color(&cic->rb_node, &ioc->cic_root); 1566 1567 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1568 list_add(&cic->queue_list, &cfqd->cic_list); 1569 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1570} 1571 1572/* 1573 * Setup general io context and cfq io context. There can be several cfq 1574 * io contexts per general io context, if this process is doing io to more 1575 * than one device managed by cfq. 
1576 */ 1577static struct cfq_io_context * 1578cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 1579{ 1580 struct io_context *ioc = NULL; 1581 struct cfq_io_context *cic; 1582 1583 might_sleep_if(gfp_mask & __GFP_WAIT); 1584 1585 ioc = get_io_context(gfp_mask, cfqd->queue->node); 1586 if (!ioc) 1587 return NULL; 1588 1589 cic = cfq_cic_rb_lookup(cfqd, ioc); 1590 if (cic) 1591 goto out; 1592 1593 cic = cfq_alloc_io_context(cfqd, gfp_mask); 1594 if (cic == NULL) 1595 goto err; 1596 1597 cfq_cic_link(cfqd, ioc, cic); 1598out: 1599 smp_read_barrier_depends(); 1600 if (unlikely(ioc->ioprio_changed)) 1601 cfq_ioc_set_ioprio(ioc); 1602 1603 return cic; 1604err: 1605 put_io_context(ioc); 1606 return NULL; 1607} 1608 1609static void 1610cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) 1611{ 1612 unsigned long elapsed = jiffies - cic->last_end_request; 1613 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); 1614 1615 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; 1616 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; 1617 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; 1618} 1619 1620static void 1621cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, 1622 struct request *rq) 1623{ 1624 sector_t sdist; 1625 u64 total; 1626 1627 if (cic->last_request_pos < rq->sector) 1628 sdist = rq->sector - cic->last_request_pos; 1629 else 1630 sdist = cic->last_request_pos - rq->sector; 1631 1632 /* 1633 * Don't allow the seek distance to get too large from the 1634 * odd fragment, pagein, etc 1635 */ 1636 if (cic->seek_samples <= 60) /* second&third seek */ 1637 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024); 1638 else 1639 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64); 1640 1641 cic->seek_samples = (7*cic->seek_samples + 256) / 8; 1642 cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8; 1643 total = cic->seek_total + (cic->seek_samples/2); 1644 do_div(total, cic->seek_samples); 1645 cic->seek_mean = (sector_t)total; 1646} 1647 1648/* 1649 * Disable idle window if the process thinks too long or seeks so much that 1650 * it doesn't matter 1651 */ 1652static void 1653cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1654 struct cfq_io_context *cic) 1655{ 1656 int enable_idle; 1657 1658 if (!cfq_cfqq_sync(cfqq)) 1659 return; 1660 1661 enable_idle = cfq_cfqq_idle_window(cfqq); 1662 1663 if (!cic->ioc->task || !cfqd->cfq_slice_idle || 1664 (cfqd->hw_tag && CIC_SEEKY(cic))) 1665 enable_idle = 0; 1666 else if (sample_valid(cic->ttime_samples)) { 1667 if (cic->ttime_mean > cfqd->cfq_slice_idle) 1668 enable_idle = 0; 1669 else 1670 enable_idle = 1; 1671 } 1672 1673 if (enable_idle) 1674 cfq_mark_cfqq_idle_window(cfqq); 1675 else 1676 cfq_clear_cfqq_idle_window(cfqq); 1677} 1678 1679/* 1680 * Check if new_cfqq should preempt the currently active queue. Return 0 for 1681 * no or if we aren't sure, a 1 will cause a preempt. 1682 */ 1683static int 1684cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 1685 struct request *rq) 1686{ 1687 struct cfq_queue *cfqq; 1688 1689 cfqq = cfqd->active_queue; 1690 if (!cfqq) 1691 return 0; 1692 1693 if (cfq_slice_used(cfqq)) 1694 return 1; 1695 1696 if (cfq_class_idle(new_cfqq)) 1697 return 0; 1698 1699 if (cfq_class_idle(cfqq)) 1700 return 1; 1701 1702 /* 1703 * if the new request is sync, but the currently running queue is 1704 * not, let the sync request have priority. 
1705 */ 1706 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 1707 return 1; 1708 1709 /* 1710 * So both queues are sync. Let the new request get disk time if 1711 * it's a metadata request and the current queue is doing regular IO. 1712 */ 1713 if (rq_is_meta(rq) && !cfqq->meta_pending) 1714 return 1; 1715 1716 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) 1717 return 0; 1718 1719 /* 1720 * if this request is as-good as one we would expect from the 1721 * current cfqq, let it preempt 1722 */ 1723 if (cfq_rq_close(cfqd, rq)) 1724 return 1; 1725 1726 return 0; 1727} 1728 1729/* 1730 * cfqq preempts the active queue. if we allowed preempt with no slice left, 1731 * let it have half of its nominal slice. 1732 */ 1733static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1734{ 1735 cfq_slice_expired(cfqd, 1); 1736 1737 /* 1738 * Put the new queue at the front of the of the current list, 1739 * so we know that it will be selected next. 1740 */ 1741 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 1742 1743 cfq_service_tree_add(cfqd, cfqq, 1); 1744 1745 cfqq->slice_end = 0; 1746 cfq_mark_cfqq_slice_new(cfqq); 1747} 1748 1749/* 1750 * Called when a new fs request (rq) is added (to cfqq). Check if there's 1751 * something we should do about it 1752 */ 1753static void 1754cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1755 struct request *rq) 1756{ 1757 struct cfq_io_context *cic = RQ_CIC(rq); 1758 1759 if (rq_is_meta(rq)) 1760 cfqq->meta_pending++; 1761 1762 cfq_update_io_thinktime(cfqd, cic); 1763 cfq_update_io_seektime(cfqd, cic, rq); 1764 cfq_update_idle_window(cfqd, cfqq, cic); 1765 1766 cic->last_request_pos = rq->sector + rq->nr_sectors; 1767 1768 if (cfqq == cfqd->active_queue) { 1769 /* 1770 * if we are waiting for a request for this queue, let it rip 1771 * immediately and flag that we must not expire this queue 1772 * just now 1773 */ 1774 if (cfq_cfqq_wait_request(cfqq)) { 1775 cfq_mark_cfqq_must_dispatch(cfqq); 1776 del_timer(&cfqd->idle_slice_timer); 1777 blk_start_queueing(cfqd->queue); 1778 } 1779 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { 1780 /* 1781 * not the active queue - expire current slice if it is 1782 * idle and has expired it's mean thinktime or this new queue 1783 * has some old slice time left and is of higher priority 1784 */ 1785 cfq_preempt_queue(cfqd, cfqq); 1786 cfq_mark_cfqq_must_dispatch(cfqq); 1787 blk_start_queueing(cfqd->queue); 1788 } 1789} 1790 1791static void cfq_insert_request(struct request_queue *q, struct request *rq) 1792{ 1793 struct cfq_data *cfqd = q->elevator->elevator_data; 1794 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1795 1796 cfq_init_prio_data(cfqq); 1797 1798 cfq_add_rq_rb(rq); 1799 1800 list_add_tail(&rq->queuelist, &cfqq->fifo); 1801 1802 cfq_rq_enqueued(cfqd, cfqq, rq); 1803} 1804 1805static void cfq_completed_request(struct request_queue *q, struct request *rq) 1806{ 1807 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1808 struct cfq_data *cfqd = cfqq->cfqd; 1809 const int sync = rq_is_sync(rq); 1810 unsigned long now; 1811 1812 now = jiffies; 1813 1814 WARN_ON(!cfqd->rq_in_driver); 1815 WARN_ON(!cfqq->dispatched); 1816 cfqd->rq_in_driver--; 1817 cfqq->dispatched--; 1818 1819 if (cfq_cfqq_sync(cfqq)) 1820 cfqd->sync_flight--; 1821 1822 if (!cfq_class_idle(cfqq)) 1823 cfqd->last_end_request = now; 1824 1825 if (sync) 1826 RQ_CIC(rq)->last_end_request = now; 1827 1828 /* 1829 * If this is the active queue, check if it needs to be expired, 1830 * or if we want to idle in case it has no pending requests. 
1831 */ 1832 if (cfqd->active_queue == cfqq) { 1833 if (cfq_cfqq_slice_new(cfqq)) { 1834 cfq_set_prio_slice(cfqd, cfqq); 1835 cfq_clear_cfqq_slice_new(cfqq); 1836 } 1837 if (cfq_slice_used(cfqq)) 1838 cfq_slice_expired(cfqd, 1); 1839 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) 1840 cfq_arm_slice_timer(cfqd); 1841 } 1842 1843 if (!cfqd->rq_in_driver) 1844 cfq_schedule_dispatch(cfqd); 1845} 1846 1847/* 1848 * we temporarily boost lower priority queues if they are holding fs exclusive 1849 * resources. they are boosted to normal prio (CLASS_BE/4) 1850 */ 1851static void cfq_prio_boost(struct cfq_queue *cfqq) 1852{ 1853 if (has_fs_excl()) { 1854 /* 1855 * boost idle prio on transactions that would lock out other 1856 * users of the filesystem 1857 */ 1858 if (cfq_class_idle(cfqq)) 1859 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1860 if (cfqq->ioprio > IOPRIO_NORM) 1861 cfqq->ioprio = IOPRIO_NORM; 1862 } else { 1863 /* 1864 * check if we need to unboost the queue 1865 */ 1866 if (cfqq->ioprio_class != cfqq->org_ioprio_class) 1867 cfqq->ioprio_class = cfqq->org_ioprio_class; 1868 if (cfqq->ioprio != cfqq->org_ioprio) 1869 cfqq->ioprio = cfqq->org_ioprio; 1870 } 1871} 1872 1873static inline int __cfq_may_queue(struct cfq_queue *cfqq) 1874{ 1875 if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && 1876 !cfq_cfqq_must_alloc_slice(cfqq)) { 1877 cfq_mark_cfqq_must_alloc_slice(cfqq); 1878 return ELV_MQUEUE_MUST; 1879 } 1880 1881 return ELV_MQUEUE_MAY; 1882} 1883 1884static int cfq_may_queue(struct request_queue *q, int rw) 1885{ 1886 struct cfq_data *cfqd = q->elevator->elevator_data; 1887 struct task_struct *tsk = current; 1888 struct cfq_io_context *cic; 1889 struct cfq_queue *cfqq; 1890 1891 /* 1892 * don't force setup of a queue from here, as a call to may_queue 1893 * does not necessarily imply that a request actually will be queued. 1894 * so just lookup a possibly existing queue, or return 'may queue' 1895 * if that fails 1896 */ 1897 cic = cfq_cic_rb_lookup(cfqd, tsk->io_context); 1898 if (!cic) 1899 return ELV_MQUEUE_MAY; 1900 1901 cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); 1902 if (cfqq) { 1903 cfq_init_prio_data(cfqq); 1904 cfq_prio_boost(cfqq); 1905 1906 return __cfq_may_queue(cfqq); 1907 } 1908 1909 return ELV_MQUEUE_MAY; 1910} 1911 1912/* 1913 * queue lock held here 1914 */ 1915static void cfq_put_request(struct request *rq) 1916{ 1917 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1918 1919 if (cfqq) { 1920 const int rw = rq_data_dir(rq); 1921 1922 BUG_ON(!cfqq->allocated[rw]); 1923 cfqq->allocated[rw]--; 1924 1925 put_io_context(RQ_CIC(rq)->ioc); 1926 1927 rq->elevator_private = NULL; 1928 rq->elevator_private2 = NULL; 1929 1930 cfq_put_queue(cfqq); 1931 } 1932} 1933 1934/* 1935 * Allocate cfq data structures associated with this request. 
1936 */ 1937static int 1938cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) 1939{ 1940 struct cfq_data *cfqd = q->elevator->elevator_data; 1941 struct task_struct *tsk = current; 1942 struct cfq_io_context *cic; 1943 const int rw = rq_data_dir(rq); 1944 const int is_sync = rq_is_sync(rq); 1945 struct cfq_queue *cfqq; 1946 unsigned long flags; 1947 1948 might_sleep_if(gfp_mask & __GFP_WAIT); 1949 1950 cic = cfq_get_io_context(cfqd, gfp_mask); 1951 1952 spin_lock_irqsave(q->queue_lock, flags); 1953 1954 if (!cic) 1955 goto queue_fail; 1956 1957 cfqq = cic_to_cfqq(cic, is_sync); 1958 if (!cfqq) { 1959 cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask); 1960 1961 if (!cfqq) 1962 goto queue_fail; 1963 1964 cic_set_cfqq(cic, cfqq, is_sync); 1965 } 1966 1967 cfqq->allocated[rw]++; 1968 cfq_clear_cfqq_must_alloc(cfqq); 1969 atomic_inc(&cfqq->ref); 1970 1971 spin_unlock_irqrestore(q->queue_lock, flags); 1972 1973 rq->elevator_private = cic; 1974 rq->elevator_private2 = cfqq; 1975 return 0; 1976 1977queue_fail: 1978 if (cic) 1979 put_io_context(cic->ioc); 1980 1981 cfq_schedule_dispatch(cfqd); 1982 spin_unlock_irqrestore(q->queue_lock, flags); 1983 return 1; 1984} 1985 1986static void cfq_kick_queue(struct work_struct *work) 1987{ 1988 struct cfq_data *cfqd = 1989 container_of(work, struct cfq_data, unplug_work); 1990 struct request_queue *q = cfqd->queue; 1991 unsigned long flags; 1992 1993 spin_lock_irqsave(q->queue_lock, flags); 1994 blk_start_queueing(q); 1995 spin_unlock_irqrestore(q->queue_lock, flags); 1996} 1997 1998/* 1999 * Timer running if the active_queue is currently idling inside its time slice 2000 */ 2001static void cfq_idle_slice_timer(unsigned long data) 2002{ 2003 struct cfq_data *cfqd = (struct cfq_data *) data; 2004 struct cfq_queue *cfqq; 2005 unsigned long flags; 2006 int timed_out = 1; 2007 2008 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2009 2010 if ((cfqq = cfqd->active_queue) != NULL) { 2011 timed_out = 0; 2012 2013 /* 2014 * expired 2015 */ 2016 if (cfq_slice_used(cfqq)) 2017 goto expire; 2018 2019 /* 2020 * only expire and reinvoke request handler, if there are 2021 * other queues with pending requests 2022 */ 2023 if (!cfqd->busy_queues) 2024 goto out_cont; 2025 2026 /* 2027 * not expired and it has a request pending, let it dispatch 2028 */ 2029 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) { 2030 cfq_mark_cfqq_must_dispatch(cfqq); 2031 goto out_kick; 2032 } 2033 } 2034expire: 2035 cfq_slice_expired(cfqd, timed_out); 2036out_kick: 2037 cfq_schedule_dispatch(cfqd); 2038out_cont: 2039 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2040} 2041 2042/* 2043 * Timer running if an idle class queue is waiting for service 2044 */ 2045static void cfq_idle_class_timer(unsigned long data) 2046{ 2047 struct cfq_data *cfqd = (struct cfq_data *) data; 2048 unsigned long flags; 2049 2050 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2051 2052 /* 2053 * race with a non-idle queue, reset timer 2054 */ 2055 if (!start_idle_class_timer(cfqd)) 2056 cfq_schedule_dispatch(cfqd); 2057 2058 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2059} 2060 2061static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2062{ 2063 del_timer_sync(&cfqd->idle_slice_timer); 2064 del_timer_sync(&cfqd->idle_class_timer); 2065 kblockd_flush_work(&cfqd->unplug_work); 2066} 2067 2068static void cfq_put_async_queues(struct cfq_data *cfqd) 2069{ 2070 int i; 2071 2072 for (i = 0; i < IOPRIO_BE_NR; i++) { 2073 if (cfqd->async_cfqq[0][i]) 2074 
cfq_put_queue(cfqd->async_cfqq[0][i]); 2075 if (cfqd->async_cfqq[1][i]) 2076 cfq_put_queue(cfqd->async_cfqq[1][i]); 2077 } 2078 2079 if (cfqd->async_idle_cfqq) 2080 cfq_put_queue(cfqd->async_idle_cfqq); 2081} 2082 2083static void cfq_exit_queue(elevator_t *e) 2084{ 2085 struct cfq_data *cfqd = e->elevator_data; 2086 struct request_queue *q = cfqd->queue; 2087 2088 cfq_shutdown_timer_wq(cfqd); 2089 2090 spin_lock_irq(q->queue_lock); 2091 2092 if (cfqd->active_queue) 2093 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); 2094 2095 while (!list_empty(&cfqd->cic_list)) { 2096 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, 2097 struct cfq_io_context, 2098 queue_list); 2099 2100 __cfq_exit_single_io_context(cfqd, cic); 2101 } 2102 2103 cfq_put_async_queues(cfqd); 2104 2105 spin_unlock_irq(q->queue_lock); 2106 2107 cfq_shutdown_timer_wq(cfqd); 2108 2109 kfree(cfqd); 2110} 2111 2112static void *cfq_init_queue(struct request_queue *q) 2113{ 2114 struct cfq_data *cfqd; 2115 2116 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 2117 if (!cfqd) 2118 return NULL; 2119 2120 cfqd->service_tree = CFQ_RB_ROOT; 2121 INIT_LIST_HEAD(&cfqd->cic_list); 2122 2123 cfqd->queue = q; 2124 2125 init_timer(&cfqd->idle_slice_timer); 2126 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2127 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2128 2129 init_timer(&cfqd->idle_class_timer); 2130 cfqd->idle_class_timer.function = cfq_idle_class_timer; 2131 cfqd->idle_class_timer.data = (unsigned long) cfqd; 2132 2133 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 2134 2135 cfqd->last_end_request = jiffies; 2136 cfqd->cfq_quantum = cfq_quantum; 2137 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; 2138 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; 2139 cfqd->cfq_back_max = cfq_back_max; 2140 cfqd->cfq_back_penalty = cfq_back_penalty; 2141 cfqd->cfq_slice[0] = cfq_slice_async; 2142 cfqd->cfq_slice[1] = cfq_slice_sync; 2143 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2144 cfqd->cfq_slice_idle = cfq_slice_idle; 2145 2146 return cfqd; 2147} 2148 2149static void cfq_slab_kill(void) 2150{ 2151 if (cfq_pool) 2152 kmem_cache_destroy(cfq_pool); 2153 if (cfq_ioc_pool) 2154 kmem_cache_destroy(cfq_ioc_pool); 2155} 2156 2157static int __init cfq_slab_setup(void) 2158{ 2159 cfq_pool = KMEM_CACHE(cfq_queue, 0); 2160 if (!cfq_pool) 2161 goto fail; 2162 2163 cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0); 2164 if (!cfq_ioc_pool) 2165 goto fail; 2166 2167 return 0; 2168fail: 2169 cfq_slab_kill(); 2170 return -ENOMEM; 2171} 2172 2173/* 2174 * sysfs parts below --> 2175 */ 2176static ssize_t 2177cfq_var_show(unsigned int var, char *page) 2178{ 2179 return sprintf(page, "%d\n", var); 2180} 2181 2182static ssize_t 2183cfq_var_store(unsigned int *var, const char *page, size_t count) 2184{ 2185 char *p = (char *) page; 2186 2187 *var = simple_strtoul(p, &p, 10); 2188 return count; 2189} 2190 2191#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 2192static ssize_t __FUNC(elevator_t *e, char *page) \ 2193{ \ 2194 struct cfq_data *cfqd = e->elevator_data; \ 2195 unsigned int __data = __VAR; \ 2196 if (__CONV) \ 2197 __data = jiffies_to_msecs(__data); \ 2198 return cfq_var_show(__data, (page)); \ 2199} 2200SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); 2201SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); 2202SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); 2203SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); 
2204SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); 2205SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); 2206SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 2207SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 2208SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 2209#undef SHOW_FUNCTION 2210 2211#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 2212static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ 2213{ \ 2214 struct cfq_data *cfqd = e->elevator_data; \ 2215 unsigned int __data; \ 2216 int ret = cfq_var_store(&__data, (page), count); \ 2217 if (__data < (MIN)) \ 2218 __data = (MIN); \ 2219 else if (__data > (MAX)) \ 2220 __data = (MAX); \ 2221 if (__CONV) \ 2222 *(__PTR) = msecs_to_jiffies(__data); \ 2223 else \ 2224 *(__PTR) = __data; \ 2225 return ret; \ 2226} 2227STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 2228STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); 2229STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); 2230STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 2231STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); 2232STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 2233STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 2234STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2235STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); 2236#undef STORE_FUNCTION 2237 2238#define CFQ_ATTR(name) \ 2239 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) 2240 2241static struct elv_fs_entry cfq_attrs[] = { 2242 CFQ_ATTR(quantum), 2243 CFQ_ATTR(fifo_expire_sync), 2244 CFQ_ATTR(fifo_expire_async), 2245 CFQ_ATTR(back_seek_max), 2246 CFQ_ATTR(back_seek_penalty), 2247 CFQ_ATTR(slice_sync), 2248 CFQ_ATTR(slice_async), 2249 CFQ_ATTR(slice_async_rq), 2250 CFQ_ATTR(slice_idle), 2251 __ATTR_NULL 2252}; 2253 2254static struct elevator_type iosched_cfq = { 2255 .ops = { 2256 .elevator_merge_fn = cfq_merge, 2257 .elevator_merged_fn = cfq_merged_request, 2258 .elevator_merge_req_fn = cfq_merged_requests, 2259 .elevator_allow_merge_fn = cfq_allow_merge, 2260 .elevator_dispatch_fn = cfq_dispatch_requests, 2261 .elevator_add_req_fn = cfq_insert_request, 2262 .elevator_activate_req_fn = cfq_activate_request, 2263 .elevator_deactivate_req_fn = cfq_deactivate_request, 2264 .elevator_queue_empty_fn = cfq_queue_empty, 2265 .elevator_completed_req_fn = cfq_completed_request, 2266 .elevator_former_req_fn = elv_rb_former_request, 2267 .elevator_latter_req_fn = elv_rb_latter_request, 2268 .elevator_set_req_fn = cfq_set_request, 2269 .elevator_put_req_fn = cfq_put_request, 2270 .elevator_may_queue_fn = cfq_may_queue, 2271 .elevator_init_fn = cfq_init_queue, 2272 .elevator_exit_fn = cfq_exit_queue, 2273 .trim = cfq_free_io_context, 2274 }, 2275 .elevator_attrs = cfq_attrs, 2276 .elevator_name = "cfq", 2277 .elevator_owner = THIS_MODULE, 2278}; 2279 2280static int __init cfq_init(void) 2281{ 2282 int ret; 2283 2284 /* 2285 * could be 0 on HZ < 1000 setups 2286 */ 2287 if (!cfq_slice_async) 2288 cfq_slice_async = 1; 2289 if (!cfq_slice_idle) 2290 cfq_slice_idle = 1; 2291 2292 if (cfq_slab_setup()) 2293 return -ENOMEM; 2294 2295 ret = elv_register(&iosched_cfq); 2296 if (ret) 2297 cfq_slab_kill(); 2298 2299 return ret; 2300} 2301 
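/*
 * Annotation added to this copy, not part of the original file: the
 * SHOW_FUNCTION/STORE_FUNCTION/CFQ_ATTR macros above generate the sysfs
 * boilerplate for each tunable. As a rough illustration of what the
 * preprocessor produces, CFQ_ATTR(quantum) expands to
 *
 *   __ATTR(quantum, S_IRUGO|S_IWUSR, cfq_quantum_show, cfq_quantum_store)
 *
 * and SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0) expands to a
 * show routine that formats cfqd->cfq_quantum via cfq_var_show(). Tunables
 * declared with __CONV set to 1 are converted between jiffies (internal
 * representation) and milliseconds (sysfs value) with jiffies_to_msecs()
 * and msecs_to_jiffies().
 */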
2302static void __exit cfq_exit(void) 2303{ 2304 DECLARE_COMPLETION_ONSTACK(all_gone); 2305 elv_unregister(&iosched_cfq); 2306 ioc_gone = &all_gone; 2307 /* ioc_gone's update must be visible before reading ioc_count */ 2308 smp_wmb(); 2309 if (elv_ioc_count_read(ioc_count)) 2310 wait_for_completion(ioc_gone); 2311 synchronize_rcu(); 2312 cfq_slab_kill(); 2313} 2314 2315module_init(cfq_init); 2316module_exit(cfq_exit); 2317 2318MODULE_AUTHOR("Jens Axboe"); 2319MODULE_LICENSE("GPL"); 2320MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
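Usage note (added here, not part of the source file): once this scheduler is built in or loaded as a module, it can be selected per block device at runtime, for example with "echo cfq > /sys/block/sda/queue/scheduler" (sda being just an example device). The tunables registered through cfq_attrs then appear under /sys/block/sda/queue/iosched/ (quantum, fifo_expire_sync, fifo_expire_async, back_seek_max, back_seek_penalty, slice_sync, slice_async, slice_async_rq, slice_idle). The time-based tunables are read and written in milliseconds; the STORE_FUNCTION macro converts them to jiffies internally.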