Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.24-rc7, 1489 lines, 39 kB
/*
 * Anticipatory & deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 *                    Nick Piggin <nickpiggin@yahoo.com.au>
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC 1
#define REQ_ASYNC 0

/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)

/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation. Waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)

/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation) */
	ANTIC_WAIT_REQ,		/* The last read has not yet completed */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				 * or timed out */
};

struct as_data {
	/*
	 * run time data
	 */

	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct request *next_rq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
	unsigned long new_ttime_total;	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished;	/* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

/*
 * per-request data.
 */
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
};

#define RQ_IOC(rq)	((struct io_context *) (rq)->elevator_private)
#define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
#define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
static void as_antic_stop(struct as_data *ad);

/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
	elv_ioc_count_dec(ioc_count);
	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}

static void as_trim(struct io_context *ioc)
{
	if (ioc->aic)
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
}

/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->ttime_mean = 0;
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
		elv_ioc_count_inc(ioc_count);
	}

	return ret;
}

/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(int node)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}

static void as_put_io_context(struct request *rq)
{
	struct as_io_context *aic;

	if (unlikely(!RQ_IOC(rq)))
		return;

	aic = RQ_IOC(rq)->aic;

	if (rq_is_sync(rq) && aic) {
		spin_lock(&aic->lock);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
	}

	put_io_context(RQ_IOC(rq));
}

/*
 * rb tree support functions
 */
#define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])

static void as_add_rq_rb(struct as_data *ad, struct request *rq)
{
	struct request *alias;

	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}

static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
{
	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
}

/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request.
				 */

#define BACK_PENALTY	2

/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
 */
static struct request *
as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	data_dir = rq_is_sync(rq1);

	last = ad->last_sector[data_dir];
	s1 = rq1->sector;
	s2 = rq2->sector;

	BUG_ON(data_dir != rq_is_sync(rq2));

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return rq1;
	else if (!r2_wrap && r1_wrap)
		return rq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return rq1;
	else if (d2 < d1)
		return rq2;
	else {
		if (s1 >= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * as_find_next_rq finds the next request after @prev in elevator order.
 * this with as_choose_req form the basis for how the scheduler chooses
 * what request to process next. Anticipation works on top of this.
 */
static struct request *
as_find_next_rq(struct as_data *ad, struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		const int data_dir = rq_is_sync(last);

		rbnext = rb_first(&ad->sort_list[data_dir]);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return as_choose_req(ad, next, prev);
}

/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}

/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}

/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are timing from when the candidate process wakes up hopefully.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}

/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}

/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out*/
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}

static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}

/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	int data_dir = rq_is_sync(rq);
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector - aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos - rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}

/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
			struct request *rq)
{
	unsigned long delay;	/* jiffies */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = rq->sector;
	sector_t delta; /* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = jiffies - ad->antic_start;

	if (delay == 0)
		delta = 8192;
	else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}

	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}

/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;
	BUG_ON(!ioc);

	if (rq && ioc == RQ_IOC(rq)) {
		/* request from same process */
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		return 1;
	}

	aic = ioc->aic;
	if (!aic)
		return 0;

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		return 1;
	}

	if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. Is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, rq);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128)
			return 1;
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
			return 1;
		if (ad->exit_prob * ad->exit_no_coop > 128*256)
			return 1;
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		return 1;
	}

	return 0;
}

/*
 * as_can_anticipate indicates whether we should either run rq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct request *rq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, rq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}

/*
 * as_update_rq must be called whenever a request (rq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_rq(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	/* keep the next_rq cache up to date */
	ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * for?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, rq))
			as_antic_stop(ad);
	}
}

/*
 * Gathers timings and resizes the write batch automatically
 */
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}

/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(struct request_queue *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;

	WARN_ON(!list_empty(&rq->queuelist));

	if (RQ_STATE(rq) != AS_RQ_REMOVED) {
		printk("rq->state %d\n", RQ_STATE(rq));
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(rq);
out:
	RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
}

/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
 * (ie. the dispatch queue)
 */
static void as_remove_queued_request(struct request_queue *q,
				     struct request *rq)
{
	const int data_dir = rq_is_sync(rq);
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;

	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	ioc = RQ_IOC(rq);
	if (ioc && ioc->aic) {
		BUG_ON(!atomic_read(&ioc->aic->nr_queued));
		atomic_dec(&ioc->aic->nr_queued);
	}

	/*
	 * Update the "next_rq" cache if we are about to remove its
	 * entry
	 */
	if (ad->next_rq[data_dir] == rq)
		ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	rq_fifo_clear(rq);
	as_del_rq_rb(ad, rq);
}

/*
 * as_fifo_expired returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval. Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct request *rq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	rq = rq_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, rq_fifo_time(rq));
}

/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}

/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
{
	const int data_dir = rq_is_sync(rq);

	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_rq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		struct io_context *ioc = RQ_IOC(rq);
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &ioc);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_rq[data_dir] = as_find_next_rq(ad, rq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	as_remove_queued_request(ad->q, rq);
	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);

	elv_dispatch_sort(ad->q, rq);

	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
	ad->nr_dispatched++;
}

/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(struct request_queue *q, int force)
{
	struct as_data *ad = q->elevator->elevator_data;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
	struct request *rq;

	if (unlikely(force)) {
		/*
		 * Forced dispatch, accounting is useless.  Reset
		 * accounting states and dump fifo_lists.  Note that
		 * batch_data_dir is reset to REQ_SYNC to avoid
		 * screwing write batch accounting as write batch
		 * accounting occurs on W->R transition.
		 */
		int dispatched = 0;

		ad->batch_data_dir = REQ_SYNC;
		ad->changed_batch = 0;
		ad->new_batch = 0;

		while (ad->next_rq[REQ_SYNC]) {
			as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_SYNC] = jiffies;

		while (ad->next_rq[REQ_ASYNC]) {
			as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_ASYNC] = jiffies;

		return dispatched;
	}

	/* Signal that the write batch was uncontended, so we can't time it */
	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
		if (ad->current_write_count == 0 || !writes)
			ad->write_batch_idled = 1;
	}

	if (!(reads || writes)
		|| ad->antic_status == ANTIC_WAIT_REQ
		|| ad->antic_status == ANTIC_WAIT_NEXT
		|| ad->changed_batch)
		return 0;

	if (!(reads && writes && as_batch_expired(ad))) {
		/*
		 * batch is still running or no reads or no writes
		 */
		rq = ad->next_rq[ad->batch_data_dir];

		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
			if (as_fifo_expired(ad, REQ_SYNC))
				goto fifo_expired;

			if (as_can_anticipate(ad, rq)) {
				as_antic_waitreq(ad);
				return 0;
			}
		}

		if (rq) {
			/* we have a "next request" */
			if (reads && !writes)
				ad->current_batch_expires =
					jiffies + ad->batch_expire[REQ_SYNC];
			goto dispatch_request;
		}
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));

		if (writes && ad->batch_data_dir == REQ_SYNC)
			/*
			 * Last batch was a read, switch to writes
			 */
			goto dispatch_writes;

		if (ad->batch_data_dir == REQ_ASYNC) {
			WARN_ON(ad->new_batch);
			ad->changed_batch = 1;
		}
		ad->batch_data_dir = REQ_SYNC;
		rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next);
		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
		goto dispatch_request;
	}

	/*
	 * the last batch was a read
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));

		if (ad->batch_data_dir == REQ_SYNC) {
			ad->changed_batch = 1;

			/*
			 * new_batch might be 1 when the queue runs out of
			 * reads. A subsequent submission of a write might
			 * cause a change of batch before the read is finished.
			 */
			ad->new_batch = 0;
		}
		ad->batch_data_dir = REQ_ASYNC;
		ad->current_write_count = ad->write_batch_count;
		ad->write_batch_idled = 0;
		rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next);
		ad->last_check_fifo[REQ_ASYNC] = jiffies;
		goto dispatch_request;
	}

	BUG();
	return 0;

dispatch_request:
	/*
	 * If a request has expired, service it.
	 */

	if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
		rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
	}

	if (ad->changed_batch) {
		WARN_ON(ad->new_batch);

		if (ad->nr_dispatched)
			return 0;

		if (ad->batch_data_dir == REQ_ASYNC)
			ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_ASYNC];
		else
			ad->new_batch = 1;

		ad->changed_batch = 0;
	}

	/*
	 * rq is the selected appropriate request.
	 */
	as_move_to_dispatch(ad, rq);

	return 1;
}

/*
 * add rq to rbtree and fifo
 */
static void as_add_request(struct request_queue *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	int data_dir;

	RQ_SET_STATE(rq, AS_RQ_NEW);

	data_dir = rq_is_sync(rq);

	rq->elevator_private = as_get_io_context(q->node);

	if (RQ_IOC(rq)) {
		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
		atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
	}

	as_add_rq_rb(ad, rq);

	/*
	 * set expire time and add to fifo list
	 */
	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);

	as_update_rq(ad, rq); /* keep state machine up to date */
	RQ_SET_STATE(rq, AS_RQ_QUEUED);
}

static void as_activate_request(struct request_queue *q, struct request *rq)
{
	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
	RQ_SET_STATE(rq, AS_RQ_REMOVED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
}

static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
}

/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(struct request_queue *q)
{
	struct as_data *ad = q->elevator->elevator_data;

	return list_empty(&ad->fifo_list[REQ_ASYNC])
		&& list_empty(&ad->fifo_list[REQ_SYNC]);
}

static int
as_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator->elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
	struct request *__rq;

	/*
	 * check for front merge
	 */
	__rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void as_merged_request(struct request_queue *q, struct request *req,
				int type)
{
	struct as_data *ad = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		as_del_rq_rb(ad, req);
		as_add_rq_rb(ad, req);
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
		 * behind the disk head. We currently don't bother adjusting.
		 */
	}
}

static void as_merged_requests(struct request_queue *q, struct request *req,
				struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to arq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			struct io_context *rioc = RQ_IOC(req);
			struct io_context *nioc = RQ_IOC(next);

			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
			/*
			 * Don't copy here but swap, because when next is
			 * removed below, it must contain the unused context
			 */
			swap_io_context(&rioc, &nioc);
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	as_remove_queued_request(q, next);
	as_put_io_context(next);

	RQ_SET_STATE(next, AS_RQ_MERGED);
}

/*
 * This is executed in a "deferred" process context, by kblockd. It calls the
 * driver's request_fn so the driver can submit that request.
 *
 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 * state before calling, and don't rely on any state over calls.
 *
 * FIXME! dispatch queue is not a queue at all!
 */
static void as_work_handler(struct work_struct *work)
{
	struct as_data *ad = container_of(work, struct as_data, antic_work);
	struct request_queue *q = ad->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static int as_may_queue(struct request_queue *q, int rw)
{
	int ret = ELV_MQUEUE_MAY;
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;
	if (ad->antic_status == ANTIC_WAIT_REQ ||
	    ad->antic_status == ANTIC_WAIT_NEXT) {
		ioc = as_get_io_context(q->node);
		if (ad->io_context == ioc)
			ret = ELV_MQUEUE_MUST;
		put_io_context(ioc);
	}

	return ret;
}

static void as_exit_queue(elevator_t *e)
{
	struct as_data *ad = e->elevator_data;

	del_timer_sync(&ad->antic_timer);
	kblockd_flush_work(&ad->antic_work);

	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));

	put_io_context(ad->io_context);
	kfree(ad);
}

/*
 * initialize elevator private data (as_data).
 */
static void *as_init_queue(struct request_queue *q)
{
	struct as_data *ad;

	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!ad)
		return NULL;

	ad->q = q; /* Identify what queue the data belongs to */

	/* anticipatory scheduling helpers */
	ad->antic_timer.function = as_antic_timeout;
	ad->antic_timer.data = (unsigned long)q;
	init_timer(&ad->antic_timer);
	INIT_WORK(&ad->antic_work, as_work_handler);

	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
	ad->sort_list[REQ_SYNC] = RB_ROOT;
	ad->sort_list[REQ_ASYNC] = RB_ROOT;
	ad->fifo_expire[REQ_SYNC] = default_read_expire;
	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
	ad->antic_expire = default_antic_expire;
	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;

	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
	if (ad->write_batch_count < 2)
		ad->write_batch_count = 2;

	return ad;
}

/*
 * sysfs parts below
 */

static ssize_t
as_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
as_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t est_time_show(elevator_t *e, char *page)
{
	struct as_data *ad = e->elevator_data;
	int pos = 0;

	pos += sprintf(page+pos, "%lu %% exit probability\n",
				100*ad->exit_prob/256);
	pos += sprintf(page+pos, "%lu %% probability of exiting without a "
				"cooperating process submitting IO\n",
				100*ad->exit_no_coop/256);
	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
	pos += sprintf(page+pos, "%llu sectors new seek distance\n",
				(unsigned long long)ad->new_seek_mean);

	return pos;
}

#define SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(elevator_t *e, char *page)		\
{								\
	struct as_data *ad = e->elevator_data;			\
	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
}
SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct as_data *ad = e->elevator_data;				\
	int ret = as_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
	return ret;							\
}
STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batch_expire_store,
			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batch_expire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION

#define AS_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)

static struct elv_fs_entry as_attrs[] = {
	__ATTR_RO(est_time),
	AS_ATTR(read_expire),
	AS_ATTR(write_expire),
	AS_ATTR(antic_expire),
	AS_ATTR(read_batch_expire),
	AS_ATTR(write_batch_expire),
	__ATTR_NULL
};

static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn = as_merge,
		.elevator_merged_fn = as_merged_request,
		.elevator_merge_req_fn = as_merged_requests,
		.elevator_dispatch_fn = as_dispatch_request,
		.elevator_add_req_fn = as_add_request,
		.elevator_activate_req_fn = as_activate_request,
		.elevator_deactivate_req_fn = as_deactivate_request,
		.elevator_queue_empty_fn = as_queue_empty,
		.elevator_completed_req_fn = as_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_may_queue_fn = as_may_queue,
		.elevator_init_fn = as_init_queue,
		.elevator_exit_fn = as_exit_queue,
		.trim = as_trim,
	},

	.elevator_attrs = as_attrs,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};

static int __init as_init(void)
{
	elv_register(&iosched_as);

	return 0;
}

static void __exit as_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_as);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
}

module_init(as_init);
module_exit(as_exit);

MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");
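
A note on the statistics in this file: as_update_thinktime() and as_update_seekdist() keep their per-process averages as fixed-point exponentially decaying means, with 1.0 represented as 1<<8 and each update retaining 7/8 of the history while folding in 1/8 of the new sample. As a rough illustration of that arithmetic only, here is a small standalone userspace sketch; it is not part of the kernel file, the ewma_* names are invented for the example, and the rounding follows the seek_mean variant above.

/* Illustrative userspace sketch of the 7/8-decay fixed-point average
 * used by as_update_thinktime()/as_update_seekdist(); the ewma_* names
 * are hypothetical and not part of the kernel source. */
#include <stdio.h>

struct ewma {
	unsigned long samples;	/* decayed sample count, fixed point: 1.0 == 1<<8 */
	unsigned long total;	/* decayed sum of samples, same scale */
	unsigned long mean;	/* total / samples, in the sample's own units */
};

static void ewma_add(struct ewma *e, unsigned long value)
{
	/* each step keeps 7/8 of the history and adds 1/8 of the new sample */
	e->samples = (7 * e->samples + 256) / 8;
	e->total = (7 * e->total + 256 * value) / 8;
	e->mean = (e->total + e->samples / 2) / e->samples;	/* rounded divide */
}

int main(void)
{
	struct ewma think = { 0, 0, 0 };
	unsigned long demo[] = { 2, 2, 2, 40, 2, 2 };	/* e.g. think times in jiffies */
	size_t i;

	for (i = 0; i < sizeof(demo) / sizeof(demo[0]); i++) {
		ewma_add(&think, demo[i]);
		printf("sample %lu -> mean %lu\n", demo[i], think.mean);
	}
	return 0;
}

The same decay shape is applied to exit_prob and exit_no_coop in the scheduler, which is why a single outlying sample only shifts the estimate gradually rather than resetting it.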