Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.30-rc2, 1524 lines, 40 kB
/*
 * Anticipatory & deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 *                    Nick Piggin <nickpiggin@yahoo.com.au>
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC	1
#define REQ_ASYNC	0

/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)

/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation. Waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)

/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation) */
	ANTIC_WAIT_REQ,		/* The last read has not yet completed */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				 * or timed out */
};

struct as_data {
	/*
	 * run time data
	 */

	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct request *next_rq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
	unsigned long new_ttime_total;	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished;	/* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

/*
 * per-request data.
 */
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list.
It belongs to the 141 driver now */ 142 AS_RQ_PRESCHED, /* Debug poisoning for requests being used */ 143 AS_RQ_REMOVED, 144 AS_RQ_MERGED, 145 AS_RQ_POSTSCHED, /* when they shouldn't be */ 146}; 147 148#define RQ_IOC(rq) ((struct io_context *) (rq)->elevator_private) 149#define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2) 150#define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state) 151 152static DEFINE_PER_CPU(unsigned long, ioc_count); 153static struct completion *ioc_gone; 154static DEFINE_SPINLOCK(ioc_gone_lock); 155 156static void as_move_to_dispatch(struct as_data *ad, struct request *rq); 157static void as_antic_stop(struct as_data *ad); 158 159/* 160 * IO Context helper functions 161 */ 162 163/* Called to deallocate the as_io_context */ 164static void free_as_io_context(struct as_io_context *aic) 165{ 166 kfree(aic); 167 elv_ioc_count_dec(ioc_count); 168 if (ioc_gone) { 169 /* 170 * AS scheduler is exiting, grab exit lock and check 171 * the pending io context count. If it hits zero, 172 * complete ioc_gone and set it back to NULL. 173 */ 174 spin_lock(&ioc_gone_lock); 175 if (ioc_gone && !elv_ioc_count_read(ioc_count)) { 176 complete(ioc_gone); 177 ioc_gone = NULL; 178 } 179 spin_unlock(&ioc_gone_lock); 180 } 181} 182 183static void as_trim(struct io_context *ioc) 184{ 185 spin_lock_irq(&ioc->lock); 186 if (ioc->aic) 187 free_as_io_context(ioc->aic); 188 ioc->aic = NULL; 189 spin_unlock_irq(&ioc->lock); 190} 191 192/* Called when the task exits */ 193static void exit_as_io_context(struct as_io_context *aic) 194{ 195 WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state)); 196 clear_bit(AS_TASK_RUNNING, &aic->state); 197} 198 199static struct as_io_context *alloc_as_io_context(void) 200{ 201 struct as_io_context *ret; 202 203 ret = kmalloc(sizeof(*ret), GFP_ATOMIC); 204 if (ret) { 205 ret->dtor = free_as_io_context; 206 ret->exit = exit_as_io_context; 207 ret->state = 1 << AS_TASK_RUNNING; 208 atomic_set(&ret->nr_queued, 0); 209 atomic_set(&ret->nr_dispatched, 0); 210 spin_lock_init(&ret->lock); 211 ret->ttime_total = 0; 212 ret->ttime_samples = 0; 213 ret->ttime_mean = 0; 214 ret->seek_total = 0; 215 ret->seek_samples = 0; 216 ret->seek_mean = 0; 217 elv_ioc_count_inc(ioc_count); 218 } 219 220 return ret; 221} 222 223/* 224 * If the current task has no AS IO context then create one and initialise it. 225 * Then take a ref on the task's io context and return it. 
226 */ 227static struct io_context *as_get_io_context(int node) 228{ 229 struct io_context *ioc = get_io_context(GFP_ATOMIC, node); 230 if (ioc && !ioc->aic) { 231 ioc->aic = alloc_as_io_context(); 232 if (!ioc->aic) { 233 put_io_context(ioc); 234 ioc = NULL; 235 } 236 } 237 return ioc; 238} 239 240static void as_put_io_context(struct request *rq) 241{ 242 struct as_io_context *aic; 243 244 if (unlikely(!RQ_IOC(rq))) 245 return; 246 247 aic = RQ_IOC(rq)->aic; 248 249 if (rq_is_sync(rq) && aic) { 250 unsigned long flags; 251 252 spin_lock_irqsave(&aic->lock, flags); 253 set_bit(AS_TASK_IORUNNING, &aic->state); 254 aic->last_end_request = jiffies; 255 spin_unlock_irqrestore(&aic->lock, flags); 256 } 257 258 put_io_context(RQ_IOC(rq)); 259} 260 261/* 262 * rb tree support functions 263 */ 264#define RQ_RB_ROOT(ad, rq) (&(ad)->sort_list[rq_is_sync((rq))]) 265 266static void as_add_rq_rb(struct as_data *ad, struct request *rq) 267{ 268 struct request *alias; 269 270 while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) { 271 as_move_to_dispatch(ad, alias); 272 as_antic_stop(ad); 273 } 274} 275 276static inline void as_del_rq_rb(struct as_data *ad, struct request *rq) 277{ 278 elv_rb_del(RQ_RB_ROOT(ad, rq), rq); 279} 280 281/* 282 * IO Scheduler proper 283 */ 284 285#define MAXBACK (1024 * 1024) /* 286 * Maximum distance the disk will go backward 287 * for a request. 288 */ 289 290#define BACK_PENALTY 2 291 292/* 293 * as_choose_req selects the preferred one of two requests of the same data_dir 294 * ignoring time - eg. timeouts, which is the job of as_dispatch_request 295 */ 296static struct request * 297as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2) 298{ 299 int data_dir; 300 sector_t last, s1, s2, d1, d2; 301 int r1_wrap=0, r2_wrap=0; /* requests are behind the disk head */ 302 const sector_t maxback = MAXBACK; 303 304 if (rq1 == NULL || rq1 == rq2) 305 return rq2; 306 if (rq2 == NULL) 307 return rq1; 308 309 data_dir = rq_is_sync(rq1); 310 311 last = ad->last_sector[data_dir]; 312 s1 = rq1->sector; 313 s2 = rq2->sector; 314 315 BUG_ON(data_dir != rq_is_sync(rq2)); 316 317 /* 318 * Strict one way elevator _except_ in the case where we allow 319 * short backward seeks which are biased as twice the cost of a 320 * similar forward seek. 321 */ 322 if (s1 >= last) 323 d1 = s1 - last; 324 else if (s1+maxback >= last) 325 d1 = (last - s1)*BACK_PENALTY; 326 else { 327 r1_wrap = 1; 328 d1 = 0; /* shut up, gcc */ 329 } 330 331 if (s2 >= last) 332 d2 = s2 - last; 333 else if (s2+maxback >= last) 334 d2 = (last - s2)*BACK_PENALTY; 335 else { 336 r2_wrap = 1; 337 d2 = 0; 338 } 339 340 /* Found required data */ 341 if (!r1_wrap && r2_wrap) 342 return rq1; 343 else if (!r2_wrap && r1_wrap) 344 return rq2; 345 else if (r1_wrap && r2_wrap) { 346 /* both behind the head */ 347 if (s1 <= s2) 348 return rq1; 349 else 350 return rq2; 351 } 352 353 /* Both requests in front of the head */ 354 if (d1 < d2) 355 return rq1; 356 else if (d2 < d1) 357 return rq2; 358 else { 359 if (s1 >= s2) 360 return rq1; 361 else 362 return rq2; 363 } 364} 365 366/* 367 * as_find_next_rq finds the next request after @prev in elevator order. 368 * this with as_choose_req form the basis for how the scheduler chooses 369 * what request to process next. Anticipation works on top of this. 
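 *
 * For illustration, with hypothetical numbers: if last_sector is 1000, a
 * candidate at sector 1100 scores d = 100, while one at sector 900 lies
 * behind the head; it stays eligible because 900 + MAXBACK >= 1000, but is
 * charged (1000 - 900) * BACK_PENALTY = 200, so the forward request wins.
 * Requests more than MAXBACK sectors behind the head count as wrapped and
 * lose to any non-wrapped candidate; if both wrap, the lower sector wins.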
370 */ 371static struct request * 372as_find_next_rq(struct as_data *ad, struct request *last) 373{ 374 struct rb_node *rbnext = rb_next(&last->rb_node); 375 struct rb_node *rbprev = rb_prev(&last->rb_node); 376 struct request *next = NULL, *prev = NULL; 377 378 BUG_ON(RB_EMPTY_NODE(&last->rb_node)); 379 380 if (rbprev) 381 prev = rb_entry_rq(rbprev); 382 383 if (rbnext) 384 next = rb_entry_rq(rbnext); 385 else { 386 const int data_dir = rq_is_sync(last); 387 388 rbnext = rb_first(&ad->sort_list[data_dir]); 389 if (rbnext && rbnext != &last->rb_node) 390 next = rb_entry_rq(rbnext); 391 } 392 393 return as_choose_req(ad, next, prev); 394} 395 396/* 397 * anticipatory scheduling functions follow 398 */ 399 400/* 401 * as_antic_expired tells us when we have anticipated too long. 402 * The funny "absolute difference" math on the elapsed time is to handle 403 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies. 404 */ 405static int as_antic_expired(struct as_data *ad) 406{ 407 long delta_jif; 408 409 delta_jif = jiffies - ad->antic_start; 410 if (unlikely(delta_jif < 0)) 411 delta_jif = -delta_jif; 412 if (delta_jif < ad->antic_expire) 413 return 0; 414 415 return 1; 416} 417 418/* 419 * as_antic_waitnext starts anticipating that a nice request will soon be 420 * submitted. See also as_antic_waitreq 421 */ 422static void as_antic_waitnext(struct as_data *ad) 423{ 424 unsigned long timeout; 425 426 BUG_ON(ad->antic_status != ANTIC_OFF 427 && ad->antic_status != ANTIC_WAIT_REQ); 428 429 timeout = ad->antic_start + ad->antic_expire; 430 431 mod_timer(&ad->antic_timer, timeout); 432 433 ad->antic_status = ANTIC_WAIT_NEXT; 434} 435 436/* 437 * as_antic_waitreq starts anticipating. We don't start timing the anticipation 438 * until the request that we're anticipating on has finished. This means we 439 * are timing from when the candidate process wakes up hopefully. 440 */ 441static void as_antic_waitreq(struct as_data *ad) 442{ 443 BUG_ON(ad->antic_status == ANTIC_FINISHED); 444 if (ad->antic_status == ANTIC_OFF) { 445 if (!ad->io_context || ad->ioc_finished) 446 as_antic_waitnext(ad); 447 else 448 ad->antic_status = ANTIC_WAIT_REQ; 449 } 450} 451 452/* 453 * This is called directly by the functions in this file to stop anticipation. 454 * We kill the timer and schedule a call to the request_fn asap. 455 */ 456static void as_antic_stop(struct as_data *ad) 457{ 458 int status = ad->antic_status; 459 460 if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) { 461 if (status == ANTIC_WAIT_NEXT) 462 del_timer(&ad->antic_timer); 463 ad->antic_status = ANTIC_FINISHED; 464 /* see as_work_handler */ 465 kblockd_schedule_work(ad->q, &ad->antic_work); 466 } 467} 468 469/* 470 * as_antic_timeout is the timer function set by as_antic_waitnext. 
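 *
 * When the wait times out and the anticipated process has produced no
 * thinktime samples, exit_prob is nudged an eighth of the way towards 256
 * (8.8 fixed point, so 256 == 100%): exit_prob = (7*exit_prob + 256)/8.
 * If the task is no longer running, exit_no_coop is nudged the same way.
 * For a fresh process, as_can_break_anticipation then gives up waiting once
 * exit_prob * exit_no_coop > 128*256, i.e. once the combined estimate
 * exceeds 50%.  (Illustrative summary of the updates below.)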
471 */ 472static void as_antic_timeout(unsigned long data) 473{ 474 struct request_queue *q = (struct request_queue *)data; 475 struct as_data *ad = q->elevator->elevator_data; 476 unsigned long flags; 477 478 spin_lock_irqsave(q->queue_lock, flags); 479 if (ad->antic_status == ANTIC_WAIT_REQ 480 || ad->antic_status == ANTIC_WAIT_NEXT) { 481 struct as_io_context *aic; 482 spin_lock(&ad->io_context->lock); 483 aic = ad->io_context->aic; 484 485 ad->antic_status = ANTIC_FINISHED; 486 kblockd_schedule_work(q, &ad->antic_work); 487 488 if (aic->ttime_samples == 0) { 489 /* process anticipated on has exited or timed out*/ 490 ad->exit_prob = (7*ad->exit_prob + 256)/8; 491 } 492 if (!test_bit(AS_TASK_RUNNING, &aic->state)) { 493 /* process not "saved" by a cooperating request */ 494 ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8; 495 } 496 spin_unlock(&ad->io_context->lock); 497 } 498 spin_unlock_irqrestore(q->queue_lock, flags); 499} 500 501static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, 502 unsigned long ttime) 503{ 504 /* fixed point: 1.0 == 1<<8 */ 505 if (aic->ttime_samples == 0) { 506 ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8; 507 ad->new_ttime_mean = ad->new_ttime_total / 256; 508 509 ad->exit_prob = (7*ad->exit_prob)/8; 510 } 511 aic->ttime_samples = (7*aic->ttime_samples + 256) / 8; 512 aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8; 513 aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples; 514} 515 516static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, 517 sector_t sdist) 518{ 519 u64 total; 520 521 if (aic->seek_samples == 0) { 522 ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8; 523 ad->new_seek_mean = ad->new_seek_total / 256; 524 } 525 526 /* 527 * Don't allow the seek distance to get too large from the 528 * odd fragment, pagein, etc 529 */ 530 if (aic->seek_samples <= 60) /* second&third seek */ 531 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024); 532 else 533 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64); 534 535 aic->seek_samples = (7*aic->seek_samples + 256) / 8; 536 aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8; 537 total = aic->seek_total + (aic->seek_samples/2); 538 do_div(total, aic->seek_samples); 539 aic->seek_mean = (sector_t)total; 540} 541 542/* 543 * as_update_iohist keeps a decaying histogram of IO thinktimes, and 544 * updates @aic->ttime_mean based on that. It is called when a new 545 * request is queued. 
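 *
 * The same 8.8 fixed point scheme is used here: each call to
 * as_update_thinktime folds the new sample in with weight 1/8.  Starting
 * from zero, a single 8-jiffy thinktime gives
 * ttime_samples = (7*0 + 256)/8 = 32, ttime_total = (7*0 + 256*8)/8 = 256
 * and ttime_mean = (256 + 128)/32 = 12 jiffies; as samples accumulate,
 * ttime_samples converges towards 256 and ttime_mean towards a decayed
 * average of recent thinktimes.  (Worked example with hypothetical input.)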
546 */ 547static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, 548 struct request *rq) 549{ 550 int data_dir = rq_is_sync(rq); 551 unsigned long thinktime = 0; 552 sector_t seek_dist; 553 554 if (aic == NULL) 555 return; 556 557 if (data_dir == REQ_SYNC) { 558 unsigned long in_flight = atomic_read(&aic->nr_queued) 559 + atomic_read(&aic->nr_dispatched); 560 spin_lock(&aic->lock); 561 if (test_bit(AS_TASK_IORUNNING, &aic->state) || 562 test_bit(AS_TASK_IOSTARTED, &aic->state)) { 563 /* Calculate read -> read thinktime */ 564 if (test_bit(AS_TASK_IORUNNING, &aic->state) 565 && in_flight == 0) { 566 thinktime = jiffies - aic->last_end_request; 567 thinktime = min(thinktime, MAX_THINKTIME-1); 568 } 569 as_update_thinktime(ad, aic, thinktime); 570 571 /* Calculate read -> read seek distance */ 572 if (aic->last_request_pos < rq->sector) 573 seek_dist = rq->sector - aic->last_request_pos; 574 else 575 seek_dist = aic->last_request_pos - rq->sector; 576 as_update_seekdist(ad, aic, seek_dist); 577 } 578 aic->last_request_pos = rq->sector + rq->nr_sectors; 579 set_bit(AS_TASK_IOSTARTED, &aic->state); 580 spin_unlock(&aic->lock); 581 } 582} 583 584/* 585 * as_close_req decides if one request is considered "close" to the 586 * previous one issued. 587 */ 588static int as_close_req(struct as_data *ad, struct as_io_context *aic, 589 struct request *rq) 590{ 591 unsigned long delay; /* jiffies */ 592 sector_t last = ad->last_sector[ad->batch_data_dir]; 593 sector_t next = rq->sector; 594 sector_t delta; /* acceptable close offset (in sectors) */ 595 sector_t s; 596 597 if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished) 598 delay = 0; 599 else 600 delay = jiffies - ad->antic_start; 601 602 if (delay == 0) 603 delta = 8192; 604 else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire) 605 delta = 8192 << delay; 606 else 607 return 1; 608 609 if ((last <= next + (delta>>1)) && (next <= last + delta)) 610 return 1; 611 612 if (last < next) 613 s = next - last; 614 else 615 s = last - next; 616 617 if (aic->seek_samples == 0) { 618 /* 619 * Process has just started IO. Use past statistics to 620 * gauge success possibility 621 */ 622 if (ad->new_seek_mean > s) { 623 /* this request is better than what we're expecting */ 624 return 1; 625 } 626 627 } else { 628 if (aic->seek_mean > s) { 629 /* this request is better than what we're expecting */ 630 return 1; 631 } 632 } 633 634 return 0; 635} 636 637/* 638 * as_can_break_anticipation returns true if we have been anticipating this 639 * request. 640 * 641 * It also returns true if the process against which we are anticipating 642 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to 643 * dispatch it ASAP, because we know that application will not be submitting 644 * any new reads. 645 * 646 * If the task which has submitted the request has exited, break anticipation. 647 * 648 * If this task has queued some other IO, do not enter enticipation. 649 */ 650static int as_can_break_anticipation(struct as_data *ad, struct request *rq) 651{ 652 struct io_context *ioc; 653 struct as_io_context *aic; 654 655 ioc = ad->io_context; 656 BUG_ON(!ioc); 657 spin_lock(&ioc->lock); 658 659 if (rq && ioc == RQ_IOC(rq)) { 660 /* request from same process */ 661 spin_unlock(&ioc->lock); 662 return 1; 663 } 664 665 if (ad->ioc_finished && as_antic_expired(ad)) { 666 /* 667 * In this situation status should really be FINISHED, 668 * however the timer hasn't had the chance to run yet. 
669 */ 670 spin_unlock(&ioc->lock); 671 return 1; 672 } 673 674 aic = ioc->aic; 675 if (!aic) { 676 spin_unlock(&ioc->lock); 677 return 0; 678 } 679 680 if (atomic_read(&aic->nr_queued) > 0) { 681 /* process has more requests queued */ 682 spin_unlock(&ioc->lock); 683 return 1; 684 } 685 686 if (atomic_read(&aic->nr_dispatched) > 0) { 687 /* process has more requests dispatched */ 688 spin_unlock(&ioc->lock); 689 return 1; 690 } 691 692 if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) { 693 /* 694 * Found a close request that is not one of ours. 695 * 696 * This makes close requests from another process update 697 * our IO history. Is generally useful when there are 698 * two or more cooperating processes working in the same 699 * area. 700 */ 701 if (!test_bit(AS_TASK_RUNNING, &aic->state)) { 702 if (aic->ttime_samples == 0) 703 ad->exit_prob = (7*ad->exit_prob + 256)/8; 704 705 ad->exit_no_coop = (7*ad->exit_no_coop)/8; 706 } 707 708 as_update_iohist(ad, aic, rq); 709 spin_unlock(&ioc->lock); 710 return 1; 711 } 712 713 if (!test_bit(AS_TASK_RUNNING, &aic->state)) { 714 /* process anticipated on has exited */ 715 if (aic->ttime_samples == 0) 716 ad->exit_prob = (7*ad->exit_prob + 256)/8; 717 718 if (ad->exit_no_coop > 128) { 719 spin_unlock(&ioc->lock); 720 return 1; 721 } 722 } 723 724 if (aic->ttime_samples == 0) { 725 if (ad->new_ttime_mean > ad->antic_expire) { 726 spin_unlock(&ioc->lock); 727 return 1; 728 } 729 if (ad->exit_prob * ad->exit_no_coop > 128*256) { 730 spin_unlock(&ioc->lock); 731 return 1; 732 } 733 } else if (aic->ttime_mean > ad->antic_expire) { 734 /* the process thinks too much between requests */ 735 spin_unlock(&ioc->lock); 736 return 1; 737 } 738 spin_unlock(&ioc->lock); 739 return 0; 740} 741 742/* 743 * as_can_anticipate indicates whether we should either run rq 744 * or keep anticipating a better request. 745 */ 746static int as_can_anticipate(struct as_data *ad, struct request *rq) 747{ 748#if 0 /* disable for now, we need to check tag level as well */ 749 /* 750 * SSD device without seek penalty, disable idling 751 */ 752 if (blk_queue_nonrot(ad->q)) axman 753 return 0; 754#endif 755 756 if (!ad->io_context) 757 /* 758 * Last request submitted was a write 759 */ 760 return 0; 761 762 if (ad->antic_status == ANTIC_FINISHED) 763 /* 764 * Don't restart if we have just finished. Run the next request 765 */ 766 return 0; 767 768 if (as_can_break_anticipation(ad, rq)) 769 /* 770 * This request is a good candidate. Don't keep anticipating, 771 * run it. 772 */ 773 return 0; 774 775 /* 776 * OK from here, we haven't finished, and don't have a decent request! 777 * Status is either ANTIC_OFF so start waiting, 778 * ANTIC_WAIT_REQ so continue waiting for request to finish 779 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request. 780 */ 781 782 return 1; 783} 784 785/* 786 * as_update_rq must be called whenever a request (rq) is added to 787 * the sort_list. This function keeps caches up to date, and checks if the 788 * request might be one we are "anticipating" 789 */ 790static void as_update_rq(struct as_data *ad, struct request *rq) 791{ 792 const int data_dir = rq_is_sync(rq); 793 794 /* keep the next_rq cache up to date */ 795 ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]); 796 797 /* 798 * have we been anticipating this request? 799 * or does it come from the same process as the one we are anticipating 800 * for? 
801 */ 802 if (ad->antic_status == ANTIC_WAIT_REQ 803 || ad->antic_status == ANTIC_WAIT_NEXT) { 804 if (as_can_break_anticipation(ad, rq)) 805 as_antic_stop(ad); 806 } 807} 808 809/* 810 * Gathers timings and resizes the write batch automatically 811 */ 812static void update_write_batch(struct as_data *ad) 813{ 814 unsigned long batch = ad->batch_expire[REQ_ASYNC]; 815 long write_time; 816 817 write_time = (jiffies - ad->current_batch_expires) + batch; 818 if (write_time < 0) 819 write_time = 0; 820 821 if (write_time > batch && !ad->write_batch_idled) { 822 if (write_time > batch * 3) 823 ad->write_batch_count /= 2; 824 else 825 ad->write_batch_count--; 826 } else if (write_time < batch && ad->current_write_count == 0) { 827 if (batch > write_time * 3) 828 ad->write_batch_count *= 2; 829 else 830 ad->write_batch_count++; 831 } 832 833 if (ad->write_batch_count < 1) 834 ad->write_batch_count = 1; 835} 836 837/* 838 * as_completed_request is to be called when a request has completed and 839 * returned something to the requesting process, be it an error or data. 840 */ 841static void as_completed_request(struct request_queue *q, struct request *rq) 842{ 843 struct as_data *ad = q->elevator->elevator_data; 844 845 WARN_ON(!list_empty(&rq->queuelist)); 846 847 if (RQ_STATE(rq) != AS_RQ_REMOVED) { 848 WARN(1, "rq->state %d\n", RQ_STATE(rq)); 849 goto out; 850 } 851 852 if (ad->changed_batch && ad->nr_dispatched == 1) { 853 ad->current_batch_expires = jiffies + 854 ad->batch_expire[ad->batch_data_dir]; 855 kblockd_schedule_work(q, &ad->antic_work); 856 ad->changed_batch = 0; 857 858 if (ad->batch_data_dir == REQ_SYNC) 859 ad->new_batch = 1; 860 } 861 WARN_ON(ad->nr_dispatched == 0); 862 ad->nr_dispatched--; 863 864 /* 865 * Start counting the batch from when a request of that direction is 866 * actually serviced. This should help devices with big TCQ windows 867 * and writeback caches 868 */ 869 if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) { 870 update_write_batch(ad); 871 ad->current_batch_expires = jiffies + 872 ad->batch_expire[REQ_SYNC]; 873 ad->new_batch = 0; 874 } 875 876 if (ad->io_context == RQ_IOC(rq) && ad->io_context) { 877 ad->antic_start = jiffies; 878 ad->ioc_finished = 1; 879 if (ad->antic_status == ANTIC_WAIT_REQ) { 880 /* 881 * We were waiting on this request, now anticipate 882 * the next one 883 */ 884 as_antic_waitnext(ad); 885 } 886 } 887 888 as_put_io_context(rq); 889out: 890 RQ_SET_STATE(rq, AS_RQ_POSTSCHED); 891} 892 893/* 894 * as_remove_queued_request removes a request from the pre dispatch queue 895 * without updating refcounts. It is expected the caller will drop the 896 * reference unless it replaces the request at somepart of the elevator 897 * (ie. the dispatch queue) 898 */ 899static void as_remove_queued_request(struct request_queue *q, 900 struct request *rq) 901{ 902 const int data_dir = rq_is_sync(rq); 903 struct as_data *ad = q->elevator->elevator_data; 904 struct io_context *ioc; 905 906 WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED); 907 908 ioc = RQ_IOC(rq); 909 if (ioc && ioc->aic) { 910 BUG_ON(!atomic_read(&ioc->aic->nr_queued)); 911 atomic_dec(&ioc->aic->nr_queued); 912 } 913 914 /* 915 * Update the "next_rq" cache if we are about to remove its 916 * entry 917 */ 918 if (ad->next_rq[data_dir] == rq) 919 ad->next_rq[data_dir] = as_find_next_rq(ad, rq); 920 921 rq_fifo_clear(rq); 922 as_del_rq_rb(ad, rq); 923} 924 925/* 926 * as_fifo_expired returns 0 if there are no expired requests on the fifo, 927 * 1 otherwise. 
It is ratelimited so that we only perform the check once per 928 * `fifo_expire' interval. Otherwise a large number of expired requests 929 * would create a hopeless seekstorm. 930 * 931 * See as_antic_expired comment. 932 */ 933static int as_fifo_expired(struct as_data *ad, int adir) 934{ 935 struct request *rq; 936 long delta_jif; 937 938 delta_jif = jiffies - ad->last_check_fifo[adir]; 939 if (unlikely(delta_jif < 0)) 940 delta_jif = -delta_jif; 941 if (delta_jif < ad->fifo_expire[adir]) 942 return 0; 943 944 ad->last_check_fifo[adir] = jiffies; 945 946 if (list_empty(&ad->fifo_list[adir])) 947 return 0; 948 949 rq = rq_entry_fifo(ad->fifo_list[adir].next); 950 951 return time_after(jiffies, rq_fifo_time(rq)); 952} 953 954/* 955 * as_batch_expired returns true if the current batch has expired. A batch 956 * is a set of reads or a set of writes. 957 */ 958static inline int as_batch_expired(struct as_data *ad) 959{ 960 if (ad->changed_batch || ad->new_batch) 961 return 0; 962 963 if (ad->batch_data_dir == REQ_SYNC) 964 /* TODO! add a check so a complete fifo gets written? */ 965 return time_after(jiffies, ad->current_batch_expires); 966 967 return time_after(jiffies, ad->current_batch_expires) 968 || ad->current_write_count == 0; 969} 970 971/* 972 * move an entry to dispatch queue 973 */ 974static void as_move_to_dispatch(struct as_data *ad, struct request *rq) 975{ 976 const int data_dir = rq_is_sync(rq); 977 978 BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); 979 980 as_antic_stop(ad); 981 ad->antic_status = ANTIC_OFF; 982 983 /* 984 * This has to be set in order to be correctly updated by 985 * as_find_next_rq 986 */ 987 ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; 988 989 if (data_dir == REQ_SYNC) { 990 struct io_context *ioc = RQ_IOC(rq); 991 /* In case we have to anticipate after this */ 992 copy_io_context(&ad->io_context, &ioc); 993 } else { 994 if (ad->io_context) { 995 put_io_context(ad->io_context); 996 ad->io_context = NULL; 997 } 998 999 if (ad->current_write_count != 0) 1000 ad->current_write_count--; 1001 } 1002 ad->ioc_finished = 0; 1003 1004 ad->next_rq[data_dir] = as_find_next_rq(ad, rq); 1005 1006 /* 1007 * take it off the sort and fifo list, add to dispatch queue 1008 */ 1009 as_remove_queued_request(ad->q, rq); 1010 WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED); 1011 1012 elv_dispatch_sort(ad->q, rq); 1013 1014 RQ_SET_STATE(rq, AS_RQ_DISPATCHED); 1015 if (RQ_IOC(rq) && RQ_IOC(rq)->aic) 1016 atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched); 1017 ad->nr_dispatched++; 1018} 1019 1020/* 1021 * as_dispatch_request selects the best request according to 1022 * read/write expire, batch expire, etc, and moves it to the dispatch 1023 * queue. Returns 1 if a request was found, 0 otherwise. 1024 */ 1025static int as_dispatch_request(struct request_queue *q, int force) 1026{ 1027 struct as_data *ad = q->elevator->elevator_data; 1028 const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); 1029 const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); 1030 struct request *rq; 1031 1032 if (unlikely(force)) { 1033 /* 1034 * Forced dispatch, accounting is useless. Reset 1035 * accounting states and dump fifo_lists. Note that 1036 * batch_data_dir is reset to REQ_SYNC to avoid 1037 * screwing write batch accounting as write batch 1038 * accounting occurs on W->R transition. 
1039 */ 1040 int dispatched = 0; 1041 1042 ad->batch_data_dir = REQ_SYNC; 1043 ad->changed_batch = 0; 1044 ad->new_batch = 0; 1045 1046 while (ad->next_rq[REQ_SYNC]) { 1047 as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]); 1048 dispatched++; 1049 } 1050 ad->last_check_fifo[REQ_SYNC] = jiffies; 1051 1052 while (ad->next_rq[REQ_ASYNC]) { 1053 as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]); 1054 dispatched++; 1055 } 1056 ad->last_check_fifo[REQ_ASYNC] = jiffies; 1057 1058 return dispatched; 1059 } 1060 1061 /* Signal that the write batch was uncontended, so we can't time it */ 1062 if (ad->batch_data_dir == REQ_ASYNC && !reads) { 1063 if (ad->current_write_count == 0 || !writes) 1064 ad->write_batch_idled = 1; 1065 } 1066 1067 if (!(reads || writes) 1068 || ad->antic_status == ANTIC_WAIT_REQ 1069 || ad->antic_status == ANTIC_WAIT_NEXT 1070 || ad->changed_batch) 1071 return 0; 1072 1073 if (!(reads && writes && as_batch_expired(ad))) { 1074 /* 1075 * batch is still running or no reads or no writes 1076 */ 1077 rq = ad->next_rq[ad->batch_data_dir]; 1078 1079 if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) { 1080 if (as_fifo_expired(ad, REQ_SYNC)) 1081 goto fifo_expired; 1082 1083 if (as_can_anticipate(ad, rq)) { 1084 as_antic_waitreq(ad); 1085 return 0; 1086 } 1087 } 1088 1089 if (rq) { 1090 /* we have a "next request" */ 1091 if (reads && !writes) 1092 ad->current_batch_expires = 1093 jiffies + ad->batch_expire[REQ_SYNC]; 1094 goto dispatch_request; 1095 } 1096 } 1097 1098 /* 1099 * at this point we are not running a batch. select the appropriate 1100 * data direction (read / write) 1101 */ 1102 1103 if (reads) { 1104 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); 1105 1106 if (writes && ad->batch_data_dir == REQ_SYNC) 1107 /* 1108 * Last batch was a read, switch to writes 1109 */ 1110 goto dispatch_writes; 1111 1112 if (ad->batch_data_dir == REQ_ASYNC) { 1113 WARN_ON(ad->new_batch); 1114 ad->changed_batch = 1; 1115 } 1116 ad->batch_data_dir = REQ_SYNC; 1117 rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next); 1118 ad->last_check_fifo[ad->batch_data_dir] = jiffies; 1119 goto dispatch_request; 1120 } 1121 1122 /* 1123 * the last batch was a read 1124 */ 1125 1126 if (writes) { 1127dispatch_writes: 1128 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); 1129 1130 if (ad->batch_data_dir == REQ_SYNC) { 1131 ad->changed_batch = 1; 1132 1133 /* 1134 * new_batch might be 1 when the queue runs out of 1135 * reads. A subsequent submission of a write might 1136 * cause a change of batch before the read is finished. 1137 */ 1138 ad->new_batch = 0; 1139 } 1140 ad->batch_data_dir = REQ_ASYNC; 1141 ad->current_write_count = ad->write_batch_count; 1142 ad->write_batch_idled = 0; 1143 rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next); 1144 ad->last_check_fifo[REQ_ASYNC] = jiffies; 1145 goto dispatch_request; 1146 } 1147 1148 BUG(); 1149 return 0; 1150 1151dispatch_request: 1152 /* 1153 * If a request has expired, service it. 1154 */ 1155 1156 if (as_fifo_expired(ad, ad->batch_data_dir)) { 1157fifo_expired: 1158 rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next); 1159 } 1160 1161 if (ad->changed_batch) { 1162 WARN_ON(ad->new_batch); 1163 1164 if (ad->nr_dispatched) 1165 return 0; 1166 1167 if (ad->batch_data_dir == REQ_ASYNC) 1168 ad->current_batch_expires = jiffies + 1169 ad->batch_expire[REQ_ASYNC]; 1170 else 1171 ad->new_batch = 1; 1172 1173 ad->changed_batch = 0; 1174 } 1175 1176 /* 1177 * rq is the selected appropriate request. 
1178 */ 1179 as_move_to_dispatch(ad, rq); 1180 1181 return 1; 1182} 1183 1184/* 1185 * add rq to rbtree and fifo 1186 */ 1187static void as_add_request(struct request_queue *q, struct request *rq) 1188{ 1189 struct as_data *ad = q->elevator->elevator_data; 1190 int data_dir; 1191 1192 RQ_SET_STATE(rq, AS_RQ_NEW); 1193 1194 data_dir = rq_is_sync(rq); 1195 1196 rq->elevator_private = as_get_io_context(q->node); 1197 1198 if (RQ_IOC(rq)) { 1199 as_update_iohist(ad, RQ_IOC(rq)->aic, rq); 1200 atomic_inc(&RQ_IOC(rq)->aic->nr_queued); 1201 } 1202 1203 as_add_rq_rb(ad, rq); 1204 1205 /* 1206 * set expire time and add to fifo list 1207 */ 1208 rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]); 1209 list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]); 1210 1211 as_update_rq(ad, rq); /* keep state machine up to date */ 1212 RQ_SET_STATE(rq, AS_RQ_QUEUED); 1213} 1214 1215static void as_activate_request(struct request_queue *q, struct request *rq) 1216{ 1217 WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED); 1218 RQ_SET_STATE(rq, AS_RQ_REMOVED); 1219 if (RQ_IOC(rq) && RQ_IOC(rq)->aic) 1220 atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched); 1221} 1222 1223static void as_deactivate_request(struct request_queue *q, struct request *rq) 1224{ 1225 WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED); 1226 RQ_SET_STATE(rq, AS_RQ_DISPATCHED); 1227 if (RQ_IOC(rq) && RQ_IOC(rq)->aic) 1228 atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched); 1229} 1230 1231/* 1232 * as_queue_empty tells us if there are requests left in the device. It may 1233 * not be the case that a driver can get the next request even if the queue 1234 * is not empty - it is used in the block layer to check for plugging and 1235 * merging opportunities 1236 */ 1237static int as_queue_empty(struct request_queue *q) 1238{ 1239 struct as_data *ad = q->elevator->elevator_data; 1240 1241 return list_empty(&ad->fifo_list[REQ_ASYNC]) 1242 && list_empty(&ad->fifo_list[REQ_SYNC]); 1243} 1244 1245static int 1246as_merge(struct request_queue *q, struct request **req, struct bio *bio) 1247{ 1248 struct as_data *ad = q->elevator->elevator_data; 1249 sector_t rb_key = bio->bi_sector + bio_sectors(bio); 1250 struct request *__rq; 1251 1252 /* 1253 * check for front merge 1254 */ 1255 __rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key); 1256 if (__rq && elv_rq_merge_ok(__rq, bio)) { 1257 *req = __rq; 1258 return ELEVATOR_FRONT_MERGE; 1259 } 1260 1261 return ELEVATOR_NO_MERGE; 1262} 1263 1264static void as_merged_request(struct request_queue *q, struct request *req, 1265 int type) 1266{ 1267 struct as_data *ad = q->elevator->elevator_data; 1268 1269 /* 1270 * if the merge was a front merge, we need to reposition request 1271 */ 1272 if (type == ELEVATOR_FRONT_MERGE) { 1273 as_del_rq_rb(ad, req); 1274 as_add_rq_rb(ad, req); 1275 /* 1276 * Note! At this stage of this and the next function, our next 1277 * request may not be optimal - eg the request may have "grown" 1278 * behind the disk head. We currently don't bother adjusting. 
1279 */ 1280 } 1281} 1282 1283static void as_merged_requests(struct request_queue *q, struct request *req, 1284 struct request *next) 1285{ 1286 /* 1287 * if next expires before rq, assign its expire time to arq 1288 * and move into next position (next will be deleted) in fifo 1289 */ 1290 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { 1291 if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { 1292 list_move(&req->queuelist, &next->queuelist); 1293 rq_set_fifo_time(req, rq_fifo_time(next)); 1294 } 1295 } 1296 1297 /* 1298 * kill knowledge of next, this one is a goner 1299 */ 1300 as_remove_queued_request(q, next); 1301 as_put_io_context(next); 1302 1303 RQ_SET_STATE(next, AS_RQ_MERGED); 1304} 1305 1306/* 1307 * This is executed in a "deferred" process context, by kblockd. It calls the 1308 * driver's request_fn so the driver can submit that request. 1309 * 1310 * IMPORTANT! This guy will reenter the elevator, so set up all queue global 1311 * state before calling, and don't rely on any state over calls. 1312 * 1313 * FIXME! dispatch queue is not a queue at all! 1314 */ 1315static void as_work_handler(struct work_struct *work) 1316{ 1317 struct as_data *ad = container_of(work, struct as_data, antic_work); 1318 struct request_queue *q = ad->q; 1319 unsigned long flags; 1320 1321 spin_lock_irqsave(q->queue_lock, flags); 1322 blk_start_queueing(q); 1323 spin_unlock_irqrestore(q->queue_lock, flags); 1324} 1325 1326static int as_may_queue(struct request_queue *q, int rw) 1327{ 1328 int ret = ELV_MQUEUE_MAY; 1329 struct as_data *ad = q->elevator->elevator_data; 1330 struct io_context *ioc; 1331 if (ad->antic_status == ANTIC_WAIT_REQ || 1332 ad->antic_status == ANTIC_WAIT_NEXT) { 1333 ioc = as_get_io_context(q->node); 1334 if (ad->io_context == ioc) 1335 ret = ELV_MQUEUE_MUST; 1336 put_io_context(ioc); 1337 } 1338 1339 return ret; 1340} 1341 1342static void as_exit_queue(struct elevator_queue *e) 1343{ 1344 struct as_data *ad = e->elevator_data; 1345 1346 del_timer_sync(&ad->antic_timer); 1347 cancel_work_sync(&ad->antic_work); 1348 1349 BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); 1350 BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); 1351 1352 put_io_context(ad->io_context); 1353 kfree(ad); 1354} 1355 1356/* 1357 * initialize elevator private data (as_data). 
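 *
 * In wall-clock terms the defaults set below come to roughly 125 ms read
 * fifo_expire (HZ/8), 250 ms write fifo_expire (HZ/4), 500 ms read
 * batch_expire (HZ/2), 125 ms write batch_expire (HZ/8) and ~6.7 ms
 * antic_expire (HZ/150, clamped to at least one jiffy), independent of HZ.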
1358 */ 1359static void *as_init_queue(struct request_queue *q) 1360{ 1361 struct as_data *ad; 1362 1363 ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node); 1364 if (!ad) 1365 return NULL; 1366 1367 ad->q = q; /* Identify what queue the data belongs to */ 1368 1369 /* anticipatory scheduling helpers */ 1370 ad->antic_timer.function = as_antic_timeout; 1371 ad->antic_timer.data = (unsigned long)q; 1372 init_timer(&ad->antic_timer); 1373 INIT_WORK(&ad->antic_work, as_work_handler); 1374 1375 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); 1376 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); 1377 ad->sort_list[REQ_SYNC] = RB_ROOT; 1378 ad->sort_list[REQ_ASYNC] = RB_ROOT; 1379 ad->fifo_expire[REQ_SYNC] = default_read_expire; 1380 ad->fifo_expire[REQ_ASYNC] = default_write_expire; 1381 ad->antic_expire = default_antic_expire; 1382 ad->batch_expire[REQ_SYNC] = default_read_batch_expire; 1383 ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; 1384 1385 ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; 1386 ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; 1387 if (ad->write_batch_count < 2) 1388 ad->write_batch_count = 2; 1389 1390 return ad; 1391} 1392 1393/* 1394 * sysfs parts below 1395 */ 1396 1397static ssize_t 1398as_var_show(unsigned int var, char *page) 1399{ 1400 return sprintf(page, "%d\n", var); 1401} 1402 1403static ssize_t 1404as_var_store(unsigned long *var, const char *page, size_t count) 1405{ 1406 char *p = (char *) page; 1407 1408 *var = simple_strtoul(p, &p, 10); 1409 return count; 1410} 1411 1412static ssize_t est_time_show(struct elevator_queue *e, char *page) 1413{ 1414 struct as_data *ad = e->elevator_data; 1415 int pos = 0; 1416 1417 pos += sprintf(page+pos, "%lu %% exit probability\n", 1418 100*ad->exit_prob/256); 1419 pos += sprintf(page+pos, "%lu %% probability of exiting without a " 1420 "cooperating process submitting IO\n", 1421 100*ad->exit_no_coop/256); 1422 pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean); 1423 pos += sprintf(page+pos, "%llu sectors new seek distance\n", 1424 (unsigned long long)ad->new_seek_mean); 1425 1426 return pos; 1427} 1428 1429#define SHOW_FUNCTION(__FUNC, __VAR) \ 1430static ssize_t __FUNC(struct elevator_queue *e, char *page) \ 1431{ \ 1432 struct as_data *ad = e->elevator_data; \ 1433 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ 1434} 1435SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); 1436SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); 1437SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); 1438SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); 1439SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); 1440#undef SHOW_FUNCTION 1441 1442#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ 1443static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ 1444{ \ 1445 struct as_data *ad = e->elevator_data; \ 1446 int ret = as_var_store(__PTR, (page), count); \ 1447 if (*(__PTR) < (MIN)) \ 1448 *(__PTR) = (MIN); \ 1449 else if (*(__PTR) > (MAX)) \ 1450 *(__PTR) = (MAX); \ 1451 *(__PTR) = msecs_to_jiffies(*(__PTR)); \ 1452 return ret; \ 1453} 1454STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); 1455STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); 1456STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); 1457STORE_FUNCTION(as_read_batch_expire_store, 1458 &ad->batch_expire[REQ_SYNC], 0, INT_MAX); 
STORE_FUNCTION(as_write_batch_expire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION

#define AS_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)

static struct elv_fs_entry as_attrs[] = {
	__ATTR_RO(est_time),
	AS_ATTR(read_expire),
	AS_ATTR(write_expire),
	AS_ATTR(antic_expire),
	AS_ATTR(read_batch_expire),
	AS_ATTR(write_batch_expire),
	__ATTR_NULL
};

static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn =		as_merge,
		.elevator_merged_fn =		as_merged_request,
		.elevator_merge_req_fn =	as_merged_requests,
		.elevator_dispatch_fn =		as_dispatch_request,
		.elevator_add_req_fn =		as_add_request,
		.elevator_activate_req_fn =	as_activate_request,
		.elevator_deactivate_req_fn =	as_deactivate_request,
		.elevator_queue_empty_fn =	as_queue_empty,
		.elevator_completed_req_fn =	as_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_may_queue_fn =	as_may_queue,
		.elevator_init_fn =		as_init_queue,
		.elevator_exit_fn =		as_exit_queue,
		.trim =				as_trim,
	},

	.elevator_attrs = as_attrs,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};

static int __init as_init(void)
{
	elv_register(&iosched_as);

	return 0;
}

static void __exit as_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_as);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);
	synchronize_rcu();
}

module_init(as_init);
module_exit(as_exit);

MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");
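The head-distance scoring in as_choose_req() is easiest to see with concrete numbers. The following is a minimal, self-contained userspace sketch, not part of the kernel file; the helper names (choose_score, pick_request) and the sample sectors are invented purely for illustration:

/*
 * Illustrative userspace sketch of the forward/backward scoring used by
 * as_choose_req() above.  Names and inputs are hypothetical.
 */
#include <stdio.h>

#define MAXBACK (1024 * 1024)	/* max backward distance, in sectors */
#define BACK_PENALTY 2

/* Return the weighted distance from the head, or -1 if "wrapped". */
static long long choose_score(unsigned long long head, unsigned long long s)
{
	if (s >= head)
		return (long long)(s - head);
	if (s + MAXBACK >= head)
		return (long long)(head - s) * BACK_PENALTY;
	return -1;	/* too far behind the head */
}

/* Pick whichever of two sectors the scheduler would service first. */
static unsigned long long pick_request(unsigned long long head,
				       unsigned long long s1,
				       unsigned long long s2)
{
	long long d1 = choose_score(head, s1);
	long long d2 = choose_score(head, s2);

	if (d1 < 0 && d2 < 0)
		return s1 <= s2 ? s1 : s2;	/* both wrapped: lowest sector */
	if (d1 < 0)
		return s2;
	if (d2 < 0)
		return s1;
	if (d1 != d2)
		return d1 < d2 ? s1 : s2;
	return s1 >= s2 ? s1 : s2;		/* tie-break as in the original */
}

int main(void)
{
	unsigned long long head = 1000;

	printf("head %llu: 1100 vs 900 -> %llu\n", head,
	       pick_request(head, 1100, 900));
	printf("head %llu: 1300 vs 900 -> %llu\n", head,
	       pick_request(head, 1300, 900));
	return 0;
}

With the head at sector 1000 this prefers a request 100 sectors ahead over one 100 sectors behind, because a short backward seek is charged twice its distance; only when the forward candidate is far enough away (300 sectors in the second call) does the backward one win.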