Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v2.6.31-rc3, 1135 lines, 25 kB
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec) \
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if (bio_discard(bio) != bio_discard(rq->bio))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	/*
	 * Don't merge if failfast settings don't match
	 */
	if (bio_failfast_dev(bio) != blk_failfast_dev(rq) ||
	    bio_failfast_transport(bio) != blk_failfast_transport(rq) ||
	    bio_failfast_driver(bio) != blk_failfast_driver(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
					"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort instead into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (blk_discard_rq(rq) != blk_discard_rq(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}

void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
				- queue_in_flight(q);

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *next = NULL;

		if (!list_empty(&q->queue_head))
			next = list_entry_rq(q->queue_head.next);

		if (!queue_in_flight(q) &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			__blk_run_queue(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return count;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	strstrip(elevator_name);

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator)
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);