Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.39-rc4, 2819 lines, 76 kB
1/* 2 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 11/* 12 * This handles all read/write requests to block devices 13 */ 14#include <linux/kernel.h> 15#include <linux/module.h> 16#include <linux/backing-dev.h> 17#include <linux/bio.h> 18#include <linux/blkdev.h> 19#include <linux/highmem.h> 20#include <linux/mm.h> 21#include <linux/kernel_stat.h> 22#include <linux/string.h> 23#include <linux/init.h> 24#include <linux/completion.h> 25#include <linux/slab.h> 26#include <linux/swap.h> 27#include <linux/writeback.h> 28#include <linux/task_io_accounting_ops.h> 29#include <linux/fault-inject.h> 30#include <linux/list_sort.h> 31 32#define CREATE_TRACE_POINTS 33#include <trace/events/block.h> 34 35#include "blk.h" 36 37EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); 38EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 39EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 40 41static int __make_request(struct request_queue *q, struct bio *bio); 42 43/* 44 * For the allocated request tables 45 */ 46static struct kmem_cache *request_cachep; 47 48/* 49 * For queue allocation 50 */ 51struct kmem_cache *blk_requestq_cachep; 52 53/* 54 * Controlling structure to kblockd 55 */ 56static struct workqueue_struct *kblockd_workqueue; 57 58static void drive_stat_acct(struct request *rq, int new_io) 59{ 60 struct hd_struct *part; 61 int rw = rq_data_dir(rq); 62 int cpu; 63 64 if (!blk_do_io_stat(rq)) 65 return; 66 67 cpu = part_stat_lock(); 68 69 if (!new_io) { 70 part = rq->part; 71 part_stat_inc(cpu, part, merges[rw]); 72 } else { 73 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 74 if (!hd_struct_try_get(part)) { 75 /* 76 * The partition is already being removed, 77 * the request will be accounted on the disk only 78 * 79 * We take a reference on disk->part0 although that 80 * partition will never be deleted, so we can treat 81 * it as any other partition. 82 */ 83 part = &rq->rq_disk->part0; 84 hd_struct_get(part); 85 } 86 part_round_stats(cpu, part); 87 part_inc_in_flight(part, rw); 88 rq->part = part; 89 } 90 91 part_stat_unlock(); 92} 93 94void blk_queue_congestion_threshold(struct request_queue *q) 95{ 96 int nr; 97 98 nr = q->nr_requests - (q->nr_requests / 8) + 1; 99 if (nr > q->nr_requests) 100 nr = q->nr_requests; 101 q->nr_congestion_on = nr; 102 103 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; 104 if (nr < 1) 105 nr = 1; 106 q->nr_congestion_off = nr; 107} 108 109/** 110 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info 111 * @bdev: device 112 * 113 * Locates the passed device's request queue and returns the address of its 114 * backing_dev_info 115 * 116 * Will return NULL if the request queue cannot be located. 
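 *
 * As a minimal, hedged usage sketch (the bdev argument and the message below
 * are assumed for illustration and are not defined in this file):
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		pr_info("read-ahead window: %lu pages\n", bdi->ra_pages);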
117 */ 118struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 119{ 120 struct backing_dev_info *ret = NULL; 121 struct request_queue *q = bdev_get_queue(bdev); 122 123 if (q) 124 ret = &q->backing_dev_info; 125 return ret; 126} 127EXPORT_SYMBOL(blk_get_backing_dev_info); 128 129void blk_rq_init(struct request_queue *q, struct request *rq) 130{ 131 memset(rq, 0, sizeof(*rq)); 132 133 INIT_LIST_HEAD(&rq->queuelist); 134 INIT_LIST_HEAD(&rq->timeout_list); 135 rq->cpu = -1; 136 rq->q = q; 137 rq->__sector = (sector_t) -1; 138 INIT_HLIST_NODE(&rq->hash); 139 RB_CLEAR_NODE(&rq->rb_node); 140 rq->cmd = rq->__cmd; 141 rq->cmd_len = BLK_MAX_CDB; 142 rq->tag = -1; 143 rq->ref_count = 1; 144 rq->start_time = jiffies; 145 set_start_time_ns(rq); 146 rq->part = NULL; 147} 148EXPORT_SYMBOL(blk_rq_init); 149 150static void req_bio_endio(struct request *rq, struct bio *bio, 151 unsigned int nbytes, int error) 152{ 153 if (error) 154 clear_bit(BIO_UPTODATE, &bio->bi_flags); 155 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 156 error = -EIO; 157 158 if (unlikely(nbytes > bio->bi_size)) { 159 printk(KERN_ERR "%s: want %u bytes done, %u left\n", 160 __func__, nbytes, bio->bi_size); 161 nbytes = bio->bi_size; 162 } 163 164 if (unlikely(rq->cmd_flags & REQ_QUIET)) 165 set_bit(BIO_QUIET, &bio->bi_flags); 166 167 bio->bi_size -= nbytes; 168 bio->bi_sector += (nbytes >> 9); 169 170 if (bio_integrity(bio)) 171 bio_integrity_advance(bio, nbytes); 172 173 /* don't actually finish bio if it's part of flush sequence */ 174 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 175 bio_endio(bio, error); 176} 177 178void blk_dump_rq_flags(struct request *rq, char *msg) 179{ 180 int bit; 181 182 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, 183 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 184 rq->cmd_flags); 185 186 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 187 (unsigned long long)blk_rq_pos(rq), 188 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 189 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 190 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 191 192 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 193 printk(KERN_INFO " cdb: "); 194 for (bit = 0; bit < BLK_MAX_CDB; bit++) 195 printk("%02x ", rq->cmd[bit]); 196 printk("\n"); 197 } 198} 199EXPORT_SYMBOL(blk_dump_rq_flags); 200 201static void blk_delay_work(struct work_struct *work) 202{ 203 struct request_queue *q; 204 205 q = container_of(work, struct request_queue, delay_work.work); 206 spin_lock_irq(q->queue_lock); 207 __blk_run_queue(q); 208 spin_unlock_irq(q->queue_lock); 209} 210 211/** 212 * blk_delay_queue - restart queueing after defined interval 213 * @q: The &struct request_queue in question 214 * @msecs: Delay in msecs 215 * 216 * Description: 217 * Sometimes queueing needs to be postponed for a little while, to allow 218 * resources to come back. This function will make sure that queueing is 219 * restarted around the specified time. 220 */ 221void blk_delay_queue(struct request_queue *q, unsigned long msecs) 222{ 223 queue_delayed_work(kblockd_workqueue, &q->delay_work, 224 msecs_to_jiffies(msecs)); 225} 226EXPORT_SYMBOL(blk_delay_queue); 227 228/** 229 * blk_start_queue - restart a previously stopped queue 230 * @q: The &struct request_queue in question 231 * 232 * Description: 233 * blk_start_queue() will clear the stop flag on the queue, and call 234 * the request_fn for the queue if it was in a stopped state when 235 * entered. Also see blk_stop_queue(). Queue lock must be held. 
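 *
 *    As a hedged sketch of the expected calling pattern (q is assumed to be
 *    the driver's queue): take the lock with interrupts disabled, restart
 *    the queue, then release the lock:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_start_queue(q);
 *	spin_unlock_irq(q->queue_lock);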
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it is ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	__cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blk_throtl_exit() to be called with the queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	/*
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q: The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on our
 *    behalf.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
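 *
 *    Unlike __blk_run_queue() above, this variant takes the queue lock
 *    itself, so a hedged example caller, e.g. a driver handler that learns
 *    its hardware can accept work again, can simply do:
 *
 *	blk_run_queue(q);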
339 */ 340void blk_run_queue(struct request_queue *q) 341{ 342 unsigned long flags; 343 344 spin_lock_irqsave(q->queue_lock, flags); 345 __blk_run_queue(q); 346 spin_unlock_irqrestore(q->queue_lock, flags); 347} 348EXPORT_SYMBOL(blk_run_queue); 349 350void blk_put_queue(struct request_queue *q) 351{ 352 kobject_put(&q->kobj); 353} 354 355/* 356 * Note: If a driver supplied the queue lock, it should not zap that lock 357 * unexpectedly as some queue cleanup components like elevator_exit() and 358 * blk_throtl_exit() need queue lock. 359 */ 360void blk_cleanup_queue(struct request_queue *q) 361{ 362 /* 363 * We know we have process context here, so we can be a little 364 * cautious and ensure that pending block actions on this device 365 * are done before moving on. Going into this function, we should 366 * not have processes doing IO to this device. 367 */ 368 blk_sync_queue(q); 369 370 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); 371 mutex_lock(&q->sysfs_lock); 372 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 373 mutex_unlock(&q->sysfs_lock); 374 375 if (q->elevator) 376 elevator_exit(q->elevator); 377 378 blk_throtl_exit(q); 379 380 blk_put_queue(q); 381} 382EXPORT_SYMBOL(blk_cleanup_queue); 383 384static int blk_init_free_list(struct request_queue *q) 385{ 386 struct request_list *rl = &q->rq; 387 388 if (unlikely(rl->rq_pool)) 389 return 0; 390 391 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 392 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 393 rl->elvpriv = 0; 394 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 395 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 396 397 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 398 mempool_free_slab, request_cachep, q->node); 399 400 if (!rl->rq_pool) 401 return -ENOMEM; 402 403 return 0; 404} 405 406struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 407{ 408 return blk_alloc_queue_node(gfp_mask, -1); 409} 410EXPORT_SYMBOL(blk_alloc_queue); 411 412struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 413{ 414 struct request_queue *q; 415 int err; 416 417 q = kmem_cache_alloc_node(blk_requestq_cachep, 418 gfp_mask | __GFP_ZERO, node_id); 419 if (!q) 420 return NULL; 421 422 q->backing_dev_info.ra_pages = 423 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 424 q->backing_dev_info.state = 0; 425 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 426 q->backing_dev_info.name = "block"; 427 428 err = bdi_init(&q->backing_dev_info); 429 if (err) { 430 kmem_cache_free(blk_requestq_cachep, q); 431 return NULL; 432 } 433 434 if (blk_throtl_init(q)) { 435 kmem_cache_free(blk_requestq_cachep, q); 436 return NULL; 437 } 438 439 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, 440 laptop_mode_timer_fn, (unsigned long) q); 441 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 442 INIT_LIST_HEAD(&q->timeout_list); 443 INIT_LIST_HEAD(&q->flush_queue[0]); 444 INIT_LIST_HEAD(&q->flush_queue[1]); 445 INIT_LIST_HEAD(&q->flush_data_in_flight); 446 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); 447 448 kobject_init(&q->kobj, &blk_queue_ktype); 449 450 mutex_init(&q->sysfs_lock); 451 spin_lock_init(&q->__queue_lock); 452 453 /* 454 * By default initialize queue_lock to internal lock and driver can 455 * override it later if need be. 
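	 *
	 * As a hedged illustration only (my_request_fn and my_driver_lock are
	 * assumed driver symbols, not defined here), the override typically
	 * happens when the driver sets the queue up through the legacy path:
	 *
	 *	q = blk_init_queue(my_request_fn, &my_driver_lock);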
456 */ 457 q->queue_lock = &q->__queue_lock; 458 459 return q; 460} 461EXPORT_SYMBOL(blk_alloc_queue_node); 462 463/** 464 * blk_init_queue - prepare a request queue for use with a block device 465 * @rfn: The function to be called to process requests that have been 466 * placed on the queue. 467 * @lock: Request queue spin lock 468 * 469 * Description: 470 * If a block device wishes to use the standard request handling procedures, 471 * which sorts requests and coalesces adjacent requests, then it must 472 * call blk_init_queue(). The function @rfn will be called when there 473 * are requests on the queue that need to be processed. If the device 474 * supports plugging, then @rfn may not be called immediately when requests 475 * are available on the queue, but may be called at some time later instead. 476 * Plugged queues are generally unplugged when a buffer belonging to one 477 * of the requests on the queue is needed, or due to memory pressure. 478 * 479 * @rfn is not required, or even expected, to remove all requests off the 480 * queue, but only as many as it can handle at a time. If it does leave 481 * requests on the queue, it is responsible for arranging that the requests 482 * get dealt with eventually. 483 * 484 * The queue spin lock must be held while manipulating the requests on the 485 * request queue; this lock will be taken also from interrupt context, so irq 486 * disabling is needed for it. 487 * 488 * Function returns a pointer to the initialized request queue, or %NULL if 489 * it didn't succeed. 490 * 491 * Note: 492 * blk_init_queue() must be paired with a blk_cleanup_queue() call 493 * when the block device is deactivated (such as at module unload). 494 **/ 495 496struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 497{ 498 return blk_init_queue_node(rfn, lock, -1); 499} 500EXPORT_SYMBOL(blk_init_queue); 501 502struct request_queue * 503blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 504{ 505 struct request_queue *uninit_q, *q; 506 507 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); 508 if (!uninit_q) 509 return NULL; 510 511 q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id); 512 if (!q) 513 blk_cleanup_queue(uninit_q); 514 515 return q; 516} 517EXPORT_SYMBOL(blk_init_queue_node); 518 519struct request_queue * 520blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, 521 spinlock_t *lock) 522{ 523 return blk_init_allocated_queue_node(q, rfn, lock, -1); 524} 525EXPORT_SYMBOL(blk_init_allocated_queue); 526 527struct request_queue * 528blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, 529 spinlock_t *lock, int node_id) 530{ 531 if (!q) 532 return NULL; 533 534 q->node = node_id; 535 if (blk_init_free_list(q)) 536 return NULL; 537 538 q->request_fn = rfn; 539 q->prep_rq_fn = NULL; 540 q->unprep_rq_fn = NULL; 541 q->queue_flags = QUEUE_FLAG_DEFAULT; 542 543 /* Override internal queue lock with supplied lock pointer */ 544 if (lock) 545 q->queue_lock = lock; 546 547 /* 548 * This also sets hw/phys segments, boundary and size 549 */ 550 blk_queue_make_request(q, __make_request); 551 552 q->sg_reserved_size = INT_MAX; 553 554 /* 555 * all done 556 */ 557 if (!elevator_init(q, NULL)) { 558 blk_queue_congestion_threshold(q); 559 return q; 560 } 561 562 return NULL; 563} 564EXPORT_SYMBOL(blk_init_allocated_queue_node); 565 566int blk_get_queue(struct request_queue *q) 567{ 568 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 569 kobject_get(&q->kobj); 570 
return 0; 571 } 572 573 return 1; 574} 575 576static inline void blk_free_request(struct request_queue *q, struct request *rq) 577{ 578 BUG_ON(rq->cmd_flags & REQ_ON_PLUG); 579 580 if (rq->cmd_flags & REQ_ELVPRIV) 581 elv_put_request(q, rq); 582 mempool_free(rq, q->rq.rq_pool); 583} 584 585static struct request * 586blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) 587{ 588 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 589 590 if (!rq) 591 return NULL; 592 593 blk_rq_init(q, rq); 594 595 rq->cmd_flags = flags | REQ_ALLOCED; 596 597 if (priv) { 598 if (unlikely(elv_set_request(q, rq, gfp_mask))) { 599 mempool_free(rq, q->rq.rq_pool); 600 return NULL; 601 } 602 rq->cmd_flags |= REQ_ELVPRIV; 603 } 604 605 return rq; 606} 607 608/* 609 * ioc_batching returns true if the ioc is a valid batching request and 610 * should be given priority access to a request. 611 */ 612static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 613{ 614 if (!ioc) 615 return 0; 616 617 /* 618 * Make sure the process is able to allocate at least 1 request 619 * even if the batch times out, otherwise we could theoretically 620 * lose wakeups. 621 */ 622 return ioc->nr_batch_requests == q->nr_batching || 623 (ioc->nr_batch_requests > 0 624 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); 625} 626 627/* 628 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This 629 * will cause the process to be a "batcher" on all queues in the system. This 630 * is the behaviour we want though - once it gets a wakeup it should be given 631 * a nice run. 632 */ 633static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 634{ 635 if (!ioc || ioc_batching(q, ioc)) 636 return; 637 638 ioc->nr_batch_requests = q->nr_batching; 639 ioc->last_waited = jiffies; 640} 641 642static void __freed_request(struct request_queue *q, int sync) 643{ 644 struct request_list *rl = &q->rq; 645 646 if (rl->count[sync] < queue_congestion_off_threshold(q)) 647 blk_clear_queue_congested(q, sync); 648 649 if (rl->count[sync] + 1 <= q->nr_requests) { 650 if (waitqueue_active(&rl->wait[sync])) 651 wake_up(&rl->wait[sync]); 652 653 blk_clear_queue_full(q, sync); 654 } 655} 656 657/* 658 * A request has just been released. Account for it, update the full and 659 * congestion status, wake up any waiters. Called under q->queue_lock. 660 */ 661static void freed_request(struct request_queue *q, int sync, int priv) 662{ 663 struct request_list *rl = &q->rq; 664 665 rl->count[sync]--; 666 if (priv) 667 rl->elvpriv--; 668 669 __freed_request(q, sync); 670 671 if (unlikely(rl->starved[sync ^ 1])) 672 __freed_request(q, sync ^ 1); 673} 674 675/* 676 * Determine if elevator data should be initialized when allocating the 677 * request associated with @bio. 678 */ 679static bool blk_rq_should_init_elevator(struct bio *bio) 680{ 681 if (!bio) 682 return true; 683 684 /* 685 * Flush requests do not use the elevator so skip initialization. 686 * This allows a request to share the flush and elevator data. 687 */ 688 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) 689 return false; 690 691 return true; 692} 693 694/* 695 * Get a free request, queue_lock must be held. 696 * Returns NULL on failure, with queue_lock held. 697 * Returns !NULL on success, with queue_lock *not held*. 
698 */ 699static struct request *get_request(struct request_queue *q, int rw_flags, 700 struct bio *bio, gfp_t gfp_mask) 701{ 702 struct request *rq = NULL; 703 struct request_list *rl = &q->rq; 704 struct io_context *ioc = NULL; 705 const bool is_sync = rw_is_sync(rw_flags) != 0; 706 int may_queue, priv = 0; 707 708 may_queue = elv_may_queue(q, rw_flags); 709 if (may_queue == ELV_MQUEUE_NO) 710 goto rq_starved; 711 712 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { 713 if (rl->count[is_sync]+1 >= q->nr_requests) { 714 ioc = current_io_context(GFP_ATOMIC, q->node); 715 /* 716 * The queue will fill after this allocation, so set 717 * it as full, and mark this process as "batching". 718 * This process will be allowed to complete a batch of 719 * requests, others will be blocked. 720 */ 721 if (!blk_queue_full(q, is_sync)) { 722 ioc_set_batching(q, ioc); 723 blk_set_queue_full(q, is_sync); 724 } else { 725 if (may_queue != ELV_MQUEUE_MUST 726 && !ioc_batching(q, ioc)) { 727 /* 728 * The queue is full and the allocating 729 * process is not a "batcher", and not 730 * exempted by the IO scheduler 731 */ 732 goto out; 733 } 734 } 735 } 736 blk_set_queue_congested(q, is_sync); 737 } 738 739 /* 740 * Only allow batching queuers to allocate up to 50% over the defined 741 * limit of requests, otherwise we could have thousands of requests 742 * allocated with any setting of ->nr_requests 743 */ 744 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 745 goto out; 746 747 rl->count[is_sync]++; 748 rl->starved[is_sync] = 0; 749 750 if (blk_rq_should_init_elevator(bio)) { 751 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 752 if (priv) 753 rl->elvpriv++; 754 } 755 756 if (blk_queue_io_stat(q)) 757 rw_flags |= REQ_IO_STAT; 758 spin_unlock_irq(q->queue_lock); 759 760 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); 761 if (unlikely(!rq)) { 762 /* 763 * Allocation failed presumably due to memory. Undo anything 764 * we might have messed up. 765 * 766 * Allocating task should really be put onto the front of the 767 * wait queue, but this is pretty rare. 768 */ 769 spin_lock_irq(q->queue_lock); 770 freed_request(q, is_sync, priv); 771 772 /* 773 * in the very unlikely event that allocation failed and no 774 * requests for this direction was pending, mark us starved 775 * so that freeing of a request in the other direction will 776 * notice us. another possible fix would be to split the 777 * rq mempool into READ and WRITE 778 */ 779rq_starved: 780 if (unlikely(rl->count[is_sync] == 0)) 781 rl->starved[is_sync] = 1; 782 783 goto out; 784 } 785 786 /* 787 * ioc may be NULL here, and ioc_batching will be false. That's 788 * OK, if the queue is under the request limit then requests need 789 * not count toward the nr_batch_requests limit. There will always 790 * be some limit enforced by BLK_BATCH_TIME. 791 */ 792 if (ioc_batching(q, ioc)) 793 ioc->nr_batch_requests--; 794 795 trace_block_getrq(q, bio, rw_flags & 1); 796out: 797 return rq; 798} 799 800/* 801 * No available requests for this queue, wait for some requests to become 802 * available. 803 * 804 * Called with q->queue_lock held, and returns with it unlocked. 
 */
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period of time.
		 * See ioc_batching, ioc_set_batching.
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);

/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by the block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * is set to describe the memory buffers. bio_data_dir() will then return
 * the needed direction of the request. (All bio's in the passed bio-chain
 * must be set up accordingly.)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
 * need bouncing, by calling the appropriate masked or flagged allocator,
 * suitable for the target device. Otherwise the call to blk_queue_bounce will
 * BUG.
 *
 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
 * anything but the first bio in the chain. Otherwise you risk waiting for IO
 * completion of a bio that hasn't been submitted yet, thus resulting in a
 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
 * of bio_alloc(), as that avoids the mempool deadlock.
 * If possible a big IO should be split into smaller parts when allocation
 * fails. Partial allocation should not be an error, or you risk a live-lock.
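 *
 * A rough, hedged sketch of a caller follows (error handling trimmed; q,
 * page and len are assumed to exist in the calling driver):
 *
 *	struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
 *	struct request *rq;
 *
 *	bio->bi_rw |= WRITE;
 *	bio_add_pc_page(q, bio, page, len, 0);
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;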
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);

/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more. When that condition happens, we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where);
}

/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they don't
 *    block the whole kernel from preemption during request execution. This is
 *    accomplished normally by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth. We use the head
 *    of the queue for things like a QUEUE_FULL message from a device, or a
 *    host that is unable to accept a particular command.
 */
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ?
ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 974 unsigned long flags; 975 976 /* 977 * tell I/O scheduler that this isn't a regular read/write (ie it 978 * must not attempt merges on this) and that it acts as a soft 979 * barrier 980 */ 981 rq->cmd_type = REQ_TYPE_SPECIAL; 982 983 rq->special = data; 984 985 spin_lock_irqsave(q->queue_lock, flags); 986 987 /* 988 * If command is tagged, release the tag 989 */ 990 if (blk_rq_tagged(rq)) 991 blk_queue_end_tag(q, rq); 992 993 add_acct_request(q, rq, where); 994 __blk_run_queue(q); 995 spin_unlock_irqrestore(q->queue_lock, flags); 996} 997EXPORT_SYMBOL(blk_insert_request); 998 999static void part_round_stats_single(int cpu, struct hd_struct *part, 1000 unsigned long now) 1001{ 1002 if (now == part->stamp) 1003 return; 1004 1005 if (part_in_flight(part)) { 1006 __part_stat_add(cpu, part, time_in_queue, 1007 part_in_flight(part) * (now - part->stamp)); 1008 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1009 } 1010 part->stamp = now; 1011} 1012 1013/** 1014 * part_round_stats() - Round off the performance stats on a struct disk_stats. 1015 * @cpu: cpu number for stats access 1016 * @part: target partition 1017 * 1018 * The average IO queue length and utilisation statistics are maintained 1019 * by observing the current state of the queue length and the amount of 1020 * time it has been in this state for. 1021 * 1022 * Normally, that accounting is done on IO completion, but that can result 1023 * in more than a second's worth of IO being accounted for within any one 1024 * second, leading to >100% utilisation. To deal with that, we call this 1025 * function to do a round-off before returning the results when reading 1026 * /proc/diskstats. This accounts immediately for all queue usage up to 1027 * the current jiffies and restarts the counters again. 1028 */ 1029void part_round_stats(int cpu, struct hd_struct *part) 1030{ 1031 unsigned long now = jiffies; 1032 1033 if (part->partno) 1034 part_round_stats_single(cpu, &part_to_disk(part)->part0, now); 1035 part_round_stats_single(cpu, part, now); 1036} 1037EXPORT_SYMBOL_GPL(part_round_stats); 1038 1039/* 1040 * queue lock must be held 1041 */ 1042void __blk_put_request(struct request_queue *q, struct request *req) 1043{ 1044 if (unlikely(!q)) 1045 return; 1046 if (unlikely(--req->ref_count)) 1047 return; 1048 1049 elv_completed_request(q, req); 1050 1051 /* this is a bio leak */ 1052 WARN_ON(req->bio != NULL); 1053 1054 /* 1055 * Request may not have originated from ll_rw_blk. if not, 1056 * it didn't come out of our reserved rq pools 1057 */ 1058 if (req->cmd_flags & REQ_ALLOCED) { 1059 int is_sync = rq_is_sync(req) != 0; 1060 int priv = req->cmd_flags & REQ_ELVPRIV; 1061 1062 BUG_ON(!list_empty(&req->queuelist)); 1063 BUG_ON(!hlist_unhashed(&req->hash)); 1064 1065 blk_free_request(q, req); 1066 freed_request(q, is_sync, priv); 1067 } 1068} 1069EXPORT_SYMBOL_GPL(__blk_put_request); 1070 1071void blk_put_request(struct request *req) 1072{ 1073 unsigned long flags; 1074 struct request_queue *q = req->q; 1075 1076 spin_lock_irqsave(q->queue_lock, flags); 1077 __blk_put_request(q, req); 1078 spin_unlock_irqrestore(q->queue_lock, flags); 1079} 1080EXPORT_SYMBOL(blk_put_request); 1081 1082/** 1083 * blk_add_request_payload - add a payload to a request 1084 * @rq: request to update 1085 * @page: page backing the payload 1086 * @len: length of the payload. 1087 * 1088 * This allows to later add a payload to an already submitted request by 1089 * a block driver. 
The driver needs to take care of freeing the payload 1090 * itself. 1091 * 1092 * Note that this is a quite horrible hack and nothing but handling of 1093 * discard requests should ever use it. 1094 */ 1095void blk_add_request_payload(struct request *rq, struct page *page, 1096 unsigned int len) 1097{ 1098 struct bio *bio = rq->bio; 1099 1100 bio->bi_io_vec->bv_page = page; 1101 bio->bi_io_vec->bv_offset = 0; 1102 bio->bi_io_vec->bv_len = len; 1103 1104 bio->bi_size = len; 1105 bio->bi_vcnt = 1; 1106 bio->bi_phys_segments = 1; 1107 1108 rq->__data_len = rq->resid_len = len; 1109 rq->nr_phys_segments = 1; 1110 rq->buffer = bio_data(bio); 1111} 1112EXPORT_SYMBOL_GPL(blk_add_request_payload); 1113 1114static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, 1115 struct bio *bio) 1116{ 1117 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1118 1119 /* 1120 * Debug stuff, kill later 1121 */ 1122 if (!rq_mergeable(req)) { 1123 blk_dump_rq_flags(req, "back"); 1124 return false; 1125 } 1126 1127 if (!ll_back_merge_fn(q, req, bio)) 1128 return false; 1129 1130 trace_block_bio_backmerge(q, bio); 1131 1132 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1133 blk_rq_set_mixed_merge(req); 1134 1135 req->biotail->bi_next = bio; 1136 req->biotail = bio; 1137 req->__data_len += bio->bi_size; 1138 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1139 1140 drive_stat_acct(req, 0); 1141 return true; 1142} 1143 1144static bool bio_attempt_front_merge(struct request_queue *q, 1145 struct request *req, struct bio *bio) 1146{ 1147 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1148 sector_t sector; 1149 1150 /* 1151 * Debug stuff, kill later 1152 */ 1153 if (!rq_mergeable(req)) { 1154 blk_dump_rq_flags(req, "front"); 1155 return false; 1156 } 1157 1158 if (!ll_front_merge_fn(q, req, bio)) 1159 return false; 1160 1161 trace_block_bio_frontmerge(q, bio); 1162 1163 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) 1164 blk_rq_set_mixed_merge(req); 1165 1166 sector = bio->bi_sector; 1167 1168 bio->bi_next = req->bio; 1169 req->bio = bio; 1170 1171 /* 1172 * may not be valid. if the low level driver said 1173 * it didn't need a bounce buffer then it better 1174 * not touch req->buffer either... 1175 */ 1176 req->buffer = bio_data(bio); 1177 req->__sector = bio->bi_sector; 1178 req->__data_len += bio->bi_size; 1179 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1180 1181 drive_stat_acct(req, 0); 1182 return true; 1183} 1184 1185/* 1186 * Attempts to merge with the plugged list in the current process. Returns 1187 * true if merge was successful, otherwise false. 
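 *
 * The plugged list only exists while the submitting task holds a plug; as a
 * hedged sketch, a submitter typically builds that list like this (the bio
 * submission in the middle is elided):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit a batch of bios, e.g. via submit_bio() ...
 *	blk_finish_plug(&plug);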
1188 */ 1189static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, 1190 struct bio *bio) 1191{ 1192 struct blk_plug *plug; 1193 struct request *rq; 1194 bool ret = false; 1195 1196 plug = tsk->plug; 1197 if (!plug) 1198 goto out; 1199 1200 list_for_each_entry_reverse(rq, &plug->list, queuelist) { 1201 int el_ret; 1202 1203 if (rq->q != q) 1204 continue; 1205 1206 el_ret = elv_try_merge(rq, bio); 1207 if (el_ret == ELEVATOR_BACK_MERGE) { 1208 ret = bio_attempt_back_merge(q, rq, bio); 1209 if (ret) 1210 break; 1211 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1212 ret = bio_attempt_front_merge(q, rq, bio); 1213 if (ret) 1214 break; 1215 } 1216 } 1217out: 1218 return ret; 1219} 1220 1221void init_request_from_bio(struct request *req, struct bio *bio) 1222{ 1223 req->cpu = bio->bi_comp_cpu; 1224 req->cmd_type = REQ_TYPE_FS; 1225 1226 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; 1227 if (bio->bi_rw & REQ_RAHEAD) 1228 req->cmd_flags |= REQ_FAILFAST_MASK; 1229 1230 req->errors = 0; 1231 req->__sector = bio->bi_sector; 1232 req->ioprio = bio_prio(bio); 1233 blk_rq_bio_prep(req->q, req, bio); 1234} 1235 1236static int __make_request(struct request_queue *q, struct bio *bio) 1237{ 1238 const bool sync = !!(bio->bi_rw & REQ_SYNC); 1239 struct blk_plug *plug; 1240 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; 1241 struct request *req; 1242 1243 /* 1244 * low level driver can indicate that it wants pages above a 1245 * certain limit bounced to low memory (ie for highmem, or even 1246 * ISA dma in theory) 1247 */ 1248 blk_queue_bounce(q, &bio); 1249 1250 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { 1251 spin_lock_irq(q->queue_lock); 1252 where = ELEVATOR_INSERT_FLUSH; 1253 goto get_rq; 1254 } 1255 1256 /* 1257 * Check if we can merge with the plugged list before grabbing 1258 * any locks. 1259 */ 1260 if (attempt_plug_merge(current, q, bio)) 1261 goto out; 1262 1263 spin_lock_irq(q->queue_lock); 1264 1265 el_ret = elv_merge(q, &req, bio); 1266 if (el_ret == ELEVATOR_BACK_MERGE) { 1267 BUG_ON(req->cmd_flags & REQ_ON_PLUG); 1268 if (bio_attempt_back_merge(q, req, bio)) { 1269 if (!attempt_back_merge(q, req)) 1270 elv_merged_request(q, req, el_ret); 1271 goto out_unlock; 1272 } 1273 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1274 BUG_ON(req->cmd_flags & REQ_ON_PLUG); 1275 if (bio_attempt_front_merge(q, req, bio)) { 1276 if (!attempt_front_merge(q, req)) 1277 elv_merged_request(q, req, el_ret); 1278 goto out_unlock; 1279 } 1280 } 1281 1282get_rq: 1283 /* 1284 * This sync check and mask will be re-done in init_request_from_bio(), 1285 * but we need to set it earlier to expose the sync flag to the 1286 * rq allocator and io schedulers. 1287 */ 1288 rw_flags = bio_data_dir(bio); 1289 if (sync) 1290 rw_flags |= REQ_SYNC; 1291 1292 /* 1293 * Grab a free request. This is might sleep but can not fail. 1294 * Returns with the queue unlocked. 1295 */ 1296 req = get_request_wait(q, rw_flags, bio); 1297 1298 /* 1299 * After dropping the lock and possibly sleeping here, our request 1300 * may now be mergeable after it had proven unmergeable (above). 1301 * We don't worry about that case for efficiency. It won't happen 1302 * often, and the elevators are able to handle it. 
1303 */ 1304 init_request_from_bio(req, bio); 1305 1306 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || 1307 bio_flagged(bio, BIO_CPU_AFFINE)) { 1308 req->cpu = blk_cpu_to_group(get_cpu()); 1309 put_cpu(); 1310 } 1311 1312 plug = current->plug; 1313 if (plug) { 1314 /* 1315 * If this is the first request added after a plug, fire 1316 * of a plug trace. If others have been added before, check 1317 * if we have multiple devices in this plug. If so, make a 1318 * note to sort the list before dispatch. 1319 */ 1320 if (list_empty(&plug->list)) 1321 trace_block_plug(q); 1322 else if (!plug->should_sort) { 1323 struct request *__rq; 1324 1325 __rq = list_entry_rq(plug->list.prev); 1326 if (__rq->q != q) 1327 plug->should_sort = 1; 1328 } 1329 /* 1330 * Debug flag, kill later 1331 */ 1332 req->cmd_flags |= REQ_ON_PLUG; 1333 list_add_tail(&req->queuelist, &plug->list); 1334 drive_stat_acct(req, 1); 1335 } else { 1336 spin_lock_irq(q->queue_lock); 1337 add_acct_request(q, req, where); 1338 __blk_run_queue(q); 1339out_unlock: 1340 spin_unlock_irq(q->queue_lock); 1341 } 1342out: 1343 return 0; 1344} 1345 1346/* 1347 * If bio->bi_dev is a partition, remap the location 1348 */ 1349static inline void blk_partition_remap(struct bio *bio) 1350{ 1351 struct block_device *bdev = bio->bi_bdev; 1352 1353 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1354 struct hd_struct *p = bdev->bd_part; 1355 1356 bio->bi_sector += p->start_sect; 1357 bio->bi_bdev = bdev->bd_contains; 1358 1359 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1360 bdev->bd_dev, 1361 bio->bi_sector - p->start_sect); 1362 } 1363} 1364 1365static void handle_bad_sector(struct bio *bio) 1366{ 1367 char b[BDEVNAME_SIZE]; 1368 1369 printk(KERN_INFO "attempt to access beyond end of device\n"); 1370 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", 1371 bdevname(bio->bi_bdev, b), 1372 bio->bi_rw, 1373 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1374 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1375 1376 set_bit(BIO_EOF, &bio->bi_flags); 1377} 1378 1379#ifdef CONFIG_FAIL_MAKE_REQUEST 1380 1381static DECLARE_FAULT_ATTR(fail_make_request); 1382 1383static int __init setup_fail_make_request(char *str) 1384{ 1385 return setup_fault_attr(&fail_make_request, str); 1386} 1387__setup("fail_make_request=", setup_fail_make_request); 1388 1389static int should_fail_request(struct bio *bio) 1390{ 1391 struct hd_struct *part = bio->bi_bdev->bd_part; 1392 1393 if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail) 1394 return should_fail(&fail_make_request, bio->bi_size); 1395 1396 return 0; 1397} 1398 1399static int __init fail_make_request_debugfs(void) 1400{ 1401 return init_fault_attr_dentries(&fail_make_request, 1402 "fail_make_request"); 1403} 1404 1405late_initcall(fail_make_request_debugfs); 1406 1407#else /* CONFIG_FAIL_MAKE_REQUEST */ 1408 1409static inline int should_fail_request(struct bio *bio) 1410{ 1411 return 0; 1412} 1413 1414#endif /* CONFIG_FAIL_MAKE_REQUEST */ 1415 1416/* 1417 * Check whether this bio extends beyond the end of the device. 1418 */ 1419static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) 1420{ 1421 sector_t maxsector; 1422 1423 if (!nr_sectors) 1424 return 0; 1425 1426 /* Test device or partition size, when known. 
*/ 1427 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1428 if (maxsector) { 1429 sector_t sector = bio->bi_sector; 1430 1431 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1432 /* 1433 * This may well happen - the kernel calls bread() 1434 * without checking the size of the device, e.g., when 1435 * mounting a device. 1436 */ 1437 handle_bad_sector(bio); 1438 return 1; 1439 } 1440 } 1441 1442 return 0; 1443} 1444 1445/** 1446 * generic_make_request - hand a buffer to its device driver for I/O 1447 * @bio: The bio describing the location in memory and on the device. 1448 * 1449 * generic_make_request() is used to make I/O requests of block 1450 * devices. It is passed a &struct bio, which describes the I/O that needs 1451 * to be done. 1452 * 1453 * generic_make_request() does not return any status. The 1454 * success/failure status of the request, along with notification of 1455 * completion, is delivered asynchronously through the bio->bi_end_io 1456 * function described (one day) else where. 1457 * 1458 * The caller of generic_make_request must make sure that bi_io_vec 1459 * are set to describe the memory buffer, and that bi_dev and bi_sector are 1460 * set to describe the device address, and the 1461 * bi_end_io and optionally bi_private are set to describe how 1462 * completion notification should be signaled. 1463 * 1464 * generic_make_request and the drivers it calls may use bi_next if this 1465 * bio happens to be merged with someone else, and may change bi_dev and 1466 * bi_sector for remaps as it sees fit. So the values of these fields 1467 * should NOT be depended on after the call to generic_make_request. 1468 */ 1469static inline void __generic_make_request(struct bio *bio) 1470{ 1471 struct request_queue *q; 1472 sector_t old_sector; 1473 int ret, nr_sectors = bio_sectors(bio); 1474 dev_t old_dev; 1475 int err = -EIO; 1476 1477 might_sleep(); 1478 1479 if (bio_check_eod(bio, nr_sectors)) 1480 goto end_io; 1481 1482 /* 1483 * Resolve the mapping until finished. (drivers are 1484 * still free to implement/resolve their own stacking 1485 * by explicitly returning 0) 1486 * 1487 * NOTE: we don't repeat the blk_size check for each new device. 1488 * Stacking drivers are expected to know what they are doing. 1489 */ 1490 old_sector = -1; 1491 old_dev = 0; 1492 do { 1493 char b[BDEVNAME_SIZE]; 1494 1495 q = bdev_get_queue(bio->bi_bdev); 1496 if (unlikely(!q)) { 1497 printk(KERN_ERR 1498 "generic_make_request: Trying to access " 1499 "nonexistent block-device %s (%Lu)\n", 1500 bdevname(bio->bi_bdev, b), 1501 (long long) bio->bi_sector); 1502 goto end_io; 1503 } 1504 1505 if (unlikely(!(bio->bi_rw & REQ_DISCARD) && 1506 nr_sectors > queue_max_hw_sectors(q))) { 1507 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1508 bdevname(bio->bi_bdev, b), 1509 bio_sectors(bio), 1510 queue_max_hw_sectors(q)); 1511 goto end_io; 1512 } 1513 1514 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) 1515 goto end_io; 1516 1517 if (should_fail_request(bio)) 1518 goto end_io; 1519 1520 /* 1521 * If this device has partitions, remap block n 1522 * of partition p to block n+start(p) of the disk. 
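		 * As a concrete, made-up example: if the partition starts at
		 * sector 2048 of the disk, a bio aimed at sector 0 of the
		 * partition is remapped below to sector 2048 of the whole
		 * disk before it is handed to the driver.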
1523 */ 1524 blk_partition_remap(bio); 1525 1526 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) 1527 goto end_io; 1528 1529 if (old_sector != -1) 1530 trace_block_bio_remap(q, bio, old_dev, old_sector); 1531 1532 old_sector = bio->bi_sector; 1533 old_dev = bio->bi_bdev->bd_dev; 1534 1535 if (bio_check_eod(bio, nr_sectors)) 1536 goto end_io; 1537 1538 /* 1539 * Filter flush bio's early so that make_request based 1540 * drivers without flush support don't have to worry 1541 * about them. 1542 */ 1543 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { 1544 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); 1545 if (!nr_sectors) { 1546 err = 0; 1547 goto end_io; 1548 } 1549 } 1550 1551 if ((bio->bi_rw & REQ_DISCARD) && 1552 (!blk_queue_discard(q) || 1553 ((bio->bi_rw & REQ_SECURE) && 1554 !blk_queue_secdiscard(q)))) { 1555 err = -EOPNOTSUPP; 1556 goto end_io; 1557 } 1558 1559 blk_throtl_bio(q, &bio); 1560 1561 /* 1562 * If bio = NULL, bio has been throttled and will be submitted 1563 * later. 1564 */ 1565 if (!bio) 1566 break; 1567 1568 trace_block_bio_queue(q, bio); 1569 1570 ret = q->make_request_fn(q, bio); 1571 } while (ret); 1572 1573 return; 1574 1575end_io: 1576 bio_endio(bio, err); 1577} 1578 1579/* 1580 * We only want one ->make_request_fn to be active at a time, 1581 * else stack usage with stacked devices could be a problem. 1582 * So use current->bio_list to keep a list of requests 1583 * submited by a make_request_fn function. 1584 * current->bio_list is also used as a flag to say if 1585 * generic_make_request is currently active in this task or not. 1586 * If it is NULL, then no make_request is active. If it is non-NULL, 1587 * then a make_request is active, and new requests should be added 1588 * at the tail 1589 */ 1590void generic_make_request(struct bio *bio) 1591{ 1592 struct bio_list bio_list_on_stack; 1593 1594 if (current->bio_list) { 1595 /* make_request is active */ 1596 bio_list_add(current->bio_list, bio); 1597 return; 1598 } 1599 /* following loop may be a bit non-obvious, and so deserves some 1600 * explanation. 1601 * Before entering the loop, bio->bi_next is NULL (as all callers 1602 * ensure that) so we have a list with a single bio. 1603 * We pretend that we have just taken it off a longer list, so 1604 * we assign bio_list to a pointer to the bio_list_on_stack, 1605 * thus initialising the bio_list of new bios to be 1606 * added. __generic_make_request may indeed add some more bios 1607 * through a recursive call to generic_make_request. If it 1608 * did, we find a non-NULL value in bio_list and re-enter the loop 1609 * from the top. In this case we really did just take the bio 1610 * of the top of the list (no pretending) and so remove it from 1611 * bio_list, and call into __generic_make_request again. 1612 * 1613 * The loop was structured like this to make only one call to 1614 * __generic_make_request (which is important as it is large and 1615 * inlined) and to keep the structure simple. 
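	 *
	 * As a hedged sketch of what this enables: a stacking driver's
	 * make_request_fn that remaps a bio and resubmits it, e.g.
	 *
	 *	static int my_stacked_make_request(struct request_queue *q,
	 *					   struct bio *bio)
	 *	{
	 *		bio->bi_bdev = my_lower_bdev;
	 *		generic_make_request(bio);
	 *		return 0;
	 *	}
	 *
	 * does not recurse here; its resubmitted bio is queued on
	 * current->bio_list and is picked up by a later loop iteration.
	 * (my_stacked_make_request and my_lower_bdev are assumed names.)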
1616 */ 1617 BUG_ON(bio->bi_next); 1618 bio_list_init(&bio_list_on_stack); 1619 current->bio_list = &bio_list_on_stack; 1620 do { 1621 __generic_make_request(bio); 1622 bio = bio_list_pop(current->bio_list); 1623 } while (bio); 1624 current->bio_list = NULL; /* deactivate */ 1625} 1626EXPORT_SYMBOL(generic_make_request); 1627 1628/** 1629 * submit_bio - submit a bio to the block device layer for I/O 1630 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) 1631 * @bio: The &struct bio which describes the I/O 1632 * 1633 * submit_bio() is very similar in purpose to generic_make_request(), and 1634 * uses that function to do most of the work. Both are fairly rough 1635 * interfaces; @bio must be presetup and ready for I/O. 1636 * 1637 */ 1638void submit_bio(int rw, struct bio *bio) 1639{ 1640 int count = bio_sectors(bio); 1641 1642 bio->bi_rw |= rw; 1643 1644 /* 1645 * If it's a regular read/write or a barrier with data attached, 1646 * go through the normal accounting stuff before submission. 1647 */ 1648 if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { 1649 if (rw & WRITE) { 1650 count_vm_events(PGPGOUT, count); 1651 } else { 1652 task_io_account_read(bio->bi_size); 1653 count_vm_events(PGPGIN, count); 1654 } 1655 1656 if (unlikely(block_dump)) { 1657 char b[BDEVNAME_SIZE]; 1658 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 1659 current->comm, task_pid_nr(current), 1660 (rw & WRITE) ? "WRITE" : "READ", 1661 (unsigned long long)bio->bi_sector, 1662 bdevname(bio->bi_bdev, b), 1663 count); 1664 } 1665 } 1666 1667 generic_make_request(bio); 1668} 1669EXPORT_SYMBOL(submit_bio); 1670 1671/** 1672 * blk_rq_check_limits - Helper function to check a request for the queue limit 1673 * @q: the queue 1674 * @rq: the request being checked 1675 * 1676 * Description: 1677 * @rq may have been made based on weaker limitations of upper-level queues 1678 * in request stacking drivers, and it may violate the limitation of @q. 1679 * Since the block layer and the underlying device driver trust @rq 1680 * after it is inserted to @q, it should be checked against @q before 1681 * the insertion using this generic function. 1682 * 1683 * This function should also be useful for request stacking drivers 1684 * in some cases below, so export this function. 1685 * Request stacking drivers like request-based dm may change the queue 1686 * limits while requests are in the queue (e.g. dm's table swapping). 1687 * Such request stacking drivers should check those requests agaist 1688 * the new queue limits again when they dispatch those requests, 1689 * although such checkings are also done against the old queue limits 1690 * when submitting requests. 1691 */ 1692int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1693{ 1694 if (rq->cmd_flags & REQ_DISCARD) 1695 return 0; 1696 1697 if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1698 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { 1699 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1700 return -EIO; 1701 } 1702 1703 /* 1704 * queue's settings related to segment counting like q->bounce_pfn 1705 * may differ from that of other stacking queues. 1706 * Recalculate it to check the request correctly on this queue's 1707 * limitation. 
1708 */ 1709 blk_recalc_rq_segments(rq); 1710 if (rq->nr_phys_segments > queue_max_segments(q)) { 1711 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1712 return -EIO; 1713 } 1714 1715 return 0; 1716} 1717EXPORT_SYMBOL_GPL(blk_rq_check_limits); 1718 1719/** 1720 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 1721 * @q: the queue to submit the request 1722 * @rq: the request being queued 1723 */ 1724int blk_insert_cloned_request(struct request_queue *q, struct request *rq) 1725{ 1726 unsigned long flags; 1727 1728 if (blk_rq_check_limits(q, rq)) 1729 return -EIO; 1730 1731#ifdef CONFIG_FAIL_MAKE_REQUEST 1732 if (rq->rq_disk && rq->rq_disk->part0.make_it_fail && 1733 should_fail(&fail_make_request, blk_rq_bytes(rq))) 1734 return -EIO; 1735#endif 1736 1737 spin_lock_irqsave(q->queue_lock, flags); 1738 1739 /* 1740 * Submitting request must be dequeued before calling this function 1741 * because it will be linked to another request_queue 1742 */ 1743 BUG_ON(blk_queued_rq(rq)); 1744 1745 add_acct_request(q, rq, ELEVATOR_INSERT_BACK); 1746 spin_unlock_irqrestore(q->queue_lock, flags); 1747 1748 return 0; 1749} 1750EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1751 1752/** 1753 * blk_rq_err_bytes - determine number of bytes till the next failure boundary 1754 * @rq: request to examine 1755 * 1756 * Description: 1757 * A request could be merge of IOs which require different failure 1758 * handling. This function determines the number of bytes which 1759 * can be failed from the beginning of the request without 1760 * crossing into area which need to be retried further. 1761 * 1762 * Return: 1763 * The number of bytes to fail. 1764 * 1765 * Context: 1766 * queue_lock must be held. 1767 */ 1768unsigned int blk_rq_err_bytes(const struct request *rq) 1769{ 1770 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 1771 unsigned int bytes = 0; 1772 struct bio *bio; 1773 1774 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 1775 return blk_rq_bytes(rq); 1776 1777 /* 1778 * Currently the only 'mixing' which can happen is between 1779 * different fastfail types. We can safely fail portions 1780 * which have all the failfast bits that the first one has - 1781 * the ones which are at least as eager to fail as the first 1782 * one. 1783 */ 1784 for (bio = rq->bio; bio; bio = bio->bi_next) { 1785 if ((bio->bi_rw & ff) != ff) 1786 break; 1787 bytes += bio->bi_size; 1788 } 1789 1790 /* this could lead to infinite loop */ 1791 BUG_ON(blk_rq_bytes(rq) && !bytes); 1792 return bytes; 1793} 1794EXPORT_SYMBOL_GPL(blk_rq_err_bytes); 1795 1796static void blk_account_io_completion(struct request *req, unsigned int bytes) 1797{ 1798 if (blk_do_io_stat(req)) { 1799 const int rw = rq_data_dir(req); 1800 struct hd_struct *part; 1801 int cpu; 1802 1803 cpu = part_stat_lock(); 1804 part = req->part; 1805 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1806 part_stat_unlock(); 1807 } 1808} 1809 1810static void blk_account_io_done(struct request *req) 1811{ 1812 /* 1813 * Account IO completion. flush_rq isn't accounted as a 1814 * normal IO on queueing nor completion. Accounting the 1815 * containing request is enough. 
1816 */ 1817 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 1818 unsigned long duration = jiffies - req->start_time; 1819 const int rw = rq_data_dir(req); 1820 struct hd_struct *part; 1821 int cpu; 1822 1823 cpu = part_stat_lock(); 1824 part = req->part; 1825 1826 part_stat_inc(cpu, part, ios[rw]); 1827 part_stat_add(cpu, part, ticks[rw], duration); 1828 part_round_stats(cpu, part); 1829 part_dec_in_flight(part, rw); 1830 1831 hd_struct_put(part); 1832 part_stat_unlock(); 1833 } 1834} 1835 1836/** 1837 * blk_peek_request - peek at the top of a request queue 1838 * @q: request queue to peek at 1839 * 1840 * Description: 1841 * Return the request at the top of @q. The returned request 1842 * should be started using blk_start_request() before LLD starts 1843 * processing it. 1844 * 1845 * Return: 1846 * Pointer to the request at the top of @q if available. Null 1847 * otherwise. 1848 * 1849 * Context: 1850 * queue_lock must be held. 1851 */ 1852struct request *blk_peek_request(struct request_queue *q) 1853{ 1854 struct request *rq; 1855 int ret; 1856 1857 while ((rq = __elv_next_request(q)) != NULL) { 1858 if (!(rq->cmd_flags & REQ_STARTED)) { 1859 /* 1860 * This is the first time the device driver 1861 * sees this request (possibly after 1862 * requeueing). Notify IO scheduler. 1863 */ 1864 if (rq->cmd_flags & REQ_SORTED) 1865 elv_activate_rq(q, rq); 1866 1867 /* 1868 * just mark as started even if we don't start 1869 * it, a request that has been delayed should 1870 * not be passed by new incoming requests 1871 */ 1872 rq->cmd_flags |= REQ_STARTED; 1873 trace_block_rq_issue(q, rq); 1874 } 1875 1876 if (!q->boundary_rq || q->boundary_rq == rq) { 1877 q->end_sector = rq_end_sector(rq); 1878 q->boundary_rq = NULL; 1879 } 1880 1881 if (rq->cmd_flags & REQ_DONTPREP) 1882 break; 1883 1884 if (q->dma_drain_size && blk_rq_bytes(rq)) { 1885 /* 1886 * make sure space for the drain appears we 1887 * know we can do this because max_hw_segments 1888 * has been adjusted to be one fewer than the 1889 * device can handle 1890 */ 1891 rq->nr_phys_segments++; 1892 } 1893 1894 if (!q->prep_rq_fn) 1895 break; 1896 1897 ret = q->prep_rq_fn(q, rq); 1898 if (ret == BLKPREP_OK) { 1899 break; 1900 } else if (ret == BLKPREP_DEFER) { 1901 /* 1902 * the request may have been (partially) prepped. 1903 * we need to keep this request in the front to 1904 * avoid resource deadlock. REQ_STARTED will 1905 * prevent other fs requests from passing this one. 1906 */ 1907 if (q->dma_drain_size && blk_rq_bytes(rq) && 1908 !(rq->cmd_flags & REQ_DONTPREP)) { 1909 /* 1910 * remove the space for the drain we added 1911 * so that we don't add it again 1912 */ 1913 --rq->nr_phys_segments; 1914 } 1915 1916 rq = NULL; 1917 break; 1918 } else if (ret == BLKPREP_KILL) { 1919 rq->cmd_flags |= REQ_QUIET; 1920 /* 1921 * Mark this request as started so we don't trigger 1922 * any debug logic in the end I/O path. 
1923 */ 1924 blk_start_request(rq); 1925 __blk_end_request_all(rq, -EIO); 1926 } else { 1927 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 1928 break; 1929 } 1930 } 1931 1932 return rq; 1933} 1934EXPORT_SYMBOL(blk_peek_request); 1935 1936void blk_dequeue_request(struct request *rq) 1937{ 1938 struct request_queue *q = rq->q; 1939 1940 BUG_ON(list_empty(&rq->queuelist)); 1941 BUG_ON(ELV_ON_HASH(rq)); 1942 1943 list_del_init(&rq->queuelist); 1944 1945 /* 1946 * the time frame between a request being removed from the lists 1947 * and to it is freed is accounted as io that is in progress at 1948 * the driver side. 1949 */ 1950 if (blk_account_rq(rq)) { 1951 q->in_flight[rq_is_sync(rq)]++; 1952 set_io_start_time_ns(rq); 1953 } 1954} 1955 1956/** 1957 * blk_start_request - start request processing on the driver 1958 * @req: request to dequeue 1959 * 1960 * Description: 1961 * Dequeue @req and start timeout timer on it. This hands off the 1962 * request to the driver. 1963 * 1964 * Block internal functions which don't want to start timer should 1965 * call blk_dequeue_request(). 1966 * 1967 * Context: 1968 * queue_lock must be held. 1969 */ 1970void blk_start_request(struct request *req) 1971{ 1972 blk_dequeue_request(req); 1973 1974 /* 1975 * We are now handing the request to the hardware, initialize 1976 * resid_len to full count and add the timeout handler. 1977 */ 1978 req->resid_len = blk_rq_bytes(req); 1979 if (unlikely(blk_bidi_rq(req))) 1980 req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 1981 1982 blk_add_timer(req); 1983} 1984EXPORT_SYMBOL(blk_start_request); 1985 1986/** 1987 * blk_fetch_request - fetch a request from a request queue 1988 * @q: request queue to fetch a request from 1989 * 1990 * Description: 1991 * Return the request at the top of @q. The request is started on 1992 * return and LLD can start processing it immediately. 1993 * 1994 * Return: 1995 * Pointer to the request at the top of @q if available. Null 1996 * otherwise. 1997 * 1998 * Context: 1999 * queue_lock must be held. 2000 */ 2001struct request *blk_fetch_request(struct request_queue *q) 2002{ 2003 struct request *rq; 2004 2005 rq = blk_peek_request(q); 2006 if (rq) 2007 blk_start_request(rq); 2008 return rq; 2009} 2010EXPORT_SYMBOL(blk_fetch_request); 2011 2012/** 2013 * blk_update_request - Special helper function for request stacking drivers 2014 * @req: the request being processed 2015 * @error: %0 for success, < %0 for error 2016 * @nr_bytes: number of bytes to complete @req 2017 * 2018 * Description: 2019 * Ends I/O on a number of bytes attached to @req, but doesn't complete 2020 * the request structure even if @req doesn't have leftover. 2021 * If @req has leftover, sets it up for the next range of segments. 2022 * 2023 * This special helper function is only for request stacking drivers 2024 * (e.g. request-based dm) so that they can handle partial completion. 2025 * Actual device drivers should use blk_end_request instead. 2026 * 2027 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 2028 * %false return from this function. 
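 *
 *     As a loose sketch (not a quote of the code below), the plain
 *     completion helpers later in this file are built on top of it
 *     roughly as:
 *
 *         blk_end_request(rq, error, nr_bytes):
 *             if (!blk_update_request(rq, error, nr_bytes))
 *                 blk_finish_request(rq, error);    under queue_lock
 *
 *     (ignoring bidi handling and disk randomness accounting).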
2029 * 2030 * Return: 2031 * %false - this request doesn't have any more data 2032 * %true - this request has more data 2033 **/ 2034bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 2035{ 2036 int total_bytes, bio_nbytes, next_idx = 0; 2037 struct bio *bio; 2038 2039 if (!req->bio) 2040 return false; 2041 2042 trace_block_rq_complete(req->q, req); 2043 2044 /* 2045 * For fs requests, rq is just carrier of independent bio's 2046 * and each partial completion should be handled separately. 2047 * Reset per-request error on each partial completion. 2048 * 2049 * TODO: tj: This is too subtle. It would be better to let 2050 * low level drivers do what they see fit. 2051 */ 2052 if (req->cmd_type == REQ_TYPE_FS) 2053 req->errors = 0; 2054 2055 if (error && req->cmd_type == REQ_TYPE_FS && 2056 !(req->cmd_flags & REQ_QUIET)) { 2057 char *error_type; 2058 2059 switch (error) { 2060 case -ENOLINK: 2061 error_type = "recoverable transport"; 2062 break; 2063 case -EREMOTEIO: 2064 error_type = "critical target"; 2065 break; 2066 case -EBADE: 2067 error_type = "critical nexus"; 2068 break; 2069 case -EIO: 2070 default: 2071 error_type = "I/O"; 2072 break; 2073 } 2074 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", 2075 error_type, req->rq_disk ? req->rq_disk->disk_name : "?", 2076 (unsigned long long)blk_rq_pos(req)); 2077 } 2078 2079 blk_account_io_completion(req, nr_bytes); 2080 2081 total_bytes = bio_nbytes = 0; 2082 while ((bio = req->bio) != NULL) { 2083 int nbytes; 2084 2085 if (nr_bytes >= bio->bi_size) { 2086 req->bio = bio->bi_next; 2087 nbytes = bio->bi_size; 2088 req_bio_endio(req, bio, nbytes, error); 2089 next_idx = 0; 2090 bio_nbytes = 0; 2091 } else { 2092 int idx = bio->bi_idx + next_idx; 2093 2094 if (unlikely(idx >= bio->bi_vcnt)) { 2095 blk_dump_rq_flags(req, "__end_that"); 2096 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 2097 __func__, idx, bio->bi_vcnt); 2098 break; 2099 } 2100 2101 nbytes = bio_iovec_idx(bio, idx)->bv_len; 2102 BIO_BUG_ON(nbytes > bio->bi_size); 2103 2104 /* 2105 * not a complete bvec done 2106 */ 2107 if (unlikely(nbytes > nr_bytes)) { 2108 bio_nbytes += nr_bytes; 2109 total_bytes += nr_bytes; 2110 break; 2111 } 2112 2113 /* 2114 * advance to the next vector 2115 */ 2116 next_idx++; 2117 bio_nbytes += nbytes; 2118 } 2119 2120 total_bytes += nbytes; 2121 nr_bytes -= nbytes; 2122 2123 bio = req->bio; 2124 if (bio) { 2125 /* 2126 * end more in this run, or just return 'not-done' 2127 */ 2128 if (unlikely(nr_bytes <= 0)) 2129 break; 2130 } 2131 } 2132 2133 /* 2134 * completely done 2135 */ 2136 if (!req->bio) { 2137 /* 2138 * Reset counters so that the request stacking driver 2139 * can find how many bytes remain in the request 2140 * later. 
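		 * (blk_rq_bytes() will then report 0 for this request.)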
2141 */ 2142 req->__data_len = 0; 2143 return false; 2144 } 2145 2146 /* 2147 * if the request wasn't completed, update state 2148 */ 2149 if (bio_nbytes) { 2150 req_bio_endio(req, bio, bio_nbytes, error); 2151 bio->bi_idx += next_idx; 2152 bio_iovec(bio)->bv_offset += nr_bytes; 2153 bio_iovec(bio)->bv_len -= nr_bytes; 2154 } 2155 2156 req->__data_len -= total_bytes; 2157 req->buffer = bio_data(req->bio); 2158 2159 /* update sector only for requests with clear definition of sector */ 2160 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2161 req->__sector += total_bytes >> 9; 2162 2163 /* mixed attributes always follow the first bio */ 2164 if (req->cmd_flags & REQ_MIXED_MERGE) { 2165 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2166 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; 2167 } 2168 2169 /* 2170 * If total number of sectors is less than the first segment 2171 * size, something has gone terribly wrong. 2172 */ 2173 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2174 blk_dump_rq_flags(req, "request botched"); 2175 req->__data_len = blk_rq_cur_bytes(req); 2176 } 2177 2178 /* recalculate the number of segments */ 2179 blk_recalc_rq_segments(req); 2180 2181 return true; 2182} 2183EXPORT_SYMBOL_GPL(blk_update_request); 2184 2185static bool blk_update_bidi_request(struct request *rq, int error, 2186 unsigned int nr_bytes, 2187 unsigned int bidi_bytes) 2188{ 2189 if (blk_update_request(rq, error, nr_bytes)) 2190 return true; 2191 2192 /* Bidi request must be completed as a whole */ 2193 if (unlikely(blk_bidi_rq(rq)) && 2194 blk_update_request(rq->next_rq, error, bidi_bytes)) 2195 return true; 2196 2197 if (blk_queue_add_random(rq->q)) 2198 add_disk_randomness(rq->rq_disk); 2199 2200 return false; 2201} 2202 2203/** 2204 * blk_unprep_request - unprepare a request 2205 * @req: the request 2206 * 2207 * This function makes a request ready for complete resubmission (or 2208 * completion). It happens only after all error handling is complete, 2209 * so represents the appropriate moment to deallocate any resources 2210 * that were allocated to the request in the prep_rq_fn. The queue 2211 * lock is held when calling this. 2212 */ 2213void blk_unprep_request(struct request *req) 2214{ 2215 struct request_queue *q = req->q; 2216 2217 req->cmd_flags &= ~REQ_DONTPREP; 2218 if (q->unprep_rq_fn) 2219 q->unprep_rq_fn(q, req); 2220} 2221EXPORT_SYMBOL_GPL(blk_unprep_request); 2222 2223/* 2224 * queue lock must be held 2225 */ 2226static void blk_finish_request(struct request *req, int error) 2227{ 2228 if (blk_rq_tagged(req)) 2229 blk_queue_end_tag(req->q, req); 2230 2231 BUG_ON(blk_queued_rq(req)); 2232 2233 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2234 laptop_io_completion(&req->q->backing_dev_info); 2235 2236 blk_delete_timer(req); 2237 2238 if (req->cmd_flags & REQ_DONTPREP) 2239 blk_unprep_request(req); 2240 2241 2242 blk_account_io_done(req); 2243 2244 if (req->end_io) 2245 req->end_io(req, error); 2246 else { 2247 if (blk_bidi_rq(req)) 2248 __blk_put_request(req->next_rq->q, req->next_rq); 2249 2250 __blk_put_request(req->q, req); 2251 } 2252} 2253 2254/** 2255 * blk_end_bidi_request - Complete a bidi request 2256 * @rq: the request to complete 2257 * @error: %0 for success, < %0 for error 2258 * @nr_bytes: number of bytes to complete @rq 2259 * @bidi_bytes: number of bytes to complete @rq->next_rq 2260 * 2261 * Description: 2262 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 
2263 * Drivers that support bidi can safely call this function for any 2264 * type of request, bidi or uni. In the latter case @bidi_bytes is 2265 * just ignored. 2266 * 2267 * Return: 2268 * %false - we are done with this request 2269 * %true - still buffers pending for this request 2270 **/ 2271static bool blk_end_bidi_request(struct request *rq, int error, 2272 unsigned int nr_bytes, unsigned int bidi_bytes) 2273{ 2274 struct request_queue *q = rq->q; 2275 unsigned long flags; 2276 2277 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2278 return true; 2279 2280 spin_lock_irqsave(q->queue_lock, flags); 2281 blk_finish_request(rq, error); 2282 spin_unlock_irqrestore(q->queue_lock, flags); 2283 2284 return false; 2285} 2286 2287/** 2288 * __blk_end_bidi_request - Complete a bidi request with queue lock held 2289 * @rq: the request to complete 2290 * @error: %0 for success, < %0 for error 2291 * @nr_bytes: number of bytes to complete @rq 2292 * @bidi_bytes: number of bytes to complete @rq->next_rq 2293 * 2294 * Description: 2295 * Identical to blk_end_bidi_request() except that queue lock is 2296 * assumed to be locked on entry and remains so on return. 2297 * 2298 * Return: 2299 * %false - we are done with this request 2300 * %true - still buffers pending for this request 2301 **/ 2302static bool __blk_end_bidi_request(struct request *rq, int error, 2303 unsigned int nr_bytes, unsigned int bidi_bytes) 2304{ 2305 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 2306 return true; 2307 2308 blk_finish_request(rq, error); 2309 2310 return false; 2311} 2312 2313/** 2314 * blk_end_request - Helper function for drivers to complete the request. 2315 * @rq: the request being processed 2316 * @error: %0 for success, < %0 for error 2317 * @nr_bytes: number of bytes to complete 2318 * 2319 * Description: 2320 * Ends I/O on a number of bytes attached to @rq. 2321 * If @rq has leftover, sets it up for the next range of segments. 2322 * 2323 * Return: 2324 * %false - we are done with this request 2325 * %true - still buffers pending for this request 2326 **/ 2327bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2328{ 2329 return blk_end_bidi_request(rq, error, nr_bytes, 0); 2330} 2331EXPORT_SYMBOL(blk_end_request); 2332 2333/** 2334 * blk_end_request_all - Helper function for drivers to finish the request. 2335 * @rq: the request to finish 2336 * @error: %0 for success, < %0 for error 2337 * 2338 * Description: 2339 * Completely finish @rq. 2340 */ 2341void blk_end_request_all(struct request *rq, int error) 2342{ 2343 bool pending; 2344 unsigned int bidi_bytes = 0; 2345 2346 if (unlikely(blk_bidi_rq(rq))) 2347 bidi_bytes = blk_rq_bytes(rq->next_rq); 2348 2349 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2350 BUG_ON(pending); 2351} 2352EXPORT_SYMBOL(blk_end_request_all); 2353 2354/** 2355 * blk_end_request_cur - Helper function to finish the current request chunk. 2356 * @rq: the request to finish the current chunk for 2357 * @error: %0 for success, < %0 for error 2358 * 2359 * Description: 2360 * Complete the current consecutively mapped chunk from @rq.
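 *
 *     Illustrative sketch (not taken from any particular driver): a driver
 *     that finishes one chunk per completion interrupt can simply do
 *
 *         if (blk_end_request_cur(rq, error))
 *             return;        more chunks remain, wait for the next irq
 *         ... the whole request has been completed at this point ...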
2361 * 2362 * Return: 2363 * %false - we are done with this request 2364 * %true - still buffers pending for this request 2365 */ 2366bool blk_end_request_cur(struct request *rq, int error) 2367{ 2368 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2369} 2370EXPORT_SYMBOL(blk_end_request_cur); 2371 2372/** 2373 * blk_end_request_err - Finish a request till the next failure boundary. 2374 * @rq: the request to finish till the next failure boundary for 2375 * @error: must be negative errno 2376 * 2377 * Description: 2378 * Complete @rq till the next failure boundary. 2379 * 2380 * Return: 2381 * %false - we are done with this request 2382 * %true - still buffers pending for this request 2383 */ 2384bool blk_end_request_err(struct request *rq, int error) 2385{ 2386 WARN_ON(error >= 0); 2387 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2388} 2389EXPORT_SYMBOL_GPL(blk_end_request_err); 2390 2391/** 2392 * __blk_end_request - Helper function for drivers to complete the request. 2393 * @rq: the request being processed 2394 * @error: %0 for success, < %0 for error 2395 * @nr_bytes: number of bytes to complete 2396 * 2397 * Description: 2398 * Must be called with queue lock held unlike blk_end_request(). 2399 * 2400 * Return: 2401 * %false - we are done with this request 2402 * %true - still buffers pending for this request 2403 **/ 2404bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2405{ 2406 return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2407} 2408EXPORT_SYMBOL(__blk_end_request); 2409 2410/** 2411 * __blk_end_request_all - Helper function for drivers to finish the request. 2412 * @rq: the request to finish 2413 * @error: %0 for success, < %0 for error 2414 * 2415 * Description: 2416 * Completely finish @rq. Must be called with queue lock held. 2417 */ 2418void __blk_end_request_all(struct request *rq, int error) 2419{ 2420 bool pending; 2421 unsigned int bidi_bytes = 0; 2422 2423 if (unlikely(blk_bidi_rq(rq))) 2424 bidi_bytes = blk_rq_bytes(rq->next_rq); 2425 2426 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2427 BUG_ON(pending); 2428} 2429EXPORT_SYMBOL(__blk_end_request_all); 2430 2431/** 2432 * __blk_end_request_cur - Helper function to finish the current request chunk. 2433 * @rq: the request to finish the current chunk for 2434 * @error: %0 for success, < %0 for error 2435 * 2436 * Description: 2437 * Complete the current consecutively mapped chunk from @rq. Must 2438 * be called with queue lock held. 2439 * 2440 * Return: 2441 * %false - we are done with this request 2442 * %true - still buffers pending for this request 2443 */ 2444bool __blk_end_request_cur(struct request *rq, int error) 2445{ 2446 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2447} 2448EXPORT_SYMBOL(__blk_end_request_cur); 2449 2450/** 2451 * __blk_end_request_err - Finish a request till the next failure boundary. 2452 * @rq: the request to finish till the next failure boundary for 2453 * @error: must be negative errno 2454 * 2455 * Description: 2456 * Complete @rq till the next failure boundary. Must be called 2457 * with queue lock held.
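 * (The boundary itself is computed with blk_rq_err_bytes(), defined
 * earlier in this file.)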
2458 * 2459 * Return: 2460 * %false - we are done with this request 2461 * %true - still buffers pending for this request 2462 */ 2463bool __blk_end_request_err(struct request *rq, int error) 2464{ 2465 WARN_ON(error >= 0); 2466 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); 2467} 2468EXPORT_SYMBOL_GPL(__blk_end_request_err); 2469 2470void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2471 struct bio *bio) 2472{ 2473 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2474 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2475 2476 if (bio_has_data(bio)) { 2477 rq->nr_phys_segments = bio_phys_segments(q, bio); 2478 rq->buffer = bio_data(bio); 2479 } 2480 rq->__data_len = bio->bi_size; 2481 rq->bio = rq->biotail = bio; 2482 2483 if (bio->bi_bdev) 2484 rq->rq_disk = bio->bi_bdev->bd_disk; 2485} 2486 2487#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2488/** 2489 * rq_flush_dcache_pages - Helper function to flush all pages in a request 2490 * @rq: the request to be flushed 2491 * 2492 * Description: 2493 * Flush all pages in @rq. 2494 */ 2495void rq_flush_dcache_pages(struct request *rq) 2496{ 2497 struct req_iterator iter; 2498 struct bio_vec *bvec; 2499 2500 rq_for_each_segment(bvec, rq, iter) 2501 flush_dcache_page(bvec->bv_page); 2502} 2503EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2504#endif 2505 2506/** 2507 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2508 * @q : the queue of the device being checked 2509 * 2510 * Description: 2511 * Check if underlying low-level drivers of a device are busy. 2512 * If the drivers want to export their busy state, they must set own 2513 * exporting function using blk_queue_lld_busy() first. 2514 * 2515 * Basically, this function is used only by request stacking drivers 2516 * to stop dispatching requests to underlying devices when underlying 2517 * devices are busy. This behavior helps more I/O merging on the queue 2518 * of the request stacking driver and prevents I/O throughput regression 2519 * on burst I/O load. 2520 * 2521 * Return: 2522 * 0 - Not busy (The request stacking driver should dispatch request) 2523 * 1 - Busy (The request stacking driver should stop dispatching request) 2524 */ 2525int blk_lld_busy(struct request_queue *q) 2526{ 2527 if (q->lld_busy_fn) 2528 return q->lld_busy_fn(q); 2529 2530 return 0; 2531} 2532EXPORT_SYMBOL_GPL(blk_lld_busy); 2533 2534/** 2535 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2536 * @rq: the clone request to be cleaned up 2537 * 2538 * Description: 2539 * Free all bios in @rq for a cloned request. 2540 */ 2541void blk_rq_unprep_clone(struct request *rq) 2542{ 2543 struct bio *bio; 2544 2545 while ((bio = rq->bio) != NULL) { 2546 rq->bio = bio->bi_next; 2547 2548 bio_put(bio); 2549 } 2550} 2551EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2552 2553/* 2554 * Copy attributes of the original request to the clone request. 2555 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. 
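 * What is copied (see the body below): ->cpu, ->cmd_type, the
 * REQ_CLONE_MASK subset of ->cmd_flags (with REQ_NOMERGE added), the
 * sector, the data length, ->nr_phys_segments, ->ioprio and ->extra_len.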
2556 */ 2557static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2558{ 2559 dst->cpu = src->cpu; 2560 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; 2561 dst->cmd_type = src->cmd_type; 2562 dst->__sector = blk_rq_pos(src); 2563 dst->__data_len = blk_rq_bytes(src); 2564 dst->nr_phys_segments = src->nr_phys_segments; 2565 dst->ioprio = src->ioprio; 2566 dst->extra_len = src->extra_len; 2567} 2568 2569/** 2570 * blk_rq_prep_clone - Helper function to setup clone request 2571 * @rq: the request to be setup 2572 * @rq_src: original request to be cloned 2573 * @bs: bio_set that bios for clone are allocated from 2574 * @gfp_mask: memory allocation mask for bio 2575 * @bio_ctr: setup function to be called for each clone bio. 2576 * Returns %0 for success, non %0 for failure. 2577 * @data: private data to be passed to @bio_ctr 2578 * 2579 * Description: 2580 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2581 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) 2582 * are not copied, and copying such parts is the caller's responsibility. 2583 * Also, pages which the original bios are pointing to are not copied 2584 * and the cloned bios just point same pages. 2585 * So cloned bios must be completed before original bios, which means 2586 * the caller must complete @rq before @rq_src. 2587 */ 2588int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2589 struct bio_set *bs, gfp_t gfp_mask, 2590 int (*bio_ctr)(struct bio *, struct bio *, void *), 2591 void *data) 2592{ 2593 struct bio *bio, *bio_src; 2594 2595 if (!bs) 2596 bs = fs_bio_set; 2597 2598 blk_rq_init(NULL, rq); 2599 2600 __rq_for_each_bio(bio_src, rq_src) { 2601 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); 2602 if (!bio) 2603 goto free_and_out; 2604 2605 __bio_clone(bio, bio_src); 2606 2607 if (bio_integrity(bio_src) && 2608 bio_integrity_clone(bio, bio_src, gfp_mask, bs)) 2609 goto free_and_out; 2610 2611 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2612 goto free_and_out; 2613 2614 if (rq->bio) { 2615 rq->biotail->bi_next = bio; 2616 rq->biotail = bio; 2617 } else 2618 rq->bio = rq->biotail = bio; 2619 } 2620 2621 __blk_rq_prep_clone(rq, rq_src); 2622 2623 return 0; 2624 2625free_and_out: 2626 if (bio) 2627 bio_free(bio, bs); 2628 blk_rq_unprep_clone(rq); 2629 2630 return -ENOMEM; 2631} 2632EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 2633 2634int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) 2635{ 2636 return queue_work(kblockd_workqueue, work); 2637} 2638EXPORT_SYMBOL(kblockd_schedule_work); 2639 2640int kblockd_schedule_delayed_work(struct request_queue *q, 2641 struct delayed_work *dwork, unsigned long delay) 2642{ 2643 return queue_delayed_work(kblockd_workqueue, dwork, delay); 2644} 2645EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2646 2647#define PLUG_MAGIC 0x91827364 2648 2649void blk_start_plug(struct blk_plug *plug) 2650{ 2651 struct task_struct *tsk = current; 2652 2653 plug->magic = PLUG_MAGIC; 2654 INIT_LIST_HEAD(&plug->list); 2655 INIT_LIST_HEAD(&plug->cb_list); 2656 plug->should_sort = 0; 2657 2658 /* 2659 * If this is a nested plug, don't actually assign it. It will be 2660 * flushed on its own. 
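	 * (Its own blk_finish_plug() still flushes it; it simply never
	 * becomes current->plug.)
	 *
	 * For orientation, the usual calling pattern looks roughly like
	 * this (a sketch, not a quote from any caller):
	 *
	 *     struct blk_plug plug;
	 *
	 *     blk_start_plug(&plug);
	 *     submit_bio(rw, bio);       ...batch up a number of requests...
	 *     blk_finish_plug(&plug);    hand the whole batch to the driver(s)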
2661 */ 2662 if (!tsk->plug) { 2663 /* 2664 * Store ordering should not be needed here, since a potential 2665 * preempt will imply a full memory barrier 2666 */ 2667 tsk->plug = plug; 2668 } 2669} 2670EXPORT_SYMBOL(blk_start_plug); 2671 2672static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 2673{ 2674 struct request *rqa = container_of(a, struct request, queuelist); 2675 struct request *rqb = container_of(b, struct request, queuelist); 2676 2677 return !(rqa->q <= rqb->q); 2678} 2679 2680/* 2681 * If 'from_schedule' is true, then postpone the dispatch of requests 2682 * until a safe kblockd context. We do this to avoid accidental big 2683 * additional stack usage in driver dispatch, in places where the original 2684 * plugger did not intend it. 2685 */ 2686static void queue_unplugged(struct request_queue *q, unsigned int depth, 2687 bool from_schedule) 2688 __releases(q->queue_lock) 2689{ 2690 trace_block_unplug(q, depth, !from_schedule); 2691 2692 /* 2693 * If we are punting this to kblockd, then we can safely drop 2694 * the queue_lock before waking kblockd (which needs to take 2695 * this lock). 2696 */ 2697 if (from_schedule) { 2698 spin_unlock(q->queue_lock); 2699 blk_run_queue_async(q); 2700 } else { 2701 __blk_run_queue(q); 2702 spin_unlock(q->queue_lock); 2703 } 2704 2705} 2706 2707static void flush_plug_callbacks(struct blk_plug *plug) 2708{ 2709 LIST_HEAD(callbacks); 2710 2711 if (list_empty(&plug->cb_list)) 2712 return; 2713 2714 list_splice_init(&plug->cb_list, &callbacks); 2715 2716 while (!list_empty(&callbacks)) { 2717 struct blk_plug_cb *cb = list_first_entry(&callbacks, 2718 struct blk_plug_cb, 2719 list); 2720 list_del(&cb->list); 2721 cb->callback(cb); 2722 } 2723} 2724 2725void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2726{ 2727 struct request_queue *q; 2728 unsigned long flags; 2729 struct request *rq; 2730 LIST_HEAD(list); 2731 unsigned int depth; 2732 2733 BUG_ON(plug->magic != PLUG_MAGIC); 2734 2735 flush_plug_callbacks(plug); 2736 if (list_empty(&plug->list)) 2737 return; 2738 2739 list_splice_init(&plug->list, &list); 2740 2741 if (plug->should_sort) { 2742 list_sort(NULL, &list, plug_rq_cmp); 2743 plug->should_sort = 0; 2744 } 2745 2746 q = NULL; 2747 depth = 0; 2748 2749 /* 2750 * Save and disable interrupts here, to avoid doing it for every 2751 * queue lock we have to take.
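	 * Interrupts stay disabled for the whole walk below: the per-queue
	 * locks are taken with plain spin_lock(), queue_unplugged() drops
	 * them the same way, and they are only re-enabled by the final
	 * local_irq_restore().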
2752 */ 2753 local_irq_save(flags); 2754 while (!list_empty(&list)) { 2755 rq = list_entry_rq(list.next); 2756 list_del_init(&rq->queuelist); 2757 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); 2758 BUG_ON(!rq->q); 2759 if (rq->q != q) { 2760 /* 2761 * This drops the queue lock 2762 */ 2763 if (q) 2764 queue_unplugged(q, depth, from_schedule); 2765 q = rq->q; 2766 depth = 0; 2767 spin_lock(q->queue_lock); 2768 } 2769 rq->cmd_flags &= ~REQ_ON_PLUG; 2770 2771 /* 2772 * rq is already accounted, so use raw insert 2773 */ 2774 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) 2775 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 2776 else 2777 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 2778 2779 depth++; 2780 } 2781 2782 /* 2783 * This drops the queue lock 2784 */ 2785 if (q) 2786 queue_unplugged(q, depth, from_schedule); 2787 2788 local_irq_restore(flags); 2789} 2790EXPORT_SYMBOL(blk_flush_plug_list); 2791 2792void blk_finish_plug(struct blk_plug *plug) 2793{ 2794 blk_flush_plug_list(plug, false); 2795 2796 if (plug == current->plug) 2797 current->plug = NULL; 2798} 2799EXPORT_SYMBOL(blk_finish_plug); 2800 2801int __init blk_dev_init(void) 2802{ 2803 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2804 sizeof(((struct request *)0)->cmd_flags)); 2805 2806 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 2807 kblockd_workqueue = alloc_workqueue("kblockd", 2808 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2809 if (!kblockd_workqueue) 2810 panic("Failed to create kblockd\n"); 2811 2812 request_cachep = kmem_cache_create("blkdev_requests", 2813 sizeof(struct request), 0, SLAB_PANIC, NULL); 2814 2815 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 2816 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 2817 2818 return 0; 2819}