include/linux/blkdev.h at v4.5-rc3
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_DRV_PRIV,	/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		unsigned long fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	u64 cmd_flags;
	unsigned cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl. Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
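
/*
 * Illustrative sketch, not part of the original header: the queue_flag_*
 * helpers above expect q->queue_lock to be held unless the *_unlocked
 * variant is used. A hypothetical driver marking its device non-rotational
 * at probe time might do something like this; example_mark_ssd is an
 * assumed name, not a kernel API.
 */
static inline void example_mark_ssd(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_NONROT, q);	/* lockdep-checked variant */
	queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}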

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			    REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page  **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
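
/*
 * Illustrative sketch, not part of the original header: walking every
 * bio_vec segment of a request with rq_for_each_segment(). The function
 * name and the destination buffer are assumptions made only for the example.
 */
static inline void example_copy_rq_to_buf(struct request *rq, void *dst)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	char *p = dst;

	rq_for_each_segment(bvec, rq, iter) {
		void *src = page_address(bvec.bv_page) + bvec.bv_offset;

		/* A real driver would use kmap_atomic() for highmem pages. */
		memcpy(p, src, bvec.bv_len);
		p += bvec.bv_len;
	}
}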

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
		return blk_queue_get_max_sectors(q, rq->cmd_flags);

	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
			blk_queue_get_max_sectors(q, rq->cmd_flags));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}
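
/*
 * Illustrative sketch, not part of the original header: how a simple
 * request-based driver might read the position, size, and direction of a
 * request using the accessors above. example_describe_rq is an assumed
 * name used purely for illustration.
 */
static inline void example_describe_rq(struct request *rq)
{
	sector_t start = blk_rq_pos(rq);		/* first sector */
	unsigned int nsect = blk_rq_sectors(rq);	/* 512-byte sectors */
	int dir = rq_data_dir(rq);			/* 0 = read, 1 = write */

	pr_debug("%s rq: sector %llu, %u sectors, %u bios\n",
		 dir ? "write" : "read",
		 (unsigned long long)start, nsect, blk_rq_count_bios(rq));
}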

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends. __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);
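
/*
 * Illustrative sketch, not part of the original header: the classic shape
 * of an old-style request_fn. It is called with q->queue_lock held, pulls
 * requests off the queue with blk_fetch_request(), and completes them with
 * __blk_end_request_all() (the locked variant). The transfer step named in
 * the comment is an assumed driver-private helper, not a kernel API.
 */
static inline void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		int error = 0;

		if (rq->cmd_type != REQ_TYPE_FS)
			error = -EIO;	/* only file system requests handled */
		/* else: the driver's transfer helper would move the data here */

		__blk_end_request_all(rq, error);
	}
}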

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
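
/*
 * Illustrative sketch, not part of the original header: typical queue setup
 * in an old-style request-based driver, combining blk_init_queue() with the
 * limit-setting helpers declared above. The limit values are assumptions
 * chosen only for the example; the request handler and lock are supplied by
 * the caller.
 */
static inline struct request_queue *example_setup_queue(request_fn_proc *fn,
							spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_init_queue(fn, lock);
	if (!q)
		return NULL;

	blk_queue_logical_block_size(q, 4096);	/* 4 KiB logical blocks */
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 256);	/* at most 128 KiB per request */
	blk_queue_max_segments(q, 64);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

	return q;	/* the caller attaches this to its gendisk */
}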

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this improves scalability
 * by reducing contention on the request_queue lock.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}
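
/*
 * Illustrative sketch, not part of the original header: submitting a batch
 * of bios under a plug so the block layer can merge and dispatch them
 * together when the plug is finished. The bio array and count are assumed
 * to be prepared by the caller.
 */
static inline void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* queue requests on a per-task list */
	for (i = 0; i < nr; i++)
		generic_make_request(bios[i]);
	blk_finish_plug(&plug);		/* flush the batch to the device */
}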

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
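
/*
 * Illustrative sketch, not part of the original header: discarding a range
 * of a block device, falling back to explicit zeroing when the device does
 * not advertise discard support. The range is an assumption for the example.
 */
static inline int example_trim_range(struct block_device *bdev,
				     sector_t sector, sector_t nr_sects)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (blk_queue_discard(q))
		return blkdev_issue_discard(bdev, sector, nr_sects,
					    GFP_KERNEL, 0);

	/* No discard support: write zeroes instead (discard == false). */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, false);
}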

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}
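
/*
 * Illustrative sketch, not part of the original header: relating the logical
 * block size of a device to the shift returned by blksize_bits(). For a
 * 4096-byte logical block, blksize_bits(4096) evaluates to 12, so the value
 * computed below would be 8 sectors per block.
 */
static inline unsigned int example_sectors_per_block(struct block_device *bdev)
{
	unsigned int lbs = bdev_logical_block_size(bdev);	/* e.g. 512 or 4096 */
	unsigned int bits = blksize_bits(lbs);			/* log2(lbs) */

	return 1U << (bits - 9);	/* number of 512-byte sectors per block */
}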

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
				struct bio *next)
{
	if (!bio_has_data(prev))
		return false;

	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
				next->bi_io_vec[0].bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
	integrity_processing_fn		*generate_fn;
	integrity_processing_fn		*verify_fn;
	const char			*name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 */
struct blk_dax_ctl {
	sector_t sector;
	void __pmem *addr;
	long size;
	pfn_t pfn;
};

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
			pfn_t *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};
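
/*
 * Illustrative sketch, not part of the original header: the minimal set of
 * block_device_operations a simple driver might provide. The stub callbacks
 * and the use of THIS_MODULE (which assumes linux/module.h is available) are
 * assumptions made only for the example; only the structure layout comes
 * from this header.
 */
static inline int example_open(struct block_device *bdev, fmode_t mode)
{
	return 0;	/* nothing to prepare in this sketch */
}

static inline void example_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.release	= example_release,
};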

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
						struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif