/* linux/blkdev.h, at v3.16-rc5 */

#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS 2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL  (1U << 0)
#define BLK_RL_ASYNCFULL (1U << 1)

struct request_list {
	struct request_queue *q; /* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq *blkg; /* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
	unsigned int flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_SENSE,		/* sense request */
	REQ_TYPE_PM_SUSPEND,	/* suspend request */
	REQ_TYPE_PM_RESUME,	/* resume request */
	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
	REQ_TYPE_SPECIAL,	/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB 16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		unsigned long fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	u64 cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq *icq;
			void *priv[2];
		} elv;

		struct {
			unsigned int seq;
			struct list_head list;
			rq_end_io_fn *saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;	/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;	/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void* data;	/* for driver use */
};

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_write_same_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;
	unsigned short max_integrity_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char cluster;
	unsigned char discard_zeroes_data;
	unsigned char raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;
	int nr_rqs[2];		/* # allocated [a]sync rqs */
	int nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl. Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list root_rl;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unprep_rq_fn *unprep_rq_fn;
	merge_bvec_fn *merge_bvec_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	struct blk_mq_ops *mq_ops;

	unsigned int *mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;
	unsigned int nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx **queue_hw_ctx;
	unsigned int nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work delay_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_PM_RUNTIME
	struct device *dev;
	int rpm_status;
	unsigned int nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int dma_drain_size;
	void *dma_drain_buffer;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int request_fn_active;

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
#endif

	struct queue_limits limits;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int flush_flags;
	unsigned int flush_not_queueable:1;
	unsigned int flush_queue_delayed:1;
	unsigned int flush_pending_idx:1;
	unsigned int flush_running_idx:1;
	unsigned long flush_pending_since;
	struct list_head flush_queue[2];
	struct list_head flush_data_in_flight;
	struct request *flush_rq;
	spinlock_t mq_flush_lock;

	struct list_head requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;

	struct mutex sysfs_lock;

	int bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn *bsg_job_fn;
	int bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;
	struct percpu_counter mq_usage_counter;
	struct list_head all_q_node;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments*/
#define QUEUE_FLAG_SG_GAPS	22	/* queue doesn't support SG gaps */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) | \
				 (1 << QUEUE_FLAG_STACKABLE) | \
				 (1 << QUEUE_FLAG_SAME_COMP) | \
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) | \
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(((rq)->cmd_flags & 1) != 0)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK	0	/* serve it */
#define BLKPREP_KILL	1	/* fatal error, kill */
#define BLKPREP_DEFER	2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq) \
	if ((rq->bio)) \
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter) \
	__rq_for_each_bio(_iter.bio, _rq) \
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter) \
	(_iter.bio->bi_next == NULL && \
	 bio_iter_last(bvec, _iter.iter))
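
/*
 * A minimal sketch of walking a request's data with the iterator above,
 * e.g. from a driver's transfer routine. mydrv_copy_segment() is a
 * hypothetical helper, and the pages are assumed to be lowmem (otherwise
 * kmap() would be needed):
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *		mydrv_copy_segment(buf, bvec.bv_len);
 *	}
 *
 * rq_iter_last(bvec, iter) can be tested inside the loop body to detect
 * the final segment of the request.
 */
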
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct sg_iovec *,
			       int, unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_err_bytes()	: bytes left till the next error boundary
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
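
/*
 * A worked example of how the accessors above relate (the request and the
 * numbers are assumed purely for illustration): a 64 KiB read starting at
 * sector 2048 that has had its first 16 KiB completed reports
 * blk_rq_pos(rq) == 2080 and blk_rq_bytes(rq) == 48 KiB, i.e.
 * blk_rq_sectors(rq) == 96, while blk_rq_cur_bytes(rq) only covers what is
 * left of the current bio segment.
 */
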
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors)
		return blk_queue_get_max_sectors(q, rq->cmd_flags);

	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
			blk_queue_get_max_sectors(q, rq->cmd_flags));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends. __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);
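
/*
 * Minimal sketch of a single-queue driver's request_fn built from the
 * issue and completion helpers above. mydrv_xfer() is hypothetical, and
 * because ->request_fn is entered with the queue_lock held, the locked
 * __blk_end_request_all() variant is used:
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int err = 0;
 *
 *			if (rq->cmd_type != REQ_TYPE_FS)
 *				err = -EIO;
 *			else
 *				err = mydrv_xfer(rq);
 *			__blk_end_request_all(rq, err);
 *		}
 *	}
 */
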
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
					request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
			  struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
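
/*
 * Typical setup sequence using the allocation and limit helpers above
 * (a sketch; mydrv_request_fn, mydrv_lock, mydrv and the chosen limits are
 * assumptions, not requirements):
 *
 *	struct request_queue *q;
 *
 *	q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 255);
 *	q->queuedata = mydrv;
 */
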
/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM_RUNTIME
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}
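
/*
 * Example of batching submissions under a plug (a sketch; where the bios
 * come from is up to the caller, and next_prepared_bio() is hypothetical):
 *
 *	struct blk_plug plug;
 *	struct bio *bio;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_prepared_bio()) != NULL)
 *		submit_bio(bio->bi_rw, bio);
 *	blk_finish_plug(&plug);
 *
 * Requests generated between blk_start_plug() and blk_finish_plug() collect
 * on the per-task plug lists and are flushed to the device in one batch by
 * blk_finish_plug(), or earlier if the task sleeps.
 */
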
/*
 * tag stuff
 */
#define blk_rq_tagged(rq) \
	((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void *prot_buf;
	void *data_buf;
	sector_t sector;
	unsigned int data_size;
	unsigned short sector_size;
	const char *disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn *generate_fn;
	integrity_vrfy_fn *verify_fn;
	integrity_set_tag_fn *set_tag_fn;
	integrity_get_tag_fn *get_tag_fn;

	unsigned short flags;
	unsigned short tuple_size;
	unsigned short sector_size;
	unsigned short tag_size;

	const char *name;

	struct kobject kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return 0;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline int blk_integrity_merge_rq(struct request_queue *rq,
					 struct request *r1,
					 struct request *r2)
{
	return 0;
}
static inline int blk_integrity_merge_bio(struct request_queue *rq,
					  struct request *r,
					  struct bio *b)
{
	return 0;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif