/* include/linux/blkdev.h, at v4.11-rc3 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/*
 * set for "ide_preempt" requests and also for requests for which the SCSI
 * "quiesce" state must be ignored.
 */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/*
 * Look at ->special_vec for the actual data payload instead of the
 * bio chain.
 */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int internal_tag;

	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	int tag;
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq	*icq;
			void		*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
	struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int errors;

	unsigned int extra_len;	/* length of alignment and padding */

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
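
/*
 * Illustrative example (not part of the original header): a driver's
 * completion handler might branch on passthrough vs. filesystem requests,
 * using hypothetical mydrv_* helpers:
 *
 *	static void mydrv_complete_rq(struct request *rq)
 *	{
 *		if (blk_rq_is_passthrough(rq))
 *			mydrv_complete_priv_cmd(rq);	// SCSI/driver-private op
 *		else
 *			mydrv_complete_fs_rq(rq);	// normal READ/WRITE etc.
 *	}
 */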

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);
typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
typedef void (exit_rq_fn)(struct request_queue *, struct request *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};

#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
	unsigned int	nr_zones;
	u8		padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
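
/*
 * Illustrative example (not part of the original header): reporting the
 * first few zones of a zoned device into a caller-supplied buffer; nr is
 * an in/out count and error handling is elided:
 *
 *	struct blk_zone zones[16];
 *	unsigned int nr = ARRAY_SIZE(zones);
 *
 *	if (!blkdev_report_zones(bdev, 0, zones, &nr, GFP_KERNEL) && nr)
 *		pr_info("first zone: %llu sectors\n",
 *			(unsigned long long)zones[0].len);
 */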

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	struct rq_wb		*rq_wb;

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;
	init_rq_fn		*init_rq_fn;
	exit_rq_fn		*exit_rq_fn;

	const struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	*backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject		mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity	integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	struct blk_rq_stat	rq_stats[2];

	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait.  Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	int			poll_nsec;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device	bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*mq_debugfs_dir;
#endif

	bool			mq_sysfs_init_done;

	size_t			cmd_size;
	void			*rq_alloc_data;
};
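
/*
 * Illustrative example (not part of the original header): drivers commonly
 * stash their per-device context in ->queuedata at init time and fetch it
 * back in their request_fn; the mydrv_* names are hypothetical:
 *
 *	q = blk_init_queue(mydrv_request_fn, &mydrv->lock);
 *	q->queuedata = mydrv;
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct mydrv *mydrv = q->queuedata;
 *		...
 *	}
 */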

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */
#define QUEUE_FLAG_STATS	27	/* track rq completion times */
#define QUEUE_FLAG_RESTART	28	/* queue needs restart at completion */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
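
/*
 * Illustrative example (not part of the original header): the non-atomic
 * queue_flag_set()/queue_flag_clear() helpers must run under ->queue_lock,
 * which queue_lockdep_assert_held() enforces via lockdep:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * The *_unlocked variants are for contexts where the queue is not yet
 * visible to anyone else, e.g. during queue allocation.
 */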

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
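
/*
 * Illustrative example (not part of the original header): assuming the
 * zone size (chunk_sectors) is a power of two, as zoned devices require,
 * the zone a sector falls in can be computed with a shift:
 *
 *	unsigned int zone_sectors = blk_queue_zone_sectors(q);
 *	sector_t zone_no = 0;
 *
 *	if (zone_sectors)
 *		zone_no = sector >> ilog2(zone_sectors);
 */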

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
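
/*
 * Illustrative example (not part of the original header): walking every
 * bio_vec segment of a request, e.g. in a simple memory-backed driver,
 * with mydrv_copy() as a hypothetical helper:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *		mydrv_copy(buf, bvec.bv_len);
 *	}
 */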

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern void blk_mq_quiesce_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
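
/*
 * Illustrative example (not part of the original header): a sketch of
 * issuing a driver-private command synchronously with a kernel buffer,
 * error handling elided; the op choice and buffer are assumptions:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
 *	blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	blk_execute_rq(q, NULL, rq, 0);	// waits for completion
 *	// inspect rq->errors before trusting the result
 *	blk_put_request(rq);
 */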

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
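
/*
 * Illustrative example (not part of the original header): a driver turning
 * the position helpers into a device command, with a hypothetical
 * mydrv_submit_io().  Note that >> 9 converts bytes to 512-byte sectors:
 *
 *	sector_t start = blk_rq_pos(rq);		// first sector
 *	unsigned int nsect = blk_rq_sectors(rq);	// sectors remaining
 *
 *	mydrv_submit_io(dev, start, nsect, rq_data_dir(rq) == WRITE);
 */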

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset.  Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * blk_rq_set_prio - associate a request with prio from ioc
 * @rq: request of interest
 * @ioc: target iocontext
 *
 * Associate request prio with ioc prio so request based drivers
 * can leverage priority information.
 */
static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
{
	if (ioc)
		rq->ioprio = ioc->ioprio;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
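
/*
 * Illustrative example (not part of the original header): the canonical
 * single-queue request_fn loop, fetching and completing requests.  The
 * queue_lock is held on entry; real drivers usually drop it around
 * hardware access.  mydrv_do_io() is a hypothetical helper:
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int error = mydrv_do_io(rq);
 *
 *			__blk_end_request_all(rq, error);
 *		}
 *	}
 */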

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern int blk_init_allocated_queue(struct request_queue *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
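
/*
 * Illustrative example (not part of the original header): typical queue
 * setup in a legacy (non-mq) driver probe path, error handling elided and
 * the numbers purely exemplary:
 *
 *	q = blk_init_queue(mydrv_request_fn, &mydrv->lock);
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 1024);	// 512 KB per request
 *	blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
 */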

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself.  For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}
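
/*
 * Illustrative example (not part of the original header): batching a burst
 * of bios under one plug so the block layer can merge them before they hit
 * the device:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */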

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		bool discard);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}
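
/*
 * Illustrative note (not part of the original header): the
 * "block << (sb->s_blocksize_bits - 9)" shifts above convert filesystem
 * blocks to 512-byte sectors.  For a 4096-byte block size,
 * s_blocksize_bits is 12, so one fs block is 1 << (12 - 9) = 8 sectors.
 * A filesystem freeing blocks [start, start + count) could thus do:
 *
 *	err = sb_issue_discard(sb, start, count, GFP_NOFS, 0);
 */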

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zone_sectors(q);

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
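
/*
 * Illustrative worked example (not part of the original header): for a
 * 4096-byte block size the loop above halves 4096 four times before the
 * value reaches 256, so blksize_bits(4096) returns 8 + 4 = 12, matching
 * 4096 == 1 << 12.
 */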

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	put_page(p.v);
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list.  Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

/*
 * Check if the two bvecs from two bios can be merged to one segment.  If
 * yes, no need to check gap between the two bios since the 1st bio and
 * the 1st bvec in the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
		struct bio *prev, struct bio_vec *prev_last_bv,
		struct bio_vec *next_first_bv)
{
	if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
		return false;
	if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
		return false;
	if (prev->bi_seg_back_size + next_first_bv->bv_len >
			queue_max_segment_size(q))
		return false;
	return true;
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
				struct bio *next)
{
	if (bio_has_data(prev) && queue_virt_boundary(q)) {
		struct bio_vec pb, nb;

		bio_get_last_bvec(prev, &pb);
		bio_get_first_bvec(next, &nb);

		if (!bios_segs_mergeable(q, prev, &pb, &nb))
			return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
	}

	return false;
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock().  A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
	integrity_processing_fn	*generate_fn;
	integrity_processing_fn	*verify_fn;
	const char		*name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 */
struct blk_dax_ctl {
	sector_t sector;
	void *addr;
	long size;
	pfn_t pfn;
};

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
			long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};

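/*
 * Illustrative sketch (not part of this header): a simple driver typically
 * implements only a few of the methods above and leaves the rest NULL.
 * The names and geometry values below are hypothetical examples.
 *
 *	static int example_open(struct block_device *bdev, fmode_t mode)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_release(struct gendisk *disk, fmode_t mode)
 *	{
 *	}
 *
 *	static int example_getgeo(struct block_device *bdev,
 *				  struct hd_geometry *geo)
 *	{
 *		// fake a CHS geometry for tools that still ask for one
 *		geo->heads = 4;
 *		geo->sectors = 16;
 *		geo->cylinders = get_capacity(bdev->bd_disk) >> 6;
 *		return 0;
 *	}
 *
 *	static const struct block_device_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= example_open,
 *		.release	= example_release,
 *		.getgeo		= example_getgeo,
 *	};
 *
 * The table is hooked up through gendisk->fops before add_disk(); see
 * drivers/block/ for in-tree users.
 */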
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
extern bool bdev_dax_capable(struct block_device *);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif