/* linux/blkdev.h, at v4.12-rc6 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;
struct blk_queue_stats;
struct blk_stat_callback;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS	16

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	3

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.
Ignored by the block layer */ 105#define RQF_FAILED ((__force req_flags_t)(1 << 10)) 106/* don't warn about errors */ 107#define RQF_QUIET ((__force req_flags_t)(1 << 11)) 108/* elevator private data attached */ 109#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) 110/* account I/O stat */ 111#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) 112/* request came from our alloc pool */ 113#define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) 114/* runtime pm request */ 115#define RQF_PM ((__force req_flags_t)(1 << 15)) 116/* on IO scheduler merge hash */ 117#define RQF_HASHED ((__force req_flags_t)(1 << 16)) 118/* IO stats tracking on */ 119#define RQF_STATS ((__force req_flags_t)(1 << 17)) 120/* Look at ->special_vec for the actual data payload instead of the 121 bio chain. */ 122#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) 123 124/* flags that prevent us from merging requests: */ 125#define RQF_NOMERGE_FLAGS \ 126 (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) 127 128/* 129 * Try to put the fields that are referenced together in the same cacheline. 130 * 131 * If you modify this structure, make sure to update blk_rq_init() and 132 * especially blk_mq_rq_ctx_init() to take care of the added fields. 133 */ 134struct request { 135 struct list_head queuelist; 136 union { 137 struct call_single_data csd; 138 u64 fifo_time; 139 }; 140 141 struct request_queue *q; 142 struct blk_mq_ctx *mq_ctx; 143 144 int cpu; 145 unsigned int cmd_flags; /* op and common flags */ 146 req_flags_t rq_flags; 147 148 int internal_tag; 149 150 unsigned long atomic_flags; 151 152 /* the following two fields are internal, NEVER access directly */ 153 unsigned int __data_len; /* total data len */ 154 int tag; 155 sector_t __sector; /* sector cursor */ 156 157 struct bio *bio; 158 struct bio *biotail; 159 160 /* 161 * The hash is used inside the scheduler, and killed once the 162 * request reaches the dispatch list. The ipi_list is only used 163 * to queue the request for softirq completion, which is long 164 * after the request has been unhashed (and even removed from 165 * the dispatch list). 166 */ 167 union { 168 struct hlist_node hash; /* merge hash */ 169 struct list_head ipi_list; 170 }; 171 172 /* 173 * The rb_node is only used inside the io scheduler, requests 174 * are pruned when moved to the dispatch queue. So let the 175 * completion_data share space with the rb_node. 176 */ 177 union { 178 struct rb_node rb_node; /* sort/lookup */ 179 struct bio_vec special_vec; 180 void *completion_data; 181 int error_count; /* for legacy drivers, don't use */ 182 }; 183 184 /* 185 * Three pointers are available for the IO schedulers, if they need 186 * more they have to dynamically allocate it. Flush requests are 187 * never put on the IO scheduler. So let the flush fields share 188 * space with the elevator data. 189 */ 190 union { 191 struct { 192 struct io_cq *icq; 193 void *priv[2]; 194 } elv; 195 196 struct { 197 unsigned int seq; 198 struct list_head list; 199 rq_end_io_fn *saved_end_io; 200 } flush; 201 }; 202 203 struct gendisk *rq_disk; 204 struct hd_struct *part; 205 unsigned long start_time; 206 struct blk_issue_stat issue_stat; 207#ifdef CONFIG_BLK_CGROUP 208 struct request_list *rl; /* rl this rq is alloced from */ 209 unsigned long long start_time_ns; 210 unsigned long long io_start_time_ns; /* when passed to hardware */ 211#endif 212 /* Number of scatter-gather DMA addr+len pairs after 213 * physical address coalescing is performed. 
214 */ 215 unsigned short nr_phys_segments; 216#if defined(CONFIG_BLK_DEV_INTEGRITY) 217 unsigned short nr_integrity_segments; 218#endif 219 220 unsigned short ioprio; 221 222 unsigned int timeout; 223 224 void *special; /* opaque pointer available for LLD use */ 225 226 unsigned int extra_len; /* length of alignment and padding */ 227 228 unsigned long deadline; 229 struct list_head timeout_list; 230 231 /* 232 * completion callback. 233 */ 234 rq_end_io_fn *end_io; 235 void *end_io_data; 236 237 /* for bidi */ 238 struct request *next_rq; 239}; 240 241static inline bool blk_rq_is_scsi(struct request *rq) 242{ 243 return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; 244} 245 246static inline bool blk_rq_is_private(struct request *rq) 247{ 248 return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; 249} 250 251static inline bool blk_rq_is_passthrough(struct request *rq) 252{ 253 return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); 254} 255 256static inline unsigned short req_get_ioprio(struct request *req) 257{ 258 return req->ioprio; 259} 260 261#include <linux/elevator.h> 262 263struct blk_queue_ctx; 264 265typedef void (request_fn_proc) (struct request_queue *q); 266typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); 267typedef int (prep_rq_fn) (struct request_queue *, struct request *); 268typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 269 270struct bio_vec; 271typedef void (softirq_done_fn)(struct request *); 272typedef int (dma_drain_needed_fn)(struct request *); 273typedef int (lld_busy_fn) (struct request_queue *q); 274typedef int (bsg_job_fn) (struct bsg_job *); 275typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t); 276typedef void (exit_rq_fn)(struct request_queue *, struct request *); 277 278enum blk_eh_timer_return { 279 BLK_EH_NOT_HANDLED, 280 BLK_EH_HANDLED, 281 BLK_EH_RESET_TIMER, 282}; 283 284typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); 285 286enum blk_queue_state { 287 Queue_down, 288 Queue_up, 289}; 290 291struct blk_queue_tag { 292 struct request **tag_index; /* map of busy tags */ 293 unsigned long *tag_map; /* bit map of free/busy tags */ 294 int max_depth; /* what we will send to device */ 295 int real_max_depth; /* what the array can hold */ 296 atomic_t refcnt; /* map can be shared */ 297 int alloc_policy; /* tag allocation policy */ 298 int next_tag; /* next tag */ 299}; 300#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ 301#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ 302 303#define BLK_SCSI_MAX_CMDS (256) 304#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) 305 306/* 307 * Zoned block device models (zoned limit). 
308 */ 309enum blk_zoned_model { 310 BLK_ZONED_NONE, /* Regular block device */ 311 BLK_ZONED_HA, /* Host-aware zoned block device */ 312 BLK_ZONED_HM, /* Host-managed zoned block device */ 313}; 314 315struct queue_limits { 316 unsigned long bounce_pfn; 317 unsigned long seg_boundary_mask; 318 unsigned long virt_boundary_mask; 319 320 unsigned int max_hw_sectors; 321 unsigned int max_dev_sectors; 322 unsigned int chunk_sectors; 323 unsigned int max_sectors; 324 unsigned int max_segment_size; 325 unsigned int physical_block_size; 326 unsigned int alignment_offset; 327 unsigned int io_min; 328 unsigned int io_opt; 329 unsigned int max_discard_sectors; 330 unsigned int max_hw_discard_sectors; 331 unsigned int max_write_same_sectors; 332 unsigned int max_write_zeroes_sectors; 333 unsigned int discard_granularity; 334 unsigned int discard_alignment; 335 336 unsigned short logical_block_size; 337 unsigned short max_segments; 338 unsigned short max_integrity_segments; 339 unsigned short max_discard_segments; 340 341 unsigned char misaligned; 342 unsigned char discard_misaligned; 343 unsigned char cluster; 344 unsigned char raid_partial_stripes_expensive; 345 enum blk_zoned_model zoned; 346}; 347 348#ifdef CONFIG_BLK_DEV_ZONED 349 350struct blk_zone_report_hdr { 351 unsigned int nr_zones; 352 u8 padding[60]; 353}; 354 355extern int blkdev_report_zones(struct block_device *bdev, 356 sector_t sector, struct blk_zone *zones, 357 unsigned int *nr_zones, gfp_t gfp_mask); 358extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, 359 sector_t nr_sectors, gfp_t gfp_mask); 360 361extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, 362 unsigned int cmd, unsigned long arg); 363extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, 364 unsigned int cmd, unsigned long arg); 365 366#else /* CONFIG_BLK_DEV_ZONED */ 367 368static inline int blkdev_report_zones_ioctl(struct block_device *bdev, 369 fmode_t mode, unsigned int cmd, 370 unsigned long arg) 371{ 372 return -ENOTTY; 373} 374 375static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, 376 fmode_t mode, unsigned int cmd, 377 unsigned long arg) 378{ 379 return -ENOTTY; 380} 381 382#endif /* CONFIG_BLK_DEV_ZONED */ 383 384struct request_queue { 385 /* 386 * Together with queue_head for cacheline sharing 387 */ 388 struct list_head queue_head; 389 struct request *last_merge; 390 struct elevator_queue *elevator; 391 int nr_rqs[2]; /* # allocated [a]sync rqs */ 392 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ 393 394 struct blk_queue_stats *stats; 395 struct rq_wb *rq_wb; 396 397 /* 398 * If blkcg is not used, @q->root_rl serves all requests. If blkcg 399 * is used, root blkg allocates from @q->root_rl and all other 400 * blkgs from their own blkg->rl. Which one to use should be 401 * determined using bio_request_list(). 
402 */ 403 struct request_list root_rl; 404 405 request_fn_proc *request_fn; 406 make_request_fn *make_request_fn; 407 prep_rq_fn *prep_rq_fn; 408 unprep_rq_fn *unprep_rq_fn; 409 softirq_done_fn *softirq_done_fn; 410 rq_timed_out_fn *rq_timed_out_fn; 411 dma_drain_needed_fn *dma_drain_needed; 412 lld_busy_fn *lld_busy_fn; 413 init_rq_fn *init_rq_fn; 414 exit_rq_fn *exit_rq_fn; 415 416 const struct blk_mq_ops *mq_ops; 417 418 unsigned int *mq_map; 419 420 /* sw queues */ 421 struct blk_mq_ctx __percpu *queue_ctx; 422 unsigned int nr_queues; 423 424 unsigned int queue_depth; 425 426 /* hw dispatch queues */ 427 struct blk_mq_hw_ctx **queue_hw_ctx; 428 unsigned int nr_hw_queues; 429 430 /* 431 * Dispatch queue sorting 432 */ 433 sector_t end_sector; 434 struct request *boundary_rq; 435 436 /* 437 * Delayed queue handling 438 */ 439 struct delayed_work delay_work; 440 441 struct backing_dev_info *backing_dev_info; 442 443 /* 444 * The queue owner gets to use this for whatever they like. 445 * ll_rw_blk doesn't touch it. 446 */ 447 void *queuedata; 448 449 /* 450 * various queue flags, see QUEUE_* below 451 */ 452 unsigned long queue_flags; 453 454 /* 455 * ida allocated id for this queue. Used to index queues from 456 * ioctx. 457 */ 458 int id; 459 460 /* 461 * queue needs bounce pages for pages above this limit 462 */ 463 gfp_t bounce_gfp; 464 465 /* 466 * protects queue structures from reentrancy. ->__queue_lock should 467 * _never_ be used directly, it is queue private. always use 468 * ->queue_lock. 469 */ 470 spinlock_t __queue_lock; 471 spinlock_t *queue_lock; 472 473 /* 474 * queue kobject 475 */ 476 struct kobject kobj; 477 478 /* 479 * mq queue kobject 480 */ 481 struct kobject mq_kobj; 482 483#ifdef CONFIG_BLK_DEV_INTEGRITY 484 struct blk_integrity integrity; 485#endif /* CONFIG_BLK_DEV_INTEGRITY */ 486 487#ifdef CONFIG_PM 488 struct device *dev; 489 int rpm_status; 490 unsigned int nr_pending; 491#endif 492 493 /* 494 * queue settings 495 */ 496 unsigned long nr_requests; /* Max # of requests */ 497 unsigned int nr_congestion_on; 498 unsigned int nr_congestion_off; 499 unsigned int nr_batching; 500 501 unsigned int dma_drain_size; 502 void *dma_drain_buffer; 503 unsigned int dma_pad_mask; 504 unsigned int dma_alignment; 505 506 struct blk_queue_tag *queue_tags; 507 struct list_head tag_busy_list; 508 509 unsigned int nr_sorted; 510 unsigned int in_flight[2]; 511 512 /* 513 * Number of active block driver functions for which blk_drain_queue() 514 * must wait. Must be incremented around functions that unlock the 515 * queue_lock internally, e.g. scsi_request_fn(). 
516 */ 517 unsigned int request_fn_active; 518 519 unsigned int rq_timeout; 520 int poll_nsec; 521 522 struct blk_stat_callback *poll_cb; 523 struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS]; 524 525 struct timer_list timeout; 526 struct work_struct timeout_work; 527 struct list_head timeout_list; 528 529 struct list_head icq_list; 530#ifdef CONFIG_BLK_CGROUP 531 DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); 532 struct blkcg_gq *root_blkg; 533 struct list_head blkg_list; 534#endif 535 536 struct queue_limits limits; 537 538 /* 539 * sg stuff 540 */ 541 unsigned int sg_timeout; 542 unsigned int sg_reserved_size; 543 int node; 544#ifdef CONFIG_BLK_DEV_IO_TRACE 545 struct blk_trace *blk_trace; 546#endif 547 /* 548 * for flush operations 549 */ 550 struct blk_flush_queue *fq; 551 552 struct list_head requeue_list; 553 spinlock_t requeue_lock; 554 struct delayed_work requeue_work; 555 556 struct mutex sysfs_lock; 557 558 int bypass_depth; 559 atomic_t mq_freeze_depth; 560 561#if defined(CONFIG_BLK_DEV_BSG) 562 bsg_job_fn *bsg_job_fn; 563 int bsg_job_size; 564 struct bsg_class_device bsg_dev; 565#endif 566 567#ifdef CONFIG_BLK_DEV_THROTTLING 568 /* Throttle data */ 569 struct throtl_data *td; 570#endif 571 struct rcu_head rcu_head; 572 wait_queue_head_t mq_freeze_wq; 573 struct percpu_ref q_usage_counter; 574 struct list_head all_q_node; 575 576 struct blk_mq_tag_set *tag_set; 577 struct list_head tag_set_list; 578 struct bio_set *bio_split; 579 580#ifdef CONFIG_BLK_DEBUG_FS 581 struct dentry *debugfs_dir; 582 struct dentry *sched_debugfs_dir; 583#endif 584 585 bool mq_sysfs_init_done; 586 587 size_t cmd_size; 588 void *rq_alloc_data; 589 590 struct work_struct release_work; 591}; 592 593#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 594#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 595#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 596#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 597#define QUEUE_FLAG_DYING 5 /* queue being torn down */ 598#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ 599#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ 600#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ 601#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ 602#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ 603#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ 604#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ 605#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 606#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ 607#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ 608#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ 609#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ 610#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */ 611#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ 612#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 613#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ 614#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ 615#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ 616#define QUEUE_FLAG_WC 23 /* Write back caching */ 617#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ 618#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ 619#define QUEUE_FLAG_DAX 26 /* device supports DAX */ 620#define QUEUE_FLAG_STATS 27 /* track rq completion times */ 621#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */ 622#define 
QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */ 623 624#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 625 (1 << QUEUE_FLAG_STACKABLE) | \ 626 (1 << QUEUE_FLAG_SAME_COMP) | \ 627 (1 << QUEUE_FLAG_ADD_RANDOM)) 628 629#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 630 (1 << QUEUE_FLAG_STACKABLE) | \ 631 (1 << QUEUE_FLAG_SAME_COMP) | \ 632 (1 << QUEUE_FLAG_POLL)) 633 634static inline void queue_lockdep_assert_held(struct request_queue *q) 635{ 636 if (q->queue_lock) 637 lockdep_assert_held(q->queue_lock); 638} 639 640static inline void queue_flag_set_unlocked(unsigned int flag, 641 struct request_queue *q) 642{ 643 __set_bit(flag, &q->queue_flags); 644} 645 646static inline int queue_flag_test_and_clear(unsigned int flag, 647 struct request_queue *q) 648{ 649 queue_lockdep_assert_held(q); 650 651 if (test_bit(flag, &q->queue_flags)) { 652 __clear_bit(flag, &q->queue_flags); 653 return 1; 654 } 655 656 return 0; 657} 658 659static inline int queue_flag_test_and_set(unsigned int flag, 660 struct request_queue *q) 661{ 662 queue_lockdep_assert_held(q); 663 664 if (!test_bit(flag, &q->queue_flags)) { 665 __set_bit(flag, &q->queue_flags); 666 return 0; 667 } 668 669 return 1; 670} 671 672static inline void queue_flag_set(unsigned int flag, struct request_queue *q) 673{ 674 queue_lockdep_assert_held(q); 675 __set_bit(flag, &q->queue_flags); 676} 677 678static inline void queue_flag_clear_unlocked(unsigned int flag, 679 struct request_queue *q) 680{ 681 __clear_bit(flag, &q->queue_flags); 682} 683 684static inline int queue_in_flight(struct request_queue *q) 685{ 686 return q->in_flight[0] + q->in_flight[1]; 687} 688 689static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 690{ 691 queue_lockdep_assert_held(q); 692 __clear_bit(flag, &q->queue_flags); 693} 694 695#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 696#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 697#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) 698#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) 699#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) 700#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) 701#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 702#define blk_queue_noxmerges(q) \ 703 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) 704#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 705#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) 706#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) 707#define blk_queue_stackable(q) \ 708 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 709#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 710#define blk_queue_secure_erase(q) \ 711 (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) 712#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) 713 714#define blk_noretry_request(rq) \ 715 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 716 REQ_FAILFAST_DRIVER)) 717 718static inline bool blk_account_rq(struct request *rq) 719{ 720 return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); 721} 722 723#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 724#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 725/* rq->queuelist of dequeued request must be list_empty() */ 726#define blk_queued_rq(rq) 
(!list_empty(&(rq)->queuelist)) 727 728#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 729 730#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) 731 732/* 733 * Driver can handle struct request, if it either has an old style 734 * request_fn defined, or is blk-mq based. 735 */ 736static inline bool queue_is_rq_based(struct request_queue *q) 737{ 738 return q->request_fn || q->mq_ops; 739} 740 741static inline unsigned int blk_queue_cluster(struct request_queue *q) 742{ 743 return q->limits.cluster; 744} 745 746static inline enum blk_zoned_model 747blk_queue_zoned_model(struct request_queue *q) 748{ 749 return q->limits.zoned; 750} 751 752static inline bool blk_queue_is_zoned(struct request_queue *q) 753{ 754 switch (blk_queue_zoned_model(q)) { 755 case BLK_ZONED_HA: 756 case BLK_ZONED_HM: 757 return true; 758 default: 759 return false; 760 } 761} 762 763static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) 764{ 765 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; 766} 767 768static inline bool rq_is_sync(struct request *rq) 769{ 770 return op_is_sync(rq->cmd_flags); 771} 772 773static inline bool blk_rl_full(struct request_list *rl, bool sync) 774{ 775 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 776 777 return rl->flags & flag; 778} 779 780static inline void blk_set_rl_full(struct request_list *rl, bool sync) 781{ 782 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 783 784 rl->flags |= flag; 785} 786 787static inline void blk_clear_rl_full(struct request_list *rl, bool sync) 788{ 789 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; 790 791 rl->flags &= ~flag; 792} 793 794static inline bool rq_mergeable(struct request *rq) 795{ 796 if (blk_rq_is_passthrough(rq)) 797 return false; 798 799 if (req_op(rq) == REQ_OP_FLUSH) 800 return false; 801 802 if (req_op(rq) == REQ_OP_WRITE_ZEROES) 803 return false; 804 805 if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 806 return false; 807 if (rq->rq_flags & RQF_NOMERGE_FLAGS) 808 return false; 809 810 return true; 811} 812 813static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) 814{ 815 if (bio_data(a) == bio_data(b)) 816 return true; 817 818 return false; 819} 820 821static inline unsigned int blk_queue_depth(struct request_queue *q) 822{ 823 if (q->queue_depth) 824 return q->queue_depth; 825 826 return q->nr_requests; 827} 828 829/* 830 * q->prep_rq_fn return values 831 */ 832enum { 833 BLKPREP_OK, /* serve it */ 834 BLKPREP_KILL, /* fatal error, kill, return -EIO */ 835 BLKPREP_DEFER, /* leave on queue */ 836 BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ 837}; 838 839extern unsigned long blk_max_low_pfn, blk_max_pfn; 840 841/* 842 * standard bounce addresses: 843 * 844 * BLK_BOUNCE_HIGH : bounce all highmem pages 845 * BLK_BOUNCE_ANY : don't bounce anything 846 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary 847 */ 848 849#if BITS_PER_LONG == 32 850#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) 851#else 852#define BLK_BOUNCE_HIGH -1ULL 853#endif 854#define BLK_BOUNCE_ANY (-1ULL) 855#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) 856 857/* 858 * default timeout for SG_IO if none specified 859 */ 860#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) 861#define BLK_MIN_SG_TIMEOUT (7 * HZ) 862 863#ifdef CONFIG_BOUNCE 864extern int init_emergency_isa_pool(void); 865extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); 866#else 867static inline int init_emergency_isa_pool(void) 868{ 869 
return 0; 870} 871static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) 872{ 873} 874#endif /* CONFIG_MMU */ 875 876struct rq_map_data { 877 struct page **pages; 878 int page_order; 879 int nr_entries; 880 unsigned long offset; 881 int null_mapped; 882 int from_user; 883}; 884 885struct req_iterator { 886 struct bvec_iter iter; 887 struct bio *bio; 888}; 889 890/* This should not be used directly - use rq_for_each_segment */ 891#define for_each_bio(_bio) \ 892 for (; _bio; _bio = _bio->bi_next) 893#define __rq_for_each_bio(_bio, rq) \ 894 if ((rq->bio)) \ 895 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) 896 897#define rq_for_each_segment(bvl, _rq, _iter) \ 898 __rq_for_each_bio(_iter.bio, _rq) \ 899 bio_for_each_segment(bvl, _iter.bio, _iter.iter) 900 901#define rq_iter_last(bvec, _iter) \ 902 (_iter.bio->bi_next == NULL && \ 903 bio_iter_last(bvec, _iter.iter)) 904 905#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 906# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" 907#endif 908#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 909extern void rq_flush_dcache_pages(struct request *rq); 910#else 911static inline void rq_flush_dcache_pages(struct request *rq) 912{ 913} 914#endif 915 916#ifdef CONFIG_PRINTK 917#define vfs_msg(sb, level, fmt, ...) \ 918 __vfs_msg(sb, level, fmt, ##__VA_ARGS__) 919#else 920#define vfs_msg(sb, level, fmt, ...) \ 921do { \ 922 no_printk(fmt, ##__VA_ARGS__); \ 923 __vfs_msg(sb, "", " "); \ 924} while (0) 925#endif 926 927extern int blk_register_queue(struct gendisk *disk); 928extern void blk_unregister_queue(struct gendisk *disk); 929extern blk_qc_t generic_make_request(struct bio *bio); 930extern void blk_rq_init(struct request_queue *q, struct request *rq); 931extern void blk_init_request_from_bio(struct request *req, struct bio *bio); 932extern void blk_put_request(struct request *); 933extern void __blk_put_request(struct request_queue *, struct request *); 934extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 935extern void blk_requeue_request(struct request_queue *, struct request *); 936extern int blk_lld_busy(struct request_queue *q); 937extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 938 struct bio_set *bs, gfp_t gfp_mask, 939 int (*bio_ctr)(struct bio *, struct bio *, void *), 940 void *data); 941extern void blk_rq_unprep_clone(struct request *rq); 942extern int blk_insert_cloned_request(struct request_queue *q, 943 struct request *rq); 944extern int blk_rq_append_bio(struct request *rq, struct bio *bio); 945extern void blk_delay_queue(struct request_queue *, unsigned long); 946extern void blk_queue_split(struct request_queue *, struct bio **, 947 struct bio_set *); 948extern void blk_recount_segments(struct request_queue *, struct bio *); 949extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); 950extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, 951 unsigned int, void __user *); 952extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, 953 unsigned int, void __user *); 954extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 955 struct scsi_ioctl_command __user *); 956 957extern int blk_queue_enter(struct request_queue *q, bool nowait); 958extern void blk_queue_exit(struct request_queue *q); 959extern void blk_start_queue(struct request_queue *q); 960extern void blk_start_queue_async(struct request_queue *q); 961extern void blk_stop_queue(struct request_queue *q); 962extern void 
blk_sync_queue(struct request_queue *q); 963extern void __blk_stop_queue(struct request_queue *q); 964extern void __blk_run_queue(struct request_queue *q); 965extern void __blk_run_queue_uncond(struct request_queue *q); 966extern void blk_run_queue(struct request_queue *); 967extern void blk_run_queue_async(struct request_queue *q); 968extern void blk_mq_quiesce_queue(struct request_queue *q); 969extern int blk_rq_map_user(struct request_queue *, struct request *, 970 struct rq_map_data *, void __user *, unsigned long, 971 gfp_t); 972extern int blk_rq_unmap_user(struct bio *); 973extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 974extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 975 struct rq_map_data *, const struct iov_iter *, 976 gfp_t); 977extern void blk_execute_rq(struct request_queue *, struct gendisk *, 978 struct request *, int); 979extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 980 struct request *, int, rq_end_io_fn *); 981 982bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); 983 984static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 985{ 986 return bdev->bd_disk->queue; /* this is never NULL */ 987} 988 989/* 990 * blk_rq_pos() : the current sector 991 * blk_rq_bytes() : bytes left in the entire request 992 * blk_rq_cur_bytes() : bytes left in the current segment 993 * blk_rq_err_bytes() : bytes left till the next error boundary 994 * blk_rq_sectors() : sectors left in the entire request 995 * blk_rq_cur_sectors() : sectors left in the current segment 996 */ 997static inline sector_t blk_rq_pos(const struct request *rq) 998{ 999 return rq->__sector; 1000} 1001 1002static inline unsigned int blk_rq_bytes(const struct request *rq) 1003{ 1004 return rq->__data_len; 1005} 1006 1007static inline int blk_rq_cur_bytes(const struct request *rq) 1008{ 1009 return rq->bio ? bio_cur_bytes(rq->bio) : 0; 1010} 1011 1012extern unsigned int blk_rq_err_bytes(const struct request *rq); 1013 1014static inline unsigned int blk_rq_sectors(const struct request *rq) 1015{ 1016 return blk_rq_bytes(rq) >> 9; 1017} 1018 1019static inline unsigned int blk_rq_cur_sectors(const struct request *rq) 1020{ 1021 return blk_rq_cur_bytes(rq) >> 9; 1022} 1023 1024/* 1025 * Some commands like WRITE SAME have a payload or data transfer size which 1026 * is different from the size of the request. Any driver that supports such 1027 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to 1028 * calculate the data transfer size. 1029 */ 1030static inline unsigned int blk_rq_payload_bytes(struct request *rq) 1031{ 1032 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1033 return rq->special_vec.bv_len; 1034 return blk_rq_bytes(rq); 1035} 1036 1037static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, 1038 int op) 1039{ 1040 if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) 1041 return min(q->limits.max_discard_sectors, UINT_MAX >> 9); 1042 1043 if (unlikely(op == REQ_OP_WRITE_SAME)) 1044 return q->limits.max_write_same_sectors; 1045 1046 if (unlikely(op == REQ_OP_WRITE_ZEROES)) 1047 return q->limits.max_write_zeroes_sectors; 1048 1049 return q->limits.max_sectors; 1050} 1051 1052/* 1053 * Return maximum size of a request at given offset. Only valid for 1054 * file system requests. 
1055 */ 1056static inline unsigned int blk_max_size_offset(struct request_queue *q, 1057 sector_t offset) 1058{ 1059 if (!q->limits.chunk_sectors) 1060 return q->limits.max_sectors; 1061 1062 return q->limits.chunk_sectors - 1063 (offset & (q->limits.chunk_sectors - 1)); 1064} 1065 1066static inline unsigned int blk_rq_get_max_sectors(struct request *rq, 1067 sector_t offset) 1068{ 1069 struct request_queue *q = rq->q; 1070 1071 if (blk_rq_is_passthrough(rq)) 1072 return q->limits.max_hw_sectors; 1073 1074 if (!q->limits.chunk_sectors || 1075 req_op(rq) == REQ_OP_DISCARD || 1076 req_op(rq) == REQ_OP_SECURE_ERASE) 1077 return blk_queue_get_max_sectors(q, req_op(rq)); 1078 1079 return min(blk_max_size_offset(q, offset), 1080 blk_queue_get_max_sectors(q, req_op(rq))); 1081} 1082 1083static inline unsigned int blk_rq_count_bios(struct request *rq) 1084{ 1085 unsigned int nr_bios = 0; 1086 struct bio *bio; 1087 1088 __rq_for_each_bio(bio, rq) 1089 nr_bios++; 1090 1091 return nr_bios; 1092} 1093 1094/* 1095 * Request issue related functions. 1096 */ 1097extern struct request *blk_peek_request(struct request_queue *q); 1098extern void blk_start_request(struct request *rq); 1099extern struct request *blk_fetch_request(struct request_queue *q); 1100 1101/* 1102 * Request completion related functions. 1103 * 1104 * blk_update_request() completes given number of bytes and updates 1105 * the request without completing it. 1106 * 1107 * blk_end_request() and friends. __blk_end_request() must be called 1108 * with the request queue spinlock acquired. 1109 * 1110 * Several drivers define their own end_request and call 1111 * blk_end_request() for parts of the original function. 1112 * This prevents code duplication in drivers. 1113 */ 1114extern bool blk_update_request(struct request *rq, int error, 1115 unsigned int nr_bytes); 1116extern void blk_finish_request(struct request *rq, int error); 1117extern bool blk_end_request(struct request *rq, int error, 1118 unsigned int nr_bytes); 1119extern void blk_end_request_all(struct request *rq, int error); 1120extern bool __blk_end_request(struct request *rq, int error, 1121 unsigned int nr_bytes); 1122extern void __blk_end_request_all(struct request *rq, int error); 1123extern bool __blk_end_request_cur(struct request *rq, int error); 1124 1125extern void blk_complete_request(struct request *); 1126extern void __blk_complete_request(struct request *); 1127extern void blk_abort_request(struct request *); 1128extern void blk_unprep_request(struct request *); 1129 1130/* 1131 * Access functions for manipulating queue properties 1132 */ 1133extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, 1134 spinlock_t *lock, int node_id); 1135extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); 1136extern int blk_init_allocated_queue(struct request_queue *); 1137extern void blk_cleanup_queue(struct request_queue *); 1138extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 1139extern void blk_queue_bounce_limit(struct request_queue *, u64); 1140extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 1141extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); 1142extern void blk_queue_max_segments(struct request_queue *, unsigned short); 1143extern void blk_queue_max_discard_segments(struct request_queue *, 1144 unsigned short); 1145extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 1146extern void blk_queue_max_discard_sectors(struct 
request_queue *q, unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
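
/*
 * Illustrative sketch (not part of the original header): how a block driver
 * might size and build a scatterlist for a request with the helpers above,
 * blk_rq_nr_phys_segments() and blk_rq_map_sg().  The names my_dev and
 * my_prep_rq are hypothetical, used only for the example; the block is
 * compiled out with #if 0.
 */
#if 0
struct my_dev {
	struct scatterlist *sg;	/* table sized to the queue's max_segments */
};

static int my_prep_rq(struct my_dev *dev, struct request *rq)
{
	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
	int mapped;

	/* nr_segs is at least 1 for RQF_SPECIAL_PAYLOAD requests */
	if (nr_segs > queue_max_segments(rq->q))
		return -EINVAL;

	sg_init_table(dev->sg, nr_segs);
	mapped = blk_rq_map_sg(rq->q, rq, dev->sg);
	if (mapped <= 0)
		return -EIO;

	/* hand dev->sg[0..mapped-1] to the hardware / DMA engine here */
	return 0;
}
#endif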

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, lock contention on the
 * request_queue lock is reduced, which improves scalability.
 *
 * It is ok not to disable preemption when adding the request to the plug
 * list or when attempting a merge, because blk_schedule_flush_plug() will
 * only flush the plug list when the task sleeps by itself.  For details,
 * please see schedule(), where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);
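
/*
 * Illustrative sketch (not part of the original header): the typical
 * plugging pattern for a submitter that issues a batch of bios.  The
 * function submit_batch() and its arguments are hypothetical; the point is
 * that I/O queued between blk_start_plug() and blk_finish_plug() sits on
 * the per-task plug list and is flushed to the device in one go.  Compiled
 * out with #if 0.
 */
#if 0
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* queued/merged on current->plug */
	blk_finish_plug(&plug);		/* flush the plug list to the queue(s) */
}
#endif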

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */

extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits
- 9), 1351 nr_blocks << (sb->s_blocksize_bits - 9), 1352 gfp_mask, flags); 1353} 1354static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, 1355 sector_t nr_blocks, gfp_t gfp_mask) 1356{ 1357 return blkdev_issue_zeroout(sb->s_bdev, 1358 block << (sb->s_blocksize_bits - 9), 1359 nr_blocks << (sb->s_blocksize_bits - 9), 1360 gfp_mask, 0); 1361} 1362 1363extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 1364 1365enum blk_default_limits { 1366 BLK_MAX_SEGMENTS = 128, 1367 BLK_SAFE_MAX_SECTORS = 255, 1368 BLK_DEF_MAX_SECTORS = 2560, 1369 BLK_MAX_SEGMENT_SIZE = 65536, 1370 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1371}; 1372 1373#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 1374 1375static inline unsigned long queue_bounce_pfn(struct request_queue *q) 1376{ 1377 return q->limits.bounce_pfn; 1378} 1379 1380static inline unsigned long queue_segment_boundary(struct request_queue *q) 1381{ 1382 return q->limits.seg_boundary_mask; 1383} 1384 1385static inline unsigned long queue_virt_boundary(struct request_queue *q) 1386{ 1387 return q->limits.virt_boundary_mask; 1388} 1389 1390static inline unsigned int queue_max_sectors(struct request_queue *q) 1391{ 1392 return q->limits.max_sectors; 1393} 1394 1395static inline unsigned int queue_max_hw_sectors(struct request_queue *q) 1396{ 1397 return q->limits.max_hw_sectors; 1398} 1399 1400static inline unsigned short queue_max_segments(struct request_queue *q) 1401{ 1402 return q->limits.max_segments; 1403} 1404 1405static inline unsigned short queue_max_discard_segments(struct request_queue *q) 1406{ 1407 return q->limits.max_discard_segments; 1408} 1409 1410static inline unsigned int queue_max_segment_size(struct request_queue *q) 1411{ 1412 return q->limits.max_segment_size; 1413} 1414 1415static inline unsigned short queue_logical_block_size(struct request_queue *q) 1416{ 1417 int retval = 512; 1418 1419 if (q && q->limits.logical_block_size) 1420 retval = q->limits.logical_block_size; 1421 1422 return retval; 1423} 1424 1425static inline unsigned short bdev_logical_block_size(struct block_device *bdev) 1426{ 1427 return queue_logical_block_size(bdev_get_queue(bdev)); 1428} 1429 1430static inline unsigned int queue_physical_block_size(struct request_queue *q) 1431{ 1432 return q->limits.physical_block_size; 1433} 1434 1435static inline unsigned int bdev_physical_block_size(struct block_device *bdev) 1436{ 1437 return queue_physical_block_size(bdev_get_queue(bdev)); 1438} 1439 1440static inline unsigned int queue_io_min(struct request_queue *q) 1441{ 1442 return q->limits.io_min; 1443} 1444 1445static inline int bdev_io_min(struct block_device *bdev) 1446{ 1447 return queue_io_min(bdev_get_queue(bdev)); 1448} 1449 1450static inline unsigned int queue_io_opt(struct request_queue *q) 1451{ 1452 return q->limits.io_opt; 1453} 1454 1455static inline int bdev_io_opt(struct block_device *bdev) 1456{ 1457 return queue_io_opt(bdev_get_queue(bdev)); 1458} 1459 1460static inline int queue_alignment_offset(struct request_queue *q) 1461{ 1462 if (q->limits.misaligned) 1463 return -1; 1464 1465 return q->limits.alignment_offset; 1466} 1467 1468static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) 1469{ 1470 unsigned int granularity = max(lim->physical_block_size, lim->io_min); 1471 unsigned int alignment = sector_div(sector, granularity >> 9) << 9; 1472 1473 return (granularity + lim->alignment_offset - alignment) % granularity; 1474} 1475 1476static 
inline int bdev_alignment_offset(struct block_device *bdev) 1477{ 1478 struct request_queue *q = bdev_get_queue(bdev); 1479 1480 if (q->limits.misaligned) 1481 return -1; 1482 1483 if (bdev != bdev->bd_contains) 1484 return bdev->bd_part->alignment_offset; 1485 1486 return q->limits.alignment_offset; 1487} 1488 1489static inline int queue_discard_alignment(struct request_queue *q) 1490{ 1491 if (q->limits.discard_misaligned) 1492 return -1; 1493 1494 return q->limits.discard_alignment; 1495} 1496 1497static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) 1498{ 1499 unsigned int alignment, granularity, offset; 1500 1501 if (!lim->max_discard_sectors) 1502 return 0; 1503 1504 /* Why are these in bytes, not sectors? */ 1505 alignment = lim->discard_alignment >> 9; 1506 granularity = lim->discard_granularity >> 9; 1507 if (!granularity) 1508 return 0; 1509 1510 /* Offset of the partition start in 'granularity' sectors */ 1511 offset = sector_div(sector, granularity); 1512 1513 /* And why do we do this modulus *again* in blkdev_issue_discard()? */ 1514 offset = (granularity + alignment - offset) % granularity; 1515 1516 /* Turn it back into bytes, gaah */ 1517 return offset << 9; 1518} 1519 1520static inline int bdev_discard_alignment(struct block_device *bdev) 1521{ 1522 struct request_queue *q = bdev_get_queue(bdev); 1523 1524 if (bdev != bdev->bd_contains) 1525 return bdev->bd_part->discard_alignment; 1526 1527 return q->limits.discard_alignment; 1528} 1529 1530static inline unsigned int bdev_write_same(struct block_device *bdev) 1531{ 1532 struct request_queue *q = bdev_get_queue(bdev); 1533 1534 if (q) 1535 return q->limits.max_write_same_sectors; 1536 1537 return 0; 1538} 1539 1540static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) 1541{ 1542 struct request_queue *q = bdev_get_queue(bdev); 1543 1544 if (q) 1545 return q->limits.max_write_zeroes_sectors; 1546 1547 return 0; 1548} 1549 1550static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) 1551{ 1552 struct request_queue *q = bdev_get_queue(bdev); 1553 1554 if (q) 1555 return blk_queue_zoned_model(q); 1556 1557 return BLK_ZONED_NONE; 1558} 1559 1560static inline bool bdev_is_zoned(struct block_device *bdev) 1561{ 1562 struct request_queue *q = bdev_get_queue(bdev); 1563 1564 if (q) 1565 return blk_queue_is_zoned(q); 1566 1567 return false; 1568} 1569 1570static inline unsigned int bdev_zone_sectors(struct block_device *bdev) 1571{ 1572 struct request_queue *q = bdev_get_queue(bdev); 1573 1574 if (q) 1575 return blk_queue_zone_sectors(q); 1576 1577 return 0; 1578} 1579 1580static inline int queue_dma_alignment(struct request_queue *q) 1581{ 1582 return q ? 
q->dma_alignment : 511; 1583} 1584 1585static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, 1586 unsigned int len) 1587{ 1588 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; 1589 return !(addr & alignment) && !(len & alignment); 1590} 1591 1592/* assumes size > 256 */ 1593static inline unsigned int blksize_bits(unsigned int size) 1594{ 1595 unsigned int bits = 8; 1596 do { 1597 bits++; 1598 size >>= 1; 1599 } while (size > 256); 1600 return bits; 1601} 1602 1603static inline unsigned int block_size(struct block_device *bdev) 1604{ 1605 return bdev->bd_block_size; 1606} 1607 1608static inline bool queue_flush_queueable(struct request_queue *q) 1609{ 1610 return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); 1611} 1612 1613typedef struct {struct page *v;} Sector; 1614 1615unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); 1616 1617static inline void put_dev_sector(Sector p) 1618{ 1619 put_page(p.v); 1620} 1621 1622static inline bool __bvec_gap_to_prev(struct request_queue *q, 1623 struct bio_vec *bprv, unsigned int offset) 1624{ 1625 return offset || 1626 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); 1627} 1628 1629/* 1630 * Check if adding a bio_vec after bprv with offset would create a gap in 1631 * the SG list. Most drivers don't care about this, but some do. 1632 */ 1633static inline bool bvec_gap_to_prev(struct request_queue *q, 1634 struct bio_vec *bprv, unsigned int offset) 1635{ 1636 if (!queue_virt_boundary(q)) 1637 return false; 1638 return __bvec_gap_to_prev(q, bprv, offset); 1639} 1640 1641/* 1642 * Check if the two bvecs from two bios can be merged to one segment. 1643 * If yes, no need to check gap between the two bios since the 1st bio 1644 * and the 1st bvec in the 2nd bio can be handled in one segment. 1645 */ 1646static inline bool bios_segs_mergeable(struct request_queue *q, 1647 struct bio *prev, struct bio_vec *prev_last_bv, 1648 struct bio_vec *next_first_bv) 1649{ 1650 if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv)) 1651 return false; 1652 if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv)) 1653 return false; 1654 if (prev->bi_seg_back_size + next_first_bv->bv_len > 1655 queue_max_segment_size(q)) 1656 return false; 1657 return true; 1658} 1659 1660static inline bool bio_will_gap(struct request_queue *q, 1661 struct request *prev_rq, 1662 struct bio *prev, 1663 struct bio *next) 1664{ 1665 if (bio_has_data(prev) && queue_virt_boundary(q)) { 1666 struct bio_vec pb, nb; 1667 1668 /* 1669 * don't merge if the 1st bio starts with non-zero 1670 * offset, otherwise it is quite difficult to respect 1671 * sg gap limit. We work hard to merge a huge number of small 1672 * single bios in case of mkfs. 
1673 */ 1674 if (prev_rq) 1675 bio_get_first_bvec(prev_rq->bio, &pb); 1676 else 1677 bio_get_first_bvec(prev, &pb); 1678 if (pb.bv_offset) 1679 return true; 1680 1681 /* 1682 * We don't need to worry about the situation that the 1683 * merged segment ends in unaligned virt boundary: 1684 * 1685 * - if 'pb' ends aligned, the merged segment ends aligned 1686 * - if 'pb' ends unaligned, the next bio must include 1687 * one single bvec of 'nb', otherwise the 'nb' can't 1688 * merge with 'pb' 1689 */ 1690 bio_get_last_bvec(prev, &pb); 1691 bio_get_first_bvec(next, &nb); 1692 1693 if (!bios_segs_mergeable(q, prev, &pb, &nb)) 1694 return __bvec_gap_to_prev(q, &pb, nb.bv_offset); 1695 } 1696 1697 return false; 1698} 1699 1700static inline bool req_gap_back_merge(struct request *req, struct bio *bio) 1701{ 1702 return bio_will_gap(req->q, req, req->biotail, bio); 1703} 1704 1705static inline bool req_gap_front_merge(struct request *req, struct bio *bio) 1706{ 1707 return bio_will_gap(req->q, NULL, bio, req->bio); 1708} 1709 1710int kblockd_schedule_work(struct work_struct *work); 1711int kblockd_schedule_work_on(int cpu, struct work_struct *work); 1712int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); 1713int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 1714int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 1715 1716#ifdef CONFIG_BLK_CGROUP 1717/* 1718 * This should not be using sched_clock(). A real patch is in progress 1719 * to fix this up, until that is in place we need to disable preemption 1720 * around sched_clock() in this function and set_io_start_time_ns(). 1721 */ 1722static inline void set_start_time_ns(struct request *req) 1723{ 1724 preempt_disable(); 1725 req->start_time_ns = sched_clock(); 1726 preempt_enable(); 1727} 1728 1729static inline void set_io_start_time_ns(struct request *req) 1730{ 1731 preempt_disable(); 1732 req->io_start_time_ns = sched_clock(); 1733 preempt_enable(); 1734} 1735 1736static inline uint64_t rq_start_time_ns(struct request *req) 1737{ 1738 return req->start_time_ns; 1739} 1740 1741static inline uint64_t rq_io_start_time_ns(struct request *req) 1742{ 1743 return req->io_start_time_ns; 1744} 1745#else 1746static inline void set_start_time_ns(struct request *req) {} 1747static inline void set_io_start_time_ns(struct request *req) {} 1748static inline uint64_t rq_start_time_ns(struct request *req) 1749{ 1750 return 0; 1751} 1752static inline uint64_t rq_io_start_time_ns(struct request *req) 1753{ 1754 return 0; 1755} 1756#endif 1757 1758#define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1759 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 1760#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 1761 MODULE_ALIAS("block-major-" __stringify(major) "-*") 1762 1763#if defined(CONFIG_BLK_DEV_INTEGRITY) 1764 1765enum blk_integrity_flags { 1766 BLK_INTEGRITY_VERIFY = 1 << 0, 1767 BLK_INTEGRITY_GENERATE = 1 << 1, 1768 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, 1769 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, 1770}; 1771 1772struct blk_integrity_iter { 1773 void *prot_buf; 1774 void *data_buf; 1775 sector_t seed; 1776 unsigned int data_size; 1777 unsigned short interval; 1778 const char *disk_name; 1779}; 1780 1781typedef int (integrity_processing_fn) (struct blk_integrity_iter *); 1782 1783struct blk_integrity_profile { 1784 integrity_processing_fn *generate_fn; 1785 integrity_processing_fn *verify_fn; 1786 const char *name; 1787}; 1788 1789extern void 

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
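
/*
 * Illustrative sketch, not part of the original header: typical use of the
 * registration helpers above from a driver that probes protection
 * information support. example_enable_pi() is a hypothetical helper; it
 * assumes <linux/t10-pi.h> for t10_pi_type1_crc and struct t10_pi_tuple,
 * and a 512-byte protection interval.
 */
#if 0
static void example_enable_pi(struct gendisk *disk)
{
	struct blk_integrity bi = {
		.profile	= &t10_pi_type1_crc,
		.flags		= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
		.tuple_size	= sizeof(struct t10_pi_tuple),
		.interval_exp	= 9,	/* 2^9 = 512-byte intervals */
	};

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
#endif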

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif
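
/*
 * Illustrative sketch, not part of the original header: a minimal
 * block_device_operations table as a block driver might define it. The
 * example_* names are hypothetical; it assumes the usual driver includes
 * (<linux/module.h> for THIS_MODULE). Real tables live in the individual
 * drivers (e.g. drivers/block/brd.c).
 */
#if 0
static int example_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void example_release(struct gendisk *disk, fmode_t mode)
{
}

static int example_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* made-up geometry so partitioning tools have something to work with */
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 6;
	return 0;
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.release	= example_release,
	.getgeo		= example_getgeo,
};
#endif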