/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
#include <linux/pm.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_keyslot_manager;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		5

typedef void (rq_end_io_fn)(struct request *, blk_status_t);

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Request state for blk-mq.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct list_head queuelist;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
		int error_count;	/* for legacy drivers, don't use */
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_ksm_keyslot *crypt_keyslot;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	refcount_t ref;

	unsigned int timeout;
	unsigned long deadline;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline bool blk_op_is_scsi(unsigned int op)
{
	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
	unsigned op = bio_op(bio);

	return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

struct bio_vec;

enum blk_eh_timer_return {
	BLK_EH_DONE,		/* driver has completed the command */
	BLK_EH_RESET_TIMER,	/* reset timer and try again */
};

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
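
/*
 * Illustrative sketch (not part of the original header; function and type
 * names other than the blkdev.h helpers are hypothetical): a blk-mq driver's
 * ->queue_rq() handler typically branches on whether a request carries
 * filesystem data or is a passthrough command, using the helpers above:
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (blk_rq_is_passthrough(rq))
 *			return my_handle_passthrough(rq);
 *		return my_handle_fs_request(rq, req_op(rq), req_get_ioprio(rq));
 *	}
 */
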
/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int blkdev_nr_zones(struct gendisk *disk);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	struct backing_dev_info	*backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	spinlock_t		queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity	integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	/* Inline crypto capabilities */
	struct blk_keyslot_manager *ksm;
#endif

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_sbitmap;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
	struct mutex		debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		bio_split;

	struct dentry		*debugfs_dir;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
#endif

	bool			mq_sysfs_init_done;

	size_t			cmd_size;

#define BLK_MAX_WRITE_HINTS	5
	u64			write_hints[BLK_MAX_WRITE_HINTS];
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD	8	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	11	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS	21	/* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 23	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q)	\
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
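
/*
 * Illustrative sketch (not part of the original header): a driver that owns
 * a queue advertises capabilities by setting the QUEUE_FLAG_* bits declared
 * above, typically at probe time:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	if (my_device_supports_discard)		// hypothetical condition
 *		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 *
 * Later code can test the same bits with the blk_queue_*() helpers above,
 * e.g. blk_queue_discard(q) or blk_queue_nonrot(q).
 */
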
#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return false;
	if (!q->conv_zones_bitmap)
		return true;
	return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}

static inline void blk_queue_max_open_zones(struct request_queue *q,
		unsigned int max_open_zones)
{
	q->max_open_zones = max_open_zones;
}

static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return q->max_open_zones;
}

static inline void blk_queue_max_active_zones(struct request_queue *q,
		unsigned int max_active_zones)
{
	q->max_active_zones = max_active_zones;
}

static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return q->max_active_zones;
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return 0;
}
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	return false;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	return 0;
}
static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return 0;
}
static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}
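
/*
 * Illustrative sketch (not part of the original header): given a sector on
 * a zoned queue, the helpers above can be combined to find the zone number
 * and whether writes to that zone must be sequential:
 *
 *	sector_t sector = blk_rq_pos(rq);	// accessor declared further below
 *	unsigned int zno = blk_queue_zone_no(q, sector);
 *	bool seq = blk_queue_zone_is_seq(q, sector);
 *
 * On a non-zoned queue both helpers degrade gracefully: zno is 0 and seq is
 * false, so callers only need blk_queue_is_zoned() for clarity, not
 * correctness.
 */
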
static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) &&
	    bio_offset(a) == bio_offset(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
blk_qc_t submit_bio_noacct(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
				       blk_mq_req_flags_t flags);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
					      struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_queue_split(struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);
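
/*
 * Illustrative sketch (not part of the original header; my_copy_segment is
 * hypothetical): a driver walking the data of a request segment by segment
 * uses rq_for_each_segment() from above rather than touching the bio chain
 * directly:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *		my_copy_segment(buf, bvec.bv_len);
 *	}
 *
 * rq_for_each_bvec() walks the same data in multi-page bvecs, which suits
 * drivers that do not need per-page granularity.
 */
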
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);

extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern void blk_execute_rq(struct gendisk *, struct request *, int);
extern void blk_execute_rq_nowait(struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

#ifdef CONFIG_BLK_DEV_ZONED

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check
 * that there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
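
/*
 * Illustrative sketch (not part of the original header): a driver building
 * a device command from a request typically derives the LBA and transfer
 * length from the accessors above, using blk_rq_payload_bytes() so that
 * RQF_SPECIAL_PAYLOAD commands (e.g. WRITE SAME) report the size actually
 * transferred:
 *
 *	u64 lba   = blk_rq_pos(rq);			// 512-byte units
 *	u32 nsect = blk_rq_sectors(rq);			// 512-byte units
 *	u32 xfer  = blk_rq_payload_bytes(rq);		// bytes on the wire
 *
 * For a plain READ/WRITE, xfer == nsect << SECTOR_SHIFT; the two only
 * diverge for special-payload commands.
 */
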
/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset,
					       unsigned int chunk_sectors)
{
	if (!chunk_sectors) {
		if (q->limits.chunk_sectors)
			chunk_sectors = q->limits.chunk_sectors;
		else
			return q->limits.max_sectors;
	}

	if (likely(is_power_of_2(chunk_sectors)))
		chunk_sectors -= offset & (chunk_sectors - 1);
	else
		chunk_sectors -= sector_div(offset, chunk_sectors);

	return min(q->limits.max_sectors, chunk_sectors);
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset, 0),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 */
extern bool blk_update_request(struct request *rq, blk_status_t error,
			       unsigned int nr_bytes);

extern void blk_abort_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void blk_queue_update_readahead(struct request_queue *q);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for a data-less command like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
extern void blk_dump_rq_flags(struct request *, char *);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
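
/*
 * Illustrative sketch (not part of the original header; the device
 * parameters are hypothetical): a driver typically configures its queue
 * limits once at probe time using the setters declared above:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 2048);	// 1 MiB per request
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_max_segment_size(q, 65536);
 *
 * When dispatching, blk_rq_map_sg() above turns a request into a
 * scatterlist that never exceeds those limits.
 */
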
#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head mq_list; /* blk-mq requests */
	struct list_head cb_list; /* md requires an unplug callback */
	unsigned short rq_count;
	bool multiple_queues;
	bool nowait;
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		 (!list_empty(&plug->mq_list) ||
		  !list_empty(&plug->cb_list));
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */

extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, flags);
}
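
/*
 * Illustrative sketch (not part of the original header; the loop bounds are
 * hypothetical): a submitter issuing many small, related bios brackets them
 * with a plug so the block layer can batch and merge them before they reach
 * the device:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio_noacct(bios[i]);
 *	blk_finish_plug(&plug);
 *
 * The plug is flushed either explicitly by blk_finish_plug() or implicitly
 * by the scheduler when the task sleeps (see blk_schedule_flush_plug()
 * above).
 */
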
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t mode);

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{

	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(const struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(const struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zone_sectors(q);
	return 0;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return queue_max_open_zones(q);
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return queue_max_active_zones(q);
	return 0;
}

static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
typedef void (integrity_prepare_fn) (struct request *);
typedef void (integrity_complete_fn) (struct request *, unsigned int);

struct blk_integrity_profile {
	integrity_processing_fn	*generate_fn;
	integrity_processing_fn	*verify_fn;
	integrity_prepare_fn	*prepare_fn;
	integrity_complete_fn	*complete_fn;
	const char		*name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
{
	return q->integrity.profile;
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

/**
 * bio_integrity_intervals - Return number of integrity intervals for a bio
 * @bi:		blk_integrity profile for device
 * @sectors:	Size of the bio in 512-byte sectors
 *
 * Description: The block layer calculates everything in 512 byte
 * sectors but integrity metadata is done in terms of the data integrity
 * interval size of the storage device.  Convert the block layer sectors
 * to the appropriate number of integrity intervals.
 */
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
						   unsigned int sectors)
{
	return sectors >> (bi->interval_exp - 9);
}

static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
					       unsigned int sectors)
{
	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
}

/*
 * Return the first bvec that contains integrity data.  Only drivers that are
 * limited to a single integrity segment should use this helper.
 */
static inline struct bio_vec *rq_integrity_vec(struct request *rq)
{
	if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
		return NULL;
	return rq->bio->bi_integrity->bip_vec;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
{
	return false;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
{
	return 0;
}

static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
						   unsigned int sectors)
{
	return 0;
}

static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
					       unsigned int sectors)
{
	return 0;
}

static inline struct bio_vec *rq_integrity_vec(struct request *rq)
{
	return NULL;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);

void blk_ksm_unregister(struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
				    struct request_queue *q)
{
	return true;
}

static inline void blk_ksm_unregister(struct request_queue *q) { }

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

struct block_device_operations {
	blk_qc_t (*submit_bio) (struct bio *bio);
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	struct module *owner;
	const struct pr_ops *pr_ops;
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);

#ifdef CONFIG_BLK_DEV_ZONED
bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
#else
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}
static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
		unsigned int op);
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end accounting for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);

const char *bdevname(struct block_device *bdev, char *buffer);
int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder);
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
int bd_prepare_to_claim(struct block_device *bdev, void *holder);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, fmode_t mode);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
struct block_device *bdgrab(struct block_device *bdev);
void bdput(struct block_device *);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
		loff_t lend);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
#endif
int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

#endif /* _LINUX_BLKDEV_H */
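
/*
 * Illustrative sketch, not taken from this header: a hypothetical bio-based
 * driver would typically bracket each bio it handles with the accounting
 * helpers declared above, capturing the start time when the bio is accepted
 * and reporting it back when the I/O completes:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	...submit and process the bio...
 *	bio_end_io_acct(bio, start);
 */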