/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/lockdep.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus a NUL
 * terminator.  EFI UUID is 36 characters.  MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is
 * not inserted.  Shall not be set for devices which are removed entirely when
 * the media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*.  Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE		= 0,
	BLK_INTEGRITY_CSUM_IP		= 1,
	BLK_INTEGRITY_CSUM_CRC		= 2,
	BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
	unsigned char				flags;
	enum blk_integrity_checksum		csum_type;
	unsigned char				tuple_size;
	unsigned char				pi_offset;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ			((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE			((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL			((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY			((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL		((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN		((__force blk_mode_t)(1 << 6))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		zone_capacity;
	unsigned int		last_zone_capacity;
	unsigned long __rcu	*conv_zones_bitmap;
	unsigned int		zone_wplugs_hash_bits;
	atomic_t		nr_zone_wplugs;
	spinlock_t		zone_wplugs_lock;
	struct mempool_s	*zone_wplugs_pool;
	struct hlist_head	*zone_wplugs_hash;
	struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk.  Note that this value is
 * only stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}
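
/*
 * Example (illustrative sketch, not taken from a real driver): a stable
 * openers count requires holding disk->open_mutex, e.g. when deciding
 * whether a partition rescan can be requested:
 *
 *	mutex_lock(&disk->open_mutex);
 *	if (!disk_openers(disk) && disk_has_partscan(disk))
 *		set_bit(GD_NEED_PART_SCAN, &disk->state);
 *	mutex_unlock(&disk->open_mutex);
 */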

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
typedef unsigned int __bitwise blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))

/* supports DAX */
#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))

/* supports I/O polling */
#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))

/* is a zoned device */
#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))

/* bounce all highmem pages */
#define BLK_FEAT_BOUNCE_HIGH		((__force blk_features_t)(1u << 14))

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
	((__force blk_features_t)(1u << 15))

/* atomic writes enabled */
#define BLK_FEAT_ATOMIC_WRITES \
	((__force blk_features_t)(1u << 16))

/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
typedef unsigned int __bitwise blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))

/* passthrough command IO accounting */
#define BLK_FLAG_IOSTATS_PASSTHROUGH	((__force blk_flags_t)(1u << 2))

struct queue_limits {
	blk_features_t		features;
	blk_flags_t		flags;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		min_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_user_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_hw_zone_append_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	/* atomic write limits */
	unsigned int		atomic_write_hw_max;
	unsigned int		atomic_write_max_sectors;
	unsigned int		atomic_write_hw_boundary;
	unsigned int		atomic_write_boundary_sectors;
	unsigned int		atomic_write_hw_unit_min;
	unsigned int		atomic_write_unit_min;
	unsigned int		atomic_write_hw_unit_max;
	unsigned int		atomic_write_unit_max;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned int		max_open_zones;
	unsigned int		max_active_zones;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvecs that are not a multiple of SECTOR_SIZE due
	 * to possible offsets.
	 */
	unsigned int		dma_alignment;
	unsigned int		dma_pad_mask;

	struct blk_integrity	integrity;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
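
/*
 * Example (illustrative sketch): zone reporting is callback based.  A
 * hypothetical caller counting the sequential-write-required zones of a
 * device could do:
 *
 *	static int count_seq_zone_cb(struct blk_zone *zone, unsigned int idx,
 *				     void *data)
 *	{
 *		unsigned int *nr_seq = data;
 *
 *		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
 *			(*nr_seq)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_seq_zone_cb, &nr_seq);
 *
 * blkdev_report_zones() returns the number of zones reported on success or a
 * negative errno.
 */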

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges.  This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges.  The independent ranges must not
 * overlap and must include all sectors within the disk capacity (no sector
 * holes allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel.  A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	struct elevator_queue	*elevator;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	unsigned int		rq_timeout;

	unsigned int		queue_depth;

	refcount_t		refs;

	/* hw dispatch queues */
	unsigned int		nr_hw_queues;
	struct xarray		hctx_table;

	struct percpu_ref	q_usage_counter;
	struct lock_class_key	io_lock_cls_key;
	struct lockdep_map	io_lockdep_map;

	struct lock_class_key	q_lock_cls_key;
	struct lockdep_map	q_lockdep_map;

	struct request		*last_merge;

	spinlock_t		queue_lock;

	int			quiesce_depth;

	struct gendisk		*disk;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

	struct queue_limits	limits;

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	int			node;

	spinlock_t		requeue_lock;
	struct list_head	requeue_list;
	struct delayed_work	requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	struct mutex		sysfs_lock;
	struct mutex		limits_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
#ifdef CONFIG_LOCKDEP
	struct task_struct	*mq_freeze_owner;
	int			mq_freeze_owner_depth;
	/*
	 * Records disk & queue state in current context, used in unfreeze
	 * queue
	 */
	bool			mq_freeze_disk_dead;
	bool			mq_freeze_queue_dying;
#endif
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {
	QUEUE_FLAG_DYING,		/* queue being torn down */
	QUEUE_FLAG_NOMERGES,		/* disable merge attempts */
	QUEUE_FLAG_SAME_COMP,		/* complete on same CPU-group */
	QUEUE_FLAG_FAIL_IO,		/* fake timeout */
	QUEUE_FLAG_NOXMERGES,		/* No extended merges */
	QUEUE_FLAG_SAME_FORCE,		/* force complete on same CPU */
	QUEUE_FLAG_INIT_DONE,		/* queue is initialized */
	QUEUE_FLAG_STATS,		/* track IO start and completion times */
	QUEUE_FLAG_REGISTERED,		/* queue has been registered to a disk */
	QUEUE_FLAG_QUIESCED,		/* queue has been quiesced */
	QUEUE_FLAG_RQ_ALLOC_TIME,	/* record rq->alloc_time_ns */
	QUEUE_FLAG_HCTX_ACTIVE,		/* at least one blk-mq hctx is active */
	QUEUE_FLAG_SQ_SCHED,		/* single queue style io dispatch */
	QUEUE_FLAG_MAX
};

#define QUEUE_FLAG_MQ_DEFAULT	(1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q)	\
	((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
#define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
		(q->limits.features & BLK_FEAT_ZONED);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return disk->nr_zones;
}
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return disk_nr_zones(bdev->bd_disk);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_active_zones;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups,
				 struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline bool get_disk_ro(struct gendisk *disk)
{
	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline bool bdev_read_only(struct block_device *bdev)
{
	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)					\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(lim, node_id, &__key);				\
})

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);
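
/*
 * Example (illustrative sketch of the blk_alloc_disk()/add_disk() flow for a
 * BIO based driver; the names and limits are made up for the example):
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.features		= BLK_FEAT_WRITE_CACHE,
 *	};
 *	struct gendisk *disk;
 *	int ret;
 *
 *	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "mydisk0");
 *	disk->fops = &mydisk_fops;
 *	set_capacity(disk, nr_sectors);
 *	ret = add_disk(disk);
 *	if (ret)
 *		put_disk(disk);
 *
 * Teardown is del_gendisk() followed by put_disk().
 */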

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
	return bio_sectors(bio) &&
		bio_zone_no(bio) !=
		disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{
	if (unlikely(!is_power_of_2(boundary_sectors)))
		return boundary_sectors - sector_div(offset, boundary_sectors);
	return boundary_sectors - (offset & (boundary_sectors - 1));
}

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q:		queue to update
 *
 * This function starts an atomic update of the queue limits.  It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify.  The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
int blk_validate_limits(struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q:		queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}
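
/*
 * Example (illustrative sketch): a driver that discovers at runtime that its
 * device supports discard might update the limits like this (the new value
 * is made up for the example):
 *
 *	struct queue_limits lim;
 *	int ret;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.max_hw_discard_sectors = new_limit;
 *	ret = queue_limits_commit_update(q, &lim);
 *
 * queue_limits_commit_update() validates and applies the new limits and drops
 * the lock taken by queue_limits_start_update(); on an error path the lock
 * must instead be dropped with queue_limits_cancel_update().
 */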

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported.  They are racy and the driver needs to
 * cope with that.  Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
	q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
	q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
	q->limits.max_write_zeroes_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

struct rq_list {
	struct request *head;
	struct request *tail;
};

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge.  For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct rq_list mq_list;		/* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct rq_list cached_rqs;
	u64 cur_ktime;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list;	/* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
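
/*
 * Example (illustrative sketch): a submitter batching several sequential
 * bios plugs around the submission loop so the block layer can merge them
 * (bio setup omitted for brevity):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 *
 * The plug holds the requests on a per-task list; blk_finish_plug() (or a
 * schedule() while plugged) flushes them to the device in one batch.
 */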

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		plug->cur_ktime = 0;
	current->flags &= ~PF_BLOCK_TS;
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
#define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}
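
/*
 * Example (illustrative sketch): zeroing a 1 MiB range while forbidding the
 * explicit-write fallback, so the call only succeeds if the device can
 * offload the zeroing (values are made up for the example):
 *
 *	sector_t sector = 0;
 *	sector_t nr_sects = SZ_1M >> SECTOR_SHIFT;
 *	int ret;
 *
 *	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 *
 * The sb_issue_*() wrappers above do the same after converting filesystem
 * block numbers to 512-byte sectors via s_blocksize_bits - SECTOR_SHIFT.
 */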

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev_partno(bdev) != 0;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

/*
 * Default upper limit for the software max_sectors limit used for regular
 * file system I/O.  This can be increased through sysfs.
 *
 * Not to be confused with the max_hw_sectors limit that is entirely
 * controlled by the driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	2560u

static inline struct queue_limits *bdev_limits(struct block_device *bdev)
{
	return &bdev_get_queue(bdev)->limits;
}

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{
	return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
	return queue_emulates_zone_append(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_zone_append_sectors;
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	return q->limits.logical_block_size;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline unsigned int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_limits(bdev)->discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_write_zeroes_sectors;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
		return true;
	return q->limits.features & BLK_FEAT_STABLE_WRITES;
}

static inline bool blk_queue_write_cache(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return blk_queue_write_cache(bdev_get_queue(bdev));
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
	return bdev_offset_from_zone_start(bio->bi_bdev,
					   bio->bi_iter.bi_sector);
}
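
/*
 * Worked example (illustrative, values assumed): with 256 MiB zones,
 * bdev_zone_sectors() returns 524288 (512-byte sectors).  Because the zone
 * size is a power of two, the helpers above reduce to shifts and masks;
 * for sector 1572864,
 *
 *	bdev_zone_no()			= 1572864 >> ilog2(524288) = 3
 *	bdev_offset_from_zone_start()	= 1572864 & (524288 - 1)   = 0
 *
 * i.e. the sector is the first sector of the fourth zone.
 */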

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}

/**
 * bdev_zone_is_seq - check if a sector belongs to a sequential write zone
 * @bdev:	block device to check
 * @sector:	sector number
 *
 * Check if @sector on @bdev is contained in a sequential write required zone.
 */
static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
{
	bool is_seq = false;

#if IS_ENABLED(CONFIG_BLK_DEV_ZONED)
	if (bdev_is_zoned(bdev)) {
		struct gendisk *disk = bdev->bd_disk;
		unsigned long *bitmap;

		rcu_read_lock();
		bitmap = rcu_dereference(disk->conv_zones_bitmap);
		is_seq = !bitmap ||
			!test_bit(disk_zone_no(disk, sector), bitmap);
		rcu_read_unlock();
	}
#endif

	return is_seq;
}

int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);

static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
	return q->limits.dma_alignment;
}

static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_max;
}

static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_min;
}

static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}

static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline unsigned int
blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
	return lim->dma_alignment | lim->dma_pad_mask;
}

static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
				  unsigned int len)
{
	unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);

	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events)(struct gendisk *disk,
				     unsigned int clearing);
	void (*unlock_native_capacity)(struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif
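
/*
 * Example (illustrative sketch): a minimal BIO based driver only needs a few
 * of the operations above; everything else may be left NULL.  mydisk_* are
 * made-up names:
 *
 *	static void mydisk_submit_bio(struct bio *bio)
 *	{
 *		... queue or process the bio, then call bio_endio() ...
 *	}
 *
 *	static const struct block_device_operations mydisk_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= mydisk_submit_bio,
 *	};
 *
 * The table is hooked up via disk->fops before add_disk().
 */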

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions.  For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * helpers used to open and claim block devices via
 * bd_prepare_to_claim() must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
				   const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
				    void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
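
/*
 * Example (illustrative sketch): a filesystem opening its backing device
 * exclusively, with the superblock as holder so the fs_holder_ops callbacks
 * can reach it:
 *
 *	struct file *bdev_file;
 *
 *	bdev_file = bdev_file_open_by_path(path, sb_open_mode(sb->s_flags),
 *					   sb, &fs_holder_ops);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *
 * The device is later closed with bdev_fput() (declared below).
 */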

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(struct path *, struct kstat *, u32);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(struct path *path, struct kstat *stat,
			      u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

struct io_comp_batch {
	struct rq_list req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
						struct queue_limits *limits)
{
	unsigned int alignment = max(limits->atomic_write_hw_unit_min,
				limits->atomic_write_hw_boundary);

	return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}

static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
	struct request_queue *bd_queue = bdev->bd_queue;
	struct queue_limits *limits = &bd_queue->limits;

	if (!limits->atomic_write_unit_min)
		return false;

	if (bdev_is_partition(bdev))
		return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
							limits);

	return true;
}

static inline unsigned int
bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */