/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/lockdep.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS		256
#define DISK_NAME_LEN		32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE	= 1 << 0,
	GENHD_FL_HIDDEN		= 1 << 1,
	GENHD_FL_NO_PART	= 1 << 2,
};
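/*
 * Example (illustrative sketch only, not part of this header): a hypothetical
 * driver exposing a single, non-partitionable, removable-media disk might set
 * these capability flags before add_disk(); "my_disk" is an assumed name used
 * purely for illustration.
 *
 *	my_disk->flags |= GENHD_FL_NO_PART | GENHD_FL_REMOVABLE;
 *	err = add_disk(my_disk);
 */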
enum {
	DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE		= 0,
	BLK_INTEGRITY_CSUM_IP		= 1,
	BLK_INTEGRITY_CSUM_CRC		= 2,
	BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
	unsigned char flags;
	enum blk_integrity_checksum csum_type;
	unsigned char tuple_size;
	unsigned char pi_offset;
	unsigned char interval_exp;
	unsigned char tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN	((__force blk_mode_t)(1 << 6))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int nr_zones;
	unsigned int zone_capacity;
	unsigned int last_zone_capacity;
	unsigned long __rcu *conv_zones_bitmap;
	unsigned int zone_wplugs_hash_bits;
	spinlock_t zone_wplugs_lock;
	struct mempool_s *zone_wplugs_pool;
	struct hlist_head *zone_wplugs_hash;
	struct list_head zone_wplugs_err_list;
	struct work_struct zone_wplugs_work;
	struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

/**
 * disk_openers - return the number of openers for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}
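/*
 * Example (hedged sketch, not a kernel API): a driver that wants to refuse
 * reconfiguration while the device is in use could sample disk_openers()
 * under open_mutex; "my_disk" is an assumed variable.
 *
 *	mutex_lock(&my_disk->open_mutex);
 *	busy = disk_openers(my_disk) > 0;
 *	mutex_unlock(&my_disk->open_mutex);
 *	if (busy)
 *		return -EBUSY;
 */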
/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/* flags set by the driver in queue_limits.features */
typedef unsigned int __bitwise blk_features_t;

/* supports a volatile write cache */
#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))

/* supports passing on the FUA bit */
#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))

/* rotational device (hard drive or floppy) */
#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))

/* contributes to the random number pool */
#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))

/* do disk/partitions IO accounting */
#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))

/* don't modify data until writeback is done */
#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))

/* always completes in submit context */
#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))

/* supports REQ_NOWAIT */
#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))

/* supports DAX */
#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))

/* supports I/O polling */
#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))

/* is a zoned device */
#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))

/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))

/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))

/* bounce all highmem pages */
#define BLK_FEAT_BOUNCE_HIGH		((__force blk_features_t)(1u << 14))

/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
	((__force blk_features_t)(1u << 15))

/* stacked device can/does support atomic writes */
#define BLK_FEAT_ATOMIC_WRITES_STACKED \
	((__force blk_features_t)(1u << 16))
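/*
 * Example (hedged sketch, not taken from this header): a hypothetical driver
 * advertising a volatile write cache with FUA support on rotational media
 * would typically do so through the features of its queue_limits before
 * allocating the disk:
 *
 *	struct queue_limits lim = {
 *		.features		= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
 *					  BLK_FEAT_ROTATIONAL,
 *		.logical_block_size	= 512,
 *	};
 */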
/*
 * Flags automatically inherited when stacking limits.
 */
#define BLK_FEAT_INHERIT_MASK \
	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)

/* internal flags in queue_limits.flags */
typedef unsigned int __bitwise blk_flags_t;

/* do not send FLUSH/FUA commands despite advertising a write cache */
#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))

/* I/O topology is misaligned */
#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))

/* passthrough command IO accounting */
#define BLK_FLAG_IOSTATS_PASSTHROUGH	((__force blk_flags_t)(1u << 2))

struct queue_limits {
	blk_features_t features;
	blk_flags_t flags;
	unsigned long seg_boundary_mask;
	unsigned long virt_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_dev_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_user_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int logical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_user_discard_sectors;
	unsigned int max_secure_erase_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int max_hw_zone_append_sectors;
	unsigned int max_zone_append_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int zone_write_granularity;

	/* atomic write limits */
	unsigned int atomic_write_hw_max;
	unsigned int atomic_write_max_sectors;
	unsigned int atomic_write_hw_boundary;
	unsigned int atomic_write_boundary_sectors;
	unsigned int atomic_write_hw_unit_min;
	unsigned int atomic_write_unit_min;
	unsigned int atomic_write_hw_unit_max;
	unsigned int atomic_write_unit_max;

	unsigned short max_segments;
	unsigned short max_integrity_segments;
	unsigned short max_discard_segments;

	unsigned int max_open_zones;
	unsigned int max_active_zones;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int dma_alignment;
	unsigned int dma_pad_mask;

	struct blk_integrity integrity;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
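/*
 * Example (illustrative sketch only): blkdev_report_zones() invokes the
 * report_zones_cb once per reported zone.  A hypothetical caller counting
 * full zones might look like this; "count_full_zone" is an assumed helper,
 * not a kernel function.
 *
 *	static int count_full_zone(struct blk_zone *zone, unsigned int idx,
 *				   void *data)
 *	{
 *		unsigned int *nr_full = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_FULL)
 *			(*nr_full)++;
 *		return 0;
 *	}
 *
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, count_full_zone,
 *				  &nr_full);
 */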
/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not
 * overlap and must include all sectors within the disk capacity (no sector
 * holes allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject kobj;
	sector_t sector;
	sector_t nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject kobj;
	bool sysfs_registered;
	unsigned int nr_ia_ranges;
	struct blk_independent_access_range ia_range[];
};

struct request_queue {
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	struct elevator_queue *elevator;

	const struct blk_mq_ops *mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	unsigned int rq_timeout;

	unsigned int queue_depth;

	refcount_t refs;

	/* hw dispatch queues */
	unsigned int nr_hw_queues;
	struct xarray hctx_table;

	struct percpu_ref q_usage_counter;
	struct lock_class_key io_lock_cls_key;
	struct lockdep_map io_lockdep_map;

	struct lock_class_key q_lock_cls_key;
	struct lockdep_map q_lockdep_map;

	struct request *last_merge;

	spinlock_t queue_lock;

	int quiesce_depth;

	struct gendisk *disk;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

	struct queue_limits limits;

#ifdef CONFIG_PM
	struct device *dev;
	enum rpm_status rpm_status;
#endif

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t pm_only;

	struct blk_queue_stats *stats;
	struct rq_qos *rq_qos;
	struct mutex rq_qos_mutex;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int id;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	struct timer_list timeout;
	struct work_struct timeout_work;

	atomic_t nr_active_requests_shared_tags;

	struct blk_mq_tags *sched_shared_tags;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
	struct mutex blkcg_mutex;
#endif

	int node;

	spinlock_t requeue_lock;
	struct list_head requeue_list;
	struct delayed_work requeue_work;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue *fq;
	struct list_head flush_list;

	struct mutex sysfs_lock;
	struct mutex sysfs_dir_lock;
	struct mutex limits_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head unused_hctx_list;
	spinlock_t unused_hctx_lock;

	int mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
#ifdef CONFIG_LOCKDEP
	struct task_struct *mq_freeze_owner;
	int mq_freeze_owner_depth;
#endif
	wait_queue_head_t mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex mq_freeze_lock;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;

	struct dentry *debugfs_dir;
	struct dentry *sched_debugfs_dir;
	struct dentry *rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex debugfs_mutex;

	bool mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
enum {
	QUEUE_FLAG_DYING,		/* queue being torn down */
	QUEUE_FLAG_NOMERGES,		/* disable merge attempts */
	QUEUE_FLAG_SAME_COMP,		/* complete on same CPU-group */
	QUEUE_FLAG_FAIL_IO,		/* fake timeout */
	QUEUE_FLAG_NOXMERGES,		/* No extended merges */
	QUEUE_FLAG_SAME_FORCE,		/* force complete on same CPU */
	QUEUE_FLAG_INIT_DONE,		/* queue is initialized */
	QUEUE_FLAG_STATS,		/* track IO start and completion times */
	QUEUE_FLAG_REGISTERED,		/* queue has been registered to a disk */
	QUEUE_FLAG_QUIESCED,		/* queue has been quiesced */
	QUEUE_FLAG_RQ_ALLOC_TIME,	/* record rq->alloc_time_ns */
	QUEUE_FLAG_HCTX_ACTIVE,		/* at least one blk-mq hctx is active */
	QUEUE_FLAG_SQ_SCHED,		/* single queue style io dispatch */
	QUEUE_FLAG_MAX
};

#define QUEUE_FLAG_MQ_DEFAULT	(1UL << QUEUE_FLAG_SAME_COMP)

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);

#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	(!((q)->limits.features & BLK_FEAT_ROTATIONAL))
#define blk_queue_io_stat(q)	((q)->limits.features & BLK_FEAT_IO_STAT)
#define blk_queue_passthrough_stat(q)	\
	((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
#define blk_queue_dax(q)	((q)->limits.features & BLK_FEAT_DAX)
#define blk_queue_pci_p2pdma(q)	((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
		(q->limits.features & BLK_FEAT_ZONED);
}
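/*
 * Example (hedged sketch): these helpers are typically used to gate work on
 * queue state, e.g. skipping background processing once a queue is being torn
 * down; "q" is an assumed request_queue pointer.
 *
 *	if (blk_queue_dying(q) || !blk_queue_registered(q))
 *		return;
 *	if (blk_queue_is_zoned(q))
 *		nr_zones = disk_nr_zones(q->disk);
 */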
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return disk->nr_zones;
}
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	return false;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return disk_nr_zones(bdev->bd_disk);
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.max_active_zones;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
		const struct attribute_group **groups,
		struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
		const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline u8 bdev_partno(const struct block_device *bdev)
{
	return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}

static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline bool get_disk_ro(struct gendisk *disk)
{
	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline bool bdev_read_only(struct block_device *bdev)
{
	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}
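/*
 * Example (illustrative sketch only): a hypothetical driver reacting to a
 * hardware write-protect switch could mark the whole disk read-only;
 * bdev_read_only() then reports true for the device and its partitions.
 * "my_disk" is an assumed variable.
 *
 *	set_disk_ro(my_disk, true);
 *	if (get_disk_ro(my_disk))
 *		pr_info("%s is now read-only\n", my_disk->disk_name);
 */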
static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
		struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @lim: queue limits to be used for this disk.
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Returns an ERR_PTR on error, else the allocated disk.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(lim, node_id)				\
({								\
	static struct lock_class_key __key;			\
								\
	__blk_alloc_disk(lim, node_id, &__key);			\
})

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
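/*
 * Example (hedged sketch of the usual BIO-based driver lifecycle; the error
 * handling is illustrative and "my_bdev_ops" is an assumed fops table, not a
 * kernel symbol):
 *
 *	struct queue_limits lim = { .logical_block_size = 4096 };
 *	struct gendisk *disk;
 *	int err;
 *
 *	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_bdev_ops;
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);
 *	if (err) {
 *		put_disk(disk);
 *		return err;
 *	}
 *	...
 *	del_gendisk(disk);
 *	put_disk(disk);
 */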
static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline bool bio_straddles_zones(struct bio *bio)
{
	return bio_sectors(bio) &&
		bio_zone_no(bio) !=
		disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{
	if (unlikely(!is_power_of_2(boundary_sectors)))
		return boundary_sectors - sector_div(offset, boundary_sectors);
	return boundary_sectors - (offset & (boundary_sectors - 1));
}

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q:	queue to update
 *
 * This function starts an atomic update of the queue limits. It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify. The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context. The caller must have frozen the queue or ensured
 * that there is no outstanding I/O by other means.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
int blk_validate_limits(struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q:	queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}
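/*
 * Example (hedged sketch): the usual pattern for an atomic limits update from
 * a driver; the particular field being changed is just an illustration.
 *
 *	struct queue_limits lim;
 *	int err;
 *
 *	lim = queue_limits_start_update(q);
 *	lim.max_user_discard_sectors = new_value;
 *	err = queue_limits_commit_update(q, &lim);
 *	if (err)
 *		return err;
 */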
/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported. They are racy and the driver needs to
 * cope with that. Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
	q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
	q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
	q->limits.max_write_zeroes_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

struct rq_list {
	struct request *head;
	struct request *tail;
};
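/*
 * Example (hedged sketch): a hypothetical stacking driver building its limits
 * from the devices underneath it; the "for each" loop is pseudo-code used
 * only for illustration.
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	for each lower bdev in the table:
 *		queue_limits_stack_bdev(&lim, bdev, start_sector,
 *					disk->disk_name);
 *	err = queue_limits_set(disk->queue, &lim);
 */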
#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct rq_list mq_list;		/* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct rq_list cached_rqs;
	u64 cur_ktime;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list;	/* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

/*
 * tsk == current here
 */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		plug->cur_ktime = 0;
	current->flags &= ~PF_BLOCK_TS;
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
#define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
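/*
 * Example (hedged sketch): discarding, and falling back to zeroing, a block
 * range from a filesystem context; whether -EOPNOTSUPP is the right trigger
 * for the fallback depends on the caller and is only an illustration here.
 *
 *	err = sb_issue_discard(sb, start_block, nr_blocks, GFP_NOFS, 0);
 *	if (err == -EOPNOTSUPP)
 *		err = sb_issue_zeroout(sb, start_block, nr_blocks, GFP_NOFS);
 */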
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev_partno(bdev) != 0;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

/*
 * Default upper limit for the software max_sectors limit used for
 * regular file system I/O. This can be increased through sysfs.
 *
 * Not to be confused with the max_hw_sector limit that is entirely
 * controlled by the driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	2560u

static inline struct queue_limits *bdev_limits(struct block_device *bdev)
{
	return &bdev_get_queue(bdev)->limits;
}

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline bool queue_emulates_zone_append(struct request_queue *q)
{
	return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}

static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
	return queue_emulates_zone_append(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_zone_append_sectors;
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	return q->limits.logical_block_size;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}
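/*
 * Example (hedged sketch): a filesystem validating its configured block size
 * against the device's logical block size; "fs_block_size" is an assumed
 * local used only for illustration.
 *
 *	unsigned int lbs = bdev_logical_block_size(bdev);
 *
 *	if (fs_block_size < lbs || !is_power_of_2(fs_block_size))
 *		return -EINVAL;
 */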
static inline unsigned int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_limits(bdev)->discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	return bdev_limits(bdev)->max_write_zeroes_sectors;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
		return true;
	return q->limits.features & BLK_FEAT_STABLE_WRITES;
}

static inline bool blk_queue_write_cache(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return blk_queue_write_cache(bdev_get_queue(bdev));
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
	return bdev_offset_from_zone_start(bio->bi_bdev,
					   bio->bi_iter.bi_sector);
}
static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}

/**
 * bdev_zone_is_seq - check if a sector belongs to a sequential write zone
 * @bdev:	block device to check
 * @sector:	sector number
 *
 * Check if @sector on @bdev is contained in a sequential write required zone.
 */
static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
{
	bool is_seq = false;

#if IS_ENABLED(CONFIG_BLK_DEV_ZONED)
	if (bdev_is_zoned(bdev)) {
		struct gendisk *disk = bdev->bd_disk;
		unsigned long *bitmap;

		rcu_read_lock();
		bitmap = rcu_dereference(disk->conv_zones_bitmap);
		is_seq = !bitmap ||
			!test_bit(disk_zone_no(disk, sector), bitmap);
		rcu_read_unlock();
	}
#endif

	return is_seq;
}

static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
	return q->limits.dma_alignment;
}

static inline unsigned int
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_max;
}

static inline unsigned int
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_unit_min;
}

static inline unsigned int
queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}

static inline unsigned int
queue_atomic_write_max_bytes(const struct request_queue *q)
{
	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline unsigned int
blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
	return lim->dma_alignment | lim->dma_pad_mask;
}

static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
				  unsigned int len)
{
	unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);

	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
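/*
 * Example (hedged sketch): a direct-I/O path rejecting a user iov_iter that
 * the device cannot address; the surrounding code is illustrative only.
 *
 *	if (!bdev_iter_is_aligned(bdev, iter))
 *		return -EINVAL;
 */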
enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif
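/*
 * Example (hedged sketch): a minimal fops table for a hypothetical BIO-based
 * driver; "my_submit_bio" and "my_bdev_ops" are assumed names used only for
 * illustration.
 *
 *	static void my_submit_bio(struct bio *bio)
 *	{
 *		... queue or handle the bio, then call bio_endio(bio) ...
 *	}
 *
 *	static const struct block_device_operations my_bdev_ops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= my_submit_bio,
 *	};
 */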
static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int set_blocksize(struct file *file, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * helpers used to open and claim block devices via
 * bd_prepare_to_claim() must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;
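/*
 * Example (hedged sketch): per-bio accounting in a BIO-based driver that does
 * not remap the bio to another device; the completion path is assumed to have
 * access to the value returned by bio_start_io_acct().
 *
 *	start = bio_start_io_acct(bio);
 *	... perform the I/O ...
 *	bio_end_io_acct(bio, start);
 */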
/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
	 (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
unsigned int block_size(struct block_device *bdev);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx(struct path *, struct kstat *, u32);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx(struct path *path, struct kstat *stat,
		u32 request_mask)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);

struct io_comp_batch {
	struct rq_list req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
	struct request_queue *bd_queue = bdev->bd_queue;
	struct queue_limits *limits = &bd_queue->limits;

	if (!limits->atomic_write_unit_min)
		return false;

	if (bdev_is_partition(bdev)) {
		sector_t bd_start_sect = bdev->bd_start_sect;
		unsigned int alignment =
			max(limits->atomic_write_unit_min,
			    limits->atomic_write_hw_boundary);

		if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
			return false;
	}

	return true;
}

static inline unsigned int
bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}

static inline unsigned int
bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
	if (!bdev_can_atomic_write(bdev))
		return 0;
	return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */