/* include/linux/blkdev.h, at v6.5-rc7 */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is
 * not inserted.  Shall not be set for devices which are removed entirely when
 * the media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*.  Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				flags;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
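/*
 * Illustrative sketch (not part of this header's API surface): a caller
 * combining these flags to open a device read/write and exclusively via
 * blkdev_get_by_path(), which is declared further down in this file;
 * "/dev/vda" and "holder" are placeholders:
 *
 *	bdev = blkdev_get_by_path("/dev/vda",
 *				  BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL,
 *				  holder, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */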
struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of
	 * nr_zones bits which indicates if a zone is conventional (bit set)
	 * or sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk.  Note that this value is
 * only stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}
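/*
 * Illustrative sketch: per the note above, a caller that needs a stable
 * count takes disk->open_mutex around the check, e.g. before a destructive
 * reconfiguration:
 *
 *	mutex_lock(&disk->open_mutex);
 *	busy = disk_openers(disk) > 1;
 *	mutex_unlock(&disk->open_mutex);
 */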
/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED
#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int bdev_nr_zones(struct block_device *bdev);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
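/*
 * Illustrative sketch: a report_zones_cb that counts explicitly open zones;
 * "count_open_zone" and "nr_open" are placeholder names:
 *
 *	static int count_open_zone(struct blk_zone *zone, unsigned int idx,
 *				   void *data)
 *	{
 *		unsigned int *nr_open = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
 *			(*nr_open)++;
 *		return 0;
 *	}
 *
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, count_open_zone,
 *				  &nr_open);
 */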
/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not
 * overlap and must include all sectors within the disk capacity (no sector
 * holes allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct xarray		hctx_table;
	unsigned int		nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	spinlock_t		queue_lock;

	struct gendisk		*disk;

	refcount_t		refs;

	/*
	 * mq queue kobject
	 */
	struct kobject		*mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int		rq_timeout;

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	int			quiesce_depth;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skips the queue */

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
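/*
 * Illustrative sketch: a driver typically advertises queue capabilities
 * during probe by setting flags on its queue, e.g. marking the device
 * non-rotational and enabling a volatile write cache without FUA
 * (blk_queue_write_cache() is declared further down in this file):
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_write_cache(q, true, false);
 */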
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}
static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})
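/*
 * Illustrative sketch: the usual allocation/registration flow of a bio based
 * driver; "my_fops" and "my_data" are placeholders, and error unwinding is
 * abbreviated:
 *
 *	disk = blk_alloc_disk(NUMA_NO_NODE);
 *	if (!disk)
 *		return -ENOMEM;
 *	disk->fops = &my_fops;
 *	disk->private_data = my_data;
 *	ret = add_disk(disk);
 *	if (ret)
 *		put_disk(disk);
 */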
int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}
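/*
 * Worked example for the power-of-2 fast path above: with chunk_sectors = 128
 * and offset = 200, offset & (128 - 1) = 72, so 128 - 72 = 56 sectors remain
 * before the next chunk boundary.
 */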
/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */
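/*
 * Illustrative sketch: batching a burst of submissions under a plug so that
 * adjacent requests can be merged and moved to the queue in one go; "bios"
 * and "nr_bios" are placeholders:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */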
extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}
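/*
 * Example of the block-to-sector conversion above: with a 4096-byte
 * filesystem block size, s_blocksize_bits is 12, so the shift is
 * 12 - SECTOR_SHIFT = 3 and filesystem block 10 maps to sector 80.
 */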
static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define BLK_DEF_MAX_SECTORS	2560u

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	return blk_queue_zoned_model(bdev_get_queue(bdev));
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

/* Whether write serialization is required for @op on zoned devices. */
static inline bool op_needs_zoned_write_locking(enum req_op op)
{
	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}

static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
					  enum req_op op)
{
	return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}
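/*
 * Illustrative sketch: combining the helpers above to reject an I/O that
 * does not start on a zone boundary of a zoned device:
 *
 *	if (bdev_is_zoned(bdev) && !bdev_is_zone_start(bdev, sector))
 *		return -EINVAL;
 */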
static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->limits.dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
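/*
 * Worked example: blksize_bits(4096) computes order_base_2(4096 >> 9) +
 * SECTOR_SHIFT = order_base_2(8) + 9 = 3 + 9 = 12, i.e. 1 << 12 == 4096.
 */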
static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio: bio to end account for
 * @start_time: start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev);
};

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, void *holder);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *I_BDEV(struct inode *inode);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */

int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */