/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS		256
#define DISK_NAME_LEN		32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is not
 * inserted. Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE	= 1 << 0,
	GENHD_FL_HIDDEN		= 1 << 1,
	GENHD_FL_NO_PART	= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile *profile;
	unsigned char flags;
	unsigned char tuple_size;
	unsigned char interval_exp;
	unsigned char tag_size;
};

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info *bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int nr_zones;
	unsigned int max_open_zones;
	unsigned int max_active_zones;
	unsigned long *conv_zones_bitmap;
	unsigned long *seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/**
 * disk_openers - returns how many openers there are for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

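/*
 * Illustrative sketch, not part of this header: disk_openers() is only
 * stable while disk->open_mutex is held, so a caller that wants to act on
 * "no more openers" samples it under that mutex.  The helper name below is
 * a hypothetical example; struct mutex helpers are assumed to be available
 * (they are normally pulled in indirectly by the includes above).
 */
static inline bool example_disk_has_no_openers(struct gendisk *disk)
{
	bool idle;

	mutex_lock(&disk->open_mutex);
	idle = disk_openers(disk) == 0;
	mutex_unlock(&disk->open_mutex);

	return idle;
}
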
#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce bounce;
	unsigned long seg_boundary_mask;
	unsigned long virt_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_dev_sectors;
	unsigned int chunk_sectors;
	unsigned int max_sectors;
	unsigned int max_user_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int logical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_hw_discard_sectors;
	unsigned int max_secure_erase_sectors;
	unsigned int max_write_zeroes_sectors;
	unsigned int max_zone_append_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int zone_write_granularity;

	unsigned short max_segments;
	unsigned short max_integrity_segments;
	unsigned short max_discard_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char raid_partial_stripes_expensive;
	enum blk_zoned_model zoned;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int dma_alignment;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int bdev_nr_zones(struct block_device *bdev);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject kobj;
	sector_t sector;
	sector_t nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject kobj;
	bool sysfs_registered;
	unsigned int nr_ia_ranges;
	struct blk_independent_access_range ia_range[];
};

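/*
 * Illustrative sketch, not part of this header: walking the zones of a
 * zoned block device with blkdev_report_zones() to count the conventional
 * ones.  Both helper names are hypothetical examples; blkdev_report_zones()
 * returns the number of zones reported or a negative errno and may sleep.
 */
#ifdef CONFIG_BLK_DEV_ZONED
static inline int example_conv_zone_cb(struct blk_zone *zone,
				       unsigned int idx, void *data)
{
	unsigned int *nr_conv = data;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		(*nr_conv)++;
	return 0;	/* zero means: continue with the next zone */
}

static inline int example_count_conv_zones(struct block_device *bdev,
					   unsigned int *nr_conv)
{
	*nr_conv = 0;
	return blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				   example_conv_zone_cb, nr_conv);
}
#endif /* CONFIG_BLK_DEV_ZONED */
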
struct request_queue {
	struct request *last_merge;
	struct elevator_queue *elevator;

	struct percpu_ref q_usage_counter;

	struct blk_queue_stats *stats;
	struct rq_qos *rq_qos;

	const struct blk_mq_ops *mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu *queue_ctx;

	unsigned int queue_depth;

	/* hw dispatch queues */
	struct xarray hctx_table;
	unsigned int nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t pm_only;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int id;

	spinlock_t queue_lock;

	struct gendisk *disk;

	refcount_t refs;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device *dev;
	enum rpm_status rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */

	unsigned int dma_pad_mask;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int rq_timeout;

	struct timer_list timeout;
	struct work_struct timeout_work;

	atomic_t nr_active_requests_shared_tags;

	struct blk_mq_tags *sched_shared_tags;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
	struct mutex blkcg_mutex;
#endif

	struct queue_limits limits;

	unsigned int required_elevator_features;

	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue *fq;

	struct list_head requeue_list;
	spinlock_t requeue_lock;
	struct delayed_work requeue_work;

	struct mutex sysfs_lock;
	struct mutex sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head unused_hctx_list;
	spinlock_t unused_hctx_lock;

	int mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex mq_freeze_lock;

	int quiesce_depth;

	struct blk_mq_tag_set *tag_set;
	struct list_head tag_set_list;

	struct dentry *debugfs_dir;
	struct dentry *sched_debugfs_dir;
	struct dentry *rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex debugfs_mutex;

	bool mq_sysfs_init_done;
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skip the queue*/

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

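/*
 * Illustrative sketch, not part of this header: registering a block major
 * with an on-demand probe callback.  The helper names and the major argument
 * are hypothetical; the probe is typically invoked when the device number is
 * looked up before a disk has been added, giving the driver a chance to
 * create its gendisk on demand.  Drivers with nothing to probe can use
 * register_blkdev(), which passes a NULL probe.
 */
static inline void example_blkdev_probe(dev_t devt)
{
	/* Create and add the gendisk backing 'devt' here. */
}

static inline int example_blkdev_register(unsigned int example_major)
{
	return __register_blkdev(example_major, "exampleblk",
				 example_blkdev_probe);
}
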
/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

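/*
 * Illustrative sketch, not part of this header: how a driver might describe
 * its hardware to the block layer at probe time using the accessors above.
 * All of the numbers are made-up examples for a 4k-sector, 1 MiB-per-request
 * non-rotational device with a volatile write cache and FUA support.
 */
static inline void example_set_queue_limits(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 4096);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 2048);	/* 2048 * 512 bytes = 1 MiB */
	blk_queue_max_segments(q, 128);
	blk_queue_write_cache(q, true, true);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
}
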
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list;	/* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;
	bool nowait;

	struct list_head cb_list;	/* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

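/*
 * Illustrative sketch, not part of this header: batching a group of bios
 * under one plug so the block layer can merge adjacent requests and hand
 * them to the driver as a batch when the plug is finished.  The helper and
 * its arguments are hypothetical; submit_bio() comes from <linux/bio.h>.
 */
static inline void example_submit_bio_batch(struct bio **bios,
					    unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);	/* plugged requests are issued here */
}
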
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define BLK_DEF_MAX_SECTORS 2560u

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{

	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	return blk_queue_zoned_model(bdev_get_queue(bdev));
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}

static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
					  blk_opf_t op)
{
	if (!bdev_is_zoned(bdev))
		return false;

	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

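/*
 * Illustrative sketch, not part of this header: discard a sector range only
 * if the device advertises discard support.  The helper name and the use of
 * GFP_KERNEL are example choices; a max_discard_sectors limit of zero means
 * the device cannot discard at all.
 */
static inline int example_discard_range(struct block_device *bdev,
					sector_t sector, sector_t nr_sects)
{
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
}
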
static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}

static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->limits.dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

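/*
 * Illustrative sketch, not part of this header: the skeleton of a BIO based
 * driver tying the pieces above together - a block_device_operations with a
 * submit_bio handler, blk_alloc_disk(), set_capacity() and add_disk().  All
 * names and the 1 GiB capacity are hypothetical; a real driver would also
 * set disk->disk_name, fill in ->owner and actually service the I/O instead
 * of completing it immediately.
 */
static inline void example_submit_bio(struct bio *bio)
{
	bio_endio(bio);		/* a real driver would perform the I/O here */
}

static inline int example_create_disk(int node)
{
	static const struct block_device_operations example_fops = {
		.submit_bio	= example_submit_bio,
	};
	struct gendisk *disk;
	int ret;

	disk = blk_alloc_disk(node);
	if (!disk)
		return -ENOMEM;

	disk->fops = &example_fops;
	set_capacity(disk, 2097152);	/* 1 GiB in 512-byte sectors */

	ret = add_disk(disk);
	if (ret)
		put_disk(disk);
	return ret;
}
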
static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio: bio to end account for
 * @start_time: start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder);
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
int bd_prepare_to_claim(struct block_device *bdev, void *holder);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, fmode_t mode);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
		loff_t lend);

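/*
 * Illustrative sketch, not part of this header: the completion side of bio
 * based I/O accounting.  The start time would have been obtained from
 * bio_start_io_acct() when the bio was accepted; the helper name is a
 * hypothetical example and bio_endio() comes from <linux/bio.h>.
 */
static inline void example_complete_accounted_bio(struct bio *bio,
						  unsigned long start_time)
{
	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}
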
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
}
static inline void printk_all_partitions(void)
{
}
#endif /* CONFIG_BLOCK */

int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */