/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
        sector_t                bd_start_sect;
        struct disk_stats __percpu *bd_stats;
        unsigned long           bd_stamp;
        bool                    bd_read_only;   /* read-only policy */
        dev_t                   bd_dev;
        int                     bd_openers;
        struct inode *          bd_inode;       /* will die */
        struct super_block *    bd_super;
        void *                  bd_claiming;
        struct device           bd_device;
        void *                  bd_holder;
        int                     bd_holders;
        bool                    bd_write_holder;
#ifdef CONFIG_SYSFS
        struct list_head        bd_holder_disks;
#endif
        struct kobject          *bd_holder_dir;
        u8                      bd_partno;
        spinlock_t              bd_size_lock;   /* for bd_inode->i_size updates */
        struct gendisk *        bd_disk;
        struct backing_dev_info *bd_bdi;

        /* The counter of freeze processes */
        int                     bd_fsfreeze_count;
        /* Mutex for freeze */
        struct mutex            bd_fsfreeze_mutex;
        struct super_block      *bd_fsfreeze_sb;

        struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
        bool                    bd_make_it_fail;
#endif
} __randomize_layout;

#define bdev_whole(_bdev) \
        ((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
        container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
        (&((_bdev)->bd_device.kobj))
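/*
 * Illustrative sketch of how dev_to_bdev() is used (the ro_show() attribute
 * callback below is hypothetical, not part of this header): sysfs hands the
 * callback the embedded bd_device, and container_of() walks back to the
 * enclosing block_device.
 */
#if 0
static ssize_t ro_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct block_device *bdev = dev_to_bdev(dev);

        return sprintf(buf, "%d\n", bdev->bd_read_only);
}
#endif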
/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP         ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT         ((__force blk_status_t)2)
#define BLK_STS_NOSPC           ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT       ((__force blk_status_t)4)
#define BLK_STS_TARGET          ((__force blk_status_t)5)
#define BLK_STS_NEXUS           ((__force blk_status_t)6)
#define BLK_STS_MEDIUM          ((__force blk_status_t)7)
#define BLK_STS_PROTECTION      ((__force blk_status_t)8)
#define BLK_STS_RESOURCE        ((__force blk_status_t)9)
#define BLK_STS_IOERR           ((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE      ((__force blk_status_t)11)

#define BLK_STS_AGAIN           ((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device-related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device-specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that in-flight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device-specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system-wide resources.
 */
#define BLK_STS_DEV_RESOURCE    ((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone-specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone, where a read to the same
 * zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE   ((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE      ((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE    ((__force blk_status_t)16)
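/*
 * Illustrative sketch of how a blk-mq driver might choose between the
 * resource statuses above (struct foo_dev and the foo_* helpers are
 * hypothetical): a per-device resource that in-flight IO is guaranteed to
 * free warrants BLK_STS_DEV_RESOURCE, while a system-wide allocation
 * failure warrants BLK_STS_RESOURCE.
 */
#if 0
static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        struct foo_dev *dev = hctx->queue->queuedata;
        struct foo_cmd *cmd;

        /* Command slots are freed by IO completion: queue will be rerun. */
        cmd = foo_get_cmd_slot(dev);
        if (!cmd)
                return BLK_STS_DEV_RESOURCE;

        /* A DMA mapping may fail with no IO in flight to free resources. */
        if (foo_map_data(dev, cmd, bd->rq))
                return BLK_STS_RESOURCE;

        foo_submit_cmd(dev, cmd);
        return BLK_STS_OK;
}
#endif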
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
        switch (error) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_PROTECTION:
                return false;
        }

        /* Anything else could be a path failure, so should be retried */
        return true;
}
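/*
 * Illustrative sketch of a multipath-style completion path built on
 * blk_path_error() (the foo_* helpers are hypothetical): path-related
 * failures are retried on another path, everything else is completed
 * with the original status.
 */
#if 0
static void foo_complete_rq(struct request *rq, blk_status_t status)
{
        if (status && blk_path_error(status) && foo_have_other_path(rq)) {
                foo_retry_on_other_path(rq);    /* may succeed elsewhere */
                return;
        }
        blk_mq_end_request(rq, status);         /* retrying will not help */
}
#endif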
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
        (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
        u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
        return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
        return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
        return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
                                  sector_t size)
{
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
        issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
                        (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
                        ((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
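/*
 * Illustrative demo (not a real caller) of the packing performed by
 * bio_issue_init(): bit 63 is preserved, the size is truncated to
 * BIO_ISSUE_SIZE_BITS and stored in bits 62..51, and the low 51 bits of
 * ktime_get_ns() land in bits 50..0.
 */
#if 0
static void bio_issue_packing_demo(void)
{
        struct bio_issue issue = { .value = BIO_ISSUE_THROTL_SKIP_LATENCY };

        bio_issue_init(&issue, 0x1234);         /* size truncated to 0x234 */
        WARN_ON(bio_issue_size(&issue) != 0x234);
        WARN_ON(!(issue.value & BIO_ISSUE_RES_MASK));   /* bit 63 kept */
}
#endif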
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned int            bi_opf;         /* bottom bits REQ_OP, top bits
                                                 * req flags. Use accessors.
                                                 */
        unsigned short          bi_flags;       /* BIO_* below */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
        blk_status_t            bi_status;
        atomic_t                __bi_remaining;

        struct bvec_iter        bi_iter;

        bio_end_io_t            *bi_end_io;

        void                    *bi_private;
#ifdef CONFIG_BLK_CGROUP
        /*
         * Represents the association of the css and request_queue for the bio.
         * If a bio goes direct to device, it will not have a blkg as it will
         * not have a request_queue associated with it. The reference is put
         * on release of the bio.
         */
        struct blkcg_gq         *bi_blkg;
        struct bio_issue        bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
        u64                     bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct bio_crypt_ctx    *bi_crypt_context;
#endif

        union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
                struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
        };

        unsigned short          bi_vcnt;        /* how many bio_vec's */

        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */

        unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */

        atomic_t                __bi_cnt;       /* pin count */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        struct bio_set          *bi_pool;

        /*
         * We can inline a number of vecs at the end of the bio, to avoid
         * double allocations for a small number of bio_vecs. This member
         * MUST obviously be kept at the very end of the bio.
         */
        struct bio_vec          bi_inline_vecs[];
};

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
        BIO_NO_PAGE_REF,        /* don't put bvec pages on release */
        BIO_CLONED,             /* doesn't own data */
        BIO_BOUNCED,            /* bio is a bounce bio */
        BIO_WORKINGSET,         /* contains userspace workingset pages */
        BIO_QUIET,              /* Make BIO Quiet */
        BIO_CHAIN,              /* chained bio, ->bi_remaining in effect */
        BIO_REFFED,             /* bio has elevated ->bi_cnt */
        BIO_THROTTLED,          /* This bio has already been subjected to
                                 * throttling rules. Don't do it again. */
        BIO_TRACE_COMPLETION,   /* bio_endio() should trace the final completion
                                 * of this bio. */
        BIO_CGROUP_ACCT,        /* has been accounted to a cgroup */
        BIO_TRACKED,            /* set if bio goes through the rq_qos path */
        BIO_REMAPPED,           /* partition remapping already done */
        BIO_ZONE_WRITE_LOCKED,  /* Owns a zoned device zone write lock */
        BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;
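/*
 * Illustrative sketch of the completion pattern the fields above support
 * (the foo_* names are hypothetical): the submitter stashes its context in
 * bi_private and points bi_end_io at a handler, which bio_endio() invokes
 * once __bi_remaining drops to zero.
 */
#if 0
static void foo_read_end_io(struct bio *bio)
{
        struct foo_request *req = bio->bi_private;

        if (bio->bi_status)             /* holds a BLK_STS_* value */
                foo_request_fail(req, blk_status_to_errno(bio->bi_status));
        else
                foo_request_done(req);
        bio_put(bio);                   /* drop the __bi_cnt reference */
}
#endif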
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS     8
#define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS   24

enum req_opf {
        /* read sectors from the device */
        REQ_OP_READ             = 0,
        /* write sectors to the device */
        REQ_OP_WRITE            = 1,
        /* flush the volatile write cache */
        REQ_OP_FLUSH            = 2,
        /* discard sectors */
        REQ_OP_DISCARD          = 3,
        /* securely erase sectors */
        REQ_OP_SECURE_ERASE     = 5,
        /* write the same data to many sectors */
        REQ_OP_WRITE_SAME       = 7,
        /* write zero-filled sectors */
        REQ_OP_WRITE_ZEROES     = 9,
        /* Open a zone */
        REQ_OP_ZONE_OPEN        = 10,
        /* Close a zone */
        REQ_OP_ZONE_CLOSE       = 11,
        /* Transition a zone to full */
        REQ_OP_ZONE_FINISH      = 12,
        /* write data at the current zone write pointer */
        REQ_OP_ZONE_APPEND      = 13,
        /* reset a zone write pointer */
        REQ_OP_ZONE_RESET       = 15,
        /* reset all the zones present on the device */
        REQ_OP_ZONE_RESET_ALL   = 17,

        /* Driver private requests */
        REQ_OP_DRV_IN           = 34,
        REQ_OP_DRV_OUT          = 35,

        REQ_OP_LAST,
};

enum req_flag_bits {
        __REQ_FAILFAST_DEV =    /* no driver retries of device errors */
                REQ_OP_BITS,
        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
        __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
        __REQ_SYNC,             /* request is sync (sync write or read) */
        __REQ_META,             /* metadata io request */
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_NOMERGE,          /* don't touch this for merging */
        __REQ_IDLE,             /* anticipate more IO after this one */
        __REQ_INTEGRITY,        /* I/O includes block integrity payload */
        __REQ_FUA,              /* forced unit access */
        __REQ_PREFLUSH,         /* request for cache flush */
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_BACKGROUND,       /* background IO */
        __REQ_NOWAIT,           /* Don't wait if request will block */
        /*
         * When a shared kthread needs to issue a bio for a cgroup, doing
         * so synchronously can lead to priority inversions as the kthread
         * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
         * submit_bio() punt the actual issuing to a dedicated per-blkcg
         * work item to avoid such priority inversions.
         */
        __REQ_CGROUP_PUNT,

        /* command specific flags for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */

        __REQ_HIPRI,            /* high priority (polled IO) */

        /* for driver use */
        __REQ_DRV,
        __REQ_SWAP,             /* swapping request. */
        __REQ_NR_BITS,          /* stops here */
};

#define REQ_FAILFAST_DEV        (1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT  (1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER     (1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC                (1ULL << __REQ_SYNC)
#define REQ_META                (1ULL << __REQ_META)
#define REQ_PRIO                (1ULL << __REQ_PRIO)
#define REQ_NOMERGE             (1ULL << __REQ_NOMERGE)
#define REQ_IDLE                (1ULL << __REQ_IDLE)
#define REQ_INTEGRITY           (1ULL << __REQ_INTEGRITY)
#define REQ_FUA                 (1ULL << __REQ_FUA)
#define REQ_PREFLUSH            (1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD              (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND          (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT              (1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT         (1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP             (1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI               (1ULL << __REQ_HIPRI)

#define REQ_DRV                 (1ULL << __REQ_DRV)
#define REQ_SWAP                (1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
        (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
        STAT_READ,
        STAT_WRITE,
        STAT_DISCARD,
        STAT_FLUSH,

        NR_STAT_GROUPS
};

#define bio_op(bio) \
        ((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
        ((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
                                    unsigned op_flags)
{
        bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
        return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
        return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ ||
                (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
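/*
 * Illustrative demo (not a real caller) of composing bi_opf and what the
 * accessors above report for it.
 */
#if 0
static void foo_opf_demo(struct bio *bio)
{
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

        WARN_ON(bio_op(bio) != REQ_OP_WRITE);   /* flag bits masked off */
        WARN_ON(!op_is_write(bio->bi_opf));     /* REQ_OP_WRITE has bit 0 set */
        WARN_ON(!op_is_sync(bio->bi_opf));      /* REQ_SYNC (and FUA) set */
        WARN_ON(!op_is_flush(bio->bi_opf));     /* REQ_FUA set */
}
#endif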
/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
        switch (op & REQ_OP_MASK) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return true;
        default:
                return false;
        }
}

static inline int op_stat_group(unsigned int op)
{
        if (op_is_discard(op))
                return STAT_DISCARD;
        /* op_is_write() returns 0/1, matching STAT_READ/STAT_WRITE */
        return op_is_write(op);
}

/*
 * Polling cookie returned on submission: the hardware queue number is
 * stored above BLK_QC_T_SHIFT, the tag below it, and bit 31 flags an
 * internal tag. E.g. hwq 2, tag 5 encodes as (2 << 16) | 5 = 0x20005.
 */
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE           -1U
#define BLK_QC_T_SHIFT          16
#define BLK_QC_T_INTERNAL       (1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
        return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
        return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
        return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
        return (cookie & BLK_QC_T_INTERNAL) != 0;
}

struct blk_rq_stat {
        u64 mean;
        u64 min;
        u64 max;
        u32 nr_samples;
        u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */