include/linux/blk_types.h (Linux v5.3)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values. See block/blk-core.c:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
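/*
 * Illustrative sketch, not part of this header: how a blk-mq driver's
 * ->queue_rq() might pick between BLK_STS_DEV_RESOURCE and BLK_STS_RESOURCE.
 * The driver and its helpers (mydrv_get_cmd(), mydrv_dma_map(), etc.) are
 * hypothetical; only the status semantics follow the comments above.
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct mydrv_cmd *cmd;
 *
 *		cmd = mydrv_get_cmd(hctx);	// device specific resource
 *		if (!cmd)
 *			// an inflight completion will free a cmd and is
 *			// guaranteed to rerun the queue
 *			return BLK_STS_DEV_RESOURCE;
 *
 *		if (mydrv_dma_map(cmd, bd->rq)) {	// system wide resource
 *			mydrv_put_cmd(cmd);
 *			// no completion is guaranteed to be pending here,
 *			// so the block layer must rerun the queue itself
 *			return BLK_STS_RESOURCE;
 *		}
 *
 *		mydrv_submit(cmd);
 *		return BLK_STS_OK;
 *	}
 */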
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
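/*
 * Illustrative sketch, not part of this header: a minimal ->bi_end_io
 * completion handler reading ->bi_status. The handler name is made up and
 * it assumes the submitter stored a struct completion in ->bi_private and
 * that this is the last reference on the bio.
 *
 *	static void mydrv_end_io(struct bio *bio)
 *	{
 *		struct completion *done = bio->bi_private;
 *
 *		if (bio->bi_status)
 *			pr_err("I/O failed: %d\n",
 *			       blk_status_to_errno(bio->bi_status));
 *		complete(done);
 *		bio_put(bio);	// drop the reference taken at allocation
 *	}
 */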
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put/release bvec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_QUEUE_ENTERED,	/* can use blk_queue_enter_live() */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;
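/*
 * Illustrative sketch, not part of this header: how the bvec pool index is
 * unpacked from the top BVEC_POOL_BITS of ->bi_flags on bio teardown.
 * free_from_pool() is hypothetical; the real logic lives in bio_free().
 *
 *	unsigned int pool = BVEC_POOL_IDX(bio);	// 0 means "no bvecs to free"
 *
 *	if (pool)
 *		// the +1 bias keeps index 0 free as the "nothing
 *		// allocated" marker, so the real pool is (pool - 1)
 *		free_from_pool(pool - 1, bio->bi_io_vec);
 */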
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,		/* high priority; completion may be polled */

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
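/*
 * Illustrative sketch, not part of this header: composing and decoding a
 * ->bi_opf value. The op lives in the low REQ_OP_BITS, the flags above it,
 * which is what the accessors below rely on.
 *
 *	unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 *	op_is_write(opf);	// true: REQ_OP_WRITE has bit 0 set
 *	op_is_flush(opf);	// true: REQ_FUA is set
 *	op_is_sync(opf);	// true: REQ_SYNC (and REQ_FUA) are set
 *	(opf & REQ_OP_MASK);	// recovers REQ_OP_WRITE
 */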
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */
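/*
 * Illustrative sketch, not part of this header: how a polling cookie
 * round-trips through the blk_qc_t helpers above. The queue number and tag
 * are made-up values.
 *
 *	blk_qc_t cookie = (2 << BLK_QC_T_SHIFT) | 0x2a;	// queue 2, tag 42
 *
 *	blk_qc_t_valid(cookie);		// true: != BLK_QC_T_NONE
 *	blk_qc_t_to_queue_num(cookie);	// 2
 *	blk_qc_t_to_tag(cookie);	// 42 (0x2a)
 *	blk_qc_t_is_internal(cookie);	// false: bit 31 is clear
 */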