/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values. See block/blk-core.c:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device-related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device-specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device-specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system-wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
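
/*
 * Editor's sketch, not part of the original header: a failover-aware
 * consumer (e.g. a multipath driver) might combine the completion status
 * with blk_path_error() as below. The helper name is made up for
 * illustration.
 */
static inline bool example_should_retry_on_failover(blk_status_t error)
{
	/* BLK_STS_OK needs no retry; blk_path_error() classifies the rest */
	return error != BLK_STS_OK && blk_path_error(error);
}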

struct blk_issue_stat {
	u64 stat;
};

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc. and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
				 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET
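
/*
 * Editor's sketch, not part of the original header: how a bvec pool
 * index would be packed into the top BVEC_POOL_BITS of bi_flags. The
 * real packing happens in the bio allocation paths; this helper is
 * hypothetical and only illustrates the "index + 1" convention.
 */
static inline void example_set_bvec_pool_idx(struct bio *bio, unsigned int idx)
{
	/* clear the old pool bits, keeping the low BIO_* flag bits */
	bio->bi_flags &= (1U << BVEC_POOL_OFFSET) - 1;
	/* store idx + 1 so that BVEC_POOL_IDX() == 0 means "nothing to free" */
	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;
}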

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	/* for driver use */
	__REQ_DRV,

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)

#define REQ_DRV			(1ULL << __REQ_DRV)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)
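
/*
 * Editor's sketch, not part of the original header: given the encoding
 * above, the data direction of a bio falls out of the operation's least
 * significant bit; this mirrors op_is_write() further down.
 */
static inline bool example_bio_is_write(struct bio *bio)
{
	/* bio_op() keeps only the low REQ_OP_BITS; bit 0 is the direction */
	return (bio_op(bio) & 1) != 0;
}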

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */
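
/*
 * Worked example (editor's addition, placed after the header guard on
 * purpose): with BLK_QC_T_SHIFT == 16, blk_tag_to_qc_t(5, 2, false)
 * yields (2 << 16) | 5 == 0x20005. blk_qc_t_to_queue_num() then
 * recovers 2, blk_qc_t_to_tag() recovers 5, and passing internal=true
 * would additionally set BLK_QC_T_INTERNAL (bit 31).
 */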