Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/blk-mq.h at v6.3-rc5 (1222 lines, 36 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ 4
#define BLKDEV_DEFAULT_RQ 128

enum rq_end_io_ret {
        RQ_END_IO_NONE,
        RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED ((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED ((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS ((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
/* queue has elevator attached */
#define RQF_ELV ((__force req_flags_t)(1 << 22))
#define RQF_RESV ((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
        (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
        MQ_RQ_IDLE = 0,
        MQ_RQ_IN_FLIGHT = 1,
        MQ_RQ_COMPLETE = 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
        struct request_queue *q;
        struct blk_mq_ctx *mq_ctx;
        struct blk_mq_hw_ctx *mq_hctx;

        blk_opf_t cmd_flags;    /* op and common flags */
        req_flags_t rq_flags;

        int tag;
        int internal_tag;

        unsigned int timeout;

        /* the following two fields are internal, NEVER access directly */
        unsigned int __data_len;        /* total data len */
        sector_t __sector;              /* sector cursor */

        struct bio *bio;
        struct bio *biotail;

        union {
                struct list_head queuelist;
                struct request *rq_next;
        };

        struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
        /* Time that the first bio started allocating this request. */
        u64 alloc_time_ns;
#endif
        /* Time that this request was allocated for this IO. */
        u64 start_time_ns;
        /* Time that I/O was submitted to the device. */
        u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
        unsigned short wbt_flags;
#endif
        /*
         * rq sectors used for blk stats. It has the same value as
         * blk_rq_sectors(rq), except that it is never zeroed by
         * completion.
         */
        unsigned short stats_sectors;

        /*
         * Number of scatter-gather DMA addr+len pairs after
         * physical address coalescing is performed.
         */
        unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
        unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct bio_crypt_ctx *crypt_ctx;
        struct blk_crypto_keyslot *crypt_keyslot;
#endif

        unsigned short ioprio;

        enum mq_rq_state state;
        atomic_t ref;

        unsigned long deadline;

        /*
         * The hash is used inside the scheduler, and killed once the
         * request reaches the dispatch list. The ipi_list is only used
         * to queue the request for softirq completion, which is long
         * after the request has been unhashed (and even removed from
         * the dispatch list).
         */
        union {
                struct hlist_node hash; /* merge hash */
                struct llist_node ipi_list;
        };

        /*
         * The rb_node is only used inside the io scheduler, requests
         * are pruned when moved to the dispatch queue. So let the
         * completion_data share space with the rb_node.
         */
        union {
                struct rb_node rb_node; /* sort/lookup */
                struct bio_vec special_vec;
                void *completion_data;
        };

        /*
         * Three pointers are available for the IO schedulers, if they need
         * more they have to dynamically allocate it. Flush requests are
         * never put on the IO scheduler. So let the flush fields share
         * space with the elevator data.
         */
        union {
                struct {
                        struct io_cq *icq;
                        void *priv[2];
                } elv;

                struct {
                        unsigned int seq;
                        struct list_head list;
                        rq_end_io_fn *saved_end_io;
                } flush;
        };

        union {
                struct __call_single_data csd;
                u64 fifo_time;
        };

        /*
         * completion callback.
         */
        rq_end_io_fn *end_io;
        void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
        return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
        return blk_op_is_passthrough(req_op(rq));
}

static inline unsigned short req_get_ioprio(struct request *req)
{
        return req->ioprio;
}

#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
        (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq) do { \
        (rq)->rq_next = *(listptr); \
        *(listptr) = rq; \
} while (0)

#define rq_list_add_tail(lastpptr, rq) do { \
        (rq)->rq_next = NULL; \
        **(lastpptr) = rq; \
        *(lastpptr) = &rq->rq_next; \
} while (0)

#define rq_list_pop(listptr) \
({ \
        struct request *__req = NULL; \
        if ((listptr) && *(listptr)) { \
                __req = *(listptr); \
                *(listptr) = __req->rq_next; \
        } \
        __req; \
})

#define rq_list_peek(listptr) \
({ \
        struct request *__req = NULL; \
        if ((listptr) && *(listptr)) \
                __req = *(listptr); \
        __req; \
})

#define rq_list_for_each(listptr, pos) \
        for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt) \
        for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
                pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq) (rq)->rq_next
#define rq_list_empty(list) ((list) == (struct request *) NULL)

/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
                                struct request *rq, struct request *prev)
{
        if (prev)
                prev->rq_next = rq->rq_next;
        else
                *src = rq->rq_next;
        rq_list_add(dst, rq);
}
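
/*
 * Illustrative sketch, not part of the kernel header: one way a driver-side
 * helper in the style of ->queue_rqs() might consume an rq_list using the
 * macros above. example_queue_rqs() and example_queue_one() are made-up
 * names used only for illustration.
 */
#if 0
static void example_queue_rqs(struct request **rqlist)
{
        struct request *rq, *requeue_list = NULL;

        /* detach requests one by one; keep the ones we cannot issue yet */
        while ((rq = rq_list_pop(rqlist))) {
                if (!example_queue_one(rq))
                        rq_list_add(&requeue_list, rq);
        }
        /* hand the leftovers back to the caller */
        *rqlist = requeue_list;
}
#endif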

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *  a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *  request to complete.
 */
enum blk_eh_timer_return {
        BLK_EH_DONE,
        BLK_EH_RESET_TIMER,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
        struct {
                /** @lock: Protects the dispatch list. */
                spinlock_t lock;
                /**
                 * @dispatch: Used for requests that are ready to be
                 * dispatched to the hardware but for some reason (e.g. lack of
                 * resources) could not be sent to the hardware. As soon as the
                 * driver can send new requests, requests on this list will
                 * be sent first for a fairer dispatch.
                 */
                struct list_head dispatch;
                /**
                 * @state: BLK_MQ_S_* flags. Defines the state of the hw
                 * queue (active, scheduled to restart, stopped).
                 */
                unsigned long state;
        } ____cacheline_aligned_in_smp;

        /**
         * @run_work: Used for scheduling a hardware queue run at a later time.
         */
        struct delayed_work run_work;
        /** @cpumask: Map of available CPUs where this hctx can run. */
        cpumask_var_t cpumask;
        /**
         * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
         * selection from @cpumask.
         */
        int next_cpu;
        /**
         * @next_cpu_batch: Counter of how many work items are left in the
         * batch before changing to the next CPU.
         */
        int next_cpu_batch;

        /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
        unsigned long flags;

        /**
         * @sched_data: Pointer owned by the IO scheduler attached to a request
         * queue. It's up to the IO scheduler how to use this pointer.
         */
        void *sched_data;
        /**
         * @queue: Pointer to the request queue that owns this hardware context.
         */
        struct request_queue *queue;
        /** @fq: Queue of requests that need to perform a flush operation. */
        struct blk_flush_queue *fq;

        /**
         * @driver_data: Pointer to data owned by the block driver that created
         * this hctx.
         */
        void *driver_data;

        /**
         * @ctx_map: Bitmap for each software queue. If bit is on, there is a
         * pending request in that software queue.
         */
        struct sbitmap ctx_map;

        /**
         * @dispatch_from: Software queue to be used when no scheduler was
         * selected.
         */
        struct blk_mq_ctx *dispatch_from;
        /**
         * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
         * decide if the hw_queue is busy, using an Exponential Weighted Moving
         * Average algorithm.
         */
        unsigned int dispatch_busy;

        /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
        unsigned short type;
        /** @nr_ctx: Number of software queues. */
        unsigned short nr_ctx;
        /** @ctxs: Array of software queues. */
        struct blk_mq_ctx **ctxs;

        /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
        spinlock_t dispatch_wait_lock;
        /**
         * @dispatch_wait: Waitqueue to put requests when there is no tag
         * available at the moment, to wait for another try in the future.
         */
        wait_queue_entry_t dispatch_wait;

        /**
         * @wait_index: Index of next available dispatch_wait queue to insert
         * requests.
         */
        atomic_t wait_index;

        /**
         * @tags: Tags owned by the block driver. A tag in this set is only
         * assigned when a request is dispatched from a hardware queue.
         */
        struct blk_mq_tags *tags;
        /**
         * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
         * scheduler associated with a request queue, a tag is assigned when
         * that request is allocated. Else, this member is not used.
         */
        struct blk_mq_tags *sched_tags;

        /** @queued: Number of queued requests. */
        unsigned long queued;
        /** @run: Number of dispatched requests. */
        unsigned long run;

        /** @numa_node: NUMA node the storage adapter has been connected to. */
        unsigned int numa_node;
        /** @queue_num: Index of this hardware queue. */
        unsigned int queue_num;

        /**
         * @nr_active: Number of active requests. Only used when a tag set is
         * shared across request queues.
         */
        atomic_t nr_active;

        /** @cpuhp_online: List to store requests if a CPU is going to die. */
        struct hlist_node cpuhp_online;
        /** @cpuhp_dead: List to store requests if some CPU dies. */
        struct hlist_node cpuhp_dead;
        /** @kobj: Kernel object for sysfs. */
        struct kobject kobj;

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @debugfs_dir: debugfs directory for this hardware queue. Named
         * as cpu<cpu_number>.
         */
        struct dentry *debugfs_dir;
        /** @sched_debugfs_dir: debugfs directory for the scheduler. */
        struct dentry *sched_debugfs_dir;
#endif

        /**
         * @hctx_list: if this hctx is not in use, this is an entry in
         * q->unused_hctx_list.
         */
        struct list_head hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map: CPU ID to hardware queue index map. This is an array
 *  with nr_cpu_ids elements. Each element has a value in the range
 *  [@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues: Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *  driver to map each hardware queue type (enum hctx_type) onto a distinct
 *  set of hardware queues.
 */
struct blk_mq_queue_map {
        unsigned int *mq_map;
        unsigned int nr_queues;
        unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ: Just for READ I/O.
 * @HCTX_TYPE_POLL: Polled I/O of any kind.
 * @HCTX_MAX_TYPES: Number of types of hctx.
 */
enum hctx_type {
        HCTX_TYPE_DEFAULT,
        HCTX_TYPE_READ,
        HCTX_TYPE_POLL,

        HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops: Pointers to functions that implement block driver behavior.
 * @map: One or more ctx -> hctx mappings. One map exists for each
 *  hardware queue type (enum hctx_type) that the driver wishes
 *  to support. There are no restrictions on maps being of the
 *  same size, and it's perfectly legal to share maps between
 *  types.
 * @nr_maps: Number of elements in the @map array. A number in the range
 *  [1, HCTX_MAX_TYPES].
 * @nr_hw_queues: Number of hardware queues supported by the block driver that
 *  owns this data structure.
 * @queue_depth: Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *  allocations.
 * @cmd_size: Number of additional bytes to allocate per request. The block
 *  driver owns these additional bytes.
 * @numa_node: NUMA node the storage adapter has been connected to.
 * @timeout: Request processing timeout in jiffies.
 * @flags: Zero or more BLK_MQ_F_* flags.
 * @driver_data: Pointer to data owned by the block driver that created this
 *  tag set.
 * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *  elements.
 * @shared_tags: Shared set of tags. Has @nr_hw_queues elements. If set,
 *  shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list: List of the request queues that use this tag set. See also
 *  request_queue.tag_set_list.
 * @srcu: Used as a lock when the type of the request queue is blocking
 *  (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
        const struct blk_mq_ops *ops;
        struct blk_mq_queue_map map[HCTX_MAX_TYPES];
        unsigned int nr_maps;
        unsigned int nr_hw_queues;
        unsigned int queue_depth;
        unsigned int reserved_tags;
        unsigned int cmd_size;
        int numa_node;
        unsigned int timeout;
        unsigned int flags;
        void *driver_data;

        struct blk_mq_tags **tags;

        struct blk_mq_tags *shared_tags;

        struct mutex tag_list_lock;
        struct list_head tag_list;
        struct srcu_struct *srcu;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq: Request pointer.
 * @last: True if it is the last request in the queue.
 */
struct blk_mq_queue_data {
        struct request *rq;
        bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
        /**
         * @queue_rq: Queue a new request from block IO.
         */
        blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
                                 const struct blk_mq_queue_data *);

        /**
         * @commit_rqs: If a driver uses bd->last to judge when to submit
         * requests to hardware, it must define this function. In case of errors
         * that make us stop issuing further requests, this hook serves the
         * purpose of kicking the hardware (which the last request otherwise
         * would have done).
         */
        void (*commit_rqs)(struct blk_mq_hw_ctx *);

        /**
         * @queue_rqs: Queue a list of new requests. The driver is guaranteed
         * that each request belongs to the same queue. If the driver doesn't
         * empty the @rqlist completely, then the rest will be queued
         * individually by the block layer upon return.
         */
        void (*queue_rqs)(struct request **rqlist);

        /**
         * @get_budget: Reserve budget before queueing a request. Once
         * .queue_rq is run, it is the driver's responsibility to release the
         * reserved budget. The failure case of .get_budget also has to be
         * handled to avoid I/O deadlock.
         */
        int (*get_budget)(struct request_queue *);

        /**
         * @put_budget: Release the reserved budget.
         */
        void (*put_budget)(struct request_queue *, int);

        /**
         * @set_rq_budget_token: store rq's budget token
         */
        void (*set_rq_budget_token)(struct request *, int);
        /**
         * @get_rq_budget_token: retrieve rq's budget token
         */
        int (*get_rq_budget_token)(struct request *);

        /**
         * @timeout: Called on request timeout.
         */
        enum blk_eh_timer_return (*timeout)(struct request *);

        /**
         * @poll: Called to poll for completion of a specific tag.
         */
        int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

        /**
         * @complete: Mark the request as complete.
         */
        void (*complete)(struct request *);

        /**
         * @init_hctx: Called when the block layer side of a hardware queue has
         * been set up, allowing the driver to allocate/init matching
         * structures.
         */
        int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
        /**
         * @exit_hctx: Ditto for exit/teardown.
         */
        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

        /**
         * @init_request: Called for every command allocated by the block layer
         * to allow the driver to set up driver specific data.
         *
         * A tag greater than or equal to queue_depth is for setting up the
         * flush request.
         */
        int (*init_request)(struct blk_mq_tag_set *set, struct request *,
                            unsigned int, unsigned int);
        /**
         * @exit_request: Ditto for exit/teardown.
         */
        void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
                             unsigned int);

        /**
         * @cleanup_rq: Called before freeing one request which isn't completed
         * yet, and usually for freeing the driver private data.
         */
        void (*cleanup_rq)(struct request *);

        /**
         * @busy: If set, returns whether or not this queue currently is busy.
         */
        bool (*busy)(struct request_queue *);

        /**
         * @map_queues: This allows drivers to specify their own queue mapping
         * by overriding the setup-time function that builds the mq_map.
         */
        void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
        /**
         * @show_rq: Used by the debugfs implementation to show driver-specific
         * information about a request.
         */
        void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

enum {
        BLK_MQ_F_SHOULD_MERGE = 1 << 0,
        BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
        /*
         * Set when this device requires underlying blk-mq device for
         * completing IO:
         */
        BLK_MQ_F_STACKING = 1 << 2,
        BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
        BLK_MQ_F_BLOCKING = 1 << 5,
        /* Do not allow an I/O scheduler to be configured. */
        BLK_MQ_F_NO_SCHED = 1 << 6,
        /*
         * Select 'none' during queue registration in case of a single hwq
         * or shared hwqs instead of 'mq-deadline'.
         */
        BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,

        BLK_MQ_S_STOPPED = 0,
        BLK_MQ_S_TAG_ACTIVE = 1,
        BLK_MQ_S_SCHED_RESTART = 2,

        /* hw queue is inactive after all its CPUs become offline */
        BLK_MQ_S_INACTIVE = 3,

        BLK_MQ_MAX_DEPTH = 10240,

        BLK_MQ_CPU_WORK_BATCH = 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
        ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
                ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
                << BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX (-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
                struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata) \
({ \
        static struct lock_class_key __key; \
        \
        __blk_mq_alloc_disk(set, queuedata, &__key); \
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
                struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
                const struct blk_mq_ops *ops, unsigned int queue_depth,
                unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
        /* return when out of requests */
        BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
        /* allocate from reserved pool */
        BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
        /* set RQF_PM */
        BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
                blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
                blk_opf_t opf, blk_mq_req_flags_t flags,
                unsigned int hctx_idx);
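
/*
 * Illustrative sketch, not part of the kernel header: allocating a request
 * on a queue and executing it synchronously with blk_execute_rq() (declared
 * further below). example_exec_rq() is a made-up name; the REQ_OP_DRV_IN
 * opcode and the minimal error handling are assumptions for illustration.
 */
#if 0
static blk_status_t example_exec_rq(struct request_queue *q)
{
        struct request *rq;
        blk_status_t ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
                return BLK_STS_RESOURCE;

        ret = blk_execute_rq(rq, false);        /* waits for completion */
        blk_mq_free_request(rq);
        return ret;
}
#endif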

/*
 * Tag address space map.
 */
struct blk_mq_tags {
        unsigned int nr_tags;
        unsigned int nr_reserved_tags;

        atomic_t active_queues;

        struct sbitmap_queue bitmap_tags;
        struct sbitmap_queue breserved_tags;

        struct request **rqs;
        struct request **static_rqs;
        struct list_head page_list;

        /*
         * used to clear request reference in rqs[] before freeing one
         * request pool
         */
        spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
                                               unsigned int tag)
{
        if (tag < tags->nr_tags) {
                prefetch(tags->rqs[tag]);
                return tags->rqs[tag];
        }

        return NULL;
}

enum {
        BLK_MQ_UNIQUE_TAG_BITS = 16,
        BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
        return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
        return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
                void (*complete)(struct request *rq))
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
        complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
        return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
        return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
                                       struct io_comp_batch *iob, int ioerror,
                                       void (*complete)(struct io_comp_batch *))
{
        if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
            (req->end_io && !blk_rq_is_passthrough(req)))
                return false;

        if (!iob->complete)
                iob->complete = complete;
        else if (iob->complete != complete)
                return false;
        iob->need_ts |= blk_mq_need_time_stamp(req);
        rq_list_add(&iob->req_list, req);
        return true;
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
        if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
            test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
                return __blk_should_fake_timeout(q);
        return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract the
 * request size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add the request
 * size to get the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
        return rq + 1;
}
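
/*
 * Illustrative sketch, not part of the kernel header: a driver that sets
 * blk_mq_tag_set.cmd_size = sizeof(struct example_cmd) gets that many bytes
 * allocated directly behind each request and can convert between the two
 * with the helpers above. struct example_cmd and example_prep() are made-up
 * names used only for illustration.
 */
#if 0
struct example_cmd {
        u32 opcode;
        u64 lba;
};

static void example_prep(struct request *rq)
{
        struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->lba = blk_rq_pos(rq);      /* PDU sits right after the request */
}
#endif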

#define queue_for_each_hw_ctx(q, hctx, i) \
        xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i) \
        for ((i) = 0; (i) < (hctx)->nr_ctx && \
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
        if (rq->q->mq_ops->cleanup_rq)
                rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
                unsigned int nr_segs)
{
        rq->nr_phys_segments = nr_segs;
        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
        rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
        return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                struct bio_set *bs, gfp_t gfp_mask,
                int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
        struct page **pages;
        unsigned long offset;
        unsigned short page_order;
        unsigned short nr_entries;
        bool null_mapped;
        bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
                struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
                void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
                struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
                unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
        struct bvec_iter iter;
        struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq) \
        if ((rq->bio)) \
                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter) \
        __rq_for_each_bio(_iter.bio, _rq) \
                bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter) \
        __rq_for_each_bio(_iter.bio, _rq) \
                bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter) \
        (_iter.bio->bi_next == NULL && \
         bio_iter_last(bvec, _iter.iter))
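
/*
 * Illustrative sketch, not part of the kernel header: walking every segment
 * of a request with rq_for_each_segment(), e.g. to program a hypothetical
 * controller's scatter/gather entries. example_walk_segments() is a made-up
 * name used only for illustration.
 */
#if 0
static void example_walk_segments(struct request *rq)
{
        struct req_iterator iter;
        struct bio_vec bvec;

        rq_for_each_segment(bvec, rq, iter)
                pr_debug("segment: offset %u, len %u\n",
                         bvec.bv_offset, bvec.bv_len);
}
#endif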

/*
 * blk_rq_pos()           : the current sector
 * blk_rq_bytes()         : bytes left in the entire request
 * blk_rq_cur_bytes()     : bytes left in the current segment
 * blk_rq_sectors()       : sectors left in the entire request
 * blk_rq_cur_sectors()   : sectors left in the current segment
 * blk_rq_stats_sectors() : sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
        return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
        if (!rq->bio)
                return 0;
        if (!bio_has_data(rq->bio))     /* dataless requests such as discard */
                return rq->bio->bi_iter.bi_size;
        return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
        return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
        return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request. Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return rq->special_vec.bv_len;
        return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request. The caller needs to check that
 * there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return rq->special_vec;
        return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
        unsigned int nr_bios = 0;
        struct bio *bio;

        __rq_for_each_bio(bio, rq)
                nr_bios++;

        return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
                unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter. But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload. In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return 1;
        return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
        return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
                struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                struct scatterlist *sglist)
{
        struct scatterlist *last_sg = NULL;

        return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
void blk_dump_rq_flags(struct request *, char *);

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
        return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
        return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
        if (blk_req_needs_zone_write_lock(rq))
                __blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
        if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
                __blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return rq->q->disk->seq_zones_wlock &&
                test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        if (!blk_req_needs_zone_write_lock(rq))
                return true;
        return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
        return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

#endif /* BLK_MQ_H */
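
/*
 * Illustrative sketch, not part of the kernel header: the minimal shape of a
 * blk-mq driver, wiring a ->queue_rq() handler into a tag set and allocating
 * a gendisk from it. All example_* names are made up and error handling is
 * abbreviated; a real driver would also set up the disk and call add_disk().
 */
#if 0
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);
        /* ... hand the request to the (hypothetical) hardware ... */
        blk_mq_end_request(rq, BLK_STS_OK);
        return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
        .queue_rq = example_queue_rq,
};

static int example_probe(void)
{
        static struct blk_mq_tag_set set = {
                .ops = &example_mq_ops,
                .nr_hw_queues = 1,
                .queue_depth = BLKDEV_DEFAULT_RQ,
                .numa_node = NUMA_NO_NODE,
                .flags = BLK_MQ_F_SHOULD_MERGE,
        };
        struct gendisk *disk;
        int ret;

        ret = blk_mq_alloc_tag_set(&set);
        if (ret)
                return ret;

        disk = blk_mq_alloc_disk(&set, NULL);
        if (IS_ERR(disk)) {
                blk_mq_free_tag_set(&set);
                return PTR_ERR(disk);
        }
        return 0;
}
#endif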