include/linux/blk-mq.h at v5.13
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/lockdep.h>

struct blk_mq_tags;
struct blk_flush_queue;

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many work items are left in the
	 * batch before changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using the Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests on when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by the I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Otherwise, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long		queued;
	/** @run: Number of dispatched requests. */
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	/** @dispatched: Number of dispatch requests by queue. */
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List to store request if CPU is going to die. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store request if some CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

	/** @poll_considered: Count of times blk_poll() was called. */
	unsigned long		poll_considered;
	/** @poll_invoked: Count of how many requests blk_poll() polled. */
	unsigned long		poll_invoked;
	/** @poll_success: Count of how many polled requests were completed. */
	unsigned long		poll_success;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: If this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;

	/**
	 * @srcu: Sleepable RCU. Used as a lock when the type of the hardware
	 * queue is blocking (BLK_MQ_F_BLOCKING). Must be the last member -
	 * see also blk_mq_hw_ctx_size().
	 */
	struct srcu_struct	srcu[];
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};
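
/*
 * Illustrative sketch (not part of the original header): how a driver's
 * ->map_queues() callback (see struct blk_mq_ops below) might split its
 * hardware queues between HCTX_TYPE_DEFAULT and HCTX_TYPE_POLL by filling
 * in one struct blk_mq_queue_map per type and letting blk_mq_map_queues()
 * (declared further down) build the per-CPU mapping. The my_* names and
 * queue counts are hypothetical.
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct blk_mq_queue_map *def = &set->map[HCTX_TYPE_DEFAULT];
 *		struct blk_mq_queue_map *poll = &set->map[HCTX_TYPE_POLL];
 *
 *		// Interrupt-driven queues come first ...
 *		def->nr_queues = my_nr_irq_queues;
 *		def->queue_offset = 0;
 *		// ... followed by a distinct range of polled queues.
 *		poll->nr_queues = my_nr_poll_queues;
 *		poll->queue_offset = def->nr_queues;
 *
 *		blk_mq_map_queues(def);
 *		blk_mq_map_queues(poll);
 *		return 0;
 *	}
 */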

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @active_queues_shared_sbitmap:
 *		   number of active request queues per tag set.
 * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
 * @__breserved_tags:
 *		   A shared reserved tags sbitmap, used over all hctx's
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;
	atomic_t		active_queues_shared_sbitmap;

	struct sbitmap_queue	__bitmap_tags;
	struct sbitmap_queue	__breserved_tags;
	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
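
/*
 * Illustrative sketch (not part of the original header): minimal tag set
 * setup and queue creation as a simple single-queue driver might do it,
 * using blk_mq_alloc_tag_set(), blk_mq_init_queue() and blk_mq_free_tag_set()
 * declared below. "struct my_dev", "struct my_cmd", "my_mq_ops" and the
 * numbers chosen here are hypothetical.
 *
 *	static int my_dev_init_queue(struct my_dev *dev)
 *	{
 *		struct request_queue *q;
 *		int ret;
 *
 *		memset(&dev->tag_set, 0, sizeof(dev->tag_set));
 *		dev->tag_set.ops = &my_mq_ops;
 *		dev->tag_set.nr_hw_queues = 1;
 *		dev->tag_set.queue_depth = 64;
 *		dev->tag_set.numa_node = NUMA_NO_NODE;
 *		dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *		dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *		dev->tag_set.driver_data = dev;
 *
 *		ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *		if (ret)
 *			return ret;
 *
 *		q = blk_mq_init_queue(&dev->tag_set);
 *		if (IS_ERR(q)) {
 *			blk_mq_free_tag_set(&dev->tag_set);
 *			return PTR_ERR(q);
 *		}
 *		dev->queue = q;
 *		return 0;
 *	}
 */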

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @get_budget: Reserve budget before queueing a request. Once
	 * .queue_rq is run, it is the driver's responsibility to release the
	 * reserved budget. The failure case of .get_budget also has to be
	 * handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: Store rq's budget token.
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: Retrieve rq's budget token.
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *, bool);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is for setting up the
	 * flush request.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @initialize_rq_fn: Called from inside blk_get_request().
	 */
	void (*initialize_rq_fn)(struct request *rq);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue is currently busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	int (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
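
/*
 * Illustrative sketch (not part of the original header): the shape of a
 * minimal ->queue_rq() implementation and ops table. The my_* names and the
 * way the command reaches the hardware are hypothetical; the blk-mq calls
 * (blk_mq_start_request(), blk_mq_rq_to_pdu(), blk_mq_end_request()) are the
 * ones declared in this header.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		blk_mq_start_request(rq);
 *
 *		if (!my_hw_submit(hctx->driver_data, cmd))
 *			return BLK_STS_RESOURCE;	// re-dispatched later
 *
 *		// The completion path (e.g. an IRQ handler) eventually calls
 *		// blk_mq_end_request(rq, BLK_STS_OK);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */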

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
		void *queuedata);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q,
						  bool elevator_init);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
					   const struct blk_mq_ops *ops,
					   unsigned int queue_depth,
					   unsigned int set_flags);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
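
/*
 * Illustrative sketch (not part of the original header): a driver that keys
 * its hardware state off a single 32-bit value (as SCSI LLDs commonly do)
 * can combine blk_mq_unique_tag() with the helpers above to recover the
 * hardware queue index and the per-queue tag. "my_lookup_cmd" is
 * hypothetical.
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 *
 *	// e.g. index a per-hwq command table:
 *	struct my_cmd *cmd = my_lookup_cmd(hwq, tag);
 */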

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
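
/*
 * Illustrative sketch (not part of the original header): the PDU convention
 * in practice. A driver that sets tag_set.cmd_size = sizeof(struct my_cmd)
 * gets that many bytes allocated directly behind each request, so the two
 * helpers above convert between the request and the driver command without
 * any extra lookup. "struct my_cmd" is hypothetical.
 *
 *	// In ->queue_rq(): request -> driver PDU.
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *	// In the completion path, where only the PDU is at hand:
 *	struct request *rq = blk_mq_rq_from_pdu(cmd);
 *	blk_mq_complete_request(rq);
 */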

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	if (rq->tag != -1)
		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
			BLK_QC_T_INTERNAL;
}

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

blk_qc_t blk_mq_submit_bio(struct bio *bio);
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

#endif
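
/*
 * Illustrative sketch (not part of the original header): iterating the
 * hardware queues of a request queue with queue_for_each_hw_ctx(), e.g. to
 * kick every queue after resources become available again. The surrounding
 * driver context is hypothetical, and blk_mq_run_hw_queues(q, true) already
 * exists as a one-call alternative for exactly this case.
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		blk_mq_run_hw_queue(hctx, true);
 */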