/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_POLLED is set, polling is enabled
	 * for the queue.
	 */
	if (flags & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags, type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(flags)];
}
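
/*
 * Example (illustrative sketch, not used by this header itself): a submission
 * path can resolve the hardware queue for a bio's command flags by combining
 * the two helpers above with the submitting CPU's software queue:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 *
 * A polled bio selects the HCTX_TYPE_POLL map, a plain read selects
 * HCTX_TYPE_READ when the driver provides a separate read map, and anything
 * else falls back to HCTX_TYPE_DEFAULT.
 */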

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
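
/*
 * Example (illustrative sketch): since the per-cpu ctx structures are
 * persistent, an insertion path can pick up the current CPU's ctx and queue a
 * request on the software list that feeds the matching hardware queue type,
 * roughly:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_lists[hctx->type]);
 *	spin_unlock(&ctx->lock);
 *
 * Even if the task migrates after blk_mq_get_ctx(), the request merely sits
 * on a ctx that no longer matches the running CPU, which is harmless.
 */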

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
			      struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
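
/*
 * Example (illustrative sketch): a dispatch path typically brackets the
 * driver's ->queue_rq() with the budget helpers above, stashing the token on
 * the request so completion and error paths can hand the budget back:
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return false;
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *
 * and, if dispatch fails before the driver consumes the request:
 *
 *	blk_mq_put_dispatch_budget(q, blk_mq_get_rq_budget_token(rq));
 *
 * Drivers that do not implement the budget callbacks simply fall through the
 * no-op branches above.
 */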

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
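
/*
 * Example (illustrative sketch): dispatch code calls blk_mq_get_driver_tag()
 * right before handing a request to the driver; when no tag is available the
 * request goes back on the hctx dispatch list and the queue is re-run later,
 * roughly:
 *
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		spin_lock(&hctx->lock);
 *		list_add(&rq->queuelist, &hctx->dispatch);
 *		spin_unlock(&hctx->lock);
 *		blk_mq_run_hw_queue(hctx, true);
 *		return;
 *	}
 *
 * The fast path above only applies when the request already owns a tag and
 * the tag set is not shared between queues; everything else goes through
 * __blk_mq_get_driver_tag() so shared-tag accounting stays correct.
 */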

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the target
 * request queue is for a zoned block device and the BIO to plug is a write
 * operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}
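
/*
 * Example (illustrative sketch): a submitter batching several bios plugs
 * around its submission loop, and blk_mq_plug() then decides per bio whether
 * that plug may actually be used:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);
 *	submit_bio(bio2);
 *	blk_finish_plug(&plug);
 *
 * For a write to a zoned device blk_mq_plug() returns NULL, so that bio
 * bypasses the plug and keeps its submission order.
 */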

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
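
/*
 * Worked example of the fair-share computation above: with a tag bitmap
 * depth of 128 and 3 active users, each queue may have up to
 * max(DIV_ROUND_UP(128, 3), 4) = 43 requests in flight; with 64 active users
 * the computed share would be 2, so the 4-tag floor applies instead.
 */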

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if (!blk_queue_has_srcu(q)) {					\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	} else {							\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock((q)->srcu);			\
		(dispatch_ops);						\
		srcu_read_unlock((q)->srcu, srcu_idx);			\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)			\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)		\

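/*
 * Example (illustrative sketch): callers wrap their dispatch work in the
 * macro so that queue quiescing can rely on an RCU/SRCU grace period, e.g.:
 *
 *	blk_mq_run_dispatch_ops(hctx->queue,
 *				blk_mq_sched_dispatch_requests(hctx));
 *
 * blk_mq_sched_dispatch_requests() stands for whatever dispatch routine needs
 * protecting; queues created with BLK_MQ_F_BLOCKING take the sleepable SRCU
 * branch, everything else uses the plain RCU branch.
 */
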
#endif