/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"

#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)

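/*
 * Bio/request merge and queue-restart hooks, implemented in blk-mq-sched.c
 * and called from the blk-mq core.
 */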
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
                struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

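/*
 * Scheduler (elevator) setup and teardown for a request queue.
 */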
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
                struct elevator_resources *res);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);

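/*
 * Allocation and freeing of scheduler tags and other per-scheduler
 * resources; the *_batch variants operate on a table of elevator resources
 * kept in an xarray.
 */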
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
                unsigned int nr_hw_queues, unsigned int nr_requests);
int blk_mq_alloc_sched_res(struct request_queue *q,
                struct elevator_type *type,
                struct elevator_resources *res,
                unsigned int nr_hw_queues);
int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
                struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
                struct blk_mq_tag_set *set);
void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl);
void blk_mq_free_sched_tags(struct elevator_tags *et,
                struct blk_mq_tag_set *set);
void blk_mq_free_sched_res(struct elevator_resources *res,
                struct elevator_type *type,
                struct blk_mq_tag_set *set);
void blk_mq_free_sched_res_batch(struct xarray *et_table,
                struct blk_mq_tag_set *set);
/*
 * blk_mq_alloc_sched_data() - Allocate scheduler-specific data
 * Returns:
 *  - Pointer to allocated data on success
 *  - NULL if no allocation needed
 *  - ERR_PTR(-ENOMEM) in case of failure
 */
static inline void *blk_mq_alloc_sched_data(struct request_queue *q,
                struct elevator_type *e)
{
        void *sched_data;

        if (!e || !e->ops.alloc_sched_data)
                return NULL;

        sched_data = e->ops.alloc_sched_data(q);
        return sched_data ?: ERR_PTR(-ENOMEM);
}

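/*
 * Counterpart to blk_mq_alloc_sched_data(). A minimal caller sketch for the
 * NULL/ERR_PTR() convention above (illustration only, not a prescribed
 * calling sequence):
 *
 *	data = blk_mq_alloc_sched_data(q, e);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	...
 *	blk_mq_free_sched_data(e, data);
 */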
static inline void blk_mq_free_sched_data(struct elevator_type *e, void *data)
{
        if (e && e->ops.free_sched_data)
                e->ops.free_sched_data(data);
}

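/*
 * Re-run the hardware queue if a scheduler restart was marked via
 * blk_mq_sched_mark_restart_hctx().
 */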
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                __blk_mq_sched_restart(hctx);
}

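/* A bio may be merged unless it carries any of the REQ_NOMERGE_FLAGS. */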
static inline bool bio_mergeable(struct bio *bio)
{
        return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}

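/*
 * Give the elevator a chance to veto merging @bio into @rq. Merging is
 * allowed by default when the request is not scheduler-managed or the
 * elevator provides no ->allow_merge() hook.
 */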
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                struct bio *bio)
{
        if (rq->rq_flags & RQF_USE_SCHED) {
                struct elevator_queue *e = q->elevator;

                if (e->type->ops.allow_merge)
                        return e->type->ops.allow_merge(q, rq, bio);
        }
        return true;
}

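/* Notify the elevator that a scheduler-managed request has completed. */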
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
        if (rq->rq_flags & RQF_USE_SCHED) {
                struct elevator_queue *e = rq->q->elevator;

                if (e->type->ops.completed_request)
                        e->type->ops.completed_request(rq, now);
        }
}

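/* Notify the elevator that a scheduler-managed request is being requeued. */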
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        if (rq->rq_flags & RQF_USE_SCHED) {
                struct request_queue *q = rq->q;
                struct elevator_queue *e = q->elevator;

                if (e->type->ops.requeue_request)
                        e->type->ops.requeue_request(rq);
        }
}

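/* Ask the elevator whether it still has requests queued for this hctx. */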
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.has_work)
                return e->type->ops.has_work(hctx);

        return false;
}

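/* True if a restart of this hardware queue has been requested. */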
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

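/*
 * Propagate a scheduler's minimum shallow depth to the sched_tags bitmaps
 * of every hardware queue.
 */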
static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
                unsigned int depth)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
                                depth);
}

#endif