/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;
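
/*
 * Static key gating all of the rq_qos hooks: while it is disabled, the
 * inline wrappers below reduce to a patched-out branch, so queues with no
 * QoS policy attached pay essentially nothing for these call sites.
 */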
extern struct static_key_false block_rq_qos;

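/*
 * Identifiers for the QoS policies that can be attached to a queue: wbt
 * (writeback throttling), blk-iolatency and blk-iocost.
 */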
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};

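/*
 * A waitqueue paired with an in-flight counter; policies park submitters
 * here until the in-flight count drops back below their current limit.
 */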
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

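/*
 * One attached policy instance.  All instances for a disk are kept on a
 * singly-linked list off q->rq_qos and are walked by the hooks below.
 */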
struct rq_qos {
	const struct rq_qos_ops *ops;
	struct gendisk *disk;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

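/*
 * Per-policy callbacks: throttle is called at bio submission, track and
 * merge when a bio is attached to or merged into a request, issue, requeue
 * and done around request dispatch and completion, done_bio at bio
 * completion, cleanup when a throttled bio is aborted before it gets a
 * request, queue_depth_changed when the device queue depth changes, and
 * exit at policy teardown.
 */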
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

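/* Queue-depth scaling state, used by policies such as wbt. */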
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

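/* Return the attached policy with the given id, or NULL if none. */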
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

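/* Shorthand lookups for the two policies other kernel code pokes at. */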
static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *iolat_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
		const struct rq_qos_ops *ops);
void rq_qos_del(struct rq_qos *rqos);

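/*
 * A policy typically embeds a struct rq_qos, fills in a struct rq_qos_ops
 * and attaches itself with rq_qos_add().  A minimal sketch, assuming a
 * valid gendisk and reusing an existing id purely for illustration (all
 * "example_*" names are hypothetical, not part of this API):
 *
 *	struct example_qos {
 *		struct rq_qos rqos;
 *	};
 *
 *	static void example_exit(struct rq_qos *rqos)
 *	{
 *		kfree(container_of(rqos, struct example_qos, rqos));
 *	}
 *
 *	static const struct rq_qos_ops example_ops = {
 *		.exit = example_exit,
 *	};
 *
 *	int example_init(struct gendisk *disk)
 *	{
 *		struct example_qos *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 *		int ret;
 *
 *		if (!eq)
 *			return -ENOMEM;
 *		ret = rq_qos_add(&eq->rqos, disk, RQ_QOS_WBT, &example_ops);
 *		if (ret)
 *			kfree(eq);
 *		return ret;
 *	}
 */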
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

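/*
 * rq_qos_wait() parks the caller on @rqw until @acquire_inflight_cb
 * succeeds in claiming an in-flight slot; @cleanup_cb releases a slot
 * handed to a waiter that no longer needs it.  A sketch of an acquire
 * callback ("example_limit" is a hypothetical per-policy depth limit):
 *
 *	static bool example_acquire(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, example_limit);
 *	}
 */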
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

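/*
 * Out-of-line slow paths.  Callers should use the rq_qos_*() wrappers
 * below, which bail out cheaply when no policy is attached.
 */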
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
	    !blk_rq_is_passthrough(rq))
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

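/*
 * Completion side: only bios that were flagged by rq_qos_throttle() or
 * rq_qos_merge() need a done_bio callback, so the flags are checked
 * before dereferencing the queue.
 */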
static inline void rq_qos_done_bio(struct bio *bio)
{
	if (static_branch_unlikely(&block_rq_qos) &&
	    bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}

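/*
 * Submission side: mark the bio so that rq_qos_done_bio() knows a policy
 * has seen it.
 */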
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif