Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

block/blk-rq-qos.h at v5.4-rc3 (210 lines, 5.1 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};

/* A wait queue plus the inflight count it guards. */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

/* One policy's node in a request_queue's singly linked rq_qos chain. */
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;
	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				q->rq_qos = rqos->next;	/* rqos was the head */
			break;
		}
		prev = cur;
	}

	blk_mq_debugfs_unregister_rqos(rqos);
}

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif
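
The two sketches below are editorial additions, not part of blk-rq-qos.h or the kernel tree. This first one shows how a toy policy would plug into the hooks above: it embeds struct rq_qos in a private structure, fills in only the hooks it needs (the __rq_qos_* dispatchers in blk-rq-qos.c skip NULL entries), and pushes itself onto the queue's chain with rq_qos_add(). It assumes it is compiled inside block/ so that "blk-rq-qos.h" is reachable, and every demo_* identifier is invented for illustration.

/* Hypothetical example; demo_* names are not in the kernel. */
#include <linux/slab.h>
#include "blk-rq-qos.h"

struct demo_rqos {
	struct rq_qos rqos;		/* embedded chain node */
	atomic_t issued;		/* requests seen by ->issue */
};

static struct demo_rqos *demo_from_rqos(struct rq_qos *rqos)
{
	return container_of(rqos, struct demo_rqos, rqos);
}

static void demo_issue(struct rq_qos *rqos, struct request *rq)
{
	atomic_inc(&demo_from_rqos(rqos)->issued);
}

static void demo_exit(struct rq_qos *rqos)
{
	kfree(demo_from_rqos(rqos));
}

static struct rq_qos_ops demo_ops = {
	.issue	= demo_issue,		/* unset hooks are simply skipped */
	.exit	= demo_exit,		/* invoked during rq_qos_exit() */
};

static int demo_qos_attach(struct request_queue *q)
{
	struct demo_rqos *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	d->rqos.ops = &demo_ops;
	d->rqos.q = q;
	d->rqos.id = RQ_QOS_WBT;	/* ids are fixed; a real policy owns one */
	atomic_set(&d->issued, 0);

	rq_qos_add(q, &d->rqos);	/* LIFO push onto q->rq_qos */
	return 0;
}

Teardown is symmetric: rq_qos_del() unlinks a single node, while rq_qos_exit() in blk-rq-qos.c pops every node off q->rq_qos and calls its ->exit hook.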
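
The second sketch covers rq_qos_wait() and rq_wait_inc_below(), which together implement the sleep-until-a-slot-frees pattern shared by wbt and blk-iolatency. Again all demo_* names are invented, and the race handling described here follows the v5.4 rq_qos_wait() implementation in blk-rq-qos.c: the acquire callback must take a slot without sleeping, and the cleanup callback must return the duplicate slot that can result when a waker hands the sleeper a slot it had also just acquired itself.

/* Hypothetical example; demo_* names are not in the kernel. */
#include "blk-rq-qos.h"

struct demo_throttle {
	struct rq_wait rqw;		/* inflight counter + waitqueue */
	unsigned int limit;		/* max concurrent requests */
};

static void demo_throttle_init(struct demo_throttle *t, unsigned int limit)
{
	rq_wait_init(&t->rqw);
	t->limit = limit;
}

/* Non-blocking attempt: take a slot iff inflight stays below the limit. */
static bool demo_acquire(struct rq_wait *rqw, void *private_data)
{
	struct demo_throttle *t = private_data;

	return rq_wait_inc_below(rqw, t->limit);
}

/* Drop the extra slot taken when the waker and the sleeper race. */
static void demo_cleanup(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
}

/* Called from a policy's ->throttle hook; may sleep. */
static void demo_throttle(struct demo_throttle *t)
{
	rq_qos_wait(&t->rqw, t, demo_acquire, demo_cleanup);
}

/* Called from ->done: release a slot and wake any waiters. */
static void demo_done(struct demo_throttle *t)
{
	atomic_dec(&t->rqw.inflight);
	if (wq_has_sleeper(&t->rqw.wait))
		wake_up_all(&t->rqw.wait);
}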