Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

include/linux/blk-mq.h at v4.10-rc1 (269 lines, 7.9 kB)
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct work_struct	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct sbitmap		ctx_map;

	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx;

	atomic_t		wait_index;

	struct blk_mq_tags	*tags;

	struct srcu_struct	queue_rq_srcu;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct delayed_work	delay_work;

	struct hlist_node	cpuhp_dead;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;
};

struct blk_mq_tag_set {
	unsigned int		*mq_map;
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);


struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * Tag greater than or equal to queue_depth is for setting up
	 * flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	reinit_request_fn	*reinit_request;

	map_queues_fn		*map_queues;
};

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	BLK_MQ_REQ_NOWAIT	= (1 << 0), /* return when out of requests */
	BLK_MQ_REQ_RESERVED	= (1 << 1), /* allocate from reserved pool */
};

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
		unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}


int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#endif
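
As a reading aid: the header above is the whole driver-facing contract, and a driver of the v4.10 era wires into it by filling in a blk_mq_tag_set, providing a blk_mq_ops with at least ->queue_rq, and then creating the queue. The following minimal sketch is not from the kernel tree; the my_* names are hypothetical, and a real ->queue_rq would submit the request to hardware and complete it later from an interrupt path rather than immediately.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Hypothetical ->queue_rq: hand one request to "hardware". For the
 * sketch we complete it immediately; a real driver would complete it
 * asynchronously, e.g. via blk_mq_complete_request() from its IRQ
 * handler, paired with a ->complete callback. */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	blk_mq_end_request(rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
};

static struct blk_mq_tag_set my_tag_set;

static int my_driver_init(void)
{
	struct request_queue *q;
	int ret;

	my_tag_set.ops		= &my_mq_ops;
	my_tag_set.nr_hw_queues	= 1;
	my_tag_set.queue_depth	= 64;
	my_tag_set.numa_node	= NUMA_NO_NODE;
	my_tag_set.cmd_size	= 0;	/* no per-request PDU in this sketch */
	my_tag_set.flags	= BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&my_tag_set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(&my_tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&my_tag_set);
		return PTR_ERR(q);
	}

	/* q would then be attached to a gendisk and the disk added. */
	return 0;
}

Note the error-handling contract visible in the prototypes: blk_mq_init_queue() returns an ERR_PTR on failure, and the tag set must be released with blk_mq_free_tag_set() once no queue references it.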
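The PDU helpers at the bottom of the header are worth one more illustration. Because the per-command payload (cmd_size bytes, set in the tag set) is allocated immediately behind each struct request, blk_mq_rq_to_pdu() and blk_mq_rq_from_pdu() are pure pointer arithmetic and need no lookup table. A hedged sketch, again with hypothetical my_* names; my_init_request would be hooked up as blk_mq_ops.init_request:

/* Assumed per-command structure; sizeof(struct my_cmd) would be
 * assigned to tag_set.cmd_size before blk_mq_alloc_tag_set(). */
struct my_cmd {
	int status;
};

/* ->init_request: called once per preallocated request at tag-set
 * setup time, letting the driver initialize its PDU. */
static int my_init_request(void *data, struct request *rq,
			   unsigned int hctx_idx, unsigned int rq_idx,
			   unsigned int numa_node)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = 0;
	return 0;
}

/* Completion path: the owning request is recovered from the PDU. */
static void my_complete_cmd(struct my_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	blk_mq_end_request(rq, cmd->status);
}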