/* include/linux/blk-mq.h, as of Linux v4.8 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int size;
	unsigned int bits_per_word;
	struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t lock;
		struct list_head dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long state;		/* BLK_MQ_S_* flags */
	struct delayed_work run_work;
	struct delayed_work delay_work;
	cpumask_var_t cpumask;
	int next_cpu;
	int next_cpu_batch;

	unsigned long flags;		/* BLK_MQ_F_* flags */

	struct request_queue *queue;
	struct blk_flush_queue *fq;

	void *driver_data;

	struct blk_mq_ctxmap ctx_map;

	unsigned int nr_ctx;
	struct blk_mq_ctx **ctxs;	/* software queues mapped to this hw queue */

	atomic_t wait_index;

	struct blk_mq_tags *tags;

	unsigned long queued;		/* dispatch statistics */
	unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 10
	unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int numa_node;
	unsigned int queue_num;

	atomic_t nr_active;		/* active requests, for shared-tag fairness */

	struct blk_mq_cpu_notifier cpu_notifier;
	struct kobject kobj;

	unsigned long poll_invoked;	/* poll statistics */
	unsigned long poll_success;
};

struct blk_mq_tag_set {
	struct blk_mq_ops *ops;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;	/* max hw supported */
	unsigned int reserved_tags;
	unsigned int cmd_size;		/* per-request extra data */
	int numa_node;
	unsigned int timeout;
	unsigned int flags;		/* BLK_MQ_F_* */
	void *driver_data;

	struct blk_mq_tags **tags;	/* one tag struct per hardware queue */

	struct mutex tag_list_lock;
	struct list_head tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn *queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn *map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn *timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn *poll;

	softirq_done_fn *complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn *init_hctx;
	exit_hctx_fn *exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth is for setting up the
	 * flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn *init_request;
	exit_request_fn *exit_request;
	reinit_request_fn *reinit_request;
};
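/*
 * Illustrative sketch, not part of this header: roughly how a driver's .c
 * file might fill in a minimal blk_mq_ops table against this v4.8 API. The
 * "my_*" names are hypothetical; a real driver would hand the request to
 * hardware in ->queue_rq and complete it later from its IRQ path, rather
 * than completing it inline as this toy version does.
 */
#if 0	/* example only */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);	/* must precede completion/timeout handling */

	/* ... issue rq to hardware here ... */
	blk_mq_end_request(rq, 0);	/* toy driver: complete immediately, no error */

	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* stock mapping helper, declared below */
};
#endif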
enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	BLK_MQ_REQ_NOWAIT	= (1 << 0), /* return when out of requests */
	BLK_MQ_REQ_RESERVED	= (1 << 1), /* allocate from reserved pool */
};

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
		unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
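/*
 * Illustrative sketch, not part of this header: blk_mq_unique_tag() packs
 * the hardware queue index into the upper 16 bits and the per-queue tag
 * into the lower 16 bits; the two inline helpers above undo that packing.
 * The "my_*" name is hypothetical.
 */
#if 0	/* example only */
static void my_log_tag(struct request *rq)
{
	u32 unique = blk_mq_unique_tag(rq);

	pr_debug("hwq %u, tag %u\n",
		 blk_mq_unique_tag_to_hwq(unique),
		 blk_mq_unique_tag_to_tag(unique));
}
#endif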
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#endif
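/*
 * Illustrative sketch, not part of this header: how blk_mq_tag_set.cmd_size
 * and the PDU helpers above fit together. Setting cmd_size reserves that
 * many bytes directly behind every struct request, and blk_mq_rq_to_pdu()
 * returns a pointer to that area. The "my_*" names are hypothetical;
 * my_mq_ops refers to the earlier ops sketch.
 */
#if 0	/* example only */
struct my_cmd {
	int status;			/* driver-private per-request state */
};

static struct blk_mq_tag_set my_set = {
	.ops		= &my_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
	.cmd_size	= sizeof(struct my_cmd),	/* per-request extra data */
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

static struct request_queue *my_init(void)
{
	struct request_queue *q;
	int ret;

	ret = blk_mq_alloc_tag_set(&my_set);	/* allocates tags + PDU space */
	if (ret)
		return ERR_PTR(ret);

	q = blk_mq_init_queue(&my_set);		/* returns ERR_PTR() on failure */
	if (IS_ERR(q))
		blk_mq_free_tag_set(&my_set);
	return q;
}

static void my_complete(struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* i.e. rq + 1 */

	blk_mq_end_request(rq, cmd->status);
}
#endif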