Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

blk-mq: remove __blk_mq_alloc_request

Move most code into blk_mq_rq_ctx_init, and the rest into
blk_mq_get_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
e4cdf1a1 5bbf4e5a

+27 -47
+27 -41
block/blk-mq.c
··· 204 204 } 205 205 EXPORT_SYMBOL(blk_mq_can_queue); 206 206 207 - static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, 208 - struct request *rq, unsigned int op) 207 + static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, 208 + unsigned int tag, unsigned int op) 209 209 { 210 + struct blk_mq_tags *tags = blk_mq_tags_from_data(data); 211 + struct request *rq = tags->static_rqs[tag]; 212 + 213 + if (data->flags & BLK_MQ_REQ_INTERNAL) { 214 + rq->tag = -1; 215 + rq->internal_tag = tag; 216 + } else { 217 + if (blk_mq_tag_busy(data->hctx)) { 218 + rq->rq_flags = RQF_MQ_INFLIGHT; 219 + atomic_inc(&data->hctx->nr_active); 220 + } 221 + rq->tag = tag; 222 + rq->internal_tag = -1; 223 + data->hctx->tags->rqs[rq->tag] = rq; 224 + } 225 + 210 226 INIT_LIST_HEAD(&rq->queuelist); 211 227 /* csd/requeue_work/fifo_time is initialized before use */ 212 - rq->q = q; 213 - rq->mq_ctx = ctx; 228 + rq->q = data->q; 229 + rq->mq_ctx = data->ctx; 214 230 rq->cmd_flags = op; 215 - if (blk_queue_io_stat(q)) 231 + if (blk_queue_io_stat(data->q)) 216 232 rq->rq_flags |= RQF_IO_STAT; 217 233 /* do not touch atomic flags, it needs atomic ops against the timer */ 218 234 rq->cpu = -1; ··· 257 241 rq->end_io_data = NULL; 258 242 rq->next_rq = NULL; 259 243 260 - ctx->rq_dispatched[op_is_sync(op)]++; 244 + data->ctx->rq_dispatched[op_is_sync(op)]++; 245 + return rq; 261 246 } 262 - 263 - struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data, 264 - unsigned int op) 265 - { 266 - struct request *rq; 267 - unsigned int tag; 268 - 269 - tag = blk_mq_get_tag(data); 270 - if (tag != BLK_MQ_TAG_FAIL) { 271 - struct blk_mq_tags *tags = blk_mq_tags_from_data(data); 272 - 273 - rq = tags->static_rqs[tag]; 274 - 275 - if (data->flags & BLK_MQ_REQ_INTERNAL) { 276 - rq->tag = -1; 277 - rq->internal_tag = tag; 278 - } else { 279 - if (blk_mq_tag_busy(data->hctx)) { 280 - rq->rq_flags = RQF_MQ_INFLIGHT; 281 - atomic_inc(&data->hctx->nr_active); 
282 - } 283 - rq->tag = tag; 284 - rq->internal_tag = -1; 285 - data->hctx->tags->rqs[rq->tag] = rq; 286 - } 287 - 288 - blk_mq_rq_ctx_init(data->q, data->ctx, rq, op); 289 - return rq; 290 - } 291 - 292 - return NULL; 293 - } 294 - EXPORT_SYMBOL_GPL(__blk_mq_alloc_request); 295 247 296 248 static struct request *blk_mq_get_request(struct request_queue *q, 297 249 struct bio *bio, unsigned int op, ··· 267 283 { 268 284 struct elevator_queue *e = q->elevator; 269 285 struct request *rq; 286 + unsigned int tag; 270 287 271 288 blk_queue_enter_live(q); 272 289 data->q = q; ··· 287 302 e->type->ops.mq.limit_depth(op, data); 288 303 } 289 304 290 - rq = __blk_mq_alloc_request(data, op); 291 - if (!rq) { 305 + tag = blk_mq_get_tag(data); 306 + if (tag == BLK_MQ_TAG_FAIL) { 292 307 blk_queue_exit(q); 293 308 return NULL; 294 309 } 295 310 311 + rq = blk_mq_rq_ctx_init(data, tag, op); 296 312 if (!op_is_flush(op)) { 297 313 rq->elv.icq = NULL; 298 314 if (e && e->type->ops.mq.prepare_request) {
-6
block/blk-mq.h
··· 128 128 return data->hctx->tags; 129 129 } 130 130 131 - /* 132 - * Internal helpers for request allocation/init/free 133 - */ 134 - struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data, 135 - unsigned int op); 136 - 137 131 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) 138 132 { 139 133 return test_bit(BLK_MQ_S_STOPPED, &hctx->state);