Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

blk-mq: refactor blk_mq_sched_assign_ioc

blk_mq_sched_assign_ioc now only handles the assignment of the ioc if
the scheduler needs it (bfq only at the moment). The call to the
per-request initializer is moved out so that it can be merged with
a similar call for the kyber I/O scheduler.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
44e8c2bf 9f210738

+17 -28
+4 -24
block/blk-mq-sched.c
··· 31 31 } 32 32 EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data); 33 33 34 - static void __blk_mq_sched_assign_ioc(struct request_queue *q, 35 - struct request *rq, 36 - struct bio *bio, 37 - struct io_context *ioc) 34 + void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio) 38 35 { 39 - struct elevator_queue *e = q->elevator; 36 + struct request_queue *q = rq->q; 37 + struct io_context *ioc = rq_ioc(bio); 40 38 struct io_cq *icq; 41 39 42 40 spin_lock_irq(q->queue_lock); ··· 46 48 if (!icq) 47 49 return; 48 50 } 49 - 50 - rq->elv.icq = icq; 51 - if (e && e->type->ops.mq.get_rq_priv && 52 - e->type->ops.mq.get_rq_priv(q, rq, bio)) { 53 - rq->elv.icq = NULL; 54 - return; 55 - } 56 - 57 - rq->rq_flags |= RQF_ELVPRIV; 58 51 get_io_context(icq->ioc); 59 - } 60 - 61 - void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq, 62 - struct bio *bio) 63 - { 64 - struct io_context *ioc; 65 - 66 - ioc = rq_ioc(bio); 67 - if (ioc) 68 - __blk_mq_sched_assign_ioc(q, rq, bio, ioc); 52 + rq->elv.icq = icq; 69 53 } 70 54 71 55 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
+1 -2
block/blk-mq-sched.h
··· 7 7 void blk_mq_sched_free_hctx_data(struct request_queue *q, 8 8 void (*exit)(struct blk_mq_hw_ctx *)); 9 9 10 - void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq, 11 - struct bio *bio); 10 + void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio); 12 11 13 12 void blk_mq_sched_request_inserted(struct request *rq); 14 13 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+12 -2
block/blk-mq.c
··· 315 315 316 316 if (!op_is_flush(op)) { 317 317 rq->elv.icq = NULL; 318 - if (e && e->type->icq_cache) 319 - blk_mq_sched_assign_ioc(q, rq, bio); 318 + if (e && e->type->ops.mq.get_rq_priv) { 319 + if (e->type->icq_cache && rq_ioc(bio)) 320 + blk_mq_sched_assign_ioc(rq, bio); 321 + 322 + if (e->type->ops.mq.get_rq_priv(q, rq, bio)) { 323 + if (rq->elv.icq) 324 + put_io_context(rq->elv.icq->ioc); 325 + rq->elv.icq = NULL; 326 + } else { 327 + rq->rq_flags |= RQF_ELVPRIV; 328 + } 329 + } 320 330 } 321 331 data->hctx->queued++; 322 332 return rq;