Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove unused parameter

The request_queue parameter of blk_mq_map_queue() is no longer used,
so remove it; the same applies to blk_get_flush_queue().

Signed-off-by: Guixin Liu <kanie@linux.alibaba.com>
Link: https://lore.kernel.org/r/20250312084722.129680-1-kanie@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Guixin Liu and committed by
Jens Axboe
61667cb6 7e76336e

+10 -13
+5 -5
block/blk-flush.c
··· 95 95 struct blk_flush_queue *fq, blk_opf_t flags); 96 96 97 97 static inline struct blk_flush_queue * 98 - blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) 98 + blk_get_flush_queue(struct blk_mq_ctx *ctx) 99 99 { 100 - return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; 100 + return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq; 101 101 } 102 102 103 103 static unsigned int blk_flush_cur_seq(struct request *rq) ··· 205 205 struct list_head *running; 206 206 struct request *rq, *n; 207 207 unsigned long flags = 0; 208 - struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); 208 + struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx); 209 209 210 210 /* release the tag's ownership to the req cloned from */ 211 211 spin_lock_irqsave(&fq->mq_flush_lock, flags); ··· 341 341 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 342 342 struct blk_mq_ctx *ctx = rq->mq_ctx; 343 343 unsigned long flags; 344 - struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); 344 + struct blk_flush_queue *fq = blk_get_flush_queue(ctx); 345 345 346 346 if (q->elevator) { 347 347 WARN_ON(rq->tag < 0); ··· 382 382 bool blk_insert_flush(struct request *rq) 383 383 { 384 384 struct request_queue *q = rq->q; 385 - struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); 385 + struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx); 386 386 bool supports_fua = q->limits.features & BLK_FEAT_FUA; 387 387 unsigned int policy = 0; 388 388
+1 -1
block/blk-mq-sched.c
··· 349 349 } 350 350 351 351 ctx = blk_mq_get_ctx(q); 352 - hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); 352 + hctx = blk_mq_map_queue(bio->bi_opf, ctx); 353 353 type = hctx->type; 354 354 if (list_empty_careful(&ctx->rq_lists[type])) 355 355 goto out_put;
+1 -2
block/blk-mq-tag.c
··· 190 190 sbitmap_finish_wait(bt, ws, &wait); 191 191 192 192 data->ctx = blk_mq_get_ctx(data->q); 193 - data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, 194 - data->ctx); 193 + data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx); 195 194 tags = blk_mq_tags_from_data(data); 196 195 if (data->flags & BLK_MQ_REQ_RESERVED) 197 196 bt = &tags->breserved_tags;
+1 -1
block/blk-mq.c
··· 508 508 509 509 retry: 510 510 data->ctx = blk_mq_get_ctx(q); 511 - data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); 511 + data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx); 512 512 513 513 if (q->elevator) { 514 514 /*
+1 -3
block/blk-mq.h
··· 100 100 101 101 /* 102 102 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue 103 - * @q: request queue 104 103 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED). 105 104 * @ctx: software queue cpu ctx 106 105 */ 107 - static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, 108 - blk_opf_t opf, 106 + static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf, 109 107 struct blk_mq_ctx *ctx) 110 108 { 111 109 return ctx->hctxs[blk_mq_get_hctx_type(opf)];
+1 -1
block/kyber-iosched.c
··· 568 568 unsigned int nr_segs) 569 569 { 570 570 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 571 - struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); 571 + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(bio->bi_opf, ctx); 572 572 struct kyber_hctx_data *khd = hctx->sched_data; 573 573 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; 574 574 unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);