Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: plug attempts to batch allocate tags multiple times

This patch enables batch allocation of a sufficient number of tags after
batch IO submission via the plug mechanism, thereby avoiding frequent
individual tag-allocation requests when the initial batch allocation is
insufficient.
-----------------------------------------------------------
HW:
16 CPUs/16 poll queues
Disk: Samsung PM9A3 Gen4 3.84T

CMD:
[global]
ioengine=io_uring
group_reporting=1
time_based=1
runtime=1m
refill_buffers=1
norandommap=1
randrepeat=0
fixedbufs=1
registerfiles=1
rw=randread
iodepth=128
iodepth_batch_submit=32
iodepth_batch_complete_min=32
iodepth_batch_complete_max=128
iodepth_low=32
bs=4k
numjobs=1
direct=1
hipri=1

[job1]
filename=/dev/nvme0n1
name=batch_test
------------------------------------------------------------
Perf:
base code: __blk_mq_alloc_requests() 1.47%
patch: __blk_mq_alloc_requests() 0.75%
------------------------------------------------------------

Signed-off-by: hexue <xue01.he@samsung.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Xue He and committed by Jens Axboe
152c331b f43fdeb9

+19 -14
+19 -14
block/blk-mq.c
··· 468 468 unsigned long tag_mask; 469 469 int i, nr = 0; 470 470 471 - tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset); 472 - if (unlikely(!tag_mask)) 473 - return NULL; 471 + do { 472 + tag_mask = blk_mq_get_tags(data, data->nr_tags - nr, &tag_offset); 473 + if (unlikely(!tag_mask)) { 474 + if (nr == 0) 475 + return NULL; 476 + break; 477 + } 478 + tags = blk_mq_tags_from_data(data); 479 + for (i = 0; tag_mask; i++) { 480 + if (!(tag_mask & (1UL << i))) 481 + continue; 482 + tag = tag_offset + i; 483 + prefetch(tags->static_rqs[tag]); 484 + tag_mask &= ~(1UL << i); 485 + rq = blk_mq_rq_ctx_init(data, tags, tag); 486 + rq_list_add_head(data->cached_rqs, rq); 487 + nr++; 488 + } 489 + } while (data->nr_tags > nr); 474 490 475 - tags = blk_mq_tags_from_data(data); 476 - for (i = 0; tag_mask; i++) { 477 - if (!(tag_mask & (1UL << i))) 478 - continue; 479 - tag = tag_offset + i; 480 - prefetch(tags->static_rqs[tag]); 481 - tag_mask &= ~(1UL << i); 482 - rq = blk_mq_rq_ctx_init(data, tags, tag); 483 - rq_list_add_head(data->cached_rqs, rq); 484 - nr++; 485 - } 486 491 if (!(data->rq_flags & RQF_SCHED_TAGS)) 487 492 blk_mq_add_active_requests(data->hctx, nr); 488 493 /* caller already holds a reference, add for remainder */