Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block/null_blk: add queue_rqs() support

Add batched mq_ops.queue_rqs() support in null_blk for testing. The
implementation is much simpler since null_blk doesn't have commit_rqs().

We simply handle each request one by one; if errors are encountered, the
failed requests are left in the passed-in list and returned to the caller.

There is about 3.6% improvement in IOPS of fio/t/io_uring on null_blk
with hw_queue_depth=256 on my test VM, from 1.09M to 1.13M.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230913151616.3164338-6-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Chengming Zhou and committed by
Jens Axboe
d78bfa13 217b613a

+20
+20
drivers/block/null_blk/main.c
··· 1750 1750 return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); 1751 1751 } 1752 1752 1753 + static void null_queue_rqs(struct request **rqlist) 1754 + { 1755 + struct request *requeue_list = NULL; 1756 + struct request **requeue_lastp = &requeue_list; 1757 + struct blk_mq_queue_data bd = { }; 1758 + blk_status_t ret; 1759 + 1760 + do { 1761 + struct request *rq = rq_list_pop(rqlist); 1762 + 1763 + bd.rq = rq; 1764 + ret = null_queue_rq(rq->mq_hctx, &bd); 1765 + if (ret != BLK_STS_OK) 1766 + rq_list_add_tail(&requeue_lastp, rq); 1767 + } while (!rq_list_empty(*rqlist)); 1768 + 1769 + *rqlist = requeue_list; 1770 + } 1771 + 1753 1772 static void cleanup_queue(struct nullb_queue *nq) 1754 1773 { 1755 1774 bitmap_free(nq->tag_map); ··· 1821 1802 1822 1803 static const struct blk_mq_ops null_mq_ops = { 1823 1804 .queue_rq = null_queue_rq, 1805 + .queue_rqs = null_queue_rqs, 1824 1806 .complete = null_complete_rq, 1825 1807 .timeout = null_timeout_rq, 1826 1808 .poll = null_poll,