Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block/mq-deadline: Switch back to a single dispatch list

Commit c807ab520fc3 ("block/mq-deadline: Add I/O priority support")
modified the behavior of request flag BLK_MQ_INSERT_AT_HEAD from
dispatching a request before other requests into dispatching a request
before other requests with the same I/O priority. This is not correct since
BLK_MQ_INSERT_AT_HEAD is used when requeuing requests and also when a flush
request is inserted. Both types of requests should be dispatched as soon
as possible. Hence, make the mq-deadline I/O scheduler again ignore the I/O
priority for BLK_MQ_INSERT_AT_HEAD requests.

Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Yu Kuai <yukuai@kernel.org>
Reported-by: chengkaitao <chengkaitao@kylinos.cn>
Closes: https://lore.kernel.org/linux-block/20251009155253.14611-1-pilgrimtao@gmail.com/
Fixes: c807ab520fc3 ("block/mq-deadline: Add I/O priority support")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Bart Van Assche and committed by
Jens Axboe
d60055cf 93a358af

+47 -60
+47 -60
block/mq-deadline.c
··· 71 71 * present on both sort_list[] and fifo_list[]. 72 72 */ 73 73 struct dd_per_prio { 74 - struct list_head dispatch; 75 74 struct rb_root sort_list[DD_DIR_COUNT]; 76 75 struct list_head fifo_list[DD_DIR_COUNT]; 77 76 /* Position of the most recently dispatched request. */ ··· 83 84 * run time data 84 85 */ 85 86 87 + struct list_head dispatch; 86 88 struct dd_per_prio per_prio[DD_PRIO_COUNT]; 87 89 88 90 /* Data direction of latest dispatched request. */ ··· 332 332 333 333 lockdep_assert_held(&dd->lock); 334 334 335 - if (!list_empty(&per_prio->dispatch)) { 336 - rq = list_first_entry(&per_prio->dispatch, struct request, 337 - queuelist); 338 - if (started_after(dd, rq, latest_start)) 339 - return NULL; 340 - list_del_init(&rq->queuelist); 341 - data_dir = rq_data_dir(rq); 342 - goto done; 343 - } 344 - 345 335 /* 346 336 * batches are currently reads XOR writes 347 337 */ ··· 411 421 */ 412 422 dd->batching++; 413 423 deadline_move_request(dd, per_prio, rq); 414 - done: 415 424 return dd_start_request(dd, data_dir, rq); 416 425 } 417 426 ··· 458 469 enum dd_prio prio; 459 470 460 471 spin_lock(&dd->lock); 472 + 473 + if (!list_empty(&dd->dispatch)) { 474 + rq = list_first_entry(&dd->dispatch, struct request, queuelist); 475 + list_del_init(&rq->queuelist); 476 + dd_start_request(dd, rq_data_dir(rq), rq); 477 + goto unlock; 478 + } 479 + 461 480 rq = dd_dispatch_prio_aged_requests(dd, now); 462 481 if (rq) 463 482 goto unlock; ··· 554 557 555 558 eq->elevator_data = dd; 556 559 560 + INIT_LIST_HEAD(&dd->dispatch); 557 561 for (prio = 0; prio <= DD_PRIO_MAX; prio++) { 558 562 struct dd_per_prio *per_prio = &dd->per_prio[prio]; 559 563 560 - INIT_LIST_HEAD(&per_prio->dispatch); 561 564 INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]); 562 565 INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]); 563 566 per_prio->sort_list[DD_READ] = RB_ROOT; ··· 661 664 trace_block_rq_insert(rq); 662 665 663 666 if (flags & BLK_MQ_INSERT_AT_HEAD) { 664 - list_add(&rq->queuelist, 
&per_prio->dispatch); 667 + list_add(&rq->queuelist, &dd->dispatch); 665 668 rq->fifo_time = jiffies; 666 669 } else { 667 670 deadline_add_rq_rb(per_prio, rq); ··· 728 731 729 732 static bool dd_has_work_for_prio(struct dd_per_prio *per_prio) 730 733 { 731 - return !list_empty_careful(&per_prio->dispatch) || 732 - !list_empty_careful(&per_prio->fifo_list[DD_READ]) || 734 + return !list_empty_careful(&per_prio->fifo_list[DD_READ]) || 733 735 !list_empty_careful(&per_prio->fifo_list[DD_WRITE]); 734 736 } 735 737 ··· 736 740 { 737 741 struct deadline_data *dd = hctx->queue->elevator->elevator_data; 738 742 enum dd_prio prio; 743 + 744 + if (!list_empty_careful(&dd->dispatch)) 745 + return true; 739 746 740 747 for (prio = 0; prio <= DD_PRIO_MAX; prio++) 741 748 if (dd_has_work_for_prio(&dd->per_prio[prio])) ··· 948 949 return 0; 949 950 } 950 951 951 - #define DEADLINE_DISPATCH_ATTR(prio) \ 952 - static void *deadline_dispatch##prio##_start(struct seq_file *m, \ 953 - loff_t *pos) \ 954 - __acquires(&dd->lock) \ 955 - { \ 956 - struct request_queue *q = m->private; \ 957 - struct deadline_data *dd = q->elevator->elevator_data; \ 958 - struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ 959 - \ 960 - spin_lock(&dd->lock); \ 961 - return seq_list_start(&per_prio->dispatch, *pos); \ 962 - } \ 963 - \ 964 - static void *deadline_dispatch##prio##_next(struct seq_file *m, \ 965 - void *v, loff_t *pos) \ 966 - { \ 967 - struct request_queue *q = m->private; \ 968 - struct deadline_data *dd = q->elevator->elevator_data; \ 969 - struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ 970 - \ 971 - return seq_list_next(v, &per_prio->dispatch, pos); \ 972 - } \ 973 - \ 974 - static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \ 975 - __releases(&dd->lock) \ 976 - { \ 977 - struct request_queue *q = m->private; \ 978 - struct deadline_data *dd = q->elevator->elevator_data; \ 979 - \ 980 - spin_unlock(&dd->lock); \ 981 - } \ 982 - \ 983 - static const struct 
seq_operations deadline_dispatch##prio##_seq_ops = { \ 984 - .start = deadline_dispatch##prio##_start, \ 985 - .next = deadline_dispatch##prio##_next, \ 986 - .stop = deadline_dispatch##prio##_stop, \ 987 - .show = blk_mq_debugfs_rq_show, \ 952 + static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos) 953 + __acquires(&dd->lock) 954 + { 955 + struct request_queue *q = m->private; 956 + struct deadline_data *dd = q->elevator->elevator_data; 957 + 958 + spin_lock(&dd->lock); 959 + return seq_list_start(&dd->dispatch, *pos); 988 960 } 989 961 990 - DEADLINE_DISPATCH_ATTR(0); 991 - DEADLINE_DISPATCH_ATTR(1); 992 - DEADLINE_DISPATCH_ATTR(2); 993 - #undef DEADLINE_DISPATCH_ATTR 962 + static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos) 963 + { 964 + struct request_queue *q = m->private; 965 + struct deadline_data *dd = q->elevator->elevator_data; 966 + 967 + return seq_list_next(v, &dd->dispatch, pos); 968 + } 969 + 970 + static void deadline_dispatch_stop(struct seq_file *m, void *v) 971 + __releases(&dd->lock) 972 + { 973 + struct request_queue *q = m->private; 974 + struct deadline_data *dd = q->elevator->elevator_data; 975 + 976 + spin_unlock(&dd->lock); 977 + } 978 + 979 + static const struct seq_operations deadline_dispatch_seq_ops = { 980 + .start = deadline_dispatch_start, 981 + .next = deadline_dispatch_next, 982 + .stop = deadline_dispatch_stop, 983 + .show = blk_mq_debugfs_rq_show, 984 + }; 994 985 995 986 #define DEADLINE_QUEUE_DDIR_ATTRS(name) \ 996 987 {#name "_fifo_list", 0400, \ ··· 1003 1014 {"batching", 0400, deadline_batching_show}, 1004 1015 {"starved", 0400, deadline_starved_show}, 1005 1016 {"async_depth", 0400, dd_async_depth_show}, 1006 - {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops}, 1007 - {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops}, 1008 - {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops}, 1017 + {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops}, 1009 1018 
{"owned_by_driver", 0400, dd_owned_by_driver_show}, 1010 1019 {"queued", 0400, dd_queued_show}, 1011 1020 {},