Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: revert 4f1e9630afe6 ("blk-throtl: optimize IOPS throttle for large IO scenarios")

Revert commit 4f1e9630afe6 ("blk-throtl: optimize IOPS throttle for large
IO scenarios") since we have another easier way to address this issue and
get better iops throttling result.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220216044514.2903784-9-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by: Ming Lei
Committed by: Jens Axboe
Commit: 34841e6f (parent: 5a93b602)

-33
-28
block/blk-throttle.c
··· 640 640 tg->bytes_disp[rw] = 0; 641 641 tg->io_disp[rw] = 0; 642 642 643 - atomic_set(&tg->io_split_cnt[rw], 0); 644 - 645 643 /* 646 644 * Previous slice has expired. We must have trimmed it after last 647 645 * bio dispatch. That means since start of last slice, we never used ··· 662 664 tg->io_disp[rw] = 0; 663 665 tg->slice_start[rw] = jiffies; 664 666 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; 665 - 666 - atomic_set(&tg->io_split_cnt[rw], 0); 667 667 668 668 throtl_log(&tg->service_queue, 669 669 "[%c] new slice start=%lu end=%lu jiffies=%lu", ··· 895 899 throtl_extend_slice(tg, rw, 896 900 jiffies + tg->td->throtl_slice); 897 901 } 898 - 899 - if (iops_limit != UINT_MAX) 900 - tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0); 901 902 902 903 if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) && 903 904 tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) { ··· 1920 1927 } 1921 1928 1922 1929 if (tg->iops[READ][LIMIT_LOW]) { 1923 - tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0); 1924 1930 iops = tg->last_io_disp[READ] * HZ / elapsed_time; 1925 1931 if (iops >= tg->iops[READ][LIMIT_LOW]) 1926 1932 tg->last_low_overflow_time[READ] = now; 1927 1933 } 1928 1934 1929 1935 if (tg->iops[WRITE][LIMIT_LOW]) { 1930 - tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0); 1931 1936 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; 1932 1937 if (iops >= tg->iops[WRITE][LIMIT_LOW]) 1933 1938 tg->last_low_overflow_time[WRITE] = now; ··· 2043 2052 { 2044 2053 } 2045 2054 #endif 2046 - 2047 - void blk_throtl_charge_bio_split(struct bio *bio) 2048 - { 2049 - struct blkcg_gq *blkg = bio->bi_blkg; 2050 - struct throtl_grp *parent = blkg_to_tg(blkg); 2051 - struct throtl_service_queue *parent_sq; 2052 - bool rw = bio_data_dir(bio); 2053 - 2054 - do { 2055 - if (!parent->has_rules[rw]) 2056 - break; 2057 - 2058 - atomic_inc(&parent->io_split_cnt[rw]); 2059 - atomic_inc(&parent->last_io_split_cnt[rw]); 2060 - 
2061 - parent_sq = parent->service_queue.parent_sq; 2062 - parent = sq_to_tg(parent_sq); 2063 - } while (parent); 2064 - } 2065 2055 2066 2056 bool __blk_throtl_bio(struct bio *bio) 2067 2057 {
-5
block/blk-throttle.h
··· 138 138 unsigned int bad_bio_cnt; /* bios exceeding latency threshold */ 139 139 unsigned long bio_cnt_reset_time; 140 140 141 - atomic_t io_split_cnt[2]; 142 - atomic_t last_io_split_cnt[2]; 143 - 144 141 struct blkg_rwstat stat_bytes; 145 142 struct blkg_rwstat stat_ios; 146 143 }; ··· 161 164 static inline int blk_throtl_init(struct request_queue *q) { return 0; } 162 165 static inline void blk_throtl_exit(struct request_queue *q) { } 163 166 static inline void blk_throtl_register_queue(struct request_queue *q) { } 164 - static inline void blk_throtl_charge_bio_split(struct bio *bio) { } 165 167 static inline bool blk_throtl_bio(struct bio *bio) { return false; } 166 168 #else /* CONFIG_BLK_DEV_THROTTLING */ 167 169 int blk_throtl_init(struct request_queue *q); 168 170 void blk_throtl_exit(struct request_queue *q); 169 171 void blk_throtl_register_queue(struct request_queue *q); 170 - void blk_throtl_charge_bio_split(struct bio *bio); 171 172 bool __blk_throtl_bio(struct bio *bio); 172 173 static inline bool blk_throtl_bio(struct bio *bio) 173 174 {