Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

blk-cgroup: pass a gendisk to blkcg_schedule_throttle

Pass the gendisk to blkcg_schedule_throttle as part of moving the
blk-cgroup infrastructure to be gendisk based. Remove the unused
!BLK_CGROUP stub while we're at it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220921180501.1539876-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe.
de185b56 00ad6991

+11 -10
+5 -3
block/blk-cgroup.c
··· 1792 1792 1793 1793 /** 1794 1794 * blkcg_schedule_throttle - this task needs to check for throttling 1795 - * @q: the request queue IO was submitted on 1795 + * @disk: disk to throttle 1796 1796 * @use_memdelay: do we charge this to memory delay for PSI 1797 1797 * 1798 1798 * This is called by the IO controller when we know there's delay accumulated 1799 1799 * for the blkg for this task. We do not pass the blkg because there are places 1800 1800 * we call this that may not have that information, the swapping code for 1801 - * instance will only have a request_queue at that point. This set's the 1801 + * instance will only have a block_device at that point. This set's the 1802 1802 * notify_resume for the task to check and see if it requires throttling before 1803 1803 * returning to user space. 1804 1804 * ··· 1807 1807 * throttle once. If the task needs to be throttled again it'll need to be 1808 1808 * re-set at the next time we see the task. 1809 1809 */ 1810 - void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) 1810 + void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay) 1811 1811 { 1812 + struct request_queue *q = disk->queue; 1813 + 1812 1814 if (unlikely(current->flags & PF_KTHREAD)) 1813 1815 return; 1814 1816
+2 -2
block/blk-iocost.c
··· 2636 2636 if (use_debt) { 2637 2637 iocg_incur_debt(iocg, abs_cost, &now); 2638 2638 if (iocg_kick_delay(iocg, &now)) 2639 - blkcg_schedule_throttle(rqos->q, 2639 + blkcg_schedule_throttle(rqos->q->disk, 2640 2640 (bio->bi_opf & REQ_SWAP) == REQ_SWAP); 2641 2641 iocg_unlock(iocg, ioc_locked, &flags); 2642 2642 return; ··· 2737 2737 if (likely(!list_empty(&iocg->active_list))) { 2738 2738 iocg_incur_debt(iocg, abs_cost, &now); 2739 2739 if (iocg_kick_delay(iocg, &now)) 2740 - blkcg_schedule_throttle(rqos->q, 2740 + blkcg_schedule_throttle(rqos->q->disk, 2741 2741 (bio->bi_opf & REQ_SWAP) == REQ_SWAP); 2742 2742 } else { 2743 2743 iocg_commit_bio(iocg, bio, abs_cost, cost);
+1 -1
block/blk-iolatency.c
··· 292 292 unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay); 293 293 294 294 if (use_delay) 295 - blkcg_schedule_throttle(rqos->q, use_memdelay); 295 + blkcg_schedule_throttle(rqos->q->disk, use_memdelay); 296 296 297 297 /* 298 298 * To avoid priority inversions we want to just take a slot if we are
+2 -3
include/linux/blk-cgroup.h
··· 18 18 19 19 struct bio; 20 20 struct cgroup_subsys_state; 21 - struct request_queue; 21 + struct gendisk; 22 22 23 23 #define FC_APPID_LEN 129 24 24 25 25 #ifdef CONFIG_BLK_CGROUP 26 26 extern struct cgroup_subsys_state * const blkcg_root_css; 27 27 28 - void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); 28 + void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay); 29 29 void blkcg_maybe_throttle_current(void); 30 30 bool blk_cgroup_congested(void); 31 31 void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css); ··· 39 39 40 40 static inline void blkcg_maybe_throttle_current(void) { } 41 41 static inline bool blk_cgroup_congested(void) { return false; } 42 - static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { } 43 42 static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio) 44 43 { 45 44 return NULL;
+1 -1
mm/swapfile.c
··· 3655 3655 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], 3656 3656 avail_lists[nid]) { 3657 3657 if (si->bdev) { 3658 - blkcg_schedule_throttle(bdev_get_queue(si->bdev), true); 3658 + blkcg_schedule_throttle(si->bdev->bd_disk, true); 3659 3659 break; 3660 3660 } 3661 3661 }