Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm cache policy smq: ensure IO doesn't prevent cleaner policy progress

When using the cleaner policy to decommission the cache, there is
never any writeback started from the cache as it is constantly delayed
due to normal I/O keeping the device busy, meaning @idle=false was
always being passed to clean_target_met().

Fix this by adding a specific 'cleaner' flag that is set when the
cleaner policy is configured. This flag serves to always allow the
cleaner's writeback work to be queued until the cache is
decommissioned (even if the cache isn't idle).

Reported-by: David Jeffery <djeffery@redhat.com>
Fixes: b29d4986d0da ("dm cache: significant rework to leverage dm-bio-prison-v2")
Cc: stable@vger.kernel.org
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>

Authored by Joe Thornber; committed by Mike Snitzer.
Commit: 1e4ab7b4 (parent: 7d5fff89)

+18 -10
drivers/md/dm-cache-policy-smq.c
@@ -857,7 +857,13 @@ struct smq_policy {

 	struct background_tracker *bg_work;

-	bool migrations_allowed;
+	bool migrations_allowed:1;
+
+	/*
+	 * If this is set the policy will try and clean the whole cache
+	 * even if the device is not idle.
+	 */
+	bool cleaner:1;
 };

 /*----------------------------------------------------------------*/
@@ -1144,7 +1138,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 	 * Cache entries may not be populated. So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	if (idle) {
+	if (idle || mq->cleaner) {
 		/*
 		 * We'd like to clean everything.
 		 */
@@ -1728,11 +1722,9 @@ static void calc_hotspot_params(...)
 	*hotspot_block_size /= 2u;
 }

-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
-					    sector_t origin_size,
-					    sector_t cache_block_size,
-					    bool mimic_mq,
-					    bool migrations_allowed)
+static struct dm_cache_policy *
+__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
+	     bool mimic_mq, bool migrations_allowed, bool cleaner)
 {
 	unsigned int i;
 	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1817,6 +1813,7 @@
 		goto bad_btracker;

 	mq->migrations_allowed = migrations_allowed;
+	mq->cleaner = cleaner;

 	return &mq->policy;
@@ -1841,14 +1836,16 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
 					  sector_t origin_size,
 					  sector_t cache_block_size)
 {
-	return __smq_create(cache_size, origin_size, cache_block_size, false, true);
+	return __smq_create(cache_size, origin_size, cache_block_size,
+			    false, true, false);
 }

 static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
 					 sector_t origin_size,
 					 sector_t cache_block_size)
 {
-	return __smq_create(cache_size, origin_size, cache_block_size, true, true);
+	return __smq_create(cache_size, origin_size, cache_block_size,
+			    true, true, false);
 }

 static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
 					      sector_t origin_size,
 					      sector_t cache_block_size)
 {
-	return __smq_create(cache_size, origin_size, cache_block_size, false, false);
+	return __smq_create(cache_size, origin_size, cache_block_size,
+			    false, false, true);
 }

 /*----------------------------------------------------------------*/