Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm cache: age and write back cache entries even without active IO

The policy tick() method is normally called from interrupt context.
Both the mq and smq policies do some bottom half work for the tick
method in their map functions. However if no IO is going through the
cache, then that bottom half work doesn't occur. With these policies
this means recently hit entries do not age and do not get written
back as early as we'd like.

Fix this by introducing a new 'can_block' parameter to the tick()
method. When this is set the bottom half work occurs immediately.
'can_block' is set when the tick method is called every second by the
core target (not in interrupt context).

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Joe Thornber and committed by Mike Snitzer
fba10109 b61d9509

+20 -8
+2 -2
drivers/md/dm-cache-policy-internal.h
@@ -83,10 +83,10 @@
 	return p->residency(p);
 }
 
-static inline void policy_tick(struct dm_cache_policy *p)
+static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
 {
 	if (p->tick)
-		return p->tick(p);
+		return p->tick(p, can_block);
 }
 
 static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
+7 -1
drivers/md/dm-cache-policy-mq.c
@@ -1283,7 +1283,7 @@
 	return r;
 }
 
-static void mq_tick(struct dm_cache_policy *p)
+static void mq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct mq_policy *mq = to_mq_policy(p);
 	unsigned long flags;
@@ -1291,6 +1291,12 @@
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 static int mq_set_config_value(struct dm_cache_policy *p,
+7 -1
drivers/md/dm-cache-policy-smq.c
@@ -1581,7 +1581,7 @@
 	return r;
 }
 
-static void smq_tick(struct dm_cache_policy *p)
+static void smq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct smq_policy *mq = to_smq_policy(p);
 	unsigned long flags;
@@ -1589,6 +1589,12 @@
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 /* Init the policy plugin interface function pointers. */
+2 -2
drivers/md/dm-cache-policy.h
@@ -200,10 +200,10 @@
 	 * Because of where we sit in the block layer, we can be asked to
 	 * map a lot of little bios that are all in the same block (no
 	 * queue merging has occurred). To stop the policy being fooled by
-	 * these the core target sends regular tick() calls to the policy.
+	 * these, the core target sends regular tick() calls to the policy.
 	 * The policy should only count an entry as hit once per tick.
 	 */
-	void (*tick)(struct dm_cache_policy *p);
+	void (*tick)(struct dm_cache_policy *p, bool can_block);
 
 	/*
 	 * Configuration.
+2 -2
drivers/md/dm-cache-target.c
@@ -2271,7 +2271,7 @@
 static void do_waker(struct work_struct *ws)
 {
 	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
-	policy_tick(cache->policy);
+	policy_tick(cache->policy, true);
 	wake_worker(cache);
 	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -3148,7 +3148,7 @@
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	if (pb->tick) {
-		policy_tick(cache->policy);
+		policy_tick(cache->policy, false);
 
 		spin_lock_irqsave(&cache->lock, flags);
 		cache->need_tick_bio = true;