Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

GFS2: Alter point of entry to glock lru list for glocks with an address_space

Rather than allowing the glocks to be scheduled for possible
reclaim as soon as they have exited the journal, this patch
delays their entry to the list until the glocks in question
are no longer in use.

This means that we will rely on the VM for writeback of all
dirty data and metadata from now on. When glocks are added
to the LRU list they should be freeable much faster, since all
the I/O required to free them should already have been completed.

This should lead to much better I/O patterns under low memory
conditions.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>

+23 -27
+15 -18
fs/gfs2/glock.c
··· 160 160 } 161 161 162 162 163 + void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 164 + { 165 + spin_lock(&lru_lock); 166 + 167 + if (!list_empty(&gl->gl_lru)) 168 + list_del_init(&gl->gl_lru); 169 + else 170 + atomic_inc(&lru_count); 171 + 172 + list_add_tail(&gl->gl_lru, &lru_list); 173 + spin_unlock(&lru_lock); 174 + } 175 + 163 176 /** 164 177 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list 165 178 * @gl: the glock ··· 183 170 184 171 static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) 185 172 { 186 - if (demote_ok(gl)) { 187 - spin_lock(&lru_lock); 188 - 189 - if (!list_empty(&gl->gl_lru)) 190 - list_del_init(&gl->gl_lru); 191 - else 192 - atomic_inc(&lru_count); 193 - 194 - list_add_tail(&gl->gl_lru, &lru_list); 195 - spin_unlock(&lru_lock); 196 - } 197 - } 198 - 199 - void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) 200 - { 201 - spin_lock(&gl->gl_spin); 202 - __gfs2_glock_schedule_for_reclaim(gl); 203 - spin_unlock(&gl->gl_spin); 173 + if (demote_ok(gl)) 174 + gfs2_glock_add_to_lru(gl); 204 175 } 205 176 206 177 /**
+1 -2
fs/gfs2/glock.h
··· 225 225 226 226 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state); 227 227 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret); 228 - extern void gfs2_reclaim_glock(struct gfs2_sbd *sdp); 229 228 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); 230 229 extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip); 231 230 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp); 232 - extern void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl); 231 + extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl); 233 232 extern void gfs2_glock_free(struct gfs2_glock *gl); 234 233 235 234 extern int __init gfs2_glock_init(void);
+5 -7
fs/gfs2/lops.c
··· 40 40 { 41 41 struct gfs2_bufdata *bd; 42 42 43 - gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)); 43 + BUG_ON(!current->journal_info); 44 44 45 45 clear_buffer_dirty(bh); 46 46 if (test_set_buffer_pinned(bh)) ··· 65 65 * @sdp: the filesystem the buffer belongs to 66 66 * @bh: The buffer to unpin 67 67 * @ai: 68 + * @flags: The inode dirty flags 68 69 * 69 70 */ 70 71 ··· 74 73 { 75 74 struct gfs2_bufdata *bd = bh->b_private; 76 75 77 - gfs2_assert_withdraw(sdp, buffer_uptodate(bh)); 78 - 79 - if (!buffer_pinned(bh)) 80 - gfs2_assert_withdraw(sdp, 0); 76 + BUG_ON(!buffer_uptodate(bh)); 77 + BUG_ON(!buffer_pinned(bh)); 81 78 82 79 lock_buffer(bh); 83 80 mark_buffer_dirty(bh); ··· 94 95 list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list); 95 96 spin_unlock(&sdp->sd_ail_lock); 96 97 97 - if (test_and_clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags)) 98 - gfs2_glock_schedule_for_reclaim(bd->bd_gl); 98 + clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); 99 99 trace_gfs2_pin(bd, 0); 100 100 unlock_buffer(bh); 101 101 atomic_dec(&sdp->sd_log_pinned);
+1
fs/gfs2/rgrp.c
··· 392 392 393 393 if (gl) { 394 394 gl->gl_object = NULL; 395 + gfs2_glock_add_to_lru(gl); 395 396 gfs2_glock_put(gl); 396 397 } 397 398
+1
fs/gfs2/super.c
··· 1401 1401 end_writeback(inode); 1402 1402 1403 1403 ip->i_gl->gl_object = NULL; 1404 + gfs2_glock_add_to_lru(ip->i_gl); 1404 1405 gfs2_glock_put(ip->i_gl); 1405 1406 ip->i_gl = NULL; 1406 1407 if (ip->i_iopen_gh.gh_gl) {