Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[GFS2] Remove remote lock dropping code

There are several reasons why this is undesirable:

1. It never happens during normal operation anyway
2. If it does happen it causes performance to be very, very poor
3. It isn't likely to solve the original problem (memory shortage
on remote DLM node) it was supposed to solve
4. It uses a bunch of arbitrary constants which are unlikely to be
correct for any particular situation and for which the tuning seems
to be a black art.
5. In an N node cluster, only 1/N of the dropped locks will actually
contribute to solving the problem on average.

So all in all we are better off without it. This also makes merging
the lock_dlm module into GFS2 a bit easier.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>

+6 -73
-5
fs/gfs2/gfs2.h
··· 16 16 }; 17 17 18 18 enum { 19 - NO_WAIT = 0, 20 - WAIT = 1, 21 - }; 22 - 23 - enum { 24 19 NO_FORCE = 0, 25 20 FORCE = 1, 26 21 };
+3 -9
fs/gfs2/glock.c
··· 1316 1316 wake_up_process(sdp->sd_recoverd_process); 1317 1317 return; 1318 1318 1319 - case LM_CB_DROPLOCKS: 1320 - gfs2_gl_hash_clear(sdp, NO_WAIT); 1321 - gfs2_quota_scan(sdp); 1322 - return; 1323 - 1324 1319 default: 1325 1320 gfs2_assert_warn(sdp, 0); 1326 1321 return; ··· 1503 1508 * @sdp: the filesystem 1504 1509 * @wait: wait until it's all gone 1505 1510 * 1506 - * Called when unmounting the filesystem, or when inter-node lock manager 1507 - * requests DROPLOCKS because it is running out of capacity. 1511 + * Called when unmounting the filesystem. 1508 1512 */ 1509 1513 1510 - void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) 1514 + void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 1511 1515 { 1512 1516 unsigned long t; 1513 1517 unsigned int x; ··· 1521 1527 cont = 1; 1522 1528 } 1523 1529 1524 - if (!wait || !cont) 1530 + if (!cont) 1525 1531 break; 1526 1532 1527 1533 if (time_after_eq(jiffies,
+1 -1
fs/gfs2/glock.h
··· 132 132 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); 133 133 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl); 134 134 void gfs2_reclaim_glock(struct gfs2_sbd *sdp); 135 - void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait); 135 + void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); 136 136 137 137 int __init gfs2_glock_init(void); 138 138 void gfs2_glock_exit(void);
-3
fs/gfs2/locking/dlm/lock_dlm.h
··· 79 79 wait_queue_head_t wait_control; 80 80 struct task_struct *thread; 81 81 wait_queue_head_t thread_wait; 82 - unsigned long drop_time; 83 - int drop_locks_count; 84 - int drop_locks_period; 85 82 }; 86 83 87 84 enum {
-3
fs/gfs2/locking/dlm/mount.c
··· 22 22 if (!ls) 23 23 return NULL; 24 24 25 - ls->drop_locks_count = GDLM_DROP_COUNT; 26 - ls->drop_locks_period = GDLM_DROP_PERIOD; 27 25 ls->fscb = cb; 28 26 ls->sdp = sdp; 29 27 ls->fsflags = flags; ··· 31 33 INIT_LIST_HEAD(&ls->all_locks); 32 34 init_waitqueue_head(&ls->thread_wait); 33 35 init_waitqueue_head(&ls->wait_control); 34 - ls->drop_time = jiffies; 35 36 ls->jid = -1; 36 37 37 38 strncpy(buf, table_name, 256);
-13
fs/gfs2/locking/dlm/sysfs.c
··· 114 114 return sprintf(buf, "%d\n", ls->recover_jid_status); 115 115 } 116 116 117 - static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf) 118 - { 119 - return sprintf(buf, "%d\n", ls->drop_locks_count); 120 - } 121 - 122 - static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len) 123 - { 124 - ls->drop_locks_count = simple_strtol(buf, NULL, 0); 125 - return len; 126 - } 127 - 128 117 struct gdlm_attr { 129 118 struct attribute attr; 130 119 ssize_t (*show)(struct gdlm_ls *, char *); ··· 133 144 GDLM_ATTR(recover, 0644, recover_show, recover_store); 134 145 GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); 135 146 GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); 136 - GDLM_ATTR(drop_count, 0644, drop_count_show, drop_count_store); 137 147 138 148 static struct attribute *gdlm_attrs[] = { 139 149 &gdlm_attr_proto_name.attr, ··· 145 157 &gdlm_attr_recover.attr, 146 158 &gdlm_attr_recover_done.attr, 147 159 &gdlm_attr_recover_status.attr, 148 - &gdlm_attr_drop_count.attr, 149 160 NULL, 150 161 }; 151 162
-19
fs/gfs2/locking/dlm/thread.c
··· 20 20 return ret; 21 21 } 22 22 23 - static inline int check_drop(struct gdlm_ls *ls) 24 - { 25 - if (!ls->drop_locks_count) 26 - return 0; 27 - 28 - if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) { 29 - ls->drop_time = jiffies; 30 - if (ls->all_locks_count >= ls->drop_locks_count) 31 - return 1; 32 - } 33 - return 0; 34 - } 35 - 36 23 static int gdlm_thread(void *data) 37 24 { 38 25 struct gdlm_ls *ls = (struct gdlm_ls *) data; ··· 37 50 list_del_init(&lp->delay_list); 38 51 spin_unlock(&ls->async_lock); 39 52 gdlm_do_lock(lp); 40 - spin_lock(&ls->async_lock); 41 - } 42 - /* Does this ever happen these days? I hope not anyway */ 43 - if (check_drop(ls)) { 44 - spin_unlock(&ls->async_lock); 45 - ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL); 46 53 spin_lock(&ls->async_lock); 47 54 } 48 55 spin_unlock(&ls->async_lock);
+1 -1
fs/gfs2/ops_fstype.c
··· 874 874 fail_locking: 875 875 init_locking(sdp, &mount_gh, UNDO); 876 876 fail_lm: 877 - gfs2_gl_hash_clear(sdp, WAIT); 877 + gfs2_gl_hash_clear(sdp); 878 878 gfs2_lm_unmount(sdp); 879 879 while (invalidate_inodes(sb)) 880 880 yield();
+1 -1
fs/gfs2/ops_super.c
··· 126 126 gfs2_clear_rgrpd(sdp); 127 127 gfs2_jindex_free(sdp); 128 128 /* Take apart glock structures and buffer lists */ 129 - gfs2_gl_hash_clear(sdp, WAIT); 129 + gfs2_gl_hash_clear(sdp); 130 130 /* Unmount the locking protocol */ 131 131 gfs2_lm_unmount(sdp); 132 132
-14
fs/gfs2/sys.c
··· 110 110 return len; 111 111 } 112 112 113 - static ssize_t shrink_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 114 - { 115 - if (!capable(CAP_SYS_ADMIN)) 116 - return -EACCES; 117 - 118 - if (simple_strtol(buf, NULL, 0) != 1) 119 - return -EINVAL; 120 - 121 - gfs2_gl_hash_clear(sdp, NO_WAIT); 122 - return len; 123 - } 124 - 125 113 static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, 126 114 size_t len) 127 115 { ··· 163 175 GFS2_ATTR(id, 0444, id_show, NULL); 164 176 GFS2_ATTR(fsname, 0444, fsname_show, NULL); 165 177 GFS2_ATTR(freeze, 0644, freeze_show, freeze_store); 166 - GFS2_ATTR(shrink, 0200, NULL, shrink_store); 167 178 GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store); 168 179 GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store); 169 180 GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store); ··· 173 186 &gfs2_attr_id.attr, 174 187 &gfs2_attr_fsname.attr, 175 188 &gfs2_attr_freeze.attr, 176 - &gfs2_attr_shrink.attr, 177 189 &gfs2_attr_withdraw.attr, 178 190 &gfs2_attr_statfs_sync.attr, 179 191 &gfs2_attr_quota_sync.attr,
-4
include/linux/lm_interface.h
··· 138 138 * LM_CB_NEED_RECOVERY 139 139 * The given journal needs to be recovered. 140 140 * 141 - * LM_CB_DROPLOCKS 142 - * Reduce the number of cached locks. 143 - * 144 141 * LM_CB_ASYNC 145 142 * The given lock has been granted. 146 143 */ ··· 146 149 #define LM_CB_NEED_D 258 147 150 #define LM_CB_NEED_S 259 148 151 #define LM_CB_NEED_RECOVERY 260 149 - #define LM_CB_DROPLOCKS 261 150 152 #define LM_CB_ASYNC 262 151 153 152 154 /*