Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nfsd: add an LRU list for blocked locks

It's possible for a client to call in on a lock that is blocked for a
long time, but discontinue polling for it. A malicious client could
even set a lock on a file, and then spam the server with failing lock
requests from different lockowners that pile up in a DoS attack.

Add the blocked lock structures to a per-net namespace LRU when hashing
them, and timestamp them. If the lock request is not revisited after a
lease period, we'll drop it under the assumption that the client is no
longer interested.

This also gives us a mechanism to clean up these objects at server
shutdown time.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>

Authored by Jeff Layton and committed by J. Bruce Fields.
Commit hashes: 7919d0a2, 76d348fa

+65
+1
fs/nfsd/netns.h
··· 84 84 struct list_head client_lru; 85 85 struct list_head close_lru; 86 86 struct list_head del_recall_lru; 87 + struct list_head blocked_locks_lru; 87 88 88 89 struct delayed_work laundromat_work; 89 90
+62
fs/nfsd/nfs4state.c
··· 221 221 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { 222 222 if (fh_match(fh, &cur->nbl_fh)) { 223 223 list_del_init(&cur->nbl_list); 224 + list_del_init(&cur->nbl_lru); 224 225 found = cur; 225 226 break; 226 227 } ··· 4581 4580 struct nfs4_openowner *oo; 4582 4581 struct nfs4_delegation *dp; 4583 4582 struct nfs4_ol_stateid *stp; 4583 + struct nfsd4_blocked_lock *nbl; 4584 4584 struct list_head *pos, *next, reaplist; 4585 4585 time_t cutoff = get_seconds() - nn->nfsd4_lease; 4586 4586 time_t t, new_timeo = nn->nfsd4_lease; ··· 4649 4647 spin_lock(&nn->client_lock); 4650 4648 } 4651 4649 spin_unlock(&nn->client_lock); 4650 + 4651 + /* 4652 + * It's possible for a client to try and acquire an already held lock 4653 + * that is being held for a long time, and then lose interest in it. 4654 + * So, we clean out any un-revisited request after a lease period 4655 + * under the assumption that the client is no longer interested. 4656 + * 4657 + * RFC5661, sec. 9.6 states that the client must not rely on getting 4658 + * notifications and must continue to poll for locks, even when the 4659 + * server supports them. Thus this shouldn't lead to clients blocking 4660 + * indefinitely once the lock does become free. 
4661 + */ 4662 + BUG_ON(!list_empty(&reaplist)); 4663 + spin_lock(&nn->client_lock); 4664 + while (!list_empty(&nn->blocked_locks_lru)) { 4665 + nbl = list_first_entry(&nn->blocked_locks_lru, 4666 + struct nfsd4_blocked_lock, nbl_lru); 4667 + if (time_after((unsigned long)nbl->nbl_time, 4668 + (unsigned long)cutoff)) { 4669 + t = nbl->nbl_time - cutoff; 4670 + new_timeo = min(new_timeo, t); 4671 + break; 4672 + } 4673 + list_move(&nbl->nbl_lru, &reaplist); 4674 + list_del_init(&nbl->nbl_list); 4675 + } 4676 + spin_unlock(&nn->client_lock); 4677 + 4678 + while (!list_empty(&reaplist)) { 4679 + nbl = list_first_entry(&nn->blocked_locks_lru, 4680 + struct nfsd4_blocked_lock, nbl_lru); 4681 + list_del_init(&nbl->nbl_lru); 4682 + posix_unblock_lock(&nbl->nbl_lock); 4683 + free_blocked_lock(nbl); 4684 + } 4652 4685 4653 4686 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 4654 4687 return new_timeo; ··· 5435 5398 struct nfsd4_blocked_lock, nbl_lock); 5436 5399 bool queue = false; 5437 5400 5401 + /* An empty list means that something else is going to be using it */ 5438 5402 spin_lock(&nn->client_lock); 5439 5403 if (!list_empty(&nbl->nbl_list)) { 5440 5404 list_del_init(&nbl->nbl_list); 5405 + list_del_init(&nbl->nbl_lru); 5441 5406 queue = true; 5442 5407 } 5443 5408 spin_unlock(&nn->client_lock); ··· 5864 5825 } 5865 5826 5866 5827 if (fl_flags & FL_SLEEP) { 5828 + nbl->nbl_time = jiffies; 5867 5829 spin_lock(&nn->client_lock); 5868 5830 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 5831 + list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 5869 5832 spin_unlock(&nn->client_lock); 5870 5833 } 5871 5834 ··· 5899 5858 if (fl_flags & FL_SLEEP) { 5900 5859 spin_lock(&nn->client_lock); 5901 5860 list_del_init(&nbl->nbl_list); 5861 + list_del_init(&nbl->nbl_lru); 5902 5862 spin_unlock(&nn->client_lock); 5903 5863 } 5904 5864 free_blocked_lock(nbl); ··· 6940 6898 INIT_LIST_HEAD(&nn->client_lru); 6941 6899 INIT_LIST_HEAD(&nn->close_lru); 6942 6900 
INIT_LIST_HEAD(&nn->del_recall_lru); 6901 + INIT_LIST_HEAD(&nn->blocked_locks_lru); 6943 6902 spin_lock_init(&nn->client_lock); 6944 6903 6945 6904 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); ··· 7038 6995 struct nfs4_delegation *dp = NULL; 7039 6996 struct list_head *pos, *next, reaplist; 7040 6997 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6998 + struct nfsd4_blocked_lock *nbl; 7041 6999 7042 7000 cancel_delayed_work_sync(&nn->laundromat_work); 7043 7001 locks_end_grace(&nn->nfsd4_manager); ··· 7057 7013 put_clnt_odstate(dp->dl_clnt_odstate); 7058 7014 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 7059 7015 nfs4_put_stid(&dp->dl_stid); 7016 + } 7017 + 7018 + BUG_ON(!list_empty(&reaplist)); 7019 + spin_lock(&nn->client_lock); 7020 + while (!list_empty(&nn->blocked_locks_lru)) { 7021 + nbl = list_first_entry(&nn->blocked_locks_lru, 7022 + struct nfsd4_blocked_lock, nbl_lru); 7023 + list_move(&nbl->nbl_lru, &reaplist); 7024 + list_del_init(&nbl->nbl_list); 7025 + } 7026 + spin_unlock(&nn->client_lock); 7027 + 7028 + while (!list_empty(&reaplist)) { 7029 + nbl = list_first_entry(&nn->blocked_locks_lru, 7030 + struct nfsd4_blocked_lock, nbl_lru); 7031 + list_del_init(&nbl->nbl_lru); 7032 + posix_unblock_lock(&nbl->nbl_lock); 7033 + free_blocked_lock(nbl); 7060 7034 } 7061 7035 7062 7036 nfsd4_client_tracking_exit(net);
+2
fs/nfsd/state.h
··· 587 587 */ 588 588 struct nfsd4_blocked_lock { 589 589 struct list_head nbl_list; 590 + struct list_head nbl_lru; 591 + unsigned long nbl_time; 590 592 struct file_lock nbl_lock; 591 593 struct knfsd_fh nbl_fh; 592 594 struct nfsd4_callback nbl_cb;