Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nfs: adapt to breakup of struct file_lock

Most of the existing APIs have remained the same, but subsystems that
access file_lock fields directly need to reach into struct
file_lock_core now.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Link: https://lore.kernel.org/r/20240131-flsplit-v3-41-c6129007ee8d@kernel.org
Reviewed-by: NeilBrown <neilb@suse.de>
Signed-off-by: Christian Brauner <brauner@kernel.org>

Authored by Jeff Layton and committed by Christian Brauner.
dd1fac6a eb8ed7c6

+38 -38
+1 -1
fs/nfs/delegation.c
··· 157 157 spin_lock(&flctx->flc_lock); 158 158 restart: 159 159 for_each_file_lock(fl, list) { 160 - if (nfs_file_open_context(fl->fl_file)->state != state) 160 + if (nfs_file_open_context(fl->c.flc_file)->state != state) 161 161 continue; 162 162 spin_unlock(&flctx->flc_lock); 163 163 status = nfs4_lock_delegation_recall(fl, state, stateid);
+9 -10
fs/nfs/file.c
··· 31 31 #include <linux/swap.h> 32 32 33 33 #include <linux/uaccess.h> 34 - #define _NEED_FILE_LOCK_FIELD_MACROS 35 34 #include <linux/filelock.h> 36 35 37 36 #include "delegation.h" ··· 720 721 { 721 722 struct inode *inode = filp->f_mapping->host; 722 723 int status = 0; 723 - unsigned int saved_type = fl->fl_type; 724 + unsigned int saved_type = fl->c.flc_type; 724 725 725 726 /* Try local locking first */ 726 727 posix_test_lock(filp, fl); 727 - if (fl->fl_type != F_UNLCK) { 728 + if (fl->c.flc_type != F_UNLCK) { 728 729 /* found a conflict */ 729 730 goto out; 730 731 } 731 - fl->fl_type = saved_type; 732 + fl->c.flc_type = saved_type; 732 733 733 734 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 734 735 goto out_noconflict; ··· 740 741 out: 741 742 return status; 742 743 out_noconflict: 743 - fl->fl_type = F_UNLCK; 744 + fl->c.flc_type = F_UNLCK; 744 745 goto out; 745 746 } 746 747 ··· 765 766 * If we're signalled while cleaning up locks on process exit, we 766 767 * still need to complete the unlock. 
767 768 */ 768 - if (status < 0 && !(fl->fl_flags & FL_CLOSE)) 769 + if (status < 0 && !(fl->c.flc_flags & FL_CLOSE)) 769 770 return status; 770 771 } 771 772 ··· 832 833 int is_local = 0; 833 834 834 835 dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n", 835 - filp, fl->fl_type, fl->fl_flags, 836 + filp, fl->c.flc_type, fl->c.flc_flags, 836 837 (long long)fl->fl_start, (long long)fl->fl_end); 837 838 838 839 nfs_inc_stats(inode, NFSIOS_VFSLOCK); 839 840 840 - if (fl->fl_flags & FL_RECLAIM) 841 + if (fl->c.flc_flags & FL_RECLAIM) 841 842 return -ENOGRACE; 842 843 843 844 if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL) ··· 869 870 int is_local = 0; 870 871 871 872 dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n", 872 - filp, fl->fl_type, fl->fl_flags); 873 + filp, fl->c.flc_type, fl->c.flc_flags); 873 874 874 - if (!(fl->fl_flags & FL_FLOCK)) 875 + if (!(fl->c.flc_flags & FL_FLOCK)) 875 876 return -ENOLCK; 876 877 877 878 if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
+1 -1
fs/nfs/nfs3proc.c
··· 963 963 struct nfs_open_context *ctx = nfs_file_open_context(filp); 964 964 int status; 965 965 966 - if (fl->fl_flags & FL_CLOSE) { 966 + if (fl->c.flc_flags & FL_CLOSE) { 967 967 l_ctx = nfs_get_lock_context(ctx); 968 968 if (IS_ERR(l_ctx)) 969 969 l_ctx = NULL;
-1
fs/nfs/nfs4_fs.h
··· 23 23 #define NFS4_MAX_LOOP_ON_RECOVER (10) 24 24 25 25 #include <linux/seqlock.h> 26 - #define _NEED_FILE_LOCK_FIELD_MACROS 27 26 #include <linux/filelock.h> 28 27 29 28 struct idmap;
+18 -15
fs/nfs/nfs4proc.c
··· 6800 6800 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6801 6801 switch (status) { 6802 6802 case 0: 6803 - request->fl_type = F_UNLCK; 6803 + request->c.flc_type = F_UNLCK; 6804 6804 break; 6805 6805 case -NFS4ERR_DENIED: 6806 6806 status = 0; ··· 7018 7018 /* Ensure this is an unlock - when canceling a lock, the 7019 7019 * canceled lock is passed in, and it won't be an unlock. 7020 7020 */ 7021 - fl->fl_type = F_UNLCK; 7022 - if (fl->fl_flags & FL_CLOSE) 7021 + fl->c.flc_type = F_UNLCK; 7022 + if (fl->c.flc_flags & FL_CLOSE) 7023 7023 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7024 7024 7025 7025 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); ··· 7045 7045 struct rpc_task *task; 7046 7046 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7047 7047 int status = 0; 7048 - unsigned char saved_flags = request->fl_flags; 7048 + unsigned char saved_flags = request->c.flc_flags; 7049 7049 7050 7050 status = nfs4_set_lock_state(state, request); 7051 7051 /* Unlock _before_ we do the RPC call */ 7052 - request->fl_flags |= FL_EXISTS; 7052 + request->c.flc_flags |= FL_EXISTS; 7053 7053 /* Exclude nfs_delegation_claim_locks() */ 7054 7054 mutex_lock(&sp->so_delegreturn_mutex); 7055 7055 /* Exclude nfs4_reclaim_open_stateid() - note nesting! 
*/ ··· 7073 7073 status = -ENOMEM; 7074 7074 if (IS_ERR(seqid)) 7075 7075 goto out; 7076 - task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 7076 + task = nfs4_do_unlck(request, 7077 + nfs_file_open_context(request->c.flc_file), 7078 + lsp, seqid); 7077 7079 status = PTR_ERR(task); 7078 7080 if (IS_ERR(task)) 7079 7081 goto out; 7080 7082 status = rpc_wait_for_completion_task(task); 7081 7083 rpc_put_task(task); 7082 7084 out: 7083 - request->fl_flags = saved_flags; 7085 + request->c.flc_flags = saved_flags; 7084 7086 trace_nfs4_unlock(request, state, F_SETLK, status); 7085 7087 return status; 7086 7088 } ··· 7193 7191 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7194 7192 data->timestamp); 7195 7193 if (data->arg.new_lock && !data->cancelled) { 7196 - data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 7194 + data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7197 7195 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7198 7196 goto out_restart; 7199 7197 } ··· 7294 7292 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7295 7293 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7296 7294 7297 - data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 7295 + data = nfs4_alloc_lockdata(fl, 7296 + nfs_file_open_context(fl->c.flc_file), 7298 7297 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7299 7298 if (data == NULL) 7300 7299 return -ENOMEM; ··· 7401 7398 { 7402 7399 struct nfs_inode *nfsi = NFS_I(state->inode); 7403 7400 struct nfs4_state_owner *sp = state->owner; 7404 - unsigned char flags = request->fl_flags; 7401 + unsigned char flags = request->c.flc_flags; 7405 7402 int status; 7406 7403 7407 - request->fl_flags |= FL_ACCESS; 7404 + request->c.flc_flags |= FL_ACCESS; 7408 7405 status = locks_lock_inode_wait(state->inode, request); 7409 7406 if (status < 0) 7410 7407 goto out; ··· 7413 7410 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7414 7411 /* Yes: cache locks! 
*/ 7415 7412 /* ...but avoid races with delegation recall... */ 7416 - request->fl_flags = flags & ~FL_SLEEP; 7413 + request->c.flc_flags = flags & ~FL_SLEEP; 7417 7414 status = locks_lock_inode_wait(state->inode, request); 7418 7415 up_read(&nfsi->rwsem); 7419 7416 mutex_unlock(&sp->so_delegreturn_mutex); ··· 7423 7420 mutex_unlock(&sp->so_delegreturn_mutex); 7424 7421 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7425 7422 out: 7426 - request->fl_flags = flags; 7423 + request->c.flc_flags = flags; 7427 7424 return status; 7428 7425 } 7429 7426 ··· 7574 7571 if (state == NULL) 7575 7572 return -ENOLCK; 7576 7573 7577 - if ((request->fl_flags & FL_POSIX) && 7574 + if ((request->c.flc_flags & FL_POSIX) && 7578 7575 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7579 7576 return -ENOLCK; 7580 7577 ··· 7582 7579 * Don't rely on the VFS having checked the file open mode, 7583 7580 * since it won't do this for flock() locks. 7584 7581 */ 7585 - switch (request->fl_type) { 7582 + switch (request->c.flc_type) { 7586 7583 case F_RDLCK: 7587 7584 if (!(filp->f_mode & FMODE_READ)) 7588 7585 return -EBADF;
+2 -2
fs/nfs/nfs4state.c
··· 980 980 981 981 if (fl->fl_ops != NULL) 982 982 return 0; 983 - lsp = nfs4_get_lock_state(state, fl->fl_owner); 983 + lsp = nfs4_get_lock_state(state, fl->c.flc_owner); 984 984 if (lsp == NULL) 985 985 return -ENOMEM; 986 986 fl->fl_u.nfs4_fl.owner = lsp; ··· 1530 1530 spin_lock(&flctx->flc_lock); 1531 1531 restart: 1532 1532 for_each_file_lock(fl, list) { 1533 - if (nfs_file_open_context(fl->fl_file)->state != state) 1533 + if (nfs_file_open_context(fl->c.flc_file)->state != state) 1534 1534 continue; 1535 1535 spin_unlock(&flctx->flc_lock); 1536 1536 status = ops->recover_lock(state, fl);
+2 -2
fs/nfs/nfs4trace.h
··· 699 699 700 700 __entry->error = error < 0 ? -error : 0; 701 701 __entry->cmd = cmd; 702 - __entry->type = request->fl_type; 702 + __entry->type = request->c.flc_type; 703 703 __entry->start = request->fl_start; 704 704 __entry->end = request->fl_end; 705 705 __entry->dev = inode->i_sb->s_dev; ··· 771 771 772 772 __entry->error = error < 0 ? -error : 0; 773 773 __entry->cmd = cmd; 774 - __entry->type = request->fl_type; 774 + __entry->type = request->c.flc_type; 775 775 __entry->start = request->fl_start; 776 776 __entry->end = request->fl_end; 777 777 __entry->dev = inode->i_sb->s_dev;
+3 -3
fs/nfs/nfs4xdr.c
··· 5052 5052 fl->fl_end = fl->fl_start + (loff_t)length - 1; 5053 5053 if (length == ~(uint64_t)0) 5054 5054 fl->fl_end = OFFSET_MAX; 5055 - fl->fl_type = F_WRLCK; 5055 + fl->c.flc_type = F_WRLCK; 5056 5056 if (type & 1) 5057 - fl->fl_type = F_RDLCK; 5058 - fl->fl_pid = 0; 5057 + fl->c.flc_type = F_RDLCK; 5058 + fl->c.flc_pid = 0; 5059 5059 } 5060 5060 p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */ 5061 5061 namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */
+2 -3
fs/nfs/write.c
··· 25 25 #include <linux/freezer.h> 26 26 #include <linux/wait.h> 27 27 #include <linux/iversion.h> 28 - #define _NEED_FILE_LOCK_FIELD_MACROS 29 28 #include <linux/filelock.h> 30 29 31 30 #include <linux/uaccess.h> ··· 1335 1336 spin_lock(&flctx->flc_lock); 1336 1337 if (!list_empty(&flctx->flc_posix)) { 1337 1338 fl = list_first_entry(&flctx->flc_posix, struct file_lock, 1338 - fl_list); 1339 + c.flc_list); 1339 1340 if (is_whole_file_wrlock(fl)) 1340 1341 ret = 1; 1341 1342 } else if (!list_empty(&flctx->flc_flock)) { 1342 1343 fl = list_first_entry(&flctx->flc_flock, struct file_lock, 1343 - fl_list); 1344 + c.flc_list); 1344 1345 if (lock_is_write(fl)) 1345 1346 ret = 1; 1346 1347 }