Merge rsync://client.linux-nfs.org/pub/linux/nfs-2.6

+4142 -1011
+35
fs/Kconfig
··· 1268 depends on INET 1269 select LOCKD 1270 select SUNRPC 1271 help 1272 If you are connected to some other (usually local) Unix computer 1273 (using SLIP, PLIP, PPP or Ethernet) and want to mount files residing ··· 1310 3 of the NFS protocol. 1311 1312 If unsure, say Y. 1313 1314 config NFS_V4 1315 bool "Provide NFSv4 client support (EXPERIMENTAL)" ··· 1364 select LOCKD 1365 select SUNRPC 1366 select EXPORTFS 1367 help 1368 If you want your Linux box to act as an NFS *server*, so that other 1369 computers on your local network which support NFS can access certain ··· 1388 To compile the NFS server support as a module, choose M here: the 1389 module will be called nfsd. If unsure, say N. 1390 1391 config NFSD_V3 1392 bool "Provide NFSv3 server support" 1393 depends on NFSD 1394 help 1395 If you would like to include the NFSv3 server as well as the NFSv2 1396 server, say Y here. If unsure, say Y. 1397 1398 config NFSD_V4 1399 bool "Provide NFSv4 server support (EXPERIMENTAL)" ··· 1452 1453 config EXPORTFS 1454 tristate 1455 1456 config SUNRPC 1457 tristate
··· 1268 depends on INET 1269 select LOCKD 1270 select SUNRPC 1271 + select NFS_ACL_SUPPORT if NFS_V3_ACL 1272 help 1273 If you are connected to some other (usually local) Unix computer 1274 (using SLIP, PLIP, PPP or Ethernet) and want to mount files residing ··· 1309 3 of the NFS protocol. 1310 1311 If unsure, say Y. 1312 + 1313 + config NFS_V3_ACL 1314 + bool "Provide client support for the NFSv3 ACL protocol extension" 1315 + depends on NFS_V3 1316 + help 1317 + Implement the NFSv3 ACL protocol extension for manipulating POSIX 1318 + Access Control Lists. The server should also be compiled with 1319 + the NFSv3 ACL protocol extension; see the CONFIG_NFSD_V3_ACL option. 1320 + 1321 + If unsure, say N. 1322 1323 config NFS_V4 1324 bool "Provide NFSv4 client support (EXPERIMENTAL)" ··· 1353 select LOCKD 1354 select SUNRPC 1355 select EXPORTFS 1356 + select NFS_ACL_SUPPORT if NFSD_V3_ACL || NFSD_V2_ACL 1357 help 1358 If you want your Linux box to act as an NFS *server*, so that other 1359 computers on your local network which support NFS can access certain ··· 1376 To compile the NFS server support as a module, choose M here: the 1377 module will be called nfsd. If unsure, say N. 1378 1379 + config NFSD_V2_ACL 1380 + bool 1381 + depends on NFSD 1382 + 1383 config NFSD_V3 1384 bool "Provide NFSv3 server support" 1385 depends on NFSD 1386 help 1387 If you would like to include the NFSv3 server as well as the NFSv2 1388 server, say Y here. If unsure, say Y. 1389 + 1390 + config NFSD_V3_ACL 1391 + bool "Provide server support for the NFSv3 ACL protocol extension" 1392 + depends on NFSD_V3 1393 + select NFSD_V2_ACL 1394 + help 1395 + Implement the NFSv3 ACL protocol extension for manipulating POSIX 1396 + Access Control Lists on exported file systems. NFS clients should 1397 + be compiled with the NFSv3 ACL protocol extension; see the 1398 + CONFIG_NFS_V3_ACL option. If unsure, say N. 
1399 1400 config NFSD_V4 1401 bool "Provide NFSv4 server support (EXPERIMENTAL)" ··· 1426 1427 config EXPORTFS 1428 tristate 1429 + 1430 + config NFS_ACL_SUPPORT 1431 + tristate 1432 + select FS_POSIX_ACL 1433 + 1434 + config NFS_COMMON 1435 + bool 1436 + depends on NFSD || NFS_FS 1437 + default y 1438 1439 config SUNRPC 1440 tristate
+1
fs/Makefile
··· 31 32 obj-$(CONFIG_FS_MBCACHE) += mbcache.o 33 obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o 34 35 obj-$(CONFIG_QUOTA) += dquot.o 36 obj-$(CONFIG_QFMT_V1) += quota_v1.o
··· 31 32 obj-$(CONFIG_FS_MBCACHE) += mbcache.o 33 obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o 34 + obj-$(CONFIG_NFS_COMMON) += nfs_common/ 35 36 obj-$(CONFIG_QUOTA) += dquot.o 37 obj-$(CONFIG_QFMT_V1) += quota_v1.o
+62 -51
fs/lockd/clntlock.c
··· 31 * This is the representation of a blocked client lock. 32 */ 33 struct nlm_wait { 34 - struct nlm_wait * b_next; /* linked list */ 35 wait_queue_head_t b_wait; /* where to wait on */ 36 struct nlm_host * b_host; 37 struct file_lock * b_lock; /* local file lock */ ··· 39 u32 b_status; /* grant callback status */ 40 }; 41 42 - static struct nlm_wait * nlm_blocked; 43 44 /* 45 * Block on a lock 46 */ 47 - int 48 - nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp) 49 { 50 - struct nlm_wait block, **head; 51 - int err; 52 - u32 pstate; 53 54 - block.b_host = host; 55 - block.b_lock = fl; 56 - init_waitqueue_head(&block.b_wait); 57 - block.b_status = NLM_LCK_BLOCKED; 58 - block.b_next = nlm_blocked; 59 - nlm_blocked = &block; 60 - 61 - /* Remember pseudo nsm state */ 62 - pstate = host->h_state; 63 64 /* Go to sleep waiting for GRANT callback. Some servers seem 65 * to lose callbacks, however, so we're going to poll from ··· 96 * a 1 minute timeout would do. See the comment before 97 * nlmclnt_lock for an explanation. 98 */ 99 - sleep_on_timeout(&block.b_wait, 30*HZ); 100 101 - for (head = &nlm_blocked; *head; head = &(*head)->b_next) { 102 - if (*head == &block) { 103 - *head = block.b_next; 104 - break; 105 - } 106 } 107 108 - if (!signalled()) { 109 - *statp = block.b_status; 110 - return 0; 111 - } 112 - 113 - /* Okay, we were interrupted. Cancel the pending request 114 - * unless the server has rebooted. 115 - */ 116 - if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0) 117 - printk(KERN_NOTICE 118 - "lockd: CANCEL call failed (errno %d)\n", -err); 119 - 120 - return -ERESTARTSYS; 121 } 122 123 /* ··· 115 nlmclnt_grant(struct nlm_lock *lock) 116 { 117 struct nlm_wait *block; 118 119 /* 120 * Look up blocked request based on arguments. 121 * Warning: must not use cookie to match it! 
122 */ 123 - for (block = nlm_blocked; block; block = block->b_next) { 124 - if (nlm_compare_locks(block->b_lock, &lock->fl)) 125 - break; 126 } 127 - 128 - /* Ooops, no blocked request found. */ 129 - if (block == NULL) 130 - return nlm_lck_denied; 131 - 132 - /* Alright, we found the lock. Set the return status and 133 - * wake up the caller. 134 - */ 135 - block->b_status = NLM_LCK_GRANTED; 136 - wake_up(&block->b_wait); 137 - 138 - return nlm_granted; 139 } 140 141 /* ··· 241 host->h_reclaiming = 0; 242 243 /* Now, wake up all processes that sleep on a blocked lock */ 244 - for (block = nlm_blocked; block; block = block->b_next) { 245 if (block->b_host == host) { 246 block->b_status = NLM_LCK_DENIED_GRACE_PERIOD; 247 wake_up(&block->b_wait);
··· 31 * This is the representation of a blocked client lock. 32 */ 33 struct nlm_wait { 34 + struct list_head b_list; /* linked list */ 35 wait_queue_head_t b_wait; /* where to wait on */ 36 struct nlm_host * b_host; 37 struct file_lock * b_lock; /* local file lock */ ··· 39 u32 b_status; /* grant callback status */ 40 }; 41 42 + static LIST_HEAD(nlm_blocked); 43 + 44 + /* 45 + * Queue up a lock for blocking so that the GRANTED request can see it 46 + */ 47 + int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl) 48 + { 49 + struct nlm_wait *block; 50 + 51 + BUG_ON(req->a_block != NULL); 52 + block = kmalloc(sizeof(*block), GFP_KERNEL); 53 + if (block == NULL) 54 + return -ENOMEM; 55 + block->b_host = host; 56 + block->b_lock = fl; 57 + init_waitqueue_head(&block->b_wait); 58 + block->b_status = NLM_LCK_BLOCKED; 59 + 60 + list_add(&block->b_list, &nlm_blocked); 61 + req->a_block = block; 62 + 63 + return 0; 64 + } 65 + 66 + void nlmclnt_finish_block(struct nlm_rqst *req) 67 + { 68 + struct nlm_wait *block = req->a_block; 69 + 70 + if (block == NULL) 71 + return; 72 + req->a_block = NULL; 73 + list_del(&block->b_list); 74 + kfree(block); 75 + } 76 77 /* 78 * Block on a lock 79 */ 80 + long nlmclnt_block(struct nlm_rqst *req, long timeout) 81 { 82 + struct nlm_wait *block = req->a_block; 83 + long ret; 84 85 + /* A borken server might ask us to block even if we didn't 86 + * request it. Just say no! 87 + */ 88 + if (!req->a_args.block) 89 + return -EAGAIN; 90 91 /* Go to sleep waiting for GRANT callback. Some servers seem 92 * to lose callbacks, however, so we're going to poll from ··· 69 * a 1 minute timeout would do. See the comment before 70 * nlmclnt_lock for an explanation. 
71 */ 72 + ret = wait_event_interruptible_timeout(block->b_wait, 73 + block->b_status != NLM_LCK_BLOCKED, 74 + timeout); 75 76 + if (block->b_status != NLM_LCK_BLOCKED) { 77 + req->a_res.status = block->b_status; 78 + block->b_status = NLM_LCK_BLOCKED; 79 } 80 81 + return ret; 82 } 83 84 /* ··· 100 nlmclnt_grant(struct nlm_lock *lock) 101 { 102 struct nlm_wait *block; 103 + u32 res = nlm_lck_denied; 104 105 /* 106 * Look up blocked request based on arguments. 107 * Warning: must not use cookie to match it! 108 */ 109 + list_for_each_entry(block, &nlm_blocked, b_list) { 110 + if (nlm_compare_locks(block->b_lock, &lock->fl)) { 111 + /* Alright, we found a lock. Set the return status 112 + * and wake up the caller 113 + */ 114 + block->b_status = NLM_LCK_GRANTED; 115 + wake_up(&block->b_wait); 116 + res = nlm_granted; 117 + } 118 } 119 + return res; 120 } 121 122 /* ··· 230 host->h_reclaiming = 0; 231 232 /* Now, wake up all processes that sleep on a blocked lock */ 233 + list_for_each_entry(block, &nlm_blocked, b_list) { 234 if (block->b_host == host) { 235 block->b_status = NLM_LCK_DENIED_GRACE_PERIOD; 236 wake_up(&block->b_wait);
+32 -8
fs/lockd/clntproc.c
··· 21 22 #define NLMDBG_FACILITY NLMDBG_CLIENT 23 #define NLMCLNT_GRACE_WAIT (5*HZ) 24 25 static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); 26 static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); ··· 554 { 555 struct nlm_host *host = req->a_host; 556 struct nlm_res *resp = &req->a_res; 557 - int status; 558 559 if (!host->h_monitored && nsm_monitor(host) < 0) { 560 printk(KERN_NOTICE "lockd: failed to monitor %s\n", ··· 564 goto out; 565 } 566 567 - do { 568 - if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) { 569 - if (resp->status != NLM_LCK_BLOCKED) 570 - break; 571 - status = nlmclnt_block(host, fl, &resp->status); 572 - } 573 if (status < 0) 574 goto out; 575 - } while (resp->status == NLM_LCK_BLOCKED && req->a_args.block); 576 577 if (resp->status == NLM_LCK_GRANTED) { 578 fl->fl_u.nfs_fl.state = host->h_state; ··· 598 do_vfs_lock(fl); 599 } 600 status = nlm_stat_to_errno(resp->status); 601 out: 602 nlmclnt_release_lockargs(req); 603 return status;
··· 21 22 #define NLMDBG_FACILITY NLMDBG_CLIENT 23 #define NLMCLNT_GRACE_WAIT (5*HZ) 24 + #define NLMCLNT_POLL_TIMEOUT (30*HZ) 25 26 static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); 27 static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); ··· 553 { 554 struct nlm_host *host = req->a_host; 555 struct nlm_res *resp = &req->a_res; 556 + long timeout; 557 + int status; 558 559 if (!host->h_monitored && nsm_monitor(host) < 0) { 560 printk(KERN_NOTICE "lockd: failed to monitor %s\n", ··· 562 goto out; 563 } 564 565 + if (req->a_args.block) { 566 + status = nlmclnt_prepare_block(req, host, fl); 567 if (status < 0) 568 goto out; 569 + } 570 + for(;;) { 571 + status = nlmclnt_call(req, NLMPROC_LOCK); 572 + if (status < 0) 573 + goto out_unblock; 574 + if (resp->status != NLM_LCK_BLOCKED) 575 + break; 576 + /* Wait on an NLM blocking lock */ 577 + timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT); 578 + /* Did a reclaimer thread notify us of a server reboot? */ 579 + if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) 580 + continue; 581 + if (resp->status != NLM_LCK_BLOCKED) 582 + break; 583 + if (timeout >= 0) 584 + continue; 585 + /* We were interrupted. Send a CANCEL request to the server 586 + * and exit 587 + */ 588 + status = (int)timeout; 589 + goto out_unblock; 590 + } 591 592 if (resp->status == NLM_LCK_GRANTED) { 593 fl->fl_u.nfs_fl.state = host->h_state; ··· 579 do_vfs_lock(fl); 580 } 581 status = nlm_stat_to_errno(resp->status); 582 + out_unblock: 583 + nlmclnt_finish_block(req); 584 + /* Cancel the blocked request if it is still pending */ 585 + if (resp->status == NLM_LCK_BLOCKED) 586 + nlmclnt_cancel(host, fl); 587 out: 588 nlmclnt_release_lockargs(req); 589 return status;
+3 -5
fs/lockd/host.c
··· 189 goto forgetit; 190 191 xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout); 192 193 /* Existing NLM servers accept AUTH_UNIX only */ 194 clnt = rpc_create_client(xprt, host->h_name, &nlm_program, 195 host->h_version, RPC_AUTH_UNIX); 196 - if (IS_ERR(clnt)) { 197 - xprt_destroy(xprt); 198 goto forgetit; 199 - } 200 clnt->cl_autobind = 1; /* turn on pmap queries */ 201 - xprt->nocong = 1; /* No congestion control for NLM */ 202 - xprt->resvport = 1; /* NLM requires a reserved port */ 203 204 host->h_rpcclnt = clnt; 205 }
··· 189 goto forgetit; 190 191 xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout); 192 + xprt->nocong = 1; /* No congestion control for NLM */ 193 + xprt->resvport = 1; /* NLM requires a reserved port */ 194 195 /* Existing NLM servers accept AUTH_UNIX only */ 196 clnt = rpc_create_client(xprt, host->h_name, &nlm_program, 197 host->h_version, RPC_AUTH_UNIX); 198 + if (IS_ERR(clnt)) 199 goto forgetit; 200 clnt->cl_autobind = 1; /* turn on pmap queries */ 201 202 host->h_rpcclnt = clnt; 203 }
+3 -4
fs/lockd/mon.c
··· 115 xprt = xprt_create_proto(IPPROTO_UDP, &sin, NULL); 116 if (IS_ERR(xprt)) 117 return (struct rpc_clnt *)xprt; 118 119 clnt = rpc_create_client(xprt, "localhost", 120 &nsm_program, SM_VERSION, 121 RPC_AUTH_NULL); 122 if (IS_ERR(clnt)) 123 - goto out_destroy; 124 clnt->cl_softrtry = 1; 125 clnt->cl_chatty = 1; 126 clnt->cl_oneshot = 1; 127 - xprt->resvport = 1; /* NSM requires a reserved port */ 128 return clnt; 129 130 - out_destroy: 131 - xprt_destroy(xprt); 132 return clnt; 133 } 134
··· 115 xprt = xprt_create_proto(IPPROTO_UDP, &sin, NULL); 116 if (IS_ERR(xprt)) 117 return (struct rpc_clnt *)xprt; 118 + xprt->resvport = 1; /* NSM requires a reserved port */ 119 120 clnt = rpc_create_client(xprt, "localhost", 121 &nsm_program, SM_VERSION, 122 RPC_AUTH_NULL); 123 if (IS_ERR(clnt)) 124 + goto out_err; 125 clnt->cl_softrtry = 1; 126 clnt->cl_chatty = 1; 127 clnt->cl_oneshot = 1; 128 return clnt; 129 130 + out_err: 131 return clnt; 132 } 133
+6
fs/locks.c
··· 1548 1549 if (filp->f_op && filp->f_op->lock) { 1550 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1551 if (error < 0) 1552 goto out; 1553 else ··· 1692 1693 if (filp->f_op && filp->f_op->lock) { 1694 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1695 if (error < 0) 1696 goto out; 1697 else ··· 1877 .fl_end = OFFSET_MAX, 1878 }; 1879 filp->f_op->flock(filp, F_SETLKW, &fl); 1880 } 1881 1882 lock_kernel();
··· 1548 1549 if (filp->f_op && filp->f_op->lock) { 1550 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1551 + if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1552 + file_lock.fl_ops->fl_release_private(&file_lock); 1553 if (error < 0) 1554 goto out; 1555 else ··· 1690 1691 if (filp->f_op && filp->f_op->lock) { 1692 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1693 + if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1694 + file_lock.fl_ops->fl_release_private(&file_lock); 1695 if (error < 0) 1696 goto out; 1697 else ··· 1873 .fl_end = OFFSET_MAX, 1874 }; 1875 filp->f_op->flock(filp, F_SETLKW, &fl); 1876 + if (fl.fl_ops && fl.fl_ops->fl_release_private) 1877 + fl.fl_ops->fl_release_private(&fl); 1878 } 1879 1880 lock_kernel();
+1
fs/nfs/Makefile
··· 8 proc.o read.o symlink.o unlink.o write.o 9 nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o 10 nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o 11 nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \ 12 delegation.o idmap.o \ 13 callback.o callback_xdr.o callback_proc.o
··· 8 proc.o read.o symlink.o unlink.o write.o 9 nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o 10 nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o 11 + nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o 12 nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \ 13 delegation.o idmap.o \ 14 callback.o callback_xdr.o callback_proc.o
+1
fs/nfs/callback.c
··· 14 #include <linux/sunrpc/svc.h> 15 #include <linux/sunrpc/svcsock.h> 16 #include <linux/nfs_fs.h> 17 #include "callback.h" 18 19 #define NFSDBG_FACILITY NFSDBG_CALLBACK
··· 14 #include <linux/sunrpc/svc.h> 15 #include <linux/sunrpc/svcsock.h> 16 #include <linux/nfs_fs.h> 17 + #include "nfs4_fs.h" 18 #include "callback.h" 19 20 #define NFSDBG_FACILITY NFSDBG_CALLBACK
+1
fs/nfs/callback_proc.c
··· 8 #include <linux/config.h> 9 #include <linux/nfs4.h> 10 #include <linux/nfs_fs.h> 11 #include "callback.h" 12 #include "delegation.h" 13
··· 8 #include <linux/config.h> 9 #include <linux/nfs4.h> 10 #include <linux/nfs_fs.h> 11 + #include "nfs4_fs.h" 12 #include "callback.h" 13 #include "delegation.h" 14
+1 -1
fs/nfs/callback_xdr.c
··· 10 #include <linux/sunrpc/svc.h> 11 #include <linux/nfs4.h> 12 #include <linux/nfs_fs.h> 13 #include "callback.h" 14 15 #define CB_OP_TAGLEN_MAXSZ (512) ··· 411 xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); 412 413 p = (uint32_t*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); 414 - rqstp->rq_res.head[0].iov_len = PAGE_SIZE; 415 xdr_init_encode(&xdr_out, &rqstp->rq_res, p); 416 417 decode_compound_hdr_arg(&xdr_in, &hdr_arg);
··· 10 #include <linux/sunrpc/svc.h> 11 #include <linux/nfs4.h> 12 #include <linux/nfs_fs.h> 13 + #include "nfs4_fs.h" 14 #include "callback.h" 15 16 #define CB_OP_TAGLEN_MAXSZ (512) ··· 410 xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base); 411 412 p = (uint32_t*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); 413 xdr_init_encode(&xdr_out, &rqstp->rq_res, p); 414 415 decode_compound_hdr_arg(&xdr_in, &hdr_arg);
+1
fs/nfs/delegation.c
··· 16 #include <linux/nfs_fs.h> 17 #include <linux/nfs_xdr.h> 18 19 #include "delegation.h" 20 21 static struct nfs_delegation *nfs_alloc_delegation(void)
··· 16 #include <linux/nfs_fs.h> 17 #include <linux/nfs_xdr.h> 18 19 + #include "nfs4_fs.h" 20 #include "delegation.h" 21 22 static struct nfs_delegation *nfs_alloc_delegation(void)
+130 -30
fs/nfs/dir.c
··· 32 #include <linux/smp_lock.h> 33 #include <linux/namei.h> 34 35 #include "delegation.h" 36 37 #define NFS_PARANOIA 1 ··· 51 static int nfs_rename(struct inode *, struct dentry *, 52 struct inode *, struct dentry *); 53 static int nfs_fsync_dir(struct file *, struct dentry *, int); 54 55 struct file_operations nfs_dir_operations = { 56 .read = generic_read_dir, 57 .readdir = nfs_readdir, 58 .open = nfs_opendir, ··· 77 .setattr = nfs_setattr, 78 }; 79 80 #ifdef CONFIG_NFS_V4 81 82 static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *); ··· 114 .permission = nfs_permission, 115 .getattr = nfs_getattr, 116 .setattr = nfs_setattr, 117 }; 118 119 #endif /* CONFIG_NFS_V4 */ ··· 143 struct page *page; 144 unsigned long page_index; 145 u32 *ptr; 146 - u64 target; 147 struct nfs_entry *entry; 148 decode_dirent_t decode; 149 int plus; ··· 192 NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME; 193 /* Ensure consistent page alignment of the data. 194 * Note: assumes we have exclusive access to this mapping either 195 - * throught inode->i_sem or some other mechanism. 196 */ 197 - if (page->index == 0) { 198 - invalidate_inode_pages(inode->i_mapping); 199 - NFS_I(inode)->readdir_timestamp = timestamp; 200 - } 201 unlock_page(page); 202 return 0; 203 error: ··· 228 229 /* 230 * Given a pointer to a buffer that has already been filled by a call 231 - * to readdir, find the next entry. 232 * 233 * If the end of the buffer has been reached, return -EAGAIN, if not, 234 * return the offset within the buffer of the next entry to be 235 * read. 
236 */ 237 static inline 238 - int find_dirent(nfs_readdir_descriptor_t *desc, struct page *page) 239 { 240 struct nfs_entry *entry = desc->entry; 241 int loop_count = 0, 242 status; 243 244 while((status = dir_decode(desc)) == 0) { 245 - dfprintk(VFS, "NFS: found cookie %Lu\n", (long long)entry->cookie); 246 - if (entry->prev_cookie == desc->target) 247 break; 248 if (loop_count++ > 200) { 249 loop_count = 0; ··· 255 } 256 257 /* 258 - * Find the given page, and call find_dirent() in order to try to 259 - * return the next entry. 260 */ 261 static inline 262 int find_dirent_page(nfs_readdir_descriptor_t *desc) ··· 315 /* NOTE: Someone else may have changed the READDIRPLUS flag */ 316 desc->page = page; 317 desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ 318 - status = find_dirent(desc, page); 319 if (status < 0) 320 dir_page_release(desc); 321 out: ··· 333 * Recurse through the page cache pages, and return a 334 * filled nfs_entry structure of the next directory entry if possible. 335 * 336 - * The target for the search is 'desc->target'. 
337 */ 338 static inline 339 int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) ··· 342 int loop_count = 0; 343 int res; 344 345 - dfprintk(VFS, "NFS: readdir_search_pagecache() searching for cookie %Lu\n", (long long)desc->target); 346 for (;;) { 347 res = find_dirent_page(desc); 348 if (res != -EAGAIN) ··· 388 int loop_count = 0, 389 res; 390 391 - dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)desc->target); 392 393 for(;;) { 394 unsigned d_type = DT_UNKNOWN; ··· 408 } 409 410 res = filldir(dirent, entry->name, entry->len, 411 - entry->prev_cookie, fileid, d_type); 412 if (res < 0) 413 break; 414 - file->f_pos = desc->target = entry->cookie; 415 if (dir_decode(desc) != 0) { 416 desc->page_index ++; 417 break; ··· 425 dir_page_release(desc); 426 if (dentry != NULL) 427 dput(dentry); 428 - dfprintk(VFS, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (long long)desc->target, res); 429 return res; 430 } 431 ··· 451 struct page *page = NULL; 452 int status; 453 454 - dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n", (long long)desc->target); 455 456 page = alloc_page(GFP_HIGHUSER); 457 if (!page) { 458 status = -ENOMEM; 459 goto out; 460 } 461 - desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->target, 462 page, 463 NFS_SERVER(inode)->dtsize, 464 desc->plus); ··· 467 desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ 468 if (desc->error >= 0) { 469 if ((status = dir_decode(desc)) == 0) 470 - desc->entry->prev_cookie = desc->target; 471 } else 472 status = -EIO; 473 if (status < 0) ··· 488 goto out; 489 } 490 491 - /* The file offset position is now represented as a true offset into the 492 - * page cache as is the case in most of the other filesystems. 493 */ 494 static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) 495 { ··· 512 } 513 514 /* 515 - * filp->f_pos points to the file offset in the page cache. 
516 - * but if the cache has meanwhile been zapped, we need to 517 - * read from the last dirent to revalidate f_pos 518 - * itself. 519 */ 520 memset(desc, 0, sizeof(*desc)); 521 522 desc->file = filp; 523 - desc->target = filp->f_pos; 524 desc->decode = NFS_PROTO(inode)->decode_dirent; 525 desc->plus = NFS_USE_READDIRPLUS(inode); 526 ··· 532 533 while(!desc->entry->eof) { 534 res = readdir_search_pagecache(desc); 535 if (res == -EBADCOOKIE) { 536 /* This means either end of directory */ 537 - if (desc->entry->cookie != desc->target) { 538 /* Or that the server has 'lost' a cookie */ 539 res = uncached_readdir(desc, dirent, filldir); 540 if (res >= 0) ··· 566 if (res < 0) 567 return res; 568 return 0; 569 } 570 571 /*
··· 32 #include <linux/smp_lock.h> 33 #include <linux/namei.h> 34 35 + #include "nfs4_fs.h" 36 #include "delegation.h" 37 38 #define NFS_PARANOIA 1 ··· 50 static int nfs_rename(struct inode *, struct dentry *, 51 struct inode *, struct dentry *); 52 static int nfs_fsync_dir(struct file *, struct dentry *, int); 53 + static loff_t nfs_llseek_dir(struct file *, loff_t, int); 54 55 struct file_operations nfs_dir_operations = { 56 + .llseek = nfs_llseek_dir, 57 .read = generic_read_dir, 58 .readdir = nfs_readdir, 59 .open = nfs_opendir, ··· 74 .setattr = nfs_setattr, 75 }; 76 77 + #ifdef CONFIG_NFS_V3 78 + struct inode_operations nfs3_dir_inode_operations = { 79 + .create = nfs_create, 80 + .lookup = nfs_lookup, 81 + .link = nfs_link, 82 + .unlink = nfs_unlink, 83 + .symlink = nfs_symlink, 84 + .mkdir = nfs_mkdir, 85 + .rmdir = nfs_rmdir, 86 + .mknod = nfs_mknod, 87 + .rename = nfs_rename, 88 + .permission = nfs_permission, 89 + .getattr = nfs_getattr, 90 + .setattr = nfs_setattr, 91 + .listxattr = nfs3_listxattr, 92 + .getxattr = nfs3_getxattr, 93 + .setxattr = nfs3_setxattr, 94 + .removexattr = nfs3_removexattr, 95 + }; 96 + #endif /* CONFIG_NFS_V3 */ 97 + 98 #ifdef CONFIG_NFS_V4 99 100 static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *); ··· 90 .permission = nfs_permission, 91 .getattr = nfs_getattr, 92 .setattr = nfs_setattr, 93 + .getxattr = nfs4_getxattr, 94 + .setxattr = nfs4_setxattr, 95 + .listxattr = nfs4_listxattr, 96 }; 97 98 #endif /* CONFIG_NFS_V4 */ ··· 116 struct page *page; 117 unsigned long page_index; 118 u32 *ptr; 119 + u64 *dir_cookie; 120 + loff_t current_index; 121 struct nfs_entry *entry; 122 decode_dirent_t decode; 123 int plus; ··· 164 NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME; 165 /* Ensure consistent page alignment of the data. 166 * Note: assumes we have exclusive access to this mapping either 167 + * through inode->i_sem or some other mechanism. 
168 */ 169 + if (page->index == 0) 170 + invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1); 171 unlock_page(page); 172 return 0; 173 error: ··· 202 203 /* 204 * Given a pointer to a buffer that has already been filled by a call 205 + * to readdir, find the next entry with cookie '*desc->dir_cookie'. 206 * 207 * If the end of the buffer has been reached, return -EAGAIN, if not, 208 * return the offset within the buffer of the next entry to be 209 * read. 210 */ 211 static inline 212 + int find_dirent(nfs_readdir_descriptor_t *desc) 213 { 214 struct nfs_entry *entry = desc->entry; 215 int loop_count = 0, 216 status; 217 218 while((status = dir_decode(desc)) == 0) { 219 + dfprintk(VFS, "NFS: found cookie %Lu\n", (unsigned long long)entry->cookie); 220 + if (entry->prev_cookie == *desc->dir_cookie) 221 break; 222 if (loop_count++ > 200) { 223 loop_count = 0; ··· 229 } 230 231 /* 232 + * Given a pointer to a buffer that has already been filled by a call 233 + * to readdir, find the entry at offset 'desc->file->f_pos'. 234 + * 235 + * If the end of the buffer has been reached, return -EAGAIN, if not, 236 + * return the offset within the buffer of the next entry to be 237 + * read. 
238 + */ 239 + static inline 240 + int find_dirent_index(nfs_readdir_descriptor_t *desc) 241 + { 242 + struct nfs_entry *entry = desc->entry; 243 + int loop_count = 0, 244 + status; 245 + 246 + for(;;) { 247 + status = dir_decode(desc); 248 + if (status) 249 + break; 250 + 251 + dfprintk(VFS, "NFS: found cookie %Lu at index %Ld\n", (unsigned long long)entry->cookie, desc->current_index); 252 + 253 + if (desc->file->f_pos == desc->current_index) { 254 + *desc->dir_cookie = entry->cookie; 255 + break; 256 + } 257 + desc->current_index++; 258 + if (loop_count++ > 200) { 259 + loop_count = 0; 260 + schedule(); 261 + } 262 + } 263 + dfprintk(VFS, "NFS: find_dirent_index() returns %d\n", status); 264 + return status; 265 + } 266 + 267 + /* 268 + * Find the given page, and call find_dirent() or find_dirent_index in 269 + * order to try to return the next entry. 270 */ 271 static inline 272 int find_dirent_page(nfs_readdir_descriptor_t *desc) ··· 253 /* NOTE: Someone else may have changed the READDIRPLUS flag */ 254 desc->page = page; 255 desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ 256 + if (*desc->dir_cookie != 0) 257 + status = find_dirent(desc); 258 + else 259 + status = find_dirent_index(desc); 260 if (status < 0) 261 dir_page_release(desc); 262 out: ··· 268 * Recurse through the page cache pages, and return a 269 * filled nfs_entry structure of the next directory entry if possible. 
270 * 271 + * The target for the search is '*desc->dir_cookie' if non-0, 272 + * 'desc->file->f_pos' otherwise 273 */ 274 static inline 275 int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) ··· 276 int loop_count = 0; 277 int res; 278 279 + /* Always search-by-index from the beginning of the cache */ 280 + if (*desc->dir_cookie == 0) { 281 + dfprintk(VFS, "NFS: readdir_search_pagecache() searching for offset %Ld\n", (long long)desc->file->f_pos); 282 + desc->page_index = 0; 283 + desc->entry->cookie = desc->entry->prev_cookie = 0; 284 + desc->entry->eof = 0; 285 + desc->current_index = 0; 286 + } else 287 + dfprintk(VFS, "NFS: readdir_search_pagecache() searching for cookie %Lu\n", (unsigned long long)*desc->dir_cookie); 288 + 289 for (;;) { 290 res = find_dirent_page(desc); 291 if (res != -EAGAIN) ··· 313 int loop_count = 0, 314 res; 315 316 + dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)entry->cookie); 317 318 for(;;) { 319 unsigned d_type = DT_UNKNOWN; ··· 333 } 334 335 res = filldir(dirent, entry->name, entry->len, 336 + file->f_pos, fileid, d_type); 337 if (res < 0) 338 break; 339 + file->f_pos++; 340 + *desc->dir_cookie = entry->cookie; 341 if (dir_decode(desc) != 0) { 342 desc->page_index ++; 343 break; ··· 349 dir_page_release(desc); 350 if (dentry != NULL) 351 dput(dentry); 352 + dfprintk(VFS, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (unsigned long long)*desc->dir_cookie, res); 353 return res; 354 } 355 ··· 375 struct page *page = NULL; 376 int status; 377 378 + dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n", (unsigned long long)*desc->dir_cookie); 379 380 page = alloc_page(GFP_HIGHUSER); 381 if (!page) { 382 status = -ENOMEM; 383 goto out; 384 } 385 + desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, *desc->dir_cookie, 386 page, 387 NFS_SERVER(inode)->dtsize, 388 desc->plus); ··· 391 desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ 
392 if (desc->error >= 0) { 393 if ((status = dir_decode(desc)) == 0) 394 + desc->entry->prev_cookie = *desc->dir_cookie; 395 } else 396 status = -EIO; 397 if (status < 0) ··· 412 goto out; 413 } 414 415 + /* The file offset position represents the dirent entry number. A 416 + last cookie cache takes care of the common case of reading the 417 + whole directory. 418 */ 419 static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) 420 { ··· 435 } 436 437 /* 438 + * filp->f_pos points to the dirent entry number. 439 + * *desc->dir_cookie has the cookie for the next entry. We have 440 + * to either find the entry with the appropriate number or 441 + * revalidate the cookie. 442 */ 443 memset(desc, 0, sizeof(*desc)); 444 445 desc->file = filp; 446 + desc->dir_cookie = &((struct nfs_open_context *)filp->private_data)->dir_cookie; 447 desc->decode = NFS_PROTO(inode)->decode_dirent; 448 desc->plus = NFS_USE_READDIRPLUS(inode); 449 ··· 455 456 while(!desc->entry->eof) { 457 res = readdir_search_pagecache(desc); 458 + 459 if (res == -EBADCOOKIE) { 460 /* This means either end of directory */ 461 + if (*desc->dir_cookie && desc->entry->cookie != *desc->dir_cookie) { 462 /* Or that the server has 'lost' a cookie */ 463 res = uncached_readdir(desc, dirent, filldir); 464 if (res >= 0) ··· 488 if (res < 0) 489 return res; 490 return 0; 491 + } 492 + 493 + loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin) 494 + { 495 + down(&filp->f_dentry->d_inode->i_sem); 496 + switch (origin) { 497 + case 1: 498 + offset += filp->f_pos; 499 + case 0: 500 + if (offset >= 0) 501 + break; 502 + default: 503 + offset = -EINVAL; 504 + goto out; 505 + } 506 + if (offset != filp->f_pos) { 507 + filp->f_pos = offset; 508 + ((struct nfs_open_context *)filp->private_data)->dir_cookie = 0; 509 + } 510 + out: 511 + up(&filp->f_dentry->d_inode->i_sem); 512 + return offset; 513 } 514 515 /*
+1 -1
fs/nfs/direct.c
··· 517 result = tot_bytes; 518 519 out: 520 - nfs_end_data_update_defer(inode); 521 nfs_writedata_free(wdata); 522 return result; 523
··· 517 result = tot_bytes; 518 519 out: 520 + nfs_end_data_update(inode); 521 nfs_writedata_free(wdata); 522 return result; 523
+41 -7
fs/nfs/file.c
··· 71 .setattr = nfs_setattr, 72 }; 73 74 /* Hack for future NFS swap support */ 75 #ifndef IS_SWAPFILE 76 # define IS_SWAPFILE(inode) (0) ··· 128 } 129 130 /** 131 * nfs_revalidate_size - Revalidate the file size 132 * @inode - pointer to inode struct 133 * @file - pointer to struct file ··· 164 goto force_reval; 165 if (nfsi->npages != 0) 166 return 0; 167 - return nfs_revalidate_inode(server, inode); 168 force_reval: 169 return __nfs_revalidate_inode(server, inode); 170 } ··· 226 dentry->d_parent->d_name.name, dentry->d_name.name, 227 (unsigned long) count, (unsigned long) pos); 228 229 - result = nfs_revalidate_inode(NFS_SERVER(inode), inode); 230 if (!result) 231 result = generic_file_aio_read(iocb, buf, count, pos); 232 return result; ··· 244 dentry->d_parent->d_name.name, dentry->d_name.name, 245 (unsigned long) count, (unsigned long long) *ppos); 246 247 - res = nfs_revalidate_inode(NFS_SERVER(inode), inode); 248 if (!res) 249 res = generic_file_sendfile(filp, ppos, count, actor, target); 250 return res; ··· 260 dfprintk(VFS, "nfs: mmap(%s/%s)\n", 261 dentry->d_parent->d_name.name, dentry->d_name.name); 262 263 - status = nfs_revalidate_inode(NFS_SERVER(inode), inode); 264 if (!status) 265 status = generic_file_mmap(file, vma); 266 return status; ··· 349 result = -EBUSY; 350 if (IS_SWAPFILE(inode)) 351 goto out_swapfile; 352 - result = nfs_revalidate_inode(NFS_SERVER(inode), inode); 353 - if (result) 354 - goto out; 355 356 result = count; 357 if (!count)
··· 71 .setattr = nfs_setattr, 72 }; 73 74 + #ifdef CONFIG_NFS_V3 75 + struct inode_operations nfs3_file_inode_operations = { 76 + .permission = nfs_permission, 77 + .getattr = nfs_getattr, 78 + .setattr = nfs_setattr, 79 + .listxattr = nfs3_listxattr, 80 + .getxattr = nfs3_getxattr, 81 + .setxattr = nfs3_setxattr, 82 + .removexattr = nfs3_removexattr, 83 + }; 84 + #endif /* CONFIG_NFS_v3 */ 85 + 86 /* Hack for future NFS swap support */ 87 #ifndef IS_SWAPFILE 88 # define IS_SWAPFILE(inode) (0) ··· 116 } 117 118 /** 119 + * nfs_revalidate_file - Revalidate the page cache & related metadata 120 + * @inode - pointer to inode struct 121 + * @file - pointer to file 122 + */ 123 + static int nfs_revalidate_file(struct inode *inode, struct file *filp) 124 + { 125 + int retval = 0; 126 + 127 + if ((NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode)) 128 + retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode); 129 + nfs_revalidate_mapping(inode, filp->f_mapping); 130 + return 0; 131 + } 132 + 133 + /** 134 * nfs_revalidate_size - Revalidate the file size 135 * @inode - pointer to inode struct 136 * @file - pointer to struct file ··· 137 goto force_reval; 138 if (nfsi->npages != 0) 139 return 0; 140 + if (!(NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode)) 141 + return 0; 142 force_reval: 143 return __nfs_revalidate_inode(server, inode); 144 } ··· 198 dentry->d_parent->d_name.name, dentry->d_name.name, 199 (unsigned long) count, (unsigned long) pos); 200 201 + result = nfs_revalidate_file(inode, iocb->ki_filp); 202 if (!result) 203 result = generic_file_aio_read(iocb, buf, count, pos); 204 return result; ··· 216 dentry->d_parent->d_name.name, dentry->d_name.name, 217 (unsigned long) count, (unsigned long long) *ppos); 218 219 + res = nfs_revalidate_file(inode, filp); 220 if (!res) 221 res = generic_file_sendfile(filp, ppos, count, actor, target); 222 return res; ··· 232 dfprintk(VFS, "nfs: mmap(%s/%s)\n", 233 
dentry->d_parent->d_name.name, dentry->d_name.name); 234 235 + status = nfs_revalidate_file(inode, file); 236 if (!status) 237 status = generic_file_mmap(file, vma); 238 return status; ··· 321 result = -EBUSY; 322 if (IS_SWAPFILE(inode)) 323 goto out_swapfile; 324 + /* 325 + * O_APPEND implies that we must revalidate the file length. 326 + */ 327 + if (iocb->ki_filp->f_flags & O_APPEND) { 328 + result = nfs_revalidate_file_size(inode, iocb->ki_filp); 329 + if (result) 330 + goto out; 331 + } 332 + nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); 333 334 result = count; 335 if (!count)
+1
fs/nfs/idmap.c
··· 50 #include <linux/nfs_fs.h> 51 52 #include <linux/nfs_idmap.h> 53 54 #define IDMAP_HASH_SZ 128 55
··· 50 #include <linux/nfs_fs.h> 51 52 #include <linux/nfs_idmap.h> 53 + #include "nfs4_fs.h" 54 55 #define IDMAP_HASH_SZ 128 56
+255 -172
fs/nfs/inode.c
··· 39 #include <asm/system.h> 40 #include <asm/uaccess.h> 41 42 #include "delegation.h" 43 44 #define NFSDBG_FACILITY NFSDBG_VFS ··· 64 static void nfs_umount_begin(struct super_block *); 65 static int nfs_statfs(struct super_block *, struct kstatfs *); 66 static int nfs_show_options(struct seq_file *, struct vfsmount *); 67 68 static struct rpc_program nfs_program; 69 ··· 108 .pipe_dir_name = "/nfs", 109 }; 110 111 static inline unsigned long 112 nfs_fattr_to_ino_t(struct nfs_fattr *fattr) 113 { ··· 135 int flags = sync ? FLUSH_WAIT : 0; 136 int ret; 137 138 - ret = nfs_commit_inode(inode, 0, 0, flags); 139 if (ret < 0) 140 return ret; 141 return 0; ··· 157 clear_inode(inode); 158 } 159 160 - /* 161 - * For the moment, the only task for the NFS clear_inode method is to 162 - * release the mmap credential 163 - */ 164 static void 165 nfs_clear_inode(struct inode *inode) 166 { ··· 165 166 nfs_wb_all(inode); 167 BUG_ON (!list_empty(&nfsi->open_files)); 168 cred = nfsi->cache_access.cred; 169 if (cred) 170 put_rpccred(cred); ··· 175 void 176 nfs_umount_begin(struct super_block *sb) 177 { 178 - struct nfs_server *server = NFS_SB(sb); 179 - struct rpc_clnt *rpc; 180 181 /* -EIO all pending I/O */ 182 - if ((rpc = server->client) != NULL) 183 rpc_killall_tasks(rpc); 184 } 185 ··· 382 xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP, 383 &server->addr, &timeparms); 384 if (IS_ERR(xprt)) { 385 - printk(KERN_WARNING "NFS: cannot create RPC transport.\n"); 386 return (struct rpc_clnt *)xprt; 387 } 388 clnt = rpc_create_client(xprt, server->hostname, &nfs_program, 389 server->rpc_ops->version, data->pseudoflavor); 390 if (IS_ERR(clnt)) { 391 - printk(KERN_WARNING "NFS: cannot create RPC client.\n"); 392 goto out_fail; 393 } 394 ··· 401 return clnt; 402 403 out_fail: 404 - xprt_destroy(xprt); 405 return clnt; 406 } 407 ··· 444 445 /* Check NFS protocol revision and initialize RPC op vector 446 * and file handle pool. 
*/ 447 - if (server->flags & NFS_MOUNT_VER3) { 448 #ifdef CONFIG_NFS_V3 449 server->rpc_ops = &nfs_v3_clientops; 450 server->caps |= NFS_CAP_READDIRPLUS; 451 - if (data->version < 4) { 452 - printk(KERN_NOTICE "NFS: NFSv3 not supported by mount program.\n"); 453 - return -EIO; 454 - } 455 - #else 456 - printk(KERN_NOTICE "NFS: NFSv3 not supported.\n"); 457 - return -EIO; 458 - #endif 459 } else { 460 server->rpc_ops = &nfs_v2_clientops; 461 } 462 463 /* Fill in pseudoflavor for mount version < 5 */ 464 if (!(data->flags & NFS_MOUNT_SECFLAVOUR)) ··· 467 return PTR_ERR(server->client); 468 /* RFC 2623, sec 2.3.2 */ 469 if (authflavor != RPC_AUTH_UNIX) { 470 server->client_sys = rpc_clone_client(server->client); 471 if (IS_ERR(server->client_sys)) 472 return PTR_ERR(server->client_sys); 473 - if (!rpcauth_create(RPC_AUTH_UNIX, server->client_sys)) 474 - return -ENOMEM; 475 } else { 476 atomic_inc(&server->client->cl_count); 477 server->client_sys = server->client; 478 } 479 - 480 if (server->flags & NFS_MOUNT_VER3) { 481 if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) 482 server->namelen = NFS3_MAXNAMLEN; 483 sb->s_time_gran = 1; ··· 578 { NFS_MOUNT_NOCTO, ",nocto", "" }, 579 { NFS_MOUNT_NOAC, ",noac", "" }, 580 { NFS_MOUNT_NONLM, ",nolock", ",lock" }, 581 { 0, NULL, NULL } 582 }; 583 struct proc_nfs_info *nfs_infop; ··· 620 621 memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); 622 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) 623 - nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS; 624 else 625 - nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; 626 } 627 628 /* ··· 729 /* Why so? Because we want revalidate for devices/FIFOs, and 730 * that's precisely what we have in nfs_file_inode_operations. 
731 */ 732 - inode->i_op = &nfs_file_inode_operations; 733 if (S_ISREG(inode->i_mode)) { 734 inode->i_fop = &nfs_file_operations; 735 inode->i_data.a_ops = &nfs_file_aops; ··· 832 } 833 } 834 if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 835 - NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS; 836 nfs_end_data_update(inode); 837 unlock_kernel(); 838 return error; ··· 891 ctx->state = NULL; 892 ctx->lockowner = current->files; 893 ctx->error = 0; 894 - init_waitqueue_head(&ctx->waitq); 895 } 896 return ctx; 897 } ··· 1055 goto out; 1056 } 1057 flags = nfsi->flags; 1058 /* 1059 * We may need to keep the attributes marked as invalid if 1060 * we raced with nfs_end_attr_update(). ··· 1063 if (verifier == nfsi->cache_change_attribute) 1064 nfsi->flags &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME); 1065 /* Do the page cache invalidation */ 1066 - if (flags & NFS_INO_INVALID_DATA) { 1067 - if (S_ISREG(inode->i_mode)) { 1068 - if (filemap_fdatawrite(inode->i_mapping) == 0) 1069 - filemap_fdatawait(inode->i_mapping); 1070 - nfs_wb_all(inode); 1071 - } 1072 - nfsi->flags &= ~NFS_INO_INVALID_DATA; 1073 - invalidate_inode_pages2(inode->i_mapping); 1074 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); 1075 - dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n", 1076 - inode->i_sb->s_id, 1077 - (long long)NFS_FILEID(inode)); 1078 - /* This ensures we revalidate dentries */ 1079 - nfsi->cache_change_attribute++; 1080 - } 1081 dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n", 1082 inode->i_sb->s_id, 1083 (long long)NFS_FILEID(inode)); ··· 1103 } 1104 1105 /** 1106 * nfs_begin_data_update 1107 * @inode - pointer to inode 1108 * Declare that a set of operations will update file data on the server ··· 1163 } 1164 1165 /** 1166 - * nfs_end_data_update_defer 1167 - * @inode - pointer to inode 1168 - * Declare end of the operations that will update file data 1169 - * This will defer marking the inode as needing revalidation 1170 - * unless 
there are no other pending updates. 1171 - */ 1172 - void nfs_end_data_update_defer(struct inode *inode) 1173 - { 1174 - struct nfs_inode *nfsi = NFS_I(inode); 1175 - 1176 - if (atomic_dec_and_test(&nfsi->data_updates)) { 1177 - /* Mark the attribute cache for revalidation */ 1178 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1179 - /* Directories and symlinks: invalidate page cache too */ 1180 - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 1181 - nfsi->flags |= NFS_INO_INVALID_DATA; 1182 - nfsi->cache_change_attribute ++; 1183 - } 1184 - } 1185 - 1186 - /** 1187 * nfs_refresh_inode - verify consistency of the inode attribute cache 1188 * @inode - pointer to inode 1189 * @fattr - updated attributes ··· 1188 if ((fattr->valid & NFS_ATTR_PRE_CHANGE) != 0 1189 && nfsi->change_attr == fattr->pre_change_attr) 1190 nfsi->change_attr = fattr->change_attr; 1191 - if (!data_unstable && nfsi->change_attr != fattr->change_attr) 1192 nfsi->flags |= NFS_INO_INVALID_ATTR; 1193 } 1194 1195 if ((fattr->valid & NFS_ATTR_FATTR) == 0) ··· 1215 } 1216 1217 /* Verify a few of the more important attributes */ 1218 - if (!data_unstable) { 1219 - if (!timespec_equal(&inode->i_mtime, &fattr->mtime) 1220 - || cur_size != new_isize) 1221 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1222 - } else if (S_ISREG(inode->i_mode) && new_isize > cur_size) 1223 - nfsi->flags |= NFS_INO_INVALID_ATTR; 1224 1225 /* Have any file permissions changed? */ 1226 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) 1227 || inode->i_uid != fattr->uid 1228 || inode->i_gid != fattr->gid) 1229 - nfsi->flags |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS; 1230 1231 /* Has the link count changed? 
*/ 1232 if (inode->i_nlink != fattr->nlink) ··· 1258 static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsigned long verifier) 1259 { 1260 struct nfs_inode *nfsi = NFS_I(inode); 1261 - __u64 new_size; 1262 - loff_t new_isize; 1263 unsigned int invalid = 0; 1264 - loff_t cur_isize; 1265 int data_unstable; 1266 1267 dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n", ··· 1292 /* Are we racing with known updates of the metadata on the server? */ 1293 data_unstable = ! nfs_verify_change_attribute(inode, verifier); 1294 1295 - /* Check if the file size agrees */ 1296 - new_size = fattr->size; 1297 new_isize = nfs_size_to_loff_t(fattr->size); 1298 cur_isize = i_size_read(inode); 1299 - if (cur_isize != new_size) { 1300 - #ifdef NFS_DEBUG_VERBOSE 1301 - printk(KERN_DEBUG "NFS: isize change on %s/%ld\n", inode->i_sb->s_id, inode->i_ino); 1302 - #endif 1303 - /* 1304 - * If we have pending writebacks, things can get 1305 - * messy. 1306 - */ 1307 - if (S_ISREG(inode->i_mode) && data_unstable) { 1308 - if (new_isize > cur_isize) { 1309 inode->i_size = new_isize; 1310 - invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1311 } 1312 - } else { 1313 inode->i_size = new_isize; 1314 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1315 } 1316 } 1317 1318 - /* 1319 - * Note: we don't check inode->i_mtime since pipes etc. 1320 - * can change this value in VFS without requiring a 1321 - * cache revalidation. 
1322 - */ 1323 if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) { 1324 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); 1325 - #ifdef NFS_DEBUG_VERBOSE 1326 - printk(KERN_DEBUG "NFS: mtime change on %s/%ld\n", inode->i_sb->s_id, inode->i_ino); 1327 - #endif 1328 if (!data_unstable) 1329 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1330 } 1331 1332 if ((fattr->valid & NFS_ATTR_FATTR_V4) 1333 && nfsi->change_attr != fattr->change_attr) { 1334 - #ifdef NFS_DEBUG_VERBOSE 1335 - printk(KERN_DEBUG "NFS: change_attr change on %s/%ld\n", 1336 inode->i_sb->s_id, inode->i_ino); 1337 - #endif 1338 nfsi->change_attr = fattr->change_attr; 1339 if (!data_unstable) 1340 - invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS; 1341 } 1342 1343 - memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); 1344 memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); 1345 1346 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) || 1347 inode->i_uid != fattr->uid || 1348 inode->i_gid != fattr->gid) 1349 - invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; 1350 1351 inode->i_mode = fattr->mode; 1352 inode->i_nlink = fattr->nlink; ··· 1421 int flags, const char *dev_name, void *raw_data) 1422 { 1423 int error; 1424 - struct nfs_server *server; 1425 struct super_block *s; 1426 struct nfs_fh *root; 1427 struct nfs_mount_data *data = raw_data; 1428 1429 - if (!data) { 1430 - printk("nfs_read_super: missing data argument\n"); 1431 - return ERR_PTR(-EINVAL); 1432 } 1433 1434 server = kmalloc(sizeof(struct nfs_server), GFP_KERNEL); 1435 if (!server) 1436 - return ERR_PTR(-ENOMEM); 1437 memset(server, 0, sizeof(struct nfs_server)); 1438 /* Zero out the NFS state stuff */ 1439 init_nfsv4_state(server); 1440 - 1441 - if (data->version != NFS_MOUNT_VERSION) { 1442 - printk("nfs warning: mount version %s than kernel\n", 1443 - data->version < NFS_MOUNT_VERSION ? 
"older" : "newer"); 1444 - if (data->version < 2) 1445 - data->namlen = 0; 1446 - if (data->version < 3) 1447 - data->bsize = 0; 1448 - if (data->version < 4) { 1449 - data->flags &= ~NFS_MOUNT_VER3; 1450 - data->root.size = NFS2_FHSIZE; 1451 - memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); 1452 - } 1453 - if (data->version < 5) 1454 - data->flags &= ~NFS_MOUNT_SECFLAVOUR; 1455 - } 1456 1457 root = &server->fh; 1458 if (data->flags & NFS_MOUNT_VER3) 1459 root->size = data->root.size; 1460 else 1461 root->size = NFS2_FHSIZE; 1462 if (root->size > sizeof(root->data)) { 1463 - printk("nfs_get_sb: invalid root filehandle\n"); 1464 - kfree(server); 1465 - return ERR_PTR(-EINVAL); 1466 } 1467 memcpy(root->data, data->root.data, root->size); 1468 1469 /* We now require that the mount process passes the remote address */ 1470 memcpy(&server->addr, &data->addr, sizeof(server->addr)); 1471 if (server->addr.sin_addr.s_addr == INADDR_ANY) { 1472 - printk("NFS: mount program didn't pass remote address!\n"); 1473 - kfree(server); 1474 - return ERR_PTR(-EINVAL); 1475 } 1476 1477 s = sget(fs_type, nfs_compare_super, nfs_set_super, server); 1478 - 1479 - if (IS_ERR(s) || s->s_root) { 1480 - kfree(server); 1481 - return s; 1482 - } 1483 1484 s->s_flags = flags; 1485 - 1486 - /* Fire up rpciod if not yet running */ 1487 - if (rpciod_up() != 0) { 1488 - printk(KERN_WARNING "NFS: couldn't start rpciod!\n"); 1489 - kfree(server); 1490 - return ERR_PTR(-EIO); 1491 - } 1492 1493 error = nfs_fill_super(s, data, flags & MS_VERBOSE ? 
1 : 0); 1494 if (error) { ··· 1519 } 1520 s->s_flags |= MS_ACTIVE; 1521 return s; 1522 } 1523 1524 static void nfs_kill_super(struct super_block *s) ··· 1532 1533 kill_anon_super(s); 1534 1535 - if (server->client != NULL && !IS_ERR(server->client)) 1536 rpc_shutdown_client(server->client); 1537 - if (server->client_sys != NULL && !IS_ERR(server->client_sys)) 1538 rpc_shutdown_client(server->client_sys); 1539 1540 if (!(server->flags & NFS_MOUNT_NONLM)) 1541 lockd_down(); /* release rpc.lockd */ ··· 1658 1659 clp = nfs4_get_client(&server->addr.sin_addr); 1660 if (!clp) { 1661 - printk(KERN_WARNING "NFS: failed to create NFS4 client.\n"); 1662 return -EIO; 1663 } 1664 1665 /* Now create transport and client */ 1666 authflavour = RPC_AUTH_UNIX; 1667 if (data->auth_flavourlen != 0) { 1668 - if (data->auth_flavourlen > 1) 1669 - printk(KERN_INFO "NFS: cannot yet deal with multiple auth flavours.\n"); 1670 if (copy_from_user(&authflavour, data->auth_flavours, sizeof(authflavour))) { 1671 err = -EFAULT; 1672 goto out_fail; ··· 1678 } 1679 1680 down_write(&clp->cl_sem); 1681 - if (clp->cl_rpcclient == NULL) { 1682 xprt = xprt_create_proto(proto, &server->addr, &timeparms); 1683 if (IS_ERR(xprt)) { 1684 up_write(&clp->cl_sem); 1685 - printk(KERN_WARNING "NFS: cannot create RPC transport.\n"); 1686 err = PTR_ERR(xprt); 1687 goto out_fail; 1688 } 1689 clnt = rpc_create_client(xprt, server->hostname, &nfs_program, 1690 server->rpc_ops->version, authflavour); 1691 if (IS_ERR(clnt)) { 1692 up_write(&clp->cl_sem); 1693 - printk(KERN_WARNING "NFS: cannot create RPC client.\n"); 1694 - xprt_destroy(xprt); 1695 err = PTR_ERR(clnt); 1696 goto out_fail; 1697 } 1698 clnt->cl_intr = 1; ··· 1725 clp = NULL; 1726 1727 if (IS_ERR(clnt)) { 1728 - printk(KERN_WARNING "NFS: cannot create RPC client.\n"); 1729 - return PTR_ERR(clnt); 1730 } 1731 1732 server->client = clnt; 1733 1734 if (server->nfs4_state->cl_idmap == NULL) { 1735 - printk(KERN_WARNING "NFS: failed to create idmapper.\n"); 
1736 return -ENOMEM; 1737 } 1738 1739 if (clnt->cl_auth->au_flavor != authflavour) { 1740 - if (rpcauth_create(authflavour, clnt) == NULL) { 1741 - printk(KERN_WARNING "NFS: couldn't create credcache!\n"); 1742 - return -ENOMEM; 1743 } 1744 } 1745 ··· 1804 struct nfs4_mount_data *data = raw_data; 1805 void *p; 1806 1807 - if (!data) { 1808 - printk("nfs_read_super: missing data argument\n"); 1809 return ERR_PTR(-EINVAL); 1810 } 1811 ··· 1819 memset(server, 0, sizeof(struct nfs_server)); 1820 /* Zero out the NFS state stuff */ 1821 init_nfsv4_state(server); 1822 - 1823 - if (data->version != NFS4_MOUNT_VERSION) { 1824 - printk("nfs warning: mount version %s than kernel\n", 1825 - data->version < NFS4_MOUNT_VERSION ? "older" : "newer"); 1826 - } 1827 1828 p = nfs_copy_user_string(NULL, &data->hostname, 256); 1829 if (IS_ERR(p)) ··· 1847 } 1848 if (server->addr.sin_family != AF_INET || 1849 server->addr.sin_addr.s_addr == INADDR_ANY) { 1850 - printk("NFS: mount program didn't pass remote IP address!\n"); 1851 s = ERR_PTR(-EINVAL); 1852 goto out_free; 1853 } 1854 ··· 1867 goto out_free; 1868 1869 s->s_flags = flags; 1870 - 1871 - /* Fire up rpciod if not yet running */ 1872 - if (rpciod_up() != 0) { 1873 - printk(KERN_WARNING "NFS: couldn't start rpciod!\n"); 1874 - s = ERR_PTR(-EIO); 1875 - goto out_free; 1876 - } 1877 1878 error = nfs4_fill_super(s, data, flags & MS_VERBOSE ? 1 : 0); 1879 if (error) { ··· 1951 if (!nfsi) 1952 return NULL; 1953 nfsi->flags = 0; 1954 return &nfsi->vfs_inode; 1955 } 1956
··· 39 #include <asm/system.h> 40 #include <asm/uaccess.h> 41 42 + #include "nfs4_fs.h" 43 #include "delegation.h" 44 45 #define NFSDBG_FACILITY NFSDBG_VFS ··· 63 static void nfs_umount_begin(struct super_block *); 64 static int nfs_statfs(struct super_block *, struct kstatfs *); 65 static int nfs_show_options(struct seq_file *, struct vfsmount *); 66 + static void nfs_zap_acl_cache(struct inode *); 67 68 static struct rpc_program nfs_program; 69 ··· 106 .pipe_dir_name = "/nfs", 107 }; 108 109 + #ifdef CONFIG_NFS_V3_ACL 110 + static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program }; 111 + static struct rpc_version * nfsacl_version[] = { 112 + [3] = &nfsacl_version3, 113 + }; 114 + 115 + struct rpc_program nfsacl_program = { 116 + .name = "nfsacl", 117 + .number = NFS_ACL_PROGRAM, 118 + .nrvers = sizeof(nfsacl_version) / sizeof(nfsacl_version[0]), 119 + .version = nfsacl_version, 120 + .stats = &nfsacl_rpcstat, 121 + }; 122 + #endif /* CONFIG_NFS_V3_ACL */ 123 + 124 static inline unsigned long 125 nfs_fattr_to_ino_t(struct nfs_fattr *fattr) 126 { ··· 118 int flags = sync ? FLUSH_WAIT : 0; 119 int ret; 120 121 + ret = nfs_commit_inode(inode, flags); 122 if (ret < 0) 123 return ret; 124 return 0; ··· 140 clear_inode(inode); 141 } 142 143 static void 144 nfs_clear_inode(struct inode *inode) 145 { ··· 152 153 nfs_wb_all(inode); 154 BUG_ON (!list_empty(&nfsi->open_files)); 155 + nfs_zap_acl_cache(inode); 156 cred = nfsi->cache_access.cred; 157 if (cred) 158 put_rpccred(cred); ··· 161 void 162 nfs_umount_begin(struct super_block *sb) 163 { 164 + struct rpc_clnt *rpc = NFS_SB(sb)->client; 165 166 /* -EIO all pending I/O */ 167 + if (!IS_ERR(rpc)) 168 + rpc_killall_tasks(rpc); 169 + rpc = NFS_SB(sb)->client_acl; 170 + if (!IS_ERR(rpc)) 171 rpc_killall_tasks(rpc); 172 } 173 ··· 366 xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP, 367 &server->addr, &timeparms); 368 if (IS_ERR(xprt)) { 369 + dprintk("%s: cannot create RPC transport. 
Error = %ld\n", 370 + __FUNCTION__, PTR_ERR(xprt)); 371 return (struct rpc_clnt *)xprt; 372 } 373 clnt = rpc_create_client(xprt, server->hostname, &nfs_program, 374 server->rpc_ops->version, data->pseudoflavor); 375 if (IS_ERR(clnt)) { 376 + dprintk("%s: cannot create RPC client. Error = %ld\n", 377 + __FUNCTION__, PTR_ERR(xprt)); 378 goto out_fail; 379 } 380 ··· 383 return clnt; 384 385 out_fail: 386 return clnt; 387 } 388 ··· 427 428 /* Check NFS protocol revision and initialize RPC op vector 429 * and file handle pool. */ 430 #ifdef CONFIG_NFS_V3 431 + if (server->flags & NFS_MOUNT_VER3) { 432 server->rpc_ops = &nfs_v3_clientops; 433 server->caps |= NFS_CAP_READDIRPLUS; 434 } else { 435 server->rpc_ops = &nfs_v2_clientops; 436 } 437 + #else 438 + server->rpc_ops = &nfs_v2_clientops; 439 + #endif 440 441 /* Fill in pseudoflavor for mount version < 5 */ 442 if (!(data->flags & NFS_MOUNT_SECFLAVOUR)) ··· 455 return PTR_ERR(server->client); 456 /* RFC 2623, sec 2.3.2 */ 457 if (authflavor != RPC_AUTH_UNIX) { 458 + struct rpc_auth *auth; 459 + 460 server->client_sys = rpc_clone_client(server->client); 461 if (IS_ERR(server->client_sys)) 462 return PTR_ERR(server->client_sys); 463 + auth = rpcauth_create(RPC_AUTH_UNIX, server->client_sys); 464 + if (IS_ERR(auth)) 465 + return PTR_ERR(auth); 466 } else { 467 atomic_inc(&server->client->cl_count); 468 server->client_sys = server->client; 469 } 470 if (server->flags & NFS_MOUNT_VER3) { 471 + #ifdef CONFIG_NFS_V3_ACL 472 + if (!(server->flags & NFS_MOUNT_NOACL)) { 473 + server->client_acl = rpc_bind_new_program(server->client, &nfsacl_program, 3); 474 + /* No errors! Assume that Sun nfsacls are supported */ 475 + if (!IS_ERR(server->client_acl)) 476 + server->caps |= NFS_CAP_ACLS; 477 + } 478 + #else 479 + server->flags &= ~NFS_MOUNT_NOACL; 480 + #endif /* CONFIG_NFS_V3_ACL */ 481 + /* 482 + * The VFS shouldn't apply the umask to mode bits. We will 483 + * do so ourselves when necessary. 
484 + */ 485 + sb->s_flags |= MS_POSIXACL; 486 if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) 487 server->namelen = NFS3_MAXNAMLEN; 488 sb->s_time_gran = 1; ··· 549 { NFS_MOUNT_NOCTO, ",nocto", "" }, 550 { NFS_MOUNT_NOAC, ",noac", "" }, 551 { NFS_MOUNT_NONLM, ",nolock", ",lock" }, 552 + { NFS_MOUNT_NOACL, ",noacl", "" }, 553 { 0, NULL, NULL } 554 }; 555 struct proc_nfs_info *nfs_infop; ··· 590 591 memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); 592 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) 593 + nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; 594 else 595 + nfsi->flags |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; 596 + } 597 + 598 + static void nfs_zap_acl_cache(struct inode *inode) 599 + { 600 + void (*clear_acl_cache)(struct inode *); 601 + 602 + clear_acl_cache = NFS_PROTO(inode)->clear_acl_cache; 603 + if (clear_acl_cache != NULL) 604 + clear_acl_cache(inode); 605 + NFS_I(inode)->flags &= ~NFS_INO_INVALID_ACL; 606 } 607 608 /* ··· 689 /* Why so? Because we want revalidate for devices/FIFOs, and 690 * that's precisely what we have in nfs_file_inode_operations. 
691 */ 692 + inode->i_op = NFS_SB(sb)->rpc_ops->file_inode_ops; 693 if (S_ISREG(inode->i_mode)) { 694 inode->i_fop = &nfs_file_operations; 695 inode->i_data.a_ops = &nfs_file_aops; ··· 792 } 793 } 794 if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 795 + NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 796 nfs_end_data_update(inode); 797 unlock_kernel(); 798 return error; ··· 851 ctx->state = NULL; 852 ctx->lockowner = current->files; 853 ctx->error = 0; 854 + ctx->dir_cookie = 0; 855 } 856 return ctx; 857 } ··· 1015 goto out; 1016 } 1017 flags = nfsi->flags; 1018 + nfsi->flags &= ~NFS_INO_REVAL_PAGECACHE; 1019 /* 1020 * We may need to keep the attributes marked as invalid if 1021 * we raced with nfs_end_attr_update(). ··· 1022 if (verifier == nfsi->cache_change_attribute) 1023 nfsi->flags &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME); 1024 /* Do the page cache invalidation */ 1025 + nfs_revalidate_mapping(inode, inode->i_mapping); 1026 + if (flags & NFS_INO_INVALID_ACL) 1027 + nfs_zap_acl_cache(inode); 1028 dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n", 1029 inode->i_sb->s_id, 1030 (long long)NFS_FILEID(inode)); ··· 1074 } 1075 1076 /** 1077 + * nfs_revalidate_mapping - Revalidate the pagecache 1078 + * @inode - pointer to host inode 1079 + * @mapping - pointer to mapping 1080 + */ 1081 + void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) 1082 + { 1083 + struct nfs_inode *nfsi = NFS_I(inode); 1084 + 1085 + if (nfsi->flags & NFS_INO_INVALID_DATA) { 1086 + if (S_ISREG(inode->i_mode)) { 1087 + if (filemap_fdatawrite(mapping) == 0) 1088 + filemap_fdatawait(mapping); 1089 + nfs_wb_all(inode); 1090 + } 1091 + invalidate_inode_pages2(mapping); 1092 + nfsi->flags &= ~NFS_INO_INVALID_DATA; 1093 + if (S_ISDIR(inode->i_mode)) { 1094 + memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); 1095 + /* This ensures we revalidate child dentries */ 1096 + nfsi->cache_change_attribute++; 1097 + } 1098 + 
dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n", 1099 + inode->i_sb->s_id, 1100 + (long long)NFS_FILEID(inode)); 1101 + } 1102 + } 1103 + 1104 + /** 1105 * nfs_begin_data_update 1106 * @inode - pointer to inode 1107 * Declare that a set of operations will update file data on the server ··· 1106 } 1107 1108 /** 1109 * nfs_refresh_inode - verify consistency of the inode attribute cache 1110 * @inode - pointer to inode 1111 * @fattr - updated attributes ··· 1152 if ((fattr->valid & NFS_ATTR_PRE_CHANGE) != 0 1153 && nfsi->change_attr == fattr->pre_change_attr) 1154 nfsi->change_attr = fattr->change_attr; 1155 + if (nfsi->change_attr != fattr->change_attr) { 1156 nfsi->flags |= NFS_INO_INVALID_ATTR; 1157 + if (!data_unstable) 1158 + nfsi->flags |= NFS_INO_REVAL_PAGECACHE; 1159 + } 1160 } 1161 1162 if ((fattr->valid & NFS_ATTR_FATTR) == 0) ··· 1176 } 1177 1178 /* Verify a few of the more important attributes */ 1179 + if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) { 1180 + nfsi->flags |= NFS_INO_INVALID_ATTR; 1181 + if (!data_unstable) 1182 + nfsi->flags |= NFS_INO_REVAL_PAGECACHE; 1183 + } 1184 + if (cur_size != new_isize) { 1185 + nfsi->flags |= NFS_INO_INVALID_ATTR; 1186 + if (nfsi->npages == 0) 1187 + nfsi->flags |= NFS_INO_REVAL_PAGECACHE; 1188 + } 1189 1190 /* Have any file permissions changed? */ 1191 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) 1192 || inode->i_uid != fattr->uid 1193 || inode->i_gid != fattr->gid) 1194 + nfsi->flags |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL; 1195 1196 /* Has the link count changed? 
*/ 1197 if (inode->i_nlink != fattr->nlink) ··· 1215 static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsigned long verifier) 1216 { 1217 struct nfs_inode *nfsi = NFS_I(inode); 1218 + loff_t cur_isize, new_isize; 1219 unsigned int invalid = 0; 1220 int data_unstable; 1221 1222 dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n", ··· 1251 /* Are we racing with known updates of the metadata on the server? */ 1252 data_unstable = ! nfs_verify_change_attribute(inode, verifier); 1253 1254 + /* Check if our cached file size is stale */ 1255 new_isize = nfs_size_to_loff_t(fattr->size); 1256 cur_isize = i_size_read(inode); 1257 + if (new_isize != cur_isize) { 1258 + /* Do we perhaps have any outstanding writes? */ 1259 + if (nfsi->npages == 0) { 1260 + /* No, but did we race with nfs_end_data_update()? */ 1261 + if (verifier == nfsi->cache_change_attribute) { 1262 inode->i_size = new_isize; 1263 + invalid |= NFS_INO_INVALID_DATA; 1264 } 1265 + invalid |= NFS_INO_INVALID_ATTR; 1266 + } else if (new_isize > cur_isize) { 1267 inode->i_size = new_isize; 1268 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1269 } 1270 + dprintk("NFS: isize change on server for file %s/%ld\n", 1271 + inode->i_sb->s_id, inode->i_ino); 1272 } 1273 1274 + /* Check if the mtime agrees */ 1275 if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) { 1276 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); 1277 + dprintk("NFS: mtime change on server for file %s/%ld\n", 1278 + inode->i_sb->s_id, inode->i_ino); 1279 if (!data_unstable) 1280 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1281 } 1282 1283 if ((fattr->valid & NFS_ATTR_FATTR_V4) 1284 && nfsi->change_attr != fattr->change_attr) { 1285 + dprintk("NFS: change_attr change on server for file %s/%ld\n", 1286 inode->i_sb->s_id, inode->i_ino); 1287 nfsi->change_attr = fattr->change_attr; 1288 if (!data_unstable) 1289 + invalid |= 
NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1290 } 1291 1292 + /* If ctime has changed we should definitely clear access+acl caches */ 1293 + if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) { 1294 + if (!data_unstable) 1295 + invalid |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1296 + memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); 1297 + } 1298 memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); 1299 1300 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) || 1301 inode->i_uid != fattr->uid || 1302 inode->i_gid != fattr->gid) 1303 + invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1304 1305 inode->i_mode = fattr->mode; 1306 inode->i_nlink = fattr->nlink; ··· 1385 int flags, const char *dev_name, void *raw_data) 1386 { 1387 int error; 1388 + struct nfs_server *server = NULL; 1389 struct super_block *s; 1390 struct nfs_fh *root; 1391 struct nfs_mount_data *data = raw_data; 1392 1393 + s = ERR_PTR(-EINVAL); 1394 + if (data == NULL) { 1395 + dprintk("%s: missing data argument\n", __FUNCTION__); 1396 + goto out_err; 1397 } 1398 + if (data->version <= 0 || data->version > NFS_MOUNT_VERSION) { 1399 + dprintk("%s: bad mount version\n", __FUNCTION__); 1400 + goto out_err; 1401 + } 1402 + switch (data->version) { 1403 + case 1: 1404 + data->namlen = 0; 1405 + case 2: 1406 + data->bsize = 0; 1407 + case 3: 1408 + if (data->flags & NFS_MOUNT_VER3) { 1409 + dprintk("%s: mount structure version %d does not support NFSv3\n", 1410 + __FUNCTION__, 1411 + data->version); 1412 + goto out_err; 1413 + } 1414 + data->root.size = NFS2_FHSIZE; 1415 + memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); 1416 + case 4: 1417 + if (data->flags & NFS_MOUNT_SECFLAVOUR) { 1418 + dprintk("%s: mount structure version %d does not support strong security\n", 1419 + __FUNCTION__, 1420 + data->version); 1421 + goto out_err; 1422 + } 1423 + case 5: 1424 + memset(data->context, 0, 
sizeof(data->context)); 1425 + } 1426 + #ifndef CONFIG_NFS_V3 1427 + /* If NFSv3 is not compiled in, return -EPROTONOSUPPORT */ 1428 + s = ERR_PTR(-EPROTONOSUPPORT); 1429 + if (data->flags & NFS_MOUNT_VER3) { 1430 + dprintk("%s: NFSv3 not compiled into kernel\n", __FUNCTION__); 1431 + goto out_err; 1432 + } 1433 + #endif /* CONFIG_NFS_V3 */ 1434 1435 + s = ERR_PTR(-ENOMEM); 1436 server = kmalloc(sizeof(struct nfs_server), GFP_KERNEL); 1437 if (!server) 1438 + goto out_err; 1439 memset(server, 0, sizeof(struct nfs_server)); 1440 /* Zero out the NFS state stuff */ 1441 init_nfsv4_state(server); 1442 + server->client = server->client_sys = server->client_acl = ERR_PTR(-EINVAL); 1443 1444 root = &server->fh; 1445 if (data->flags & NFS_MOUNT_VER3) 1446 root->size = data->root.size; 1447 else 1448 root->size = NFS2_FHSIZE; 1449 + s = ERR_PTR(-EINVAL); 1450 if (root->size > sizeof(root->data)) { 1451 + dprintk("%s: invalid root filehandle\n", __FUNCTION__); 1452 + goto out_err; 1453 } 1454 memcpy(root->data, data->root.data, root->size); 1455 1456 /* We now require that the mount process passes the remote address */ 1457 memcpy(&server->addr, &data->addr, sizeof(server->addr)); 1458 if (server->addr.sin_addr.s_addr == INADDR_ANY) { 1459 + dprintk("%s: mount program didn't pass remote address!\n", 1460 + __FUNCTION__); 1461 + goto out_err; 1462 + } 1463 + 1464 + /* Fire up rpciod if not yet running */ 1465 + s = ERR_PTR(rpciod_up()); 1466 + if (IS_ERR(s)) { 1467 + dprintk("%s: couldn't start rpciod! Error = %ld\n", 1468 + __FUNCTION__, PTR_ERR(s)); 1469 + goto out_err; 1470 } 1471 1472 s = sget(fs_type, nfs_compare_super, nfs_set_super, server); 1473 + if (IS_ERR(s) || s->s_root) 1474 + goto out_rpciod_down; 1475 1476 s->s_flags = flags; 1477 1478 error = nfs_fill_super(s, data, flags & MS_VERBOSE ? 
1 : 0); 1479 if (error) { ··· 1462 } 1463 s->s_flags |= MS_ACTIVE; 1464 return s; 1465 + out_rpciod_down: 1466 + rpciod_down(); 1467 + out_err: 1468 + kfree(server); 1469 + return s; 1470 } 1471 1472 static void nfs_kill_super(struct super_block *s) ··· 1470 1471 kill_anon_super(s); 1472 1473 + if (!IS_ERR(server->client)) 1474 rpc_shutdown_client(server->client); 1475 + if (!IS_ERR(server->client_sys)) 1476 rpc_shutdown_client(server->client_sys); 1477 + if (!IS_ERR(server->client_acl)) 1478 + rpc_shutdown_client(server->client_acl); 1479 1480 if (!(server->flags & NFS_MOUNT_NONLM)) 1481 lockd_down(); /* release rpc.lockd */ ··· 1594 1595 clp = nfs4_get_client(&server->addr.sin_addr); 1596 if (!clp) { 1597 + dprintk("%s: failed to create NFS4 client.\n", __FUNCTION__); 1598 return -EIO; 1599 } 1600 1601 /* Now create transport and client */ 1602 authflavour = RPC_AUTH_UNIX; 1603 if (data->auth_flavourlen != 0) { 1604 + if (data->auth_flavourlen != 1) { 1605 + dprintk("%s: Invalid number of RPC auth flavours %d.\n", 1606 + __FUNCTION__, data->auth_flavourlen); 1607 + err = -EINVAL; 1608 + goto out_fail; 1609 + } 1610 if (copy_from_user(&authflavour, data->auth_flavours, sizeof(authflavour))) { 1611 err = -EFAULT; 1612 goto out_fail; ··· 1610 } 1611 1612 down_write(&clp->cl_sem); 1613 + if (IS_ERR(clp->cl_rpcclient)) { 1614 xprt = xprt_create_proto(proto, &server->addr, &timeparms); 1615 if (IS_ERR(xprt)) { 1616 up_write(&clp->cl_sem); 1617 err = PTR_ERR(xprt); 1618 + dprintk("%s: cannot create RPC transport. Error = %d\n", 1619 + __FUNCTION__, err); 1620 goto out_fail; 1621 } 1622 clnt = rpc_create_client(xprt, server->hostname, &nfs_program, 1623 server->rpc_ops->version, authflavour); 1624 if (IS_ERR(clnt)) { 1625 up_write(&clp->cl_sem); 1626 err = PTR_ERR(clnt); 1627 + dprintk("%s: cannot create RPC client. 
Error = %d\n", 1628 + __FUNCTION__, err); 1629 goto out_fail; 1630 } 1631 clnt->cl_intr = 1; ··· 1656 clp = NULL; 1657 1658 if (IS_ERR(clnt)) { 1659 + err = PTR_ERR(clnt); 1660 + dprintk("%s: cannot create RPC client. Error = %d\n", 1661 + __FUNCTION__, err); 1662 + return err; 1663 } 1664 1665 server->client = clnt; 1666 1667 if (server->nfs4_state->cl_idmap == NULL) { 1668 + dprintk("%s: failed to create idmapper.\n", __FUNCTION__); 1669 return -ENOMEM; 1670 } 1671 1672 if (clnt->cl_auth->au_flavor != authflavour) { 1673 + struct rpc_auth *auth; 1674 + 1675 + auth = rpcauth_create(authflavour, clnt); 1676 + if (IS_ERR(auth)) { 1677 + dprintk("%s: couldn't create credcache!\n", __FUNCTION__); 1678 + return PTR_ERR(auth); 1679 } 1680 } 1681 ··· 1730 struct nfs4_mount_data *data = raw_data; 1731 void *p; 1732 1733 + if (data == NULL) { 1734 + dprintk("%s: missing data argument\n", __FUNCTION__); 1735 + return ERR_PTR(-EINVAL); 1736 + } 1737 + if (data->version <= 0 || data->version > NFS4_MOUNT_VERSION) { 1738 + dprintk("%s: bad mount version\n", __FUNCTION__); 1739 return ERR_PTR(-EINVAL); 1740 } 1741 ··· 1741 memset(server, 0, sizeof(struct nfs_server)); 1742 /* Zero out the NFS state stuff */ 1743 init_nfsv4_state(server); 1744 + server->client = server->client_sys = server->client_acl = ERR_PTR(-EINVAL); 1745 1746 p = nfs_copy_user_string(NULL, &data->hostname, 256); 1747 if (IS_ERR(p)) ··· 1773 } 1774 if (server->addr.sin_family != AF_INET || 1775 server->addr.sin_addr.s_addr == INADDR_ANY) { 1776 + dprintk("%s: mount program didn't pass remote IP address!\n", 1777 + __FUNCTION__); 1778 s = ERR_PTR(-EINVAL); 1779 + goto out_free; 1780 + } 1781 + 1782 + /* Fire up rpciod if not yet running */ 1783 + s = ERR_PTR(rpciod_up()); 1784 + if (IS_ERR(s)) { 1785 + dprintk("%s: couldn't start rpciod! 
Error = %ld\n", 1786 + __FUNCTION__, PTR_ERR(s)); 1787 goto out_free; 1788 } 1789 ··· 1784 goto out_free; 1785 1786 s->s_flags = flags; 1787 1788 error = nfs4_fill_super(s, data, flags & MS_VERBOSE ? 1 : 0); 1789 if (error) { ··· 1875 if (!nfsi) 1876 return NULL; 1877 nfsi->flags = 0; 1878 + #ifdef CONFIG_NFS_V3_ACL 1879 + nfsi->acl_access = ERR_PTR(-EAGAIN); 1880 + nfsi->acl_default = ERR_PTR(-EAGAIN); 1881 + #endif 1882 + #ifdef CONFIG_NFS_V4 1883 + nfsi->nfs4_acl = NULL; 1884 + #endif /* CONFIG_NFS_V4 */ 1885 return &nfsi->vfs_inode; 1886 } 1887
+1 -3
fs/nfs/mount_clnt.c
··· 80 clnt = rpc_create_client(xprt, hostname, 81 &mnt_program, version, 82 RPC_AUTH_UNIX); 83 - if (IS_ERR(clnt)) { 84 - xprt_destroy(xprt); 85 - } else { 86 clnt->cl_softrtry = 1; 87 clnt->cl_chatty = 1; 88 clnt->cl_oneshot = 1;
··· 80 clnt = rpc_create_client(xprt, hostname, 81 &mnt_program, version, 82 RPC_AUTH_UNIX); 83 + if (!IS_ERR(clnt)) { 84 clnt->cl_softrtry = 1; 85 clnt->cl_chatty = 1; 86 clnt->cl_oneshot = 1;
+403
fs/nfs/nfs3acl.c
···
··· 1 + #include <linux/fs.h> 2 + #include <linux/nfs.h> 3 + #include <linux/nfs3.h> 4 + #include <linux/nfs_fs.h> 5 + #include <linux/xattr_acl.h> 6 + #include <linux/nfsacl.h> 7 + 8 + #define NFSDBG_FACILITY NFSDBG_PROC 9 + 10 + ssize_t nfs3_listxattr(struct dentry *dentry, char *buffer, size_t size) 11 + { 12 + struct inode *inode = dentry->d_inode; 13 + struct posix_acl *acl; 14 + int pos=0, len=0; 15 + 16 + # define output(s) do { \ 17 + if (pos + sizeof(s) <= size) { \ 18 + memcpy(buffer + pos, s, sizeof(s)); \ 19 + pos += sizeof(s); \ 20 + } \ 21 + len += sizeof(s); \ 22 + } while(0) 23 + 24 + acl = nfs3_proc_getacl(inode, ACL_TYPE_ACCESS); 25 + if (IS_ERR(acl)) 26 + return PTR_ERR(acl); 27 + if (acl) { 28 + output("system.posix_acl_access"); 29 + posix_acl_release(acl); 30 + } 31 + 32 + if (S_ISDIR(inode->i_mode)) { 33 + acl = nfs3_proc_getacl(inode, ACL_TYPE_DEFAULT); 34 + if (IS_ERR(acl)) 35 + return PTR_ERR(acl); 36 + if (acl) { 37 + output("system.posix_acl_default"); 38 + posix_acl_release(acl); 39 + } 40 + } 41 + 42 + # undef output 43 + 44 + if (!buffer || len <= size) 45 + return len; 46 + return -ERANGE; 47 + } 48 + 49 + ssize_t nfs3_getxattr(struct dentry *dentry, const char *name, 50 + void *buffer, size_t size) 51 + { 52 + struct inode *inode = dentry->d_inode; 53 + struct posix_acl *acl; 54 + int type, error = 0; 55 + 56 + if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) 57 + type = ACL_TYPE_ACCESS; 58 + else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) 59 + type = ACL_TYPE_DEFAULT; 60 + else 61 + return -EOPNOTSUPP; 62 + 63 + acl = nfs3_proc_getacl(inode, type); 64 + if (IS_ERR(acl)) 65 + return PTR_ERR(acl); 66 + else if (acl) { 67 + if (type == ACL_TYPE_ACCESS && acl->a_count == 0) 68 + error = -ENODATA; 69 + else 70 + error = posix_acl_to_xattr(acl, buffer, size); 71 + posix_acl_release(acl); 72 + } else 73 + error = -ENODATA; 74 + 75 + return error; 76 + } 77 + 78 + int nfs3_setxattr(struct dentry *dentry, const char *name, 79 + const void 
*value, size_t size, int flags) 80 + { 81 + struct inode *inode = dentry->d_inode; 82 + struct posix_acl *acl; 83 + int type, error; 84 + 85 + if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) 86 + type = ACL_TYPE_ACCESS; 87 + else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) 88 + type = ACL_TYPE_DEFAULT; 89 + else 90 + return -EOPNOTSUPP; 91 + 92 + acl = posix_acl_from_xattr(value, size); 93 + if (IS_ERR(acl)) 94 + return PTR_ERR(acl); 95 + error = nfs3_proc_setacl(inode, type, acl); 96 + posix_acl_release(acl); 97 + 98 + return error; 99 + } 100 + 101 + int nfs3_removexattr(struct dentry *dentry, const char *name) 102 + { 103 + struct inode *inode = dentry->d_inode; 104 + int type; 105 + 106 + if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) 107 + type = ACL_TYPE_ACCESS; 108 + else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) 109 + type = ACL_TYPE_DEFAULT; 110 + else 111 + return -EOPNOTSUPP; 112 + 113 + return nfs3_proc_setacl(inode, type, NULL); 114 + } 115 + 116 + static void __nfs3_forget_cached_acls(struct nfs_inode *nfsi) 117 + { 118 + if (!IS_ERR(nfsi->acl_access)) { 119 + posix_acl_release(nfsi->acl_access); 120 + nfsi->acl_access = ERR_PTR(-EAGAIN); 121 + } 122 + if (!IS_ERR(nfsi->acl_default)) { 123 + posix_acl_release(nfsi->acl_default); 124 + nfsi->acl_default = ERR_PTR(-EAGAIN); 125 + } 126 + } 127 + 128 + void nfs3_forget_cached_acls(struct inode *inode) 129 + { 130 + dprintk("NFS: nfs3_forget_cached_acls(%s/%ld)\n", inode->i_sb->s_id, 131 + inode->i_ino); 132 + spin_lock(&inode->i_lock); 133 + __nfs3_forget_cached_acls(NFS_I(inode)); 134 + spin_unlock(&inode->i_lock); 135 + } 136 + 137 + static struct posix_acl *nfs3_get_cached_acl(struct inode *inode, int type) 138 + { 139 + struct nfs_inode *nfsi = NFS_I(inode); 140 + struct posix_acl *acl = ERR_PTR(-EINVAL); 141 + 142 + spin_lock(&inode->i_lock); 143 + switch(type) { 144 + case ACL_TYPE_ACCESS: 145 + acl = nfsi->acl_access; 146 + break; 147 + 148 + case ACL_TYPE_DEFAULT: 149 + acl = 
nfsi->acl_default; 150 + break; 151 + 152 + default: 153 + goto out; 154 + } 155 + if (IS_ERR(acl)) 156 + acl = ERR_PTR(-EAGAIN); 157 + else 158 + acl = posix_acl_dup(acl); 159 + out: 160 + spin_unlock(&inode->i_lock); 161 + dprintk("NFS: nfs3_get_cached_acl(%s/%ld, %d) = %p\n", inode->i_sb->s_id, 162 + inode->i_ino, type, acl); 163 + return acl; 164 + } 165 + 166 + static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl, 167 + struct posix_acl *dfacl) 168 + { 169 + struct nfs_inode *nfsi = NFS_I(inode); 170 + 171 + dprintk("nfs3_cache_acls(%s/%ld, %p, %p)\n", inode->i_sb->s_id, 172 + inode->i_ino, acl, dfacl); 173 + spin_lock(&inode->i_lock); 174 + __nfs3_forget_cached_acls(NFS_I(inode)); 175 + nfsi->acl_access = posix_acl_dup(acl); 176 + nfsi->acl_default = posix_acl_dup(dfacl); 177 + spin_unlock(&inode->i_lock); 178 + } 179 + 180 + struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type) 181 + { 182 + struct nfs_server *server = NFS_SERVER(inode); 183 + struct nfs_fattr fattr; 184 + struct page *pages[NFSACL_MAXPAGES] = { }; 185 + struct nfs3_getaclargs args = { 186 + .fh = NFS_FH(inode), 187 + /* The xdr layer may allocate pages here. */ 188 + .pages = pages, 189 + }; 190 + struct nfs3_getaclres res = { 191 + .fattr = &fattr, 192 + }; 193 + struct posix_acl *acl; 194 + int status, count; 195 + 196 + if (!nfs_server_capable(inode, NFS_CAP_ACLS)) 197 + return ERR_PTR(-EOPNOTSUPP); 198 + 199 + status = nfs_revalidate_inode(server, inode); 200 + if (status < 0) 201 + return ERR_PTR(status); 202 + acl = nfs3_get_cached_acl(inode, type); 203 + if (acl != ERR_PTR(-EAGAIN)) 204 + return acl; 205 + acl = NULL; 206 + 207 + /* 208 + * Only get the access acl when explicitly requested: We don't 209 + * need it for access decisions, and only some applications use 210 + * it. Applications which request the access acl first are not 211 + * penalized from this optimization. 
212 + */ 213 + if (type == ACL_TYPE_ACCESS) 214 + args.mask |= NFS_ACLCNT|NFS_ACL; 215 + if (S_ISDIR(inode->i_mode)) 216 + args.mask |= NFS_DFACLCNT|NFS_DFACL; 217 + if (args.mask == 0) 218 + return NULL; 219 + 220 + dprintk("NFS call getacl\n"); 221 + status = rpc_call(server->client_acl, ACLPROC3_GETACL, 222 + &args, &res, 0); 223 + dprintk("NFS reply getacl: %d\n", status); 224 + 225 + /* pages may have been allocated at the xdr layer. */ 226 + for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++) 227 + __free_page(args.pages[count]); 228 + 229 + switch (status) { 230 + case 0: 231 + status = nfs_refresh_inode(inode, &fattr); 232 + break; 233 + case -EPFNOSUPPORT: 234 + case -EPROTONOSUPPORT: 235 + dprintk("NFS_V3_ACL extension not supported; disabling\n"); 236 + server->caps &= ~NFS_CAP_ACLS; 237 + case -ENOTSUPP: 238 + status = -EOPNOTSUPP; 239 + default: 240 + goto getout; 241 + } 242 + if ((args.mask & res.mask) != args.mask) { 243 + status = -EIO; 244 + goto getout; 245 + } 246 + 247 + if (res.acl_access != NULL) { 248 + if (posix_acl_equiv_mode(res.acl_access, NULL) == 0) { 249 + posix_acl_release(res.acl_access); 250 + res.acl_access = NULL; 251 + } 252 + } 253 + nfs3_cache_acls(inode, res.acl_access, res.acl_default); 254 + 255 + switch(type) { 256 + case ACL_TYPE_ACCESS: 257 + acl = res.acl_access; 258 + res.acl_access = NULL; 259 + break; 260 + 261 + case ACL_TYPE_DEFAULT: 262 + acl = res.acl_default; 263 + res.acl_default = NULL; 264 + } 265 + 266 + getout: 267 + posix_acl_release(res.acl_access); 268 + posix_acl_release(res.acl_default); 269 + 270 + if (status != 0) { 271 + posix_acl_release(acl); 272 + acl = ERR_PTR(status); 273 + } 274 + return acl; 275 + } 276 + 277 + static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, 278 + struct posix_acl *dfacl) 279 + { 280 + struct nfs_server *server = NFS_SERVER(inode); 281 + struct nfs_fattr fattr; 282 + struct page *pages[NFSACL_MAXPAGES] = { }; 283 + struct 
nfs3_setaclargs args = { 284 + .inode = inode, 285 + .mask = NFS_ACL, 286 + .acl_access = acl, 287 + .pages = pages, 288 + }; 289 + int status, count; 290 + 291 + status = -EOPNOTSUPP; 292 + if (!nfs_server_capable(inode, NFS_CAP_ACLS)) 293 + goto out; 294 + 295 + /* We are doing this here, because XDR marshalling can only 296 + return -ENOMEM. */ 297 + status = -ENOSPC; 298 + if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES) 299 + goto out; 300 + if (dfacl != NULL && dfacl->a_count > NFS_ACL_MAX_ENTRIES) 301 + goto out; 302 + if (S_ISDIR(inode->i_mode)) { 303 + args.mask |= NFS_DFACL; 304 + args.acl_default = dfacl; 305 + } 306 + 307 + dprintk("NFS call setacl\n"); 308 + nfs_begin_data_update(inode); 309 + status = rpc_call(server->client_acl, ACLPROC3_SETACL, 310 + &args, &fattr, 0); 311 + NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS; 312 + nfs_end_data_update(inode); 313 + dprintk("NFS reply setacl: %d\n", status); 314 + 315 + /* pages may have been allocated at the xdr layer. */ 316 + for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++) 317 + __free_page(args.pages[count]); 318 + 319 + switch (status) { 320 + case 0: 321 + status = nfs_refresh_inode(inode, &fattr); 322 + break; 323 + case -EPFNOSUPPORT: 324 + case -EPROTONOSUPPORT: 325 + dprintk("NFS_V3_ACL SETACL RPC not supported" 326 + "(will not retry)\n"); 327 + server->caps &= ~NFS_CAP_ACLS; 328 + case -ENOTSUPP: 329 + status = -EOPNOTSUPP; 330 + } 331 + out: 332 + return status; 333 + } 334 + 335 + int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl) 336 + { 337 + struct posix_acl *alloc = NULL, *dfacl = NULL; 338 + int status; 339 + 340 + if (S_ISDIR(inode->i_mode)) { 341 + switch(type) { 342 + case ACL_TYPE_ACCESS: 343 + alloc = dfacl = nfs3_proc_getacl(inode, 344 + ACL_TYPE_DEFAULT); 345 + if (IS_ERR(alloc)) 346 + goto fail; 347 + break; 348 + 349 + case ACL_TYPE_DEFAULT: 350 + dfacl = acl; 351 + alloc = acl = nfs3_proc_getacl(inode, 352 + ACL_TYPE_ACCESS); 
353 + if (IS_ERR(alloc)) 354 + goto fail; 355 + break; 356 + 357 + default: 358 + return -EINVAL; 359 + } 360 + } else if (type != ACL_TYPE_ACCESS) 361 + return -EINVAL; 362 + 363 + if (acl == NULL) { 364 + alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); 365 + if (IS_ERR(alloc)) 366 + goto fail; 367 + } 368 + status = nfs3_proc_setacls(inode, acl, dfacl); 369 + posix_acl_release(alloc); 370 + return status; 371 + 372 + fail: 373 + return PTR_ERR(alloc); 374 + } 375 + 376 + int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode, 377 + mode_t mode) 378 + { 379 + struct posix_acl *dfacl, *acl; 380 + int error = 0; 381 + 382 + dfacl = nfs3_proc_getacl(dir, ACL_TYPE_DEFAULT); 383 + if (IS_ERR(dfacl)) { 384 + error = PTR_ERR(dfacl); 385 + return (error == -EOPNOTSUPP) ? 0 : error; 386 + } 387 + if (!dfacl) 388 + return 0; 389 + acl = posix_acl_clone(dfacl, GFP_KERNEL); 390 + error = -ENOMEM; 391 + if (!acl) 392 + goto out_release_dfacl; 393 + error = posix_acl_create_masq(acl, &mode); 394 + if (error < 0) 395 + goto out_release_acl; 396 + error = nfs3_proc_setacls(inode, acl, S_ISDIR(inode->i_mode) ? 397 + dfacl : NULL); 398 + out_release_acl: 399 + posix_acl_release(acl); 400 + out_release_dfacl: 401 + posix_acl_release(dfacl); 402 + return error; 403 + }
+35 -8
fs/nfs/nfs3proc.c
··· 17 #include <linux/nfs_page.h> 18 #include <linux/lockd/bind.h> 19 #include <linux/smp_lock.h> 20 21 #define NFSDBG_FACILITY NFSDBG_PROC 22 ··· 46 nfs3_rpc_call_wrapper(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags) 47 { 48 struct rpc_message msg = { 49 - .rpc_proc = &nfs3_procedures[proc], 50 .rpc_argp = argp, 51 .rpc_resp = resp, 52 }; ··· 314 .fh = &fhandle, 315 .fattr = &fattr 316 }; 317 - int status; 318 319 dprintk("NFS call create %s\n", dentry->d_name.name); 320 arg.createmode = NFS3_CREATE_UNCHECKED; ··· 324 arg.verifier[0] = jiffies; 325 arg.verifier[1] = current->pid; 326 } 327 328 again: 329 dir_attr.valid = 0; ··· 373 nfs_refresh_inode(dentry->d_inode, &fattr); 374 dprintk("NFS reply setattr (post-create): %d\n", status); 375 } 376 out: 377 dprintk("NFS reply create: %d\n", status); 378 return status; ··· 545 .fh = &fhandle, 546 .fattr = &fattr 547 }; 548 - int status; 549 550 dprintk("NFS call mkdir %s\n", dentry->d_name.name); 551 dir_attr.valid = 0; 552 fattr.valid = 0; 553 status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKDIR, &arg, &res, 0); 554 nfs_refresh_inode(dir, &dir_attr); 555 - if (status == 0) 556 - status = nfs_instantiate(dentry, &fhandle, &fattr); 557 dprintk("NFS reply mkdir: %d\n", status); 558 return status; 559 } ··· 657 .fh = &fh, 658 .fattr = &fattr 659 }; 660 int status; 661 662 switch (sattr->ia_mode & S_IFMT) { ··· 670 671 dprintk("NFS call mknod %s %u:%u\n", dentry->d_name.name, 672 MAJOR(rdev), MINOR(rdev)); 673 dir_attr.valid = 0; 674 fattr.valid = 0; 675 status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKNOD, &arg, &res, 0); 676 nfs_refresh_inode(dir, &dir_attr); 677 - if (status == 0) 678 - status = nfs_instantiate(dentry, &fh, &fattr); 679 dprintk("NFS reply mknod: %d\n", status); 680 return status; 681 } ··· 850 struct nfs_rpc_ops nfs_v3_clientops = { 851 .version = 3, /* protocol version */ 852 .dentry_ops = &nfs_dentry_operations, 853 - .dir_inode_ops = &nfs_dir_inode_operations, 854 .getroot = 
nfs3_proc_get_root, 855 .getattr = nfs3_proc_getattr, 856 .setattr = nfs3_proc_setattr, ··· 882 .file_open = nfs_open, 883 .file_release = nfs_release, 884 .lock = nfs3_proc_lock, 885 };
··· 17 #include <linux/nfs_page.h> 18 #include <linux/lockd/bind.h> 19 #include <linux/smp_lock.h> 20 + #include <linux/nfs_mount.h> 21 22 #define NFSDBG_FACILITY NFSDBG_PROC 23 ··· 45 nfs3_rpc_call_wrapper(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags) 46 { 47 struct rpc_message msg = { 48 + .rpc_proc = &clnt->cl_procinfo[proc], 49 .rpc_argp = argp, 50 .rpc_resp = resp, 51 }; ··· 313 .fh = &fhandle, 314 .fattr = &fattr 315 }; 316 + mode_t mode = sattr->ia_mode; 317 + int status; 318 319 dprintk("NFS call create %s\n", dentry->d_name.name); 320 arg.createmode = NFS3_CREATE_UNCHECKED; ··· 322 arg.verifier[0] = jiffies; 323 arg.verifier[1] = current->pid; 324 } 325 + 326 + sattr->ia_mode &= ~current->fs->umask; 327 328 again: 329 dir_attr.valid = 0; ··· 369 nfs_refresh_inode(dentry->d_inode, &fattr); 370 dprintk("NFS reply setattr (post-create): %d\n", status); 371 } 372 + if (status != 0) 373 + goto out; 374 + status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode); 375 out: 376 dprintk("NFS reply create: %d\n", status); 377 return status; ··· 538 .fh = &fhandle, 539 .fattr = &fattr 540 }; 541 + int mode = sattr->ia_mode; 542 + int status; 543 544 dprintk("NFS call mkdir %s\n", dentry->d_name.name); 545 dir_attr.valid = 0; 546 fattr.valid = 0; 547 + 548 + sattr->ia_mode &= ~current->fs->umask; 549 + 550 status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKDIR, &arg, &res, 0); 551 nfs_refresh_inode(dir, &dir_attr); 552 + if (status != 0) 553 + goto out; 554 + status = nfs_instantiate(dentry, &fhandle, &fattr); 555 + if (status != 0) 556 + goto out; 557 + status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode); 558 + out: 559 dprintk("NFS reply mkdir: %d\n", status); 560 return status; 561 } ··· 641 .fh = &fh, 642 .fattr = &fattr 643 }; 644 + mode_t mode = sattr->ia_mode; 645 int status; 646 647 switch (sattr->ia_mode & S_IFMT) { ··· 653 654 dprintk("NFS call mknod %s %u:%u\n", dentry->d_name.name, 655 MAJOR(rdev), MINOR(rdev)); 656 + 657 + 
sattr->ia_mode &= ~current->fs->umask; 658 + 659 dir_attr.valid = 0; 660 fattr.valid = 0; 661 status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKNOD, &arg, &res, 0); 662 nfs_refresh_inode(dir, &dir_attr); 663 + if (status != 0) 664 + goto out; 665 + status = nfs_instantiate(dentry, &fh, &fattr); 666 + if (status != 0) 667 + goto out; 668 + status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode); 669 + out: 670 dprintk("NFS reply mknod: %d\n", status); 671 return status; 672 } ··· 825 struct nfs_rpc_ops nfs_v3_clientops = { 826 .version = 3, /* protocol version */ 827 .dentry_ops = &nfs_dentry_operations, 828 + .dir_inode_ops = &nfs3_dir_inode_operations, 829 + .file_inode_ops = &nfs3_file_inode_operations, 830 .getroot = nfs3_proc_get_root, 831 .getattr = nfs3_proc_getattr, 832 .setattr = nfs3_proc_setattr, ··· 856 .file_open = nfs_open, 857 .file_release = nfs_release, 858 .lock = nfs3_proc_lock, 859 + .clear_acl_cache = nfs3_forget_cached_acls, 860 };
+147
fs/nfs/nfs3xdr.c
··· 21 #include <linux/nfs.h> 22 #include <linux/nfs3.h> 23 #include <linux/nfs_fs.h> 24 25 #define NFSDBG_FACILITY NFSDBG_XDR 26 ··· 79 #define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12) 80 #define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6) 81 #define NFS3_commitres_sz (1+NFS3_wcc_data_sz+2) 82 83 /* 84 * Map file type to S_IFMT bits ··· 633 return 0; 634 } 635 636 /* 637 * NFS XDR decode functions 638 */ ··· 1052 return 0; 1053 } 1054 1055 #ifndef MAX 1056 # define MAX(a, b) (((a) > (b))? (a) : (b)) 1057 #endif ··· 1143 .procs = nfs3_procedures 1144 }; 1145
··· 21 #include <linux/nfs.h> 22 #include <linux/nfs3.h> 23 #include <linux/nfs_fs.h> 24 + #include <linux/nfsacl.h> 25 26 #define NFSDBG_FACILITY NFSDBG_XDR 27 ··· 78 #define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12) 79 #define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6) 80 #define NFS3_commitres_sz (1+NFS3_wcc_data_sz+2) 81 + 82 + #define ACL3_getaclargs_sz (NFS3_fh_sz+1) 83 + #define ACL3_setaclargs_sz (NFS3_fh_sz+1+2*(2+5*3)) 84 + #define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+2*(2+5*3)) 85 + #define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz) 86 87 /* 88 * Map file type to S_IFMT bits ··· 627 return 0; 628 } 629 630 + #ifdef CONFIG_NFS_V3_ACL 631 + /* 632 + * Encode GETACL arguments 633 + */ 634 + static int 635 + nfs3_xdr_getaclargs(struct rpc_rqst *req, u32 *p, 636 + struct nfs3_getaclargs *args) 637 + { 638 + struct rpc_auth *auth = req->rq_task->tk_auth; 639 + unsigned int replen; 640 + 641 + p = xdr_encode_fhandle(p, args->fh); 642 + *p++ = htonl(args->mask); 643 + req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 644 + 645 + if (args->mask & (NFS_ACL | NFS_DFACL)) { 646 + /* Inline the page array */ 647 + replen = (RPC_REPHDRSIZE + auth->au_rslack + 648 + ACL3_getaclres_sz) << 2; 649 + xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, 650 + NFSACL_MAXPAGES << PAGE_SHIFT); 651 + } 652 + return 0; 653 + } 654 + 655 + /* 656 + * Encode SETACL arguments 657 + */ 658 + static int 659 + nfs3_xdr_setaclargs(struct rpc_rqst *req, u32 *p, 660 + struct nfs3_setaclargs *args) 661 + { 662 + struct xdr_buf *buf = &req->rq_snd_buf; 663 + unsigned int base, len_in_head, len = nfsacl_size( 664 + (args->mask & NFS_ACL) ? args->acl_access : NULL, 665 + (args->mask & NFS_DFACL) ? args->acl_default : NULL); 666 + int count, err; 667 + 668 + p = xdr_encode_fhandle(p, NFS_FH(args->inode)); 669 + *p++ = htonl(args->mask); 670 + base = (char *)p - (char *)buf->head->iov_base; 671 + /* put as much of the acls into head as possible. 
*/ 672 + len_in_head = min_t(unsigned int, buf->head->iov_len - base, len); 673 + len -= len_in_head; 674 + req->rq_slen = xdr_adjust_iovec(req->rq_svec, p + (len_in_head >> 2)); 675 + 676 + for (count = 0; (count << PAGE_SHIFT) < len; count++) { 677 + args->pages[count] = alloc_page(GFP_KERNEL); 678 + if (!args->pages[count]) { 679 + while (count) 680 + __free_page(args->pages[--count]); 681 + return -ENOMEM; 682 + } 683 + } 684 + xdr_encode_pages(buf, args->pages, 0, len); 685 + 686 + err = nfsacl_encode(buf, base, args->inode, 687 + (args->mask & NFS_ACL) ? 688 + args->acl_access : NULL, 1, 0); 689 + if (err > 0) 690 + err = nfsacl_encode(buf, base + err, args->inode, 691 + (args->mask & NFS_DFACL) ? 692 + args->acl_default : NULL, 1, 693 + NFS_ACL_DEFAULT); 694 + return (err > 0) ? 0 : err; 695 + } 696 + #endif /* CONFIG_NFS_V3_ACL */ 697 + 698 /* 699 * NFS XDR decode functions 700 */ ··· 978 return 0; 979 } 980 981 + #ifdef CONFIG_NFS_V3_ACL 982 + /* 983 + * Decode GETACL reply 984 + */ 985 + static int 986 + nfs3_xdr_getaclres(struct rpc_rqst *req, u32 *p, 987 + struct nfs3_getaclres *res) 988 + { 989 + struct xdr_buf *buf = &req->rq_rcv_buf; 990 + int status = ntohl(*p++); 991 + struct posix_acl **acl; 992 + unsigned int *aclcnt; 993 + int err, base; 994 + 995 + if (status != 0) 996 + return -nfs_stat_to_errno(status); 997 + p = xdr_decode_post_op_attr(p, res->fattr); 998 + res->mask = ntohl(*p++); 999 + if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) 1000 + return -EINVAL; 1001 + base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base; 1002 + 1003 + acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL; 1004 + aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL; 1005 + err = nfsacl_decode(buf, base, aclcnt, acl); 1006 + 1007 + acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL; 1008 + aclcnt = (res->mask & NFS_DFACLCNT) ? 
&res->acl_default_count : NULL; 1009 + if (err > 0) 1010 + err = nfsacl_decode(buf, base + err, aclcnt, acl); 1011 + return (err > 0) ? 0 : err; 1012 + } 1013 + 1014 + /* 1015 + * Decode setacl reply. 1016 + */ 1017 + static int 1018 + nfs3_xdr_setaclres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr) 1019 + { 1020 + int status = ntohl(*p++); 1021 + 1022 + if (status) 1023 + return -nfs_stat_to_errno(status); 1024 + xdr_decode_post_op_attr(p, fattr); 1025 + return 0; 1026 + } 1027 + #endif /* CONFIG_NFS_V3_ACL */ 1028 + 1029 #ifndef MAX 1030 # define MAX(a, b) (((a) > (b))? (a) : (b)) 1031 #endif ··· 1021 .procs = nfs3_procedures 1022 }; 1023 1024 + #ifdef CONFIG_NFS_V3_ACL 1025 + static struct rpc_procinfo nfs3_acl_procedures[] = { 1026 + [ACLPROC3_GETACL] = { 1027 + .p_proc = ACLPROC3_GETACL, 1028 + .p_encode = (kxdrproc_t) nfs3_xdr_getaclargs, 1029 + .p_decode = (kxdrproc_t) nfs3_xdr_getaclres, 1030 + .p_bufsiz = MAX(ACL3_getaclargs_sz, ACL3_getaclres_sz) << 2, 1031 + .p_timer = 1, 1032 + }, 1033 + [ACLPROC3_SETACL] = { 1034 + .p_proc = ACLPROC3_SETACL, 1035 + .p_encode = (kxdrproc_t) nfs3_xdr_setaclargs, 1036 + .p_decode = (kxdrproc_t) nfs3_xdr_setaclres, 1037 + .p_bufsiz = MAX(ACL3_setaclargs_sz, ACL3_setaclres_sz) << 2, 1038 + .p_timer = 0, 1039 + }, 1040 + }; 1041 + 1042 + struct rpc_version nfsacl_version3 = { 1043 + .number = 3, 1044 + .nrprocs = sizeof(nfs3_acl_procedures)/ 1045 + sizeof(nfs3_acl_procedures[0]), 1046 + .procs = nfs3_acl_procedures, 1047 + }; 1048 + #endif /* CONFIG_NFS_V3_ACL */
+253
fs/nfs/nfs4_fs.h
···
··· 1 + /* 2 + * linux/fs/nfs/nfs4_fs.h 3 + * 4 + * Copyright (C) 2005 Trond Myklebust 5 + * 6 + * NFSv4-specific filesystem definitions and declarations 7 + */ 8 + 9 + #ifndef __LINUX_FS_NFS_NFS4_FS_H 10 + #define __LINUX_FS_NFS_NFS4_FS_H 11 + 12 + #ifdef CONFIG_NFS_V4 13 + 14 + struct idmap; 15 + 16 + /* 17 + * In a seqid-mutating op, this macro controls which error return 18 + * values trigger incrementation of the seqid. 19 + * 20 + * from rfc 3010: 21 + * The client MUST monotonically increment the sequence number for the 22 + * CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE 23 + * operations. This is true even in the event that the previous 24 + * operation that used the sequence number received an error. The only 25 + * exception to this rule is if the previous operation received one of 26 + * the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID, 27 + * NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR, 28 + * NFSERR_RESOURCE, NFSERR_NOFILEHANDLE. 29 + * 30 + */ 31 + #define seqid_mutating_err(err) \ 32 + (((err) != NFSERR_STALE_CLIENTID) && \ 33 + ((err) != NFSERR_STALE_STATEID) && \ 34 + ((err) != NFSERR_BAD_STATEID) && \ 35 + ((err) != NFSERR_BAD_SEQID) && \ 36 + ((err) != NFSERR_BAD_XDR) && \ 37 + ((err) != NFSERR_RESOURCE) && \ 38 + ((err) != NFSERR_NOFILEHANDLE)) 39 + 40 + enum nfs4_client_state { 41 + NFS4CLNT_OK = 0, 42 + }; 43 + 44 + /* 45 + * The nfs4_client identifies our client state to the server. 46 + */ 47 + struct nfs4_client { 48 + struct list_head cl_servers; /* Global list of servers */ 49 + struct in_addr cl_addr; /* Server identifier */ 50 + u64 cl_clientid; /* constant */ 51 + nfs4_verifier cl_confirm; 52 + unsigned long cl_state; 53 + 54 + u32 cl_lockowner_id; 55 + 56 + /* 57 + * The following rwsem ensures exclusive access to the server 58 + * while we recover the state following a lease expiration. 
59 + */ 60 + struct rw_semaphore cl_sem; 61 + 62 + struct list_head cl_delegations; 63 + struct list_head cl_state_owners; 64 + struct list_head cl_unused; 65 + int cl_nunused; 66 + spinlock_t cl_lock; 67 + atomic_t cl_count; 68 + 69 + struct rpc_clnt * cl_rpcclient; 70 + struct rpc_cred * cl_cred; 71 + 72 + struct list_head cl_superblocks; /* List of nfs_server structs */ 73 + 74 + unsigned long cl_lease_time; 75 + unsigned long cl_last_renewal; 76 + struct work_struct cl_renewd; 77 + struct work_struct cl_recoverd; 78 + 79 + wait_queue_head_t cl_waitq; 80 + struct rpc_wait_queue cl_rpcwaitq; 81 + 82 + /* used for the setclientid verifier */ 83 + struct timespec cl_boot_time; 84 + 85 + /* idmapper */ 86 + struct idmap * cl_idmap; 87 + 88 + /* Our own IP address, as a null-terminated string. 89 + * This is used to generate the clientid, and the callback address. 90 + */ 91 + char cl_ipaddr[16]; 92 + unsigned char cl_id_uniquifier; 93 + }; 94 + 95 + /* 96 + * NFS4 state_owners and lock_owners are simply labels for ordered 97 + * sequences of RPC calls. Their sole purpose is to provide once-only 98 + * semantics by allowing the server to identify replayed requests. 99 + * 100 + * The ->so_sema is held during all state_owner seqid-mutating operations: 101 + * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize 102 + * so_seqid. 103 + */ 104 + struct nfs4_state_owner { 105 + struct list_head so_list; /* per-clientid list of state_owners */ 106 + struct nfs4_client *so_client; 107 + u32 so_id; /* 32-bit identifier, unique */ 108 + struct semaphore so_sema; 109 + u32 so_seqid; /* protected by so_sema */ 110 + atomic_t so_count; 111 + 112 + struct rpc_cred *so_cred; /* Associated cred */ 113 + struct list_head so_states; 114 + struct list_head so_delegations; 115 + }; 116 + 117 + /* 118 + * struct nfs4_state maintains the client-side state for a given 119 + * (state_owner,inode) tuple (OPEN) or state_owner (LOCK). 
120 + * 121 + * OPEN: 122 + * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server, 123 + * we need to know how many files are open for reading or writing on a 124 + * given inode. This information too is stored here. 125 + * 126 + * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN) 127 + */ 128 + 129 + struct nfs4_lock_state { 130 + struct list_head ls_locks; /* Other lock stateids */ 131 + struct nfs4_state * ls_state; /* Pointer to open state */ 132 + fl_owner_t ls_owner; /* POSIX lock owner */ 133 + #define NFS_LOCK_INITIALIZED 1 134 + int ls_flags; 135 + u32 ls_seqid; 136 + u32 ls_id; 137 + nfs4_stateid ls_stateid; 138 + atomic_t ls_count; 139 + }; 140 + 141 + /* bits for nfs4_state->flags */ 142 + enum { 143 + LK_STATE_IN_USE, 144 + NFS_DELEGATED_STATE, 145 + }; 146 + 147 + struct nfs4_state { 148 + struct list_head open_states; /* List of states for the same state_owner */ 149 + struct list_head inode_states; /* List of states for the same inode */ 150 + struct list_head lock_states; /* List of subservient lock stateids */ 151 + 152 + struct nfs4_state_owner *owner; /* Pointer to the open owner */ 153 + struct inode *inode; /* Pointer to the inode */ 154 + 155 + unsigned long flags; /* Do we hold any locks? 
*/ 156 + struct semaphore lock_sema; /* Serializes file locking operations */ 157 + spinlock_t state_lock; /* Protects the lock_states list */ 158 + 159 + nfs4_stateid stateid; 160 + 161 + unsigned int nreaders; 162 + unsigned int nwriters; 163 + int state; /* State on the server (R,W, or RW) */ 164 + atomic_t count; 165 + }; 166 + 167 + 168 + struct nfs4_exception { 169 + long timeout; 170 + int retry; 171 + }; 172 + 173 + struct nfs4_state_recovery_ops { 174 + int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *); 175 + int (*recover_lock)(struct nfs4_state *, struct file_lock *); 176 + }; 177 + 178 + extern struct dentry_operations nfs4_dentry_operations; 179 + extern struct inode_operations nfs4_dir_inode_operations; 180 + 181 + /* inode.c */ 182 + extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t); 183 + extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int); 184 + extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t); 185 + 186 + 187 + /* nfs4proc.c */ 188 + extern int nfs4_map_errors(int err); 189 + extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short); 190 + extern int nfs4_proc_setclientid_confirm(struct nfs4_client *); 191 + extern int nfs4_proc_async_renew(struct nfs4_client *); 192 + extern int nfs4_proc_renew(struct nfs4_client *); 193 + extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode); 194 + extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); 195 + extern int nfs4_open_revalidate(struct inode *, struct dentry *, int); 196 + 197 + extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; 198 + extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops; 199 + 200 + extern const u32 nfs4_fattr_bitmap[2]; 201 + extern const u32 nfs4_statfs_bitmap[2]; 202 + extern const u32 nfs4_pathconf_bitmap[2]; 203 + extern const u32 nfs4_fsinfo_bitmap[2]; 204 + 205 + /* 
nfs4renewd.c */ 206 + extern void nfs4_schedule_state_renewal(struct nfs4_client *); 207 + extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); 208 + extern void nfs4_kill_renewd(struct nfs4_client *); 209 + extern void nfs4_renew_state(void *); 210 + 211 + /* nfs4state.c */ 212 + extern void init_nfsv4_state(struct nfs_server *); 213 + extern void destroy_nfsv4_state(struct nfs_server *); 214 + extern struct nfs4_client *nfs4_get_client(struct in_addr *); 215 + extern void nfs4_put_client(struct nfs4_client *clp); 216 + extern int nfs4_init_client(struct nfs4_client *clp); 217 + extern struct nfs4_client *nfs4_find_client(struct in_addr *); 218 + extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *); 219 + 220 + extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); 221 + extern void nfs4_put_state_owner(struct nfs4_state_owner *); 222 + extern void nfs4_drop_state_owner(struct nfs4_state_owner *); 223 + extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); 224 + extern void nfs4_put_open_state(struct nfs4_state *); 225 + extern void nfs4_close_state(struct nfs4_state *, mode_t); 226 + extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode); 227 + extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp); 228 + extern void nfs4_schedule_state_recovery(struct nfs4_client *); 229 + extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); 230 + extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls); 231 + extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); 232 + 233 + extern const nfs4_stateid zero_stateid; 234 + 235 + /* nfs4xdr.c */ 236 + extern uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus); 237 + extern struct rpc_procinfo nfs4_procedures[]; 238 + 239 + struct nfs4_mount_data; 240 + 241 + /* callback_xdr.c */ 242 + extern 
struct svc_version nfs4_callback_version1; 243 + 244 + #else 245 + 246 + #define init_nfsv4_state(server) do { } while (0) 247 + #define destroy_nfsv4_state(server) do { } while (0) 248 + #define nfs4_put_state_owner(inode, owner) do { } while (0) 249 + #define nfs4_put_open_state(state) do { } while (0) 250 + #define nfs4_close_state(a, b) do { } while (0) 251 + 252 + #endif /* CONFIG_NFS_V4 */ 253 + #endif /* __LINUX_FS_NFS_NFS4_FS_H */
+339 -90
fs/nfs/nfs4proc.c
··· 48 #include <linux/smp_lock.h> 49 #include <linux/namei.h> 50 51 #include "delegation.h" 52 53 #define NFSDBG_FACILITY NFSDBG_PROC ··· 62 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception); 63 extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus); 64 extern struct rpc_procinfo nfs4_procedures[]; 65 - 66 - extern nfs4_stateid zero_stateid; 67 68 /* Prevent leaks of NFSv4 errors into userland */ 69 int nfs4_map_errors(int err) ··· 103 | FATTR4_WORD1_SPACE_TOTAL 104 }; 105 106 - u32 nfs4_pathconf_bitmap[2] = { 107 FATTR4_WORD0_MAXLINK 108 | FATTR4_WORD0_MAXNAME, 109 0 ··· 123 124 BUG_ON(readdir->count < 80); 125 if (cookie > 2) { 126 - readdir->cookie = (cookie > 2) ? cookie : 0; 127 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 128 return; 129 } ··· 269 int err; 270 do { 271 err = _nfs4_open_reclaim(sp, state); 272 - switch (err) { 273 - case 0: 274 - case -NFS4ERR_STALE_CLIENTID: 275 - case -NFS4ERR_STALE_STATEID: 276 - case -NFS4ERR_EXPIRED: 277 - return err; 278 - } 279 - err = nfs4_handle_exception(server, err, &exception); 280 } while (exception.retry); 281 return err; 282 } ··· 503 goto out_nodeleg; 504 } 505 506 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 507 { 508 struct nfs_inode *nfsi = NFS_I(state->inode); ··· 529 continue; 530 get_nfs_open_context(ctx); 531 spin_unlock(&state->inode->i_lock); 532 - status = _nfs4_open_expired(sp, state, ctx->dentry); 533 put_nfs_open_context(ctx); 534 return status; 535 } ··· 756 757 fattr->valid = 0; 758 759 - if (state != NULL) 760 msg.rpc_cred = state->owner->so_cred; 761 - if (sattr->ia_valid & ATTR_SIZE) 762 - nfs4_copy_stateid(&arg.stateid, state, NULL); 763 - else 764 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid)); 765 766 return rpc_call_sync(server->client, &msg, 0); ··· 1123 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 1124 struct iattr 
*sattr) 1125 { 1126 - struct inode * inode = dentry->d_inode; 1127 - int size_change = sattr->ia_valid & ATTR_SIZE; 1128 - struct nfs4_state *state = NULL; 1129 - int need_iput = 0; 1130 int status; 1131 1132 fattr->valid = 0; 1133 1134 - if (size_change) { 1135 - struct rpc_cred *cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0); 1136 - if (IS_ERR(cred)) 1137 - return PTR_ERR(cred); 1138 state = nfs4_find_state(inode, cred, FMODE_WRITE); 1139 - if (state == NULL) { 1140 - state = nfs4_open_delegated(dentry->d_inode, 1141 - FMODE_WRITE, cred); 1142 - if (IS_ERR(state)) 1143 - state = nfs4_do_open(dentry->d_parent->d_inode, 1144 - dentry, FMODE_WRITE, 1145 - NULL, cred); 1146 - need_iput = 1; 1147 - } 1148 - put_rpccred(cred); 1149 - if (IS_ERR(state)) 1150 - return PTR_ERR(state); 1151 - 1152 - if (state->inode != inode) { 1153 - printk(KERN_WARNING "nfs: raced in setattr (%p != %p), returning -EIO\n", inode, state->inode); 1154 - status = -EIO; 1155 - goto out; 1156 - } 1157 } 1158 status = nfs4_do_setattr(NFS_SERVER(inode), fattr, 1159 NFS_FH(inode), sattr, state); 1160 - out: 1161 - if (state) { 1162 - inode = state->inode; 1163 nfs4_close_state(state, FMODE_WRITE); 1164 - if (need_iput) 1165 - iput(inode); 1166 - } 1167 return status; 1168 } 1169 ··· 1722 }; 1723 int status; 1724 1725 lock_kernel(); 1726 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); 1727 res.pgbase = args.pgbase; ··· 1733 if (status == 0) 1734 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); 1735 unlock_kernel(); 1736 return status; 1737 } 1738 ··· 2159 return 0; 2160 } 2161 2162 static int 2163 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server) 2164 { ··· 2631 down_read(&clp->cl_sem); 2632 nlo.clientid = clp->cl_clientid; 2633 down(&state->lock_sema); 2634 - lsp = nfs4_find_lock_state(state, request->fl_owner); 2635 - if (lsp) 2636 - nlo.id = lsp->ls_id; 2637 - else { 2638 - spin_lock(&clp->cl_lock); 2639 - nlo.id = 
nfs4_alloc_lockowner_id(clp); 2640 - spin_unlock(&clp->cl_lock); 2641 - } 2642 arg.u.lockt = &nlo; 2643 status = rpc_call_sync(server->client, &msg, 0); 2644 if (!status) { ··· 2656 request->fl_pid = 0; 2657 status = 0; 2658 } 2659 - if (lsp) 2660 - nfs4_put_lock_state(lsp); 2661 up(&state->lock_sema); 2662 up_read(&clp->cl_sem); 2663 return status; ··· 2716 }; 2717 struct nfs4_lock_state *lsp; 2718 struct nfs_locku_opargs luargs; 2719 - int status = 0; 2720 2721 down_read(&clp->cl_sem); 2722 down(&state->lock_sema); 2723 - lsp = nfs4_find_lock_state(state, request->fl_owner); 2724 - if (!lsp) 2725 goto out; 2726 /* We might have lost the locks! */ 2727 - if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) { 2728 - luargs.seqid = lsp->ls_seqid; 2729 - memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid)); 2730 - arg.u.locku = &luargs; 2731 - status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); 2732 - nfs4_increment_lock_seqid(status, lsp); 2733 - } 2734 2735 - if (status == 0) { 2736 memcpy(&lsp->ls_stateid, &res.u.stateid, 2737 sizeof(lsp->ls_stateid)); 2738 - nfs4_notify_unlck(state, request, lsp); 2739 - } 2740 - nfs4_put_lock_state(lsp); 2741 out: 2742 up(&state->lock_sema); 2743 if (status == 0) ··· 2761 { 2762 struct inode *inode = state->inode; 2763 struct nfs_server *server = NFS_SERVER(inode); 2764 - struct nfs4_lock_state *lsp; 2765 struct nfs_lockargs arg = { 2766 .fh = NFS_FH(inode), 2767 .type = nfs4_lck_type(cmd, request), ··· 2783 }; 2784 int status; 2785 2786 - lsp = nfs4_get_lock_state(state, request->fl_owner); 2787 - if (lsp == NULL) 2788 - return -ENOMEM; 2789 if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) { 2790 struct nfs4_state_owner *owner = state->owner; 2791 struct nfs_open_to_lock otl = { ··· 2804 * seqid mutating errors */ 2805 nfs4_increment_seqid(status, owner); 2806 up(&owner->so_sema); 2807 } else { 2808 struct nfs_exist_lock el = { 2809 .seqid = lsp->ls_seqid, 2810 }; 2811 memcpy(&el.stateid, &lsp->ls_stateid, 
sizeof(el.stateid)); 2812 largs.u.exist_lock = &el; 2813 - largs.new_lock_owner = 0; 2814 arg.u.lock = &largs; 2815 status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); 2816 } 2817 - /* increment seqid on success, and * seqid mutating errors*/ 2818 - nfs4_increment_lock_seqid(status, lsp); 2819 /* save the returned stateid. */ 2820 - if (status == 0) { 2821 memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid)); 2822 - lsp->ls_flags |= NFS_LOCK_INITIALIZED; 2823 - if (!reclaim) 2824 - nfs4_notify_setlk(state, request, lsp); 2825 - } else if (status == -NFS4ERR_DENIED) 2826 status = -EAGAIN; 2827 - nfs4_put_lock_state(lsp); 2828 return status; 2829 } 2830 2831 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 2832 { 2833 - return _nfs4_do_setlk(state, F_SETLK, request, 1); 2834 } 2835 2836 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 2837 { 2838 - return _nfs4_do_setlk(state, F_SETLK, request, 0); 2839 } 2840 2841 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) ··· 2864 2865 down_read(&clp->cl_sem); 2866 down(&state->lock_sema); 2867 - status = _nfs4_do_setlk(state, cmd, request, 0); 2868 up(&state->lock_sema); 2869 if (status == 0) { 2870 /* Note: we always want to sleep here! */ ··· 2924 if (signalled()) 2925 break; 2926 } while(status < 0); 2927 - 2928 return status; 2929 } 2930 2931 struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = { ··· 2981 .recover_lock = nfs4_lock_expired, 2982 }; 2983 2984 struct nfs_rpc_ops nfs_v4_clientops = { 2985 .version = 4, /* protocol version */ 2986 .dentry_ops = &nfs4_dentry_operations, 2987 .dir_inode_ops = &nfs4_dir_inode_operations, 2988 .getroot = nfs4_proc_get_root, 2989 .getattr = nfs4_proc_getattr, 2990 .setattr = nfs4_proc_setattr, ··· 3025 .file_open = nfs4_proc_file_open, 3026 .file_release = nfs4_proc_file_release, 3027 .lock = nfs4_proc_lock, 3028 }; 3029 3030 /*
··· 48 #include <linux/smp_lock.h> 49 #include <linux/namei.h> 50 51 + #include "nfs4_fs.h" 52 #include "delegation.h" 53 54 #define NFSDBG_FACILITY NFSDBG_PROC ··· 61 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception); 62 extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus); 63 extern struct rpc_procinfo nfs4_procedures[]; 64 65 /* Prevent leaks of NFSv4 errors into userland */ 66 int nfs4_map_errors(int err) ··· 104 | FATTR4_WORD1_SPACE_TOTAL 105 }; 106 107 + const u32 nfs4_pathconf_bitmap[2] = { 108 FATTR4_WORD0_MAXLINK 109 | FATTR4_WORD0_MAXNAME, 110 0 ··· 124 125 BUG_ON(readdir->count < 80); 126 if (cookie > 2) { 127 + readdir->cookie = cookie; 128 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 129 return; 130 } ··· 270 int err; 271 do { 272 err = _nfs4_open_reclaim(sp, state); 273 + if (err != -NFS4ERR_DELAY) 274 + break; 275 + nfs4_handle_exception(server, err, &exception); 276 } while (exception.retry); 277 return err; 278 } ··· 509 goto out_nodeleg; 510 } 511 512 + static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) 513 + { 514 + struct nfs_server *server = NFS_SERVER(dentry->d_inode); 515 + struct nfs4_exception exception = { }; 516 + int err; 517 + 518 + do { 519 + err = _nfs4_open_expired(sp, state, dentry); 520 + if (err == -NFS4ERR_DELAY) 521 + nfs4_handle_exception(server, err, &exception); 522 + } while (exception.retry); 523 + return err; 524 + } 525 + 526 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 527 { 528 struct nfs_inode *nfsi = NFS_I(state->inode); ··· 521 continue; 522 get_nfs_open_context(ctx); 523 spin_unlock(&state->inode->i_lock); 524 + status = nfs4_do_open_expired(sp, state, ctx->dentry); 525 put_nfs_open_context(ctx); 526 return status; 527 } ··· 748 749 fattr->valid = 0; 750 751 + if (state != NULL) { 752 msg.rpc_cred = 
state->owner->so_cred; 753 + nfs4_copy_stateid(&arg.stateid, state, current->files); 754 + } else 755 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid)); 756 757 return rpc_call_sync(server->client, &msg, 0); ··· 1116 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 1117 struct iattr *sattr) 1118 { 1119 + struct rpc_cred *cred; 1120 + struct inode *inode = dentry->d_inode; 1121 + struct nfs4_state *state; 1122 int status; 1123 1124 fattr->valid = 0; 1125 1126 + cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0); 1127 + if (IS_ERR(cred)) 1128 + return PTR_ERR(cred); 1129 + /* Search for an existing WRITE delegation first */ 1130 + state = nfs4_open_delegated(inode, FMODE_WRITE, cred); 1131 + if (!IS_ERR(state)) { 1132 + /* NB: nfs4_open_delegated() bumps the inode->i_count */ 1133 + iput(inode); 1134 + } else { 1135 + /* Search for an existing open(O_WRITE) stateid */ 1136 state = nfs4_find_state(inode, cred, FMODE_WRITE); 1137 } 1138 + 1139 status = nfs4_do_setattr(NFS_SERVER(inode), fattr, 1140 NFS_FH(inode), sattr, state); 1141 + if (state != NULL) 1142 nfs4_close_state(state, FMODE_WRITE); 1143 + put_rpccred(cred); 1144 return status; 1145 } 1146 ··· 1731 }; 1732 int status; 1733 1734 + dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __FUNCTION__, 1735 + dentry->d_parent->d_name.name, 1736 + dentry->d_name.name, 1737 + (unsigned long long)cookie); 1738 lock_kernel(); 1739 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); 1740 res.pgbase = args.pgbase; ··· 1738 if (status == 0) 1739 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); 1740 unlock_kernel(); 1741 + dprintk("%s: returns %d\n", __FUNCTION__, status); 1742 return status; 1743 } 1744 ··· 2163 return 0; 2164 } 2165 2166 + static inline int nfs4_server_supports_acls(struct nfs_server *server) 2167 + { 2168 + return (server->caps & NFS_CAP_ACLS) 2169 + && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 2170 + && (server->acl_bitmask & 
ACL4_SUPPORT_DENY_ACL); 2171 + } 2172 + 2173 + /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that 2174 + * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on 2175 + * the stack. 2176 + */ 2177 + #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT) 2178 + 2179 + static void buf_to_pages(const void *buf, size_t buflen, 2180 + struct page **pages, unsigned int *pgbase) 2181 + { 2182 + const void *p = buf; 2183 + 2184 + *pgbase = offset_in_page(buf); 2185 + p -= *pgbase; 2186 + while (p < buf + buflen) { 2187 + *(pages++) = virt_to_page(p); 2188 + p += PAGE_CACHE_SIZE; 2189 + } 2190 + } 2191 + 2192 + struct nfs4_cached_acl { 2193 + int cached; 2194 + size_t len; 2195 + char data[0]; 2196 + }; 2197 + 2198 + static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 2199 + { 2200 + struct nfs_inode *nfsi = NFS_I(inode); 2201 + 2202 + spin_lock(&inode->i_lock); 2203 + kfree(nfsi->nfs4_acl); 2204 + nfsi->nfs4_acl = acl; 2205 + spin_unlock(&inode->i_lock); 2206 + } 2207 + 2208 + static void nfs4_zap_acl_attr(struct inode *inode) 2209 + { 2210 + nfs4_set_cached_acl(inode, NULL); 2211 + } 2212 + 2213 + static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 2214 + { 2215 + struct nfs_inode *nfsi = NFS_I(inode); 2216 + struct nfs4_cached_acl *acl; 2217 + int ret = -ENOENT; 2218 + 2219 + spin_lock(&inode->i_lock); 2220 + acl = nfsi->nfs4_acl; 2221 + if (acl == NULL) 2222 + goto out; 2223 + if (buf == NULL) /* user is just asking for length */ 2224 + goto out_len; 2225 + if (acl->cached == 0) 2226 + goto out; 2227 + ret = -ERANGE; /* see getxattr(2) man page */ 2228 + if (acl->len > buflen) 2229 + goto out; 2230 + memcpy(buf, acl->data, acl->len); 2231 + out_len: 2232 + ret = acl->len; 2233 + out: 2234 + spin_unlock(&inode->i_lock); 2235 + return ret; 2236 + } 2237 + 2238 + static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len) 2239 + { 
2240 + struct nfs4_cached_acl *acl; 2241 + 2242 + if (buf && acl_len <= PAGE_SIZE) { 2243 + acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); 2244 + if (acl == NULL) 2245 + goto out; 2246 + acl->cached = 1; 2247 + memcpy(acl->data, buf, acl_len); 2248 + } else { 2249 + acl = kmalloc(sizeof(*acl), GFP_KERNEL); 2250 + if (acl == NULL) 2251 + goto out; 2252 + acl->cached = 0; 2253 + } 2254 + acl->len = acl_len; 2255 + out: 2256 + nfs4_set_cached_acl(inode, acl); 2257 + } 2258 + 2259 + static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 2260 + { 2261 + struct page *pages[NFS4ACL_MAXPAGES]; 2262 + struct nfs_getaclargs args = { 2263 + .fh = NFS_FH(inode), 2264 + .acl_pages = pages, 2265 + .acl_len = buflen, 2266 + }; 2267 + size_t resp_len = buflen; 2268 + void *resp_buf; 2269 + struct rpc_message msg = { 2270 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 2271 + .rpc_argp = &args, 2272 + .rpc_resp = &resp_len, 2273 + }; 2274 + struct page *localpage = NULL; 2275 + int ret; 2276 + 2277 + if (buflen < PAGE_SIZE) { 2278 + /* As long as we're doing a round trip to the server anyway, 2279 + * let's be prepared for a page of acl data. 
*/ 2280 + localpage = alloc_page(GFP_KERNEL); 2281 + resp_buf = page_address(localpage); 2282 + if (localpage == NULL) 2283 + return -ENOMEM; 2284 + args.acl_pages[0] = localpage; 2285 + args.acl_pgbase = 0; 2286 + args.acl_len = PAGE_SIZE; 2287 + } else { 2288 + resp_buf = buf; 2289 + buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase); 2290 + } 2291 + ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); 2292 + if (ret) 2293 + goto out_free; 2294 + if (resp_len > args.acl_len) 2295 + nfs4_write_cached_acl(inode, NULL, resp_len); 2296 + else 2297 + nfs4_write_cached_acl(inode, resp_buf, resp_len); 2298 + if (buf) { 2299 + ret = -ERANGE; 2300 + if (resp_len > buflen) 2301 + goto out_free; 2302 + if (localpage) 2303 + memcpy(buf, resp_buf, resp_len); 2304 + } 2305 + ret = resp_len; 2306 + out_free: 2307 + if (localpage) 2308 + __free_page(localpage); 2309 + return ret; 2310 + } 2311 + 2312 + static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 2313 + { 2314 + struct nfs_server *server = NFS_SERVER(inode); 2315 + int ret; 2316 + 2317 + if (!nfs4_server_supports_acls(server)) 2318 + return -EOPNOTSUPP; 2319 + ret = nfs_revalidate_inode(server, inode); 2320 + if (ret < 0) 2321 + return ret; 2322 + ret = nfs4_read_cached_acl(inode, buf, buflen); 2323 + if (ret != -ENOENT) 2324 + return ret; 2325 + return nfs4_get_acl_uncached(inode, buf, buflen); 2326 + } 2327 + 2328 + static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 2329 + { 2330 + struct nfs_server *server = NFS_SERVER(inode); 2331 + struct page *pages[NFS4ACL_MAXPAGES]; 2332 + struct nfs_setaclargs arg = { 2333 + .fh = NFS_FH(inode), 2334 + .acl_pages = pages, 2335 + .acl_len = buflen, 2336 + }; 2337 + struct rpc_message msg = { 2338 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 2339 + .rpc_argp = &arg, 2340 + .rpc_resp = NULL, 2341 + }; 2342 + int ret; 2343 + 2344 + if (!nfs4_server_supports_acls(server)) 2345 + return -EOPNOTSUPP; 2346 + 
buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 2347 + ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0); 2348 + if (ret == 0) 2349 + nfs4_write_cached_acl(inode, buf, buflen); 2350 + return ret; 2351 + } 2352 + 2353 static int 2354 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server) 2355 { ··· 2448 down_read(&clp->cl_sem); 2449 nlo.clientid = clp->cl_clientid; 2450 down(&state->lock_sema); 2451 + status = nfs4_set_lock_state(state, request); 2452 + if (status != 0) 2453 + goto out; 2454 + lsp = request->fl_u.nfs4_fl.owner; 2455 + nlo.id = lsp->ls_id; 2456 arg.u.lockt = &nlo; 2457 status = rpc_call_sync(server->client, &msg, 0); 2458 if (!status) { ··· 2476 request->fl_pid = 0; 2477 status = 0; 2478 } 2479 + out: 2480 up(&state->lock_sema); 2481 up_read(&clp->cl_sem); 2482 return status; ··· 2537 }; 2538 struct nfs4_lock_state *lsp; 2539 struct nfs_locku_opargs luargs; 2540 + int status; 2541 2542 down_read(&clp->cl_sem); 2543 down(&state->lock_sema); 2544 + status = nfs4_set_lock_state(state, request); 2545 + if (status != 0) 2546 goto out; 2547 + lsp = request->fl_u.nfs4_fl.owner; 2548 /* We might have lost the locks! 
*/ 2549 + if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) 2550 + goto out; 2551 + luargs.seqid = lsp->ls_seqid; 2552 + memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid)); 2553 + arg.u.locku = &luargs; 2554 + status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); 2555 + nfs4_increment_lock_seqid(status, lsp); 2556 2557 + if (status == 0) 2558 memcpy(&lsp->ls_stateid, &res.u.stateid, 2559 sizeof(lsp->ls_stateid)); 2560 out: 2561 up(&state->lock_sema); 2562 if (status == 0) ··· 2584 { 2585 struct inode *inode = state->inode; 2586 struct nfs_server *server = NFS_SERVER(inode); 2587 + struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 2588 struct nfs_lockargs arg = { 2589 .fh = NFS_FH(inode), 2590 .type = nfs4_lck_type(cmd, request), ··· 2606 }; 2607 int status; 2608 2609 if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) { 2610 struct nfs4_state_owner *owner = state->owner; 2611 struct nfs_open_to_lock otl = { ··· 2630 * seqid mutating errors */ 2631 nfs4_increment_seqid(status, owner); 2632 up(&owner->so_sema); 2633 + if (status == 0) { 2634 + lsp->ls_flags |= NFS_LOCK_INITIALIZED; 2635 + lsp->ls_seqid++; 2636 + } 2637 } else { 2638 struct nfs_exist_lock el = { 2639 .seqid = lsp->ls_seqid, 2640 }; 2641 memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid)); 2642 largs.u.exist_lock = &el; 2643 arg.u.lock = &largs; 2644 status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); 2645 + /* increment seqid on success, and * seqid mutating errors*/ 2646 + nfs4_increment_lock_seqid(status, lsp); 2647 } 2648 /* save the returned stateid. 
*/ 2649 + if (status == 0) 2650 memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid)); 2651 + else if (status == -NFS4ERR_DENIED) 2652 status = -EAGAIN; 2653 return status; 2654 } 2655 2656 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 2657 { 2658 + struct nfs_server *server = NFS_SERVER(state->inode); 2659 + struct nfs4_exception exception = { }; 2660 + int err; 2661 + 2662 + do { 2663 + err = _nfs4_do_setlk(state, F_SETLK, request, 1); 2664 + if (err != -NFS4ERR_DELAY) 2665 + break; 2666 + nfs4_handle_exception(server, err, &exception); 2667 + } while (exception.retry); 2668 + return err; 2669 } 2670 2671 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 2672 { 2673 + struct nfs_server *server = NFS_SERVER(state->inode); 2674 + struct nfs4_exception exception = { }; 2675 + int err; 2676 + 2677 + do { 2678 + err = _nfs4_do_setlk(state, F_SETLK, request, 0); 2679 + if (err != -NFS4ERR_DELAY) 2680 + break; 2681 + nfs4_handle_exception(server, err, &exception); 2682 + } while (exception.retry); 2683 + return err; 2684 } 2685 2686 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) ··· 2671 2672 down_read(&clp->cl_sem); 2673 down(&state->lock_sema); 2674 + status = nfs4_set_lock_state(state, request); 2675 + if (status == 0) 2676 + status = _nfs4_do_setlk(state, cmd, request, 0); 2677 up(&state->lock_sema); 2678 if (status == 0) { 2679 /* Note: we always want to sleep here! 
*/ ··· 2729 if (signalled()) 2730 break; 2731 } while(status < 0); 2732 return status; 2733 + } 2734 + 2735 + 2736 + #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 2737 + 2738 + int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf, 2739 + size_t buflen, int flags) 2740 + { 2741 + struct inode *inode = dentry->d_inode; 2742 + 2743 + if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0) 2744 + return -EOPNOTSUPP; 2745 + 2746 + if (!S_ISREG(inode->i_mode) && 2747 + (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX)) 2748 + return -EPERM; 2749 + 2750 + return nfs4_proc_set_acl(inode, buf, buflen); 2751 + } 2752 + 2753 + /* The getxattr man page suggests returning -ENODATA for unknown attributes, 2754 + * and that's what we'll do for e.g. user attributes that haven't been set. 2755 + * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported 2756 + * attributes in kernel-managed attribute namespaces. */ 2757 + ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf, 2758 + size_t buflen) 2759 + { 2760 + struct inode *inode = dentry->d_inode; 2761 + 2762 + if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0) 2763 + return -EOPNOTSUPP; 2764 + 2765 + return nfs4_proc_get_acl(inode, buf, buflen); 2766 + } 2767 + 2768 + ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen) 2769 + { 2770 + size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1; 2771 + 2772 + if (buf && buflen < len) 2773 + return -ERANGE; 2774 + if (buf) 2775 + memcpy(buf, XATTR_NAME_NFSV4_ACL, len); 2776 + return len; 2777 } 2778 2779 struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = { ··· 2743 .recover_lock = nfs4_lock_expired, 2744 }; 2745 2746 + static struct inode_operations nfs4_file_inode_operations = { 2747 + .permission = nfs_permission, 2748 + .getattr = nfs_getattr, 2749 + .setattr = nfs_setattr, 2750 + .getxattr = nfs4_getxattr, 2751 + .setxattr = nfs4_setxattr, 2752 + .listxattr = nfs4_listxattr, 2753 + }; 2754 + 2755 struct nfs_rpc_ops 
nfs_v4_clientops = { 2756 .version = 4, /* protocol version */ 2757 .dentry_ops = &nfs4_dentry_operations, 2758 .dir_inode_ops = &nfs4_dir_inode_operations, 2759 + .file_inode_ops = &nfs4_file_inode_operations, 2760 .getroot = nfs4_proc_get_root, 2761 .getattr = nfs4_proc_getattr, 2762 .setattr = nfs4_proc_setattr, ··· 2777 .file_open = nfs4_proc_file_open, 2778 .file_release = nfs4_proc_file_release, 2779 .lock = nfs4_proc_lock, 2780 + .clear_acl_cache = nfs4_zap_acl_attr, 2781 }; 2782 2783 /*
+1
fs/nfs/nfs4renewd.c
··· 53 #include <linux/nfs.h> 54 #include <linux/nfs4.h> 55 #include <linux/nfs_fs.h> 56 57 #define NFSDBG_FACILITY NFSDBG_PROC 58
··· 53 #include <linux/nfs.h> 54 #include <linux/nfs4.h> 55 #include <linux/nfs_fs.h> 56 + #include "nfs4_fs.h" 57 58 #define NFSDBG_FACILITY NFSDBG_PROC 59
+92 -117
fs/nfs/nfs4state.c
··· 46 #include <linux/workqueue.h> 47 #include <linux/bitops.h> 48 49 #include "callback.h" 50 #include "delegation.h" 51 52 #define OPENOWNER_POOL_SIZE 8 53 54 static DEFINE_SPINLOCK(state_spinlock); 55 - 56 - nfs4_stateid zero_stateid; 57 - 58 - #if 0 59 - nfs4_stateid one_stateid = 60 - { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 61 - #endif 62 - 63 static LIST_HEAD(nfs4_clientid_list); 64 65 static void nfs4_recover_state(void *); 66 - extern void nfs4_renew_state(void *); 67 68 void 69 init_nfsv4_state(struct nfs_server *server) ··· 110 INIT_LIST_HEAD(&clp->cl_superblocks); 111 init_waitqueue_head(&clp->cl_waitq); 112 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client"); 113 clp->cl_boot_time = CURRENT_TIME; 114 clp->cl_state = 1 << NFS4CLNT_OK; 115 return clp; ··· 132 if (clp->cl_cred) 133 put_rpccred(clp->cl_cred); 134 nfs_idmap_delete(clp); 135 - if (clp->cl_rpcclient) 136 rpc_shutdown_client(clp->cl_rpcclient); 137 kfree(clp); 138 nfs_callback_down(); ··· 360 atomic_set(&state->count, 1); 361 INIT_LIST_HEAD(&state->lock_states); 362 init_MUTEX(&state->lock_sema); 363 - rwlock_init(&state->state_lock); 364 return state; 365 } 366 ··· 542 return NULL; 543 } 544 545 - struct nfs4_lock_state * 546 - nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) 547 - { 548 - struct nfs4_lock_state *lsp; 549 - read_lock(&state->state_lock); 550 - lsp = __nfs4_find_lock_state(state, fl_owner); 551 - read_unlock(&state->state_lock); 552 - return lsp; 553 - } 554 - 555 /* 556 * Return a compatible lock_state. If no initialized lock_state structure 557 * exists, return an uninitialized one. 
··· 558 return NULL; 559 lsp->ls_flags = 0; 560 lsp->ls_seqid = 0; /* arbitrary */ 561 - lsp->ls_id = -1; 562 memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data)); 563 atomic_set(&lsp->ls_count, 1); 564 lsp->ls_owner = fl_owner; 565 - INIT_LIST_HEAD(&lsp->ls_locks); 566 spin_lock(&clp->cl_lock); 567 lsp->ls_id = nfs4_alloc_lockowner_id(clp); 568 spin_unlock(&clp->cl_lock); 569 return lsp; 570 } 571 ··· 574 * 575 * The caller must be holding state->lock_sema and clp->cl_sem 576 */ 577 - struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) 578 { 579 - struct nfs4_lock_state * lsp; 580 581 - lsp = nfs4_find_lock_state(state, owner); 582 - if (lsp == NULL) 583 - lsp = nfs4_alloc_lock_state(state, owner); 584 return lsp; 585 } 586 587 /* 588 * Byte-range lock aware utility to initialize the stateid of read/write 589 * requests. 590 */ 591 - void 592 - nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner) 593 { 594 - if (test_bit(LK_STATE_IN_USE, &state->flags)) { 595 - struct nfs4_lock_state *lsp; 596 597 - lsp = nfs4_find_lock_state(state, fl_owner); 598 - if (lsp) { 599 - memcpy(dst, &lsp->ls_stateid, sizeof(*dst)); 600 - nfs4_put_lock_state(lsp); 601 - return; 602 - } 603 - } 604 memcpy(dst, &state->stateid, sizeof(*dst)); 605 } 606 607 /* ··· 680 { 681 if (status == NFS_OK || seqid_mutating_err(-status)) 682 lsp->ls_seqid++; 683 - } 684 - 685 - /* 686 - * Check to see if the request lock (type FL_UNLK) effects the fl lock. 687 - * 688 - * fl and request must have the same posix owner 689 - * 690 - * return: 691 - * 0 -> fl not effected by request 692 - * 1 -> fl consumed by request 693 - */ 694 - 695 - static int 696 - nfs4_check_unlock(struct file_lock *fl, struct file_lock *request) 697 - { 698 - if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end) 699 - return 1; 700 - return 0; 701 - } 702 - 703 - /* 704 - * Post an initialized lock_state on the state->lock_states list. 
705 - */ 706 - void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp) 707 - { 708 - if (!list_empty(&lsp->ls_locks)) 709 - return; 710 - atomic_inc(&lsp->ls_count); 711 - write_lock(&state->state_lock); 712 - list_add(&lsp->ls_locks, &state->lock_states); 713 - set_bit(LK_STATE_IN_USE, &state->flags); 714 - write_unlock(&state->state_lock); 715 - } 716 - 717 - /* 718 - * to decide to 'reap' lock state: 719 - * 1) search i_flock for file_locks with fl.lock_state = to ls. 720 - * 2) determine if unlock will consume found lock. 721 - * if so, reap 722 - * 723 - * else, don't reap. 724 - * 725 - */ 726 - void 727 - nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp) 728 - { 729 - struct inode *inode = state->inode; 730 - struct file_lock *fl; 731 - 732 - for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 733 - if (!(fl->fl_flags & FL_POSIX)) 734 - continue; 735 - if (fl->fl_owner != lsp->ls_owner) 736 - continue; 737 - /* Exit if we find at least one lock which is not consumed */ 738 - if (nfs4_check_unlock(fl,request) == 0) 739 - return; 740 - } 741 - 742 - write_lock(&state->state_lock); 743 - list_del_init(&lsp->ls_locks); 744 - if (list_empty(&state->lock_states)) 745 - clear_bit(LK_STATE_IN_USE, &state->flags); 746 - write_unlock(&state->state_lock); 747 - nfs4_put_lock_state(lsp); 748 - } 749 - 750 - /* 751 - * Release reference to lock_state, and free it if we see that 752 - * it is no longer in use 753 - */ 754 - void 755 - nfs4_put_lock_state(struct nfs4_lock_state *lsp) 756 - { 757 - if (!atomic_dec_and_test(&lsp->ls_count)) 758 - return; 759 - BUG_ON (!list_empty(&lsp->ls_locks)); 760 - kfree(lsp); 761 } 762 763 /*
··· 46 #include <linux/workqueue.h> 47 #include <linux/bitops.h> 48 49 + #include "nfs4_fs.h" 50 #include "callback.h" 51 #include "delegation.h" 52 53 #define OPENOWNER_POOL_SIZE 8 54 55 + const nfs4_stateid zero_stateid; 56 + 57 static DEFINE_SPINLOCK(state_spinlock); 58 static LIST_HEAD(nfs4_clientid_list); 59 60 static void nfs4_recover_state(void *); 61 62 void 63 init_nfsv4_state(struct nfs_server *server) ··· 116 INIT_LIST_HEAD(&clp->cl_superblocks); 117 init_waitqueue_head(&clp->cl_waitq); 118 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client"); 119 + clp->cl_rpcclient = ERR_PTR(-EINVAL); 120 clp->cl_boot_time = CURRENT_TIME; 121 clp->cl_state = 1 << NFS4CLNT_OK; 122 return clp; ··· 137 if (clp->cl_cred) 138 put_rpccred(clp->cl_cred); 139 nfs_idmap_delete(clp); 140 + if (!IS_ERR(clp->cl_rpcclient)) 141 rpc_shutdown_client(clp->cl_rpcclient); 142 kfree(clp); 143 nfs_callback_down(); ··· 365 atomic_set(&state->count, 1); 366 INIT_LIST_HEAD(&state->lock_states); 367 init_MUTEX(&state->lock_sema); 368 + spin_lock_init(&state->state_lock); 369 return state; 370 } 371 ··· 547 return NULL; 548 } 549 550 /* 551 * Return a compatible lock_state. If no initialized lock_state structure 552 * exists, return an uninitialized one. 
··· 573 return NULL; 574 lsp->ls_flags = 0; 575 lsp->ls_seqid = 0; /* arbitrary */ 576 memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data)); 577 atomic_set(&lsp->ls_count, 1); 578 lsp->ls_owner = fl_owner; 579 spin_lock(&clp->cl_lock); 580 lsp->ls_id = nfs4_alloc_lockowner_id(clp); 581 spin_unlock(&clp->cl_lock); 582 + INIT_LIST_HEAD(&lsp->ls_locks); 583 return lsp; 584 } 585 ··· 590 * 591 * The caller must be holding state->lock_sema and clp->cl_sem 592 */ 593 + static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) 594 { 595 + struct nfs4_lock_state *lsp, *new = NULL; 596 597 + for(;;) { 598 + spin_lock(&state->state_lock); 599 + lsp = __nfs4_find_lock_state(state, owner); 600 + if (lsp != NULL) 601 + break; 602 + if (new != NULL) { 603 + new->ls_state = state; 604 + list_add(&new->ls_locks, &state->lock_states); 605 + set_bit(LK_STATE_IN_USE, &state->flags); 606 + lsp = new; 607 + new = NULL; 608 + break; 609 + } 610 + spin_unlock(&state->state_lock); 611 + new = nfs4_alloc_lock_state(state, owner); 612 + if (new == NULL) 613 + return NULL; 614 + } 615 + spin_unlock(&state->state_lock); 616 + kfree(new); 617 return lsp; 618 + } 619 + 620 + /* 621 + * Release reference to lock_state, and free it if we see that 622 + * it is no longer in use 623 + */ 624 + static void nfs4_put_lock_state(struct nfs4_lock_state *lsp) 625 + { 626 + struct nfs4_state *state; 627 + 628 + if (lsp == NULL) 629 + return; 630 + state = lsp->ls_state; 631 + if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock)) 632 + return; 633 + list_del(&lsp->ls_locks); 634 + if (list_empty(&state->lock_states)) 635 + clear_bit(LK_STATE_IN_USE, &state->flags); 636 + spin_unlock(&state->state_lock); 637 + kfree(lsp); 638 + } 639 + 640 + static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) 641 + { 642 + struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner; 643 + 644 + dst->fl_u.nfs4_fl.owner = lsp; 645 + 
atomic_inc(&lsp->ls_count); 646 + } 647 + 648 + static void nfs4_fl_release_lock(struct file_lock *fl) 649 + { 650 + nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); 651 + } 652 + 653 + static struct file_lock_operations nfs4_fl_lock_ops = { 654 + .fl_copy_lock = nfs4_fl_copy_lock, 655 + .fl_release_private = nfs4_fl_release_lock, 656 + }; 657 + 658 + int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) 659 + { 660 + struct nfs4_lock_state *lsp; 661 + 662 + if (fl->fl_ops != NULL) 663 + return 0; 664 + lsp = nfs4_get_lock_state(state, fl->fl_owner); 665 + if (lsp == NULL) 666 + return -ENOMEM; 667 + fl->fl_u.nfs4_fl.owner = lsp; 668 + fl->fl_ops = &nfs4_fl_lock_ops; 669 + return 0; 670 } 671 672 /* 673 * Byte-range lock aware utility to initialize the stateid of read/write 674 * requests. 675 */ 676 + void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner) 677 { 678 + struct nfs4_lock_state *lsp; 679 680 memcpy(dst, &state->stateid, sizeof(*dst)); 681 + if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) 682 + return; 683 + 684 + spin_lock(&state->state_lock); 685 + lsp = __nfs4_find_lock_state(state, fl_owner); 686 + if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) 687 + memcpy(dst, &lsp->ls_stateid, sizeof(*dst)); 688 + spin_unlock(&state->state_lock); 689 + nfs4_put_lock_state(lsp); 690 } 691 692 /* ··· 627 { 628 if (status == NFS_OK || seqid_mutating_err(-status)) 629 lsp->ls_seqid++; 630 } 631 632 /*
+219 -22
fs/nfs/nfs4xdr.c
··· 51 #include <linux/nfs4.h> 52 #include <linux/nfs_fs.h> 53 #include <linux/nfs_idmap.h> 54 55 #define NFSDBG_FACILITY NFSDBG_XDR 56 ··· 83 #define encode_getfh_maxsz (op_encode_hdr_maxsz) 84 #define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \ 85 ((3+NFS4_FHSIZE) >> 2)) 86 - #define encode_getattr_maxsz (op_encode_hdr_maxsz + 3) 87 #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) 88 #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) 89 - #define nfs4_fattr_bitmap_maxsz (36 + 2 * nfs4_name_maxsz) 90 - #define decode_getattr_maxsz (op_decode_hdr_maxsz + 3 + \ 91 - nfs4_fattr_bitmap_maxsz) 92 #define encode_savefh_maxsz (op_encode_hdr_maxsz) 93 #define decode_savefh_maxsz (op_decode_hdr_maxsz) 94 #define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2) ··· 127 #define encode_symlink_maxsz (op_encode_hdr_maxsz + \ 128 1 + nfs4_name_maxsz + \ 129 nfs4_path_maxsz + \ 130 - nfs4_fattr_bitmap_maxsz) 131 #define decode_symlink_maxsz (op_decode_hdr_maxsz + 8) 132 #define encode_create_maxsz (op_encode_hdr_maxsz + \ 133 2 + nfs4_name_maxsz + \ 134 - nfs4_fattr_bitmap_maxsz) 135 #define decode_create_maxsz (op_decode_hdr_maxsz + 8) 136 #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) 137 #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) ··· 210 #define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \ 211 encode_putfh_maxsz + \ 212 op_encode_hdr_maxsz + 4 + \ 213 - nfs4_fattr_bitmap_maxsz + \ 214 encode_getattr_maxsz) 215 #define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \ 216 decode_putfh_maxsz + \ ··· 365 encode_delegreturn_maxsz) 366 #define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \ 367 decode_delegreturn_maxsz) 368 369 static struct { 370 unsigned int mode; ··· 478 * In the worst-case, this would be 479 * 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime) 480 * = 36 bytes, plus any contribution from variable-length fields 481 - * such as owner/group/acl's. 
482 */ 483 len = 16; 484 ··· 679 680 static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask) 681 { 682 - extern u32 nfs4_fattr_bitmap[]; 683 - 684 return encode_getattr_two(xdr, 685 bitmask[0] & nfs4_fattr_bitmap[0], 686 bitmask[1] & nfs4_fattr_bitmap[1]); ··· 686 687 static int encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask) 688 { 689 - extern u32 nfs4_fsinfo_bitmap[]; 690 - 691 return encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0], 692 bitmask[1] & nfs4_fsinfo_bitmap[1]); 693 } ··· 984 985 static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx) 986 { 987 - extern nfs4_stateid zero_stateid; 988 nfs4_stateid stateid; 989 uint32_t *p; 990 ··· 1014 static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req) 1015 { 1016 struct rpc_auth *auth = req->rq_task->tk_auth; 1017 int replen; 1018 uint32_t *p; 1019 ··· 1028 WRITE32(readdir->count >> 1); /* We're not doing readdirplus */ 1029 WRITE32(readdir->count); 1030 WRITE32(2); 1031 - if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) { 1032 - WRITE32(0); 1033 - WRITE32(FATTR4_WORD1_MOUNTED_ON_FILEID); 1034 - } else { 1035 - WRITE32(FATTR4_WORD0_FILEID); 1036 - WRITE32(0); 1037 - } 1038 1039 /* set up reply kvec 1040 * toplevel_status + taglen + rescount + OP_PUTFH + status ··· 1050 replen = (RPC_REPHDRSIZE + auth->au_rslack + 9) << 2; 1051 xdr_inline_pages(&req->rq_rcv_buf, replen, readdir->pages, 1052 readdir->pgbase, readdir->count); 1053 1054 return 0; 1055 } ··· 1113 WRITE32(OP_RENEW); 1114 WRITE64(client_stateid->cl_clientid); 1115 1116 return 0; 1117 } 1118 ··· 1679 } 1680 1681 /* 1682 * Encode a WRITE request 1683 */ 1684 static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writeargs *args) ··· 1772 */ 1773 static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, uint32_t *p, const struct nfs4_pathconf_arg *args) 1774 { 1775 - extern u32 nfs4_pathconf_bitmap[2]; 1776 
struct xdr_stream xdr; 1777 struct compound_hdr hdr = { 1778 .nops = 2, ··· 1792 */ 1793 static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, uint32_t *p, const struct nfs4_statfs_arg *args) 1794 { 1795 - extern u32 nfs4_statfs_bitmap[]; 1796 struct xdr_stream xdr; 1797 struct compound_hdr hdr = { 1798 .nops = 2, ··· 3076 return status; 3077 READ_BUF(8); 3078 COPYMEM(readdir->verifier.data, 8); 3079 3080 hdrlen = (char *) p - (char *) iov->iov_base; 3081 recvd = rcvbuf->len - hdrlen; ··· 3095 for (nr = 0; *p++; nr++) { 3096 if (p + 3 > end) 3097 goto short_pkt; 3098 p += 2; /* cookie */ 3099 len = ntohl(*p++); /* filename length */ 3100 if (len > NFS4_MAXNAMLEN) { 3101 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); 3102 goto err_unmap; 3103 } 3104 p += XDR_QUADLEN(len); 3105 if (p + 1 > end) 3106 goto short_pkt; ··· 3122 kunmap_atomic(kaddr, KM_USER0); 3123 return 0; 3124 short_pkt: 3125 entry[0] = entry[1] = 0; 3126 /* truncate listing ? */ 3127 if (!nr) { ··· 3206 static int decode_renew(struct xdr_stream *xdr) 3207 { 3208 return decode_op_hdr(xdr, OP_RENEW); 3209 } 3210 3211 static int ··· 3540 3541 } 3542 3543 3544 /* 3545 * Decode CLOSE response ··· 4082 } 4083 len = XDR_QUADLEN(ntohl(*p++)); /* attribute buffer length */ 4084 if (len > 0) { 4085 if (bitmap[0] == 0 && bitmap[1] == FATTR4_WORD1_MOUNTED_ON_FILEID) 4086 xdr_decode_hyper(p, &entry->ino); 4087 else if (bitmap[0] == FATTR4_WORD0_FILEID) ··· 4127 { NFS4ERR_DQUOT, EDQUOT }, 4128 { NFS4ERR_STALE, ESTALE }, 4129 { NFS4ERR_BADHANDLE, EBADHANDLE }, 4130 { NFS4ERR_BAD_COOKIE, EBADCOOKIE }, 4131 { NFS4ERR_NOTSUPP, ENOTSUPP }, 4132 { NFS4ERR_TOOSMALL, ETOOSMALL }, ··· 4214 PROC(READDIR, enc_readdir, dec_readdir), 4215 PROC(SERVER_CAPS, enc_server_caps, dec_server_caps), 4216 PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn), 4217 }; 4218 4219 struct rpc_version nfs_version4 = {
··· 51 #include <linux/nfs4.h> 52 #include <linux/nfs_fs.h> 53 #include <linux/nfs_idmap.h> 54 + #include "nfs4_fs.h" 55 56 #define NFSDBG_FACILITY NFSDBG_XDR 57 ··· 82 #define encode_getfh_maxsz (op_encode_hdr_maxsz) 83 #define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \ 84 ((3+NFS4_FHSIZE) >> 2)) 85 + #define nfs4_fattr_bitmap_maxsz 3 86 + #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) 87 #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) 88 #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) 89 + /* This is based on getfattr, which uses the most attributes: */ 90 + #define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \ 91 + 3 + 3 + 3 + 2 * nfs4_name_maxsz)) 92 + #define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \ 93 + nfs4_fattr_value_maxsz) 94 + #define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) 95 #define encode_savefh_maxsz (op_encode_hdr_maxsz) 96 #define decode_savefh_maxsz (op_decode_hdr_maxsz) 97 #define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2) ··· 122 #define encode_symlink_maxsz (op_encode_hdr_maxsz + \ 123 1 + nfs4_name_maxsz + \ 124 nfs4_path_maxsz + \ 125 + nfs4_fattr_maxsz) 126 #define decode_symlink_maxsz (op_decode_hdr_maxsz + 8) 127 #define encode_create_maxsz (op_encode_hdr_maxsz + \ 128 2 + nfs4_name_maxsz + \ 129 + nfs4_fattr_maxsz) 130 #define decode_create_maxsz (op_decode_hdr_maxsz + 8) 131 #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) 132 #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) ··· 205 #define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \ 206 encode_putfh_maxsz + \ 207 op_encode_hdr_maxsz + 4 + \ 208 + nfs4_fattr_maxsz + \ 209 encode_getattr_maxsz) 210 #define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \ 211 decode_putfh_maxsz + \ ··· 360 encode_delegreturn_maxsz) 361 #define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \ 362 decode_delegreturn_maxsz) 363 + #define NFS4_enc_getacl_sz 
(compound_encode_hdr_maxsz + \ 364 + encode_putfh_maxsz + \ 365 + encode_getattr_maxsz) 366 + #define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \ 367 + decode_putfh_maxsz + \ 368 + op_decode_hdr_maxsz + \ 369 + nfs4_fattr_bitmap_maxsz + 1) 370 + #define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \ 371 + encode_putfh_maxsz + \ 372 + op_encode_hdr_maxsz + 4 + \ 373 + nfs4_fattr_bitmap_maxsz + 1) 374 + #define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \ 375 + decode_putfh_maxsz + \ 376 + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) 377 378 static struct { 379 unsigned int mode; ··· 459 * In the worst-case, this would be 460 * 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime) 461 * = 36 bytes, plus any contribution from variable-length fields 462 + * such as owner/group. 463 */ 464 len = 16; 465 ··· 660 661 static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask) 662 { 663 return encode_getattr_two(xdr, 664 bitmask[0] & nfs4_fattr_bitmap[0], 665 bitmask[1] & nfs4_fattr_bitmap[1]); ··· 669 670 static int encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask) 671 { 672 return encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0], 673 bitmask[1] & nfs4_fsinfo_bitmap[1]); 674 } ··· 969 970 static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx) 971 { 972 nfs4_stateid stateid; 973 uint32_t *p; 974 ··· 1000 static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req) 1001 { 1002 struct rpc_auth *auth = req->rq_task->tk_auth; 1003 + uint32_t attrs[2] = { 1004 + FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID, 1005 + FATTR4_WORD1_MOUNTED_ON_FILEID, 1006 + }; 1007 int replen; 1008 uint32_t *p; 1009 ··· 1010 WRITE32(readdir->count >> 1); /* We're not doing readdirplus */ 1011 WRITE32(readdir->count); 1012 WRITE32(2); 1013 + /* Switch to mounted_on_fileid if the server supports it */ 1014 + if (readdir->bitmask[1] & 
FATTR4_WORD1_MOUNTED_ON_FILEID) 1015 + attrs[0] &= ~FATTR4_WORD0_FILEID; 1016 + else 1017 + attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 1018 + WRITE32(attrs[0] & readdir->bitmask[0]); 1019 + WRITE32(attrs[1] & readdir->bitmask[1]); 1020 + dprintk("%s: cookie = %Lu, verifier = 0x%x%x, bitmap = 0x%x%x\n", 1021 + __FUNCTION__, 1022 + (unsigned long long)readdir->cookie, 1023 + ((u32 *)readdir->verifier.data)[0], 1024 + ((u32 *)readdir->verifier.data)[1], 1025 + attrs[0] & readdir->bitmask[0], 1026 + attrs[1] & readdir->bitmask[1]); 1027 1028 /* set up reply kvec 1029 * toplevel_status + taglen + rescount + OP_PUTFH + status ··· 1025 replen = (RPC_REPHDRSIZE + auth->au_rslack + 9) << 2; 1026 xdr_inline_pages(&req->rq_rcv_buf, replen, readdir->pages, 1027 readdir->pgbase, readdir->count); 1028 + dprintk("%s: inlined page args = (%u, %p, %u, %u)\n", 1029 + __FUNCTION__, replen, readdir->pages, 1030 + readdir->pgbase, readdir->count); 1031 1032 return 0; 1033 } ··· 1085 WRITE32(OP_RENEW); 1086 WRITE64(client_stateid->cl_clientid); 1087 1088 + return 0; 1089 + } 1090 + 1091 + static int 1092 + encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg) 1093 + { 1094 + uint32_t *p; 1095 + 1096 + RESERVE_SPACE(4+sizeof(zero_stateid.data)); 1097 + WRITE32(OP_SETATTR); 1098 + WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data)); 1099 + RESERVE_SPACE(2*4); 1100 + WRITE32(1); 1101 + WRITE32(FATTR4_WORD0_ACL); 1102 + if (arg->acl_len % 4) 1103 + return -EINVAL; 1104 + RESERVE_SPACE(4); 1105 + WRITE32(arg->acl_len); 1106 + xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); 1107 return 0; 1108 } 1109 ··· 1632 } 1633 1634 /* 1635 + * Encode a GETACL request 1636 + */ 1637 + static int 1638 + nfs4_xdr_enc_getacl(struct rpc_rqst *req, uint32_t *p, 1639 + struct nfs_getaclargs *args) 1640 + { 1641 + struct xdr_stream xdr; 1642 + struct rpc_auth *auth = req->rq_task->tk_auth; 1643 + struct compound_hdr hdr = { 1644 + .nops = 2, 1645 + }; 1646 + int replen, 
status; 1647 + 1648 + xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1649 + encode_compound_hdr(&xdr, &hdr); 1650 + status = encode_putfh(&xdr, args->fh); 1651 + if (status) 1652 + goto out; 1653 + status = encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0); 1654 + /* set up reply buffer: */ 1655 + replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_getacl_sz) << 2; 1656 + xdr_inline_pages(&req->rq_rcv_buf, replen, 1657 + args->acl_pages, args->acl_pgbase, args->acl_len); 1658 + out: 1659 + return status; 1660 + } 1661 + 1662 + /* 1663 * Encode a WRITE request 1664 */ 1665 static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writeargs *args) ··· 1697 */ 1698 static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, uint32_t *p, const struct nfs4_pathconf_arg *args) 1699 { 1700 struct xdr_stream xdr; 1701 struct compound_hdr hdr = { 1702 .nops = 2, ··· 1718 */ 1719 static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, uint32_t *p, const struct nfs4_statfs_arg *args) 1720 { 1721 struct xdr_stream xdr; 1722 struct compound_hdr hdr = { 1723 .nops = 2, ··· 3003 return status; 3004 READ_BUF(8); 3005 COPYMEM(readdir->verifier.data, 8); 3006 + dprintk("%s: verifier = 0x%x%x\n", 3007 + __FUNCTION__, 3008 + ((u32 *)readdir->verifier.data)[0], 3009 + ((u32 *)readdir->verifier.data)[1]); 3010 + 3011 3012 hdrlen = (char *) p - (char *) iov->iov_base; 3013 recvd = rcvbuf->len - hdrlen; ··· 3017 for (nr = 0; *p++; nr++) { 3018 if (p + 3 > end) 3019 goto short_pkt; 3020 + dprintk("cookie = %Lu, ", *((unsigned long long *)p)); 3021 p += 2; /* cookie */ 3022 len = ntohl(*p++); /* filename length */ 3023 if (len > NFS4_MAXNAMLEN) { 3024 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); 3025 goto err_unmap; 3026 } 3027 + dprintk("filename = %*s\n", len, (char *)p); 3028 p += XDR_QUADLEN(len); 3029 if (p + 1 > end) 3030 goto short_pkt; ··· 3042 kunmap_atomic(kaddr, KM_USER0); 3043 return 0; 3044 short_pkt: 3045 + dprintk("%s: short packet at entry 
%d\n", __FUNCTION__, nr); 3046 entry[0] = entry[1] = 0; 3047 /* truncate listing ? */ 3048 if (!nr) { ··· 3125 static int decode_renew(struct xdr_stream *xdr) 3126 { 3127 return decode_op_hdr(xdr, OP_RENEW); 3128 + } 3129 + 3130 + static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, 3131 + size_t *acl_len) 3132 + { 3133 + uint32_t *savep; 3134 + uint32_t attrlen, 3135 + bitmap[2] = {0}; 3136 + struct kvec *iov = req->rq_rcv_buf.head; 3137 + int status; 3138 + 3139 + *acl_len = 0; 3140 + if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) 3141 + goto out; 3142 + if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) 3143 + goto out; 3144 + if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) 3145 + goto out; 3146 + 3147 + if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U))) 3148 + return -EIO; 3149 + if (likely(bitmap[0] & FATTR4_WORD0_ACL)) { 3150 + int hdrlen, recvd; 3151 + 3152 + /* We ignore &savep and don't do consistency checks on 3153 + * the attr length. Let userspace figure it out.... 
*/ 3154 + hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base; 3155 + recvd = req->rq_rcv_buf.len - hdrlen; 3156 + if (attrlen > recvd) { 3157 + printk(KERN_WARNING "NFS: server cheating in getattr" 3158 + " acl reply: attrlen %u > recvd %u\n", 3159 + attrlen, recvd); 3160 + return -EINVAL; 3161 + } 3162 + if (attrlen <= *acl_len) 3163 + xdr_read_pages(xdr, attrlen); 3164 + *acl_len = attrlen; 3165 + } 3166 + 3167 + out: 3168 + return status; 3169 } 3170 3171 static int ··· 3418 3419 } 3420 3421 + /* 3422 + * Encode an SETACL request 3423 + */ 3424 + static int 3425 + nfs4_xdr_enc_setacl(struct rpc_rqst *req, uint32_t *p, struct nfs_setaclargs *args) 3426 + { 3427 + struct xdr_stream xdr; 3428 + struct compound_hdr hdr = { 3429 + .nops = 2, 3430 + }; 3431 + int status; 3432 + 3433 + xdr_init_encode(&xdr, &req->rq_snd_buf, p); 3434 + encode_compound_hdr(&xdr, &hdr); 3435 + status = encode_putfh(&xdr, args->fh); 3436 + if (status) 3437 + goto out; 3438 + status = encode_setacl(&xdr, args); 3439 + out: 3440 + return status; 3441 + } 3442 + /* 3443 + * Decode SETACL response 3444 + */ 3445 + static int 3446 + nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, uint32_t *p, void *res) 3447 + { 3448 + struct xdr_stream xdr; 3449 + struct compound_hdr hdr; 3450 + int status; 3451 + 3452 + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 3453 + status = decode_compound_hdr(&xdr, &hdr); 3454 + if (status) 3455 + goto out; 3456 + status = decode_putfh(&xdr); 3457 + if (status) 3458 + goto out; 3459 + status = decode_setattr(&xdr, res); 3460 + out: 3461 + return status; 3462 + } 3463 + 3464 + /* 3465 + * Decode GETACL response 3466 + */ 3467 + static int 3468 + nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, uint32_t *p, size_t *acl_len) 3469 + { 3470 + struct xdr_stream xdr; 3471 + struct compound_hdr hdr; 3472 + int status; 3473 + 3474 + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 3475 + status = decode_compound_hdr(&xdr, &hdr); 3476 + if (status) 3477 + goto out; 3478 + status = 
decode_putfh(&xdr); 3479 + if (status) 3480 + goto out; 3481 + status = decode_getacl(&xdr, rqstp, acl_len); 3482 + 3483 + out: 3484 + return status; 3485 + } 3486 3487 /* 3488 * Decode CLOSE response ··· 3895 } 3896 len = XDR_QUADLEN(ntohl(*p++)); /* attribute buffer length */ 3897 if (len > 0) { 3898 + if (bitmap[0] & FATTR4_WORD0_RDATTR_ERROR) { 3899 + bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR; 3900 + /* Ignore the return value of rdattr_error for now */ 3901 + p++; 3902 + len--; 3903 + } 3904 if (bitmap[0] == 0 && bitmap[1] == FATTR4_WORD1_MOUNTED_ON_FILEID) 3905 xdr_decode_hyper(p, &entry->ino); 3906 else if (bitmap[0] == FATTR4_WORD0_FILEID) ··· 3934 { NFS4ERR_DQUOT, EDQUOT }, 3935 { NFS4ERR_STALE, ESTALE }, 3936 { NFS4ERR_BADHANDLE, EBADHANDLE }, 3937 + { NFS4ERR_BADOWNER, EINVAL }, 3938 + { NFS4ERR_BADNAME, EINVAL }, 3939 { NFS4ERR_BAD_COOKIE, EBADCOOKIE }, 3940 { NFS4ERR_NOTSUPP, ENOTSUPP }, 3941 { NFS4ERR_TOOSMALL, ETOOSMALL }, ··· 4019 PROC(READDIR, enc_readdir, dec_readdir), 4020 PROC(SERVER_CAPS, enc_server_caps, dec_server_caps), 4021 PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn), 4022 + PROC(GETACL, enc_getacl, dec_getacl), 4023 + PROC(SETACL, enc_setacl, dec_setacl), 4024 }; 4025 4026 struct rpc_version nfs_version4 = {
+9
fs/nfs/nfsroot.c
··· 124 Opt_soft, Opt_hard, Opt_intr, 125 Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac, 126 Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp, 127 /* Error token */ 128 Opt_err 129 }; ··· 159 {Opt_udp, "udp"}, 160 {Opt_tcp, "proto=tcp"}, 161 {Opt_tcp, "tcp"}, 162 {Opt_err, NULL} 163 164 }; ··· 268 break; 269 case Opt_tcp: 270 nfs_data.flags |= NFS_MOUNT_TCP; 271 break; 272 default : 273 return 0;
··· 124 Opt_soft, Opt_hard, Opt_intr, 125 Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac, 126 Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp, 127 + Opt_acl, Opt_noacl, 128 /* Error token */ 129 Opt_err 130 }; ··· 158 {Opt_udp, "udp"}, 159 {Opt_tcp, "proto=tcp"}, 160 {Opt_tcp, "tcp"}, 161 + {Opt_acl, "acl"}, 162 + {Opt_noacl, "noacl"}, 163 {Opt_err, NULL} 164 165 }; ··· 265 break; 266 case Opt_tcp: 267 nfs_data.flags |= NFS_MOUNT_TCP; 268 + break; 269 + case Opt_acl: 270 + nfs_data.flags &= ~NFS_MOUNT_NOACL; 271 + break; 272 + case Opt_noacl: 273 + nfs_data.flags |= NFS_MOUNT_NOACL; 274 break; 275 default : 276 return 0;
+107 -33
fs/nfs/pagelist.c
··· 107 smp_mb__before_clear_bit(); 108 clear_bit(PG_BUSY, &req->wb_flags); 109 smp_mb__after_clear_bit(); 110 - wake_up_all(&req->wb_context->waitq); 111 nfs_release_request(req); 112 } 113 114 /** ··· 177 nfs_page_free(req); 178 } 179 180 - /** 181 - * nfs_list_add_request - Insert a request into a sorted list 182 - * @req: request 183 - * @head: head of list into which to insert the request. 184 - * 185 - * Note that the wb_list is sorted by page index in order to facilitate 186 - * coalescing of requests. 187 - * We use an insertion sort that is optimized for the case of appended 188 - * writes. 189 - */ 190 - void 191 - nfs_list_add_request(struct nfs_page *req, struct list_head *head) 192 { 193 - struct list_head *pos; 194 195 - #ifdef NFS_PARANOIA 196 - if (!list_empty(&req->wb_list)) { 197 - printk(KERN_ERR "NFS: Add to list failed!\n"); 198 - BUG(); 199 - } 200 - #endif 201 - list_for_each_prev(pos, head) { 202 - struct nfs_page *p = nfs_list_entry(pos); 203 - if (p->wb_index < req->wb_index) 204 - break; 205 - } 206 - list_add(&req->wb_list, pos); 207 - req->wb_list_head = head; 208 } 209 210 /** ··· 198 int 199 nfs_wait_on_request(struct nfs_page *req) 200 { 201 - struct inode *inode = req->wb_context->dentry->d_inode; 202 - struct rpc_clnt *clnt = NFS_CLIENT(inode); 203 204 - if (!NFS_WBACK_BUSY(req)) 205 - return 0; 206 - return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req)); 207 } 208 209 /** ··· 261 return npages; 262 } 263 264 /** 265 * nfs_scan_list - Scan a list for matching requests 266 * @head: One of the NFS inode request lists ··· 354 if (req->wb_index > idx_end) 355 break; 356 357 - if (!nfs_lock_request(req)) 358 continue; 359 nfs_list_remove_request(req); 360 nfs_list_add_request(req, dst);
··· 107 smp_mb__before_clear_bit(); 108 clear_bit(PG_BUSY, &req->wb_flags); 109 smp_mb__after_clear_bit(); 110 + wake_up_bit(&req->wb_flags, PG_BUSY); 111 nfs_release_request(req); 112 + } 113 + 114 + /** 115 + * nfs_set_page_writeback_locked - Lock a request for writeback 116 + * @req: 117 + */ 118 + int nfs_set_page_writeback_locked(struct nfs_page *req) 119 + { 120 + struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); 121 + 122 + if (!nfs_lock_request(req)) 123 + return 0; 124 + radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); 125 + return 1; 126 + } 127 + 128 + /** 129 + * nfs_clear_page_writeback - Unlock request and wake up sleepers 130 + */ 131 + void nfs_clear_page_writeback(struct nfs_page *req) 132 + { 133 + struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); 134 + 135 + spin_lock(&nfsi->req_lock); 136 + radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); 137 + spin_unlock(&nfsi->req_lock); 138 + nfs_unlock_request(req); 139 } 140 141 /** ··· 150 nfs_page_free(req); 151 } 152 153 + static int nfs_wait_bit_interruptible(void *word) 154 { 155 + int ret = 0; 156 157 + if (signal_pending(current)) 158 + ret = -ERESTARTSYS; 159 + else 160 + schedule(); 161 + return ret; 162 } 163 164 /** ··· 190 int 191 nfs_wait_on_request(struct nfs_page *req) 192 { 193 + struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode); 194 + sigset_t oldmask; 195 + int ret = 0; 196 197 + if (!test_bit(PG_BUSY, &req->wb_flags)) 198 + goto out; 199 + /* 200 + * Note: the call to rpc_clnt_sigmask() suffices to ensure that we 201 + * are not interrupted if intr flag is not set 202 + */ 203 + rpc_clnt_sigmask(clnt, &oldmask); 204 + ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY, 205 + nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE); 206 + rpc_clnt_sigunmask(clnt, &oldmask); 207 + out: 208 + return ret; 209 } 210 211 /** ··· 243 return npages; 244 } 245 246 + #define 
NFS_SCAN_MAXENTRIES 16 247 + /** 248 + * nfs_scan_lock_dirty - Scan the radix tree for dirty requests 249 + * @nfsi: NFS inode 250 + * @dst: Destination list 251 + * @idx_start: lower bound of page->index to scan 252 + * @npages: idx_start + npages sets the upper bound to scan. 253 + * 254 + * Moves elements from one of the inode request lists. 255 + * If the number of requests is set to 0, the entire address_space 256 + * starting at index idx_start, is scanned. 257 + * The requests are *not* checked to ensure that they form a contiguous set. 258 + * You must be holding the inode's req_lock when calling this function 259 + */ 260 + int 261 + nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst, 262 + unsigned long idx_start, unsigned int npages) 263 + { 264 + struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; 265 + struct nfs_page *req; 266 + unsigned long idx_end; 267 + int found, i; 268 + int res; 269 + 270 + res = 0; 271 + if (npages == 0) 272 + idx_end = ~0; 273 + else 274 + idx_end = idx_start + npages - 1; 275 + 276 + for (;;) { 277 + found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, 278 + (void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES, 279 + NFS_PAGE_TAG_DIRTY); 280 + if (found <= 0) 281 + break; 282 + for (i = 0; i < found; i++) { 283 + req = pgvec[i]; 284 + if (req->wb_index > idx_end) 285 + goto out; 286 + 287 + idx_start = req->wb_index + 1; 288 + 289 + if (nfs_set_page_writeback_locked(req)) { 290 + radix_tree_tag_clear(&nfsi->nfs_page_tree, 291 + req->wb_index, NFS_PAGE_TAG_DIRTY); 292 + nfs_list_remove_request(req); 293 + nfs_list_add_request(req, dst); 294 + res++; 295 + } 296 + } 297 + } 298 + out: 299 + return res; 300 + } 301 + 302 /** 303 * nfs_scan_list - Scan a list for matching requests 304 * @head: One of the NFS inode request lists ··· 280 if (req->wb_index > idx_end) 281 break; 282 283 + if (!nfs_set_page_writeback_locked(req)) 284 continue; 285 nfs_list_remove_request(req); 286 nfs_list_add_request(req, dst);
+1
fs/nfs/proc.c
··· 622 .version = 2, /* protocol version */ 623 .dentry_ops = &nfs_dentry_operations, 624 .dir_inode_ops = &nfs_dir_inode_operations, 625 .getroot = nfs_proc_get_root, 626 .getattr = nfs_proc_getattr, 627 .setattr = nfs_proc_setattr,
··· 622 .version = 2, /* protocol version */ 623 .dentry_ops = &nfs_dentry_operations, 624 .dir_inode_ops = &nfs_dir_inode_operations, 625 + .file_inode_ops = &nfs_file_inode_operations, 626 .getroot = nfs_proc_get_root, 627 .getattr = nfs_proc_getattr, 628 .setattr = nfs_proc_setattr,
-3
fs/nfs/read.c
··· 173 if (len < PAGE_CACHE_SIZE) 174 memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); 175 176 - nfs_lock_request(new); 177 nfs_list_add_request(new, &one_request); 178 nfs_pagein_one(&one_request, inode); 179 return 0; ··· 184 185 nfs_clear_request(req); 186 nfs_release_request(req); 187 - nfs_unlock_request(req); 188 189 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", 190 req->wb_context->dentry->d_inode->i_sb->s_id, ··· 551 } 552 if (len < PAGE_CACHE_SIZE) 553 memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); 554 - nfs_lock_request(new); 555 nfs_list_add_request(new, desc->head); 556 return 0; 557 }
··· 173 if (len < PAGE_CACHE_SIZE) 174 memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); 175 176 nfs_list_add_request(new, &one_request); 177 nfs_pagein_one(&one_request, inode); 178 return 0; ··· 185 186 nfs_clear_request(req); 187 nfs_release_request(req); 188 189 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", 190 req->wb_context->dentry->d_inode->i_sb->s_id, ··· 553 } 554 if (len < PAGE_CACHE_SIZE) 555 memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); 556 nfs_list_add_request(new, desc->head); 557 return 0; 558 }
+54 -54
fs/nfs/write.c
··· 220 ClearPageError(page); 221 222 io_error: 223 - nfs_end_data_update_defer(inode); 224 nfs_writedata_free(wdata); 225 return written ? written : result; 226 } ··· 352 if (err < 0) 353 goto out; 354 } 355 - err = nfs_commit_inode(inode, 0, 0, wb_priority(wbc)); 356 if (err > 0) { 357 wbc->nr_to_write -= err; 358 err = 0; ··· 401 nfsi->npages--; 402 if (!nfsi->npages) { 403 spin_unlock(&nfsi->req_lock); 404 - nfs_end_data_update_defer(inode); 405 iput(inode); 406 } else 407 spin_unlock(&nfsi->req_lock); ··· 446 struct nfs_inode *nfsi = NFS_I(inode); 447 448 spin_lock(&nfsi->req_lock); 449 nfs_list_add_request(req, &nfsi->dirty); 450 nfsi->ndirty++; 451 spin_unlock(&nfsi->req_lock); ··· 505 506 spin_lock(&nfsi->req_lock); 507 next = idx_start; 508 - while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) { 509 if (req->wb_index > idx_end) 510 break; 511 512 next = req->wb_index + 1; 513 - if (!NFS_WBACK_BUSY(req)) 514 - continue; 515 516 atomic_inc(&req->wb_count); 517 spin_unlock(&nfsi->req_lock); ··· 539 nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) 540 { 541 struct nfs_inode *nfsi = NFS_I(inode); 542 - int res; 543 - res = nfs_scan_list(&nfsi->dirty, dst, idx_start, npages); 544 - nfsi->ndirty -= res; 545 - sub_page_state(nr_dirty,res); 546 - if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)) 547 - printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n"); 548 return res; 549 } 550 ··· 566 nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) 567 { 568 struct nfs_inode *nfsi = NFS_I(inode); 569 - int res; 570 - res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages); 571 - nfsi->ncommit -= res; 572 - if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit)) 573 - printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n"); 574 return res; 575 } 576 #endif ··· 757 * is entirely in cache, it may be more efficient to 
avoid 758 * fragmenting write requests. 759 */ 760 - if (PageUptodate(page) && inode->i_flock == NULL) { 761 loff_t end_offs = i_size_read(inode) - 1; 762 unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT; 763 ··· 828 #else 829 nfs_inode_remove_request(req); 830 #endif 831 - nfs_unlock_request(req); 832 } 833 834 static inline int flush_task_priority(int how) ··· 959 nfs_writedata_free(data); 960 } 961 nfs_mark_request_dirty(req); 962 - nfs_unlock_request(req); 963 return -ENOMEM; 964 } 965 ··· 1009 struct nfs_page *req = nfs_list_entry(head->next); 1010 nfs_list_remove_request(req); 1011 nfs_mark_request_dirty(req); 1012 - nfs_unlock_request(req); 1013 } 1014 return -ENOMEM; 1015 } ··· 1036 req = nfs_list_entry(head->next); 1037 nfs_list_remove_request(req); 1038 nfs_mark_request_dirty(req); 1039 - nfs_unlock_request(req); 1040 } 1041 return error; 1042 } ··· 1128 nfs_inode_remove_request(req); 1129 #endif 1130 next: 1131 - nfs_unlock_request(req); 1132 } 1133 } 1134 ··· 1217 struct nfs_write_data *data, int how) 1218 { 1219 struct rpc_task *task = &data->task; 1220 - struct nfs_page *first, *last; 1221 struct inode *inode; 1222 - loff_t start, end, len; 1223 1224 /* Set up the RPC argument and reply structs 1225 * NB: take care not to mess about with data->commit et al. */ 1226 1227 list_splice_init(head, &data->pages); 1228 first = nfs_list_entry(data->pages.next); 1229 - last = nfs_list_entry(data->pages.prev); 1230 inode = first->wb_context->dentry->d_inode; 1231 - 1232 - /* 1233 - * Determine the offset range of requests in the COMMIT call. 1234 - * We rely on the fact that data->pages is an ordered list... 
1235 - */ 1236 - start = req_offset(first); 1237 - end = req_offset(last) + last->wb_bytes; 1238 - len = end - start; 1239 - /* If 'len' is not a 32-bit quantity, pass '0' in the COMMIT call */ 1240 - if (end >= i_size_read(inode) || len < 0 || len > (~((u32)0) >> 1)) 1241 - len = 0; 1242 1243 data->inode = inode; 1244 data->cred = first->wb_context->cred; 1245 1246 data->args.fh = NFS_FH(data->inode); 1247 - data->args.offset = start; 1248 - data->args.count = len; 1249 - data->res.count = len; 1250 data->res.fattr = &data->fattr; 1251 data->res.verf = &data->verf; 1252 ··· 1273 req = nfs_list_entry(head->next); 1274 nfs_list_remove_request(req); 1275 nfs_mark_request_commit(req); 1276 - nfs_unlock_request(req); 1277 } 1278 return -ENOMEM; 1279 } ··· 1319 dprintk(" mismatch\n"); 1320 nfs_mark_request_dirty(req); 1321 next: 1322 - nfs_unlock_request(req); 1323 res++; 1324 } 1325 sub_page_state(nr_unstable,res); ··· 1337 spin_lock(&nfsi->req_lock); 1338 res = nfs_scan_dirty(inode, &head, idx_start, npages); 1339 spin_unlock(&nfsi->req_lock); 1340 - if (res) 1341 - error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how); 1342 if (error < 0) 1343 return error; 1344 return res; 1345 } 1346 1347 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 1348 - int nfs_commit_inode(struct inode *inode, unsigned long idx_start, 1349 - unsigned int npages, int how) 1350 { 1351 struct nfs_inode *nfsi = NFS_I(inode); 1352 LIST_HEAD(head); ··· 1361 error = 0; 1362 1363 spin_lock(&nfsi->req_lock); 1364 - res = nfs_scan_commit(inode, &head, idx_start, npages); 1365 if (res) { 1366 - res += nfs_scan_commit(inode, &head, 0, 0); 1367 - spin_unlock(&nfsi->req_lock); 1368 error = nfs_commit_list(&head, how); 1369 - } else 1370 - spin_unlock(&nfsi->req_lock); 1371 - if (error < 0) 1372 - return error; 1373 return res; 1374 } 1375 #endif ··· 1389 error = nfs_flush_inode(inode, idx_start, npages, how); 1390 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 1391 if (error == 0) 1392 
- error = nfs_commit_inode(inode, idx_start, npages, how); 1393 #endif 1394 } while (error > 0); 1395 return error;
··· 220 ClearPageError(page); 221 222 io_error: 223 + nfs_end_data_update(inode); 224 nfs_writedata_free(wdata); 225 return written ? written : result; 226 } ··· 352 if (err < 0) 353 goto out; 354 } 355 + err = nfs_commit_inode(inode, wb_priority(wbc)); 356 if (err > 0) { 357 wbc->nr_to_write -= err; 358 err = 0; ··· 401 nfsi->npages--; 402 if (!nfsi->npages) { 403 spin_unlock(&nfsi->req_lock); 404 + nfs_end_data_update(inode); 405 iput(inode); 406 } else 407 spin_unlock(&nfsi->req_lock); ··· 446 struct nfs_inode *nfsi = NFS_I(inode); 447 448 spin_lock(&nfsi->req_lock); 449 + radix_tree_tag_set(&nfsi->nfs_page_tree, 450 + req->wb_index, NFS_PAGE_TAG_DIRTY); 451 nfs_list_add_request(req, &nfsi->dirty); 452 nfsi->ndirty++; 453 spin_unlock(&nfsi->req_lock); ··· 503 504 spin_lock(&nfsi->req_lock); 505 next = idx_start; 506 + while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) { 507 if (req->wb_index > idx_end) 508 break; 509 510 next = req->wb_index + 1; 511 + BUG_ON(!NFS_WBACK_BUSY(req)); 512 513 atomic_inc(&req->wb_count); 514 spin_unlock(&nfsi->req_lock); ··· 538 nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) 539 { 540 struct nfs_inode *nfsi = NFS_I(inode); 541 + int res = 0; 542 + 543 + if (nfsi->ndirty != 0) { 544 + res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages); 545 + nfsi->ndirty -= res; 546 + sub_page_state(nr_dirty,res); 547 + if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)) 548 + printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n"); 549 + } 550 return res; 551 } 552 ··· 562 nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) 563 { 564 struct nfs_inode *nfsi = NFS_I(inode); 565 + int res = 0; 566 + 567 + if (nfsi->ncommit != 0) { 568 + res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages); 569 + nfsi->ncommit -= res; 570 + if ((nfsi->ncommit == 0) != 
list_empty(&nfsi->commit)) 571 + printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n"); 572 + } 573 return res; 574 } 575 #endif ··· 750 * is entirely in cache, it may be more efficient to avoid 751 * fragmenting write requests. 752 */ 753 + if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { 754 loff_t end_offs = i_size_read(inode) - 1; 755 unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT; 756 ··· 821 #else 822 nfs_inode_remove_request(req); 823 #endif 824 + nfs_clear_page_writeback(req); 825 } 826 827 static inline int flush_task_priority(int how) ··· 952 nfs_writedata_free(data); 953 } 954 nfs_mark_request_dirty(req); 955 + nfs_clear_page_writeback(req); 956 return -ENOMEM; 957 } 958 ··· 1002 struct nfs_page *req = nfs_list_entry(head->next); 1003 nfs_list_remove_request(req); 1004 nfs_mark_request_dirty(req); 1005 + nfs_clear_page_writeback(req); 1006 } 1007 return -ENOMEM; 1008 } ··· 1029 req = nfs_list_entry(head->next); 1030 nfs_list_remove_request(req); 1031 nfs_mark_request_dirty(req); 1032 + nfs_clear_page_writeback(req); 1033 } 1034 return error; 1035 } ··· 1121 nfs_inode_remove_request(req); 1122 #endif 1123 next: 1124 + nfs_clear_page_writeback(req); 1125 } 1126 } 1127 ··· 1210 struct nfs_write_data *data, int how) 1211 { 1212 struct rpc_task *task = &data->task; 1213 + struct nfs_page *first; 1214 struct inode *inode; 1215 1216 /* Set up the RPC argument and reply structs 1217 * NB: take care not to mess about with data->commit et al. 
*/ 1218 1219 list_splice_init(head, &data->pages); 1220 first = nfs_list_entry(data->pages.next); 1221 inode = first->wb_context->dentry->d_inode; 1222 1223 data->inode = inode; 1224 data->cred = first->wb_context->cred; 1225 1226 data->args.fh = NFS_FH(data->inode); 1227 + /* Note: we always request a commit of the entire inode */ 1228 + data->args.offset = 0; 1229 + data->args.count = 0; 1230 + data->res.count = 0; 1231 data->res.fattr = &data->fattr; 1232 data->res.verf = &data->verf; 1233 ··· 1278 req = nfs_list_entry(head->next); 1279 nfs_list_remove_request(req); 1280 nfs_mark_request_commit(req); 1281 + nfs_clear_page_writeback(req); 1282 } 1283 return -ENOMEM; 1284 } ··· 1324 dprintk(" mismatch\n"); 1325 nfs_mark_request_dirty(req); 1326 next: 1327 + nfs_clear_page_writeback(req); 1328 res++; 1329 } 1330 sub_page_state(nr_unstable,res); ··· 1342 spin_lock(&nfsi->req_lock); 1343 res = nfs_scan_dirty(inode, &head, idx_start, npages); 1344 spin_unlock(&nfsi->req_lock); 1345 + if (res) { 1346 + struct nfs_server *server = NFS_SERVER(inode); 1347 + 1348 + /* For single writes, FLUSH_STABLE is more efficient */ 1349 + if (res == nfsi->npages && nfsi->npages <= server->wpages) { 1350 + if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize) 1351 + how |= FLUSH_STABLE; 1352 + } 1353 + error = nfs_flush_list(&head, server->wpages, how); 1354 + } 1355 if (error < 0) 1356 return error; 1357 return res; 1358 } 1359 1360 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 1361 + int nfs_commit_inode(struct inode *inode, int how) 1362 { 1363 struct nfs_inode *nfsi = NFS_I(inode); 1364 LIST_HEAD(head); ··· 1359 error = 0; 1360 1361 spin_lock(&nfsi->req_lock); 1362 + res = nfs_scan_commit(inode, &head, 0, 0); 1363 + spin_unlock(&nfsi->req_lock); 1364 if (res) { 1365 error = nfs_commit_list(&head, how); 1366 + if (error < 0) 1367 + return error; 1368 + } 1369 return res; 1370 } 1371 #endif ··· 1389 error = nfs_flush_inode(inode, idx_start, npages, how); 1390 
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 1391 if (error == 0) 1392 + error = nfs_commit_inode(inode, how); 1393 #endif 1394 } while (error > 0); 1395 return error;
+7
fs/nfs_common/Makefile
···
··· 1 + # 2 + # Makefile for Linux filesystem routines that are shared by client and server. 3 + # 4 + 5 + obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o 6 + 7 + nfs_acl-objs := nfsacl.o
+257
fs/nfs_common/nfsacl.c
···
··· 1 + /* 2 + * fs/nfs_common/nfsacl.c 3 + * 4 + * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de> 5 + */ 6 + 7 + /* 8 + * The Solaris nfsacl protocol represents some ACLs slightly differently 9 + * than POSIX 1003.1e draft 17 does (and we do): 10 + * 11 + * - Minimal ACLs always have an ACL_MASK entry, so they have 12 + * four instead of three entries. 13 + * - The ACL_MASK entry in such minimal ACLs always has the same 14 + * permissions as the ACL_GROUP_OBJ entry. (In extended ACLs 15 + * the ACL_MASK and ACL_GROUP_OBJ entries may differ.) 16 + * - The identifier fields of the ACL_USER_OBJ and ACL_GROUP_OBJ 17 + * entries contain the identifiers of the owner and owning group. 18 + * (In POSIX ACLs we always set them to ACL_UNDEFINED_ID). 19 + * - ACL entries in the kernel are kept sorted in ascending order 20 + * of (e_tag, e_id). Solaris ACLs are unsorted. 21 + */ 22 + 23 + #include <linux/module.h> 24 + #include <linux/fs.h> 25 + #include <linux/sunrpc/xdr.h> 26 + #include <linux/nfsacl.h> 27 + #include <linux/nfs3.h> 28 + #include <linux/sort.h> 29 + 30 + MODULE_LICENSE("GPL"); 31 + 32 + EXPORT_SYMBOL(nfsacl_encode); 33 + EXPORT_SYMBOL(nfsacl_decode); 34 + 35 + struct nfsacl_encode_desc { 36 + struct xdr_array2_desc desc; 37 + unsigned int count; 38 + struct posix_acl *acl; 39 + int typeflag; 40 + uid_t uid; 41 + gid_t gid; 42 + }; 43 + 44 + static int 45 + xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem) 46 + { 47 + struct nfsacl_encode_desc *nfsacl_desc = 48 + (struct nfsacl_encode_desc *) desc; 49 + u32 *p = (u32 *) elem; 50 + 51 + if (nfsacl_desc->count < nfsacl_desc->acl->a_count) { 52 + struct posix_acl_entry *entry = 53 + &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; 54 + 55 + *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); 56 + switch(entry->e_tag) { 57 + case ACL_USER_OBJ: 58 + *p++ = htonl(nfsacl_desc->uid); 59 + break; 60 + case ACL_GROUP_OBJ: 61 + *p++ = htonl(nfsacl_desc->gid); 62 + break; 63 + case ACL_USER: 64 
+ case ACL_GROUP: 65 + *p++ = htonl(entry->e_id); 66 + break; 67 + default: /* Solaris depends on that! */ 68 + *p++ = 0; 69 + break; 70 + } 71 + *p++ = htonl(entry->e_perm & S_IRWXO); 72 + } else { 73 + const struct posix_acl_entry *pa, *pe; 74 + int group_obj_perm = ACL_READ|ACL_WRITE|ACL_EXECUTE; 75 + 76 + FOREACH_ACL_ENTRY(pa, nfsacl_desc->acl, pe) { 77 + if (pa->e_tag == ACL_GROUP_OBJ) { 78 + group_obj_perm = pa->e_perm & S_IRWXO; 79 + break; 80 + } 81 + } 82 + /* fake up ACL_MASK entry */ 83 + *p++ = htonl(ACL_MASK | nfsacl_desc->typeflag); 84 + *p++ = htonl(0); 85 + *p++ = htonl(group_obj_perm); 86 + } 87 + 88 + return 0; 89 + } 90 + 91 + unsigned int 92 + nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, 93 + struct posix_acl *acl, int encode_entries, int typeflag) 94 + { 95 + int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0; 96 + struct nfsacl_encode_desc nfsacl_desc = { 97 + .desc = { 98 + .elem_size = 12, 99 + .array_len = encode_entries ? 
entries : 0, 100 + .xcode = xdr_nfsace_encode, 101 + }, 102 + .acl = acl, 103 + .typeflag = typeflag, 104 + .uid = inode->i_uid, 105 + .gid = inode->i_gid, 106 + }; 107 + int err; 108 + 109 + if (entries > NFS_ACL_MAX_ENTRIES || 110 + xdr_encode_word(buf, base, entries)) 111 + return -EINVAL; 112 + err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc); 113 + if (!err) 114 + err = 8 + nfsacl_desc.desc.elem_size * 115 + nfsacl_desc.desc.array_len; 116 + return err; 117 + } 118 + 119 + struct nfsacl_decode_desc { 120 + struct xdr_array2_desc desc; 121 + unsigned int count; 122 + struct posix_acl *acl; 123 + }; 124 + 125 + static int 126 + xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem) 127 + { 128 + struct nfsacl_decode_desc *nfsacl_desc = 129 + (struct nfsacl_decode_desc *) desc; 130 + u32 *p = (u32 *) elem; 131 + struct posix_acl_entry *entry; 132 + 133 + if (!nfsacl_desc->acl) { 134 + if (desc->array_len > NFS_ACL_MAX_ENTRIES) 135 + return -EINVAL; 136 + nfsacl_desc->acl = posix_acl_alloc(desc->array_len, GFP_KERNEL); 137 + if (!nfsacl_desc->acl) 138 + return -ENOMEM; 139 + nfsacl_desc->count = 0; 140 + } 141 + 142 + entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; 143 + entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT; 144 + entry->e_id = ntohl(*p++); 145 + entry->e_perm = ntohl(*p++); 146 + 147 + switch(entry->e_tag) { 148 + case ACL_USER_OBJ: 149 + case ACL_USER: 150 + case ACL_GROUP_OBJ: 151 + case ACL_GROUP: 152 + case ACL_OTHER: 153 + if (entry->e_perm & ~S_IRWXO) 154 + return -EINVAL; 155 + break; 156 + case ACL_MASK: 157 + /* Solaris sometimes sets additional bits in the mask */ 158 + entry->e_perm &= S_IRWXO; 159 + break; 160 + default: 161 + return -EINVAL; 162 + } 163 + 164 + return 0; 165 + } 166 + 167 + static int 168 + cmp_acl_entry(const void *x, const void *y) 169 + { 170 + const struct posix_acl_entry *a = x, *b = y; 171 + 172 + if (a->e_tag != b->e_tag) 173 + return a->e_tag - b->e_tag; 174 + else if (a->e_id > b->e_id) 175
+ return 1; 176 + else if (a->e_id < b->e_id) 177 + return -1; 178 + else 179 + return 0; 180 + } 181 + 182 + /* 183 + * Convert from a Solaris ACL to a POSIX 1003.1e draft 17 ACL. 184 + */ 185 + static int 186 + posix_acl_from_nfsacl(struct posix_acl *acl) 187 + { 188 + struct posix_acl_entry *pa, *pe, 189 + *group_obj = NULL, *mask = NULL; 190 + 191 + if (!acl) 192 + return 0; 193 + 194 + sort(acl->a_entries, acl->a_count, sizeof(struct posix_acl_entry), 195 + cmp_acl_entry, NULL); 196 + 197 + /* Clear undefined identifier fields and find the ACL_GROUP_OBJ 198 + and ACL_MASK entries. */ 199 + FOREACH_ACL_ENTRY(pa, acl, pe) { 200 + switch(pa->e_tag) { 201 + case ACL_USER_OBJ: 202 + pa->e_id = ACL_UNDEFINED_ID; 203 + break; 204 + case ACL_GROUP_OBJ: 205 + pa->e_id = ACL_UNDEFINED_ID; 206 + group_obj = pa; 207 + break; 208 + case ACL_MASK: 209 + mask = pa; 210 + /* fall through */ 211 + case ACL_OTHER: 212 + pa->e_id = ACL_UNDEFINED_ID; 213 + break; 214 + } 215 + } 216 + if (acl->a_count == 4 && group_obj && mask && 217 + mask->e_perm == group_obj->e_perm) { 218 + /* remove bogus ACL_MASK entry */ 219 + memmove(mask, mask+1, (3 - (mask - acl->a_entries)) * 220 + sizeof(struct posix_acl_entry)); 221 + acl->a_count = 3; 222 + } 223 + return 0; 224 + } 225 + 226 + unsigned int 227 + nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, 228 + struct posix_acl **pacl) 229 + { 230 + struct nfsacl_decode_desc nfsacl_desc = { 231 + .desc = { 232 + .elem_size = 12, 233 + .xcode = pacl ? 
xdr_nfsace_decode : NULL, 234 + }, 235 + }; 236 + u32 entries; 237 + int err; 238 + 239 + if (xdr_decode_word(buf, base, &entries) || 240 + entries > NFS_ACL_MAX_ENTRIES) 241 + return -EINVAL; 242 + err = xdr_decode_array2(buf, base + 4, &nfsacl_desc.desc); 243 + if (err) 244 + return err; 245 + if (pacl) { 246 + if (entries != nfsacl_desc.desc.array_len || 247 + posix_acl_from_nfsacl(nfsacl_desc.acl) != 0) { 248 + posix_acl_release(nfsacl_desc.acl); 249 + return -EINVAL; 250 + } 251 + *pacl = nfsacl_desc.acl; 252 + } 253 + if (aclcnt) 254 + *aclcnt = entries; 255 + return 8 + nfsacl_desc.desc.elem_size * 256 + nfsacl_desc.desc.array_len; 257 + }
+2
fs/nfsd/Makefile
··· 6 7 nfsd-y := nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \ 8 export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o 9 nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o 10 nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \ 11 nfs4acl.o nfs4callback.o 12 nfsd-objs := $(nfsd-y)
··· 6 7 nfsd-y := nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \ 8 export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o 9 + nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o 10 nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o 11 + nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o 12 nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \ 13 nfs4acl.o nfs4callback.o 14 nfsd-objs := $(nfsd-y)
+336
fs/nfsd/nfs2acl.c
···
··· 1 + /* 2 + * linux/fs/nfsd/nfsacl.c 3 + * 4 + * Process version 2 NFSACL requests. 5 + * 6 + * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de> 7 + */ 8 + 9 + #include <linux/sunrpc/svc.h> 10 + #include <linux/nfs.h> 11 + #include <linux/nfsd/nfsd.h> 12 + #include <linux/nfsd/cache.h> 13 + #include <linux/nfsd/xdr.h> 14 + #include <linux/nfsd/xdr3.h> 15 + #include <linux/posix_acl.h> 16 + #include <linux/nfsacl.h> 17 + 18 + #define NFSDDBG_FACILITY NFSDDBG_PROC 19 + #define RETURN_STATUS(st) { resp->status = (st); return (st); } 20 + 21 + /* 22 + * NULL call. 23 + */ 24 + static int 25 + nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) 26 + { 27 + return nfs_ok; 28 + } 29 + 30 + /* 31 + * Get the Access and/or Default ACL of a file. 32 + */ 33 + static int nfsacld_proc_getacl(struct svc_rqst * rqstp, 34 + struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp) 35 + { 36 + svc_fh *fh; 37 + struct posix_acl *acl; 38 + int nfserr = 0; 39 + 40 + dprintk("nfsd: GETACL(2acl) %s\n", SVCFH_fmt(&argp->fh)); 41 + 42 + fh = fh_copy(&resp->fh, &argp->fh); 43 + if ((nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP))) 44 + RETURN_STATUS(nfserr_inval); 45 + 46 + if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) 47 + RETURN_STATUS(nfserr_inval); 48 + resp->mask = argp->mask; 49 + 50 + if (resp->mask & (NFS_ACL|NFS_ACLCNT)) { 51 + acl = nfsd_get_posix_acl(fh, ACL_TYPE_ACCESS); 52 + if (IS_ERR(acl)) { 53 + int err = PTR_ERR(acl); 54 + 55 + if (err == -ENODATA || err == -EOPNOTSUPP) 56 + acl = NULL; 57 + else { 58 + nfserr = nfserrno(err); 59 + goto fail; 60 + } 61 + } 62 + if (acl == NULL) { 63 + /* Solaris returns the inode's minimum ACL. */ 64 + 65 + struct inode *inode = fh->fh_dentry->d_inode; 66 + acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); 67 + } 68 + resp->acl_access = acl; 69 + } 70 + if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) { 71 + /* Check how Solaris handles requests for the Default ACL 72 + of a non-directory! 
*/ 73 + 74 + acl = nfsd_get_posix_acl(fh, ACL_TYPE_DEFAULT); 75 + if (IS_ERR(acl)) { 76 + int err = PTR_ERR(acl); 77 + 78 + if (err == -ENODATA || err == -EOPNOTSUPP) 79 + acl = NULL; 80 + else { 81 + nfserr = nfserrno(err); 82 + goto fail; 83 + } 84 + } 85 + resp->acl_default = acl; 86 + } 87 + 88 + /* resp->acl_{access,default} are released in nfssvc_release_getacl. */ 89 + RETURN_STATUS(0); 90 + 91 + fail: 92 + posix_acl_release(resp->acl_access); 93 + posix_acl_release(resp->acl_default); 94 + RETURN_STATUS(nfserr); 95 + } 96 + 97 + /* 98 + * Set the Access and/or Default ACL of a file. 99 + */ 100 + static int nfsacld_proc_setacl(struct svc_rqst * rqstp, 101 + struct nfsd3_setaclargs *argp, 102 + struct nfsd_attrstat *resp) 103 + { 104 + svc_fh *fh; 105 + int nfserr = 0; 106 + 107 + dprintk("nfsd: SETACL(2acl) %s\n", SVCFH_fmt(&argp->fh)); 108 + 109 + fh = fh_copy(&resp->fh, &argp->fh); 110 + nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP); 111 + 112 + if (!nfserr) { 113 + nfserr = nfserrno( nfsd_set_posix_acl( 114 + fh, ACL_TYPE_ACCESS, argp->acl_access) ); 115 + } 116 + if (!nfserr) { 117 + nfserr = nfserrno( nfsd_set_posix_acl( 118 + fh, ACL_TYPE_DEFAULT, argp->acl_default) ); 119 + } 120 + 121 + /* argp->acl_{access,default} may have been allocated in 122 + nfssvc_decode_setaclargs. 
*/ 123 + posix_acl_release(argp->acl_access); 124 + posix_acl_release(argp->acl_default); 125 + return nfserr; 126 + } 127 + 128 + /* 129 + * Check file attributes 130 + */ 131 + static int nfsacld_proc_getattr(struct svc_rqst * rqstp, 132 + struct nfsd_fhandle *argp, struct nfsd_attrstat *resp) 133 + { 134 + dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh)); 135 + 136 + fh_copy(&resp->fh, &argp->fh); 137 + return fh_verify(rqstp, &resp->fh, 0, MAY_NOP); 138 + } 139 + 140 + /* 141 + * Check file access 142 + */ 143 + static int nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp, 144 + struct nfsd3_accessres *resp) 145 + { 146 + int nfserr; 147 + 148 + dprintk("nfsd: ACCESS(2acl) %s 0x%x\n", 149 + SVCFH_fmt(&argp->fh), 150 + argp->access); 151 + 152 + fh_copy(&resp->fh, &argp->fh); 153 + resp->access = argp->access; 154 + nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL); 155 + return nfserr; 156 + } 157 + 158 + /* 159 + * XDR decode functions 160 + */ 161 + static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, u32 *p, 162 + struct nfsd3_getaclargs *argp) 163 + { 164 + if (!(p = nfs2svc_decode_fh(p, &argp->fh))) 165 + return 0; 166 + argp->mask = ntohl(*p); p++; 167 + 168 + return xdr_argsize_check(rqstp, p); 169 + } 170 + 171 + 172 + static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, u32 *p, 173 + struct nfsd3_setaclargs *argp) 174 + { 175 + struct kvec *head = rqstp->rq_arg.head; 176 + unsigned int base; 177 + int n; 178 + 179 + if (!(p = nfs2svc_decode_fh(p, &argp->fh))) 180 + return 0; 181 + argp->mask = ntohl(*p++); 182 + if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT) || 183 + !xdr_argsize_check(rqstp, p)) 184 + return 0; 185 + 186 + base = (char *)p - (char *)head->iov_base; 187 + n = nfsacl_decode(&rqstp->rq_arg, base, NULL, 188 + (argp->mask & NFS_ACL) ? 
189 + &argp->acl_access : NULL); 190 + if (n > 0) 191 + n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL, 192 + (argp->mask & NFS_DFACL) ? 193 + &argp->acl_default : NULL); 194 + return (n > 0); 195 + } 196 + 197 + static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, u32 *p, 198 + struct nfsd_fhandle *argp) 199 + { 200 + if (!(p = nfs2svc_decode_fh(p, &argp->fh))) 201 + return 0; 202 + return xdr_argsize_check(rqstp, p); 203 + } 204 + 205 + static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, u32 *p, 206 + struct nfsd3_accessargs *argp) 207 + { 208 + if (!(p = nfs2svc_decode_fh(p, &argp->fh))) 209 + return 0; 210 + argp->access = ntohl(*p++); 211 + 212 + return xdr_argsize_check(rqstp, p); 213 + } 214 + 215 + /* 216 + * XDR encode functions 217 + */ 218 + 219 + /* GETACL */ 220 + static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, u32 *p, 221 + struct nfsd3_getaclres *resp) 222 + { 223 + struct dentry *dentry = resp->fh.fh_dentry; 224 + struct inode *inode = dentry->d_inode; 225 + int w = nfsacl_size( 226 + (resp->mask & NFS_ACL) ? resp->acl_access : NULL, 227 + (resp->mask & NFS_DFACL) ? 
resp->acl_default : NULL); 228 + struct kvec *head = rqstp->rq_res.head; 229 + unsigned int base; 230 + int n; 231 + 232 + if (dentry == NULL || dentry->d_inode == NULL) 233 + return 0; 234 + inode = dentry->d_inode; 235 + 236 + p = nfs2svc_encode_fattr(rqstp, p, &resp->fh); 237 + *p++ = htonl(resp->mask); 238 + if (!xdr_ressize_check(rqstp, p)) 239 + return 0; 240 + base = (char *)p - (char *)head->iov_base; 241 + 242 + rqstp->rq_res.page_len = w; 243 + while (w > 0) { 244 + if (!svc_take_res_page(rqstp)) 245 + return 0; 246 + w -= PAGE_SIZE; 247 + } 248 + 249 + n = nfsacl_encode(&rqstp->rq_res, base, inode, 250 + resp->acl_access, 251 + resp->mask & NFS_ACL, 0); 252 + if (n > 0) 253 + n = nfsacl_encode(&rqstp->rq_res, base + n, inode, 254 + resp->acl_default, 255 + resp->mask & NFS_DFACL, 256 + NFS_ACL_DEFAULT); 257 + if (n <= 0) 258 + return 0; 259 + return 1; 260 + } 261 + 262 + static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, u32 *p, 263 + struct nfsd_attrstat *resp) 264 + { 265 + p = nfs2svc_encode_fattr(rqstp, p, &resp->fh); 266 + return xdr_ressize_check(rqstp, p); 267 + } 268 + 269 + /* ACCESS */ 270 + static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, u32 *p, 271 + struct nfsd3_accessres *resp) 272 + { 273 + p = nfs2svc_encode_fattr(rqstp, p, &resp->fh); 274 + *p++ = htonl(resp->access); 275 + return xdr_ressize_check(rqstp, p); 276 + } 277 + 278 + /* 279 + * XDR release functions 280 + */ 281 + static int nfsaclsvc_release_getacl(struct svc_rqst *rqstp, u32 *p, 282 + struct nfsd3_getaclres *resp) 283 + { 284 + fh_put(&resp->fh); 285 + posix_acl_release(resp->acl_access); 286 + posix_acl_release(resp->acl_default); 287 + return 1; 288 + } 289 + 290 + static int nfsaclsvc_release_fhandle(struct svc_rqst *rqstp, u32 *p, 291 + struct nfsd_fhandle *resp) 292 + { 293 + fh_put(&resp->fh); 294 + return 1; 295 + } 296 + 297 + #define nfsaclsvc_decode_voidargs NULL 298 + #define nfsaclsvc_encode_voidres NULL 299 + #define 
nfsaclsvc_release_void NULL 300 + #define nfsd3_fhandleargs nfsd_fhandle 301 + #define nfsd3_attrstatres nfsd_attrstat 302 + #define nfsd3_voidres nfsd3_voidargs 303 + struct nfsd3_voidargs { int dummy; }; 304 + 305 + #define PROC(name, argt, rest, relt, cache, respsize) \ 306 + { (svc_procfunc) nfsacld_proc_##name, \ 307 + (kxdrproc_t) nfsaclsvc_decode_##argt##args, \ 308 + (kxdrproc_t) nfsaclsvc_encode_##rest##res, \ 309 + (kxdrproc_t) nfsaclsvc_release_##relt, \ 310 + sizeof(struct nfsd3_##argt##args), \ 311 + sizeof(struct nfsd3_##rest##res), \ 312 + 0, \ 313 + cache, \ 314 + respsize, \ 315 + } 316 + 317 + #define ST 1 /* status*/ 318 + #define AT 21 /* attributes */ 319 + #define pAT (1+AT) /* post attributes - conditional */ 320 + #define ACL (1+NFS_ACL_MAX_ENTRIES*3) /* Access Control List */ 321 + 322 + static struct svc_procedure nfsd_acl_procedures2[] = { 323 + PROC(null, void, void, void, RC_NOCACHE, ST), 324 + PROC(getacl, getacl, getacl, getacl, RC_NOCACHE, ST+1+2*(1+ACL)), 325 + PROC(setacl, setacl, attrstat, fhandle, RC_NOCACHE, ST+AT), 326 + PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT), 327 + PROC(access, access, access, fhandle, RC_NOCACHE, ST+AT+1), 328 + }; 329 + 330 + struct svc_version nfsd_acl_version2 = { 331 + .vs_vers = 2, 332 + .vs_nproc = 5, 333 + .vs_proc = nfsd_acl_procedures2, 334 + .vs_dispatch = nfsd_dispatch, 335 + .vs_xdrsize = NFS3_SVC_XDRSIZE, 336 + };
+267
fs/nfsd/nfs3acl.c
···
··· 1 + /* 2 + * linux/fs/nfsd/nfs3acl.c 3 + * 4 + * Process version 3 NFSACL requests. 5 + * 6 + * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de> 7 + */ 8 + 9 + #include <linux/sunrpc/svc.h> 10 + #include <linux/nfs3.h> 11 + #include <linux/nfsd/nfsd.h> 12 + #include <linux/nfsd/cache.h> 13 + #include <linux/nfsd/xdr3.h> 14 + #include <linux/posix_acl.h> 15 + #include <linux/nfsacl.h> 16 + 17 + #define RETURN_STATUS(st) { resp->status = (st); return (st); } 18 + 19 + /* 20 + * NULL call. 21 + */ 22 + static int 23 + nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) 24 + { 25 + return nfs_ok; 26 + } 27 + 28 + /* 29 + * Get the Access and/or Default ACL of a file. 30 + */ 31 + static int nfsd3_proc_getacl(struct svc_rqst * rqstp, 32 + struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp) 33 + { 34 + svc_fh *fh; 35 + struct posix_acl *acl; 36 + int nfserr = 0; 37 + 38 + fh = fh_copy(&resp->fh, &argp->fh); 39 + if ((nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP))) 40 + RETURN_STATUS(nfserr_inval); 41 + 42 + if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) 43 + RETURN_STATUS(nfserr_inval); 44 + resp->mask = argp->mask; 45 + 46 + if (resp->mask & (NFS_ACL|NFS_ACLCNT)) { 47 + acl = nfsd_get_posix_acl(fh, ACL_TYPE_ACCESS); 48 + if (IS_ERR(acl)) { 49 + int err = PTR_ERR(acl); 50 + 51 + if (err == -ENODATA || err == -EOPNOTSUPP) 52 + acl = NULL; 53 + else { 54 + nfserr = nfserrno(err); 55 + goto fail; 56 + } 57 + } 58 + if (acl == NULL) { 59 + /* Solaris returns the inode's minimum ACL. */ 60 + 61 + struct inode *inode = fh->fh_dentry->d_inode; 62 + acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); 63 + } 64 + resp->acl_access = acl; 65 + } 66 + if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) { 67 + /* Check how Solaris handles requests for the Default ACL 68 + of a non-directory! 
*/ 69 + 70 + acl = nfsd_get_posix_acl(fh, ACL_TYPE_DEFAULT); 71 + if (IS_ERR(acl)) { 72 + int err = PTR_ERR(acl); 73 + 74 + if (err == -ENODATA || err == -EOPNOTSUPP) 75 + acl = NULL; 76 + else { 77 + nfserr = nfserrno(err); 78 + goto fail; 79 + } 80 + } 81 + resp->acl_default = acl; 82 + } 83 + 84 + /* resp->acl_{access,default} are released in nfs3svc_release_getacl. */ 85 + RETURN_STATUS(0); 86 + 87 + fail: 88 + posix_acl_release(resp->acl_access); 89 + posix_acl_release(resp->acl_default); 90 + RETURN_STATUS(nfserr); 91 + } 92 + 93 + /* 94 + * Set the Access and/or Default ACL of a file. 95 + */ 96 + static int nfsd3_proc_setacl(struct svc_rqst * rqstp, 97 + struct nfsd3_setaclargs *argp, 98 + struct nfsd3_attrstat *resp) 99 + { 100 + svc_fh *fh; 101 + int nfserr = 0; 102 + 103 + fh = fh_copy(&resp->fh, &argp->fh); 104 + nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP); 105 + 106 + if (!nfserr) { 107 + nfserr = nfserrno( nfsd_set_posix_acl( 108 + fh, ACL_TYPE_ACCESS, argp->acl_access) ); 109 + } 110 + if (!nfserr) { 111 + nfserr = nfserrno( nfsd_set_posix_acl( 112 + fh, ACL_TYPE_DEFAULT, argp->acl_default) ); 113 + } 114 + 115 + /* argp->acl_{access,default} may have been allocated in 116 + nfs3svc_decode_setaclargs. 
*/ 117 + posix_acl_release(argp->acl_access); 118 + posix_acl_release(argp->acl_default); 119 + RETURN_STATUS(nfserr); 120 + } 121 + 122 + /* 123 + * XDR decode functions 124 + */ 125 + static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, u32 *p, 126 + struct nfsd3_getaclargs *args) 127 + { 128 + if (!(p = nfs3svc_decode_fh(p, &args->fh))) 129 + return 0; 130 + args->mask = ntohl(*p); p++; 131 + 132 + return xdr_argsize_check(rqstp, p); 133 + } 134 + 135 + 136 + static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, u32 *p, 137 + struct nfsd3_setaclargs *args) 138 + { 139 + struct kvec *head = rqstp->rq_arg.head; 140 + unsigned int base; 141 + int n; 142 + 143 + if (!(p = nfs3svc_decode_fh(p, &args->fh))) 144 + return 0; 145 + args->mask = ntohl(*p++); 146 + if (args->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT) || 147 + !xdr_argsize_check(rqstp, p)) 148 + return 0; 149 + 150 + base = (char *)p - (char *)head->iov_base; 151 + n = nfsacl_decode(&rqstp->rq_arg, base, NULL, 152 + (args->mask & NFS_ACL) ? 153 + &args->acl_access : NULL); 154 + if (n > 0) 155 + n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL, 156 + (args->mask & NFS_DFACL) ? 157 + &args->acl_default : NULL); 158 + return (n > 0); 159 + } 160 + 161 + /* 162 + * XDR encode functions 163 + */ 164 + 165 + /* GETACL */ 166 + static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, u32 *p, 167 + struct nfsd3_getaclres *resp) 168 + { 169 + struct dentry *dentry = resp->fh.fh_dentry; 170 + 171 + p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh); 172 + if (resp->status == 0 && dentry && dentry->d_inode) { 173 + struct inode *inode = dentry->d_inode; 174 + int w = nfsacl_size( 175 + (resp->mask & NFS_ACL) ? resp->acl_access : NULL, 176 + (resp->mask & NFS_DFACL) ? 
resp->acl_default : NULL); 177 + struct kvec *head = rqstp->rq_res.head; 178 + unsigned int base; 179 + int n; 180 + 181 + *p++ = htonl(resp->mask); 182 + if (!xdr_ressize_check(rqstp, p)) 183 + return 0; 184 + base = (char *)p - (char *)head->iov_base; 185 + 186 + rqstp->rq_res.page_len = w; 187 + while (w > 0) { 188 + if (!svc_take_res_page(rqstp)) 189 + return 0; 190 + w -= PAGE_SIZE; 191 + } 192 + 193 + n = nfsacl_encode(&rqstp->rq_res, base, inode, 194 + resp->acl_access, 195 + resp->mask & NFS_ACL, 0); 196 + if (n > 0) 197 + n = nfsacl_encode(&rqstp->rq_res, base + n, inode, 198 + resp->acl_default, 199 + resp->mask & NFS_DFACL, 200 + NFS_ACL_DEFAULT); 201 + if (n <= 0) 202 + return 0; 203 + } else 204 + if (!xdr_ressize_check(rqstp, p)) 205 + return 0; 206 + 207 + return 1; 208 + } 209 + 210 + /* SETACL */ 211 + static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, u32 *p, 212 + struct nfsd3_attrstat *resp) 213 + { 214 + p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh); 215 + 216 + return xdr_ressize_check(rqstp, p); 217 + } 218 + 219 + /* 220 + * XDR release functions 221 + */ 222 + static int nfs3svc_release_getacl(struct svc_rqst *rqstp, u32 *p, 223 + struct nfsd3_getaclres *resp) 224 + { 225 + fh_put(&resp->fh); 226 + posix_acl_release(resp->acl_access); 227 + posix_acl_release(resp->acl_default); 228 + return 1; 229 + } 230 + 231 + #define nfs3svc_decode_voidargs NULL 232 + #define nfs3svc_release_void NULL 233 + #define nfsd3_setaclres nfsd3_attrstat 234 + #define nfsd3_voidres nfsd3_voidargs 235 + struct nfsd3_voidargs { int dummy; }; 236 + 237 + #define PROC(name, argt, rest, relt, cache, respsize) \ 238 + { (svc_procfunc) nfsd3_proc_##name, \ 239 + (kxdrproc_t) nfs3svc_decode_##argt##args, \ 240 + (kxdrproc_t) nfs3svc_encode_##rest##res, \ 241 + (kxdrproc_t) nfs3svc_release_##relt, \ 242 + sizeof(struct nfsd3_##argt##args), \ 243 + sizeof(struct nfsd3_##rest##res), \ 244 + 0, \ 245 + cache, \ 246 + respsize, \ 247 + } 248 + 249 + #define 
ST 1 /* status*/ 250 + #define AT 21 /* attributes */ 251 + #define pAT (1+AT) /* post attributes - conditional */ 252 + #define ACL (1+NFS_ACL_MAX_ENTRIES*3) /* Access Control List */ 253 + 254 + static struct svc_procedure nfsd_acl_procedures3[] = { 255 + PROC(null, void, void, void, RC_NOCACHE, ST), 256 + PROC(getacl, getacl, getacl, getacl, RC_NOCACHE, ST+1+2*(1+ACL)), 257 + PROC(setacl, setacl, setacl, fhandle, RC_NOCACHE, ST+pAT), 258 + }; 259 + 260 + struct svc_version nfsd_acl_version3 = { 261 + .vs_vers = 3, 262 + .vs_nproc = 3, 263 + .vs_proc = nfsd_acl_procedures3, 264 + .vs_dispatch = nfsd_dispatch, 265 + .vs_xdrsize = NFS3_SVC_XDRSIZE, 266 + }; 267 +
+13
fs/nfsd/nfs3xdr.c
··· 71 return p + XDR_QUADLEN(size); 72 } 73 74 static inline u32 * 75 encode_fh(u32 *p, struct svc_fh *fhp) 76 { ··· 237 } 238 *p++ = xdr_zero; 239 return p; 240 } 241 242 /*
··· 71 return p + XDR_QUADLEN(size); 72 } 73 74 + /* Helper function for NFSv3 ACL code */ 75 + u32 *nfs3svc_decode_fh(u32 *p, struct svc_fh *fhp) 76 + { 77 + return decode_fh(p, fhp); 78 + } 79 + 80 static inline u32 * 81 encode_fh(u32 *p, struct svc_fh *fhp) 82 { ··· 231 } 232 *p++ = xdr_zero; 233 return p; 234 + } 235 + 236 + /* Helper for NFSv3 ACLs */ 237 + u32 * 238 + nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) 239 + { 240 + return encode_post_op_attr(rqstp, p, fhp); 241 } 242 243 /*
+1 -3
fs/nfsd/nfs4callback.c
··· 430 clnt = rpc_create_client(xprt, hostname, program, 1, RPC_AUTH_UNIX); 431 if (IS_ERR(clnt)) { 432 dprintk("NFSD: couldn't create callback client\n"); 433 - goto out_xprt; 434 } 435 clnt->cl_intr = 0; 436 clnt->cl_softrtry = 1; ··· 465 out_clnt: 466 rpc_shutdown_client(clnt); 467 goto out_err; 468 - out_xprt: 469 - xprt_destroy(xprt); 470 out_err: 471 dprintk("NFSD: warning: no callback path to client %.*s\n", 472 (int)clp->cl_name.len, clp->cl_name.data);
··· 430 clnt = rpc_create_client(xprt, hostname, program, 1, RPC_AUTH_UNIX); 431 if (IS_ERR(clnt)) { 432 dprintk("NFSD: couldn't create callback client\n"); 433 + goto out_err; 434 } 435 clnt->cl_intr = 0; 436 clnt->cl_softrtry = 1; ··· 465 out_clnt: 466 rpc_shutdown_client(clnt); 467 goto out_err; 468 out_err: 469 dprintk("NFSD: warning: no callback path to client %.*s\n", 470 (int)clp->cl_name.len, clp->cl_name.data);
+1
fs/nfsd/nfsproc.c
··· 591 { nfserr_dropit, -ENOMEM }, 592 { nfserr_badname, -ESRCH }, 593 { nfserr_io, -ETXTBSY }, 594 { -1, -EIO } 595 }; 596 int i;
··· 591 { nfserr_dropit, -ENOMEM }, 592 { nfserr_badname, -ESRCH }, 593 { nfserr_io, -ETXTBSY }, 594 + { nfserr_notsupp, -EOPNOTSUPP }, 595 { -1, -EIO } 596 }; 597 int i;
+28
fs/nfsd/nfssvc.c
··· 31 #include <linux/nfsd/stats.h> 32 #include <linux/nfsd/cache.h> 33 #include <linux/lockd/bind.h> 34 35 #define NFSDDBG_FACILITY NFSDDBG_SVC 36 ··· 363 return 1; 364 } 365 366 extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4; 367 368 static struct svc_version * nfsd_version[] = { ··· 403 404 #define NFSD_NRVERS (sizeof(nfsd_version)/sizeof(nfsd_version[0])) 405 struct svc_program nfsd_program = { 406 .pg_prog = NFS_PROGRAM, /* program number */ 407 .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */ 408 .pg_vers = nfsd_version, /* version table */
··· 31 #include <linux/nfsd/stats.h> 32 #include <linux/nfsd/cache.h> 33 #include <linux/lockd/bind.h> 34 + #include <linux/nfsacl.h> 35 36 #define NFSDDBG_FACILITY NFSDDBG_SVC 37 ··· 362 return 1; 363 } 364 365 + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 366 + static struct svc_stat nfsd_acl_svcstats; 367 + static struct svc_version * nfsd_acl_version[] = { 368 + [2] = &nfsd_acl_version2, 369 + [3] = &nfsd_acl_version3, 370 + }; 371 + 372 + #define NFSD_ACL_NRVERS (sizeof(nfsd_acl_version)/sizeof(nfsd_acl_version[0])) 373 + static struct svc_program nfsd_acl_program = { 374 + .pg_prog = NFS_ACL_PROGRAM, 375 + .pg_nvers = NFSD_ACL_NRVERS, 376 + .pg_vers = nfsd_acl_version, 377 + .pg_name = "nfsd", 378 + .pg_class = "nfsd", 379 + .pg_stats = &nfsd_acl_svcstats, 380 + }; 381 + 382 + static struct svc_stat nfsd_acl_svcstats = { 383 + .program = &nfsd_acl_program, 384 + }; 385 + 386 + #define nfsd_acl_program_p &nfsd_acl_program 387 + #else 388 + #define nfsd_acl_program_p NULL 389 + #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */ 390 + 391 extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4; 392 393 static struct svc_version * nfsd_version[] = { ··· 376 377 #define NFSD_NRVERS (sizeof(nfsd_version)/sizeof(nfsd_version[0])) 378 struct svc_program nfsd_program = { 379 + .pg_next = nfsd_acl_program_p, 380 .pg_prog = NFS_PROGRAM, /* program number */ 381 .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */ 382 .pg_vers = nfsd_version, /* version table */
+11
fs/nfsd/nfsxdr.c
··· 49 return p + (NFS_FHSIZE >> 2); 50 } 51 52 static inline u32 * 53 encode_fh(u32 *p, struct svc_fh *fhp) 54 { ··· 196 return p; 197 } 198 199 200 /* 201 * XDR decode functions
··· 49 return p + (NFS_FHSIZE >> 2); 50 } 51 52 + /* Helper function for NFSv2 ACL code */ 53 + u32 *nfs2svc_decode_fh(u32 *p, struct svc_fh *fhp) 54 + { 55 + return decode_fh(p, fhp); 56 + } 57 + 58 static inline u32 * 59 encode_fh(u32 *p, struct svc_fh *fhp) 60 { ··· 190 return p; 191 } 192 193 + /* Helper function for NFSv2 ACL code */ 194 + u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) 195 + { 196 + return encode_fattr(rqstp, p, fhp); 197 + } 198 199 /* 200 * XDR decode functions
+106 -1
fs/nfsd/vfs.c
··· 46 #include <linux/nfsd/nfsfh.h> 47 #include <linux/quotaops.h> 48 #include <linux/dnotify.h> 49 - #ifdef CONFIG_NFSD_V4 50 #include <linux/posix_acl.h> 51 #include <linux/posix_acl_xattr.h> 52 #include <linux/xattr_acl.h> 53 #include <linux/xattr.h> ··· 1858 nfsdstats.ra_size = cache_size; 1859 return 0; 1860 }
··· 46 #include <linux/nfsd/nfsfh.h> 47 #include <linux/quotaops.h> 48 #include <linux/dnotify.h> 49 + #include <linux/xattr_acl.h> 50 #include <linux/posix_acl.h> 51 + #ifdef CONFIG_NFSD_V4 52 #include <linux/posix_acl_xattr.h> 53 #include <linux/xattr_acl.h> 54 #include <linux/xattr.h> ··· 1857 nfsdstats.ra_size = cache_size; 1858 return 0; 1859 } 1860 + 1861 + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 1862 + struct posix_acl * 1863 + nfsd_get_posix_acl(struct svc_fh *fhp, int type) 1864 + { 1865 + struct inode *inode = fhp->fh_dentry->d_inode; 1866 + char *name; 1867 + void *value = NULL; 1868 + ssize_t size; 1869 + struct posix_acl *acl; 1870 + 1871 + if (!IS_POSIXACL(inode) || !inode->i_op || !inode->i_op->getxattr) 1872 + return ERR_PTR(-EOPNOTSUPP); 1873 + switch(type) { 1874 + case ACL_TYPE_ACCESS: 1875 + name = XATTR_NAME_ACL_ACCESS; 1876 + break; 1877 + case ACL_TYPE_DEFAULT: 1878 + name = XATTR_NAME_ACL_DEFAULT; 1879 + break; 1880 + default: 1881 + return ERR_PTR(-EOPNOTSUPP); 1882 + } 1883 + 1884 + size = inode->i_op->getxattr(fhp->fh_dentry, name, NULL, 0); 1885 + 1886 + if (size < 0) { 1887 + acl = ERR_PTR(size); 1888 + goto getout; 1889 + } else if (size > 0) { 1890 + value = kmalloc(size, GFP_KERNEL); 1891 + if (!value) { 1892 + acl = ERR_PTR(-ENOMEM); 1893 + goto getout; 1894 + } 1895 + size = inode->i_op->getxattr(fhp->fh_dentry, name, value, size); 1896 + if (size < 0) { 1897 + acl = ERR_PTR(size); 1898 + goto getout; 1899 + } 1900 + } 1901 + acl = posix_acl_from_xattr(value, size); 1902 + 1903 + getout: 1904 + kfree(value); 1905 + return acl; 1906 + } 1907 + 1908 + int 1909 + nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl) 1910 + { 1911 + struct inode *inode = fhp->fh_dentry->d_inode; 1912 + char *name; 1913 + void *value = NULL; 1914 + size_t size; 1915 + int error; 1916 + 1917 + if (!IS_POSIXACL(inode) || !inode->i_op || 1918 + !inode->i_op->setxattr || !inode->i_op->removexattr) 1919 + return 
-EOPNOTSUPP; 1920 + switch(type) { 1921 + case ACL_TYPE_ACCESS: 1922 + name = XATTR_NAME_ACL_ACCESS; 1923 + break; 1924 + case ACL_TYPE_DEFAULT: 1925 + name = XATTR_NAME_ACL_DEFAULT; 1926 + break; 1927 + default: 1928 + return -EOPNOTSUPP; 1929 + } 1930 + 1931 + if (acl && acl->a_count) { 1932 + size = xattr_acl_size(acl->a_count); 1933 + value = kmalloc(size, GFP_KERNEL); 1934 + if (!value) 1935 + return -ENOMEM; 1936 + size = posix_acl_to_xattr(acl, value, size); 1937 + if (size < 0) { 1938 + error = size; 1939 + goto getout; 1940 + } 1941 + } else 1942 + size = 0; 1943 + 1944 + if (!fhp->fh_locked) 1945 + fh_lock(fhp); /* unlocking is done automatically */ 1946 + if (size) 1947 + error = inode->i_op->setxattr(fhp->fh_dentry, name, 1948 + value, size, 0); 1949 + else { 1950 + if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT) 1951 + error = 0; 1952 + else { 1953 + error = inode->i_op->removexattr(fhp->fh_dentry, name); 1954 + if (error == -ENODATA) 1955 + error = 0; 1956 + } 1957 + } 1958 + 1959 + getout: 1960 + kfree(value); 1961 + return error; 1962 + } 1963 + #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+1
include/linux/fs.h
··· 674 struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ 675 union { 676 struct nfs_lock_info nfs_fl; 677 } fl_u; 678 }; 679
··· 674 struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ 675 union { 676 struct nfs_lock_info nfs_fl; 677 + struct nfs4_lock_info nfs4_fl; 678 } fl_u; 679 }; 680
+6 -1
include/linux/lockd/lockd.h
··· 72 uint32_t pid; 73 }; 74 75 /* 76 * Memory chunk for NLM client RPC request. 77 */ ··· 83 struct nlm_host * a_host; /* host handle */ 84 struct nlm_args a_args; /* arguments */ 85 struct nlm_res a_res; /* result */ 86 char a_owner[NLMCLNT_OHSIZE]; 87 }; 88 ··· 145 * Lockd client functions 146 */ 147 struct nlm_rqst * nlmclnt_alloc_call(void); 148 - int nlmclnt_block(struct nlm_host *, struct file_lock *, u32 *); 149 int nlmclnt_cancel(struct nlm_host *, struct file_lock *); 150 u32 nlmclnt_grant(struct nlm_lock *); 151 void nlmclnt_recovery(struct nlm_host *, u32);
··· 72 uint32_t pid; 73 }; 74 75 + struct nlm_wait; 76 + 77 /* 78 * Memory chunk for NLM client RPC request. 79 */ ··· 81 struct nlm_host * a_host; /* host handle */ 82 struct nlm_args a_args; /* arguments */ 83 struct nlm_res a_res; /* result */ 84 + struct nlm_wait * a_block; 85 char a_owner[NLMCLNT_OHSIZE]; 86 }; 87 ··· 142 * Lockd client functions 143 */ 144 struct nlm_rqst * nlmclnt_alloc_call(void); 145 + int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl); 146 + void nlmclnt_finish_block(struct nlm_rqst *req); 147 + long nlmclnt_block(struct nlm_rqst *req, long timeout); 148 int nlmclnt_cancel(struct nlm_host *, struct file_lock *); 149 u32 nlmclnt_grant(struct nlm_lock *); 150 void nlmclnt_recovery(struct nlm_host *, u32);
+2
include/linux/nfs4.h
··· 382 NFSPROC4_CLNT_READDIR, 383 NFSPROC4_CLNT_SERVER_CAPS, 384 NFSPROC4_CLNT_DELEGRETURN, 385 }; 386 387 #endif
··· 382 NFSPROC4_CLNT_READDIR, 383 NFSPROC4_CLNT_SERVER_CAPS, 384 NFSPROC4_CLNT_DELEGRETURN, 385 + NFSPROC4_CLNT_GETACL, 386 + NFSPROC4_CLNT_SETACL, 387 }; 388 389 #endif
+59 -247
include/linux/nfs_fs.h
··· 15 #include <linux/pagemap.h> 16 #include <linux/rwsem.h> 17 #include <linux/wait.h> 18 - #include <linux/uio.h> 19 20 #include <linux/nfs_fs_sb.h> 21 ··· 28 #include <linux/nfs4.h> 29 #include <linux/nfs_xdr.h> 30 #include <linux/rwsem.h> 31 - #include <linux/workqueue.h> 32 #include <linux/mempool.h> 33 34 /* ··· 42 #define NFS_DEF_FILE_IO_BUFFER_SIZE 4096 43 44 /* 45 - * The upper limit on timeouts for the exponential backoff algorithm. 46 - */ 47 - #define NFS_WRITEBACK_DELAY (5*HZ) 48 - #define NFS_WRITEBACK_LOCKDELAY (60*HZ) 49 - #define NFS_COMMIT_DELAY (5*HZ) 50 - 51 - /* 52 * superblock magic number for NFS 53 */ 54 #define NFS_SUPER_MAGIC 0x6969 ··· 50 * These are the default flags for swap requests 51 */ 52 #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) 53 - 54 - #define NFS_RW_SYNC 0x0001 /* O_SYNC handling */ 55 - #define NFS_RW_SWAP 0x0002 /* This is a swap request */ 56 57 /* 58 * When flushing a cluster of dirty pages, there can be different ··· 84 int error; 85 86 struct list_head list; 87 - wait_queue_head_t waitq; 88 }; 89 90 /* 91 * NFSv4 delegation 92 */ 93 struct nfs_delegation; 94 95 /* 96 * nfs fs inode data in memory ··· 131 * 132 * mtime != read_cache_mtime 133 */ 134 - unsigned long readdir_timestamp; 135 unsigned long read_cache_jiffies; 136 unsigned long attrtimeo; 137 unsigned long attrtimeo_timestamp; ··· 148 atomic_t data_updates; 149 150 struct nfs_access_entry cache_access; 151 152 /* 153 * This is the cookie verifier used for NFSv3 readdir ··· 177 wait_queue_head_t nfs_i_wait; 178 179 #ifdef CONFIG_NFS_V4 180 /* NFSv4 state */ 181 struct list_head open_states; 182 struct nfs_delegation *delegation; 183 int delegation_state; 184 struct rw_semaphore rwsem; 185 #endif /* CONFIG_NFS_V4*/ 186 - 187 struct inode vfs_inode; 188 }; 189 ··· 197 #define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */ 198 #define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */ 199 #define NFS_INO_INVALID_ACCESS 0x0040 
/* cached access cred invalid */ 200 201 static inline struct nfs_inode *NFS_I(struct inode *inode) 202 { ··· 290 extern int nfs_attribute_timeout(struct inode *inode); 291 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); 292 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); 293 extern int nfs_setattr(struct dentry *, struct iattr *); 294 extern void nfs_begin_attr_update(struct inode *); 295 extern void nfs_end_attr_update(struct inode *); 296 extern void nfs_begin_data_update(struct inode *); 297 extern void nfs_end_data_update(struct inode *); 298 - extern void nfs_end_data_update_defer(struct inode *); 299 extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred); 300 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); 301 extern void put_nfs_open_context(struct nfs_open_context *ctx); ··· 310 * linux/fs/nfs/file.c 311 */ 312 extern struct inode_operations nfs_file_inode_operations; 313 extern struct file_operations nfs_file_operations; 314 extern struct address_space_operations nfs_file_aops; 315 ··· 328 } 329 330 /* 331 * linux/fs/nfs/direct.c 332 */ 333 extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t, ··· 357 * linux/fs/nfs/dir.c 358 */ 359 extern struct inode_operations nfs_dir_inode_operations; 360 extern struct file_operations nfs_dir_operations; 361 extern struct dentry_operations nfs_dentry_operations; 362 ··· 395 */ 396 extern int nfs_sync_inode(struct inode *, unsigned long, unsigned int, int); 397 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 398 - extern int nfs_commit_inode(struct inode *, unsigned long, unsigned int, int); 399 #else 400 static inline int 401 - nfs_commit_inode(struct inode *inode, unsigned long idx_start, unsigned int npages, int how) 402 { 403 return 0; 404 } ··· 452 mempool_free(p, nfs_wdata_mempool); 453 } 454 455 - /* Hack for future NFS swap support */ 456 - 
#ifndef IS_SWAPFILE 457 - # define IS_SWAPFILE(inode) (0) 458 - #endif 459 - 460 /* 461 * linux/fs/nfs/read.c 462 */ ··· 479 } 480 481 extern void nfs_readdata_release(struct rpc_task *task); 482 483 /* 484 * linux/fs/mount_clnt.c ··· 550 }) 551 552 #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) 553 - 554 - #ifdef CONFIG_NFS_V4 555 - 556 - struct idmap; 557 - 558 - /* 559 - * In a seqid-mutating op, this macro controls which error return 560 - * values trigger incrementation of the seqid. 561 - * 562 - * from rfc 3010: 563 - * The client MUST monotonically increment the sequence number for the 564 - * CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE 565 - * operations. This is true even in the event that the previous 566 - * operation that used the sequence number received an error. The only 567 - * exception to this rule is if the previous operation received one of 568 - * the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID, 569 - * NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR, 570 - * NFSERR_RESOURCE, NFSERR_NOFILEHANDLE. 571 - * 572 - */ 573 - #define seqid_mutating_err(err) \ 574 - (((err) != NFSERR_STALE_CLIENTID) && \ 575 - ((err) != NFSERR_STALE_STATEID) && \ 576 - ((err) != NFSERR_BAD_STATEID) && \ 577 - ((err) != NFSERR_BAD_SEQID) && \ 578 - ((err) != NFSERR_BAD_XDR) && \ 579 - ((err) != NFSERR_RESOURCE) && \ 580 - ((err) != NFSERR_NOFILEHANDLE)) 581 - 582 - enum nfs4_client_state { 583 - NFS4CLNT_OK = 0, 584 - }; 585 - 586 - /* 587 - * The nfs4_client identifies our client state to the server. 588 - */ 589 - struct nfs4_client { 590 - struct list_head cl_servers; /* Global list of servers */ 591 - struct in_addr cl_addr; /* Server identifier */ 592 - u64 cl_clientid; /* constant */ 593 - nfs4_verifier cl_confirm; 594 - unsigned long cl_state; 595 - 596 - u32 cl_lockowner_id; 597 - 598 - /* 599 - * The following rwsem ensures exclusive access to the server 600 - * while we recover the state following a lease expiration. 
601 - */ 602 - struct rw_semaphore cl_sem; 603 - 604 - struct list_head cl_delegations; 605 - struct list_head cl_state_owners; 606 - struct list_head cl_unused; 607 - int cl_nunused; 608 - spinlock_t cl_lock; 609 - atomic_t cl_count; 610 - 611 - struct rpc_clnt * cl_rpcclient; 612 - struct rpc_cred * cl_cred; 613 - 614 - struct list_head cl_superblocks; /* List of nfs_server structs */ 615 - 616 - unsigned long cl_lease_time; 617 - unsigned long cl_last_renewal; 618 - struct work_struct cl_renewd; 619 - struct work_struct cl_recoverd; 620 - 621 - wait_queue_head_t cl_waitq; 622 - struct rpc_wait_queue cl_rpcwaitq; 623 - 624 - /* used for the setclientid verifier */ 625 - struct timespec cl_boot_time; 626 - 627 - /* idmapper */ 628 - struct idmap * cl_idmap; 629 - 630 - /* Our own IP address, as a null-terminated string. 631 - * This is used to generate the clientid, and the callback address. 632 - */ 633 - char cl_ipaddr[16]; 634 - unsigned char cl_id_uniquifier; 635 - }; 636 - 637 - /* 638 - * NFS4 state_owners and lock_owners are simply labels for ordered 639 - * sequences of RPC calls. Their sole purpose is to provide once-only 640 - * semantics by allowing the server to identify replayed requests. 641 - * 642 - * The ->so_sema is held during all state_owner seqid-mutating operations: 643 - * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize 644 - * so_seqid. 645 - */ 646 - struct nfs4_state_owner { 647 - struct list_head so_list; /* per-clientid list of state_owners */ 648 - struct nfs4_client *so_client; 649 - u32 so_id; /* 32-bit identifier, unique */ 650 - struct semaphore so_sema; 651 - u32 so_seqid; /* protected by so_sema */ 652 - atomic_t so_count; 653 - 654 - struct rpc_cred *so_cred; /* Associated cred */ 655 - struct list_head so_states; 656 - struct list_head so_delegations; 657 - }; 658 - 659 - /* 660 - * struct nfs4_state maintains the client-side state for a given 661 - * (state_owner,inode) tuple (OPEN) or state_owner (LOCK). 
662 - * 663 - * OPEN: 664 - * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server, 665 - * we need to know how many files are open for reading or writing on a 666 - * given inode. This information too is stored here. 667 - * 668 - * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN) 669 - */ 670 - 671 - struct nfs4_lock_state { 672 - struct list_head ls_locks; /* Other lock stateids */ 673 - fl_owner_t ls_owner; /* POSIX lock owner */ 674 - #define NFS_LOCK_INITIALIZED 1 675 - int ls_flags; 676 - u32 ls_seqid; 677 - u32 ls_id; 678 - nfs4_stateid ls_stateid; 679 - atomic_t ls_count; 680 - }; 681 - 682 - /* bits for nfs4_state->flags */ 683 - enum { 684 - LK_STATE_IN_USE, 685 - NFS_DELEGATED_STATE, 686 - }; 687 - 688 - struct nfs4_state { 689 - struct list_head open_states; /* List of states for the same state_owner */ 690 - struct list_head inode_states; /* List of states for the same inode */ 691 - struct list_head lock_states; /* List of subservient lock stateids */ 692 - 693 - struct nfs4_state_owner *owner; /* Pointer to the open owner */ 694 - struct inode *inode; /* Pointer to the inode */ 695 - 696 - unsigned long flags; /* Do we hold any locks? 
*/ 697 - struct semaphore lock_sema; /* Serializes file locking operations */ 698 - rwlock_t state_lock; /* Protects the lock_states list */ 699 - 700 - nfs4_stateid stateid; 701 - 702 - unsigned int nreaders; 703 - unsigned int nwriters; 704 - int state; /* State on the server (R,W, or RW) */ 705 - atomic_t count; 706 - }; 707 - 708 - 709 - struct nfs4_exception { 710 - long timeout; 711 - int retry; 712 - }; 713 - 714 - struct nfs4_state_recovery_ops { 715 - int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *); 716 - int (*recover_lock)(struct nfs4_state *, struct file_lock *); 717 - }; 718 - 719 - extern struct dentry_operations nfs4_dentry_operations; 720 - extern struct inode_operations nfs4_dir_inode_operations; 721 - 722 - /* nfs4proc.c */ 723 - extern int nfs4_map_errors(int err); 724 - extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short); 725 - extern int nfs4_proc_setclientid_confirm(struct nfs4_client *); 726 - extern int nfs4_proc_async_renew(struct nfs4_client *); 727 - extern int nfs4_proc_renew(struct nfs4_client *); 728 - extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode); 729 - extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); 730 - extern int nfs4_open_revalidate(struct inode *, struct dentry *, int); 731 - 732 - extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; 733 - extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops; 734 - 735 - /* nfs4renewd.c */ 736 - extern void nfs4_schedule_state_renewal(struct nfs4_client *); 737 - extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); 738 - extern void nfs4_kill_renewd(struct nfs4_client *); 739 - 740 - /* nfs4state.c */ 741 - extern void init_nfsv4_state(struct nfs_server *); 742 - extern void destroy_nfsv4_state(struct nfs_server *); 743 - extern struct nfs4_client *nfs4_get_client(struct in_addr *); 744 - extern void nfs4_put_client(struct 
nfs4_client *clp); 745 - extern int nfs4_init_client(struct nfs4_client *clp); 746 - extern struct nfs4_client *nfs4_find_client(struct in_addr *); 747 - extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *); 748 - 749 - extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); 750 - extern void nfs4_put_state_owner(struct nfs4_state_owner *); 751 - extern void nfs4_drop_state_owner(struct nfs4_state_owner *); 752 - extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); 753 - extern void nfs4_put_open_state(struct nfs4_state *); 754 - extern void nfs4_close_state(struct nfs4_state *, mode_t); 755 - extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode); 756 - extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp); 757 - extern void nfs4_schedule_state_recovery(struct nfs4_client *); 758 - extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t); 759 - extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t); 760 - extern void nfs4_put_lock_state(struct nfs4_lock_state *state); 761 - extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls); 762 - extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *); 763 - extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *); 764 - extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); 765 - 766 - 767 - 768 - struct nfs4_mount_data; 769 - #else 770 - #define init_nfsv4_state(server) do { } while (0) 771 - #define destroy_nfsv4_state(server) do { } while (0) 772 - #define nfs4_put_state_owner(inode, owner) do { } while (0) 773 - #define nfs4_put_open_state(state) do { } while (0) 774 - #define nfs4_close_state(a, b) do { } while (0) 775 - #define nfs4_renewd_prepare_shutdown(server) do { } while (0) 776 - #endif 
777 778 #endif /* __KERNEL__ */ 779
··· 15 #include <linux/pagemap.h> 16 #include <linux/rwsem.h> 17 #include <linux/wait.h> 18 19 #include <linux/nfs_fs_sb.h> 20 ··· 29 #include <linux/nfs4.h> 30 #include <linux/nfs_xdr.h> 31 #include <linux/rwsem.h> 32 #include <linux/mempool.h> 33 34 /* ··· 44 #define NFS_DEF_FILE_IO_BUFFER_SIZE 4096 45 46 /* 47 * superblock magic number for NFS 48 */ 49 #define NFS_SUPER_MAGIC 0x6969 ··· 59 * These are the default flags for swap requests 60 */ 61 #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) 62 63 /* 64 * When flushing a cluster of dirty pages, there can be different ··· 96 int error; 97 98 struct list_head list; 99 + 100 + __u64 dir_cookie; 101 }; 102 103 /* 104 * NFSv4 delegation 105 */ 106 struct nfs_delegation; 107 + 108 + struct posix_acl; 109 110 /* 111 * nfs fs inode data in memory ··· 140 * 141 * mtime != read_cache_mtime 142 */ 143 unsigned long read_cache_jiffies; 144 unsigned long attrtimeo; 145 unsigned long attrtimeo_timestamp; ··· 158 atomic_t data_updates; 159 160 struct nfs_access_entry cache_access; 161 + #ifdef CONFIG_NFS_V3_ACL 162 + struct posix_acl *acl_access; 163 + struct posix_acl *acl_default; 164 + #endif 165 166 /* 167 * This is the cookie verifier used for NFSv3 readdir ··· 183 wait_queue_head_t nfs_i_wait; 184 185 #ifdef CONFIG_NFS_V4 186 + struct nfs4_cached_acl *nfs4_acl; 187 /* NFSv4 state */ 188 struct list_head open_states; 189 struct nfs_delegation *delegation; 190 int delegation_state; 191 struct rw_semaphore rwsem; 192 #endif /* CONFIG_NFS_V4*/ 193 struct inode vfs_inode; 194 }; 195 ··· 203 #define NFS_INO_INVALID_DATA 0x0010 /* cached data is invalid */ 204 #define NFS_INO_INVALID_ATIME 0x0020 /* cached atime is invalid */ 205 #define NFS_INO_INVALID_ACCESS 0x0040 /* cached access cred invalid */ 206 + #define NFS_INO_INVALID_ACL 0x0080 /* cached acls are invalid */ 207 + #define NFS_INO_REVAL_PAGECACHE 0x1000 /* must revalidate pagecache */ 208 209 static inline struct nfs_inode *NFS_I(struct inode *inode) 
210 { ··· 294 extern int nfs_attribute_timeout(struct inode *inode); 295 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); 296 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); 297 + extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); 298 extern int nfs_setattr(struct dentry *, struct iattr *); 299 extern void nfs_begin_attr_update(struct inode *); 300 extern void nfs_end_attr_update(struct inode *); 301 extern void nfs_begin_data_update(struct inode *); 302 extern void nfs_end_data_update(struct inode *); 303 extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred); 304 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); 305 extern void put_nfs_open_context(struct nfs_open_context *ctx); ··· 314 * linux/fs/nfs/file.c 315 */ 316 extern struct inode_operations nfs_file_inode_operations; 317 + #ifdef CONFIG_NFS_V3 318 + extern struct inode_operations nfs3_file_inode_operations; 319 + #endif /* CONFIG_NFS_V3 */ 320 extern struct file_operations nfs_file_operations; 321 extern struct address_space_operations nfs_file_aops; 322 ··· 329 } 330 331 /* 332 + * linux/fs/nfs/xattr.c 333 + */ 334 + #ifdef CONFIG_NFS_V3_ACL 335 + extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t); 336 + extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t); 337 + extern int nfs3_setxattr(struct dentry *, const char *, 338 + const void *, size_t, int); 339 + extern int nfs3_removexattr (struct dentry *, const char *name); 340 + #else 341 + # define nfs3_listxattr NULL 342 + # define nfs3_getxattr NULL 343 + # define nfs3_setxattr NULL 344 + # define nfs3_removexattr NULL 345 + #endif 346 + 347 + /* 348 * linux/fs/nfs/direct.c 349 */ 350 extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t, ··· 342 * linux/fs/nfs/dir.c 343 */ 344 extern struct inode_operations 
nfs_dir_inode_operations; 345 + #ifdef CONFIG_NFS_V3 346 + extern struct inode_operations nfs3_dir_inode_operations; 347 + #endif /* CONFIG_NFS_V3 */ 348 extern struct file_operations nfs_dir_operations; 349 extern struct dentry_operations nfs_dentry_operations; 350 ··· 377 */ 378 extern int nfs_sync_inode(struct inode *, unsigned long, unsigned int, int); 379 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 380 + extern int nfs_commit_inode(struct inode *, int); 381 #else 382 static inline int 383 + nfs_commit_inode(struct inode *inode, int how) 384 { 385 return 0; 386 } ··· 434 mempool_free(p, nfs_wdata_mempool); 435 } 436 437 /* 438 * linux/fs/nfs/read.c 439 */ ··· 466 } 467 468 extern void nfs_readdata_release(struct rpc_task *task); 469 + 470 + /* 471 + * linux/fs/nfs3proc.c 472 + */ 473 + #ifdef CONFIG_NFS_V3_ACL 474 + extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type); 475 + extern int nfs3_proc_setacl(struct inode *inode, int type, 476 + struct posix_acl *acl); 477 + extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode, 478 + mode_t mode); 479 + extern void nfs3_forget_cached_acls(struct inode *inode); 480 + #else 481 + static inline int nfs3_proc_set_default_acl(struct inode *dir, 482 + struct inode *inode, 483 + mode_t mode) 484 + { 485 + return 0; 486 + } 487 + 488 + static inline void nfs3_forget_cached_acls(struct inode *inode) 489 + { 490 + } 491 + #endif /* CONFIG_NFS_V3_ACL */ 492 493 /* 494 * linux/fs/mount_clnt.c ··· 514 }) 515 516 #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) 517 518 #endif /* __KERNEL__ */ 519
+5
include/linux/nfs_fs_i.h
··· 16 struct nlm_lockowner *owner; 17 }; 18 19 /* 20 * Lock flag values 21 */
··· 16 struct nlm_lockowner *owner; 17 }; 18 19 + struct nfs4_lock_state; 20 + struct nfs4_lock_info { 21 + struct nfs4_lock_state *owner; 22 + }; 23 + 24 /* 25 * Lock flag values 26 */
+1
include/linux/nfs_fs_sb.h
··· 10 struct nfs_server { 11 struct rpc_clnt * client; /* RPC client handle */ 12 struct rpc_clnt * client_sys; /* 2nd handle for FSINFO */ 13 struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */ 14 struct backing_dev_info backing_dev_info; 15 int flags; /* various flags */
··· 10 struct nfs_server { 11 struct rpc_clnt * client; /* RPC client handle */ 12 struct rpc_clnt * client_sys; /* 2nd handle for FSINFO */ 13 + struct rpc_clnt * client_acl; /* ACL RPC client handle */ 14 struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */ 15 struct backing_dev_info backing_dev_info; 16 int flags; /* various flags */
+1
include/linux/nfs_mount.h
··· 58 #define NFS_MOUNT_KERBEROS 0x0100 /* 3 */ 59 #define NFS_MOUNT_NONLM 0x0200 /* 3 */ 60 #define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */ 61 #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ 62 #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ 63 #define NFS_MOUNT_FLAGMASK 0xFFFF
··· 58 #define NFS_MOUNT_KERBEROS 0x0100 /* 3 */ 59 #define NFS_MOUNT_NONLM 0x0200 /* 3 */ 60 #define NFS_MOUNT_BROKEN_SUID 0x0400 /* 4 */ 61 + #define NFS_MOUNT_NOACL 0x0800 /* 4 */ 62 #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ 63 #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ 64 #define NFS_MOUNT_FLAGMASK 0xFFFF
+24 -6
include/linux/nfs_page.h
··· 20 #include <asm/atomic.h> 21 22 /* 23 * Valid flags for a dirty buffer 24 */ 25 #define PG_BUSY 0 26 #define PG_NEED_COMMIT 1 27 #define PG_NEED_RESCHED 2 28 29 struct nfs_page { 30 struct list_head wb_list, /* Defines state of page: */ 31 *wb_list_head; /* read/write/commit */ ··· 61 extern void nfs_release_request(struct nfs_page *req); 62 63 64 - extern void nfs_list_add_request(struct nfs_page *, struct list_head *); 65 - 66 extern int nfs_scan_list(struct list_head *, struct list_head *, 67 unsigned long, unsigned int); 68 extern int nfs_coalesce_requests(struct list_head *, struct list_head *, 69 unsigned int); 70 extern int nfs_wait_on_request(struct nfs_page *); 71 extern void nfs_unlock_request(struct nfs_page *req); 72 73 /* 74 * Lock the page of an asynchronous request without incrementing the wb_count ··· 96 return 1; 97 } 98 99 100 /** 101 * nfs_list_remove_request - Remove a request from its wb_list ··· 118 { 119 if (list_empty(&req->wb_list)) 120 return; 121 - if (!NFS_WBACK_BUSY(req)) { 122 - printk(KERN_ERR "NFS: unlocked request attempted removed from list!\n"); 123 - BUG(); 124 - } 125 list_del_init(&req->wb_list); 126 req->wb_list_head = NULL; 127 }
··· 20 #include <asm/atomic.h> 21 22 /* 23 + * Valid flags for the radix tree 24 + */ 25 + #define NFS_PAGE_TAG_DIRTY 0 26 + #define NFS_PAGE_TAG_WRITEBACK 1 27 + 28 + /* 29 * Valid flags for a dirty buffer 30 */ 31 #define PG_BUSY 0 32 #define PG_NEED_COMMIT 1 33 #define PG_NEED_RESCHED 2 34 35 + struct nfs_inode; 36 struct nfs_page { 37 struct list_head wb_list, /* Defines state of page: */ 38 *wb_list_head; /* read/write/commit */ ··· 54 extern void nfs_release_request(struct nfs_page *req); 55 56 57 + extern int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst, 58 + unsigned long idx_start, unsigned int npages); 59 extern int nfs_scan_list(struct list_head *, struct list_head *, 60 unsigned long, unsigned int); 61 extern int nfs_coalesce_requests(struct list_head *, struct list_head *, 62 unsigned int); 63 extern int nfs_wait_on_request(struct nfs_page *); 64 extern void nfs_unlock_request(struct nfs_page *req); 65 + extern int nfs_set_page_writeback_locked(struct nfs_page *req); 66 + extern void nfs_clear_page_writeback(struct nfs_page *req); 67 + 68 69 /* 70 * Lock the page of an asynchronous request without incrementing the wb_count ··· 86 return 1; 87 } 88 89 + /** 90 + * nfs_list_add_request - Insert a request into a list 91 + * @req: request 92 + * @head: head of list into which to insert the request. 93 + */ 94 + static inline void 95 + nfs_list_add_request(struct nfs_page *req, struct list_head *head) 96 + { 97 + list_add_tail(&req->wb_list, head); 98 + req->wb_list_head = head; 99 + } 100 + 101 102 /** 103 * nfs_list_remove_request - Remove a request from its wb_list ··· 96 { 97 if (list_empty(&req->wb_list)) 98 return; 99 list_del_init(&req->wb_list); 100 req->wb_list_head = NULL; 101 }
+43
include/linux/nfs_xdr.h
··· 2 #define _LINUX_NFS_XDR_H 3 4 #include <linux/sunrpc/xprt.h> 5 6 struct nfs4_fsid { 7 __u64 major; ··· 327 const u32 * bitmask; 328 }; 329 330 struct nfs_setattrres { 331 struct nfs_fattr * fattr; 332 const struct nfs_server * server; ··· 366 struct nfs_fh * fh; 367 __u32 cookie; 368 unsigned int count; 369 struct page ** pages; 370 }; 371 ··· 504 struct nfs_fattr * dir_attr; 505 __u32 * verf; 506 int plus; 507 }; 508 509 #ifdef CONFIG_NFS_V4 ··· 705 int version; /* Protocol version */ 706 struct dentry_operations *dentry_ops; 707 struct inode_operations *dir_inode_ops; 708 709 int (*getroot) (struct nfs_server *, struct nfs_fh *, 710 struct nfs_fsinfo *); ··· 752 int (*file_open) (struct inode *, struct file *); 753 int (*file_release) (struct inode *, struct file *); 754 int (*lock)(struct file *, int, struct file_lock *); 755 }; 756 757 /* ··· 771 extern struct rpc_version nfs_version2; 772 extern struct rpc_version nfs_version3; 773 extern struct rpc_version nfs_version4; 774 775 #endif
··· 2 #define _LINUX_NFS_XDR_H 3 4 #include <linux/sunrpc/xprt.h> 5 + #include <linux/nfsacl.h> 6 7 struct nfs4_fsid { 8 __u64 major; ··· 326 const u32 * bitmask; 327 }; 328 329 + struct nfs_setaclargs { 330 + struct nfs_fh * fh; 331 + size_t acl_len; 332 + unsigned int acl_pgbase; 333 + struct page ** acl_pages; 334 + }; 335 + 336 + struct nfs_getaclargs { 337 + struct nfs_fh * fh; 338 + size_t acl_len; 339 + unsigned int acl_pgbase; 340 + struct page ** acl_pages; 341 + }; 342 + 343 struct nfs_setattrres { 344 struct nfs_fattr * fattr; 345 const struct nfs_server * server; ··· 351 struct nfs_fh * fh; 352 __u32 cookie; 353 unsigned int count; 354 + struct page ** pages; 355 + }; 356 + 357 + struct nfs3_getaclargs { 358 + struct nfs_fh * fh; 359 + int mask; 360 + struct page ** pages; 361 + }; 362 + 363 + struct nfs3_setaclargs { 364 + struct inode * inode; 365 + int mask; 366 + struct posix_acl * acl_access; 367 + struct posix_acl * acl_default; 368 struct page ** pages; 369 }; 370 ··· 475 struct nfs_fattr * dir_attr; 476 __u32 * verf; 477 int plus; 478 + }; 479 + 480 + struct nfs3_getaclres { 481 + struct nfs_fattr * fattr; 482 + int mask; 483 + unsigned int acl_access_count; 484 + unsigned int acl_default_count; 485 + struct posix_acl * acl_access; 486 + struct posix_acl * acl_default; 487 }; 488 489 #ifdef CONFIG_NFS_V4 ··· 667 int version; /* Protocol version */ 668 struct dentry_operations *dentry_ops; 669 struct inode_operations *dir_inode_ops; 670 + struct inode_operations *file_inode_ops; 671 672 int (*getroot) (struct nfs_server *, struct nfs_fh *, 673 struct nfs_fsinfo *); ··· 713 int (*file_open) (struct inode *, struct file *); 714 int (*file_release) (struct inode *, struct file *); 715 int (*lock)(struct file *, int, struct file_lock *); 716 + void (*clear_acl_cache)(struct inode *); 717 }; 718 719 /* ··· 731 extern struct rpc_version nfs_version2; 732 extern struct rpc_version nfs_version3; 733 extern struct rpc_version nfs_version4; 734 + 735 + 
extern struct rpc_version nfsacl_version3; 736 + extern struct rpc_program nfsacl_program; 737 738 #endif
+58
include/linux/nfsacl.h
···
··· 1 + /* 2 + * File: linux/nfsacl.h 3 + * 4 + * (C) 2003 Andreas Gruenbacher <agruen@suse.de> 5 + */ 6 + #ifndef __LINUX_NFSACL_H 7 + #define __LINUX_NFSACL_H 8 + 9 + #define NFS_ACL_PROGRAM 100227 10 + 11 + #define ACLPROC2_GETACL 1 12 + #define ACLPROC2_SETACL 2 13 + #define ACLPROC2_GETATTR 3 14 + #define ACLPROC2_ACCESS 4 15 + 16 + #define ACLPROC3_GETACL 1 17 + #define ACLPROC3_SETACL 2 18 + 19 + 20 + /* Flags for the getacl/setacl mode */ 21 + #define NFS_ACL 0x0001 22 + #define NFS_ACLCNT 0x0002 23 + #define NFS_DFACL 0x0004 24 + #define NFS_DFACLCNT 0x0008 25 + 26 + /* Flag for Default ACL entries */ 27 + #define NFS_ACL_DEFAULT 0x1000 28 + 29 + #ifdef __KERNEL__ 30 + 31 + #include <linux/posix_acl.h> 32 + 33 + /* Maximum number of ACL entries over NFS */ 34 + #define NFS_ACL_MAX_ENTRIES 1024 35 + 36 + #define NFSACL_MAXWORDS (2*(2+3*NFS_ACL_MAX_ENTRIES)) 37 + #define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \ 38 + >> PAGE_SHIFT) 39 + 40 + static inline unsigned int 41 + nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default) 42 + { 43 + unsigned int w = 16; 44 + w += max(acl_access ? (int)acl_access->a_count : 3, 4) * 12; 45 + if (acl_default) 46 + w += max((int)acl_default->a_count, 4) * 12; 47 + return w; 48 + } 49 + 50 + extern unsigned int 51 + nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, 52 + struct posix_acl *acl, int encode_entries, int typeflag); 53 + extern unsigned int 54 + nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, 55 + struct posix_acl **pacl); 56 + 57 + #endif /* __KERNEL__ */ 58 + #endif /* __LINUX_NFSACL_H */
+16
include/linux/nfsd/nfsd.h
··· 15 #include <linux/unistd.h> 16 #include <linux/dirent.h> 17 #include <linux/fs.h> 18 #include <linux/mount.h> 19 20 #include <linux/nfsd/debug.h> ··· 124 125 int nfsd_notify_change(struct inode *, struct iattr *); 126 int nfsd_permission(struct svc_export *, struct dentry *, int); 127 128 129 /*
··· 15 #include <linux/unistd.h> 16 #include <linux/dirent.h> 17 #include <linux/fs.h> 18 + #include <linux/posix_acl.h> 19 #include <linux/mount.h> 20 21 #include <linux/nfsd/debug.h> ··· 123 124 int nfsd_notify_change(struct inode *, struct iattr *); 125 int nfsd_permission(struct svc_export *, struct dentry *, int); 126 + 127 + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 128 + #ifdef CONFIG_NFSD_V2_ACL 129 + extern struct svc_version nfsd_acl_version2; 130 + #else 131 + #define nfsd_acl_version2 NULL 132 + #endif 133 + #ifdef CONFIG_NFSD_V3_ACL 134 + extern struct svc_version nfsd_acl_version3; 135 + #else 136 + #define nfsd_acl_version3 NULL 137 + #endif 138 + struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int); 139 + int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); 140 + #endif 141 142 143 /*
+4
include/linux/nfsd/xdr.h
··· 169 170 int nfssvc_release_fhandle(struct svc_rqst *, u32 *, struct nfsd_fhandle *); 171 172 #endif /* LINUX_NFSD_H */
··· 169 170 int nfssvc_release_fhandle(struct svc_rqst *, u32 *, struct nfsd_fhandle *); 171 172 + /* Helper functions for NFSv2 ACL code */ 173 + u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp); 174 + u32 *nfs2svc_decode_fh(u32 *p, struct svc_fh *fhp); 175 + 176 #endif /* LINUX_NFSD_H */
+26
include/linux/nfsd/xdr3.h
··· 110 __u32 count; 111 }; 112 113 struct nfsd3_attrstat { 114 __u32 status; 115 struct svc_fh fh; ··· 222 struct svc_fh fh; 223 }; 224 225 /* dummy type for release */ 226 struct nfsd3_fhandle_pair { 227 __u32 dummy; ··· 262 struct nfsd3_fsinfores fsinfores; 263 struct nfsd3_pathconfres pathconfres; 264 struct nfsd3_commitres commitres; 265 }; 266 267 #define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore) ··· 338 int nfs3svc_encode_entry_plus(struct readdir_cd *, const char *name, 339 int namlen, loff_t offset, ino_t ino, 340 unsigned int); 341 342 343 #endif /* _LINUX_NFSD_XDR3_H */
··· 110 __u32 count; 111 }; 112 113 + struct nfsd3_getaclargs { 114 + struct svc_fh fh; 115 + int mask; 116 + }; 117 + 118 + struct posix_acl; 119 + struct nfsd3_setaclargs { 120 + struct svc_fh fh; 121 + int mask; 122 + struct posix_acl *acl_access; 123 + struct posix_acl *acl_default; 124 + }; 125 + 126 struct nfsd3_attrstat { 127 __u32 status; 128 struct svc_fh fh; ··· 209 struct svc_fh fh; 210 }; 211 212 + struct nfsd3_getaclres { 213 + __u32 status; 214 + struct svc_fh fh; 215 + int mask; 216 + struct posix_acl *acl_access; 217 + struct posix_acl *acl_default; 218 + }; 219 + 220 /* dummy type for release */ 221 struct nfsd3_fhandle_pair { 222 __u32 dummy; ··· 241 struct nfsd3_fsinfores fsinfores; 242 struct nfsd3_pathconfres pathconfres; 243 struct nfsd3_commitres commitres; 244 + struct nfsd3_getaclres getaclres; 245 }; 246 247 #define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore) ··· 316 int nfs3svc_encode_entry_plus(struct readdir_cd *, const char *name, 317 int namlen, loff_t offset, ino_t ino, 318 unsigned int); 319 + /* Helper functions for NFSv3 ACL code */ 320 + u32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, 321 + struct svc_fh *fhp); 322 + u32 *nfs3svc_decode_fh(u32 *p, struct svc_fh *fhp); 323 324 325 #endif /* _LINUX_NFSD_XDR3_H */
+6
include/linux/sunrpc/clnt.h
··· 111 struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname, 112 struct rpc_program *info, 113 u32 version, rpc_authflavor_t authflavor); 114 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 115 int rpc_shutdown_client(struct rpc_clnt *); 116 int rpc_destroy_client(struct rpc_clnt *); ··· 134 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset); 135 void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); 136 size_t rpc_max_payload(struct rpc_clnt *); 137 138 static __inline__ 139 int rpc_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags)
··· 111 struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname, 112 struct rpc_program *info, 113 u32 version, rpc_authflavor_t authflavor); 114 + struct rpc_clnt *rpc_new_client(struct rpc_xprt *xprt, char *servname, 115 + struct rpc_program *info, 116 + u32 version, rpc_authflavor_t authflavor); 117 + struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 118 + struct rpc_program *, int); 119 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 120 int rpc_shutdown_client(struct rpc_clnt *); 121 int rpc_destroy_client(struct rpc_clnt *); ··· 129 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset); 130 void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); 131 size_t rpc_max_payload(struct rpc_clnt *); 132 + int rpc_ping(struct rpc_clnt *clnt, int flags); 133 134 static __inline__ 135 int rpc_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags)
-1
include/linux/sunrpc/sched.h
··· 31 struct rpc_wait { 32 struct list_head list; /* wait queue links */ 33 struct list_head links; /* Links to related tasks */ 34 - wait_queue_head_t waitq; /* sync: sleep on this q */ 35 struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */ 36 }; 37
··· 31 struct rpc_wait { 32 struct list_head list; /* wait queue links */ 33 struct list_head links; /* Links to related tasks */ 34 struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */ 35 }; 36
+13 -1
include/linux/sunrpc/svc.h
··· 185 return vec->iov_len <= PAGE_SIZE; 186 } 187 188 static inline int svc_take_page(struct svc_rqst *rqstp) 189 { 190 if (rqstp->rq_arghi <= rqstp->rq_argused) ··· 251 }; 252 253 /* 254 - * RPC program 255 */ 256 struct svc_program { 257 u32 pg_prog; /* program number */ 258 unsigned int pg_lovers; /* lowest version */ 259 unsigned int pg_hivers; /* lowest version */
··· 185 return vec->iov_len <= PAGE_SIZE; 186 } 187 188 + static inline struct page * 189 + svc_take_res_page(struct svc_rqst *rqstp) 190 + { 191 + if (rqstp->rq_arghi <= rqstp->rq_argused) 192 + return NULL; 193 + rqstp->rq_arghi--; 194 + rqstp->rq_respages[rqstp->rq_resused] = 195 + rqstp->rq_argpages[rqstp->rq_arghi]; 196 + return rqstp->rq_respages[rqstp->rq_resused++]; 197 + } 198 + 199 static inline int svc_take_page(struct svc_rqst *rqstp) 200 { 201 if (rqstp->rq_arghi <= rqstp->rq_argused) ··· 240 }; 241 242 /* 243 + * List of RPC programs on the same transport endpoint 244 */ 245 struct svc_program { 246 + struct svc_program * pg_next; /* other programs (same xprt) */ 247 u32 pg_prog; /* program number */ 248 unsigned int pg_lovers; /* lowest version */ 249 unsigned int pg_hivers; /* lowest version */
+19 -2
include/linux/sunrpc/xdr.h
··· 146 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); 147 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int); 148 extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int); 149 - extern int read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len); 150 151 /* 152 * Helper structure for copying from an sk_buff. ··· 161 162 typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len); 163 164 - extern void xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, 165 skb_reader_t *, skb_read_actor_t); 166 167 struct socket; 168 struct sockaddr; 169 extern int xdr_sendpages(struct socket *, struct sockaddr *, int, 170 struct xdr_buf *, unsigned int, int); 171 172 /* 173 * Provide some simple tools for XDR buffer overflow-checking etc.
··· 146 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); 147 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int); 148 extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int); 149 + extern int read_bytes_from_xdr_buf(struct xdr_buf *, int, void *, int); 150 + extern int write_bytes_to_xdr_buf(struct xdr_buf *, int, void *, int); 151 152 /* 153 * Helper structure for copying from an sk_buff. ··· 160 161 typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len); 162 163 + extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, 164 skb_reader_t *, skb_read_actor_t); 165 166 struct socket; 167 struct sockaddr; 168 extern int xdr_sendpages(struct socket *, struct sockaddr *, int, 169 struct xdr_buf *, unsigned int, int); 170 + 171 + extern int xdr_encode_word(struct xdr_buf *, int, u32); 172 + extern int xdr_decode_word(struct xdr_buf *, int, u32 *); 173 + 174 + struct xdr_array2_desc; 175 + typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); 176 + struct xdr_array2_desc { 177 + unsigned int elem_size; 178 + unsigned int array_len; 179 + xdr_xcode_elem_t xcode; 180 + }; 181 + 182 + extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, 183 + struct xdr_array2_desc *desc); 184 + extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, 185 + struct xdr_array2_desc *desc); 186 187 /* 188 * Provide some simple tools for XDR buffer overflow-checking etc.
+3 -3
net/sunrpc/auth.c
··· 66 u32 flavor = pseudoflavor_to_flavor(pseudoflavor); 67 68 if (flavor >= RPC_AUTH_MAXFLAVOR || !(ops = auth_flavors[flavor])) 69 - return NULL; 70 auth = ops->create(clnt, pseudoflavor); 71 - if (!auth) 72 - return NULL; 73 if (clnt->cl_auth) 74 rpcauth_destroy(clnt->cl_auth); 75 clnt->cl_auth = auth;
··· 66 u32 flavor = pseudoflavor_to_flavor(pseudoflavor); 67 68 if (flavor >= RPC_AUTH_MAXFLAVOR || !(ops = auth_flavors[flavor])) 69 + return ERR_PTR(-EINVAL); 70 auth = ops->create(clnt, pseudoflavor); 71 + if (IS_ERR(auth)) 72 + return auth; 73 if (clnt->cl_auth) 74 rpcauth_destroy(clnt->cl_auth); 75 clnt->cl_auth = auth;
+11 -7
net/sunrpc/auth_gss/auth_gss.c
··· 660 { 661 struct gss_auth *gss_auth; 662 struct rpc_auth * auth; 663 664 dprintk("RPC: creating GSS authenticator for client %p\n",clnt); 665 666 if (!try_module_get(THIS_MODULE)) 667 - return NULL; 668 if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL))) 669 goto out_dec; 670 gss_auth->client = clnt; 671 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor); 672 if (!gss_auth->mech) { 673 printk(KERN_WARNING "%s: Pseudoflavor %d not found!", ··· 677 goto err_free; 678 } 679 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 680 - /* FIXME: Will go away once privacy support is merged in */ 681 - if (gss_auth->service == RPC_GSS_SVC_PRIVACY) 682 - gss_auth->service = RPC_GSS_SVC_INTEGRITY; 683 INIT_LIST_HEAD(&gss_auth->upcalls); 684 spin_lock_init(&gss_auth->lock); 685 auth = &gss_auth->rpc_auth; ··· 688 auth->au_flavor = flavor; 689 atomic_set(&auth->au_count, 1); 690 691 - if (rpcauth_init_credcache(auth, GSS_CRED_EXPIRE) < 0) 692 goto err_put_mech; 693 694 snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s", 695 clnt->cl_pathname, 696 gss_auth->mech->gm_name); 697 gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); 698 - if (IS_ERR(gss_auth->dentry)) 699 goto err_put_mech; 700 701 return auth; 702 err_put_mech: ··· 708 kfree(gss_auth); 709 out_dec: 710 module_put(THIS_MODULE); 711 - return NULL; 712 } 713 714 static void
··· 660 { 661 struct gss_auth *gss_auth; 662 struct rpc_auth * auth; 663 + int err = -ENOMEM; /* XXX? */ 664 665 dprintk("RPC: creating GSS authenticator for client %p\n",clnt); 666 667 if (!try_module_get(THIS_MODULE)) 668 + return ERR_PTR(err); 669 if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL))) 670 goto out_dec; 671 gss_auth->client = clnt; 672 + err = -EINVAL; 673 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor); 674 if (!gss_auth->mech) { 675 printk(KERN_WARNING "%s: Pseudoflavor %d not found!", ··· 675 goto err_free; 676 } 677 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 678 + if (gss_auth->service == 0) 679 + goto err_put_mech; 680 INIT_LIST_HEAD(&gss_auth->upcalls); 681 spin_lock_init(&gss_auth->lock); 682 auth = &gss_auth->rpc_auth; ··· 687 auth->au_flavor = flavor; 688 atomic_set(&auth->au_count, 1); 689 690 + err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE); 691 + if (err) 692 goto err_put_mech; 693 694 snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s", 695 clnt->cl_pathname, 696 gss_auth->mech->gm_name); 697 gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); 698 + if (IS_ERR(gss_auth->dentry)) { 699 + err = PTR_ERR(gss_auth->dentry); 700 goto err_put_mech; 701 + } 702 703 return auth; 704 err_put_mech: ··· 704 kfree(gss_auth); 705 out_dec: 706 module_put(THIS_MODULE); 707 + return ERR_PTR(err); 708 } 709 710 static void
+157 -48
net/sunrpc/clnt.c
··· 97 * made to sleep too long. 98 */ 99 struct rpc_clnt * 100 - rpc_create_client(struct rpc_xprt *xprt, char *servname, 101 struct rpc_program *program, u32 vers, 102 rpc_authflavor_t flavor) 103 { 104 struct rpc_version *version; 105 struct rpc_clnt *clnt = NULL; 106 int err; 107 int len; 108 ··· 158 if (err < 0) 159 goto out_no_path; 160 161 - err = -ENOMEM; 162 - if (!rpcauth_create(flavor, clnt)) { 163 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", 164 flavor); 165 goto out_no_auth; 166 } 167 ··· 180 kfree(clnt->cl_server); 181 kfree(clnt); 182 out_err: 183 return ERR_PTR(err); 184 } 185 ··· 241 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 242 if (new->cl_auth) 243 atomic_inc(&new->cl_auth->au_count); 244 return new; 245 out_no_clnt: 246 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__); ··· 331 rpc_destroy_client(clnt); 332 } 333 334 /* 335 * Default callback for async RPC calls 336 */ ··· 378 } 379 380 /* 381 - * Export the signal mask handling for aysnchronous code that 382 * sleeps on RPC calls 383 */ 384 385 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset) 386 { 387 - unsigned long sigallow = sigmask(SIGKILL); 388 - unsigned long irqflags; 389 - 390 - /* Turn off various signals */ 391 - if (clnt->cl_intr) { 392 - struct k_sigaction *action = current->sighand->action; 393 - if (action[SIGINT-1].sa.sa_handler == SIG_DFL) 394 - sigallow |= sigmask(SIGINT); 395 - if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL) 396 - sigallow |= sigmask(SIGQUIT); 397 - } 398 - spin_lock_irqsave(&current->sighand->siglock, irqflags); 399 - *oldset = current->blocked; 400 - siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]); 401 - recalc_sigpending(); 402 - spin_unlock_irqrestore(&current->sighand->siglock, irqflags); 403 } 404 405 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) 406 { 407 - unsigned long irqflags; 408 - 409 - spin_lock_irqsave(&current->sighand->siglock, 
irqflags); 410 - current->blocked = *oldset; 411 - recalc_sigpending(); 412 - spin_unlock_irqrestore(&current->sighand->siglock, irqflags); 413 } 414 415 /* ··· 430 431 BUG_ON(flags & RPC_TASK_ASYNC); 432 433 - rpc_clnt_sigmask(clnt, &oldset); 434 - 435 status = -ENOMEM; 436 task = rpc_new_task(clnt, NULL, flags); 437 if (task == NULL) 438 goto out; 439 440 rpc_call_setup(task, msg, 0); 441 442 /* Set up the call info struct and execute the task */ 443 - if (task->tk_status == 0) 444 status = rpc_execute(task); 445 - else { 446 status = task->tk_status; 447 rpc_release_task(task); 448 } 449 450 out: 451 - rpc_clnt_sigunmask(clnt, &oldset); 452 - 453 return status; 454 } 455 ··· 470 471 flags |= RPC_TASK_ASYNC; 472 473 - rpc_clnt_sigmask(clnt, &oldset); 474 - 475 /* Create/initialize a new RPC task */ 476 if (!callback) 477 callback = rpc_default_callback; ··· 477 if (!(task = rpc_new_task(clnt, callback, flags))) 478 goto out; 479 task->tk_calldata = data; 480 481 rpc_call_setup(task, msg, 0); 482 ··· 490 else 491 rpc_release_task(task); 492 493 out: 494 - rpc_clnt_sigunmask(clnt, &oldset); 495 - 496 return status; 497 } 498 ··· 669 return; 670 printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); 671 672 - if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) { 673 xprt_release(task); 674 task->tk_action = call_reserve; 675 rpc_delay(task, HZ>>4); ··· 1033 *p++ = htonl(clnt->cl_prog); /* program number */ 1034 *p++ = htonl(clnt->cl_vers); /* program version */ 1035 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ 1036 - return rpcauth_marshcred(task, p); 1037 } 1038 1039 /* ··· 1064 case RPC_AUTH_ERROR: 1065 break; 1066 case RPC_MISMATCH: 1067 - printk(KERN_WARNING "%s: RPC call version mismatch!\n", __FUNCTION__); 1068 - goto out_eio; 1069 default: 1070 - printk(KERN_WARNING "%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n); 1071 goto out_eio; 1072 } 1073 if (--len < 0) ··· 1119 case RPC_SUCCESS: 1120 
return p; 1121 case RPC_PROG_UNAVAIL: 1122 - printk(KERN_WARNING "RPC: call_verify: program %u is unsupported by server %s\n", 1123 (unsigned int)task->tk_client->cl_prog, 1124 task->tk_client->cl_server); 1125 - goto out_eio; 1126 case RPC_PROG_MISMATCH: 1127 - printk(KERN_WARNING "RPC: call_verify: program %u, version %u unsupported by server %s\n", 1128 (unsigned int)task->tk_client->cl_prog, 1129 (unsigned int)task->tk_client->cl_vers, 1130 task->tk_client->cl_server); 1131 - goto out_eio; 1132 case RPC_PROC_UNAVAIL: 1133 - printk(KERN_WARNING "RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n", 1134 task->tk_msg.rpc_proc, 1135 task->tk_client->cl_prog, 1136 task->tk_client->cl_vers, 1137 task->tk_client->cl_server); 1138 - goto out_eio; 1139 case RPC_GARBAGE_ARGS: 1140 dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__); 1141 break; /* retry */ ··· 1151 task->tk_client->cl_stats->rpcgarbage++; 1152 if (task->tk_garb_retry) { 1153 task->tk_garb_retry--; 1154 - dprintk(KERN_WARNING "RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); 1155 task->tk_action = call_bind; 1156 return NULL; 1157 } ··· 1164 out_overflow: 1165 printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); 1166 goto out_retry; 1167 }
··· 97 * made to sleep too long. 98 */ 99 struct rpc_clnt * 100 + rpc_new_client(struct rpc_xprt *xprt, char *servname, 101 struct rpc_program *program, u32 vers, 102 rpc_authflavor_t flavor) 103 { 104 struct rpc_version *version; 105 struct rpc_clnt *clnt = NULL; 106 + struct rpc_auth *auth; 107 int err; 108 int len; 109 ··· 157 if (err < 0) 158 goto out_no_path; 159 160 + auth = rpcauth_create(flavor, clnt); 161 + if (IS_ERR(auth)) { 162 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", 163 flavor); 164 + err = PTR_ERR(auth); 165 goto out_no_auth; 166 } 167 ··· 178 kfree(clnt->cl_server); 179 kfree(clnt); 180 out_err: 181 + xprt_destroy(xprt); 182 + return ERR_PTR(err); 183 + } 184 + 185 + /** 186 + * Create an RPC client 187 + * @xprt - pointer to xprt struct 188 + * @servname - name of server 189 + * @info - rpc_program 190 + * @version - rpc_program version 191 + * @authflavor - rpc_auth flavour to use 192 + * 193 + * Creates an RPC client structure, then pings the server in order to 194 + * determine if it is up, and if it supports this program and version. 195 + * 196 + * This function should never be called by asynchronous tasks such as 197 + * the portmapper. 
198 + */ 199 + struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname, 200 + struct rpc_program *info, u32 version, rpc_authflavor_t authflavor) 201 + { 202 + struct rpc_clnt *clnt; 203 + int err; 204 + 205 + clnt = rpc_new_client(xprt, servname, info, version, authflavor); 206 + if (IS_ERR(clnt)) 207 + return clnt; 208 + err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 209 + if (err == 0) 210 + return clnt; 211 + rpc_shutdown_client(clnt); 212 return ERR_PTR(err); 213 } 214 ··· 208 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 209 if (new->cl_auth) 210 atomic_inc(&new->cl_auth->au_count); 211 + new->cl_pmap = &new->cl_pmap_default; 212 + rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait"); 213 return new; 214 out_no_clnt: 215 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__); ··· 296 rpc_destroy_client(clnt); 297 } 298 299 + /** 300 + * rpc_bind_new_program - bind a new RPC program to an existing client 301 + * @old - old rpc_client 302 + * @program - rpc program to set 303 + * @vers - rpc program version 304 + * 305 + * Clones the rpc client and sets up a new RPC program. This is mainly 306 + * of use for enabling different RPC programs to share the same transport. 307 + * The Sun NFSv2/v3 ACL protocol can do this. 
308 + */ 309 + struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, 310 + struct rpc_program *program, 311 + int vers) 312 + { 313 + struct rpc_clnt *clnt; 314 + struct rpc_version *version; 315 + int err; 316 + 317 + BUG_ON(vers >= program->nrvers || !program->version[vers]); 318 + version = program->version[vers]; 319 + clnt = rpc_clone_client(old); 320 + if (IS_ERR(clnt)) 321 + goto out; 322 + clnt->cl_procinfo = version->procs; 323 + clnt->cl_maxproc = version->nrprocs; 324 + clnt->cl_protname = program->name; 325 + clnt->cl_prog = program->number; 326 + clnt->cl_vers = version->number; 327 + clnt->cl_stats = program->stats; 328 + err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 329 + if (err != 0) { 330 + rpc_shutdown_client(clnt); 331 + clnt = ERR_PTR(err); 332 + } 333 + out: 334 + return clnt; 335 + } 336 + 337 /* 338 * Default callback for async RPC calls 339 */ ··· 305 } 306 307 /* 308 + * Export the signal mask handling for synchronous code that 309 * sleeps on RPC calls 310 */ 311 + #define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL)) 312 313 + static void rpc_save_sigmask(sigset_t *oldset, int intr) 314 + { 315 + unsigned long sigallow = 0; 316 + sigset_t sigmask; 317 + 318 + /* Block all signals except those listed in sigallow */ 319 + if (intr) 320 + sigallow |= RPC_INTR_SIGNALS; 321 + siginitsetinv(&sigmask, sigallow); 322 + sigprocmask(SIG_BLOCK, &sigmask, oldset); 323 + } 324 + 325 + static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset) 326 + { 327 + rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task)); 328 + } 329 + 330 + static inline void rpc_restore_sigmask(sigset_t *oldset) 331 + { 332 + sigprocmask(SIG_SETMASK, oldset, NULL); 333 + } 334 + 335 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset) 336 { 337 + rpc_save_sigmask(oldset, clnt->cl_intr); 338 } 339 340 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) 341 { 342 + rpc_restore_sigmask(oldset); 
343 } 344 345 /* ··· 354 355 BUG_ON(flags & RPC_TASK_ASYNC); 356 357 status = -ENOMEM; 358 task = rpc_new_task(clnt, NULL, flags); 359 if (task == NULL) 360 goto out; 361 362 + /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ 363 + rpc_task_sigmask(task, &oldset); 364 + 365 rpc_call_setup(task, msg, 0); 366 367 /* Set up the call info struct and execute the task */ 368 + if (task->tk_status == 0) { 369 status = rpc_execute(task); 370 + } else { 371 status = task->tk_status; 372 rpc_release_task(task); 373 } 374 375 + rpc_restore_sigmask(&oldset); 376 out: 377 return status; 378 } 379 ··· 394 395 flags |= RPC_TASK_ASYNC; 396 397 /* Create/initialize a new RPC task */ 398 if (!callback) 399 callback = rpc_default_callback; ··· 403 if (!(task = rpc_new_task(clnt, callback, flags))) 404 goto out; 405 task->tk_calldata = data; 406 + 407 + /* Mask signals on GSS_AUTH upcalls */ 408 + rpc_task_sigmask(task, &oldset); 409 410 rpc_call_setup(task, msg, 0); 411 ··· 413 else 414 rpc_release_task(task); 415 416 + rpc_restore_sigmask(&oldset); 417 out: 418 return status; 419 } 420 ··· 593 return; 594 printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task); 595 596 + if (RPC_IS_ASYNC(task) || !signalled()) { 597 xprt_release(task); 598 task->tk_action = call_reserve; 599 rpc_delay(task, HZ>>4); ··· 957 *p++ = htonl(clnt->cl_prog); /* program number */ 958 *p++ = htonl(clnt->cl_vers); /* program version */ 959 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ 960 + p = rpcauth_marshcred(task, p); 961 + req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); 962 + return p; 963 } 964 965 /* ··· 986 case RPC_AUTH_ERROR: 987 break; 988 case RPC_MISMATCH: 989 + dprintk("%s: RPC call version mismatch!\n", __FUNCTION__); 990 + error = -EPROTONOSUPPORT; 991 + goto out_err; 992 default: 993 + dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n); 994 goto out_eio; 995 } 996 if (--len < 0) ··· 1040 case RPC_SUCCESS: 1041 return p; 1042 case 
RPC_PROG_UNAVAIL: 1043 + dprintk("RPC: call_verify: program %u is unsupported by server %s\n", 1044 (unsigned int)task->tk_client->cl_prog, 1045 task->tk_client->cl_server); 1046 + error = -EPFNOSUPPORT; 1047 + goto out_err; 1048 case RPC_PROG_MISMATCH: 1049 + dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n", 1050 (unsigned int)task->tk_client->cl_prog, 1051 (unsigned int)task->tk_client->cl_vers, 1052 task->tk_client->cl_server); 1053 + error = -EPROTONOSUPPORT; 1054 + goto out_err; 1055 case RPC_PROC_UNAVAIL: 1056 + dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n", 1057 task->tk_msg.rpc_proc, 1058 task->tk_client->cl_prog, 1059 task->tk_client->cl_vers, 1060 task->tk_client->cl_server); 1061 + error = -EOPNOTSUPP; 1062 + goto out_err; 1063 case RPC_GARBAGE_ARGS: 1064 dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__); 1065 break; /* retry */ ··· 1069 task->tk_client->cl_stats->rpcgarbage++; 1070 if (task->tk_garb_retry) { 1071 task->tk_garb_retry--; 1072 + dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); 1073 task->tk_action = call_bind; 1074 return NULL; 1075 } ··· 1082 out_overflow: 1083 printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); 1084 goto out_retry; 1085 + } 1086 + 1087 + static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) 1088 + { 1089 + return 0; 1090 + } 1091 + 1092 + static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj) 1093 + { 1094 + return 0; 1095 + } 1096 + 1097 + static struct rpc_procinfo rpcproc_null = { 1098 + .p_encode = rpcproc_encode_null, 1099 + .p_decode = rpcproc_decode_null, 1100 + }; 1101 + 1102 + int rpc_ping(struct rpc_clnt *clnt, int flags) 1103 + { 1104 + struct rpc_message msg = { 1105 + .rpc_proc = &rpcproc_null, 1106 + }; 1107 + int err; 1108 + msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); 1109 + err = rpc_call_sync(clnt, &msg, flags); 1110 + 
put_rpccred(msg.rpc_cred); 1111 + return err; 1112 }
+5 -4
net/sunrpc/pmap_clnt.c
··· 53 task->tk_pid, clnt->cl_server, 54 map->pm_prog, map->pm_vers, map->pm_prot); 55 56 spin_lock(&pmap_lock); 57 if (map->pm_binding) { 58 rpc_sleep_on(&map->pm_bindwait, task, NULL, NULL); ··· 210 xprt->addr.sin_port = htons(RPC_PMAP_PORT); 211 212 /* printk("pmap: create clnt\n"); */ 213 - clnt = rpc_create_client(xprt, hostname, 214 &pmap_program, RPC_PMAP_VERSION, 215 RPC_AUTH_UNIX); 216 - if (IS_ERR(clnt)) { 217 - xprt_destroy(xprt); 218 - } else { 219 clnt->cl_softrtry = 1; 220 clnt->cl_chatty = 1; 221 clnt->cl_oneshot = 1;
··· 53 task->tk_pid, clnt->cl_server, 54 map->pm_prog, map->pm_vers, map->pm_prot); 55 56 + /* Autobind on cloned rpc clients is discouraged */ 57 + BUG_ON(clnt->cl_parent != clnt); 58 + 59 spin_lock(&pmap_lock); 60 if (map->pm_binding) { 61 rpc_sleep_on(&map->pm_bindwait, task, NULL, NULL); ··· 207 xprt->addr.sin_port = htons(RPC_PMAP_PORT); 208 209 /* printk("pmap: create clnt\n"); */ 210 + clnt = rpc_new_client(xprt, hostname, 211 &pmap_program, RPC_PMAP_VERSION, 212 RPC_AUTH_UNIX); 213 + if (!IS_ERR(clnt)) { 214 clnt->cl_softrtry = 1; 215 clnt->cl_chatty = 1; 216 clnt->cl_oneshot = 1;
+48 -36
net/sunrpc/sched.c
··· 290 return; 291 } 292 } else 293 - wake_up(&task->u.tk_wait.waitq); 294 } 295 296 /* ··· 555 } 556 557 /* 558 * This is the RPC `scheduler' (or rather, the finite state machine). 559 */ 560 static int __rpc_execute(struct rpc_task *task) ··· 598 599 BUG_ON(RPC_IS_QUEUED(task)); 600 601 - restarted: 602 - while (1) { 603 /* 604 * Garbage collection of pending timers... 605 */ ··· 631 * by someone else. 632 */ 633 if (!RPC_IS_QUEUED(task)) { 634 - if (!task->tk_action) 635 break; 636 - lock_kernel(); 637 - task->tk_action(task); 638 - unlock_kernel(); 639 } 640 641 /* ··· 656 657 /* sync task: sleep here */ 658 dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); 659 - if (RPC_TASK_UNINTERRUPTIBLE(task)) { 660 - __wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task)); 661 - } else { 662 - __wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status); 663 /* 664 * When a sync task receives a signal, it exits with 665 * -ERESTARTSYS. In order to catch any callbacks that 666 * clean up after sleeping on some queue, we don't 667 * break the loop here, but go around once more. 
668 */ 669 - if (status == -ERESTARTSYS) { 670 - dprintk("RPC: %4d got signal\n", task->tk_pid); 671 - task->tk_flags |= RPC_TASK_KILLED; 672 - rpc_exit(task, -ERESTARTSYS); 673 - rpc_wake_up_task(task); 674 - } 675 } 676 rpc_set_running(task); 677 dprintk("RPC: %4d sync task resuming\n", task->tk_pid); 678 - } 679 - 680 - if (task->tk_exit) { 681 - lock_kernel(); 682 - task->tk_exit(task); 683 - unlock_kernel(); 684 - /* If tk_action is non-null, the user wants us to restart */ 685 - if (task->tk_action) { 686 - if (!RPC_ASSASSINATED(task)) { 687 - /* Release RPC slot and buffer memory */ 688 - if (task->tk_rqstp) 689 - xprt_release(task); 690 - rpc_free(task); 691 - goto restarted; 692 - } 693 - printk(KERN_ERR "RPC: dead task tries to walk away.\n"); 694 - } 695 } 696 697 dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status); ··· 773 774 /* Initialize workqueue for async tasks */ 775 task->tk_workqueue = rpciod_workqueue; 776 - if (!RPC_IS_ASYNC(task)) 777 - init_waitqueue_head(&task->u.tk_wait.waitq); 778 779 if (clnt) { 780 atomic_inc(&clnt->cl_users);
··· 290 return; 291 } 292 } else 293 + wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); 294 } 295 296 /* ··· 555 } 556 557 /* 558 + * Helper that calls task->tk_exit if it exists and then returns 559 + * true if we should exit __rpc_execute. 560 + */ 561 + static inline int __rpc_do_exit(struct rpc_task *task) 562 + { 563 + if (task->tk_exit != NULL) { 564 + lock_kernel(); 565 + task->tk_exit(task); 566 + unlock_kernel(); 567 + /* If tk_action is non-null, we should restart the call */ 568 + if (task->tk_action != NULL) { 569 + if (!RPC_ASSASSINATED(task)) { 570 + /* Release RPC slot and buffer memory */ 571 + xprt_release(task); 572 + rpc_free(task); 573 + return 0; 574 + } 575 + printk(KERN_ERR "RPC: dead task tried to walk away.\n"); 576 + } 577 + } 578 + return 1; 579 + } 580 + 581 + static int rpc_wait_bit_interruptible(void *word) 582 + { 583 + if (signal_pending(current)) 584 + return -ERESTARTSYS; 585 + schedule(); 586 + return 0; 587 + } 588 + 589 + /* 590 * This is the RPC `scheduler' (or rather, the finite state machine). 591 */ 592 static int __rpc_execute(struct rpc_task *task) ··· 566 567 BUG_ON(RPC_IS_QUEUED(task)); 568 569 + for (;;) { 570 /* 571 * Garbage collection of pending timers... 572 */ ··· 600 * by someone else. 601 */ 602 if (!RPC_IS_QUEUED(task)) { 603 + if (task->tk_action != NULL) { 604 + lock_kernel(); 605 + task->tk_action(task); 606 + unlock_kernel(); 607 + } else if (__rpc_do_exit(task)) 608 break; 609 } 610 611 /* ··· 624 625 /* sync task: sleep here */ 626 dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); 627 + /* Note: Caller should be using rpc_clnt_sigmask() */ 628 + status = out_of_line_wait_on_bit(&task->tk_runstate, 629 + RPC_TASK_QUEUED, rpc_wait_bit_interruptible, 630 + TASK_INTERRUPTIBLE); 631 + if (status == -ERESTARTSYS) { 632 /* 633 * When a sync task receives a signal, it exits with 634 * -ERESTARTSYS. 
In order to catch any callbacks that 635 * clean up after sleeping on some queue, we don't 636 * break the loop here, but go around once more. 637 */ 638 + dprintk("RPC: %4d got signal\n", task->tk_pid); 639 + task->tk_flags |= RPC_TASK_KILLED; 640 + rpc_exit(task, -ERESTARTSYS); 641 + rpc_wake_up_task(task); 642 } 643 rpc_set_running(task); 644 dprintk("RPC: %4d sync task resuming\n", task->tk_pid); 645 } 646 647 dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status); ··· 759 760 /* Initialize workqueue for async tasks */ 761 task->tk_workqueue = rpciod_workqueue; 762 763 if (clnt) { 764 atomic_inc(&clnt->cl_users);
+5 -1
net/sunrpc/sunrpc_syms.c
··· 42 /* RPC client functions */ 43 EXPORT_SYMBOL(rpc_create_client); 44 EXPORT_SYMBOL(rpc_clone_client); 45 EXPORT_SYMBOL(rpc_destroy_client); 46 EXPORT_SYMBOL(rpc_shutdown_client); 47 EXPORT_SYMBOL(rpc_release_client); ··· 62 63 /* Client transport */ 64 EXPORT_SYMBOL(xprt_create_proto); 65 - EXPORT_SYMBOL(xprt_destroy); 66 EXPORT_SYMBOL(xprt_set_timeout); 67 EXPORT_SYMBOL(xprt_udp_slot_table_entries); 68 EXPORT_SYMBOL(xprt_tcp_slot_table_entries); ··· 129 EXPORT_SYMBOL(xdr_encode_pages); 130 EXPORT_SYMBOL(xdr_inline_pages); 131 EXPORT_SYMBOL(xdr_shift_buf); 132 EXPORT_SYMBOL(xdr_buf_from_iov); 133 EXPORT_SYMBOL(xdr_buf_subsegment); 134 EXPORT_SYMBOL(xdr_buf_read_netobj);
··· 42 /* RPC client functions */ 43 EXPORT_SYMBOL(rpc_create_client); 44 EXPORT_SYMBOL(rpc_clone_client); 45 + EXPORT_SYMBOL(rpc_bind_new_program); 46 EXPORT_SYMBOL(rpc_destroy_client); 47 EXPORT_SYMBOL(rpc_shutdown_client); 48 EXPORT_SYMBOL(rpc_release_client); ··· 61 62 /* Client transport */ 63 EXPORT_SYMBOL(xprt_create_proto); 64 EXPORT_SYMBOL(xprt_set_timeout); 65 EXPORT_SYMBOL(xprt_udp_slot_table_entries); 66 EXPORT_SYMBOL(xprt_tcp_slot_table_entries); ··· 129 EXPORT_SYMBOL(xdr_encode_pages); 130 EXPORT_SYMBOL(xdr_inline_pages); 131 EXPORT_SYMBOL(xdr_shift_buf); 132 + EXPORT_SYMBOL(xdr_encode_word); 133 + EXPORT_SYMBOL(xdr_decode_word); 134 + EXPORT_SYMBOL(xdr_encode_array2); 135 + EXPORT_SYMBOL(xdr_decode_array2); 136 EXPORT_SYMBOL(xdr_buf_from_iov); 137 EXPORT_SYMBOL(xdr_buf_subsegment); 138 EXPORT_SYMBOL(xdr_buf_read_netobj);
+19 -17
net/sunrpc/svc.c
··· 35 if (!(serv = (struct svc_serv *) kmalloc(sizeof(*serv), GFP_KERNEL))) 36 return NULL; 37 memset(serv, 0, sizeof(*serv)); 38 serv->sv_program = prog; 39 serv->sv_nrthreads = 1; 40 serv->sv_stats = prog->pg_stats; 41 serv->sv_bufsz = bufsize? bufsize : 4096; 42 - prog->pg_lovers = prog->pg_nvers-1; 43 xdrsize = 0; 44 - for (vers=0; vers<prog->pg_nvers ; vers++) 45 - if (prog->pg_vers[vers]) { 46 - prog->pg_hivers = vers; 47 - if (prog->pg_lovers > vers) 48 - prog->pg_lovers = vers; 49 - if (prog->pg_vers[vers]->vs_xdrsize > xdrsize) 50 - xdrsize = prog->pg_vers[vers]->vs_xdrsize; 51 - } 52 serv->sv_xdrsize = xdrsize; 53 INIT_LIST_HEAD(&serv->sv_threads); 54 INIT_LIST_HEAD(&serv->sv_sockets); 55 INIT_LIST_HEAD(&serv->sv_tempsocks); 56 INIT_LIST_HEAD(&serv->sv_permsocks); 57 spin_lock_init(&serv->sv_lock); 58 - 59 - serv->sv_name = prog->pg_name; 60 61 /* Remove any stale portmap registrations */ 62 svc_register(serv, 0, 0); ··· 283 rqstp->rq_res.len = 0; 284 rqstp->rq_res.page_base = 0; 285 rqstp->rq_res.page_len = 0; 286 rqstp->rq_res.tail[0].iov_len = 0; 287 /* tcp needs a space for the record length... */ 288 if (rqstp->rq_prot == IPPROTO_TCP) ··· 341 goto sendit; 342 } 343 344 - if (prog != progp->pg_prog) 345 goto err_bad_prog; 346 347 if (vers >= progp->pg_nvers || ··· 457 goto sendit; 458 459 err_bad_prog: 460 - #ifdef RPC_PARANOIA 461 - if (prog != 100227 || progp->pg_prog != 100003) 462 - printk("svc: unknown program %d (me %d)\n", prog, progp->pg_prog); 463 - /* else it is just a Solaris client seeing if ACLs are supported */ 464 - #endif 465 serv->sv_stats->rpcbadfmt++; 466 svc_putu32(resv, rpc_prog_unavail); 467 goto sendit;
··· 35 if (!(serv = (struct svc_serv *) kmalloc(sizeof(*serv), GFP_KERNEL))) 36 return NULL; 37 memset(serv, 0, sizeof(*serv)); 38 + serv->sv_name = prog->pg_name; 39 serv->sv_program = prog; 40 serv->sv_nrthreads = 1; 41 serv->sv_stats = prog->pg_stats; 42 serv->sv_bufsz = bufsize? bufsize : 4096; 43 xdrsize = 0; 44 + while (prog) { 45 + prog->pg_lovers = prog->pg_nvers-1; 46 + for (vers=0; vers<prog->pg_nvers ; vers++) 47 + if (prog->pg_vers[vers]) { 48 + prog->pg_hivers = vers; 49 + if (prog->pg_lovers > vers) 50 + prog->pg_lovers = vers; 51 + if (prog->pg_vers[vers]->vs_xdrsize > xdrsize) 52 + xdrsize = prog->pg_vers[vers]->vs_xdrsize; 53 + } 54 + prog = prog->pg_next; 55 + } 56 serv->sv_xdrsize = xdrsize; 57 INIT_LIST_HEAD(&serv->sv_threads); 58 INIT_LIST_HEAD(&serv->sv_sockets); 59 INIT_LIST_HEAD(&serv->sv_tempsocks); 60 INIT_LIST_HEAD(&serv->sv_permsocks); 61 spin_lock_init(&serv->sv_lock); 62 63 /* Remove any stale portmap registrations */ 64 svc_register(serv, 0, 0); ··· 281 rqstp->rq_res.len = 0; 282 rqstp->rq_res.page_base = 0; 283 rqstp->rq_res.page_len = 0; 284 + rqstp->rq_res.buflen = PAGE_SIZE; 285 rqstp->rq_res.tail[0].iov_len = 0; 286 /* tcp needs a space for the record length... */ 287 if (rqstp->rq_prot == IPPROTO_TCP) ··· 338 goto sendit; 339 } 340 341 + for (progp = serv->sv_program; progp; progp = progp->pg_next) 342 + if (prog == progp->pg_prog) 343 + break; 344 + if (progp == NULL) 345 goto err_bad_prog; 346 347 if (vers >= progp->pg_nvers || ··· 451 goto sendit; 452 453 err_bad_prog: 454 + dprintk("svc: unknown program %d\n", prog); 455 serv->sv_stats->rpcbadfmt++; 456 svc_putu32(resv, rpc_prog_unavail); 457 goto sendit;
+288 -10
net/sunrpc/xdr.c
··· 176 xdr->buflen += len; 177 } 178 179 - void 180 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, 181 skb_reader_t *desc, 182 skb_read_actor_t copy_actor) 183 { 184 struct page **ppage = xdr->pages; 185 unsigned int len, pglen = xdr->page_len; 186 int ret; 187 188 len = xdr->head[0].iov_len; 189 if (base < len) { 190 len -= base; 191 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); 192 if (ret != len || !desc->count) 193 - return; 194 base = 0; 195 } else 196 base -= len; ··· 212 do { 213 char *kaddr; 214 215 len = PAGE_CACHE_SIZE; 216 kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); 217 if (base) { ··· 238 } 239 flush_dcache_page(*ppage); 240 kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); 241 if (ret != len || !desc->count) 242 - return; 243 ppage++; 244 } while ((pglen -= len) != 0); 245 copy_tail: 246 len = xdr->tail[0].iov_len; 247 if (base < len) 248 - copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); 249 } 250 251 ··· 632 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p) 633 { 634 struct kvec *iov = buf->head; 635 636 xdr->buf = buf; 637 xdr->iov = iov; 638 - xdr->end = (uint32_t *)((char *)iov->iov_base + iov->iov_len); 639 - buf->len = iov->iov_len = (char *)p - (char *)iov->iov_base; 640 - xdr->p = p; 641 } 642 EXPORT_SYMBOL(xdr_init_encode); 643 ··· 887 return status; 888 } 889 890 - static int 891 - read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj) 892 { 893 u32 raw; 894 int status; ··· 924 return status; 925 *obj = ntohl(raw); 926 return 0; 927 } 928 929 /* If the netobj starting offset bytes from the start of xdr_buf is contained ··· 944 u32 tail_offset = buf->head[0].iov_len + buf->page_len; 945 u32 obj_end_offset; 946 947 - if (read_u32_from_xdr_buf(buf, offset, &obj->len)) 948 goto out; 949 obj_end_offset = offset + 4 + obj->len; 950 ··· 976 return 0; 977 out: 978 return -1; 979 }
··· 176 xdr->buflen += len; 177 } 178 179 + ssize_t 180 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, 181 skb_reader_t *desc, 182 skb_read_actor_t copy_actor) 183 { 184 struct page **ppage = xdr->pages; 185 unsigned int len, pglen = xdr->page_len; 186 + ssize_t copied = 0; 187 int ret; 188 189 len = xdr->head[0].iov_len; 190 if (base < len) { 191 len -= base; 192 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); 193 + copied += ret; 194 if (ret != len || !desc->count) 195 + goto out; 196 base = 0; 197 } else 198 base -= len; ··· 210 do { 211 char *kaddr; 212 213 + /* ACL likes to be lazy in allocating pages - ACLs 214 + * are small by default but can get huge. */ 215 + if (unlikely(*ppage == NULL)) { 216 + *ppage = alloc_page(GFP_ATOMIC); 217 + if (unlikely(*ppage == NULL)) { 218 + if (copied == 0) 219 + copied = -ENOMEM; 220 + goto out; 221 + } 222 + } 223 + 224 len = PAGE_CACHE_SIZE; 225 kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); 226 if (base) { ··· 225 } 226 flush_dcache_page(*ppage); 227 kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); 228 + copied += ret; 229 if (ret != len || !desc->count) 230 + goto out; 231 ppage++; 232 } while ((pglen -= len) != 0); 233 copy_tail: 234 len = xdr->tail[0].iov_len; 235 if (base < len) 236 + copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); 237 + out: 238 + return copied; 239 } 240 241 ··· 616 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p) 617 { 618 struct kvec *iov = buf->head; 619 + int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len; 620 621 + BUG_ON(scratch_len < 0); 622 xdr->buf = buf; 623 xdr->iov = iov; 624 + xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len); 625 + xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len); 626 + BUG_ON(iov->iov_len > scratch_len); 627 + 628 + if (p != xdr->p && p != NULL) { 629 + size_t len; 630 + 631 + BUG_ON(p < xdr->p || p > xdr->end); 632 + len = (char *)p - 
(char *)xdr->p; 633 + xdr->p = p; 634 + buf->len += len; 635 + iov->iov_len += len; 636 + } 637 } 638 EXPORT_SYMBOL(xdr_init_encode); 639 ··· 859 return status; 860 } 861 862 + /* obj is assumed to point to allocated memory of size at least len: */ 863 + int 864 + write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len) 865 + { 866 + struct xdr_buf subbuf; 867 + int this_len; 868 + int status; 869 + 870 + status = xdr_buf_subsegment(buf, &subbuf, base, len); 871 + if (status) 872 + goto out; 873 + this_len = min(len, (int)subbuf.head[0].iov_len); 874 + memcpy(subbuf.head[0].iov_base, obj, this_len); 875 + len -= this_len; 876 + obj += this_len; 877 + this_len = min(len, (int)subbuf.page_len); 878 + if (this_len) 879 + _copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len); 880 + len -= this_len; 881 + obj += this_len; 882 + this_len = min(len, (int)subbuf.tail[0].iov_len); 883 + memcpy(subbuf.tail[0].iov_base, obj, this_len); 884 + out: 885 + return status; 886 + } 887 + 888 + int 889 + xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) 890 { 891 u32 raw; 892 int status; ··· 870 return status; 871 *obj = ntohl(raw); 872 return 0; 873 + } 874 + 875 + int 876 + xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) 877 + { 878 + u32 raw = htonl(obj); 879 + 880 + return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); 881 } 882 883 /* If the netobj starting offset bytes from the start of xdr_buf is contained ··· 882 u32 tail_offset = buf->head[0].iov_len + buf->page_len; 883 u32 obj_end_offset; 884 885 + if (xdr_decode_word(buf, offset, &obj->len)) 886 goto out; 887 obj_end_offset = offset + 4 + obj->len; 888 ··· 914 return 0; 915 out: 916 return -1; 917 + } 918 + 919 + /* Returns 0 on success, or else a negative error code. 
*/ 920 + static int 921 + xdr_xcode_array2(struct xdr_buf *buf, unsigned int base, 922 + struct xdr_array2_desc *desc, int encode) 923 + { 924 + char *elem = NULL, *c; 925 + unsigned int copied = 0, todo, avail_here; 926 + struct page **ppages = NULL; 927 + int err; 928 + 929 + if (encode) { 930 + if (xdr_encode_word(buf, base, desc->array_len) != 0) 931 + return -EINVAL; 932 + } else { 933 + if (xdr_decode_word(buf, base, &desc->array_len) != 0 || 934 + (unsigned long) base + 4 + desc->array_len * 935 + desc->elem_size > buf->len) 936 + return -EINVAL; 937 + } 938 + base += 4; 939 + 940 + if (!desc->xcode) 941 + return 0; 942 + 943 + todo = desc->array_len * desc->elem_size; 944 + 945 + /* process head */ 946 + if (todo && base < buf->head->iov_len) { 947 + c = buf->head->iov_base + base; 948 + avail_here = min_t(unsigned int, todo, 949 + buf->head->iov_len - base); 950 + todo -= avail_here; 951 + 952 + while (avail_here >= desc->elem_size) { 953 + err = desc->xcode(desc, c); 954 + if (err) 955 + goto out; 956 + c += desc->elem_size; 957 + avail_here -= desc->elem_size; 958 + } 959 + if (avail_here) { 960 + if (!elem) { 961 + elem = kmalloc(desc->elem_size, GFP_KERNEL); 962 + err = -ENOMEM; 963 + if (!elem) 964 + goto out; 965 + } 966 + if (encode) { 967 + err = desc->xcode(desc, elem); 968 + if (err) 969 + goto out; 970 + memcpy(c, elem, avail_here); 971 + } else 972 + memcpy(elem, c, avail_here); 973 + copied = avail_here; 974 + } 975 + base = buf->head->iov_len; /* align to start of pages */ 976 + } 977 + 978 + /* process pages array */ 979 + base -= buf->head->iov_len; 980 + if (todo && base < buf->page_len) { 981 + unsigned int avail_page; 982 + 983 + avail_here = min(todo, buf->page_len - base); 984 + todo -= avail_here; 985 + 986 + base += buf->page_base; 987 + ppages = buf->pages + (base >> PAGE_CACHE_SHIFT); 988 + base &= ~PAGE_CACHE_MASK; 989 + avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base, 990 + avail_here); 991 + c = kmap(*ppages) + base; 992 
+ 993 + while (avail_here) { 994 + avail_here -= avail_page; 995 + if (copied || avail_page < desc->elem_size) { 996 + unsigned int l = min(avail_page, 997 + desc->elem_size - copied); 998 + if (!elem) { 999 + elem = kmalloc(desc->elem_size, 1000 + GFP_KERNEL); 1001 + err = -ENOMEM; 1002 + if (!elem) 1003 + goto out; 1004 + } 1005 + if (encode) { 1006 + if (!copied) { 1007 + err = desc->xcode(desc, elem); 1008 + if (err) 1009 + goto out; 1010 + } 1011 + memcpy(c, elem + copied, l); 1012 + copied += l; 1013 + if (copied == desc->elem_size) 1014 + copied = 0; 1015 + } else { 1016 + memcpy(elem + copied, c, l); 1017 + copied += l; 1018 + if (copied == desc->elem_size) { 1019 + err = desc->xcode(desc, elem); 1020 + if (err) 1021 + goto out; 1022 + copied = 0; 1023 + } 1024 + } 1025 + avail_page -= l; 1026 + c += l; 1027 + } 1028 + while (avail_page >= desc->elem_size) { 1029 + err = desc->xcode(desc, c); 1030 + if (err) 1031 + goto out; 1032 + c += desc->elem_size; 1033 + avail_page -= desc->elem_size; 1034 + } 1035 + if (avail_page) { 1036 + unsigned int l = min(avail_page, 1037 + desc->elem_size - copied); 1038 + if (!elem) { 1039 + elem = kmalloc(desc->elem_size, 1040 + GFP_KERNEL); 1041 + err = -ENOMEM; 1042 + if (!elem) 1043 + goto out; 1044 + } 1045 + if (encode) { 1046 + if (!copied) { 1047 + err = desc->xcode(desc, elem); 1048 + if (err) 1049 + goto out; 1050 + } 1051 + memcpy(c, elem + copied, l); 1052 + copied += l; 1053 + if (copied == desc->elem_size) 1054 + copied = 0; 1055 + } else { 1056 + memcpy(elem + copied, c, l); 1057 + copied += l; 1058 + if (copied == desc->elem_size) { 1059 + err = desc->xcode(desc, elem); 1060 + if (err) 1061 + goto out; 1062 + copied = 0; 1063 + } 1064 + } 1065 + } 1066 + if (avail_here) { 1067 + kunmap(*ppages); 1068 + ppages++; 1069 + c = kmap(*ppages); 1070 + } 1071 + 1072 + avail_page = min(avail_here, 1073 + (unsigned int) PAGE_CACHE_SIZE); 1074 + } 1075 + base = buf->page_len; /* align to start of tail */ 1076 + } 1077 + 
1078 + /* process tail */ 1079 + base -= buf->page_len; 1080 + if (todo) { 1081 + c = buf->tail->iov_base + base; 1082 + if (copied) { 1083 + unsigned int l = desc->elem_size - copied; 1084 + 1085 + if (encode) 1086 + memcpy(c, elem + copied, l); 1087 + else { 1088 + memcpy(elem + copied, c, l); 1089 + err = desc->xcode(desc, elem); 1090 + if (err) 1091 + goto out; 1092 + } 1093 + todo -= l; 1094 + c += l; 1095 + } 1096 + while (todo) { 1097 + err = desc->xcode(desc, c); 1098 + if (err) 1099 + goto out; 1100 + c += desc->elem_size; 1101 + todo -= desc->elem_size; 1102 + } 1103 + } 1104 + err = 0; 1105 + 1106 + out: 1107 + if (elem) 1108 + kfree(elem); 1109 + if (ppages) 1110 + kunmap(*ppages); 1111 + return err; 1112 + } 1113 + 1114 + int 1115 + xdr_decode_array2(struct xdr_buf *buf, unsigned int base, 1116 + struct xdr_array2_desc *desc) 1117 + { 1118 + if (base >= buf->len) 1119 + return -EINVAL; 1120 + 1121 + return xdr_xcode_array2(buf, base, desc, 0); 1122 + } 1123 + 1124 + int 1125 + xdr_encode_array2(struct xdr_buf *buf, unsigned int base, 1126 + struct xdr_array2_desc *desc) 1127 + { 1128 + if ((unsigned long) base + 4 + desc->array_len * desc->elem_size > 1129 + buf->head->iov_len + buf->page_len + buf->tail->iov_len) 1130 + return -EINVAL; 1131 + 1132 + return xdr_xcode_array2(buf, base, desc, 1); 1133 }
+57 -14
net/sunrpc/xprt.c
··· 569 if (xprt->sock != NULL) 570 schedule_delayed_work(&xprt->sock_connect, 571 RPC_REESTABLISH_TIMEOUT); 572 - else 573 schedule_work(&xprt->sock_connect); 574 } 575 return; 576 out_write: ··· 728 goto no_checksum; 729 730 desc.csum = csum_partial(skb->data, desc.offset, skb->csum); 731 - xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits); 732 if (desc.offset != skb->len) { 733 unsigned int csum2; 734 csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); ··· 741 return -1; 742 return 0; 743 no_checksum: 744 - xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits); 745 if (desc.count) 746 return -1; 747 return 0; ··· 826 { 827 if (len > desc->count) 828 len = desc->count; 829 - if (skb_copy_bits(desc->skb, desc->offset, p, len)) 830 return 0; 831 desc->offset += len; 832 desc->count -= len; 833 return len; 834 } 835 ··· 873 static void 874 tcp_check_recm(struct rpc_xprt *xprt) 875 { 876 if (xprt->tcp_offset == xprt->tcp_reclen) { 877 xprt->tcp_flags |= XPRT_COPY_RECM; 878 xprt->tcp_offset = 0; ··· 919 struct rpc_rqst *req; 920 struct xdr_buf *rcvbuf; 921 size_t len; 922 923 /* Find and lock the request corresponding to this xid */ 924 spin_lock(&xprt->sock_lock); ··· 940 len = xprt->tcp_reclen - xprt->tcp_offset; 941 memcpy(&my_desc, desc, sizeof(my_desc)); 942 my_desc.count = len; 943 - xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, 944 &my_desc, tcp_copy_data); 945 - desc->count -= len; 946 - desc->offset += len; 947 } else 948 - xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, 949 desc, tcp_copy_data); 950 - xprt->tcp_copied += len; 951 - xprt->tcp_offset += len; 952 953 if (xprt->tcp_copied == req->rq_private_buf.buflen) 954 xprt->tcp_flags &= ~XPRT_COPY_DATA; ··· 982 xprt->tcp_flags &= ~XPRT_COPY_DATA; 983 } 984 985 if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { 986 dprintk("RPC: %4d received reply complete\n", 987 req->rq_task->tk_pid); ··· 1006 desc->count -= len; 1007 desc->offset += len; 1008 xprt->tcp_offset += len; 1009 
tcp_check_recm(xprt); 1010 } 1011 ··· 1104 case TCP_SYN_RECV: 1105 break; 1106 default: 1107 - if (xprt_test_and_clear_connected(xprt)) 1108 - rpc_wake_up_status(&xprt->pending, -ENOTCONN); 1109 break; 1110 } 1111 out: ··· 1242 list_add_tail(&req->rq_list, &xprt->recv); 1243 spin_unlock_bh(&xprt->sock_lock); 1244 xprt_reset_majortimeo(req); 1245 } 1246 } else if (!req->rq_bytes_sent) 1247 return; ··· 1374 spin_lock(&xprt->xprt_lock); 1375 do_xprt_reserve(task); 1376 spin_unlock(&xprt->xprt_lock); 1377 - if (task->tk_rqstp) 1378 - del_timer_sync(&xprt->timer); 1379 } 1380 } 1381 ··· 1688 rpc_wake_up(&xprt->backlog); 1689 wake_up(&xprt->cong_wait); 1690 del_timer_sync(&xprt->timer); 1691 } 1692 1693 /*
··· 569 if (xprt->sock != NULL) 570 schedule_delayed_work(&xprt->sock_connect, 571 RPC_REESTABLISH_TIMEOUT); 572 + else { 573 schedule_work(&xprt->sock_connect); 574 + if (!RPC_IS_ASYNC(task)) 575 + flush_scheduled_work(); 576 + } 577 } 578 return; 579 out_write: ··· 725 goto no_checksum; 726 727 desc.csum = csum_partial(skb->data, desc.offset, skb->csum); 728 + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) 729 + return -1; 730 if (desc.offset != skb->len) { 731 unsigned int csum2; 732 csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); ··· 737 return -1; 738 return 0; 739 no_checksum: 740 + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) 741 + return -1; 742 if (desc.count) 743 return -1; 744 return 0; ··· 821 { 822 if (len > desc->count) 823 len = desc->count; 824 + if (skb_copy_bits(desc->skb, desc->offset, p, len)) { 825 + dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", 826 + len, desc->count); 827 return 0; 828 + } 829 desc->offset += len; 830 desc->count -= len; 831 + dprintk("RPC: copied %zu bytes from skb. 
%zu bytes remain\n", 832 + len, desc->count); 833 return len; 834 } 835 ··· 863 static void 864 tcp_check_recm(struct rpc_xprt *xprt) 865 { 866 + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", 867 + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); 868 if (xprt->tcp_offset == xprt->tcp_reclen) { 869 xprt->tcp_flags |= XPRT_COPY_RECM; 870 xprt->tcp_offset = 0; ··· 907 struct rpc_rqst *req; 908 struct xdr_buf *rcvbuf; 909 size_t len; 910 + ssize_t r; 911 912 /* Find and lock the request corresponding to this xid */ 913 spin_lock(&xprt->sock_lock); ··· 927 len = xprt->tcp_reclen - xprt->tcp_offset; 928 memcpy(&my_desc, desc, sizeof(my_desc)); 929 my_desc.count = len; 930 + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, 931 &my_desc, tcp_copy_data); 932 + desc->count -= r; 933 + desc->offset += r; 934 } else 935 + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, 936 desc, tcp_copy_data); 937 + 938 + if (r > 0) { 939 + xprt->tcp_copied += r; 940 + xprt->tcp_offset += r; 941 + } 942 + if (r != len) { 943 + /* Error when copying to the receive buffer, 944 + * usually because we weren't able to allocate 945 + * additional buffer pages. All we can do now 946 + * is turn off XPRT_COPY_DATA, so the request 947 + * will not receive any additional updates, 948 + * and time out. 949 + * Any remaining data from this record will 950 + * be discarded. 
951 + */ 952 + xprt->tcp_flags &= ~XPRT_COPY_DATA; 953 + dprintk("RPC: XID %08x truncated request\n", 954 + ntohl(xprt->tcp_xid)); 955 + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", 956 + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); 957 + goto out; 958 + } 959 + 960 + dprintk("RPC: XID %08x read %u bytes\n", 961 + ntohl(xprt->tcp_xid), r); 962 + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", 963 + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); 964 965 if (xprt->tcp_copied == req->rq_private_buf.buflen) 966 xprt->tcp_flags &= ~XPRT_COPY_DATA; ··· 944 xprt->tcp_flags &= ~XPRT_COPY_DATA; 945 } 946 947 + out: 948 if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { 949 dprintk("RPC: %4d received reply complete\n", 950 req->rq_task->tk_pid); ··· 967 desc->count -= len; 968 desc->offset += len; 969 xprt->tcp_offset += len; 970 + dprintk("RPC: discarded %u bytes\n", len); 971 tcp_check_recm(xprt); 972 } 973 ··· 1064 case TCP_SYN_RECV: 1065 break; 1066 default: 1067 + xprt_disconnect(xprt); 1068 break; 1069 } 1070 out: ··· 1203 list_add_tail(&req->rq_list, &xprt->recv); 1204 spin_unlock_bh(&xprt->sock_lock); 1205 xprt_reset_majortimeo(req); 1206 + /* Turn off autodisconnect */ 1207 + del_singleshot_timer_sync(&xprt->timer); 1208 } 1209 } else if (!req->rq_bytes_sent) 1210 return; ··· 1333 spin_lock(&xprt->xprt_lock); 1334 do_xprt_reserve(task); 1335 spin_unlock(&xprt->xprt_lock); 1336 } 1337 } 1338 ··· 1649 rpc_wake_up(&xprt->backlog); 1650 wake_up(&xprt->cong_wait); 1651 del_timer_sync(&xprt->timer); 1652 + 1653 + /* synchronously wait for connect worker to finish */ 1654 + cancel_delayed_work(&xprt->sock_connect); 1655 + flush_scheduled_work(); 1656 } 1657 1658 /*