Merge git://git.linux-nfs.org/pub/linux/nfs-2.6

* git://git.linux-nfs.org/pub/linux/nfs-2.6: (122 commits)
sunrpc: drop BKL around wrap and unwrap
NFSv4: Make sure unlock is really an unlock when cancelling a lock
NLM: fix source address of callback to client
SUNRPC client: add interface for binding to a local address
SUNRPC server: record the destination address of a request
SUNRPC: cleanup transport creation argument passing
NFSv4: Make the NFS state model work with the nosharedcache mount option
NFS: Error when mounting the same filesystem with different options
NFS: Add the mount option "nosharecache"
NFS: Add support for mounting NFSv4 file systems with string options
NFS: Add final pieces to support in-kernel mount option parsing
NFS: Introduce generic mount client API
NFS: Add enums and match tables for mount option parsing
NFS: Improve debugging output in NFS in-kernel mount client
NFS: Clean up in-kernel NFS mount
NFS: Remake nfsroot_mount as a permanent part of NFS client
SUNRPC: Add a convenient default for the hostname when calling rpc_create()
SUNRPC: Rename rpcb_getport to be consistent with new rpcb_getport_sync name
SUNRPC: Rename rpcb_getport_external routine
SUNRPC: Allow rpcbind requests to be interrupted by a signal.
...

+3325 -1814
+22 -17
fs/lockd/host.c
··· 44 */ 45 static struct nlm_host * 46 nlm_lookup_host(int server, const struct sockaddr_in *sin, 47 - int proto, int version, 48 - const char *hostname, 49 - int hostname_len) 50 { 51 struct hlist_head *chain; 52 struct hlist_node *pos; ··· 53 struct nsm_handle *nsm = NULL; 54 int hash; 55 56 - dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n", 57 NIPQUAD(sin->sin_addr.s_addr), proto, version, 58 server? "server" : "client", 59 hostname_len, ··· 92 continue; 93 if (host->h_server != server) 94 continue; 95 96 /* Move to head of hash chain. */ 97 hlist_del(&host->h_hash); ··· 121 host->h_name = nsm->sm_name; 122 host->h_addr = *sin; 123 host->h_addr.sin_port = 0; /* ouch! */ 124 host->h_version = version; 125 host->h_proto = proto; 126 host->h_rpcclnt = NULL; ··· 165 */ 166 nsm_unmonitor(host); 167 168 - if ((clnt = host->h_rpcclnt) != NULL) { 169 - if (atomic_read(&clnt->cl_users)) { 170 - printk(KERN_WARNING 171 - "lockd: active RPC handle\n"); 172 - clnt->cl_dead = 1; 173 - } else { 174 - rpc_destroy_client(host->h_rpcclnt); 175 - } 176 - } 177 kfree(host); 178 } 179 ··· 178 nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version, 179 const char *hostname, int hostname_len) 180 { 181 return nlm_lookup_host(0, sin, proto, version, 182 - hostname, hostname_len); 183 } 184 185 /* ··· 191 nlmsvc_lookup_host(struct svc_rqst *rqstp, 192 const char *hostname, int hostname_len) 193 { 194 return nlm_lookup_host(1, svc_addr_in(rqstp), 195 rqstp->rq_prot, rqstp->rq_vers, 196 - hostname, hostname_len); 197 } 198 199 /* ··· 207 { 208 struct rpc_clnt *clnt; 209 210 - dprintk("lockd: nlm_bind_host(%08x)\n", 211 - (unsigned)ntohl(host->h_addr.sin_addr.s_addr)); 212 213 /* Lock host handle */ 214 mutex_lock(&host->h_mutex); ··· 236 .protocol = host->h_proto, 237 .address = (struct sockaddr *)&host->h_addr, 238 .addrsize = sizeof(host->h_addr), 239 .timeout = &timeparms, 240 .servername = host->h_name, 241 .program = &nlm_program,
··· 44 */ 45 static struct nlm_host * 46 nlm_lookup_host(int server, const struct sockaddr_in *sin, 47 + int proto, int version, const char *hostname, 48 + int hostname_len, const struct sockaddr_in *ssin) 49 { 50 struct hlist_head *chain; 51 struct hlist_node *pos; ··· 54 struct nsm_handle *nsm = NULL; 55 int hash; 56 57 + dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT 58 + ", p=%d, v=%d, my role=%s, name=%.*s)\n", 59 + NIPQUAD(ssin->sin_addr.s_addr), 60 NIPQUAD(sin->sin_addr.s_addr), proto, version, 61 server? "server" : "client", 62 hostname_len, ··· 91 continue; 92 if (host->h_server != server) 93 continue; 94 + if (!nlm_cmp_addr(&host->h_saddr, ssin)) 95 + continue; 96 97 /* Move to head of hash chain. */ 98 hlist_del(&host->h_hash); ··· 118 host->h_name = nsm->sm_name; 119 host->h_addr = *sin; 120 host->h_addr.sin_port = 0; /* ouch! */ 121 + host->h_saddr = *ssin; 122 host->h_version = version; 123 host->h_proto = proto; 124 host->h_rpcclnt = NULL; ··· 161 */ 162 nsm_unmonitor(host); 163 164 + clnt = host->h_rpcclnt; 165 + if (clnt != NULL) 166 + rpc_shutdown_client(clnt); 167 kfree(host); 168 } 169 ··· 180 nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version, 181 const char *hostname, int hostname_len) 182 { 183 + struct sockaddr_in ssin = {0}; 184 + 185 return nlm_lookup_host(0, sin, proto, version, 186 + hostname, hostname_len, &ssin); 187 } 188 189 /* ··· 191 nlmsvc_lookup_host(struct svc_rqst *rqstp, 192 const char *hostname, int hostname_len) 193 { 194 + struct sockaddr_in ssin = {0}; 195 + 196 + ssin.sin_addr = rqstp->rq_daddr.addr; 197 return nlm_lookup_host(1, svc_addr_in(rqstp), 198 rqstp->rq_prot, rqstp->rq_vers, 199 + hostname, hostname_len, &ssin); 200 } 201 202 /* ··· 204 { 205 struct rpc_clnt *clnt; 206 207 + dprintk("lockd: nlm_bind_host("NIPQUAD_FMT"->"NIPQUAD_FMT")\n", 208 + NIPQUAD(host->h_saddr.sin_addr), 209 + NIPQUAD(host->h_addr.sin_addr)); 210 211 /* Lock host handle */ 212 mutex_lock(&host->h_mutex); ··· 232 .protocol = host->h_proto, 233 .address = (struct sockaddr *)&host->h_addr, 234 .addrsize = sizeof(host->h_addr), 235 + .saddress = (struct sockaddr *)&host->h_saddr, 236 .timeout = &timeparms, 237 .servername = host->h_name, 238 .program = &nlm_program,
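The lookup above now keys on the request's destination address (ssin) as well, and the new .saddress field in the rpc_create_args tells the RPC client which local address to bind, so NLM callbacks leave from the address the peer originally contacted. A minimal sketch of an IPv4 comparator in the style of nlm_cmp_addr() (a plausible reconstruction, not necessarily the verbatim helper; ports are ignored since h_addr.sin_port is zeroed above):

static inline int nlm_cmp_addr(const struct sockaddr_in *sin1,
                               const struct sockaddr_in *sin2)
{
        /* Compare IPv4 addresses only; the port is not part of the key. */
        return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
}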
+1 -1
fs/lockd/mon.c
··· 61 status); 62 else 63 status = 0; 64 out: 65 return status; 66 } ··· 139 .program = &nsm_program, 140 .version = SM_VERSION, 141 .authflavor = RPC_AUTH_NULL, 142 - .flags = (RPC_CLNT_CREATE_ONESHOT), 143 }; 144 145 return rpc_create(&args);
··· 61 status); 62 else 63 status = 0; 64 + rpc_shutdown_client(clnt); 65 out: 66 return status; 67 } ··· 138 .program = &nsm_program, 139 .version = SM_VERSION, 140 .authflavor = RPC_AUTH_NULL, 141 }; 142 143 return rpc_create(&args);
-6
fs/lockd/svc.c
··· 123 /* Process request with signals blocked, but allow SIGKILL. */ 124 allow_signal(SIGKILL); 125 126 - /* kick rpciod */ 127 - rpciod_up(); 128 - 129 dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); 130 131 if (!nlm_timeout) ··· 198 199 /* Exit the RPC thread */ 200 svc_exit_thread(rqstp); 201 - 202 - /* release rpciod */ 203 - rpciod_down(); 204 205 /* Release module */ 206 unlock_kernel();
··· 123 /* Process request with signals blocked, but allow SIGKILL. */ 124 allow_signal(SIGKILL); 125 126 dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); 127 128 if (!nlm_timeout) ··· 201 202 /* Exit the RPC thread */ 203 svc_exit_thread(rqstp); 204 205 /* Release module */ 206 unlock_kernel();
+2 -2
fs/nfs/Makefile
··· 6 7 nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \ 8 pagelist.o proc.o read.o symlink.o unlink.o \ 9 - write.o namespace.o 10 - nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o 11 nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o 12 nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o 13 nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
··· 6 7 nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \ 8 pagelist.o proc.o read.o symlink.o unlink.o \ 9 + write.o namespace.o mount_clnt.o 10 + nfs-$(CONFIG_ROOT_NFS) += nfsroot.o 11 nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o 12 nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o 13 nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
+1 -27
fs/nfs/client.c
··· 102 int nfsversion) 103 { 104 struct nfs_client *clp; 105 - int error; 106 107 if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) 108 goto error_0; 109 - 110 - error = rpciod_up(); 111 - if (error < 0) { 112 - dprintk("%s: couldn't start rpciod! Error = %d\n", 113 - __FUNCTION__, error); 114 - goto error_1; 115 - } 116 - __set_bit(NFS_CS_RPCIOD, &clp->cl_res_state); 117 118 if (nfsversion == 4) { 119 if (nfs_callback_up() < 0) ··· 130 #ifdef CONFIG_NFS_V4 131 init_rwsem(&clp->cl_sem); 132 INIT_LIST_HEAD(&clp->cl_delegations); 133 - INIT_LIST_HEAD(&clp->cl_state_owners); 134 - INIT_LIST_HEAD(&clp->cl_unused); 135 spin_lock_init(&clp->cl_lock); 136 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); 137 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); ··· 143 if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) 144 nfs_callback_down(); 145 error_2: 146 - rpciod_down(); 147 - __clear_bit(NFS_CS_RPCIOD, &clp->cl_res_state); 148 - error_1: 149 kfree(clp); 150 error_0: 151 return NULL; ··· 153 #ifdef CONFIG_NFS_V4 154 if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) 155 nfs4_kill_renewd(clp); 156 - while (!list_empty(&clp->cl_unused)) { 157 - struct nfs4_state_owner *sp; 158 - 159 - sp = list_entry(clp->cl_unused.next, 160 - struct nfs4_state_owner, 161 - so_list); 162 - list_del(&sp->so_list); 163 - kfree(sp); 164 - } 165 - BUG_ON(!list_empty(&clp->cl_state_owners)); 166 if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) 167 nfs_idmap_delete(clp); 168 #endif ··· 174 175 if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) 176 nfs_callback_down(); 177 - 178 - if (__test_and_clear_bit(NFS_CS_RPCIOD, &clp->cl_res_state)) 179 - rpciod_down(); 180 181 kfree(clp->cl_hostname); 182 kfree(clp);
··· 102 int nfsversion) 103 { 104 struct nfs_client *clp; 105 106 if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) 107 goto error_0; 108 109 if (nfsversion == 4) { 110 if (nfs_callback_up() < 0) ··· 139 #ifdef CONFIG_NFS_V4 140 init_rwsem(&clp->cl_sem); 141 INIT_LIST_HEAD(&clp->cl_delegations); 142 spin_lock_init(&clp->cl_lock); 143 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); 144 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); ··· 154 if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) 155 nfs_callback_down(); 156 error_2: 157 kfree(clp); 158 error_0: 159 return NULL; ··· 167 #ifdef CONFIG_NFS_V4 168 if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) 169 nfs4_kill_renewd(clp); 170 + BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners)); 171 if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) 172 nfs_idmap_delete(clp); 173 #endif ··· 197 198 if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) 199 nfs_callback_down(); 200 201 kfree(clp->cl_hostname); 202 kfree(clp);
+110 -76
fs/nfs/delegation.c
··· 27 kfree(delegation); 28 } 29 30 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state) 31 { 32 struct inode *inode = state->inode; ··· 64 return status; 65 } 66 67 - static void nfs_delegation_claim_opens(struct inode *inode) 68 { 69 struct nfs_inode *nfsi = NFS_I(inode); 70 struct nfs_open_context *ctx; ··· 79 continue; 80 if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) 81 continue; 82 get_nfs_open_context(ctx); 83 spin_unlock(&inode->i_lock); 84 - err = nfs4_open_delegation_recall(ctx->dentry, state); 85 if (err >= 0) 86 err = nfs_delegation_claim_locks(ctx, state); 87 put_nfs_open_context(ctx); ··· 124 struct nfs_delegation *delegation; 125 int status = 0; 126 127 - /* Ensure we first revalidate the attributes and page cache! */ 128 - if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR))) 129 - __nfs_revalidate_inode(NFS_SERVER(inode), inode); 130 - 131 delegation = kmalloc(sizeof(*delegation), GFP_KERNEL); 132 if (delegation == NULL) 133 return -ENOMEM; ··· 136 delegation->inode = inode; 137 138 spin_lock(&clp->cl_lock); 139 - if (nfsi->delegation == NULL) { 140 - list_add(&delegation->super_list, &clp->cl_delegations); 141 - nfsi->delegation = delegation; 142 nfsi->delegation_state = delegation->type; 143 delegation = NULL; 144 } else { 145 if (memcmp(&delegation->stateid, &nfsi->delegation->stateid, ··· 150 status = -EIO; 151 } 152 } 153 spin_unlock(&clp->cl_lock); 154 kfree(delegation); 155 return status; ··· 166 int res = 0; 167 168 res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid); 169 - nfs_free_delegation(delegation); 170 return res; 171 } 172 ··· 181 /* 182 * Basic procedure for returning a delegation to the server 183 */ 184 - int __nfs_inode_return_delegation(struct inode *inode) 185 { 186 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 187 struct nfs_inode *nfsi = NFS_I(inode); 188 - struct nfs_delegation *delegation; 189 - int res = 0; 190 191 nfs_msync_inode(inode); 192 down_read(&clp->cl_sem); 193 /* Guard against new delegated open calls */ 194 down_write(&nfsi->rwsem); 195 - spin_lock(&clp->cl_lock); 196 - delegation = nfsi->delegation; 197 - if (delegation != NULL) { 198 - list_del_init(&delegation->super_list); 199 - nfsi->delegation = NULL; 200 - nfsi->delegation_state = 0; 201 - } 202 - spin_unlock(&clp->cl_lock); 203 - nfs_delegation_claim_opens(inode); 204 up_write(&nfsi->rwsem); 205 up_read(&clp->cl_sem); 206 nfs_msync_inode(inode); 207 208 - if (delegation != NULL) 209 - res = nfs_do_return_delegation(inode, delegation); 210 - return res; 211 } 212 213 /* ··· 244 if (clp == NULL) 245 return; 246 restart: 247 - spin_lock(&clp->cl_lock); 248 - list_for_each_entry(delegation, &clp->cl_delegations, super_list) { 249 if (delegation->inode->i_sb != sb) 250 continue; 251 inode = igrab(delegation->inode); 252 if (inode == NULL) 253 continue; 254 spin_unlock(&clp->cl_lock); 255 - nfs_inode_return_delegation(inode); 256 iput(inode); 257 goto restart; 258 } 259 - spin_unlock(&clp->cl_lock); 260 } 261 262 static int nfs_do_expire_all_delegations(void *ptr) ··· 271 272 allow_signal(SIGKILL); 273 restart: 274 - spin_lock(&clp->cl_lock); 275 if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0) 276 goto out; 277 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) 278 goto out; 279 - list_for_each_entry(delegation, &clp->cl_delegations, super_list) { 280 inode = igrab(delegation->inode); 281 if (inode == NULL) 282 continue; 283 spin_unlock(&clp->cl_lock); 284 - nfs_inode_return_delegation(inode); 285 iput(inode); 286 goto restart; 287 } 288 out: 289 - spin_unlock(&clp->cl_lock); 290 nfs_put_client(clp); 291 module_put_and_exit(0); 292 } ··· 321 if (clp == NULL) 322 return; 323 restart: 324 - spin_lock(&clp->cl_lock); 325 - list_for_each_entry(delegation, &clp->cl_delegations, super_list) { 326 inode = igrab(delegation->inode); 327 if (inode == NULL) 328 continue; 329 spin_unlock(&clp->cl_lock); 330 - nfs_inode_return_delegation(inode); 331 iput(inode); 332 goto restart; 333 } 334 - spin_unlock(&clp->cl_lock); 335 } 336 337 struct recall_threadargs { ··· 361 down_read(&clp->cl_sem); 362 down_write(&nfsi->rwsem); 363 spin_lock(&clp->cl_lock); 364 - delegation = nfsi->delegation; 365 - if (delegation != NULL && memcmp(delegation->stateid.data, 366 - args->stateid->data, 367 - sizeof(delegation->stateid.data)) == 0) { 368 - list_del_init(&delegation->super_list); 369 - nfsi->delegation = NULL; 370 - nfsi->delegation_state = 0; 371 args->result = 0; 372 - } else { 373 - delegation = NULL; 374 args->result = -ENOENT; 375 - } 376 spin_unlock(&clp->cl_lock); 377 complete(&args->started); 378 - nfs_delegation_claim_opens(inode); 379 up_write(&nfsi->rwsem); 380 up_read(&clp->cl_sem); 381 nfs_msync_inode(inode); ··· 409 { 410 struct nfs_delegation *delegation; 411 struct inode *res = NULL; 412 - spin_lock(&clp->cl_lock); 413 - list_for_each_entry(delegation, &clp->cl_delegations, super_list) { 414 if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { 415 res = igrab(delegation->inode); 416 break; 417 } 418 } 419 - spin_unlock(&clp->cl_lock); 420 return res; 421 } 422 ··· 426 void nfs_delegation_mark_reclaim(struct nfs_client *clp) 427 { 428 struct nfs_delegation *delegation; 429 - spin_lock(&clp->cl_lock); 430 - list_for_each_entry(delegation, &clp->cl_delegations, super_list) 431 delegation->flags |= NFS_DELEGATION_NEED_RECLAIM; 432 - spin_unlock(&clp->cl_lock); 433 } 434 435 /* ··· 437 */ 438 void nfs_delegation_reap_unclaimed(struct nfs_client *clp) 439 { 440 - struct nfs_delegation *delegation, *n; 441 - LIST_HEAD(head); 442 - spin_lock(&clp->cl_lock); 443 - list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) { 444 if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) 445 continue; 446 - list_move(&delegation->super_list, &head); 447 - NFS_I(delegation->inode)->delegation = NULL; 448 - NFS_I(delegation->inode)->delegation_state = 0; 449 } 450 - spin_unlock(&clp->cl_lock); 451 - while(!list_empty(&head)) { 452 - delegation = list_entry(head.next, struct nfs_delegation, super_list); 453 - list_del(&delegation->super_list); 454 - nfs_free_delegation(delegation); 455 - } 456 } 457 458 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode) 459 { 460 - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 461 struct nfs_inode *nfsi = NFS_I(inode); 462 struct nfs_delegation *delegation; 463 - int res = 0; 464 465 - if (nfsi->delegation_state == 0) 466 - return 0; 467 - spin_lock(&clp->cl_lock); 468 - delegation = nfsi->delegation; 469 if (delegation != NULL) { 470 memcpy(dst->data, delegation->stateid.data, sizeof(dst->data)); 471 - res = 1; 472 } 473 - spin_unlock(&clp->cl_lock); 474 - return res; 475 }
··· 27 kfree(delegation); 28 } 29 30 + static void nfs_free_delegation_callback(struct rcu_head *head) 31 + { 32 + struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu); 33 + 34 + nfs_free_delegation(delegation); 35 + } 36 + 37 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state) 38 { 39 struct inode *inode = state->inode; ··· 57 return status; 58 } 59 60 + static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid) 61 { 62 struct nfs_inode *nfsi = NFS_I(inode); 63 struct nfs_open_context *ctx; ··· 72 continue; 73 if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) 74 continue; 75 + if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0) 76 + continue; 77 get_nfs_open_context(ctx); 78 spin_unlock(&inode->i_lock); 79 + err = nfs4_open_delegation_recall(ctx, state, stateid); 80 if (err >= 0) 81 err = nfs_delegation_claim_locks(ctx, state); 82 put_nfs_open_context(ctx); ··· 115 struct nfs_delegation *delegation; 116 int status = 0; 117 118 delegation = kmalloc(sizeof(*delegation), GFP_KERNEL); 119 if (delegation == NULL) 120 return -ENOMEM; ··· 131 delegation->inode = inode; 132 133 spin_lock(&clp->cl_lock); 134 + if (rcu_dereference(nfsi->delegation) == NULL) { 135 + list_add_rcu(&delegation->super_list, &clp->cl_delegations); 136 nfsi->delegation_state = delegation->type; 137 + rcu_assign_pointer(nfsi->delegation, delegation); 138 delegation = NULL; 139 } else { 140 if (memcmp(&delegation->stateid, &nfsi->delegation->stateid, ··· 145 status = -EIO; 146 } 147 } 148 + 149 + /* Ensure we revalidate the attributes and page cache! */ 150 + spin_lock(&inode->i_lock); 151 + nfsi->cache_validity |= NFS_INO_REVAL_FORCED; 152 + spin_unlock(&inode->i_lock); 153 + 154 spin_unlock(&clp->cl_lock); 155 kfree(delegation); 156 return status; ··· 155 int res = 0; 156 157 res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid); 158 + call_rcu(&delegation->rcu, nfs_free_delegation_callback); 159 return res; 160 } 161 ··· 170 /* 171 * Basic procedure for returning a delegation to the server 172 */ 173 + static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation) 174 { 175 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 176 struct nfs_inode *nfsi = NFS_I(inode); 177 178 nfs_msync_inode(inode); 179 down_read(&clp->cl_sem); 180 /* Guard against new delegated open calls */ 181 down_write(&nfsi->rwsem); 182 + nfs_delegation_claim_opens(inode, &delegation->stateid); 183 up_write(&nfsi->rwsem); 184 up_read(&clp->cl_sem); 185 nfs_msync_inode(inode); 186 187 + return nfs_do_return_delegation(inode, delegation); 188 + } 189 + 190 + static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid) 191 + { 192 + struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation); 193 + 194 + if (delegation == NULL) 195 + goto nomatch; 196 + if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data, 197 + sizeof(delegation->stateid.data)) != 0) 198 + goto nomatch; 199 + list_del_rcu(&delegation->super_list); 200 + nfsi->delegation_state = 0; 201 + rcu_assign_pointer(nfsi->delegation, NULL); 202 + return delegation; 203 + nomatch: 204 + return NULL; 205 + } 206 + 207 + int nfs_inode_return_delegation(struct inode *inode) 208 + { 209 + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 210 + struct nfs_inode *nfsi = NFS_I(inode); 211 + struct nfs_delegation *delegation; 212 + int err = 0; 213 + 214 + if (rcu_dereference(nfsi->delegation) != NULL) { 215 + spin_lock(&clp->cl_lock); 216 + delegation = nfs_detach_delegation_locked(nfsi, NULL); 217 + spin_unlock(&clp->cl_lock); 218 + if (delegation != NULL) 219 + err = __nfs_inode_return_delegation(inode, delegation); 220 + } 221 + return err; 222 } 223 224 /* ··· 211 if (clp == NULL) 212 return; 213 restart: 214 + rcu_read_lock(); 215 + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 216 if (delegation->inode->i_sb != sb) 217 continue; 218 inode = igrab(delegation->inode); 219 if (inode == NULL) 220 continue; 221 + spin_lock(&clp->cl_lock); 222 + delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); 223 spin_unlock(&clp->cl_lock); 224 + rcu_read_unlock(); 225 + if (delegation != NULL) 226 + __nfs_inode_return_delegation(inode, delegation); 227 iput(inode); 228 goto restart; 229 } 230 + rcu_read_unlock(); 231 } 232 233 static int nfs_do_expire_all_delegations(void *ptr) ··· 234 235 allow_signal(SIGKILL); 236 restart: 237 if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0) 238 goto out; 239 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) 240 goto out; 241 + rcu_read_lock(); 242 + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 243 inode = igrab(delegation->inode); 244 if (inode == NULL) 245 continue; 246 + spin_lock(&clp->cl_lock); 247 + delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); 248 spin_unlock(&clp->cl_lock); 249 + rcu_read_unlock(); 250 + if (delegation) 251 + __nfs_inode_return_delegation(inode, delegation); 252 iput(inode); 253 goto restart; 254 } 255 + rcu_read_unlock(); 256 out: 257 nfs_put_client(clp); 258 module_put_and_exit(0); 259 } ··· 280 if (clp == NULL) 281 return; 282 restart: 283 + rcu_read_lock(); 284 + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 285 inode = igrab(delegation->inode); 286 if (inode == NULL) 287 continue; 288 + spin_lock(&clp->cl_lock); 289 + delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); 290 spin_unlock(&clp->cl_lock); 291 + rcu_read_unlock(); 292 + if (delegation != NULL) 293 + __nfs_inode_return_delegation(inode, delegation); 294 iput(inode); 295 goto restart; 296 } 297 + rcu_read_unlock(); 298 } 299 300 struct recall_threadargs { ··· 316 down_read(&clp->cl_sem); 317 down_write(&nfsi->rwsem); 318 spin_lock(&clp->cl_lock); 319 + delegation = nfs_detach_delegation_locked(nfsi, args->stateid); 320 + if (delegation != NULL) 321 args->result = 0; 322 + else 323 args->result = -ENOENT; 324 spin_unlock(&clp->cl_lock); 325 complete(&args->started); 326 + nfs_delegation_claim_opens(inode, args->stateid); 327 up_write(&nfsi->rwsem); 328 up_read(&clp->cl_sem); 329 nfs_msync_inode(inode); ··· 371 { 372 struct nfs_delegation *delegation; 373 struct inode *res = NULL; 374 + rcu_read_lock(); 375 + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 376 if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { 377 res = igrab(delegation->inode); 378 break; 379 } 380 } 381 + rcu_read_unlock(); 382 return res; 383 } 384 ··· 388 void nfs_delegation_mark_reclaim(struct nfs_client *clp) 389 { 390 struct nfs_delegation *delegation; 391 + rcu_read_lock(); 392 + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) 393 delegation->flags |= NFS_DELEGATION_NEED_RECLAIM; 394 + rcu_read_unlock(); 395 } 396 397 /* ··· 399 */ 400 void nfs_delegation_reap_unclaimed(struct nfs_client *clp) 401 { 402 + struct nfs_delegation *delegation; 403 + restart: 404 + rcu_read_lock(); 405 + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 406 if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) 407 continue; 408 + spin_lock(&clp->cl_lock); 409 + delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL); 410 + spin_unlock(&clp->cl_lock); 411 + rcu_read_unlock(); 412 + if (delegation != NULL) 413 + call_rcu(&delegation->rcu, nfs_free_delegation_callback); 414 + goto restart; 415 } 416 + rcu_read_unlock(); 417 } 418 419 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode) 420 { 421 struct nfs_inode *nfsi = NFS_I(inode); 422 struct nfs_delegation *delegation; 423 + int ret = 0; 424 425 + rcu_read_lock(); 426 + delegation = rcu_dereference(nfsi->delegation); 427 if (delegation != NULL) { 428 memcpy(dst->data, delegation->stateid.data, sizeof(dst->data)); 429 + ret = 1; 430 } 431 + rcu_read_unlock(); 432 + return ret; 433 }
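The rewritten delegation code above follows the standard RCU list discipline: readers walk cl_delegations under rcu_read_lock() with list_for_each_entry_rcu(), writers unlink under clp->cl_lock with list_del_rcu(), and frees are deferred past a grace period via call_rcu(). A self-contained sketch of that pattern with hypothetical names (my_node, my_list, my_lock), not the NFS structures themselves:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_node {
        struct list_head list;
        struct rcu_head rcu;
        int key;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

static void my_node_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct my_node, rcu));
}

static void my_node_insert(struct my_node *node)
{
        spin_lock(&my_lock);
        list_add_rcu(&node->list, &my_list);    /* publish to readers */
        spin_unlock(&my_lock);
}

static void my_node_remove(struct my_node *node)
{
        spin_lock(&my_lock);
        list_del_rcu(&node->list);      /* readers in flight may still see it */
        spin_unlock(&my_lock);
        call_rcu(&node->rcu, my_node_free_rcu); /* free after a grace period */
}

static int my_list_contains(int key)
{
        struct my_node *node;
        int ret = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(node, &my_list, list) {
                if (node->key == key) {
                        ret = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}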
+12 -14
fs/nfs/delegation.h
··· 22 long flags; 23 loff_t maxsize; 24 __u64 change_attr; 25 }; 26 27 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); 28 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); 29 - int __nfs_inode_return_delegation(struct inode *inode); 30 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); 31 32 struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle); ··· 40 41 /* NFSv4 delegation-related procedures */ 42 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid); 43 - int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state); 44 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl); 45 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode); 46 47 static inline int nfs_have_delegation(struct inode *inode, int flags) 48 { 49 flags &= FMODE_READ|FMODE_WRITE; 50 - smp_rmb(); 51 - if ((NFS_I(inode)->delegation_state & flags) == flags) 52 - return 1; 53 - return 0; 54 } 55 56 - static inline int nfs_inode_return_delegation(struct inode *inode) 57 - { 58 - int err = 0; 59 - 60 - if (NFS_I(inode)->delegation != NULL) 61 - err = __nfs_inode_return_delegation(inode); 62 - return err; 63 - } 64 #else 65 static inline int nfs_have_delegation(struct inode *inode, int flags) 66 {
··· 22 long flags; 23 loff_t maxsize; 24 __u64 change_attr; 25 + struct rcu_head rcu; 26 }; 27 28 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); 29 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); 30 + int nfs_inode_return_delegation(struct inode *inode); 31 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); 32 33 struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle); ··· 39 40 /* NFSv4 delegation-related procedures */ 41 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid); 42 + int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid); 43 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl); 44 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode); 45 46 static inline int nfs_have_delegation(struct inode *inode, int flags) 47 { 48 + struct nfs_delegation *delegation; 49 + int ret = 0; 50 + 51 flags &= FMODE_READ|FMODE_WRITE; 52 + rcu_read_lock(); 53 + delegation = rcu_dereference(NFS_I(inode)->delegation); 54 + if (delegation != NULL && (delegation->type & flags) == flags) 55 + ret = 1; 56 + rcu_read_unlock(); 57 + return ret; 58 } 59 60 #else 61 static inline int nfs_have_delegation(struct inode *inode, int flags) 62 {
+8 -8
fs/nfs/dir.c
··· 897 return (nd->intent.open.flags & O_EXCL) != 0; 898 } 899 900 - static inline int nfs_reval_fsid(struct vfsmount *mnt, struct inode *dir, 901 - struct nfs_fh *fh, struct nfs_fattr *fattr) 902 { 903 struct nfs_server *server = NFS_SERVER(dir); 904 905 if (!nfs_fsid_equal(&server->fsid, &fattr->fsid)) 906 - /* Revalidate fsid on root dir */ 907 - return __nfs_revalidate_inode(server, mnt->mnt_root->d_inode); 908 return 0; 909 } 910 ··· 945 res = ERR_PTR(error); 946 goto out_unlock; 947 } 948 - error = nfs_reval_fsid(nd->mnt, dir, &fhandle, &fattr); 949 if (error < 0) { 950 res = ERR_PTR(error); 951 goto out_unlock; ··· 1243 attr.ia_mode = mode; 1244 attr.ia_valid = ATTR_MODE; 1245 1246 - if (nd && (nd->flags & LOOKUP_CREATE)) 1247 open_flags = nd->intent.open.flags; 1248 1249 lock_kernel(); ··· 1534 1535 lock_kernel(); 1536 1537 - page = alloc_page(GFP_KERNEL); 1538 if (!page) { 1539 unlock_kernel(); 1540 return -ENOMEM; ··· 1743 struct nfs_inode *nfsi; 1744 struct nfs_access_entry *cache; 1745 1746 - spin_lock(&nfs_access_lru_lock); 1747 restart: 1748 list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) { 1749 struct inode *inode; 1750 ··· 1769 clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags); 1770 } 1771 spin_unlock(&inode->i_lock); 1772 iput(inode); 1773 goto restart; 1774 }
··· 897 return (nd->intent.open.flags & O_EXCL) != 0; 898 } 899 900 + static inline int nfs_reval_fsid(struct inode *dir, const struct nfs_fattr *fattr) 901 { 902 struct nfs_server *server = NFS_SERVER(dir); 903 904 if (!nfs_fsid_equal(&server->fsid, &fattr->fsid)) 905 + /* Revalidate fsid using the parent directory */ 906 + return __nfs_revalidate_inode(server, dir); 907 return 0; 908 } 909 ··· 946 res = ERR_PTR(error); 947 goto out_unlock; 948 } 949 + error = nfs_reval_fsid(dir, &fattr); 950 if (error < 0) { 951 res = ERR_PTR(error); 952 goto out_unlock; ··· 1244 attr.ia_mode = mode; 1245 attr.ia_valid = ATTR_MODE; 1246 1247 + if ((nd->flags & LOOKUP_CREATE) != 0) 1248 open_flags = nd->intent.open.flags; 1249 1250 lock_kernel(); ··· 1535 1536 lock_kernel(); 1537 1538 + page = alloc_page(GFP_HIGHUSER); 1539 if (!page) { 1540 unlock_kernel(); 1541 return -ENOMEM; ··· 1744 struct nfs_inode *nfsi; 1745 struct nfs_access_entry *cache; 1746 1747 restart: 1748 + spin_lock(&nfs_access_lru_lock); 1749 list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) { 1750 struct inode *inode; 1751 ··· 1770 clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags); 1771 } 1772 spin_unlock(&inode->i_lock); 1773 + spin_unlock(&nfs_access_lru_lock); 1774 iput(inode); 1775 goto restart; 1776 }
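Moving the spin_lock() under the restart label and unlocking before iput() reflects a common constraint: iput() can sleep, so it must not be called with nfs_access_lru_lock held. The resulting walk/drop/restart shape, reduced to a hypothetical example:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_entry {
        struct list_head lru;
        struct inode *inode;
};

static LIST_HEAD(my_lru);
static DEFINE_SPINLOCK(my_lru_lock);

static void my_reap(void)
{
        struct my_entry *entry;

restart:
        spin_lock(&my_lru_lock);
        list_for_each_entry(entry, &my_lru, lru) {
                struct inode *inode = entry->inode;

                list_del_init(&entry->lru);
                spin_unlock(&my_lru_lock);      /* iput() may sleep */
                iput(inode);
                goto restart;   /* the list may have changed meanwhile */
        }
        spin_unlock(&my_lru_lock);
}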
+21 -13
fs/nfs/direct.c
··· 266 static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos) 267 { 268 struct nfs_open_context *ctx = dreq->ctx; 269 - struct inode *inode = ctx->dentry->d_inode; 270 size_t rsize = NFS_SERVER(inode)->rsize; 271 unsigned int pgbase; 272 int result; ··· 295 break; 296 } 297 if ((unsigned)result < data->npages) { 298 - nfs_direct_release_pages(data->pagevec, result); 299 - nfs_readdata_release(data); 300 - break; 301 } 302 303 get_dreq(dreq); ··· 606 static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync) 607 { 608 struct nfs_open_context *ctx = dreq->ctx; 609 - struct inode *inode = ctx->dentry->d_inode; 610 size_t wsize = NFS_SERVER(inode)->wsize; 611 unsigned int pgbase; 612 int result; ··· 635 break; 636 } 637 if ((unsigned)result < data->npages) { 638 - nfs_direct_release_pages(data->pagevec, result); 639 - nfs_writedata_release(data); 640 - break; 641 } 642 643 get_dreq(dreq); ··· 773 (unsigned long) count, (long long) pos); 774 775 if (nr_segs != 1) 776 - return -EINVAL; 777 - 778 - if (count < 0) 779 goto out; 780 retval = -EFAULT; 781 if (!access_ok(VERIFY_WRITE, buf, count)) 782 goto out; ··· 822 ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov, 823 unsigned long nr_segs, loff_t pos) 824 { 825 - ssize_t retval; 826 struct file *file = iocb->ki_filp; 827 struct address_space *mapping = file->f_mapping; 828 /* XXX: temporary */ ··· 835 (unsigned long) count, (long long) pos); 836 837 if (nr_segs != 1) 838 - return -EINVAL; 839 840 retval = generic_write_checks(file, &pos, &count, 0); 841 if (retval)
··· 266 static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos) 267 { 268 struct nfs_open_context *ctx = dreq->ctx; 269 + struct inode *inode = ctx->path.dentry->d_inode; 270 size_t rsize = NFS_SERVER(inode)->rsize; 271 unsigned int pgbase; 272 int result; ··· 295 break; 296 } 297 if ((unsigned)result < data->npages) { 298 + bytes = result * PAGE_SIZE; 299 + if (bytes <= pgbase) { 300 + nfs_direct_release_pages(data->pagevec, result); 301 + nfs_readdata_release(data); 302 + break; 303 + } 304 + bytes -= pgbase; 305 + data->npages = result; 306 } 307 308 get_dreq(dreq); ··· 601 static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync) 602 { 603 struct nfs_open_context *ctx = dreq->ctx; 604 + struct inode *inode = ctx->path.dentry->d_inode; 605 size_t wsize = NFS_SERVER(inode)->wsize; 606 unsigned int pgbase; 607 int result; ··· 630 break; 631 } 632 if ((unsigned)result < data->npages) { 633 + bytes = result * PAGE_SIZE; 634 + if (bytes <= pgbase) { 635 + nfs_direct_release_pages(data->pagevec, result); 636 + nfs_writedata_release(data); 637 + break; 638 + } 639 + bytes -= pgbase; 640 + data->npages = result; 641 } 642 643 get_dreq(dreq); ··· 763 (unsigned long) count, (long long) pos); 764 765 if (nr_segs != 1) 766 goto out; 767 + 768 retval = -EFAULT; 769 if (!access_ok(VERIFY_WRITE, buf, count)) 770 goto out; ··· 814 ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov, 815 unsigned long nr_segs, loff_t pos) 816 { 817 + ssize_t retval = -EINVAL; 818 struct file *file = iocb->ki_filp; 819 struct address_space *mapping = file->f_mapping; 820 /* XXX: temporary */ ··· 827 (unsigned long) count, (long long) pos); 828 829 if (nr_segs != 1) 830 + goto out; 831 832 retval = generic_write_checks(file, &pos, &count, 0); 833 if (retval)
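Both schedule routines now cope with get_user_pages() pinning fewer pages than requested: the usable byte count is the pinned span minus the offset into the first page, and if the offset swallows the whole span the pages are released and the loop exits. The arithmetic in isolation, wrapped in a hypothetical helper:

#include <linux/mm.h>

/*
 * Bytes usable for I/O out of 'result' pinned pages, given an offset
 * of 'pgbase' into the first page; 0 means the caller should release
 * the pages and bail out.
 */
static size_t nfs_direct_usable_bytes(unsigned int result, unsigned int pgbase)
{
        size_t bytes = (size_t)result * PAGE_SIZE;      /* span actually pinned */

        if (bytes <= pgbase)
                return 0;
        return bytes - pgbase;
}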
+30 -43
fs/nfs/inode.c
··· 461 462 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 463 if (ctx != NULL) { 464 - atomic_set(&ctx->count, 1); 465 - ctx->dentry = dget(dentry); 466 - ctx->vfsmnt = mntget(mnt); 467 ctx->cred = get_rpccred(cred); 468 ctx->state = NULL; 469 ctx->lockowner = current->files; 470 ctx->error = 0; 471 ctx->dir_cookie = 0; 472 } 473 return ctx; 474 } ··· 476 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx) 477 { 478 if (ctx != NULL) 479 - atomic_inc(&ctx->count); 480 return ctx; 481 } 482 483 void put_nfs_open_context(struct nfs_open_context *ctx) 484 { 485 - if (atomic_dec_and_test(&ctx->count)) { 486 - if (!list_empty(&ctx->list)) { 487 - struct inode *inode = ctx->dentry->d_inode; 488 - spin_lock(&inode->i_lock); 489 - list_del(&ctx->list); 490 - spin_unlock(&inode->i_lock); 491 - } 492 - if (ctx->state != NULL) 493 - nfs4_close_state(ctx->state, ctx->mode); 494 - if (ctx->cred != NULL) 495 - put_rpccred(ctx->cred); 496 - dput(ctx->dentry); 497 - mntput(ctx->vfsmnt); 498 - kfree(ctx); 499 - } 500 } 501 502 /* ··· 967 goto out_changed; 968 969 server = NFS_SERVER(inode); 970 - /* Update the fsid if and only if this is the root directory */ 971 - if (inode == inode->i_sb->s_root->d_inode 972 && !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 973 server->fsid = fattr->fsid; 974 ··· 1072 invalid &= ~NFS_INO_INVALID_DATA; 1073 if (data_stable) 1074 invalid &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE); 1075 - if (!nfs_have_delegation(inode, FMODE_READ)) 1076 nfsi->cache_validity |= invalid; 1077 1078 return 0; 1079 out_changed: ··· 1111 */ 1112 void nfs4_clear_inode(struct inode *inode) 1113 { 1114 - struct nfs_inode *nfsi = NFS_I(inode); 1115 - 1116 /* If we are holding a delegation, return it! */ 1117 nfs_inode_return_delegation(inode); 1118 /* First call standard NFS clear_inode() code */ 1119 nfs_clear_inode(inode); 1120 - /* Now clear out any remaining state */ 1121 - while (!list_empty(&nfsi->open_states)) { 1122 - struct nfs4_state *state; 1123 - 1124 - state = list_entry(nfsi->open_states.next, 1125 - struct nfs4_state, 1126 - inode_states); 1127 - dprintk("%s(%s/%Ld): found unclaimed NFSv4 state %p\n", 1128 - __FUNCTION__, 1129 - inode->i_sb->s_id, 1130 - (long long)NFS_FILEID(inode), 1131 - state); 1132 - BUG_ON(atomic_read(&state->count) != 1); 1133 - nfs4_close_state(state, state->state); 1134 - } 1135 } 1136 #endif 1137 ··· 1156 struct nfs_inode *nfsi = (struct nfs_inode *) foo; 1157 1158 inode_init_once(&nfsi->vfs_inode); 1159 - spin_lock_init(&nfsi->req_lock); 1160 - INIT_LIST_HEAD(&nfsi->dirty); 1161 - INIT_LIST_HEAD(&nfsi->commit); 1162 INIT_LIST_HEAD(&nfsi->open_files); 1163 INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); 1164 INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1165 INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1166 atomic_set(&nfsi->data_updates, 0); 1167 - nfsi->ndirty = 0; 1168 nfsi->ncommit = 0; 1169 nfsi->npages = 0; 1170 nfs4_init_once(nfsi);
··· 461 462 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 463 if (ctx != NULL) { 464 + ctx->path.dentry = dget(dentry); 465 + ctx->path.mnt = mntget(mnt); 466 ctx->cred = get_rpccred(cred); 467 ctx->state = NULL; 468 ctx->lockowner = current->files; 469 ctx->error = 0; 470 ctx->dir_cookie = 0; 471 + kref_init(&ctx->kref); 472 } 473 return ctx; 474 } ··· 476 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx) 477 { 478 if (ctx != NULL) 479 + kref_get(&ctx->kref); 480 return ctx; 481 + } 482 + 483 + static void nfs_free_open_context(struct kref *kref) 484 + { 485 + struct nfs_open_context *ctx = container_of(kref, 486 + struct nfs_open_context, kref); 487 + 488 + if (!list_empty(&ctx->list)) { 489 + struct inode *inode = ctx->path.dentry->d_inode; 490 + spin_lock(&inode->i_lock); 491 + list_del(&ctx->list); 492 + spin_unlock(&inode->i_lock); 493 + } 494 + if (ctx->state != NULL) 495 + nfs4_close_state(&ctx->path, ctx->state, ctx->mode); 496 + if (ctx->cred != NULL) 497 + put_rpccred(ctx->cred); 498 + dput(ctx->path.dentry); 499 + mntput(ctx->path.mnt); 500 + kfree(ctx); 501 } 502 503 void put_nfs_open_context(struct nfs_open_context *ctx) 504 { 505 + kref_put(&ctx->kref, nfs_free_open_context); 506 } 507 508 /* ··· 961 goto out_changed; 962 963 server = NFS_SERVER(inode); 964 + /* Update the fsid? */ 965 + if (S_ISDIR(inode->i_mode) 966 && !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 967 server->fsid = fattr->fsid; 968 ··· 1066 invalid &= ~NFS_INO_INVALID_DATA; 1067 if (data_stable) 1068 invalid &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE); 1069 + if (!nfs_have_delegation(inode, FMODE_READ) || 1070 + (nfsi->cache_validity & NFS_INO_REVAL_FORCED)) 1071 nfsi->cache_validity |= invalid; 1072 + nfsi->cache_validity &= ~NFS_INO_REVAL_FORCED; 1073 1074 return 0; 1075 out_changed: ··· 1103 */ 1104 void nfs4_clear_inode(struct inode *inode) 1105 { 1106 /* If we are holding a delegation, return it! */ 1107 nfs_inode_return_delegation(inode); 1108 /* First call standard NFS clear_inode() code */ 1109 nfs_clear_inode(inode); 1110 } 1111 #endif 1112 ··· 1165 struct nfs_inode *nfsi = (struct nfs_inode *) foo; 1166 1167 inode_init_once(&nfsi->vfs_inode); 1168 INIT_LIST_HEAD(&nfsi->open_files); 1169 INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); 1170 INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1171 INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1172 atomic_set(&nfsi->data_updates, 0); 1173 nfsi->ncommit = 0; 1174 nfsi->npages = 0; 1175 nfs4_init_once(nfsi);
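The open-context changes above are a textbook atomic_t-to-kref conversion: kref_init() replaces atomic_set(..., 1), kref_get() replaces atomic_inc(), and the teardown moves into a release callback that kref_put() invokes when the last reference drops. The same pattern on a hypothetical structure:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_ctx {
        struct kref kref;
        /* ... payload ... */
};

static void my_ctx_release(struct kref *kref)
{
        struct my_ctx *ctx = container_of(kref, struct my_ctx, kref);

        /* all teardown lives here, run exactly once */
        kfree(ctx);
}

static struct my_ctx *my_ctx_get(struct my_ctx *ctx)
{
        kref_get(&ctx->kref);
        return ctx;
}

static void my_ctx_put(struct my_ctx *ctx)
{
        kref_put(&ctx->kref, my_ctx_release);
}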
+2 -2
fs/nfs/internal.h
··· 183 /* 184 * Calculate the number of 512byte blocks used. 185 */ 186 - static inline unsigned long nfs_calc_block_size(u64 tsize) 187 { 188 - loff_t used = (tsize + 511) >> 9; 189 return (used > ULONG_MAX) ? ULONG_MAX : used; 190 } 191
··· 183 /* 184 * Calculate the number of 512byte blocks used. 185 */ 186 + static inline blkcnt_t nfs_calc_block_size(u64 tsize) 187 { 188 + blkcnt_t used = (tsize + 511) >> 9; 189 return (used > ULONG_MAX) ? ULONG_MAX : used; 190 } 191
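The expression (tsize + 511) >> 9 is ceiling division by 512; the change is only that the result now travels in blkcnt_t rather than unsigned long. A few worked values, expressed as a hypothetical sanity-check helper rather than anything in the patch:

static void nfs_block_math_selftest(void)
{
        BUG_ON(nfs_calc_block_size(0) != 0);    /* empty file               */
        BUG_ON(nfs_calc_block_size(1) != 1);    /* one byte, one block      */
        BUG_ON(nfs_calc_block_size(512) != 1);  /* exactly one block        */
        BUG_ON(nfs_calc_block_size(4096) != 8); /* exact multiple: 4096/512 */
}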
+90 -79
fs/nfs/mount_clnt.c
··· 1 /* 2 - * linux/fs/nfs/mount_clnt.c 3 - * 4 - * MOUNT client to support NFSroot. 5 * 6 * Copyright (C) 1997, Olaf Kirch <okir@monad.swb.de> 7 */ ··· 16 #include <linux/nfs_fs.h> 17 18 #ifdef RPC_DEBUG 19 - # define NFSDBG_FACILITY NFSDBG_ROOT 20 #endif 21 22 - /* 23 - #define MOUNT_PROGRAM 100005 24 - #define MOUNT_VERSION 1 25 - #define MOUNT_MNT 1 26 - #define MOUNT_UMNT 3 27 - */ 28 - 29 - static struct rpc_clnt * mnt_create(char *, struct sockaddr_in *, 30 - int, int); 31 static struct rpc_program mnt_program; 32 33 struct mnt_fhstatus { 34 - unsigned int status; 35 - struct nfs_fh * fh; 36 }; 37 38 - /* 39 - * Obtain an NFS file handle for the given host and path 40 */ 41 - int 42 - nfsroot_mount(struct sockaddr_in *addr, char *path, struct nfs_fh *fh, 43 - int version, int protocol) 44 { 45 - struct rpc_clnt *mnt_clnt; 46 struct mnt_fhstatus result = { 47 .fh = fh 48 }; ··· 48 .rpc_argp = path, 49 .rpc_resp = &result, 50 }; 51 - char hostname[32]; 52 int status; 53 54 - dprintk("NFS: nfs_mount(%08x:%s)\n", 55 - (unsigned)ntohl(addr->sin_addr.s_addr), path); 56 57 - sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(addr->sin_addr.s_addr)); 58 - mnt_clnt = mnt_create(hostname, addr, version, protocol); 59 if (IS_ERR(mnt_clnt)) 60 - return PTR_ERR(mnt_clnt); 61 62 if (version == NFS_MNT3_VERSION) 63 msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT]; ··· 74 msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT]; 75 76 status = rpc_call_sync(mnt_clnt, &msg, 0); 77 - return status < 0? status : (result.status? -EACCES : 0); 78 - } 79 80 - static struct rpc_clnt * 81 - mnt_create(char *hostname, struct sockaddr_in *srvaddr, int version, 82 - int protocol) 83 - { 84 - struct rpc_create_args args = { 85 - .protocol = protocol, 86 - .address = (struct sockaddr *)srvaddr, 87 - .addrsize = sizeof(*srvaddr), 88 - .servername = hostname, 89 - .program = &mnt_program, 90 - .version = version, 91 - .authflavor = RPC_AUTH_UNIX, 92 - .flags = (RPC_CLNT_CREATE_ONESHOT | 93 - RPC_CLNT_CREATE_INTR), 94 - }; 95 96 - return rpc_create(&args); 97 } 98 99 /* 100 * XDR encode/decode functions for MOUNT 101 */ 102 - static int 103 - xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p, const char *path) 104 { 105 p = xdr_encode_string(p, path); 106 ··· 114 return 0; 115 } 116 117 - static int 118 - xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res) 119 { 120 struct nfs_fh *fh = res->fh; 121 ··· 126 return 0; 127 } 128 129 - static int 130 - xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res) 131 { 132 struct nfs_fh *fh = res->fh; 133 ··· 146 #define MNT_fhstatus_sz (1 + 8) 147 #define MNT_fhstatus3_sz (1 + 16) 148 149 - static struct rpc_procinfo mnt_procedures[] = { 150 - [MNTPROC_MNT] = { 151 - .p_proc = MNTPROC_MNT, 152 - .p_encode = (kxdrproc_t) xdr_encode_dirpath, 153 - .p_decode = (kxdrproc_t) xdr_decode_fhstatus, 154 - .p_arglen = MNT_dirpath_sz, 155 - .p_replen = MNT_fhstatus_sz, 156 - .p_statidx = MNTPROC_MNT, 157 - .p_name = "MOUNT", 158 }, 159 }; 160 161 static struct rpc_procinfo mnt3_procedures[] = { 162 - [MOUNTPROC3_MNT] = { 163 - .p_proc = MOUNTPROC3_MNT, 164 - .p_encode = (kxdrproc_t) xdr_encode_dirpath, 165 - .p_decode = (kxdrproc_t) xdr_decode_fhstatus3, 166 - .p_arglen = MNT_dirpath_sz, 167 - .p_replen = MNT_fhstatus3_sz, 168 - .p_statidx = MOUNTPROC3_MNT, 169 - .p_name = "MOUNT", 170 }, 171 }; 172 173 174 - static struct rpc_version mnt_version1 = { 175 - .number = 1, 176 - .nrprocs = 2, 177 - .procs = mnt_procedures 178 }; 179 180 - static struct rpc_version mnt_version3 = { 181 - .number = 3, 182 - .nrprocs = 2, 183 - .procs = mnt3_procedures 184 }; 185 186 - static struct rpc_version * mnt_version[] = { 187 NULL, 188 &mnt_version1, 189 NULL, 190 &mnt_version3, 191 }; 192 193 - static struct rpc_stat mnt_stats; 194 195 - static struct rpc_program mnt_program = { 196 .name = "mount", 197 .number = NFS_MNT_PROGRAM, 198 .nrvers = ARRAY_SIZE(mnt_version),
··· 1 /* 2 + * In-kernel MOUNT protocol client 3 * 4 * Copyright (C) 1997, Olaf Kirch <okir@monad.swb.de> 5 */ ··· 18 #include <linux/nfs_fs.h> 19 20 #ifdef RPC_DEBUG 21 + # define NFSDBG_FACILITY NFSDBG_MOUNT 22 #endif 23 24 static struct rpc_program mnt_program; 25 26 struct mnt_fhstatus { 27 + u32 status; 28 + struct nfs_fh *fh; 29 }; 30 31 + /** 32 + * nfs_mount - Obtain an NFS file handle for the given host and path 33 + * @addr: pointer to server's address 34 + * @len: size of server's address 35 + * @hostname: name of server host, or NULL 36 + * @path: pointer to string containing export path to mount 37 + * @version: mount version to use for this request 38 + * @protocol: transport protocol to use for this request 39 + * @fh: pointer to location to place returned file handle 40 + * 41 + * Uses default timeout parameters specified by underlying transport. 42 */ 43 + int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path, 44 + int version, int protocol, struct nfs_fh *fh) 45 { 46 struct mnt_fhstatus result = { 47 .fh = fh 48 }; ··· 52 .rpc_argp = path, 53 .rpc_resp = &result, 54 }; 55 + struct rpc_create_args args = { 56 + .protocol = protocol, 57 + .address = addr, 58 + .addrsize = len, 59 + .servername = hostname, 60 + .program = &mnt_program, 61 + .version = version, 62 + .authflavor = RPC_AUTH_UNIX, 63 + .flags = RPC_CLNT_CREATE_INTR, 64 + }; 65 + struct rpc_clnt *mnt_clnt; 66 int status; 67 68 + dprintk("NFS: sending MNT request for %s:%s\n", 69 + (hostname ? hostname : "server"), path); 70 71 + mnt_clnt = rpc_create(&args); 72 if (IS_ERR(mnt_clnt)) 73 + goto out_clnt_err; 74 75 if (version == NFS_MNT3_VERSION) 76 msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT]; ··· 69 msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT]; 70 71 status = rpc_call_sync(mnt_clnt, &msg, 0); 72 + rpc_shutdown_client(mnt_clnt); 73 74 + if (status < 0) 75 + goto out_call_err; 76 + if (result.status != 0) 77 + goto out_mnt_err; 78 79 + dprintk("NFS: MNT request succeeded\n"); 80 + status = 0; 81 + 82 + out: 83 + return status; 84 + 85 + out_clnt_err: 86 + status = PTR_ERR(mnt_clnt); 87 + dprintk("NFS: failed to create RPC client, status=%d\n", status); 88 + goto out; 89 + 90 + out_call_err: 91 + dprintk("NFS: failed to start MNT request, status=%d\n", status); 92 + goto out; 93 + 94 + out_mnt_err: 95 + dprintk("NFS: MNT server returned result %d\n", result.status); 96 + status = -EACCES; 97 + goto out; 98 } 99 100 /* 101 * XDR encode/decode functions for MOUNT 102 */ 103 + static int xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p, 104 + const char *path) 105 { 106 p = xdr_encode_string(p, path); 107 ··· 103 return 0; 104 } 105 106 + static int xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p, 107 + struct mnt_fhstatus *res) 108 { 109 struct nfs_fh *fh = res->fh; 110 ··· 115 return 0; 116 } 117 118 + static int xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p, 119 + struct mnt_fhstatus *res) 120 { 121 struct nfs_fh *fh = res->fh; 122 ··· 135 #define MNT_fhstatus_sz (1 + 8) 136 #define MNT_fhstatus3_sz (1 + 16) 137 138 + static struct rpc_procinfo mnt_procedures[] = { 139 + [MNTPROC_MNT] = { 140 + .p_proc = MNTPROC_MNT, 141 + .p_encode = (kxdrproc_t) xdr_encode_dirpath, 142 + .p_decode = (kxdrproc_t) xdr_decode_fhstatus, 143 + .p_arglen = MNT_dirpath_sz, 144 + .p_replen = MNT_fhstatus_sz, 145 + .p_statidx = MNTPROC_MNT, 146 + .p_name = "MOUNT", 147 }, 148 }; 149 150 static struct rpc_procinfo mnt3_procedures[] = { 151 + [MOUNTPROC3_MNT] = { 152 + .p_proc = MOUNTPROC3_MNT, 153 + .p_encode = (kxdrproc_t) xdr_encode_dirpath, 154 + .p_decode = (kxdrproc_t) xdr_decode_fhstatus3, 155 + .p_arglen = MNT_dirpath_sz, 156 + .p_replen = MNT_fhstatus3_sz, 157 + .p_statidx = MOUNTPROC3_MNT, 158 + .p_name = "MOUNT", 159 }, 160 }; 161 162 163 + static struct rpc_version mnt_version1 = { 164 + .number = 1, 165 + .nrprocs = 2, 166 + .procs = mnt_procedures, 167 }; 168 169 + static struct rpc_version mnt_version3 = { 170 + .number = 3, 171 + .nrprocs = 2, 172 + .procs = mnt3_procedures, 173 }; 174 175 + static struct rpc_version *mnt_version[] = { 176 NULL, 177 &mnt_version1, 178 NULL, 179 &mnt_version3, 180 }; 181 182 + static struct rpc_stat mnt_stats; 183 184 + static struct rpc_program mnt_program = { 185 .name = "mount", 186 .number = NFS_MNT_PROGRAM, 187 .nrvers = ARRAY_SIZE(mnt_version),
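Like fs/lockd/mon.c above, the mount client now creates a transient RPC client, makes one synchronous call, and tears the client down explicitly with rpc_shutdown_client() instead of relying on the dropped RPC_CLNT_CREATE_ONESHOT flag. The general shape, reduced to a hypothetical helper:

#include <linux/sunrpc/clnt.h>

static int one_shot_rpc_call(struct rpc_create_args *args,
                             struct rpc_message *msg)
{
        struct rpc_clnt *clnt;
        int status;

        clnt = rpc_create(args);
        if (IS_ERR(clnt))
                return PTR_ERR(clnt);

        status = rpc_call_sync(clnt, msg, 0);
        rpc_shutdown_client(clnt);      /* explicit teardown, no ONESHOT */
        return status;
}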
+3 -3
fs/nfs/nfs2xdr.c
··· 223 static int 224 nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 225 { 226 - struct rpc_auth *auth = req->rq_task->tk_auth; 227 unsigned int replen; 228 u32 offset = (u32)args->offset; 229 u32 count = args->count; ··· 380 nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args) 381 { 382 struct rpc_task *task = req->rq_task; 383 - struct rpc_auth *auth = task->tk_auth; 384 unsigned int replen; 385 u32 count = args->count; 386 ··· 541 static int 542 nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args) 543 { 544 - struct rpc_auth *auth = req->rq_task->tk_auth; 545 unsigned int replen; 546 547 p = xdr_encode_fhandle(p, args->fh);
··· 223 static int 224 nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 225 { 226 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 227 unsigned int replen; 228 u32 offset = (u32)args->offset; 229 u32 count = args->count; ··· 380 nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args) 381 { 382 struct rpc_task *task = req->rq_task; 383 + struct rpc_auth *auth = task->tk_msg.rpc_cred->cr_auth; 384 unsigned int replen; 385 u32 count = args->count; 386 ··· 541 static int 542 nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args) 543 { 544 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 545 unsigned int replen; 546 547 p = xdr_encode_fhandle(p, args->fh);
+1 -3
fs/nfs/nfs3proc.c
··· 335 * not sure this buys us anything (and I'd have 336 * to revamp the NFSv3 XDR code) */ 337 status = nfs3_proc_setattr(dentry, &fattr, sattr); 338 - if (status == 0) 339 - nfs_setattr_update_inode(dentry->d_inode, sattr); 340 - nfs_refresh_inode(dentry->d_inode, &fattr); 341 dprintk("NFS reply setattr (post-create): %d\n", status); 342 } 343 if (status != 0)
··· 335 * not sure this buys us anything (and I'd have 336 * to revamp the NFSv3 XDR code) */ 337 status = nfs3_proc_setattr(dentry, &fattr, sattr); 338 + nfs_post_op_update_inode(dentry->d_inode, &fattr); 339 dprintk("NFS reply setattr (post-create): %d\n", status); 340 } 341 if (status != 0)
+4 -4
fs/nfs/nfs3xdr.c
··· 319 static int 320 nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 321 { 322 - struct rpc_auth *auth = req->rq_task->tk_auth; 323 unsigned int replen; 324 u32 count = args->count; 325 ··· 458 static int 459 nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args) 460 { 461 - struct rpc_auth *auth = req->rq_task->tk_auth; 462 unsigned int replen; 463 u32 count = args->count; 464 ··· 643 nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p, 644 struct nfs3_getaclargs *args) 645 { 646 - struct rpc_auth *auth = req->rq_task->tk_auth; 647 unsigned int replen; 648 649 p = xdr_encode_fhandle(p, args->fh); ··· 773 static int 774 nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args) 775 { 776 - struct rpc_auth *auth = req->rq_task->tk_auth; 777 unsigned int replen; 778 779 p = xdr_encode_fhandle(p, args->fh);
··· 319 static int 320 nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 321 { 322 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 323 unsigned int replen; 324 u32 count = args->count; 325 ··· 458 static int 459 nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args) 460 { 461 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 462 unsigned int replen; 463 u32 count = args->count; 464 ··· 643 nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p, 644 struct nfs3_getaclargs *args) 645 { 646 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 647 unsigned int replen; 648 649 p = xdr_encode_fhandle(p, args->fh); ··· 773 static int 774 nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args) 775 { 776 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 777 unsigned int replen; 778 779 p = xdr_encode_fhandle(p, args->fh);
+26 -14
fs/nfs/nfs4_fs.h
··· 70 seqid->flags |= NFS_SEQID_CONFIRMED; 71 } 72 73 /* 74 * NFS4 state_owners and lock_owners are simply labels for ordered 75 * sequences of RPC calls. Their sole purpose is to provide once-only 76 * semantics by allowing the server to identify replayed requests. 77 */ 78 struct nfs4_state_owner { 79 - spinlock_t so_lock; 80 - struct list_head so_list; /* per-clientid list of state_owners */ 81 struct nfs_client *so_client; 82 - u32 so_id; /* 32-bit identifier, unique */ 83 - atomic_t so_count; 84 85 struct rpc_cred *so_cred; /* Associated cred */ 86 struct list_head so_states; 87 struct list_head so_delegations; 88 struct nfs_seqid_counter so_seqid; ··· 115 #define NFS_LOCK_INITIALIZED 1 116 int ls_flags; 117 struct nfs_seqid_counter ls_seqid; 118 - u32 ls_id; 119 nfs4_stateid ls_stateid; 120 atomic_t ls_count; 121 }; ··· 123 /* bits for nfs4_state->flags */ 124 enum { 125 LK_STATE_IN_USE, 126 - NFS_DELEGATED_STATE, 127 }; 128 129 struct nfs4_state { ··· 140 unsigned long flags; /* Do we hold any locks? */ 141 spinlock_t state_lock; /* Protects the lock_states list */ 142 143 - nfs4_stateid stateid; 144 145 - unsigned int n_rdonly; 146 - unsigned int n_wronly; 147 - unsigned int n_rdwr; 148 int state; /* State on the server (R,W, or RW) */ 149 atomic_t count; 150 }; ··· 178 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *); 179 extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); 180 extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); 181 - extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state); 182 extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); 183 extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); 184 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); ··· 202 203 /* nfs4state.c */ 204 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); 205 - extern u32 nfs4_alloc_lockowner_id(struct nfs_client *); 206 207 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); 208 extern void nfs4_put_state_owner(struct nfs4_state_owner *); 209 extern void nfs4_drop_state_owner(struct nfs4_state_owner *); 210 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); 211 extern void nfs4_put_open_state(struct nfs4_state *); 212 - extern void nfs4_close_state(struct nfs4_state *, mode_t); 213 extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t); 214 extern void nfs4_schedule_state_recovery(struct nfs_client *); 215 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); ··· 234 235 #else 236 237 - #define nfs4_close_state(a, b) do { } while (0) 238 239 #endif /* CONFIG_NFS_V4 */ 240 #endif /* __LINUX_FS_NFS_NFS4_FS.H */
··· 70 seqid->flags |= NFS_SEQID_CONFIRMED; 71 } 72 73 + struct nfs_unique_id { 74 + struct rb_node rb_node; 75 + __u64 id; 76 + }; 77 + 78 /* 79 * NFS4 state_owners and lock_owners are simply labels for ordered 80 * sequences of RPC calls. Their sole purpose is to provide once-only 81 * semantics by allowing the server to identify replayed requests. 82 */ 83 struct nfs4_state_owner { 84 + struct nfs_unique_id so_owner_id; 85 struct nfs_client *so_client; 86 + struct nfs_server *so_server; 87 + struct rb_node so_client_node; 88 89 struct rpc_cred *so_cred; /* Associated cred */ 90 + 91 + spinlock_t so_lock; 92 + atomic_t so_count; 93 struct list_head so_states; 94 struct list_head so_delegations; 95 struct nfs_seqid_counter so_seqid; ··· 108 #define NFS_LOCK_INITIALIZED 1 109 int ls_flags; 110 struct nfs_seqid_counter ls_seqid; 111 + struct nfs_unique_id ls_id; 112 nfs4_stateid ls_stateid; 113 atomic_t ls_count; 114 }; ··· 116 /* bits for nfs4_state->flags */ 117 enum { 118 LK_STATE_IN_USE, 119 + NFS_DELEGATED_STATE, /* Current stateid is delegation */ 120 + NFS_O_RDONLY_STATE, /* OPEN stateid has read-only state */ 121 + NFS_O_WRONLY_STATE, /* OPEN stateid has write-only state */ 122 + NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ 123 }; 124 125 struct nfs4_state { ··· 130 unsigned long flags; /* Do we hold any locks? */ 131 spinlock_t state_lock; /* Protects the lock_states list */ 132 133 + seqlock_t seqlock; /* Protects the stateid/open_stateid */ 134 + nfs4_stateid stateid; /* Current stateid: may be delegation */ 135 + nfs4_stateid open_stateid; /* OPEN stateid */ 136 137 + /* The following 3 fields are protected by owner->so_lock */ 138 + unsigned int n_rdonly; /* Number of read-only references */ 139 + unsigned int n_wronly; /* Number of write-only references */ 140 + unsigned int n_rdwr; /* Number of read/write references */ 141 int state; /* State on the server (R,W, or RW) */ 142 atomic_t count; 143 }; ··· 165 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *); 166 extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); 167 extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); 168 + extern int nfs4_do_close(struct path *path, struct nfs4_state *state); 169 extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); 170 extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); 171 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); ··· 189 190 /* nfs4state.c */ 191 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); 192 193 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); 194 extern void nfs4_put_state_owner(struct nfs4_state_owner *); 195 extern void nfs4_drop_state_owner(struct nfs4_state_owner *); 196 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); 197 extern void nfs4_put_open_state(struct nfs4_state *); 198 + extern void nfs4_close_state(struct path *, struct nfs4_state *, mode_t); 199 extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t); 200 extern void nfs4_schedule_state_recovery(struct nfs_client *); 201 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); ··· 222 223 #else 224 225 + #define nfs4_close_state(a, b, c) do { } while (0) 226 227 #endif /* CONFIG_NFS_V4 */ 228 #endif /* __LINUX_FS_NFS_NFS4_FS.H */
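The new seqlock in struct nfs4_state lets readers copy a stateid consistently without blocking the writer: the reader simply retries whenever the sequence count changed mid-copy. A minimal sketch of the read side (hypothetical helper name; the real consumers are in nfs4proc.c):

static void nfs4_stateid_copy_sketch(nfs4_stateid *dst,
                                     struct nfs4_state *state)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&state->seqlock);
                memcpy(dst->data, state->stateid.data, sizeof(dst->data));
        } while (read_seqretry(&state->seqlock, seq));
}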
+444 -316
fs/nfs/nfs4proc.c
··· 65 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry); 66 static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception); 67 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp); 68 69 /* Prevent leaks of NFSv4 errors into userland */ 70 int nfs4_map_errors(int err) ··· 215 } 216 217 struct nfs4_opendata { 218 - atomic_t count; 219 struct nfs_openargs o_arg; 220 struct nfs_openres o_res; 221 struct nfs_open_confirmargs c_arg; 222 struct nfs_open_confirmres c_res; 223 struct nfs_fattr f_attr; 224 struct nfs_fattr dir_attr; 225 - struct dentry *dentry; 226 struct dentry *dir; 227 struct nfs4_state_owner *owner; 228 struct iattr attrs; 229 unsigned long timestamp; 230 int rpc_status; 231 int cancelled; 232 }; 233 234 - static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 235 struct nfs4_state_owner *sp, int flags, 236 const struct iattr *attrs) 237 { 238 - struct dentry *parent = dget_parent(dentry); 239 struct inode *dir = parent->d_inode; 240 struct nfs_server *server = NFS_SERVER(dir); 241 struct nfs4_opendata *p; ··· 258 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); 259 if (p->o_arg.seqid == NULL) 260 goto err_free; 261 - atomic_set(&p->count, 1); 262 - p->dentry = dget(dentry); 263 p->dir = parent; 264 p->owner = sp; 265 atomic_inc(&sp->so_count); 266 p->o_arg.fh = NFS_FH(dir); 267 p->o_arg.open_flags = flags, 268 p->o_arg.clientid = server->nfs_client->cl_clientid; 269 - p->o_arg.id = sp->so_id; 270 - p->o_arg.name = &dentry->d_name; 271 p->o_arg.server = server; 272 p->o_arg.bitmask = server->attr_bitmask; 273 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 274 - p->o_res.f_attr = &p->f_attr; 275 - p->o_res.dir_attr = &p->dir_attr; 276 - p->o_res.server = server; 277 - nfs_fattr_init(&p->f_attr); 278 - nfs_fattr_init(&p->dir_attr); 279 if (flags & O_EXCL) { 280 u32 *s = (u32 *) p->o_arg.u.verifier.data; 281 s[0] = jiffies; ··· 282 p->c_arg.fh = &p->o_res.fh; 283 p->c_arg.stateid = &p->o_res.stateid; 284 p->c_arg.seqid = p->o_arg.seqid; 285 return p; 286 err_free: 287 kfree(p); ··· 292 return NULL; 293 } 294 295 - static void nfs4_opendata_free(struct nfs4_opendata *p) 296 { 297 - if (p != NULL && atomic_dec_and_test(&p->count)) { 298 - nfs_free_seqid(p->o_arg.seqid); 299 - nfs4_put_state_owner(p->owner); 300 - dput(p->dir); 301 - dput(p->dentry); 302 - kfree(p); 303 - } 304 } 305 306 - /* Helper for asynchronous RPC calls */ 307 - static int nfs4_call_async(struct rpc_clnt *clnt, 308 - const struct rpc_call_ops *tk_ops, void *calldata) 309 { 310 - struct rpc_task *task; 311 - 312 - if (!(task = rpc_new_task(clnt, RPC_TASK_ASYNC, tk_ops, calldata))) 313 - return -ENOMEM; 314 - rpc_execute(task); 315 - return 0; 316 } 317 318 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) ··· 324 return ret; 325 } 326 327 - static inline void update_open_stateflags(struct nfs4_state *state, mode_t open_flags) 328 { 329 switch (open_flags) { 330 case FMODE_WRITE: ··· 363 case FMODE_READ|FMODE_WRITE: 364 state->n_rdwr++; 365 } 366 } 367 368 - static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) 369 { 370 - struct inode *inode = state->inode; 371 372 open_flags &= (FMODE_READ|FMODE_WRITE); 373 - /* Protect against nfs4_find_state_byowner() */ 374 spin_lock(&state->owner->so_lock); 375 - spin_lock(&inode->i_lock); 376 - memcpy(&state->stateid, stateid, sizeof(state->stateid)); 377 update_open_stateflags(state, 
open_flags); 378 - nfs4_state_set_mode_locked(state, state->state | open_flags); 379 - spin_unlock(&inode->i_lock); 380 spin_unlock(&state->owner->so_lock); 381 } 382 383 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 384 { 385 struct inode *inode; 386 struct nfs4_state *state = NULL; 387 388 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 389 - goto out; 390 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); 391 if (IS_ERR(inode)) 392 - goto out; 393 state = nfs4_get_open_state(inode, data->owner); 394 if (state == NULL) 395 - goto put_inode; 396 - update_open_stateid(state, &data->o_res.stateid, data->o_arg.open_flags); 397 - put_inode: 398 iput(inode); 399 out: 400 return state; 401 } 402 403 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) ··· 552 return ERR_PTR(-ENOENT); 553 } 554 555 - static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, nfs4_stateid *stateid) 556 { 557 int ret; 558 559 opendata->o_arg.open_flags = openflags; 560 ret = _nfs4_proc_open(opendata); 561 if (ret != 0) 562 return ret; 563 - memcpy(stateid->data, opendata->o_res.stateid.data, 564 - sizeof(stateid->data)); 565 return 0; 566 } 567 568 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 569 { 570 - nfs4_stateid stateid; 571 struct nfs4_state *newstate; 572 - int mode = 0; 573 - int delegation = 0; 574 int ret; 575 576 /* memory barrier prior to reading state->n_* */ 577 smp_rmb(); 578 if (state->n_rdwr != 0) { 579 - ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &stateid); 580 if (ret != 0) 581 return ret; 582 - mode |= FMODE_READ|FMODE_WRITE; 583 - if (opendata->o_res.delegation_type != 0) 584 - delegation = opendata->o_res.delegation_type; 585 - smp_rmb(); 586 } 587 if (state->n_wronly != 0) { 588 - ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &stateid); 589 if (ret != 0) 590 return ret; 591 - mode |= FMODE_WRITE; 592 - if (opendata->o_res.delegation_type != 0) 593 - delegation = opendata->o_res.delegation_type; 594 - smp_rmb(); 595 } 596 if (state->n_rdonly != 0) { 597 - ret = nfs4_open_recover_helper(opendata, FMODE_READ, &stateid); 598 if (ret != 0) 599 return ret; 600 - mode |= FMODE_READ; 601 } 602 - clear_bit(NFS_DELEGATED_STATE, &state->flags); 603 - if (mode == 0) 604 - return 0; 605 - if (opendata->o_res.delegation_type == 0) 606 - opendata->o_res.delegation_type = delegation; 607 - opendata->o_arg.open_flags |= mode; 608 - newstate = nfs4_opendata_to_nfs4_state(opendata); 609 - if (newstate != NULL) { 610 - if (opendata->o_res.delegation_type != 0) { 611 - struct nfs_inode *nfsi = NFS_I(newstate->inode); 612 - int delegation_flags = 0; 613 - if (nfsi->delegation) 614 - delegation_flags = nfsi->delegation->flags; 615 - if (!(delegation_flags & NFS_DELEGATION_NEED_RECLAIM)) 616 - nfs_inode_set_delegation(newstate->inode, 617 - opendata->owner->so_cred, 618 - &opendata->o_res); 619 - else 620 - nfs_inode_reclaim_delegation(newstate->inode, 621 - opendata->owner->so_cred, 622 - &opendata->o_res); 623 - } 624 - nfs4_close_state(newstate, opendata->o_arg.open_flags); 625 } 626 - if (newstate != state) 627 - return -ESTALE; 628 return 0; 629 } 630 ··· 619 * OPEN_RECLAIM: 620 * reclaim state on the server after a reboot. 
621 */ 622 - static int _nfs4_do_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) 623 { 624 - struct nfs_delegation *delegation = NFS_I(state->inode)->delegation; 625 struct nfs4_opendata *opendata; 626 int delegation_type = 0; 627 int status; 628 629 - if (delegation != NULL) { 630 - if (!(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) { 631 - memcpy(&state->stateid, &delegation->stateid, 632 - sizeof(state->stateid)); 633 - set_bit(NFS_DELEGATED_STATE, &state->flags); 634 - return 0; 635 - } 636 - delegation_type = delegation->type; 637 - } 638 - opendata = nfs4_opendata_alloc(dentry, sp, 0, NULL); 639 if (opendata == NULL) 640 return -ENOMEM; 641 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; 642 opendata->o_arg.fh = NFS_FH(state->inode); 643 nfs_copy_fh(&opendata->o_res.fh, opendata->o_arg.fh); 644 opendata->o_arg.u.delegation_type = delegation_type; 645 status = nfs4_open_recover(opendata, state); 646 - nfs4_opendata_free(opendata); 647 return status; 648 } 649 650 - static int nfs4_do_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) 651 { 652 struct nfs_server *server = NFS_SERVER(state->inode); 653 struct nfs4_exception exception = { }; 654 int err; 655 do { 656 - err = _nfs4_do_open_reclaim(sp, state, dentry); 657 if (err != -NFS4ERR_DELAY) 658 break; 659 nfs4_handle_exception(server, err, &exception); ··· 665 ctx = nfs4_state_find_open_context(state); 666 if (IS_ERR(ctx)) 667 return PTR_ERR(ctx); 668 - ret = nfs4_do_open_reclaim(sp, state, ctx->dentry); 669 put_nfs_open_context(ctx); 670 return ret; 671 } 672 673 - static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) 674 { 675 struct nfs4_state_owner *sp = state->owner; 676 struct nfs4_opendata *opendata; 677 int ret; 678 679 - if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) 680 - return 0; 681 - opendata = nfs4_opendata_alloc(dentry, sp, 0, NULL); 682 if (opendata == NULL) 683 return -ENOMEM; 684 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; 685 - memcpy(opendata->o_arg.u.delegation.data, state->stateid.data, 686 sizeof(opendata->o_arg.u.delegation.data)); 687 ret = nfs4_open_recover(opendata, state); 688 - nfs4_opendata_free(opendata); 689 return ret; 690 } 691 692 - int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) 693 { 694 struct nfs4_exception exception = { }; 695 - struct nfs_server *server = NFS_SERVER(dentry->d_inode); 696 int err; 697 do { 698 - err = _nfs4_open_delegation_recall(dentry, state); 699 switch (err) { 700 case 0: 701 return err; ··· 733 memcpy(data->o_res.stateid.data, data->c_res.stateid.data, 734 sizeof(data->o_res.stateid.data)); 735 renew_lease(data->o_res.server, data->timestamp); 736 } 737 - nfs_increment_open_seqid(data->rpc_status, data->c_arg.seqid); 738 nfs_confirm_seqid(&data->owner->so_seqid, data->rpc_status); 739 } 740 741 static void nfs4_open_confirm_release(void *calldata) ··· 748 if (data->cancelled == 0) 749 goto out_free; 750 /* In case of error, no cleanup! 
*/ 751 - if (data->rpc_status != 0) 752 goto out_free; 753 nfs_confirm_seqid(&data->owner->so_seqid, 0); 754 state = nfs4_opendata_to_nfs4_state(data); 755 - if (state != NULL) 756 - nfs4_close_state(state, data->o_arg.open_flags); 757 out_free: 758 - nfs4_opendata_free(data); 759 } 760 761 static const struct rpc_call_ops nfs4_open_confirm_ops = { ··· 773 struct rpc_task *task; 774 int status; 775 776 - atomic_inc(&data->count); 777 - /* 778 - * If rpc_run_task() ends up calling ->rpc_release(), we 779 - * want to ensure that it takes the 'error' code path. 780 - */ 781 - data->rpc_status = -ENOMEM; 782 task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_confirm_ops, data); 783 if (IS_ERR(task)) 784 return PTR_ERR(task); ··· 802 803 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 804 return; 805 /* Update sequence id. */ 806 - data->o_arg.id = sp->so_id; 807 data->o_arg.clientid = sp->so_client->cl_clientid; 808 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 809 msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 810 data->timestamp = jiffies; 811 rpc_call_setup(task, &msg, 0); 812 } 813 814 static void nfs4_open_done(struct rpc_task *task, void *calldata) ··· 854 data->rpc_status = -ENOTDIR; 855 } 856 renew_lease(data->o_res.server, data->timestamp); 857 } 858 nfs_increment_open_seqid(data->rpc_status, data->o_arg.seqid); 859 } 860 861 static void nfs4_open_release(void *calldata) ··· 870 if (data->cancelled == 0) 871 goto out_free; 872 /* In case of error, no cleanup! */ 873 - if (data->rpc_status != 0) 874 goto out_free; 875 /* In case we need an open_confirm, no cleanup! */ 876 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 877 goto out_free; 878 nfs_confirm_seqid(&data->owner->so_seqid, 0); 879 state = nfs4_opendata_to_nfs4_state(data); 880 - if (state != NULL) 881 - nfs4_close_state(state, data->o_arg.open_flags); 882 out_free: 883 - nfs4_opendata_free(data); 884 } 885 886 static const struct rpc_call_ops nfs4_open_ops = { ··· 901 struct rpc_task *task; 902 int status; 903 904 - atomic_inc(&data->count); 905 - /* 906 - * If rpc_run_task() ends up calling ->rpc_release(), we 907 - * want to ensure that it takes the 'error' code path. 908 - */ 909 - data->rpc_status = -ENOMEM; 910 task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_ops, data); 911 if (IS_ERR(task)) 912 return PTR_ERR(task); ··· 915 } else 916 status = data->rpc_status; 917 rpc_put_task(task); 918 - if (status != 0) 919 return status; 920 921 if (o_arg->open_flags & O_CREAT) { ··· 928 if (status != 0) 929 return status; 930 } 931 - nfs_confirm_seqid(&data->owner->so_seqid, 0); 932 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 933 return server->nfs_client->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr); 934 return 0; ··· 943 mask |= MAY_READ; 944 if (openflags & FMODE_WRITE) 945 mask |= MAY_WRITE; 946 status = nfs_access_get_cached(inode, cred, &cache); 947 if (status == 0) 948 goto out; ··· 984 * reclaim state on the server after a network partition. 
985 * Assumes caller holds the appropriate lock 986 */ 987 - static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) 988 { 989 - struct inode *inode = state->inode; 990 - struct nfs_delegation *delegation = NFS_I(inode)->delegation; 991 struct nfs4_opendata *opendata; 992 - int openflags = state->state & (FMODE_READ|FMODE_WRITE); 993 int ret; 994 995 - if (delegation != NULL && !(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) { 996 - ret = _nfs4_do_access(inode, sp->so_cred, openflags); 997 - if (ret < 0) 998 - return ret; 999 - memcpy(&state->stateid, &delegation->stateid, sizeof(state->stateid)); 1000 - set_bit(NFS_DELEGATED_STATE, &state->flags); 1001 - return 0; 1002 - } 1003 - opendata = nfs4_opendata_alloc(dentry, sp, openflags, NULL); 1004 if (opendata == NULL) 1005 return -ENOMEM; 1006 ret = nfs4_open_recover(opendata, state); 1007 if (ret == -ESTALE) { 1008 /* Invalidate the state owner so we don't ever use it again */ 1009 - nfs4_drop_state_owner(sp); 1010 - d_drop(dentry); 1011 } 1012 - nfs4_opendata_free(opendata); 1013 return ret; 1014 } 1015 1016 - static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry) 1017 { 1018 - struct nfs_server *server = NFS_SERVER(dentry->d_inode); 1019 struct nfs4_exception exception = { }; 1020 int err; 1021 1022 do { 1023 - err = _nfs4_open_expired(sp, state, dentry); 1024 if (err == -NFS4ERR_DELAY) 1025 nfs4_handle_exception(server, err, &exception); 1026 } while (exception.retry); ··· 1024 ctx = nfs4_state_find_open_context(state); 1025 if (IS_ERR(ctx)) 1026 return PTR_ERR(ctx); 1027 - ret = nfs4_do_open_expired(sp, state, ctx->dentry); 1028 put_nfs_open_context(ctx); 1029 return ret; 1030 } 1031 1032 /* 1033 - * Returns a referenced nfs4_state if there is an open delegation on the file 1034 */ 1035 - static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred, struct nfs4_state **res) 1036 { 1037 - struct nfs_delegation *delegation; 1038 - struct nfs_server *server = NFS_SERVER(inode); 1039 - struct nfs_client *clp = server->nfs_client; 1040 - struct nfs_inode *nfsi = NFS_I(inode); 1041 - struct nfs4_state_owner *sp = NULL; 1042 - struct nfs4_state *state = NULL; 1043 - int open_flags = flags & (FMODE_READ|FMODE_WRITE); 1044 - int err; 1045 1046 - err = -ENOMEM; 1047 - if (!(sp = nfs4_get_state_owner(server, cred))) { 1048 - dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__); 1049 - return err; 1050 - } 1051 - err = nfs4_recover_expired_lease(server); 1052 - if (err != 0) 1053 - goto out_put_state_owner; 1054 - /* Protect against reboot recovery - NOTE ORDER! 
*/ 1055 - down_read(&clp->cl_sem); 1056 - /* Protect against delegation recall */ 1057 - down_read(&nfsi->rwsem); 1058 - delegation = NFS_I(inode)->delegation; 1059 - err = -ENOENT; 1060 - if (delegation == NULL || (delegation->type & open_flags) != open_flags) 1061 - goto out_err; 1062 - err = -ENOMEM; 1063 - state = nfs4_get_open_state(inode, sp); 1064 - if (state == NULL) 1065 - goto out_err; 1066 - 1067 - err = -ENOENT; 1068 - if ((state->state & open_flags) == open_flags) { 1069 - spin_lock(&inode->i_lock); 1070 - update_open_stateflags(state, open_flags); 1071 - spin_unlock(&inode->i_lock); 1072 - goto out_ok; 1073 - } else if (state->state != 0) 1074 - goto out_put_open_state; 1075 - 1076 - lock_kernel(); 1077 - err = _nfs4_do_access(inode, cred, open_flags); 1078 - unlock_kernel(); 1079 - if (err != 0) 1080 - goto out_put_open_state; 1081 - set_bit(NFS_DELEGATED_STATE, &state->flags); 1082 - update_open_stateid(state, &delegation->stateid, open_flags); 1083 - out_ok: 1084 - nfs4_put_state_owner(sp); 1085 - up_read(&nfsi->rwsem); 1086 - up_read(&clp->cl_sem); 1087 - *res = state; 1088 - return 0; 1089 - out_put_open_state: 1090 - nfs4_put_open_state(state); 1091 - out_err: 1092 - up_read(&nfsi->rwsem); 1093 - up_read(&clp->cl_sem); 1094 - if (err != -EACCES) 1095 - nfs_inode_return_delegation(inode); 1096 - out_put_state_owner: 1097 - nfs4_put_state_owner(sp); 1098 - return err; 1099 - } 1100 - 1101 - static struct nfs4_state *nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred) 1102 - { 1103 - struct nfs4_exception exception = { }; 1104 - struct nfs4_state *res = ERR_PTR(-EIO); 1105 - int err; 1106 - 1107 - do { 1108 - err = _nfs4_open_delegated(inode, flags, cred, &res); 1109 - if (err == 0) 1110 - break; 1111 - res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(inode), 1112 - err, &exception)); 1113 - } while (exception.retry); 1114 - return res; 1115 } 1116 1117 /* 1118 * Returns a referenced nfs4_state 1119 */ 1120 - static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) 1121 { 1122 struct nfs4_state_owner *sp; 1123 struct nfs4_state *state = NULL; 1124 struct nfs_server *server = NFS_SERVER(dir); 1125 struct nfs_client *clp = server->nfs_client; 1126 struct nfs4_opendata *opendata; 1127 - int status; 1128 1129 /* Protect against reboot recovery conflicts */ 1130 status = -ENOMEM; ··· 1066 status = nfs4_recover_expired_lease(server); 1067 if (status != 0) 1068 goto err_put_state_owner; 1069 down_read(&clp->cl_sem); 1070 status = -ENOMEM; 1071 - opendata = nfs4_opendata_alloc(dentry, sp, flags, sattr); 1072 if (opendata == NULL) 1073 goto err_release_rwsem; 1074 1075 status = _nfs4_proc_open(opendata); 1076 if (status != 0) 1077 - goto err_opendata_free; 1078 1079 - status = -ENOMEM; 1080 state = nfs4_opendata_to_nfs4_state(opendata); 1081 - if (state == NULL) 1082 - goto err_opendata_free; 1083 - if (opendata->o_res.delegation_type != 0) 1084 - nfs_inode_set_delegation(state->inode, cred, &opendata->o_res); 1085 - nfs4_opendata_free(opendata); 1086 nfs4_put_state_owner(sp); 1087 up_read(&clp->cl_sem); 1088 *res = state; 1089 return 0; 1090 - err_opendata_free: 1091 - nfs4_opendata_free(opendata); 1092 err_release_rwsem: 1093 up_read(&clp->cl_sem); 1094 err_put_state_owner: ··· 1105 } 1106 1107 1108 - static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, struct iattr *sattr, struct rpc_cred *cred) 1109 { 1110 struct nfs4_exception 
exception = { }; 1111 struct nfs4_state *res; 1112 int status; 1113 1114 do { 1115 - status = _nfs4_do_open(dir, dentry, flags, sattr, cred, &res); 1116 if (status == 0) 1117 break; 1118 /* NOTE: BAD_SEQID means the server and client disagree about the ··· 1127 * the user though... 1128 */ 1129 if (status == -NFS4ERR_BAD_SEQID) { 1130 - printk(KERN_WARNING "NFS: v4 server returned a bad sequence-id error!\n"); 1131 exception.retry = 1; 1132 continue; 1133 } ··· 1140 * on Page 181 of RFC3530. 1141 */ 1142 if (status == -NFS4ERR_BAD_STATEID) { 1143 exception.retry = 1; 1144 continue; 1145 } ··· 1207 } 1208 1209 struct nfs4_closedata { 1210 struct inode *inode; 1211 struct nfs4_state *state; 1212 struct nfs_closeargs arg; ··· 1224 nfs4_put_open_state(calldata->state); 1225 nfs_free_seqid(calldata->arg.seqid); 1226 nfs4_put_state_owner(sp); 1227 kfree(calldata); 1228 } 1229 ··· 1243 nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid); 1244 switch (task->tk_status) { 1245 case 0: 1246 - memcpy(&state->stateid, &calldata->res.stateid, 1247 - sizeof(state->stateid)); 1248 renew_lease(server, calldata->timestamp); 1249 break; 1250 case -NFS4ERR_STALE_STATEID: ··· 1268 .rpc_resp = &calldata->res, 1269 .rpc_cred = state->owner->so_cred, 1270 }; 1271 - int mode = 0, old_mode; 1272 1273 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 1274 return; 1275 - /* Recalculate the new open mode in case someone reopened the file 1276 - * while we were waiting in line to be scheduled. 1277 - */ 1278 spin_lock(&state->owner->so_lock); 1279 - spin_lock(&calldata->inode->i_lock); 1280 - mode = old_mode = state->state; 1281 if (state->n_rdwr == 0) { 1282 - if (state->n_rdonly == 0) 1283 mode &= ~FMODE_READ; 1284 - if (state->n_wronly == 0) 1285 mode &= ~FMODE_WRITE; 1286 } 1287 - nfs4_state_set_mode_locked(state, mode); 1288 - spin_unlock(&calldata->inode->i_lock); 1289 spin_unlock(&state->owner->so_lock); 1290 - if (mode == old_mode || test_bit(NFS_DELEGATED_STATE, &state->flags)) { 1291 /* Note: exit _without_ calling nfs4_close_done */ 1292 task->tk_action = NULL; 1293 return; ··· 1321 * 1322 * NOTE: Caller must be holding the sp->so_owner semaphore! 
1323 */ 1324 - int nfs4_do_close(struct inode *inode, struct nfs4_state *state) 1325 { 1326 - struct nfs_server *server = NFS_SERVER(inode); 1327 struct nfs4_closedata *calldata; 1328 int status = -ENOMEM; 1329 1330 calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); 1331 if (calldata == NULL) 1332 goto out; 1333 - calldata->inode = inode; 1334 calldata->state = state; 1335 - calldata->arg.fh = NFS_FH(inode); 1336 - calldata->arg.stateid = &state->stateid; 1337 /* Serialization for the sequence id */ 1338 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); 1339 if (calldata->arg.seqid == NULL) ··· 1343 calldata->arg.bitmask = server->attr_bitmask; 1344 calldata->res.fattr = &calldata->fattr; 1345 calldata->res.server = server; 1346 1347 - status = nfs4_call_async(server->client, &nfs4_close_ops, calldata); 1348 - if (status == 0) 1349 - goto out; 1350 - 1351 - nfs_free_seqid(calldata->arg.seqid); 1352 out_free_calldata: 1353 kfree(calldata); 1354 out: 1355 return status; 1356 } 1357 1358 - static int nfs4_intent_set_file(struct nameidata *nd, struct dentry *dentry, struct nfs4_state *state) 1359 { 1360 struct file *filp; 1361 1362 - filp = lookup_instantiate_filp(nd, dentry, NULL); 1363 if (!IS_ERR(filp)) { 1364 struct nfs_open_context *ctx; 1365 ctx = (struct nfs_open_context *)filp->private_data; 1366 ctx->state = state; 1367 return 0; 1368 } 1369 - nfs4_close_state(state, nd->intent.open.flags); 1370 - return PTR_ERR(filp); 1371 } 1372 1373 struct dentry * 1374 nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 1375 { 1376 struct iattr attr; 1377 struct rpc_cred *cred; 1378 struct nfs4_state *state; ··· 1410 cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); 1411 if (IS_ERR(cred)) 1412 return (struct dentry *)cred; 1413 - state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred); 1414 put_rpccred(cred); 1415 if (IS_ERR(state)) { 1416 if (PTR_ERR(state) == -ENOENT) ··· 1420 res = d_add_unique(dentry, igrab(state->inode)); 1421 if (res != NULL) 1422 dentry = res; 1423 - nfs4_intent_set_file(nd, dentry, state); 1424 return res; 1425 } 1426 1427 int 1428 nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) 1429 { 1430 struct rpc_cred *cred; 1431 struct nfs4_state *state; 1432 1433 cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); 1434 if (IS_ERR(cred)) 1435 return PTR_ERR(cred); 1436 - state = nfs4_open_delegated(dentry->d_inode, openflags, cred); 1437 - if (IS_ERR(state)) 1438 - state = nfs4_do_open(dir, dentry, openflags, NULL, cred); 1439 put_rpccred(cred); 1440 if (IS_ERR(state)) { 1441 switch (PTR_ERR(state)) { ··· 1453 } 1454 } 1455 if (state->inode == dentry->d_inode) { 1456 - nfs4_intent_set_file(nd, dentry, state); 1457 return 1; 1458 } 1459 - nfs4_close_state(state, openflags); 1460 out_drop: 1461 d_drop(dentry); 1462 return 0; ··· 1694 dprintk("NFS call lookupfh %s\n", name->name); 1695 status = rpc_call_sync(server->client, &msg, 0); 1696 dprintk("NFS reply lookupfh: %d\n", status); 1697 - if (status == -NFS4ERR_MOVED) 1698 - status = -EREMOTE; 1699 return status; 1700 } 1701 ··· 1704 struct nfs4_exception exception = { }; 1705 int err; 1706 do { 1707 - err = nfs4_handle_exception(server, 1708 - _nfs4_proc_lookupfh(server, dirfh, name, 1709 - fhandle, fattr), 1710 - &exception); 1711 } while (exception.retry); 1712 return err; 1713 } ··· 1718 static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name, 1719 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 1720 { 
1721 - int status; 1722 - struct nfs_server *server = NFS_SERVER(dir); 1723 - struct nfs4_lookup_arg args = { 1724 - .bitmask = server->attr_bitmask, 1725 - .dir_fh = NFS_FH(dir), 1726 - .name = name, 1727 - }; 1728 - struct nfs4_lookup_res res = { 1729 - .server = server, 1730 - .fattr = fattr, 1731 - .fh = fhandle, 1732 - }; 1733 - struct rpc_message msg = { 1734 - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 1735 - .rpc_argp = &args, 1736 - .rpc_resp = &res, 1737 - }; 1738 - 1739 - nfs_fattr_init(fattr); 1740 1741 dprintk("NFS call lookup %s\n", name->name); 1742 - status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); 1743 if (status == -NFS4ERR_MOVED) 1744 status = nfs4_get_referral(dir, name, fattr, fhandle); 1745 dprintk("NFS reply lookup: %d\n", status); ··· 1870 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 1871 int flags, struct nameidata *nd) 1872 { 1873 struct nfs4_state *state; 1874 struct rpc_cred *cred; 1875 int status = 0; ··· 1883 status = PTR_ERR(cred); 1884 goto out; 1885 } 1886 - state = nfs4_do_open(dir, dentry, flags, sattr, cred); 1887 put_rpccred(cred); 1888 if (IS_ERR(state)) { 1889 status = PTR_ERR(state); ··· 1895 status = nfs4_do_setattr(state->inode, &fattr, sattr, state); 1896 if (status == 0) 1897 nfs_setattr_update_inode(state->inode, sattr); 1898 } 1899 - if (status == 0 && nd != NULL && (nd->flags & LOOKUP_OPEN)) 1900 - status = nfs4_intent_set_file(nd, dentry, state); 1901 else 1902 - nfs4_close_state(state, flags); 1903 out: 1904 return status; 1905 } ··· 3131 if (status != 0) 3132 goto out; 3133 lsp = request->fl_u.nfs4_fl.owner; 3134 - arg.lock_owner.id = lsp->ls_id; 3135 status = rpc_call_sync(server->client, &msg, 0); 3136 switch (status) { 3137 case 0: ··· 3275 { 3276 struct nfs4_unlockdata *data; 3277 3278 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 3279 if (data == NULL) { 3280 nfs_free_seqid(seqid); ··· 3350 goto out_free; 3351 p->arg.lock_stateid = &lsp->ls_stateid; 3352 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 3353 - p->arg.lock_owner.id = lsp->ls_id; 3354 p->lsp = lsp; 3355 atomic_inc(&lsp->ls_count); 3356 p->ctx = get_nfs_open_context(ctx); ··· 3413 memcpy(data->lsp->ls_stateid.data, data->res.stateid.data, 3414 sizeof(data->lsp->ls_stateid.data)); 3415 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; 3416 - renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 3417 } 3418 nfs_increment_lock_seqid(data->rpc_status, data->arg.lock_seqid); 3419 out:
··· 65 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry); 66 static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception); 67 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp); 68 + static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int openflags); 69 70 /* Prevent leaks of NFSv4 errors into userland */ 71 int nfs4_map_errors(int err) ··· 214 } 215 216 struct nfs4_opendata { 217 + struct kref kref; 218 struct nfs_openargs o_arg; 219 struct nfs_openres o_res; 220 struct nfs_open_confirmargs c_arg; 221 struct nfs_open_confirmres c_res; 222 struct nfs_fattr f_attr; 223 struct nfs_fattr dir_attr; 224 + struct path path; 225 struct dentry *dir; 226 struct nfs4_state_owner *owner; 227 + struct nfs4_state *state; 228 struct iattr attrs; 229 unsigned long timestamp; 230 + unsigned int rpc_done : 1; 231 int rpc_status; 232 int cancelled; 233 }; 234 235 + 236 + static void nfs4_init_opendata_res(struct nfs4_opendata *p) 237 + { 238 + p->o_res.f_attr = &p->f_attr; 239 + p->o_res.dir_attr = &p->dir_attr; 240 + p->o_res.server = p->o_arg.server; 241 + nfs_fattr_init(&p->f_attr); 242 + nfs_fattr_init(&p->dir_attr); 243 + } 244 + 245 + static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, 246 struct nfs4_state_owner *sp, int flags, 247 const struct iattr *attrs) 248 { 249 + struct dentry *parent = dget_parent(path->dentry); 250 struct inode *dir = parent->d_inode; 251 struct nfs_server *server = NFS_SERVER(dir); 252 struct nfs4_opendata *p; ··· 245 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); 246 if (p->o_arg.seqid == NULL) 247 goto err_free; 248 + p->path.mnt = mntget(path->mnt); 249 + p->path.dentry = dget(path->dentry); 250 p->dir = parent; 251 p->owner = sp; 252 atomic_inc(&sp->so_count); 253 p->o_arg.fh = NFS_FH(dir); 254 p->o_arg.open_flags = flags, 255 p->o_arg.clientid = server->nfs_client->cl_clientid; 256 + p->o_arg.id = sp->so_owner_id.id; 257 + p->o_arg.name = &p->path.dentry->d_name; 258 p->o_arg.server = server; 259 p->o_arg.bitmask = server->attr_bitmask; 260 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 261 if (flags & O_EXCL) { 262 u32 *s = (u32 *) p->o_arg.u.verifier.data; 263 s[0] = jiffies; ··· 274 p->c_arg.fh = &p->o_res.fh; 275 p->c_arg.stateid = &p->o_res.stateid; 276 p->c_arg.seqid = p->o_arg.seqid; 277 + nfs4_init_opendata_res(p); 278 + kref_init(&p->kref); 279 return p; 280 err_free: 281 kfree(p); ··· 282 return NULL; 283 } 284 285 + static void nfs4_opendata_free(struct kref *kref) 286 { 287 + struct nfs4_opendata *p = container_of(kref, 288 + struct nfs4_opendata, kref); 289 + 290 + nfs_free_seqid(p->o_arg.seqid); 291 + if (p->state != NULL) 292 + nfs4_put_open_state(p->state); 293 + nfs4_put_state_owner(p->owner); 294 + dput(p->dir); 295 + dput(p->path.dentry); 296 + mntput(p->path.mnt); 297 + kfree(p); 298 } 299 300 + static void nfs4_opendata_put(struct nfs4_opendata *p) 301 { 302 + if (p != NULL) 303 + kref_put(&p->kref, nfs4_opendata_free); 304 } 305 306 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) ··· 316 return ret; 317 } 318 319 + static int can_open_cached(struct nfs4_state *state, int mode) 320 + { 321 + int ret = 0; 322 + switch (mode & (FMODE_READ|FMODE_WRITE|O_EXCL)) { 323 + case FMODE_READ: 324 + ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0; 325 + ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; 326 + break; 327 + case FMODE_WRITE: 328 + ret |= test_bit(NFS_O_WRONLY_STATE, 
&state->flags) != 0; 329 + ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; 330 + break; 331 + case FMODE_READ|FMODE_WRITE: 332 + ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; 333 + } 334 + return ret; 335 + } 336 + 337 + static int can_open_delegated(struct nfs_delegation *delegation, mode_t open_flags) 338 + { 339 + if ((delegation->type & open_flags) != open_flags) 340 + return 0; 341 + if (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) 342 + return 0; 343 + return 1; 344 + } 345 + 346 + static void update_open_stateflags(struct nfs4_state *state, mode_t open_flags) 347 { 348 switch (open_flags) { 349 case FMODE_WRITE: ··· 328 case FMODE_READ|FMODE_WRITE: 329 state->n_rdwr++; 330 } 331 + nfs4_state_set_mode_locked(state, state->state | open_flags); 332 } 333 334 + static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) 335 { 336 + if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 337 + memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data)); 338 + memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data)); 339 + switch (open_flags) { 340 + case FMODE_READ: 341 + set_bit(NFS_O_RDONLY_STATE, &state->flags); 342 + break; 343 + case FMODE_WRITE: 344 + set_bit(NFS_O_WRONLY_STATE, &state->flags); 345 + break; 346 + case FMODE_READ|FMODE_WRITE: 347 + set_bit(NFS_O_RDWR_STATE, &state->flags); 348 + } 349 + } 350 351 + static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) 352 + { 353 + write_seqlock(&state->seqlock); 354 + nfs_set_open_stateid_locked(state, stateid, open_flags); 355 + write_sequnlock(&state->seqlock); 356 + } 357 + 358 + static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *deleg_stateid, int open_flags) 359 + { 360 open_flags &= (FMODE_READ|FMODE_WRITE); 361 + /* 362 + * Protect the call to nfs4_state_set_mode_locked and 363 + * serialise the stateid update 364 + */ 365 + write_seqlock(&state->seqlock); 366 + if (deleg_stateid != NULL) { 367 + memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data)); 368 + set_bit(NFS_DELEGATED_STATE, &state->flags); 369 + } 370 + if (open_stateid != NULL) 371 + nfs_set_open_stateid_locked(state, open_stateid, open_flags); 372 + write_sequnlock(&state->seqlock); 373 spin_lock(&state->owner->so_lock); 374 update_open_stateflags(state, open_flags); 375 spin_unlock(&state->owner->so_lock); 376 + } 377 + 378 + static void nfs4_return_incompatible_delegation(struct inode *inode, mode_t open_flags) 379 + { 380 + struct nfs_delegation *delegation; 381 + 382 + rcu_read_lock(); 383 + delegation = rcu_dereference(NFS_I(inode)->delegation); 384 + if (delegation == NULL || (delegation->type & open_flags) == open_flags) { 385 + rcu_read_unlock(); 386 + return; 387 + } 388 + rcu_read_unlock(); 389 + nfs_inode_return_delegation(inode); 390 + } 391 + 392 + static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 393 + { 394 + struct nfs4_state *state = opendata->state; 395 + struct nfs_inode *nfsi = NFS_I(state->inode); 396 + struct nfs_delegation *delegation; 397 + int open_mode = opendata->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL); 398 + nfs4_stateid stateid; 399 + int ret = -EAGAIN; 400 + 401 + rcu_read_lock(); 402 + delegation = rcu_dereference(nfsi->delegation); 403 + for (;;) { 404 + if (can_open_cached(state, open_mode)) { 405 + spin_lock(&state->owner->so_lock); 406 + if (can_open_cached(state, 
open_mode)) { 407 + update_open_stateflags(state, open_mode); 408 + spin_unlock(&state->owner->so_lock); 409 + rcu_read_unlock(); 410 + goto out_return_state; 411 + } 412 + spin_unlock(&state->owner->so_lock); 413 + } 414 + if (delegation == NULL) 415 + break; 416 + if (!can_open_delegated(delegation, open_mode)) 417 + break; 418 + /* Save the delegation */ 419 + memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data)); 420 + rcu_read_unlock(); 421 + lock_kernel(); 422 + ret = _nfs4_do_access(state->inode, state->owner->so_cred, open_mode); 423 + unlock_kernel(); 424 + if (ret != 0) 425 + goto out; 426 + ret = -EAGAIN; 427 + rcu_read_lock(); 428 + delegation = rcu_dereference(nfsi->delegation); 429 + /* If no delegation, try a cached open */ 430 + if (delegation == NULL) 431 + continue; 432 + /* Is the delegation still valid? */ 433 + if (memcmp(stateid.data, delegation->stateid.data, sizeof(stateid.data)) != 0) 434 + continue; 435 + rcu_read_unlock(); 436 + update_open_stateid(state, NULL, &stateid, open_mode); 437 + goto out_return_state; 438 + } 439 + rcu_read_unlock(); 440 + out: 441 + return ERR_PTR(ret); 442 + out_return_state: 443 + atomic_inc(&state->count); 444 + return state; 445 } 446 447 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 448 { 449 struct inode *inode; 450 struct nfs4_state *state = NULL; 451 + struct nfs_delegation *delegation; 452 + nfs4_stateid *deleg_stateid = NULL; 453 + int ret; 454 455 + if (!data->rpc_done) { 456 + state = nfs4_try_open_cached(data); 457 + goto out; 458 + } 459 + 460 + ret = -EAGAIN; 461 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 462 + goto err; 463 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); 464 + ret = PTR_ERR(inode); 465 if (IS_ERR(inode)) 466 + goto err; 467 + ret = -ENOMEM; 468 state = nfs4_get_open_state(inode, data->owner); 469 if (state == NULL) 470 + goto err_put_inode; 471 + if (data->o_res.delegation_type != 0) { 472 + int delegation_flags = 0; 473 + 474 + rcu_read_lock(); 475 + delegation = rcu_dereference(NFS_I(inode)->delegation); 476 + if (delegation) 477 + delegation_flags = delegation->flags; 478 + rcu_read_unlock(); 479 + if (!(delegation_flags & NFS_DELEGATION_NEED_RECLAIM)) 480 + nfs_inode_set_delegation(state->inode, 481 + data->owner->so_cred, 482 + &data->o_res); 483 + else 484 + nfs_inode_reclaim_delegation(state->inode, 485 + data->owner->so_cred, 486 + &data->o_res); 487 + } 488 + rcu_read_lock(); 489 + delegation = rcu_dereference(NFS_I(inode)->delegation); 490 + if (delegation != NULL) 491 + deleg_stateid = &delegation->stateid; 492 + update_open_stateid(state, &data->o_res.stateid, deleg_stateid, data->o_arg.open_flags); 493 + rcu_read_unlock(); 494 iput(inode); 495 out: 496 return state; 497 + err_put_inode: 498 + iput(inode); 499 + err: 500 + return ERR_PTR(ret); 501 } 502 503 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) ··· 382 return ERR_PTR(-ENOENT); 383 } 384 385 + static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, struct nfs4_state **res) 386 { 387 + struct nfs4_state *newstate; 388 int ret; 389 390 opendata->o_arg.open_flags = openflags; 391 + memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 392 + memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 393 + nfs4_init_opendata_res(opendata); 394 ret = _nfs4_proc_open(opendata); 395 if (ret != 0) 396 return ret; 397 + newstate = nfs4_opendata_to_nfs4_state(opendata); 398 + if (IS_ERR(newstate)) 399 + return 
PTR_ERR(newstate); 400 + nfs4_close_state(&opendata->path, newstate, openflags); 401 + *res = newstate; 402 return 0; 403 } 404 405 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 406 { 407 struct nfs4_state *newstate; 408 int ret; 409 410 /* memory barrier prior to reading state->n_* */ 411 + clear_bit(NFS_DELEGATED_STATE, &state->flags); 412 smp_rmb(); 413 if (state->n_rdwr != 0) { 414 + ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); 415 if (ret != 0) 416 return ret; 417 + if (newstate != state) 418 + return -ESTALE; 419 } 420 if (state->n_wronly != 0) { 421 + ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); 422 if (ret != 0) 423 return ret; 424 + if (newstate != state) 425 + return -ESTALE; 426 } 427 if (state->n_rdonly != 0) { 428 + ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); 429 if (ret != 0) 430 return ret; 431 + if (newstate != state) 432 + return -ESTALE; 433 } 434 + /* 435 + * We may have performed cached opens for all three recoveries. 436 + * Check if we need to update the current stateid. 437 + */ 438 + if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 439 + memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) { 440 + write_seqlock(&state->seqlock); 441 + if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 442 + memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)); 443 + write_sequnlock(&state->seqlock); 444 } 445 return 0; 446 } 447 ··· 462 * OPEN_RECLAIM: 463 * reclaim state on the server after a reboot. 464 */ 465 + static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 466 { 467 + struct nfs_delegation *delegation; 468 struct nfs4_opendata *opendata; 469 int delegation_type = 0; 470 int status; 471 472 + opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL); 473 if (opendata == NULL) 474 return -ENOMEM; 475 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; 476 opendata->o_arg.fh = NFS_FH(state->inode); 477 nfs_copy_fh(&opendata->o_res.fh, opendata->o_arg.fh); 478 + rcu_read_lock(); 479 + delegation = rcu_dereference(NFS_I(state->inode)->delegation); 480 + if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0) 481 + delegation_type = delegation->flags; 482 + rcu_read_unlock(); 483 opendata->o_arg.u.delegation_type = delegation_type; 484 status = nfs4_open_recover(opendata, state); 485 + nfs4_opendata_put(opendata); 486 return status; 487 } 488 489 + static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 490 { 491 struct nfs_server *server = NFS_SERVER(state->inode); 492 struct nfs4_exception exception = { }; 493 int err; 494 do { 495 + err = _nfs4_do_open_reclaim(ctx, state); 496 if (err != -NFS4ERR_DELAY) 497 break; 498 nfs4_handle_exception(server, err, &exception); ··· 512 ctx = nfs4_state_find_open_context(state); 513 if (IS_ERR(ctx)) 514 return PTR_ERR(ctx); 515 + ret = nfs4_do_open_reclaim(ctx, state); 516 put_nfs_open_context(ctx); 517 return ret; 518 } 519 520 + static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 521 { 522 struct nfs4_state_owner *sp = state->owner; 523 struct nfs4_opendata *opendata; 524 int ret; 525 526 + opendata = nfs4_opendata_alloc(&ctx->path, sp, 0, NULL); 527 if (opendata == NULL) 528 return -ENOMEM; 529 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; 530 + 
memcpy(opendata->o_arg.u.delegation.data, stateid->data, 531 sizeof(opendata->o_arg.u.delegation.data)); 532 ret = nfs4_open_recover(opendata, state); 533 + nfs4_opendata_put(opendata); 534 return ret; 535 } 536 537 + int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 538 { 539 struct nfs4_exception exception = { }; 540 + struct nfs_server *server = NFS_SERVER(state->inode); 541 int err; 542 do { 543 + err = _nfs4_open_delegation_recall(ctx, state, stateid); 544 switch (err) { 545 case 0: 546 return err; ··· 582 memcpy(data->o_res.stateid.data, data->c_res.stateid.data, 583 sizeof(data->o_res.stateid.data)); 584 renew_lease(data->o_res.server, data->timestamp); 585 + data->rpc_done = 1; 586 } 587 nfs_confirm_seqid(&data->owner->so_seqid, data->rpc_status); 588 + nfs_increment_open_seqid(data->rpc_status, data->c_arg.seqid); 589 } 590 591 static void nfs4_open_confirm_release(void *calldata) ··· 596 if (data->cancelled == 0) 597 goto out_free; 598 /* In case of error, no cleanup! */ 599 + if (!data->rpc_done) 600 goto out_free; 601 nfs_confirm_seqid(&data->owner->so_seqid, 0); 602 state = nfs4_opendata_to_nfs4_state(data); 603 + if (!IS_ERR(state)) 604 + nfs4_close_state(&data->path, state, data->o_arg.open_flags); 605 out_free: 606 + nfs4_opendata_put(data); 607 } 608 609 static const struct rpc_call_ops nfs4_open_confirm_ops = { ··· 621 struct rpc_task *task; 622 int status; 623 624 + kref_get(&data->kref); 625 + data->rpc_done = 0; 626 + data->rpc_status = 0; 627 task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_confirm_ops, data); 628 if (IS_ERR(task)) 629 return PTR_ERR(task); ··· 653 654 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 655 return; 656 + /* 657 + * Check if we still need to send an OPEN call, or if we can use 658 + * a delegation instead. 659 + */ 660 + if (data->state != NULL) { 661 + struct nfs_delegation *delegation; 662 + 663 + if (can_open_cached(data->state, data->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL))) 664 + goto out_no_action; 665 + rcu_read_lock(); 666 + delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 667 + if (delegation != NULL && 668 + (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) { 669 + rcu_read_unlock(); 670 + goto out_no_action; 671 + } 672 + rcu_read_unlock(); 673 + } 674 /* Update sequence id. */ 675 + data->o_arg.id = sp->so_owner_id.id; 676 data->o_arg.clientid = sp->so_client->cl_clientid; 677 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 678 msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 679 data->timestamp = jiffies; 680 rpc_call_setup(task, &msg, 0); 681 + return; 682 + out_no_action: 683 + task->tk_action = NULL; 684 + 685 } 686 687 static void nfs4_open_done(struct rpc_task *task, void *calldata) ··· 683 data->rpc_status = -ENOTDIR; 684 } 685 renew_lease(data->o_res.server, data->timestamp); 686 + if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 687 + nfs_confirm_seqid(&data->owner->so_seqid, 0); 688 } 689 nfs_increment_open_seqid(data->rpc_status, data->o_arg.seqid); 690 + data->rpc_done = 1; 691 } 692 693 static void nfs4_open_release(void *calldata) ··· 696 if (data->cancelled == 0) 697 goto out_free; 698 /* In case of error, no cleanup! */ 699 + if (data->rpc_status != 0 || !data->rpc_done) 700 goto out_free; 701 /* In case we need an open_confirm, no cleanup! 
*/ 702 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 703 goto out_free; 704 nfs_confirm_seqid(&data->owner->so_seqid, 0); 705 state = nfs4_opendata_to_nfs4_state(data); 706 + if (!IS_ERR(state)) 707 + nfs4_close_state(&data->path, state, data->o_arg.open_flags); 708 out_free: 709 + nfs4_opendata_put(data); 710 } 711 712 static const struct rpc_call_ops nfs4_open_ops = { ··· 727 struct rpc_task *task; 728 int status; 729 730 + kref_get(&data->kref); 731 + data->rpc_done = 0; 732 + data->rpc_status = 0; 733 + data->cancelled = 0; 734 task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_ops, data); 735 if (IS_ERR(task)) 736 return PTR_ERR(task); ··· 743 } else 744 status = data->rpc_status; 745 rpc_put_task(task); 746 + if (status != 0 || !data->rpc_done) 747 return status; 748 749 if (o_arg->open_flags & O_CREAT) { ··· 756 if (status != 0) 757 return status; 758 } 759 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 760 return server->nfs_client->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr); 761 return 0; ··· 772 mask |= MAY_READ; 773 if (openflags & FMODE_WRITE) 774 mask |= MAY_WRITE; 775 + if (openflags & FMODE_EXEC) 776 + mask |= MAY_EXEC; 777 status = nfs_access_get_cached(inode, cred, &cache); 778 if (status == 0) 779 goto out; ··· 811 * reclaim state on the server after a network partition. 812 * Assumes caller holds the appropriate lock 813 */ 814 + static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 815 { 816 struct nfs4_opendata *opendata; 817 int ret; 818 819 + opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL); 820 if (opendata == NULL) 821 return -ENOMEM; 822 ret = nfs4_open_recover(opendata, state); 823 if (ret == -ESTALE) { 824 /* Invalidate the state owner so we don't ever use it again */ 825 + nfs4_drop_state_owner(state->owner); 826 + d_drop(ctx->path.dentry); 827 } 828 + nfs4_opendata_put(opendata); 829 return ret; 830 } 831 832 + static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 833 { 834 + struct nfs_server *server = NFS_SERVER(state->inode); 835 struct nfs4_exception exception = { }; 836 int err; 837 838 do { 839 + err = _nfs4_open_expired(ctx, state); 840 if (err == -NFS4ERR_DELAY) 841 nfs4_handle_exception(server, err, &exception); 842 } while (exception.retry); ··· 862 ctx = nfs4_state_find_open_context(state); 863 if (IS_ERR(ctx)) 864 return PTR_ERR(ctx); 865 + ret = nfs4_do_open_expired(ctx, state); 866 put_nfs_open_context(ctx); 867 return ret; 868 } 869 870 /* 871 + * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 872 + * fields corresponding to attributes that were used to store the verifier. 
873 + * Make sure we clobber those fields in the later setattr call 874 */ 875 + static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 876 { 877 + if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 878 + !(sattr->ia_valid & ATTR_ATIME_SET)) 879 + sattr->ia_valid |= ATTR_ATIME; 880 881 + if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 882 + !(sattr->ia_valid & ATTR_MTIME_SET)) 883 + sattr->ia_valid |= ATTR_MTIME; 884 } 885 886 /* 887 * Returns a referenced nfs4_state 888 */ 889 + static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) 890 { 891 struct nfs4_state_owner *sp; 892 struct nfs4_state *state = NULL; 893 struct nfs_server *server = NFS_SERVER(dir); 894 struct nfs_client *clp = server->nfs_client; 895 struct nfs4_opendata *opendata; 896 + int status; 897 898 /* Protect against reboot recovery conflicts */ 899 status = -ENOMEM; ··· 973 status = nfs4_recover_expired_lease(server); 974 if (status != 0) 975 goto err_put_state_owner; 976 + if (path->dentry->d_inode != NULL) 977 + nfs4_return_incompatible_delegation(path->dentry->d_inode, flags & (FMODE_READ|FMODE_WRITE)); 978 down_read(&clp->cl_sem); 979 status = -ENOMEM; 980 + opendata = nfs4_opendata_alloc(path, sp, flags, sattr); 981 if (opendata == NULL) 982 goto err_release_rwsem; 983 984 + if (path->dentry->d_inode != NULL) 985 + opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp); 986 + 987 status = _nfs4_proc_open(opendata); 988 if (status != 0) 989 + goto err_opendata_put; 990 991 + if (opendata->o_arg.open_flags & O_EXCL) 992 + nfs4_exclusive_attrset(opendata, sattr); 993 + 994 state = nfs4_opendata_to_nfs4_state(opendata); 995 + status = PTR_ERR(state); 996 + if (IS_ERR(state)) 997 + goto err_opendata_put; 998 + nfs4_opendata_put(opendata); 999 nfs4_put_state_owner(sp); 1000 up_read(&clp->cl_sem); 1001 *res = state; 1002 return 0; 1003 + err_opendata_put: 1004 + nfs4_opendata_put(opendata); 1005 err_release_rwsem: 1006 up_read(&clp->cl_sem); 1007 err_put_state_owner: ··· 1006 } 1007 1008 1009 + static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred) 1010 { 1011 struct nfs4_exception exception = { }; 1012 struct nfs4_state *res; 1013 int status; 1014 1015 do { 1016 + status = _nfs4_do_open(dir, path, flags, sattr, cred, &res); 1017 if (status == 0) 1018 break; 1019 /* NOTE: BAD_SEQID means the server and client disagree about the ··· 1028 * the user though... 1029 */ 1030 if (status == -NFS4ERR_BAD_SEQID) { 1031 + printk(KERN_WARNING "NFS: v4 server %s " 1032 + " returned a bad sequence-id error!\n", 1033 + NFS_SERVER(dir)->nfs_client->cl_hostname); 1034 exception.retry = 1; 1035 continue; 1036 } ··· 1039 * on Page 181 of RFC3530. 
1040 */ 1041 if (status == -NFS4ERR_BAD_STATEID) { 1042 + exception.retry = 1; 1043 + continue; 1044 + } 1045 + if (status == -EAGAIN) { 1046 + /* We must have found a delegation */ 1047 exception.retry = 1; 1048 continue; 1049 } ··· 1101 } 1102 1103 struct nfs4_closedata { 1104 + struct path path; 1105 struct inode *inode; 1106 struct nfs4_state *state; 1107 struct nfs_closeargs arg; ··· 1117 nfs4_put_open_state(calldata->state); 1118 nfs_free_seqid(calldata->arg.seqid); 1119 nfs4_put_state_owner(sp); 1120 + dput(calldata->path.dentry); 1121 + mntput(calldata->path.mnt); 1122 kfree(calldata); 1123 } 1124 ··· 1134 nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid); 1135 switch (task->tk_status) { 1136 case 0: 1137 + nfs_set_open_stateid(state, &calldata->res.stateid, calldata->arg.open_flags); 1138 renew_lease(server, calldata->timestamp); 1139 break; 1140 case -NFS4ERR_STALE_STATEID: ··· 1160 .rpc_resp = &calldata->res, 1161 .rpc_cred = state->owner->so_cred, 1162 }; 1163 + int clear_rd, clear_wr, clear_rdwr; 1164 + int mode; 1165 1166 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 1167 return; 1168 + 1169 + mode = FMODE_READ|FMODE_WRITE; 1170 + clear_rd = clear_wr = clear_rdwr = 0; 1171 spin_lock(&state->owner->so_lock); 1172 + /* Calculate the change in open mode */ 1173 if (state->n_rdwr == 0) { 1174 + if (state->n_rdonly == 0) { 1175 mode &= ~FMODE_READ; 1176 + clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1177 + clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags); 1178 + } 1179 + if (state->n_wronly == 0) { 1180 mode &= ~FMODE_WRITE; 1181 + clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1182 + clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags); 1183 + } 1184 } 1185 spin_unlock(&state->owner->so_lock); 1186 + if (!clear_rd && !clear_wr && !clear_rdwr) { 1187 /* Note: exit _without_ calling nfs4_close_done */ 1188 task->tk_action = NULL; 1189 return; ··· 1209 * 1210 * NOTE: Caller must be holding the sp->so_owner semaphore! 
1211 */ 1212 + int nfs4_do_close(struct path *path, struct nfs4_state *state) 1213 { 1214 + struct nfs_server *server = NFS_SERVER(state->inode); 1215 struct nfs4_closedata *calldata; 1216 + struct nfs4_state_owner *sp = state->owner; 1217 + struct rpc_task *task; 1218 int status = -ENOMEM; 1219 1220 calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); 1221 if (calldata == NULL) 1222 goto out; 1223 + calldata->inode = state->inode; 1224 calldata->state = state; 1225 + calldata->arg.fh = NFS_FH(state->inode); 1226 + calldata->arg.stateid = &state->open_stateid; 1227 /* Serialization for the sequence id */ 1228 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); 1229 if (calldata->arg.seqid == NULL) ··· 1229 calldata->arg.bitmask = server->attr_bitmask; 1230 calldata->res.fattr = &calldata->fattr; 1231 calldata->res.server = server; 1232 + calldata->path.mnt = mntget(path->mnt); 1233 + calldata->path.dentry = dget(path->dentry); 1234 1235 + task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_close_ops, calldata); 1236 + if (IS_ERR(task)) 1237 + return PTR_ERR(task); 1238 + rpc_put_task(task); 1239 + return 0; 1240 out_free_calldata: 1241 kfree(calldata); 1242 out: 1243 + nfs4_put_open_state(state); 1244 + nfs4_put_state_owner(sp); 1245 return status; 1246 } 1247 1248 + static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state) 1249 { 1250 struct file *filp; 1251 + int ret; 1252 1253 + /* If the open_intent is for execute, we have an extra check to make */ 1254 + if (nd->intent.open.flags & FMODE_EXEC) { 1255 + ret = _nfs4_do_access(state->inode, 1256 + state->owner->so_cred, 1257 + nd->intent.open.flags); 1258 + if (ret < 0) 1259 + goto out_close; 1260 + } 1261 + filp = lookup_instantiate_filp(nd, path->dentry, NULL); 1262 if (!IS_ERR(filp)) { 1263 struct nfs_open_context *ctx; 1264 ctx = (struct nfs_open_context *)filp->private_data; 1265 ctx->state = state; 1266 return 0; 1267 } 1268 + ret = PTR_ERR(filp); 1269 + out_close: 1270 + nfs4_close_state(path, state, nd->intent.open.flags); 1271 + return ret; 1272 } 1273 1274 struct dentry * 1275 nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 1276 { 1277 + struct path path = { 1278 + .mnt = nd->mnt, 1279 + .dentry = dentry, 1280 + }; 1281 struct iattr attr; 1282 struct rpc_cred *cred; 1283 struct nfs4_state *state; ··· 1277 cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); 1278 if (IS_ERR(cred)) 1279 return (struct dentry *)cred; 1280 + state = nfs4_do_open(dir, &path, nd->intent.open.flags, &attr, cred); 1281 put_rpccred(cred); 1282 if (IS_ERR(state)) { 1283 if (PTR_ERR(state) == -ENOENT) ··· 1287 res = d_add_unique(dentry, igrab(state->inode)); 1288 if (res != NULL) 1289 dentry = res; 1290 + nfs4_intent_set_file(nd, &path, state); 1291 return res; 1292 } 1293 1294 int 1295 nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) 1296 { 1297 + struct path path = { 1298 + .mnt = nd->mnt, 1299 + .dentry = dentry, 1300 + }; 1301 struct rpc_cred *cred; 1302 struct nfs4_state *state; 1303 1304 cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0); 1305 if (IS_ERR(cred)) 1306 return PTR_ERR(cred); 1307 + state = nfs4_do_open(dir, &path, openflags, NULL, cred); 1308 put_rpccred(cred); 1309 if (IS_ERR(state)) { 1310 switch (PTR_ERR(state)) { ··· 1318 } 1319 } 1320 if (state->inode == dentry->d_inode) { 1321 + nfs4_intent_set_file(nd, &path, state); 1322 return 1; 1323 } 1324 + nfs4_close_state(&path, state, 
openflags); 1325 out_drop: 1326 d_drop(dentry); 1327 return 0; ··· 1559 dprintk("NFS call lookupfh %s\n", name->name); 1560 status = rpc_call_sync(server->client, &msg, 0); 1561 dprintk("NFS reply lookupfh: %d\n", status); 1562 return status; 1563 } 1564 ··· 1571 struct nfs4_exception exception = { }; 1572 int err; 1573 do { 1574 + err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr); 1575 + /* FIXME: !!!! */ 1576 + if (err == -NFS4ERR_MOVED) { 1577 + err = -EREMOTE; 1578 + break; 1579 + } 1580 + err = nfs4_handle_exception(server, err, &exception); 1581 } while (exception.retry); 1582 return err; 1583 } ··· 1582 static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name, 1583 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 1584 { 1585 + int status; 1586 1587 dprintk("NFS call lookup %s\n", name->name); 1588 + status = _nfs4_proc_lookupfh(NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr); 1589 if (status == -NFS4ERR_MOVED) 1590 status = nfs4_get_referral(dir, name, fattr, fhandle); 1591 dprintk("NFS reply lookup: %d\n", status); ··· 1752 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 1753 int flags, struct nameidata *nd) 1754 { 1755 + struct path path = { 1756 + .mnt = nd->mnt, 1757 + .dentry = dentry, 1758 + }; 1759 struct nfs4_state *state; 1760 struct rpc_cred *cred; 1761 int status = 0; ··· 1761 status = PTR_ERR(cred); 1762 goto out; 1763 } 1764 + state = nfs4_do_open(dir, &path, flags, sattr, cred); 1765 put_rpccred(cred); 1766 if (IS_ERR(state)) { 1767 status = PTR_ERR(state); ··· 1773 status = nfs4_do_setattr(state->inode, &fattr, sattr, state); 1774 if (status == 0) 1775 nfs_setattr_update_inode(state->inode, sattr); 1776 + nfs_post_op_update_inode(state->inode, &fattr); 1777 } 1778 + if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0) 1779 + status = nfs4_intent_set_file(nd, &path, state); 1780 else 1781 + nfs4_close_state(&path, state, flags); 1782 out: 1783 return status; 1784 } ··· 3008 if (status != 0) 3009 goto out; 3010 lsp = request->fl_u.nfs4_fl.owner; 3011 + arg.lock_owner.id = lsp->ls_id.id; 3012 status = rpc_call_sync(server->client, &msg, 0); 3013 switch (status) { 3014 case 0: ··· 3152 { 3153 struct nfs4_unlockdata *data; 3154 3155 + /* Ensure this is an unlock - when canceling a lock, the 3156 + * canceled lock is passed in, and it won't be an unlock. 3157 + */ 3158 + fl->fl_type = F_UNLCK; 3159 + 3160 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 3161 if (data == NULL) { 3162 nfs_free_seqid(seqid); ··· 3222 goto out_free; 3223 p->arg.lock_stateid = &lsp->ls_stateid; 3224 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 3225 + p->arg.lock_owner.id = lsp->ls_id.id; 3226 p->lsp = lsp; 3227 atomic_inc(&lsp->ls_count); 3228 p->ctx = get_nfs_open_context(ctx); ··· 3285 memcpy(data->lsp->ls_stateid.data, data->res.stateid.data, 3286 sizeof(data->lsp->ls_stateid.data)); 3287 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; 3288 + renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp); 3289 } 3290 nfs_increment_lock_seqid(data->rpc_status, data->arg.lock_seqid); 3291 out:
+216 -96
fs/nfs/nfs4state.c
··· 38 * subsequent patch. 39 */ 40 41 #include <linux/slab.h> 42 #include <linux/smp_lock.h> 43 #include <linux/nfs_fs.h> 44 #include <linux/nfs_idmap.h> 45 #include <linux/kthread.h> 46 #include <linux/module.h> 47 #include <linux/workqueue.h> 48 #include <linux/bitops.h> 49 ··· 71 return status; 72 } 73 74 - u32 75 - nfs4_alloc_lockowner_id(struct nfs_client *clp) 76 - { 77 - return clp->cl_lockowner_id ++; 78 - } 79 - 80 - static struct nfs4_state_owner * 81 - nfs4_client_grab_unused(struct nfs_client *clp, struct rpc_cred *cred) 82 - { 83 - struct nfs4_state_owner *sp = NULL; 84 - 85 - if (!list_empty(&clp->cl_unused)) { 86 - sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list); 87 - atomic_inc(&sp->so_count); 88 - sp->so_cred = cred; 89 - list_move(&sp->so_list, &clp->cl_state_owners); 90 - clp->cl_nunused--; 91 - } 92 - return sp; 93 - } 94 - 95 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) 96 { 97 struct nfs4_state_owner *sp; 98 struct rpc_cred *cred = NULL; 99 100 - list_for_each_entry(sp, &clp->cl_state_owners, so_list) { 101 if (list_empty(&sp->so_states)) 102 continue; 103 cred = get_rpccred(sp->so_cred); ··· 90 static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) 91 { 92 struct nfs4_state_owner *sp; 93 94 - if (!list_empty(&clp->cl_state_owners)) { 95 - sp = list_entry(clp->cl_state_owners.next, 96 - struct nfs4_state_owner, so_list); 97 return get_rpccred(sp->so_cred); 98 } 99 return NULL; 100 } 101 102 - static struct nfs4_state_owner * 103 - nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred) 104 { 105 struct nfs4_state_owner *sp, *res = NULL; 106 107 - list_for_each_entry(sp, &clp->cl_state_owners, so_list) { 108 - if (sp->so_cred != cred) 109 continue; 110 - atomic_inc(&sp->so_count); 111 - /* Move to the head of the list */ 112 - list_move(&sp->so_list, &clp->cl_state_owners); 113 - res = sp; 114 - break; 115 } 116 return res; 117 } 118 119 /* ··· 257 void 258 nfs4_drop_state_owner(struct nfs4_state_owner *sp) 259 { 260 - struct nfs_client *clp = sp->so_client; 261 - spin_lock(&clp->cl_lock); 262 - list_del_init(&sp->so_list); 263 - spin_unlock(&clp->cl_lock); 264 } 265 266 /* ··· 276 struct nfs_client *clp = server->nfs_client; 277 struct nfs4_state_owner *sp, *new; 278 279 - get_rpccred(cred); 280 - new = nfs4_alloc_state_owner(); 281 spin_lock(&clp->cl_lock); 282 - sp = nfs4_find_state_owner(clp, cred); 283 - if (sp == NULL) 284 - sp = nfs4_client_grab_unused(clp, cred); 285 - if (sp == NULL && new != NULL) { 286 - list_add(&new->so_list, &clp->cl_state_owners); 287 - new->so_client = clp; 288 - new->so_id = nfs4_alloc_lockowner_id(clp); 289 - new->so_cred = cred; 290 - sp = new; 291 - new = NULL; 292 - } 293 spin_unlock(&clp->cl_lock); 294 - kfree(new); 295 if (sp != NULL) 296 return sp; 297 - put_rpccred(cred); 298 - return NULL; 299 } 300 301 /* ··· 308 309 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) 310 return; 311 - if (clp->cl_nunused >= OPENOWNER_POOL_SIZE) 312 - goto out_free; 313 - if (list_empty(&sp->so_list)) 314 - goto out_free; 315 - list_move(&sp->so_list, &clp->cl_unused); 316 - clp->cl_nunused++; 317 - spin_unlock(&clp->cl_lock); 318 - put_rpccred(cred); 319 - cred = NULL; 320 - return; 321 - out_free: 322 - list_del(&sp->so_list); 323 spin_unlock(&clp->cl_lock); 324 put_rpccred(cred); 325 kfree(sp); ··· 325 atomic_set(&state->count, 1); 326 INIT_LIST_HEAD(&state->lock_states); 327 spin_lock_init(&state->state_lock); 328 return state; 329 } 330 ··· 353 struct nfs4_state 
*state; 354 355 list_for_each_entry(state, &nfsi->open_states, inode_states) { 356 - /* Is this in the process of being freed? */ 357 - if (state->state == 0) 358 continue; 359 - if (state->owner == owner) { 360 - atomic_inc(&state->count); 361 return state; 362 - } 363 } 364 return NULL; 365 } ··· 428 /* 429 * Close the current file. 430 */ 431 - void nfs4_close_state(struct nfs4_state *state, mode_t mode) 432 { 433 - struct inode *inode = state->inode; 434 struct nfs4_state_owner *owner = state->owner; 435 - int oldstate, newstate = 0; 436 437 atomic_inc(&owner->so_count); 438 /* Protect against nfs4_find_state() */ 439 spin_lock(&owner->so_lock); 440 - spin_lock(&inode->i_lock); 441 switch (mode & (FMODE_READ | FMODE_WRITE)) { 442 case FMODE_READ: 443 state->n_rdonly--; ··· 447 case FMODE_READ|FMODE_WRITE: 448 state->n_rdwr--; 449 } 450 - oldstate = newstate = state->state; 451 if (state->n_rdwr == 0) { 452 - if (state->n_rdonly == 0) 453 newstate &= ~FMODE_READ; 454 - if (state->n_wronly == 0) 455 newstate &= ~FMODE_WRITE; 456 } 457 - if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 458 - nfs4_state_set_mode_locked(state, newstate); 459 - oldstate = newstate; 460 - } 461 - spin_unlock(&inode->i_lock); 462 spin_unlock(&owner->so_lock); 463 464 - if (oldstate != newstate && nfs4_do_close(inode, state) == 0) 465 - return; 466 - nfs4_put_open_state(state); 467 - nfs4_put_state_owner(owner); 468 } 469 470 /* ··· 506 atomic_set(&lsp->ls_count, 1); 507 lsp->ls_owner = fl_owner; 508 spin_lock(&clp->cl_lock); 509 - lsp->ls_id = nfs4_alloc_lockowner_id(clp); 510 spin_unlock(&clp->cl_lock); 511 INIT_LIST_HEAD(&lsp->ls_locks); 512 return lsp; 513 } 514 515 /* ··· 551 return NULL; 552 } 553 spin_unlock(&state->state_lock); 554 - kfree(new); 555 return lsp; 556 } 557 ··· 573 if (list_empty(&state->lock_states)) 574 clear_bit(LK_STATE_IN_USE, &state->flags); 575 spin_unlock(&state->state_lock); 576 - kfree(lsp); 577 } 578 579 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) ··· 615 void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner) 616 { 617 struct nfs4_lock_state *lsp; 618 619 - memcpy(dst, &state->stateid, sizeof(*dst)); 620 if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) 621 return; 622 ··· 663 * failed with a seqid incrementing error - 664 * see comments nfs_fs.h:seqid_mutating_error() 665 */ 666 - static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid) 667 { 668 switch (status) { 669 case 0: 670 break; 671 case -NFS4ERR_BAD_SEQID: 672 case -NFS4ERR_STALE_CLIENTID: 673 case -NFS4ERR_STALE_STATEID: 674 case -NFS4ERR_BAD_STATEID: ··· 698 struct nfs4_state_owner, so_seqid); 699 nfs4_drop_state_owner(sp); 700 } 701 - return nfs_increment_seqid(status, seqid); 702 } 703 704 /* ··· 708 */ 709 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) 710 { 711 - return nfs_increment_seqid(status, seqid); 712 } 713 714 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) ··· 860 static void nfs4_state_mark_reclaim(struct nfs_client *clp) 861 { 862 struct nfs4_state_owner *sp; 863 struct nfs4_state *state; 864 struct nfs4_lock_state *lock; 865 866 /* Reset all sequence ids to zero */ 867 - list_for_each_entry(sp, &clp->cl_state_owners, so_list) { 868 sp->so_seqid.counter = 0; 869 sp->so_seqid.flags = 0; 870 spin_lock(&sp->so_lock); 871 list_for_each_entry(state, &sp->so_states, open_states) { 872 list_for_each_entry(lock, &state->lock_states, ls_locks) { 873 lock->ls_seqid.counter = 0; 
874 lock->ls_seqid.flags = 0; ··· 889 { 890 struct nfs_client *clp = ptr; 891 struct nfs4_state_owner *sp; 892 struct nfs4_state_recovery_ops *ops; 893 struct rpc_cred *cred; 894 int status = 0; ··· 935 /* Mark all delegations for reclaim */ 936 nfs_delegation_mark_reclaim(clp); 937 /* Note: list is protected by exclusive lock on cl->cl_sem */ 938 - list_for_each_entry(sp, &clp->cl_state_owners, so_list) { 939 status = nfs4_reclaim_open_state(ops, sp); 940 if (status < 0) { 941 if (status == -NFS4ERR_NO_GRACE) {
··· 38 * subsequent patch. 39 */ 40 41 + #include <linux/kernel.h> 42 #include <linux/slab.h> 43 #include <linux/smp_lock.h> 44 #include <linux/nfs_fs.h> 45 #include <linux/nfs_idmap.h> 46 #include <linux/kthread.h> 47 #include <linux/module.h> 48 + #include <linux/random.h> 49 #include <linux/workqueue.h> 50 #include <linux/bitops.h> 51 ··· 69 return status; 70 } 71 72 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) 73 { 74 struct nfs4_state_owner *sp; 75 + struct rb_node *pos; 76 struct rpc_cred *cred = NULL; 77 78 + for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 79 + sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 80 if (list_empty(&sp->so_states)) 81 continue; 82 cred = get_rpccred(sp->so_cred); ··· 107 static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) 108 { 109 struct nfs4_state_owner *sp; 110 + struct rb_node *pos; 111 112 + pos = rb_first(&clp->cl_state_owners); 113 + if (pos != NULL) { 114 + sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 115 return get_rpccred(sp->so_cred); 116 } 117 return NULL; 118 } 119 120 + static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new, 121 + __u64 minval, int maxbits) 122 { 123 + struct rb_node **p, *parent; 124 + struct nfs_unique_id *pos; 125 + __u64 mask = ~0ULL; 126 + 127 + if (maxbits < 64) 128 + mask = (1ULL << maxbits) - 1ULL; 129 + 130 + /* Ensure distribution is more or less flat */ 131 + get_random_bytes(&new->id, sizeof(new->id)); 132 + new->id &= mask; 133 + if (new->id < minval) 134 + new->id += minval; 135 + retry: 136 + p = &root->rb_node; 137 + parent = NULL; 138 + 139 + while (*p != NULL) { 140 + parent = *p; 141 + pos = rb_entry(parent, struct nfs_unique_id, rb_node); 142 + 143 + if (new->id < pos->id) 144 + p = &(*p)->rb_left; 145 + else if (new->id > pos->id) 146 + p = &(*p)->rb_right; 147 + else 148 + goto id_exists; 149 + } 150 + rb_link_node(&new->rb_node, parent, p); 151 + rb_insert_color(&new->rb_node, root); 152 + return; 153 + id_exists: 154 + for (;;) { 155 + new->id++; 156 + if (new->id < minval || (new->id & mask) != new->id) { 157 + new->id = minval; 158 + break; 159 + } 160 + parent = rb_next(parent); 161 + if (parent == NULL) 162 + break; 163 + pos = rb_entry(parent, struct nfs_unique_id, rb_node); 164 + if (new->id < pos->id) 165 + break; 166 + } 167 + goto retry; 168 + } 169 + 170 + static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id) 171 + { 172 + rb_erase(&id->rb_node, root); 173 + } 174 + 175 + static struct nfs4_state_owner * 176 + nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred) 177 + { 178 + struct nfs_client *clp = server->nfs_client; 179 + struct rb_node **p = &clp->cl_state_owners.rb_node, 180 + *parent = NULL; 181 struct nfs4_state_owner *sp, *res = NULL; 182 183 + while (*p != NULL) { 184 + parent = *p; 185 + sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); 186 + 187 + if (server < sp->so_server) { 188 + p = &parent->rb_left; 189 continue; 190 + } 191 + if (server > sp->so_server) { 192 + p = &parent->rb_right; 193 + continue; 194 + } 195 + if (cred < sp->so_cred) 196 + p = &parent->rb_left; 197 + else if (cred > sp->so_cred) 198 + p = &parent->rb_right; 199 + else { 200 + atomic_inc(&sp->so_count); 201 + res = sp; 202 + break; 203 + } 204 } 205 return res; 206 + } 207 + 208 + static struct nfs4_state_owner * 209 + nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new) 210 + { 211 + struct rb_node **p 
= &clp->cl_state_owners.rb_node, 212 + *parent = NULL; 213 + struct nfs4_state_owner *sp; 214 + 215 + while (*p != NULL) { 216 + parent = *p; 217 + sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); 218 + 219 + if (new->so_server < sp->so_server) { 220 + p = &parent->rb_left; 221 + continue; 222 + } 223 + if (new->so_server > sp->so_server) { 224 + p = &parent->rb_right; 225 + continue; 226 + } 227 + if (new->so_cred < sp->so_cred) 228 + p = &parent->rb_left; 229 + else if (new->so_cred > sp->so_cred) 230 + p = &parent->rb_right; 231 + else { 232 + atomic_inc(&sp->so_count); 233 + return sp; 234 + } 235 + } 236 + nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64); 237 + rb_link_node(&new->so_client_node, parent, p); 238 + rb_insert_color(&new->so_client_node, &clp->cl_state_owners); 239 + return new; 240 + } 241 + 242 + static void 243 + nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp) 244 + { 245 + if (!RB_EMPTY_NODE(&sp->so_client_node)) 246 + rb_erase(&sp->so_client_node, &clp->cl_state_owners); 247 + nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id); 248 } 249 250 /* ··· 160 void 161 nfs4_drop_state_owner(struct nfs4_state_owner *sp) 162 { 163 + if (!RB_EMPTY_NODE(&sp->so_client_node)) { 164 + struct nfs_client *clp = sp->so_client; 165 + 166 + spin_lock(&clp->cl_lock); 167 + rb_erase(&sp->so_client_node, &clp->cl_state_owners); 168 + RB_CLEAR_NODE(&sp->so_client_node); 169 + spin_unlock(&clp->cl_lock); 170 + } 171 } 172 173 /* ··· 175 struct nfs_client *clp = server->nfs_client; 176 struct nfs4_state_owner *sp, *new; 177 178 spin_lock(&clp->cl_lock); 179 + sp = nfs4_find_state_owner(server, cred); 180 spin_unlock(&clp->cl_lock); 181 if (sp != NULL) 182 return sp; 183 + new = nfs4_alloc_state_owner(); 184 + if (new == NULL) 185 + return NULL; 186 + new->so_client = clp; 187 + new->so_server = server; 188 + new->so_cred = cred; 189 + spin_lock(&clp->cl_lock); 190 + sp = nfs4_insert_state_owner(clp, new); 191 + spin_unlock(&clp->cl_lock); 192 + if (sp == new) 193 + get_rpccred(cred); 194 + else 195 + kfree(new); 196 + return sp; 197 } 198 199 /* ··· 208 209 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) 210 return; 211 + nfs4_remove_state_owner(clp, sp); 212 spin_unlock(&clp->cl_lock); 213 put_rpccred(cred); 214 kfree(sp); ··· 236 atomic_set(&state->count, 1); 237 INIT_LIST_HEAD(&state->lock_states); 238 spin_lock_init(&state->state_lock); 239 + seqlock_init(&state->seqlock); 240 return state; 241 } 242 ··· 263 struct nfs4_state *state; 264 265 list_for_each_entry(state, &nfsi->open_states, inode_states) { 266 + if (state->owner != owner) 267 continue; 268 + if (atomic_inc_not_zero(&state->count)) 269 return state; 270 } 271 return NULL; 272 } ··· 341 /* 342 * Close the current file. 
343 */ 344 + void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode) 345 { 346 struct nfs4_state_owner *owner = state->owner; 347 + int call_close = 0; 348 + int newstate; 349 350 atomic_inc(&owner->so_count); 351 /* Protect against nfs4_find_state() */ 352 spin_lock(&owner->so_lock); 353 switch (mode & (FMODE_READ | FMODE_WRITE)) { 354 case FMODE_READ: 355 state->n_rdonly--; ··· 361 case FMODE_READ|FMODE_WRITE: 362 state->n_rdwr--; 363 } 364 + newstate = FMODE_READ|FMODE_WRITE; 365 if (state->n_rdwr == 0) { 366 + if (state->n_rdonly == 0) { 367 newstate &= ~FMODE_READ; 368 + call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 369 + call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 370 + } 371 + if (state->n_wronly == 0) { 372 newstate &= ~FMODE_WRITE; 373 + call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 374 + call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 375 + } 376 + if (newstate == 0) 377 + clear_bit(NFS_DELEGATED_STATE, &state->flags); 378 } 379 + nfs4_state_set_mode_locked(state, newstate); 380 spin_unlock(&owner->so_lock); 381 382 + if (!call_close) { 383 + nfs4_put_open_state(state); 384 + nfs4_put_state_owner(owner); 385 + } else 386 + nfs4_do_close(path, state); 387 } 388 389 /* ··· 415 atomic_set(&lsp->ls_count, 1); 416 lsp->ls_owner = fl_owner; 417 spin_lock(&clp->cl_lock); 418 + nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64); 419 spin_unlock(&clp->cl_lock); 420 INIT_LIST_HEAD(&lsp->ls_locks); 421 return lsp; 422 + } 423 + 424 + static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) 425 + { 426 + struct nfs_client *clp = lsp->ls_state->owner->so_client; 427 + 428 + spin_lock(&clp->cl_lock); 429 + nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id); 430 + spin_unlock(&clp->cl_lock); 431 + kfree(lsp); 432 } 433 434 /* ··· 450 return NULL; 451 } 452 spin_unlock(&state->state_lock); 453 + if (new != NULL) 454 + nfs4_free_lock_state(new); 455 return lsp; 456 } 457 ··· 471 if (list_empty(&state->lock_states)) 472 clear_bit(LK_STATE_IN_USE, &state->flags); 473 spin_unlock(&state->state_lock); 474 + nfs4_free_lock_state(lsp); 475 } 476 477 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) ··· 513 void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner) 514 { 515 struct nfs4_lock_state *lsp; 516 + int seq; 517 518 + do { 519 + seq = read_seqbegin(&state->seqlock); 520 + memcpy(dst, &state->stateid, sizeof(*dst)); 521 + } while (read_seqretry(&state->seqlock, seq)); 522 if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) 523 return; 524 ··· 557 * failed with a seqid incrementing error - 558 * see comments nfs_fs.h:seqid_mutating_error() 559 */ 560 + static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) 561 { 562 switch (status) { 563 case 0: 564 break; 565 case -NFS4ERR_BAD_SEQID: 566 + if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) 567 + return; 568 + printk(KERN_WARNING "NFS: v4 server returned a bad " 569 + "sequence-id error on an " 570 + "unconfirmed sequence %p!\n", 571 + seqid->sequence); 572 case -NFS4ERR_STALE_CLIENTID: 573 case -NFS4ERR_STALE_STATEID: 574 case -NFS4ERR_BAD_STATEID: ··· 586 struct nfs4_state_owner, so_seqid); 587 nfs4_drop_state_owner(sp); 588 } 589 + nfs_increment_seqid(status, seqid); 590 } 591 592 /* ··· 596 */ 597 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) 598 { 599 + nfs_increment_seqid(status, seqid); 600 } 601 602 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct
rpc_task *task) ··· 748 static void nfs4_state_mark_reclaim(struct nfs_client *clp) 749 { 750 struct nfs4_state_owner *sp; 751 + struct rb_node *pos; 752 struct nfs4_state *state; 753 struct nfs4_lock_state *lock; 754 755 /* Reset all sequence ids to zero */ 756 + for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 757 + sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 758 sp->so_seqid.counter = 0; 759 sp->so_seqid.flags = 0; 760 spin_lock(&sp->so_lock); 761 list_for_each_entry(state, &sp->so_states, open_states) { 762 + clear_bit(NFS_DELEGATED_STATE, &state->flags); 763 + clear_bit(NFS_O_RDONLY_STATE, &state->flags); 764 + clear_bit(NFS_O_WRONLY_STATE, &state->flags); 765 + clear_bit(NFS_O_RDWR_STATE, &state->flags); 766 list_for_each_entry(lock, &state->lock_states, ls_locks) { 767 lock->ls_seqid.counter = 0; 768 lock->ls_seqid.flags = 0; ··· 771 { 772 struct nfs_client *clp = ptr; 773 struct nfs4_state_owner *sp; 774 + struct rb_node *pos; 775 struct nfs4_state_recovery_ops *ops; 776 struct rpc_cred *cred; 777 int status = 0; ··· 816 /* Mark all delegations for reclaim */ 817 nfs_delegation_mark_reclaim(clp); 818 /* Note: list is protected by exclusive lock on cl->cl_sem */ 819 + for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 820 + sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 821 status = nfs4_reclaim_open_state(ops, sp); 822 if (status < 0) { 823 if (status == -NFS4ERR_NO_GRACE) {
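The new nfs_alloc_unique_id() in this file replaces the old incrementing cl_lockowner_id counter: it draws a random value under a bit mask and, on collision, probes upward, wrapping back to minval. A standalone sketch of the same allocation strategy, with a flat array standing in for the kernel's rb-tree — all names here are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_MAX 128

static uint64_t pool[POOL_MAX];   /* IDs in use; stands in for the rb-tree */
static int pool_len;

static int id_in_use(uint64_t id)
{
        for (int i = 0; i < pool_len; i++)
                if (pool[i] == id)
                        return 1;
        return 0;
}

static uint64_t alloc_unique_id(uint64_t minval, int maxbits)
{
        uint64_t mask = (maxbits < 64) ? (1ULL << maxbits) - 1 : ~0ULL;
        /* the kernel uses get_random_bytes(); rand() keeps the sketch portable */
        uint64_t id = (((uint64_t)rand() << 32) | (uint64_t)rand()) & mask;

        if (id < minval)
                id += minval;
        while (id_in_use(id)) {
                id++;                          /* probe upward on collision */
                if (id < minval || (id & mask) != id)
                        id = minval;           /* wrap back into range */
        }
        if (pool_len < POOL_MAX)
                pool[pool_len++] = id;
        return id;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("owner id %d: %llu\n", i,
                       (unsigned long long)alloc_unique_id(1, 16));
        return 0;
}

Randomizing the starting point keeps the IDs spread flat across the space, so the collision probe almost never runs; the rb-tree makes the in-use check O(log n) rather than this sketch's linear scan.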
+84 -42
fs/nfs/nfs4xdr.c
··· 68 #endif 69 70 /* lock,open owner id: 71 - * we currently use size 1 (u32) out of (NFS4_OPAQUE_LIMIT >> 2) 72 */ 73 - #define owner_id_maxsz (1 + 1) 74 #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 75 #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 76 #define op_encode_hdr_maxsz (1) ··· 88 #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) 89 #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) 90 #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) 91 /* This is based on getfattr, which uses the most attributes: */ 92 #define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \ 93 - 3 + 3 + 3 + 2 * nfs4_name_maxsz)) 94 #define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \ 95 nfs4_fattr_value_maxsz) 96 #define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) ··· 119 3 + (NFS4_VERIFIER_SIZE >> 2)) 120 #define decode_setclientid_confirm_maxsz \ 121 (op_decode_hdr_maxsz) 122 - #define encode_lookup_maxsz (op_encode_hdr_maxsz + \ 123 - 1 + ((3 + NFS4_FHSIZE) >> 2)) 124 #define encode_remove_maxsz (op_encode_hdr_maxsz + \ 125 nfs4_name_maxsz) 126 #define encode_rename_maxsz (op_encode_hdr_maxsz + \ ··· 156 #define encode_create_maxsz (op_encode_hdr_maxsz + \ 157 2 + nfs4_name_maxsz + \ 158 nfs4_fattr_maxsz) 159 - #define decode_create_maxsz (op_decode_hdr_maxsz + 8) 160 #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) 161 #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) 162 #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */ 163 #define NFS4_dec_compound_sz (1024) /* XXX: large enough? */ 164 #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \ ··· 202 op_decode_hdr_maxsz + 2 + \ 203 decode_getattr_maxsz) 204 #define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ 205 - encode_putfh_maxsz + \ 206 - op_encode_hdr_maxsz + \ 207 - 13 + 3 + 2 + 64 + \ 208 - encode_getattr_maxsz + \ 209 - encode_getfh_maxsz) 210 #define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \ 211 - decode_putfh_maxsz + \ 212 - op_decode_hdr_maxsz + 4 + 5 + 2 + 3 + \ 213 - decode_getattr_maxsz + \ 214 - decode_getfh_maxsz) 215 #define NFS4_enc_open_confirm_sz \ 216 (compound_encode_hdr_maxsz + \ 217 encode_putfh_maxsz + \ ··· 226 op_decode_hdr_maxsz + 4) 227 #define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \ 228 encode_putfh_maxsz + \ 229 - op_encode_hdr_maxsz + \ 230 - 11) 231 #define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \ 232 decode_putfh_maxsz + \ 233 - op_decode_hdr_maxsz + \ 234 - 4 + 5 + 2 + 3) 235 #define NFS4_enc_open_downgrade_sz \ 236 (compound_encode_hdr_maxsz + \ 237 encode_putfh_maxsz + \ ··· 289 op_encode_hdr_maxsz + \ 290 1 + 1 + 2 + 2 + \ 291 1 + 4 + 1 + 2 + \ 292 - owner_id_maxsz) 293 #define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \ 294 decode_putfh_maxsz + \ 295 decode_getattr_maxsz + \ 296 op_decode_hdr_maxsz + \ 297 2 + 2 + 1 + 2 + \ 298 - owner_id_maxsz) 299 #define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \ 300 encode_putfh_maxsz + \ 301 encode_getattr_maxsz + \ 302 op_encode_hdr_maxsz + \ 303 1 + 2 + 2 + 2 + \ 304 - owner_id_maxsz) 305 #define NFS4_dec_lockt_sz (NFS4_dec_lock_sz) 306 #define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \ 307 encode_putfh_maxsz + \ ··· 331 encode_getfh_maxsz) 332 #define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \ 333 decode_putfh_maxsz + \ 334 - op_decode_hdr_maxsz + \ 335 decode_getattr_maxsz + \ 336 decode_getfh_maxsz) 337 #define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \ ··· 450 
#define NFS4_enc_fs_locations_sz \ 451 (compound_encode_hdr_maxsz + \ 452 encode_putfh_maxsz + \ 453 - encode_getattr_maxsz) 454 #define NFS4_dec_fs_locations_sz \ 455 (compound_decode_hdr_maxsz + \ 456 decode_putfh_maxsz + \ 457 - op_decode_hdr_maxsz + \ 458 - nfs4_fattr_bitmap_maxsz) 459 460 static struct { 461 unsigned int mode; ··· 827 WRITE64(nfs4_lock_length(args->fl)); 828 WRITE32(args->new_lock_owner); 829 if (args->new_lock_owner){ 830 - RESERVE_SPACE(4+NFS4_STATEID_SIZE+20); 831 WRITE32(args->open_seqid->sequence->counter); 832 WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE); 833 WRITE32(args->lock_seqid->sequence->counter); 834 WRITE64(args->lock_owner.clientid); 835 - WRITE32(4); 836 - WRITE32(args->lock_owner.id); 837 } 838 else { 839 RESERVE_SPACE(NFS4_STATEID_SIZE+4); ··· 849 { 850 __be32 *p; 851 852 - RESERVE_SPACE(40); 853 WRITE32(OP_LOCKT); 854 WRITE32(nfs4_lock_type(args->fl, 0)); 855 WRITE64(args->fl->fl_start); 856 WRITE64(nfs4_lock_length(args->fl)); 857 WRITE64(args->lock_owner.clientid); 858 - WRITE32(4); 859 - WRITE32(args->lock_owner.id); 860 861 return 0; 862 } ··· 922 WRITE32(OP_OPEN); 923 WRITE32(arg->seqid->sequence->counter); 924 encode_share_access(xdr, arg->open_flags); 925 - RESERVE_SPACE(16); 926 WRITE64(arg->clientid); 927 - WRITE32(4); 928 - WRITE32(arg->id); 929 } 930 931 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) ··· 1108 1109 static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req) 1110 { 1111 - struct rpc_auth *auth = req->rq_task->tk_auth; 1112 uint32_t attrs[2] = { 1113 FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID, 1114 FATTR4_WORD1_MOUNTED_ON_FILEID, ··· 1154 1155 static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req) 1156 { 1157 - struct rpc_auth *auth = req->rq_task->tk_auth; 1158 unsigned int replen; 1159 __be32 *p; 1160 ··· 1772 */ 1773 static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 1774 { 1775 - struct rpc_auth *auth = req->rq_task->tk_auth; 1776 struct xdr_stream xdr; 1777 struct compound_hdr hdr = { 1778 .nops = 2, ··· 1832 struct nfs_getaclargs *args) 1833 { 1834 struct xdr_stream xdr; 1835 - struct rpc_auth *auth = req->rq_task->tk_auth; 1836 struct compound_hdr hdr = { 1837 .nops = 2, 1838 }; ··· 2067 struct compound_hdr hdr = { 2068 .nops = 3, 2069 }; 2070 - struct rpc_auth *auth = req->rq_task->tk_auth; 2071 int replen; 2072 int status; 2073 ··· 3306 static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) 3307 { 3308 __be32 *p; 3309 - uint32_t bmlen; 3310 int status; 3311 3312 status = decode_op_hdr(xdr, OP_OPEN); ··· 3324 goto xdr_error; 3325 3326 READ_BUF(bmlen << 2); 3327 - p += bmlen; 3328 return decode_delegation(xdr, res); 3329 xdr_error: 3330 dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen);
··· 68 #endif 69 70 /* lock,open owner id: 71 + * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) 72 */ 73 + #define open_owner_id_maxsz (1 + 4) 74 + #define lock_owner_id_maxsz (1 + 4) 75 #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 76 #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 77 #define op_encode_hdr_maxsz (1) ··· 87 #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) 88 #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) 89 #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) 90 + #define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 91 + #define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 92 /* This is based on getfattr, which uses the most attributes: */ 93 #define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \ 94 + 3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz)) 95 #define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \ 96 nfs4_fattr_value_maxsz) 97 #define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) ··· 116 3 + (NFS4_VERIFIER_SIZE >> 2)) 117 #define decode_setclientid_confirm_maxsz \ 118 (op_decode_hdr_maxsz) 119 + #define encode_lookup_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) 120 + #define decode_lookup_maxsz (op_decode_hdr_maxsz) 121 + #define encode_share_access_maxsz \ 122 + (2) 123 + #define encode_createmode_maxsz (1 + nfs4_fattr_maxsz) 124 + #define encode_opentype_maxsz (1 + encode_createmode_maxsz) 125 + #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) 126 + #define encode_open_maxsz (op_encode_hdr_maxsz + \ 127 + 2 + encode_share_access_maxsz + 2 + \ 128 + open_owner_id_maxsz + \ 129 + encode_opentype_maxsz + \ 130 + encode_claim_null_maxsz) 131 + #define decode_ace_maxsz (3 + nfs4_owner_maxsz) 132 + #define decode_delegation_maxsz (1 + XDR_QUADLEN(NFS4_STATEID_SIZE) + 1 + \ 133 + decode_ace_maxsz) 134 + #define decode_change_info_maxsz (5) 135 + #define decode_open_maxsz (op_decode_hdr_maxsz + \ 136 + XDR_QUADLEN(NFS4_STATEID_SIZE) + \ 137 + decode_change_info_maxsz + 1 + \ 138 + nfs4_fattr_bitmap_maxsz + \ 139 + decode_delegation_maxsz) 140 #define encode_remove_maxsz (op_encode_hdr_maxsz + \ 141 nfs4_name_maxsz) 142 #define encode_rename_maxsz (op_encode_hdr_maxsz + \ ··· 134 #define encode_create_maxsz (op_encode_hdr_maxsz + \ 135 2 + nfs4_name_maxsz + \ 136 nfs4_fattr_maxsz) 137 + #define decode_create_maxsz (op_decode_hdr_maxsz + \ 138 + decode_change_info_maxsz + \ 139 + nfs4_fattr_bitmap_maxsz) 140 #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) 141 #define decode_delegreturn_maxsz (op_decode_hdr_maxsz) 142 + #define encode_fs_locations_maxsz \ 143 + (encode_getattr_maxsz) 144 + #define decode_fs_locations_maxsz \ 145 + (0) 146 #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */ 147 #define NFS4_dec_compound_sz (1024) /* XXX: large enough? 
*/ 148 #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \ ··· 174 op_decode_hdr_maxsz + 2 + \ 175 decode_getattr_maxsz) 176 #define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ 177 + encode_putfh_maxsz + \ 178 + encode_savefh_maxsz + \ 179 + encode_open_maxsz + \ 180 + encode_getfh_maxsz + \ 181 + encode_getattr_maxsz + \ 182 + encode_restorefh_maxsz + \ 183 + encode_getattr_maxsz) 184 #define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \ 185 + decode_putfh_maxsz + \ 186 + decode_savefh_maxsz + \ 187 + decode_open_maxsz + \ 188 + decode_getfh_maxsz + \ 189 + decode_getattr_maxsz + \ 190 + decode_restorefh_maxsz + \ 191 + decode_getattr_maxsz) 192 #define NFS4_enc_open_confirm_sz \ 193 (compound_encode_hdr_maxsz + \ 194 encode_putfh_maxsz + \ ··· 193 op_decode_hdr_maxsz + 4) 194 #define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \ 195 encode_putfh_maxsz + \ 196 + encode_open_maxsz + \ 197 + encode_getattr_maxsz) 198 #define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \ 199 decode_putfh_maxsz + \ 200 + decode_open_maxsz + \ 201 + decode_getattr_maxsz) 202 #define NFS4_enc_open_downgrade_sz \ 203 (compound_encode_hdr_maxsz + \ 204 encode_putfh_maxsz + \ ··· 256 op_encode_hdr_maxsz + \ 257 1 + 1 + 2 + 2 + \ 258 1 + 4 + 1 + 2 + \ 259 + lock_owner_id_maxsz) 260 #define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \ 261 decode_putfh_maxsz + \ 262 decode_getattr_maxsz + \ 263 op_decode_hdr_maxsz + \ 264 2 + 2 + 1 + 2 + \ 265 + lock_owner_id_maxsz) 266 #define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \ 267 encode_putfh_maxsz + \ 268 encode_getattr_maxsz + \ 269 op_encode_hdr_maxsz + \ 270 1 + 2 + 2 + 2 + \ 271 + lock_owner_id_maxsz) 272 #define NFS4_dec_lockt_sz (NFS4_dec_lock_sz) 273 #define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \ 274 encode_putfh_maxsz + \ ··· 298 encode_getfh_maxsz) 299 #define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \ 300 decode_putfh_maxsz + \ 301 + decode_lookup_maxsz + \ 302 decode_getattr_maxsz + \ 303 decode_getfh_maxsz) 304 #define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \ ··· 417 #define NFS4_enc_fs_locations_sz \ 418 (compound_encode_hdr_maxsz + \ 419 encode_putfh_maxsz + \ 420 + encode_lookup_maxsz + \ 421 + encode_fs_locations_maxsz) 422 #define NFS4_dec_fs_locations_sz \ 423 (compound_decode_hdr_maxsz + \ 424 decode_putfh_maxsz + \ 425 + decode_lookup_maxsz + \ 426 + decode_fs_locations_maxsz) 427 428 static struct { 429 unsigned int mode; ··· 793 WRITE64(nfs4_lock_length(args->fl)); 794 WRITE32(args->new_lock_owner); 795 if (args->new_lock_owner){ 796 + RESERVE_SPACE(4+NFS4_STATEID_SIZE+32); 797 WRITE32(args->open_seqid->sequence->counter); 798 WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE); 799 WRITE32(args->lock_seqid->sequence->counter); 800 WRITE64(args->lock_owner.clientid); 801 + WRITE32(16); 802 + WRITEMEM("lock id:", 8); 803 + WRITE64(args->lock_owner.id); 804 } 805 else { 806 RESERVE_SPACE(NFS4_STATEID_SIZE+4); ··· 814 { 815 __be32 *p; 816 817 + RESERVE_SPACE(52); 818 WRITE32(OP_LOCKT); 819 WRITE32(nfs4_lock_type(args->fl, 0)); 820 WRITE64(args->fl->fl_start); 821 WRITE64(nfs4_lock_length(args->fl)); 822 WRITE64(args->lock_owner.clientid); 823 + WRITE32(16); 824 + WRITEMEM("lock id:", 8); 825 + WRITE64(args->lock_owner.id); 826 827 return 0; 828 } ··· 886 WRITE32(OP_OPEN); 887 WRITE32(arg->seqid->sequence->counter); 888 encode_share_access(xdr, arg->open_flags); 889 + RESERVE_SPACE(28); 890 WRITE64(arg->clientid); 891 + WRITE32(16); 892 + WRITEMEM("open id:", 8); 893 + 
WRITE64(arg->id); 894 } 895 896 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) ··· 1071 1072 static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req) 1073 { 1074 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 1075 uint32_t attrs[2] = { 1076 FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID, 1077 FATTR4_WORD1_MOUNTED_ON_FILEID, ··· 1117 1118 static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req) 1119 { 1120 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 1121 unsigned int replen; 1122 __be32 *p; 1123 ··· 1735 */ 1736 static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 1737 { 1738 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 1739 struct xdr_stream xdr; 1740 struct compound_hdr hdr = { 1741 .nops = 2, ··· 1795 struct nfs_getaclargs *args) 1796 { 1797 struct xdr_stream xdr; 1798 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 1799 struct compound_hdr hdr = { 1800 .nops = 2, 1801 }; ··· 2030 struct compound_hdr hdr = { 2031 .nops = 3, 2032 }; 2033 + struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 2034 int replen; 2035 int status; 2036 ··· 3269 static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) 3270 { 3271 __be32 *p; 3272 + uint32_t savewords, bmlen, i; 3273 int status; 3274 3275 status = decode_op_hdr(xdr, OP_OPEN); ··· 3287 goto xdr_error; 3288 3289 READ_BUF(bmlen << 2); 3290 + savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE); 3291 + for (i = 0; i < savewords; ++i) 3292 + READ32(res->attrset[i]); 3293 + for (; i < NFS4_BITMAP_SIZE; i++) 3294 + res->attrset[i] = 0; 3295 + 3296 return decode_delegation(xdr, res); 3297 xdr_error: 3298 dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen);
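The encoders above grow the on-the-wire owner from a 4-byte u32 to a 16-byte opaque: a literal "open id:" or "lock id:" tag followed by the 64-bit unique ID. A sketch of that XDR layout in plain C — xdr_put_u32() and encode_owner() are illustrative stand-ins for the kernel's RESERVE_SPACE/WRITE32/WRITEMEM macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t *xdr_put_u32(uint8_t *p, uint32_t v)
{
        /* XDR is big-endian, one 4-byte word at a time */
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
        return p + 4;
}

static uint8_t *xdr_put_u64(uint8_t *p, uint64_t v)
{
        p = xdr_put_u32(p, v >> 32);
        return xdr_put_u32(p, (uint32_t)v);
}

/* Emits 20 bytes: opaque length (16), 8-byte tag, 8-byte ID. */
static uint8_t *encode_owner(uint8_t *p, const char tag[8], uint64_t id)
{
        p = xdr_put_u32(p, 16);
        memcpy(p, tag, 8);
        return xdr_put_u64(p + 8, id);
}

int main(void)
{
        uint8_t buf[20];

        encode_owner(buf, "open id:", 0xdeadbeefULL);
        for (int i = 0; i < 20; i++)
                printf("%02x%s", buf[i], (i % 4 == 3) ? "\n" : " ");
        return 0;
}

The tag makes client-generated owners self-describing in network traces, which is why the reserved buffer sizes and the *_owner_id_maxsz macros all grow in this patch.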
+3 -2
fs/nfs/nfsroot.c
··· 428 printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n", 429 program, version, NIPQUAD(servaddr)); 430 set_sockaddr(&sin, servaddr, 0); 431 - return rpcb_getport_external(&sin, program, version, proto); 432 } 433 434 ··· 496 NFS_MNT3_VERSION : NFS_MNT_VERSION; 497 498 set_sockaddr(&sin, servaddr, htons(mount_port)); 499 - status = nfsroot_mount(&sin, nfs_path, &fh, version, protocol); 500 if (status < 0) 501 printk(KERN_ERR "Root-NFS: Server returned error %d " 502 "while mounting %s\n", status, nfs_path);
··· 428 printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n", 429 program, version, NIPQUAD(servaddr)); 430 set_sockaddr(&sin, servaddr, 0); 431 + return rpcb_getport_sync(&sin, program, version, proto); 432 } 433 434 ··· 496 NFS_MNT3_VERSION : NFS_MNT_VERSION; 497 498 set_sockaddr(&sin, servaddr, htons(mount_port)); 499 + status = nfs_mount((struct sockaddr *) &sin, sizeof(sin), NULL, 500 + nfs_path, version, protocol, &fh); 501 if (status < 0) 502 printk(KERN_ERR "Root-NFS: Server returned error %d " 503 "while mounting %s\n", status, nfs_path);
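The new nfs_mount() call above takes a generic (struct sockaddr *, length) pair rather than a sockaddr_in, the standard C idiom for address-family-agnostic interfaces. The shape of that idiom, sketched in userspace (describe() is a hypothetical consumer):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void describe(const struct sockaddr *sap, socklen_t salen)
{
        char buf[INET_ADDRSTRLEN];

        /* dispatch on the family, then downcast to the specific type */
        if (sap->sa_family == AF_INET && salen >= sizeof(struct sockaddr_in)) {
                const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;

                inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
                printf("IPv4 %s port %u\n", buf, ntohs(sin->sin_port));
        }
}

int main(void)
{
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(2049);
        inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);

        /* callers pass the specific struct through the generic type */
        describe((struct sockaddr *)&sin, sizeof(sin));
        return 0;
}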
+33 -27
fs/nfs/pagelist.c
··· 85 req->wb_offset = offset; 86 req->wb_pgbase = offset; 87 req->wb_bytes = count; 88 - atomic_set(&req->wb_count, 1); 89 req->wb_context = get_nfs_open_context(ctx); 90 - 91 return req; 92 } 93 ··· 108 } 109 110 /** 111 - * nfs_set_page_writeback_locked - Lock a request for writeback 112 * @req: 113 */ 114 - int nfs_set_page_writeback_locked(struct nfs_page *req) 115 { 116 - struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); 117 118 if (!nfs_lock_request(req)) 119 return 0; 120 - radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); 121 return 1; 122 } 123 124 /** 125 - * nfs_clear_page_writeback - Unlock request and wake up sleepers 126 */ 127 - void nfs_clear_page_writeback(struct nfs_page *req) 128 { 129 - struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); 130 131 if (req->wb_page != NULL) { 132 - spin_lock(&nfsi->req_lock); 133 - radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); 134 - spin_unlock(&nfsi->req_lock); 135 } 136 nfs_unlock_request(req); 137 } ··· 160 * 161 * Note: Should never be called with the spinlock held! 162 */ 163 - void 164 - nfs_release_request(struct nfs_page *req) 165 { 166 - if (!atomic_dec_and_test(&req->wb_count)) 167 - return; 168 169 /* Release struct file or cached credential */ 170 nfs_clear_request(req); 171 put_nfs_open_context(req->wb_context); 172 nfs_page_free(req); 173 } 174 175 static int nfs_wait_bit_interruptible(void *word) ··· 196 int 197 nfs_wait_on_request(struct nfs_page *req) 198 { 199 - struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode); 200 sigset_t oldmask; 201 int ret = 0; 202 ··· 382 /** 383 * nfs_scan_list - Scan a list for matching requests 384 * @nfsi: NFS inode 385 - * @head: One of the NFS inode request lists 386 * @dst: Destination list 387 * @idx_start: lower bound of page->index to scan 388 * @npages: idx_start + npages sets the upper bound to scan. 389 * 390 * Moves elements from one of the inode request lists. 391 * If the number of requests is set to 0, the entire address_space 392 * starting at index idx_start, is scanned. 393 * The requests are *not* checked to ensure that they form a contiguous set. 394 - * You must be holding the inode's req_lock when calling this function 395 */ 396 - int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, 397 struct list_head *dst, pgoff_t idx_start, 398 - unsigned int npages) 399 { 400 struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; 401 struct nfs_page *req; ··· 410 idx_end = idx_start + npages - 1; 411 412 for (;;) { 413 - found = radix_tree_gang_lookup(&nfsi->nfs_page_tree, 414 (void **)&pgvec[0], idx_start, 415 - NFS_SCAN_MAXENTRIES); 416 if (found <= 0) 417 break; 418 for (i = 0; i < found; i++) { ··· 420 if (req->wb_index > idx_end) 421 goto out; 422 idx_start = req->wb_index + 1; 423 - if (req->wb_list_head != head) 424 - continue; 425 - if (nfs_set_page_writeback_locked(req)) { 426 nfs_list_remove_request(req); 427 nfs_list_add_request(req, dst); 428 res++; 429 } 430 } 431 - 432 } 433 out: 434 return res;
··· 85 req->wb_offset = offset; 86 req->wb_pgbase = offset; 87 req->wb_bytes = count; 88 req->wb_context = get_nfs_open_context(ctx); 89 + kref_init(&req->wb_kref); 90 return req; 91 } 92 ··· 109 } 110 111 /** 112 + * nfs_set_page_tag_locked - Tag a request as locked 113 * @req: 114 */ 115 + static int nfs_set_page_tag_locked(struct nfs_page *req) 116 { 117 + struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode); 118 119 if (!nfs_lock_request(req)) 120 return 0; 121 + radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 122 return 1; 123 } 124 125 /** 126 + * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers 127 */ 128 + void nfs_clear_page_tag_locked(struct nfs_page *req) 129 { 130 + struct inode *inode = req->wb_context->path.dentry->d_inode; 131 + struct nfs_inode *nfsi = NFS_I(inode); 132 133 if (req->wb_page != NULL) { 134 + spin_lock(&inode->i_lock); 135 + radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 136 + spin_unlock(&inode->i_lock); 137 } 138 nfs_unlock_request(req); 139 } ··· 160 * 161 * Note: Should never be called with the spinlock held! 162 */ 163 + static void nfs_free_request(struct kref *kref) 164 { 165 + struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); 166 167 /* Release struct file or cached credential */ 168 nfs_clear_request(req); 169 put_nfs_open_context(req->wb_context); 170 nfs_page_free(req); 171 + } 172 + 173 + void nfs_release_request(struct nfs_page *req) 174 + { 175 + kref_put(&req->wb_kref, nfs_free_request); 176 } 177 178 static int nfs_wait_bit_interruptible(void *word) ··· 193 int 194 nfs_wait_on_request(struct nfs_page *req) 195 { 196 + struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode); 197 sigset_t oldmask; 198 int ret = 0; 199 ··· 379 /** 380 * nfs_scan_list - Scan a list for matching requests 381 * @nfsi: NFS inode 382 * @dst: Destination list 383 * @idx_start: lower bound of page->index to scan 384 * @npages: idx_start + npages sets the upper bound to scan. 385 + * @tag: tag to scan for 386 * 387 * Moves elements from one of the inode request lists. 388 * If the number of requests is set to 0, the entire address_space 389 * starting at index idx_start, is scanned. 390 * The requests are *not* checked to ensure that they form a contiguous set. 391 + * You must be holding the inode's i_lock when calling this function 392 */ 393 + int nfs_scan_list(struct nfs_inode *nfsi, 394 struct list_head *dst, pgoff_t idx_start, 395 + unsigned int npages, int tag) 396 { 397 struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; 398 struct nfs_page *req; ··· 407 idx_end = idx_start + npages - 1; 408 409 for (;;) { 410 + found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, 411 (void **)&pgvec[0], idx_start, 412 + NFS_SCAN_MAXENTRIES, tag); 413 if (found <= 0) 414 break; 415 for (i = 0; i < found; i++) { ··· 417 if (req->wb_index > idx_end) 418 goto out; 419 idx_start = req->wb_index + 1; 420 + if (nfs_set_page_tag_locked(req)) { 421 nfs_list_remove_request(req); 422 + radix_tree_tag_clear(&nfsi->nfs_page_tree, 423 + req->wb_index, tag); 424 nfs_list_add_request(req, dst); 425 res++; 426 + if (res == INT_MAX) 427 + goto out; 428 } 429 } 430 + /* for latency reduction */ 431 + cond_resched_lock(&nfsi->vfs_inode.i_lock); 432 } 433 out: 434 return res;
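The open-coded wb_count above gives way to a kref, whose final put invokes a release callback. A userspace sketch of the same pattern — these kref helpers mirror the kernel names but are a standalone illustration, with memory-ordering details elided:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
        if (atomic_fetch_sub(&k->refcount, 1) == 1)
                release(k);            /* we dropped the final reference */
}

struct request {
        struct kref ref;               /* first member, so the cast below works */
        int id;
};

/* the kernel would use container_of() to recover the outer struct */
static void request_free(struct kref *k)
{
        struct request *req = (struct request *)k;

        printf("freeing request %d\n", req->id);
        free(req);
}

int main(void)
{
        struct request *req = malloc(sizeof(*req));

        req->id = 42;
        kref_init(&req->ref);
        kref_get(&req->ref);                  /* a second holder */
        kref_put(&req->ref, request_free);    /* still alive */
        kref_put(&req->ref, request_free);    /* last put runs request_free */
        return 0;
}

Tying destruction to the put itself, as nfs_release_request() now does, removes the "check count, then free" race that the old atomic_dec_and_test() pattern had to guard by convention.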
+26 -14
fs/nfs/read.c
··· 145 unlock_page(req->wb_page); 146 147 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", 148 - req->wb_context->dentry->d_inode->i_sb->s_id, 149 - (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 150 req->wb_bytes, 151 (long long)req_offset(req)); 152 nfs_clear_request(req); ··· 164 int flags; 165 166 data->req = req; 167 - data->inode = inode = req->wb_context->dentry->d_inode; 168 data->cred = req->wb_context->cred; 169 170 data->args.fh = NFS_FH(inode); ··· 483 */ 484 error = nfs_wb_page(inode, page); 485 if (error) 486 - goto out_error; 487 488 error = -ESTALE; 489 if (NFS_STALE(inode)) 490 - goto out_error; 491 492 if (file == NULL) { 493 error = -EBADF; 494 ctx = nfs_find_open_context(inode, NULL, FMODE_READ); 495 if (ctx == NULL) 496 - goto out_error; 497 } else 498 ctx = get_nfs_open_context((struct nfs_open_context *) 499 file->private_data); ··· 504 505 put_nfs_open_context(ctx); 506 return error; 507 - 508 - out_error: 509 unlock_page(page); 510 return error; 511 } ··· 521 struct inode *inode = page->mapping->host; 522 struct nfs_page *new; 523 unsigned int len; 524 525 - nfs_wb_page(inode, page); 526 len = nfs_page_length(page); 527 if (len == 0) 528 return nfs_return_empty_page(page); 529 new = nfs_create_request(desc->ctx, inode, page, 0, len); 530 - if (IS_ERR(new)) { 531 - SetPageError(page); 532 - unlock_page(page); 533 - return PTR_ERR(new); 534 - } 535 if (len < PAGE_CACHE_SIZE) 536 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); 537 nfs_pageio_add_request(desc->pgio, new); 538 return 0; 539 } 540 541 int nfs_readpages(struct file *filp, struct address_space *mapping,
··· 145 unlock_page(req->wb_page); 146 147 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", 148 + req->wb_context->path.dentry->d_inode->i_sb->s_id, 149 + (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 150 req->wb_bytes, 151 (long long)req_offset(req)); 152 nfs_clear_request(req); ··· 164 int flags; 165 166 data->req = req; 167 + data->inode = inode = req->wb_context->path.dentry->d_inode; 168 data->cred = req->wb_context->cred; 169 170 data->args.fh = NFS_FH(inode); ··· 483 */ 484 error = nfs_wb_page(inode, page); 485 if (error) 486 + goto out_unlock; 487 + if (PageUptodate(page)) 488 + goto out_unlock; 489 490 error = -ESTALE; 491 if (NFS_STALE(inode)) 492 + goto out_unlock; 493 494 if (file == NULL) { 495 error = -EBADF; 496 ctx = nfs_find_open_context(inode, NULL, FMODE_READ); 497 if (ctx == NULL) 498 + goto out_unlock; 499 } else 500 ctx = get_nfs_open_context((struct nfs_open_context *) 501 file->private_data); ··· 502 503 put_nfs_open_context(ctx); 504 return error; 505 + out_unlock: 506 unlock_page(page); 507 return error; 508 } ··· 520 struct inode *inode = page->mapping->host; 521 struct nfs_page *new; 522 unsigned int len; 523 + int error; 524 525 + error = nfs_wb_page(inode, page); 526 + if (error) 527 + goto out_unlock; 528 + if (PageUptodate(page)) 529 + goto out_unlock; 530 + 531 len = nfs_page_length(page); 532 if (len == 0) 533 return nfs_return_empty_page(page); 534 + 535 new = nfs_create_request(desc->ctx, inode, page, 0, len); 536 + if (IS_ERR(new)) 537 + goto out_error; 538 + 539 if (len < PAGE_CACHE_SIZE) 540 zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); 541 nfs_pageio_add_request(desc->pgio, new); 542 return 0; 543 + out_error: 544 + error = PTR_ERR(new); 545 + SetPageError(page); 546 + out_unlock: 547 + unlock_page(page); 548 + return error; 549 } 550 551 int nfs_readpages(struct file *filp, struct address_space *mapping,
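nfs_readpage() and the readpages filler above are restructured around the kernel's goto-unwind idiom: one label per cleanup level, jumped to in reverse order of acquisition so every exit path releases exactly what it holds. The bare shape of the pattern, with hypothetical resources:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int process(const char *path)
{
        int error;
        FILE *f = fopen(path, "r");

        if (f == NULL)
                return -errno;          /* nothing acquired yet */

        char *buf = malloc(4096);
        if (buf == NULL) {
                error = -ENOMEM;
                goto out_close;         /* only the file needs cleanup */
        }

        if (fread(buf, 1, 4096, f) == 0 && ferror(f)) {
                error = -EIO;
                goto out_free;          /* unwind buffer, then file */
        }
        error = 0;

out_free:
        free(buf);
out_close:
        fclose(f);
        return error;
}

int main(int argc, char **argv)
{
        printf("process: %d\n", process(argc > 1 ? argv[1] : "/etc/hostname"));
        return 0;
}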
+1030 -165
fs/nfs/super.c
··· 45 #include <linux/inet.h> 46 #include <linux/nfs_xdr.h> 47 #include <linux/magic.h> 48 49 #include <asm/system.h> 50 #include <asm/uaccess.h> ··· 57 #include "internal.h" 58 59 #define NFSDBG_FACILITY NFSDBG_VFS 60 61 static void nfs_umount_begin(struct vfsmount *, int); 62 static int nfs_statfs(struct dentry *, struct kstatfs *); ··· 425 { RPC_AUTH_GSS_SPKM, "spkm" }, 426 { RPC_AUTH_GSS_SPKMI, "spkmi" }, 427 { RPC_AUTH_GSS_SPKMP, "spkmp" }, 428 - { -1, "unknown" } 429 }; 430 int i; 431 432 - for (i=0; sec_flavours[i].flavour != -1; i++) { 433 if (sec_flavours[i].flavour == flavour) 434 break; 435 } ··· 453 { NFS_MOUNT_NONLM, ",nolock", "" }, 454 { NFS_MOUNT_NOACL, ",noacl", "" }, 455 { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, 456 { 0, NULL, NULL } 457 }; 458 const struct proc_nfs_info *nfs_infop; ··· 593 */ 594 static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags) 595 { 596 shrink_submounts(vfsmnt, &nfs_automount_list); 597 } 598 599 /* 600 * Validate the NFS2/NFS3 mount data 601 * - fills in the mount root filehandle 602 */ 603 - static int nfs_validate_mount_data(struct nfs_mount_data *data, 604 - struct nfs_fh *mntfh) 605 { 606 - if (data == NULL) { 607 - dprintk("%s: missing data argument\n", __FUNCTION__); 608 - return -EINVAL; 609 - } 610 611 - if (data->version <= 0 || data->version > NFS_MOUNT_VERSION) { 612 - dprintk("%s: bad mount version\n", __FUNCTION__); 613 - return -EINVAL; 614 - } 615 616 switch (data->version) { 617 - case 1: 618 - data->namlen = 0; 619 - case 2: 620 - data->bsize = 0; 621 - case 3: 622 - if (data->flags & NFS_MOUNT_VER3) { 623 - dprintk("%s: mount structure version %d does not support NFSv3\n", 624 - __FUNCTION__, 625 - data->version); 626 - return -EINVAL; 627 - } 628 - data->root.size = NFS2_FHSIZE; 629 - memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); 630 - case 4: 631 - if (data->flags & NFS_MOUNT_SECFLAVOUR) { 632 - dprintk("%s: mount structure version %d does not support strong security\n", 633 - __FUNCTION__, 634 - data->version); 635 - return -EINVAL; 636 - } 637 - case 5: 638 - memset(data->context, 0, sizeof(data->context)); 639 } 640 641 - /* Set the pseudoflavor */ 642 if (!(data->flags & NFS_MOUNT_SECFLAVOUR)) 643 data->pseudoflavor = RPC_AUTH_UNIX; 644 645 #ifndef CONFIG_NFS_V3 646 - /* If NFSv3 is not compiled in, return -EPROTONOSUPPORT */ 647 - if (data->flags & NFS_MOUNT_VER3) { 648 - dprintk("%s: NFSv3 not compiled into kernel\n", __FUNCTION__); 649 - return -EPROTONOSUPPORT; 650 - } 651 - #endif /* CONFIG_NFS_V3 */ 652 - 653 - /* We now require that the mount process passes the remote address */ 654 - if (data->addr.sin_addr.s_addr == INADDR_ANY) { 655 - dprintk("%s: mount program didn't pass remote address!\n", 656 - __FUNCTION__); 657 - return -EINVAL; 658 - } 659 - 660 - /* Prepare the root filehandle */ 661 if (data->flags & NFS_MOUNT_VER3) 662 - mntfh->size = data->root.size; 663 - else 664 - mntfh->size = NFS2_FHSIZE; 665 666 - if (mntfh->size > sizeof(mntfh->data)) { 667 - dprintk("%s: invalid root filehandle\n", __FUNCTION__); 668 - return -EINVAL; 669 - } 670 - 671 - memcpy(mntfh->data, data->root.data, mntfh->size); 672 - if (mntfh->size < sizeof(mntfh->data)) 673 - memset(mntfh->data + mntfh->size, 0, 674 - sizeof(mntfh->data) - mntfh->size); 675 676 return 0; 677 } 678 679 /* ··· 1317 { 1318 struct nfs_server *server = data, *old = NFS_SB(sb); 1319 1320 - if (old->nfs_client != server->nfs_client) 1321 return 0; 1322 if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0) 1323 return 0; 
1324 return 1; 1325 } 1326 1327 static int nfs_get_sb(struct file_system_type *fs_type, ··· 1370 struct nfs_fh mntfh; 1371 struct nfs_mount_data *data = raw_data; 1372 struct dentry *mntroot; 1373 int error; 1374 1375 /* Validate the mount data */ 1376 - error = nfs_validate_mount_data(data, &mntfh); 1377 if (error < 0) 1378 - return error; 1379 1380 /* Get a volume representation */ 1381 server = nfs_create_server(data, &mntfh); 1382 if (IS_ERR(server)) { 1383 error = PTR_ERR(server); 1384 - goto out_err_noserver; 1385 } 1386 1387 /* Get a superblock - note that we may end up sharing one that already exists */ 1388 - s = sget(fs_type, nfs_compare_super, nfs_set_super, server); 1389 if (IS_ERR(s)) { 1390 error = PTR_ERR(s); 1391 goto out_err_nosb; 1392 } 1393 1394 if (s->s_fs_info != server) { 1395 nfs_free_server(server); 1396 server = NULL; 1397 } 1398 1399 if (!s->s_root) { ··· 1418 s->s_flags |= MS_ACTIVE; 1419 mnt->mnt_sb = s; 1420 mnt->mnt_root = mntroot; 1421 - return 0; 1422 1423 out_err_nosb: 1424 nfs_free_server(server); 1425 - out_err_noserver: 1426 - return error; 1427 1428 error_splat_super: 1429 up_write(&s->s_umount); 1430 deactivate_super(s); 1431 - return error; 1432 } 1433 1434 /* ··· 1457 struct super_block *s; 1458 struct nfs_server *server; 1459 struct dentry *mntroot; 1460 int error; 1461 1462 dprintk("--> nfs_xdev_get_sb()\n"); ··· 1469 goto out_err_noserver; 1470 } 1471 1472 /* Get a superblock - note that we may end up sharing one that already exists */ 1473 - s = sget(&nfs_fs_type, nfs_compare_super, nfs_set_super, server); 1474 if (IS_ERR(s)) { 1475 error = PTR_ERR(s); 1476 goto out_err_nosb; 1477 } 1478 1479 if (s->s_fs_info != server) { 1480 nfs_free_server(server); 1481 server = NULL; 1482 } 1483 1484 if (!s->s_root) { ··· 1545 nfs_initialise_sb(sb); 1546 } 1547 1548 - static void *nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen) 1549 { 1550 - void *p = NULL; 1551 1552 - if (!src->len) 1553 - return ERR_PTR(-EINVAL); 1554 - if (src->len < maxlen) 1555 - maxlen = src->len; 1556 - if (dst == NULL) { 1557 - p = dst = kmalloc(maxlen + 1, GFP_KERNEL); 1558 - if (p == NULL) 1559 - return ERR_PTR(-ENOMEM); 1560 } 1561 - if (copy_from_user(dst, src->data, maxlen)) { 1562 - kfree(p); 1563 - return ERR_PTR(-EFAULT); 1564 - } 1565 - dst[maxlen] = '\0'; 1566 - return dst; 1567 } 1568 1569 /* ··· 1718 rpc_authflavor_t authflavour; 1719 struct nfs_fh mntfh; 1720 struct dentry *mntroot; 1721 - char *mntpath = NULL, *hostname = NULL, ip_addr[16]; 1722 - void *p; 1723 int error; 1724 1725 - if (data == NULL) { 1726 - dprintk("%s: missing data argument\n", __FUNCTION__); 1727 - return -EINVAL; 1728 - } 1729 - if (data->version <= 0 || data->version > NFS4_MOUNT_VERSION) { 1730 - dprintk("%s: bad mount version\n", __FUNCTION__); 1731 - return -EINVAL; 1732 - } 1733 - 1734 - /* We now require that the mount process passes the remote address */ 1735 - if (data->host_addrlen != sizeof(addr)) 1736 - return -EINVAL; 1737 - 1738 - if (copy_from_user(&addr, data->host_addr, sizeof(addr))) 1739 - return -EFAULT; 1740 - 1741 - if (addr.sin_family != AF_INET || 1742 - addr.sin_addr.s_addr == INADDR_ANY 1743 - ) { 1744 - dprintk("%s: mount program didn't pass remote IP address!\n", 1745 - __FUNCTION__); 1746 - return -EINVAL; 1747 - } 1748 - /* RFC3530: The default port for NFS is 2049 */ 1749 - if (addr.sin_port == 0) 1750 - addr.sin_port = htons(NFS_PORT); 1751 - 1752 - /* Grab the authentication type */ 1753 - authflavour = RPC_AUTH_UNIX; 1754 - if 
(data->auth_flavourlen != 0) { 1755 - if (data->auth_flavourlen != 1) { 1756 - dprintk("%s: Invalid number of RPC auth flavours %d.\n", 1757 - __FUNCTION__, data->auth_flavourlen); 1758 - error = -EINVAL; 1759 - goto out_err_noserver; 1760 - } 1761 - 1762 - if (copy_from_user(&authflavour, data->auth_flavours, 1763 - sizeof(authflavour))) { 1764 - error = -EFAULT; 1765 - goto out_err_noserver; 1766 - } 1767 - } 1768 - 1769 - p = nfs_copy_user_string(NULL, &data->hostname, 256); 1770 - if (IS_ERR(p)) 1771 - goto out_err; 1772 - hostname = p; 1773 - 1774 - p = nfs_copy_user_string(NULL, &data->mnt_path, 1024); 1775 - if (IS_ERR(p)) 1776 - goto out_err; 1777 - mntpath = p; 1778 - 1779 - dprintk("MNTPATH: %s\n", mntpath); 1780 - 1781 - p = nfs_copy_user_string(ip_addr, &data->client_addr, 1782 - sizeof(ip_addr) - 1); 1783 - if (IS_ERR(p)) 1784 - goto out_err; 1785 1786 /* Get a volume representation */ 1787 server = nfs4_create_server(data, hostname, &addr, mntpath, ip_addr, 1788 authflavour, &mntfh); 1789 if (IS_ERR(server)) { 1790 error = PTR_ERR(server); 1791 - goto out_err_noserver; 1792 } 1793 1794 /* Get a superblock - note that we may end up sharing one that already exists */ 1795 - s = sget(fs_type, nfs_compare_super, nfs_set_super, server); 1796 if (IS_ERR(s)) { 1797 error = PTR_ERR(s); 1798 goto out_free; ··· 1766 s->s_flags |= MS_ACTIVE; 1767 mnt->mnt_sb = s; 1768 mnt->mnt_root = mntroot; 1769 - kfree(mntpath); 1770 - kfree(hostname); 1771 - return 0; 1772 1773 - out_err: 1774 - error = PTR_ERR(p); 1775 - goto out_err_noserver; 1776 - 1777 - out_free: 1778 - nfs_free_server(server); 1779 - out_err_noserver: 1780 kfree(mntpath); 1781 kfree(hostname); 1782 return error; 1783 1784 error_splat_super: 1785 up_write(&s->s_umount); 1786 deactivate_super(s); 1787 - goto out_err_noserver; 1788 } 1789 1790 static void nfs4_kill_super(struct super_block *sb) ··· 1806 struct super_block *s; 1807 struct nfs_server *server; 1808 struct dentry *mntroot; 1809 int error; 1810 1811 dprintk("--> nfs4_xdev_get_sb()\n"); ··· 1818 goto out_err_noserver; 1819 } 1820 1821 /* Get a superblock - note that we may end up sharing one that already exists */ 1822 - s = sget(&nfs_fs_type, nfs_compare_super, nfs_set_super, server); 1823 if (IS_ERR(s)) { 1824 error = PTR_ERR(s); 1825 goto out_err_nosb; ··· 1877 struct nfs_server *server; 1878 struct dentry *mntroot; 1879 struct nfs_fh mntfh; 1880 int error; 1881 1882 dprintk("--> nfs4_referral_get_sb()\n"); ··· 1889 goto out_err_noserver; 1890 } 1891 1892 /* Get a superblock - note that we may end up sharing one that already exists */ 1893 - s = sget(&nfs_fs_type, nfs_compare_super, nfs_set_super, server); 1894 if (IS_ERR(s)) { 1895 error = PTR_ERR(s); 1896 goto out_err_nosb;
··· 45 #include <linux/inet.h> 46 #include <linux/nfs_xdr.h> 47 #include <linux/magic.h> 48 + #include <linux/parser.h> 49 50 #include <asm/system.h> 51 #include <asm/uaccess.h> ··· 56 #include "internal.h" 57 58 #define NFSDBG_FACILITY NFSDBG_VFS 59 + 60 + 61 + struct nfs_parsed_mount_data { 62 + int flags; 63 + int rsize, wsize; 64 + int timeo, retrans; 65 + int acregmin, acregmax, 66 + acdirmin, acdirmax; 67 + int namlen; 68 + unsigned int bsize; 69 + unsigned int auth_flavor_len; 70 + rpc_authflavor_t auth_flavors[1]; 71 + char *client_address; 72 + 73 + struct { 74 + struct sockaddr_in address; 75 + unsigned int program; 76 + unsigned int version; 77 + unsigned short port; 78 + int protocol; 79 + } mount_server; 80 + 81 + struct { 82 + struct sockaddr_in address; 83 + char *hostname; 84 + char *export_path; 85 + unsigned int program; 86 + int protocol; 87 + } nfs_server; 88 + }; 89 + 90 + enum { 91 + /* Mount options that take no arguments */ 92 + Opt_soft, Opt_hard, 93 + Opt_intr, Opt_nointr, 94 + Opt_posix, Opt_noposix, 95 + Opt_cto, Opt_nocto, 96 + Opt_ac, Opt_noac, 97 + Opt_lock, Opt_nolock, 98 + Opt_v2, Opt_v3, 99 + Opt_udp, Opt_tcp, 100 + Opt_acl, Opt_noacl, 101 + Opt_rdirplus, Opt_nordirplus, 102 + Opt_sharecache, Opt_nosharecache, 103 + 104 + /* Mount options that take integer arguments */ 105 + Opt_port, 106 + Opt_rsize, Opt_wsize, Opt_bsize, 107 + Opt_timeo, Opt_retrans, 108 + Opt_acregmin, Opt_acregmax, 109 + Opt_acdirmin, Opt_acdirmax, 110 + Opt_actimeo, 111 + Opt_namelen, 112 + Opt_mountport, 113 + Opt_mountprog, Opt_mountvers, 114 + Opt_nfsprog, Opt_nfsvers, 115 + 116 + /* Mount options that take string arguments */ 117 + Opt_sec, Opt_proto, Opt_mountproto, 118 + Opt_addr, Opt_mounthost, Opt_clientaddr, 119 + 120 + /* Mount options that are ignored */ 121 + Opt_userspace, Opt_deprecated, 122 + 123 + Opt_err 124 + }; 125 + 126 + static match_table_t nfs_mount_option_tokens = { 127 + { Opt_userspace, "bg" }, 128 + { Opt_userspace, "fg" }, 129 + { Opt_soft, "soft" }, 130 + { Opt_hard, "hard" }, 131 + { Opt_intr, "intr" }, 132 + { Opt_nointr, "nointr" }, 133 + { Opt_posix, "posix" }, 134 + { Opt_noposix, "noposix" }, 135 + { Opt_cto, "cto" }, 136 + { Opt_nocto, "nocto" }, 137 + { Opt_ac, "ac" }, 138 + { Opt_noac, "noac" }, 139 + { Opt_lock, "lock" }, 140 + { Opt_nolock, "nolock" }, 141 + { Opt_v2, "v2" }, 142 + { Opt_v3, "v3" }, 143 + { Opt_udp, "udp" }, 144 + { Opt_tcp, "tcp" }, 145 + { Opt_acl, "acl" }, 146 + { Opt_noacl, "noacl" }, 147 + { Opt_rdirplus, "rdirplus" }, 148 + { Opt_nordirplus, "nordirplus" }, 149 + { Opt_sharecache, "sharecache" }, 150 + { Opt_nosharecache, "nosharecache" }, 151 + 152 + { Opt_port, "port=%u" }, 153 + { Opt_rsize, "rsize=%u" }, 154 + { Opt_wsize, "wsize=%u" }, 155 + { Opt_bsize, "bsize=%u" }, 156 + { Opt_timeo, "timeo=%u" }, 157 + { Opt_retrans, "retrans=%u" }, 158 + { Opt_acregmin, "acregmin=%u" }, 159 + { Opt_acregmax, "acregmax=%u" }, 160 + { Opt_acdirmin, "acdirmin=%u" }, 161 + { Opt_acdirmax, "acdirmax=%u" }, 162 + { Opt_actimeo, "actimeo=%u" }, 163 + { Opt_userspace, "retry=%u" }, 164 + { Opt_namelen, "namlen=%u" }, 165 + { Opt_mountport, "mountport=%u" }, 166 + { Opt_mountprog, "mountprog=%u" }, 167 + { Opt_mountvers, "mountvers=%u" }, 168 + { Opt_nfsprog, "nfsprog=%u" }, 169 + { Opt_nfsvers, "nfsvers=%u" }, 170 + { Opt_nfsvers, "vers=%u" }, 171 + 172 + { Opt_sec, "sec=%s" }, 173 + { Opt_proto, "proto=%s" }, 174 + { Opt_mountproto, "mountproto=%s" }, 175 + { Opt_addr, "addr=%s" }, 176 + { Opt_clientaddr, "clientaddr=%s" }, 177 + { 
Opt_mounthost, "mounthost=%s" }, 178 + 179 + { Opt_err, NULL } 180 + }; 181 + 182 + enum { 183 + Opt_xprt_udp, Opt_xprt_tcp, 184 + 185 + Opt_xprt_err 186 + }; 187 + 188 + static match_table_t nfs_xprt_protocol_tokens = { 189 + { Opt_xprt_udp, "udp" }, 190 + { Opt_xprt_tcp, "tcp" }, 191 + 192 + { Opt_xprt_err, NULL } 193 + }; 194 + 195 + enum { 196 + Opt_sec_none, Opt_sec_sys, 197 + Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p, 198 + Opt_sec_lkey, Opt_sec_lkeyi, Opt_sec_lkeyp, 199 + Opt_sec_spkm, Opt_sec_spkmi, Opt_sec_spkmp, 200 + 201 + Opt_sec_err 202 + }; 203 + 204 + static match_table_t nfs_secflavor_tokens = { 205 + { Opt_sec_none, "none" }, 206 + { Opt_sec_none, "null" }, 207 + { Opt_sec_sys, "sys" }, 208 + 209 + { Opt_sec_krb5, "krb5" }, 210 + { Opt_sec_krb5i, "krb5i" }, 211 + { Opt_sec_krb5p, "krb5p" }, 212 + 213 + { Opt_sec_lkey, "lkey" }, 214 + { Opt_sec_lkeyi, "lkeyi" }, 215 + { Opt_sec_lkeyp, "lkeyp" }, 216 + 217 + { Opt_sec_err, NULL } 218 + }; 219 + 220 221 static void nfs_umount_begin(struct vfsmount *, int); 222 static int nfs_statfs(struct dentry *, struct kstatfs *); ··· 263 { RPC_AUTH_GSS_SPKM, "spkm" }, 264 { RPC_AUTH_GSS_SPKMI, "spkmi" }, 265 { RPC_AUTH_GSS_SPKMP, "spkmp" }, 266 + { UINT_MAX, "unknown" } 267 }; 268 int i; 269 270 + for (i = 0; sec_flavours[i].flavour != UINT_MAX; i++) { 271 if (sec_flavours[i].flavour == flavour) 272 break; 273 } ··· 291 { NFS_MOUNT_NONLM, ",nolock", "" }, 292 { NFS_MOUNT_NOACL, ",noacl", "" }, 293 { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, 294 + { NFS_MOUNT_UNSHARED, ",nosharecache", ""}, 295 { 0, NULL, NULL } 296 }; 297 const struct proc_nfs_info *nfs_infop; ··· 430 */ 431 static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags) 432 { 433 + struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb); 434 + struct rpc_clnt *rpc; 435 + 436 shrink_submounts(vfsmnt, &nfs_automount_list); 437 + 438 + if (!(flags & MNT_FORCE)) 439 + return; 440 + /* -EIO all pending I/O */ 441 + rpc = server->client_acl; 442 + if (!IS_ERR(rpc)) 443 + rpc_killall_tasks(rpc); 444 + rpc = server->client; 445 + if (!IS_ERR(rpc)) 446 + rpc_killall_tasks(rpc); 447 + } 448 + 449 + /* 450 + * Sanity-check a server address provided by the mount command 451 + */ 452 + static int nfs_verify_server_address(struct sockaddr *addr) 453 + { 454 + switch (addr->sa_family) { 455 + case AF_INET: { 456 + struct sockaddr_in *sa = (struct sockaddr_in *) addr; 457 + if (sa->sin_addr.s_addr != INADDR_ANY) 458 + return 1; 459 + break; 460 + } 461 + } 462 + 463 + return 0; 464 + } 465 + 466 + /* 467 + * Error-check and convert a string of mount options from user space into 468 + * a data structure 469 + */ 470 + static int nfs_parse_mount_options(char *raw, 471 + struct nfs_parsed_mount_data *mnt) 472 + { 473 + char *p, *string; 474 + 475 + if (!raw) { 476 + dfprintk(MOUNT, "NFS: mount options string was NULL.\n"); 477 + return 1; 478 + } 479 + dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw); 480 + 481 + while ((p = strsep(&raw, ",")) != NULL) { 482 + substring_t args[MAX_OPT_ARGS]; 483 + int option, token; 484 + 485 + if (!*p) 486 + continue; 487 + 488 + dfprintk(MOUNT, "NFS: parsing nfs mount option '%s'\n", p); 489 + 490 + token = match_token(p, nfs_mount_option_tokens, args); 491 + switch (token) { 492 + case Opt_soft: 493 + mnt->flags |= NFS_MOUNT_SOFT; 494 + break; 495 + case Opt_hard: 496 + mnt->flags &= ~NFS_MOUNT_SOFT; 497 + break; 498 + case Opt_intr: 499 + mnt->flags |= NFS_MOUNT_INTR; 500 + break; 501 + case Opt_nointr: 502 + mnt->flags &= ~NFS_MOUNT_INTR; 503 + break; 
504 + case Opt_posix: 505 + mnt->flags |= NFS_MOUNT_POSIX; 506 + break; 507 + case Opt_noposix: 508 + mnt->flags &= ~NFS_MOUNT_POSIX; 509 + break; 510 + case Opt_cto: 511 + mnt->flags &= ~NFS_MOUNT_NOCTO; 512 + break; 513 + case Opt_nocto: 514 + mnt->flags |= NFS_MOUNT_NOCTO; 515 + break; 516 + case Opt_ac: 517 + mnt->flags &= ~NFS_MOUNT_NOAC; 518 + break; 519 + case Opt_noac: 520 + mnt->flags |= NFS_MOUNT_NOAC; 521 + break; 522 + case Opt_lock: 523 + mnt->flags &= ~NFS_MOUNT_NONLM; 524 + break; 525 + case Opt_nolock: 526 + mnt->flags |= NFS_MOUNT_NONLM; 527 + break; 528 + case Opt_v2: 529 + mnt->flags &= ~NFS_MOUNT_VER3; 530 + break; 531 + case Opt_v3: 532 + mnt->flags |= NFS_MOUNT_VER3; 533 + break; 534 + case Opt_udp: 535 + mnt->flags &= ~NFS_MOUNT_TCP; 536 + mnt->nfs_server.protocol = IPPROTO_UDP; 537 + mnt->timeo = 7; 538 + mnt->retrans = 5; 539 + break; 540 + case Opt_tcp: 541 + mnt->flags |= NFS_MOUNT_TCP; 542 + mnt->nfs_server.protocol = IPPROTO_TCP; 543 + mnt->timeo = 600; 544 + mnt->retrans = 2; 545 + break; 546 + case Opt_acl: 547 + mnt->flags &= ~NFS_MOUNT_NOACL; 548 + break; 549 + case Opt_noacl: 550 + mnt->flags |= NFS_MOUNT_NOACL; 551 + break; 552 + case Opt_rdirplus: 553 + mnt->flags &= ~NFS_MOUNT_NORDIRPLUS; 554 + break; 555 + case Opt_nordirplus: 556 + mnt->flags |= NFS_MOUNT_NORDIRPLUS; 557 + break; 558 + case Opt_sharecache: 559 + mnt->flags &= ~NFS_MOUNT_UNSHARED; 560 + break; 561 + case Opt_nosharecache: 562 + mnt->flags |= NFS_MOUNT_UNSHARED; 563 + break; 564 + 565 + case Opt_port: 566 + if (match_int(args, &option)) 567 + return 0; 568 + if (option < 0 || option > 65535) 569 + return 0; 570 + mnt->nfs_server.address.sin_port = htonl(option); 571 + break; 572 + case Opt_rsize: 573 + if (match_int(args, &mnt->rsize)) 574 + return 0; 575 + break; 576 + case Opt_wsize: 577 + if (match_int(args, &mnt->wsize)) 578 + return 0; 579 + break; 580 + case Opt_bsize: 581 + if (match_int(args, &option)) 582 + return 0; 583 + if (option < 0) 584 + return 0; 585 + mnt->bsize = option; 586 + break; 587 + case Opt_timeo: 588 + if (match_int(args, &mnt->timeo)) 589 + return 0; 590 + break; 591 + case Opt_retrans: 592 + if (match_int(args, &mnt->retrans)) 593 + return 0; 594 + break; 595 + case Opt_acregmin: 596 + if (match_int(args, &mnt->acregmin)) 597 + return 0; 598 + break; 599 + case Opt_acregmax: 600 + if (match_int(args, &mnt->acregmax)) 601 + return 0; 602 + break; 603 + case Opt_acdirmin: 604 + if (match_int(args, &mnt->acdirmin)) 605 + return 0; 606 + break; 607 + case Opt_acdirmax: 608 + if (match_int(args, &mnt->acdirmax)) 609 + return 0; 610 + break; 611 + case Opt_actimeo: 612 + if (match_int(args, &option)) 613 + return 0; 614 + if (option < 0) 615 + return 0; 616 + mnt->acregmin = 617 + mnt->acregmax = 618 + mnt->acdirmin = 619 + mnt->acdirmax = option; 620 + break; 621 + case Opt_namelen: 622 + if (match_int(args, &mnt->namlen)) 623 + return 0; 624 + break; 625 + case Opt_mountport: 626 + if (match_int(args, &option)) 627 + return 0; 628 + if (option < 0 || option > 65535) 629 + return 0; 630 + mnt->mount_server.port = option; 631 + break; 632 + case Opt_mountprog: 633 + if (match_int(args, &option)) 634 + return 0; 635 + if (option < 0) 636 + return 0; 637 + mnt->mount_server.program = option; 638 + break; 639 + case Opt_mountvers: 640 + if (match_int(args, &option)) 641 + return 0; 642 + if (option < 0) 643 + return 0; 644 + mnt->mount_server.version = option; 645 + break; 646 + case Opt_nfsprog: 647 + if (match_int(args, &option)) 648 + return 0; 649 + if (option < 
0) 650 + return 0; 651 + mnt->nfs_server.program = option; 652 + break; 653 + case Opt_nfsvers: 654 + if (match_int(args, &option)) 655 + return 0; 656 + switch (option) { 657 + case 2: 658 + mnt->flags &= ~NFS_MOUNT_VER3; 659 + break; 660 + case 3: 661 + mnt->flags |= NFS_MOUNT_VER3; 662 + break; 663 + default: 664 + goto out_unrec_vers; 665 + } 666 + break; 667 + 668 + case Opt_sec: 669 + string = match_strdup(args); 670 + if (string == NULL) 671 + goto out_nomem; 672 + token = match_token(string, nfs_secflavor_tokens, args); 673 + kfree(string); 674 + 675 + /* 676 + * The flags setting is for v2/v3. The flavor_len 677 + * setting is for v4. v2/v3 also need to know the 678 + * difference between NULL and UNIX. 679 + */ 680 + switch (token) { 681 + case Opt_sec_none: 682 + mnt->flags &= ~NFS_MOUNT_SECFLAVOUR; 683 + mnt->auth_flavor_len = 0; 684 + mnt->auth_flavors[0] = RPC_AUTH_NULL; 685 + break; 686 + case Opt_sec_sys: 687 + mnt->flags &= ~NFS_MOUNT_SECFLAVOUR; 688 + mnt->auth_flavor_len = 0; 689 + mnt->auth_flavors[0] = RPC_AUTH_UNIX; 690 + break; 691 + case Opt_sec_krb5: 692 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 693 + mnt->auth_flavor_len = 1; 694 + mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5; 695 + break; 696 + case Opt_sec_krb5i: 697 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 698 + mnt->auth_flavor_len = 1; 699 + mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5I; 700 + break; 701 + case Opt_sec_krb5p: 702 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 703 + mnt->auth_flavor_len = 1; 704 + mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5P; 705 + break; 706 + case Opt_sec_lkey: 707 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 708 + mnt->auth_flavor_len = 1; 709 + mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEY; 710 + break; 711 + case Opt_sec_lkeyi: 712 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 713 + mnt->auth_flavor_len = 1; 714 + mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYI; 715 + break; 716 + case Opt_sec_lkeyp: 717 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 718 + mnt->auth_flavor_len = 1; 719 + mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYP; 720 + break; 721 + case Opt_sec_spkm: 722 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 723 + mnt->auth_flavor_len = 1; 724 + mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKM; 725 + break; 726 + case Opt_sec_spkmi: 727 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 728 + mnt->auth_flavor_len = 1; 729 + mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMI; 730 + break; 731 + case Opt_sec_spkmp: 732 + mnt->flags |= NFS_MOUNT_SECFLAVOUR; 733 + mnt->auth_flavor_len = 1; 734 + mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMP; 735 + break; 736 + default: 737 + goto out_unrec_sec; 738 + } 739 + break; 740 + case Opt_proto: 741 + string = match_strdup(args); 742 + if (string == NULL) 743 + goto out_nomem; 744 + token = match_token(string, 745 + nfs_xprt_protocol_tokens, args); 746 + kfree(string); 747 + 748 + switch (token) { 749 + case Opt_udp: 750 + mnt->flags &= ~NFS_MOUNT_TCP; 751 + mnt->nfs_server.protocol = IPPROTO_UDP; 752 + mnt->timeo = 7; 753 + mnt->retrans = 5; 754 + break; 755 + case Opt_tcp: 756 + mnt->flags |= NFS_MOUNT_TCP; 757 + mnt->nfs_server.protocol = IPPROTO_TCP; 758 + mnt->timeo = 600; 759 + mnt->retrans = 2; 760 + break; 761 + default: 762 + goto out_unrec_xprt; 763 + } 764 + break; 765 + case Opt_mountproto: 766 + string = match_strdup(args); 767 + if (string == NULL) 768 + goto out_nomem; 769 + token = match_token(string, 770 + nfs_xprt_protocol_tokens, args); 771 + kfree(string); 772 + 773 + switch (token) { 774 + case Opt_udp: 775 + mnt->mount_server.protocol = IPPROTO_UDP; 776 + break; 777 + case Opt_tcp: 778 + 
mnt->mount_server.protocol = IPPROTO_TCP; 779 + break; 780 + default: 781 + goto out_unrec_xprt; 782 + } 783 + break; 784 + case Opt_addr: 785 + string = match_strdup(args); 786 + if (string == NULL) 787 + goto out_nomem; 788 + mnt->nfs_server.address.sin_family = AF_INET; 789 + mnt->nfs_server.address.sin_addr.s_addr = 790 + in_aton(string); 791 + kfree(string); 792 + break; 793 + case Opt_clientaddr: 794 + string = match_strdup(args); 795 + if (string == NULL) 796 + goto out_nomem; 797 + mnt->client_address = string; 798 + break; 799 + case Opt_mounthost: 800 + string = match_strdup(args); 801 + if (string == NULL) 802 + goto out_nomem; 803 + mnt->mount_server.address.sin_family = AF_INET; 804 + mnt->mount_server.address.sin_addr.s_addr = 805 + in_aton(string); 806 + kfree(string); 807 + break; 808 + 809 + case Opt_userspace: 810 + case Opt_deprecated: 811 + break; 812 + 813 + default: 814 + goto out_unknown; 815 + } 816 + } 817 + 818 + return 1; 819 + 820 + out_nomem: 821 + printk(KERN_INFO "NFS: not enough memory to parse option\n"); 822 + return 0; 823 + 824 + out_unrec_vers: 825 + printk(KERN_INFO "NFS: unrecognized NFS version number\n"); 826 + return 0; 827 + 828 + out_unrec_xprt: 829 + printk(KERN_INFO "NFS: unrecognized transport protocol\n"); 830 + return 0; 831 + 832 + out_unrec_sec: 833 + printk(KERN_INFO "NFS: unrecognized security flavor\n"); 834 + return 0; 835 + 836 + out_unknown: 837 + printk(KERN_INFO "NFS: unknown mount option: %s\n", p); 838 + return 0; 839 + } 840 + 841 + /* 842 + * Use the remote server's MOUNT service to request the NFS file handle 843 + * corresponding to the provided path. 844 + */ 845 + static int nfs_try_mount(struct nfs_parsed_mount_data *args, 846 + struct nfs_fh *root_fh) 847 + { 848 + struct sockaddr_in sin; 849 + int status; 850 + 851 + if (args->mount_server.version == 0) { 852 + if (args->flags & NFS_MOUNT_VER3) 853 + args->mount_server.version = NFS_MNT3_VERSION; 854 + else 855 + args->mount_server.version = NFS_MNT_VERSION; 856 + } 857 + 858 + /* 859 + * Construct the mount server's address. 860 + */ 861 + if (args->mount_server.address.sin_addr.s_addr != INADDR_ANY) 862 + sin = args->mount_server.address; 863 + else 864 + sin = args->nfs_server.address; 865 + if (args->mount_server.port == 0) { 866 + status = rpcb_getport_sync(&sin, 867 + args->mount_server.program, 868 + args->mount_server.version, 869 + args->mount_server.protocol); 870 + if (status < 0) 871 + goto out_err; 872 + sin.sin_port = htons(status); 873 + } else 874 + sin.sin_port = htons(args->mount_server.port); 875 + 876 + /* 877 + * Now ask the mount server to map our export path 878 + * to a file handle. 
879 + */ 880 + status = nfs_mount((struct sockaddr *) &sin, 881 + sizeof(sin), 882 + args->nfs_server.hostname, 883 + args->nfs_server.export_path, 884 + args->mount_server.version, 885 + args->mount_server.protocol, 886 + root_fh); 887 + if (status < 0) 888 + goto out_err; 889 + 890 + return status; 891 + 892 + out_err: 893 + dfprintk(MOUNT, "NFS: unable to contact server on host " 894 + NIPQUAD_FMT "\n", NIPQUAD(sin.sin_addr.s_addr)); 895 + return status; 896 } 897 898 /* 899 * Validate the NFS2/NFS3 mount data 900 * - fills in the mount root filehandle 901 + * 902 + * For option strings, user space handles the following behaviors: 903 + * 904 + * + DNS: mapping server host name to IP address ("addr=" option) 905 + * 906 + * + failure mode: how to behave if a mount request can't be handled 907 + * immediately ("fg/bg" option) 908 + * 909 + * + retry: how often to retry a mount request ("retry=" option) 910 + * 911 + * + breaking back: trying proto=udp after proto=tcp, v2 after v3, 912 + * mountproto=tcp after mountproto=udp, and so on 913 + * 914 + * XXX: as far as I can tell, changing the NFS program number is not 915 + * supported in the NFS client. 916 */ 917 + static int nfs_validate_mount_data(struct nfs_mount_data **options, 918 + struct nfs_fh *mntfh, 919 + const char *dev_name) 920 { 921 + struct nfs_mount_data *data = *options; 922 923 + if (data == NULL) 924 + goto out_no_data; 925 926 switch (data->version) { 927 + case 1: 928 + data->namlen = 0; 929 + case 2: 930 + data->bsize = 0; 931 + case 3: 932 + if (data->flags & NFS_MOUNT_VER3) 933 + goto out_no_v3; 934 + data->root.size = NFS2_FHSIZE; 935 + memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); 936 + case 4: 937 + if (data->flags & NFS_MOUNT_SECFLAVOUR) 938 + goto out_no_sec; 939 + case 5: 940 + memset(data->context, 0, sizeof(data->context)); 941 + case 6: 942 + if (data->flags & NFS_MOUNT_VER3) 943 + mntfh->size = data->root.size; 944 + else 945 + mntfh->size = NFS2_FHSIZE; 946 + 947 + if (mntfh->size > sizeof(mntfh->data)) 948 + goto out_invalid_fh; 949 + 950 + memcpy(mntfh->data, data->root.data, mntfh->size); 951 + if (mntfh->size < sizeof(mntfh->data)) 952 + memset(mntfh->data + mntfh->size, 0, 953 + sizeof(mntfh->data) - mntfh->size); 954 + break; 955 + default: { 956 + unsigned int len; 957 + char *c; 958 + int status; 959 + struct nfs_parsed_mount_data args = { 960 + .flags = (NFS_MOUNT_VER3 | NFS_MOUNT_TCP), 961 + .rsize = NFS_MAX_FILE_IO_SIZE, 962 + .wsize = NFS_MAX_FILE_IO_SIZE, 963 + .timeo = 600, 964 + .retrans = 2, 965 + .acregmin = 3, 966 + .acregmax = 60, 967 + .acdirmin = 30, 968 + .acdirmax = 60, 969 + .mount_server.protocol = IPPROTO_UDP, 970 + .mount_server.program = NFS_MNT_PROGRAM, 971 + .nfs_server.protocol = IPPROTO_TCP, 972 + .nfs_server.program = NFS_PROGRAM, 973 + }; 974 + 975 + if (nfs_parse_mount_options((char *) *options, &args) == 0) 976 + return -EINVAL; 977 + 978 + data = kzalloc(sizeof(*data), GFP_KERNEL); 979 + if (data == NULL) 980 + return -ENOMEM; 981 + 982 + /* 983 + * NB: after this point, caller will free "data" 984 + * if we return an error 985 + */ 986 + *options = data; 987 + 988 + c = strchr(dev_name, ':'); 989 + if (c == NULL) 990 + return -EINVAL; 991 + len = c - dev_name - 1; 992 + if (len > sizeof(data->hostname)) 993 + return -EINVAL; 994 + strncpy(data->hostname, dev_name, len); 995 + args.nfs_server.hostname = data->hostname; 996 + 997 + c++; 998 + if (strlen(c) > NFS_MAXPATHLEN) 999 + return -EINVAL; 1000 + args.nfs_server.export_path = c; 1001 + 1002 + status 
= nfs_try_mount(&args, mntfh); 1003 + if (status) 1004 + return -EINVAL; 1005 + 1006 + /* 1007 + * Translate to nfs_mount_data, which nfs_fill_super 1008 + * can deal with. 1009 + */ 1010 + data->version = 6; 1011 + data->flags = args.flags; 1012 + data->rsize = args.rsize; 1013 + data->wsize = args.wsize; 1014 + data->timeo = args.timeo; 1015 + data->retrans = args.retrans; 1016 + data->acregmin = args.acregmin; 1017 + data->acregmax = args.acregmax; 1018 + data->acdirmin = args.acdirmin; 1019 + data->acdirmax = args.acdirmax; 1020 + data->addr = args.nfs_server.address; 1021 + data->namlen = args.namlen; 1022 + data->bsize = args.bsize; 1023 + data->pseudoflavor = args.auth_flavors[0]; 1024 + 1025 + break; 1026 + } 1027 } 1028 1029 if (!(data->flags & NFS_MOUNT_SECFLAVOUR)) 1030 data->pseudoflavor = RPC_AUTH_UNIX; 1031 1032 #ifndef CONFIG_NFS_V3 1033 if (data->flags & NFS_MOUNT_VER3) 1034 + goto out_v3_not_compiled; 1035 + #endif /* !CONFIG_NFS_V3 */ 1036 1037 + if (!nfs_verify_server_address((struct sockaddr *) &data->addr)) 1038 + goto out_no_address; 1039 1040 return 0; 1041 + 1042 + out_no_data: 1043 + dfprintk(MOUNT, "NFS: mount program didn't pass any mount data\n"); 1044 + return -EINVAL; 1045 + 1046 + out_no_v3: 1047 + dfprintk(MOUNT, "NFS: nfs_mount_data version %d does not support v3\n", 1048 + data->version); 1049 + return -EINVAL; 1050 + 1051 + out_no_sec: 1052 + dfprintk(MOUNT, "NFS: nfs_mount_data version supports only AUTH_SYS\n"); 1053 + return -EINVAL; 1054 + 1055 + #ifndef CONFIG_NFS_V3 1056 + out_v3_not_compiled: 1057 + dfprintk(MOUNT, "NFS: NFSv3 is not compiled into kernel\n"); 1058 + return -EPROTONOSUPPORT; 1059 + #endif /* !CONFIG_NFS_V3 */ 1060 + 1061 + out_no_address: 1062 + dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n"); 1063 + return -EINVAL; 1064 + 1065 + out_invalid_fh: 1066 + dfprintk(MOUNT, "NFS: invalid root filehandle\n"); 1067 + return -EINVAL; 1068 } 1069 1070 /* ··· 600 { 601 struct nfs_server *server = data, *old = NFS_SB(sb); 602 603 + if (memcmp(&old->nfs_client->cl_addr, 604 + &server->nfs_client->cl_addr, 605 + sizeof(old->nfs_client->cl_addr)) != 0) 606 + return 0; 607 + /* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */ 608 + if (old->flags & NFS_MOUNT_UNSHARED) 609 return 0; 610 if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0) 611 return 0; 612 return 1; 613 + } 614 + 615 + #define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS) 616 + 617 + static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags) 618 + { 619 + const struct nfs_server *a = s->s_fs_info; 620 + const struct rpc_clnt *clnt_a = a->client; 621 + const struct rpc_clnt *clnt_b = b->client; 622 + 623 + if ((s->s_flags & NFS_MS_MASK) != (flags & NFS_MS_MASK)) 624 + goto Ebusy; 625 + if (a->nfs_client != b->nfs_client) 626 + goto Ebusy; 627 + if (a->flags != b->flags) 628 + goto Ebusy; 629 + if (a->wsize != b->wsize) 630 + goto Ebusy; 631 + if (a->rsize != b->rsize) 632 + goto Ebusy; 633 + if (a->acregmin != b->acregmin) 634 + goto Ebusy; 635 + if (a->acregmax != b->acregmax) 636 + goto Ebusy; 637 + if (a->acdirmin != b->acdirmin) 638 + goto Ebusy; 639 + if (a->acdirmax != b->acdirmax) 640 + goto Ebusy; 641 + if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor) 642 + goto Ebusy; 643 + return 0; 644 + Ebusy: 645 + return -EBUSY; 646 } 647 648 static int nfs_get_sb(struct file_system_type *fs_type, ··· 615 struct nfs_fh mntfh; 616 struct nfs_mount_data *data = raw_data; 617 
struct dentry *mntroot; 618 + int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 619 int error; 620 621 /* Validate the mount data */ 622 + error = nfs_validate_mount_data(&data, &mntfh, dev_name); 623 if (error < 0) 624 + goto out; 625 626 /* Get a volume representation */ 627 server = nfs_create_server(data, &mntfh); 628 if (IS_ERR(server)) { 629 error = PTR_ERR(server); 630 + goto out; 631 } 632 633 + if (server->flags & NFS_MOUNT_UNSHARED) 634 + compare_super = NULL; 635 + 636 /* Get a superblock - note that we may end up sharing one that already exists */ 637 + s = sget(fs_type, compare_super, nfs_set_super, server); 638 if (IS_ERR(s)) { 639 error = PTR_ERR(s); 640 goto out_err_nosb; 641 } 642 643 if (s->s_fs_info != server) { 644 + error = nfs_compare_mount_options(s, server, flags); 645 nfs_free_server(server); 646 server = NULL; 647 + if (error < 0) 648 + goto error_splat_super; 649 } 650 651 if (!s->s_root) { ··· 656 s->s_flags |= MS_ACTIVE; 657 mnt->mnt_sb = s; 658 mnt->mnt_root = mntroot; 659 + error = 0; 660 + 661 + out: 662 + if (data != raw_data) 663 + kfree(data); 664 + return error; 665 666 out_err_nosb: 667 nfs_free_server(server); 668 + goto out; 669 670 error_splat_super: 671 up_write(&s->s_umount); 672 deactivate_super(s); 673 + goto out; 674 } 675 676 /* ··· 691 struct super_block *s; 692 struct nfs_server *server; 693 struct dentry *mntroot; 694 + int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 695 int error; 696 697 dprintk("--> nfs_xdev_get_sb()\n"); ··· 702 goto out_err_noserver; 703 } 704 705 + if (server->flags & NFS_MOUNT_UNSHARED) 706 + compare_super = NULL; 707 + 708 /* Get a superblock - note that we may end up sharing one that already exists */ 709 + s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); 710 if (IS_ERR(s)) { 711 error = PTR_ERR(s); 712 goto out_err_nosb; 713 } 714 715 if (s->s_fs_info != server) { 716 + error = nfs_compare_mount_options(s, server, flags); 717 nfs_free_server(server); 718 server = NULL; 719 + if (error < 0) 720 + goto error_splat_super; 721 } 722 723 if (!s->s_root) { ··· 772 nfs_initialise_sb(sb); 773 } 774 775 + /* 776 + * Validate NFSv4 mount options 777 + */ 778 + static int nfs4_validate_mount_data(struct nfs4_mount_data **options, 779 + const char *dev_name, 780 + struct sockaddr_in *addr, 781 + rpc_authflavor_t *authflavour, 782 + char **hostname, 783 + char **mntpath, 784 + char **ip_addr) 785 { 786 + struct nfs4_mount_data *data = *options; 787 + char *c; 788 789 + if (data == NULL) 790 + goto out_no_data; 791 + 792 + switch (data->version) { 793 + case 1: 794 + if (data->host_addrlen != sizeof(*addr)) 795 + goto out_no_address; 796 + if (copy_from_user(addr, data->host_addr, sizeof(*addr))) 797 + return -EFAULT; 798 + if (addr->sin_port == 0) 799 + addr->sin_port = htons(NFS_PORT); 800 + if (!nfs_verify_server_address((struct sockaddr *) addr)) 801 + goto out_no_address; 802 + 803 + switch (data->auth_flavourlen) { 804 + case 0: 805 + *authflavour = RPC_AUTH_UNIX; 806 + break; 807 + case 1: 808 + if (copy_from_user(authflavour, data->auth_flavours, 809 + sizeof(*authflavour))) 810 + return -EFAULT; 811 + break; 812 + default: 813 + goto out_inval_auth; 814 + } 815 + 816 + c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN); 817 + if (IS_ERR(c)) 818 + return PTR_ERR(c); 819 + *hostname = c; 820 + 821 + c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN); 822 + if (IS_ERR(c)) 823 + return PTR_ERR(c); 824 + *mntpath = c; 825 + dfprintk(MOUNT, "NFS: MNTPATH: 
'%s'\n", *mntpath); 826 + 827 + c = strndup_user(data->client_addr.data, 16); 828 + if (IS_ERR(c)) 829 + return PTR_ERR(c); 830 + *ip_addr = c; 831 + 832 + break; 833 + default: { 834 + unsigned int len; 835 + struct nfs_parsed_mount_data args = { 836 + .rsize = NFS_MAX_FILE_IO_SIZE, 837 + .wsize = NFS_MAX_FILE_IO_SIZE, 838 + .timeo = 600, 839 + .retrans = 2, 840 + .acregmin = 3, 841 + .acregmax = 60, 842 + .acdirmin = 30, 843 + .acdirmax = 60, 844 + .nfs_server.protocol = IPPROTO_TCP, 845 + }; 846 + 847 + if (nfs_parse_mount_options((char *) *options, &args) == 0) 848 + return -EINVAL; 849 + 850 + if (!nfs_verify_server_address((struct sockaddr *) 851 + &args.nfs_server.address)) 852 + return -EINVAL; 853 + *addr = args.nfs_server.address; 854 + 855 + switch (args.auth_flavor_len) { 856 + case 0: 857 + *authflavour = RPC_AUTH_UNIX; 858 + break; 859 + case 1: 860 + *authflavour = (rpc_authflavor_t) args.auth_flavors[0]; 861 + break; 862 + default: 863 + goto out_inval_auth; 864 + } 865 + 866 + /* 867 + * Translate to nfs4_mount_data, which nfs4_fill_super 868 + * can deal with. 869 + */ 870 + data = kzalloc(sizeof(*data), GFP_KERNEL); 871 + if (data == NULL) 872 + return -ENOMEM; 873 + *options = data; 874 + 875 + data->version = 1; 876 + data->flags = args.flags & NFS4_MOUNT_FLAGMASK; 877 + data->rsize = args.rsize; 878 + data->wsize = args.wsize; 879 + data->timeo = args.timeo; 880 + data->retrans = args.retrans; 881 + data->acregmin = args.acregmin; 882 + data->acregmax = args.acregmax; 883 + data->acdirmin = args.acdirmin; 884 + data->acdirmax = args.acdirmax; 885 + data->proto = args.nfs_server.protocol; 886 + 887 + /* 888 + * Split "dev_name" into "hostname:mntpath". 889 + */ 890 + c = strchr(dev_name, ':'); 891 + if (c == NULL) 892 + return -EINVAL; 893 + /* while calculating len, pretend ':' is '\0' */ 894 + len = c - dev_name; 895 + if (len > NFS4_MAXNAMLEN) 896 + return -EINVAL; 897 + *hostname = kzalloc(len, GFP_KERNEL); 898 + if (*hostname == NULL) 899 + return -ENOMEM; 900 + strncpy(*hostname, dev_name, len - 1); 901 + 902 + c++; /* step over the ':' */ 903 + len = strlen(c); 904 + if (len > NFS4_MAXPATHLEN) 905 + return -EINVAL; 906 + *mntpath = kzalloc(len + 1, GFP_KERNEL); 907 + if (*mntpath == NULL) 908 + return -ENOMEM; 909 + strncpy(*mntpath, c, len); 910 + 911 + dprintk("MNTPATH: %s\n", *mntpath); 912 + 913 + *ip_addr = args.client_address; 914 + 915 + break; 916 + } 917 } 918 + 919 + return 0; 920 + 921 + out_no_data: 922 + dfprintk(MOUNT, "NFS4: mount program didn't pass any mount data\n"); 923 + return -EINVAL; 924 + 925 + out_inval_auth: 926 + dfprintk(MOUNT, "NFS4: Invalid number of RPC auth flavours %d\n", 927 + data->auth_flavourlen); 928 + return -EINVAL; 929 + 930 + out_no_address: 931 + dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n"); 932 + return -EINVAL; 933 } 934 935 /* ··· 806 rpc_authflavor_t authflavour; 807 struct nfs_fh mntfh; 808 struct dentry *mntroot; 809 + char *mntpath = NULL, *hostname = NULL, *ip_addr = NULL; 810 + int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 811 int error; 812 813 + /* Validate the mount data */ 814 + error = nfs4_validate_mount_data(&data, dev_name, &addr, &authflavour, 815 + &hostname, &mntpath, &ip_addr); 816 + if (error < 0) 817 + goto out; 818 819 /* Get a volume representation */ 820 server = nfs4_create_server(data, hostname, &addr, mntpath, ip_addr, 821 authflavour, &mntfh); 822 if (IS_ERR(server)) { 823 error = PTR_ERR(server); 824 + goto out; 825 } 826 827 + if 
(server->flags & NFS4_MOUNT_UNSHARED) 828 + compare_super = NULL; 829 + 830 /* Get a superblock - note that we may end up sharing one that already exists */ 831 + s = sget(fs_type, compare_super, nfs_set_super, server); 832 if (IS_ERR(s)) { 833 error = PTR_ERR(s); 834 goto out_free; ··· 906 s->s_flags |= MS_ACTIVE; 907 mnt->mnt_sb = s; 908 mnt->mnt_root = mntroot; 909 + error = 0; 910 911 + out: 912 + kfree(ip_addr); 913 kfree(mntpath); 914 kfree(hostname); 915 return error; 916 917 + out_free: 918 + nfs_free_server(server); 919 + goto out; 920 + 921 error_splat_super: 922 up_write(&s->s_umount); 923 deactivate_super(s); 924 + goto out; 925 } 926 927 static void nfs4_kill_super(struct super_block *sb) ··· 949 struct super_block *s; 950 struct nfs_server *server; 951 struct dentry *mntroot; 952 + int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 953 int error; 954 955 dprintk("--> nfs4_xdev_get_sb()\n"); ··· 960 goto out_err_noserver; 961 } 962 963 + if (server->flags & NFS4_MOUNT_UNSHARED) 964 + compare_super = NULL; 965 + 966 /* Get a superblock - note that we may end up sharing one that already exists */ 967 + s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); 968 if (IS_ERR(s)) { 969 error = PTR_ERR(s); 970 goto out_err_nosb; ··· 1016 struct nfs_server *server; 1017 struct dentry *mntroot; 1018 struct nfs_fh mntfh; 1019 + int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 1020 int error; 1021 1022 dprintk("--> nfs4_referral_get_sb()\n"); ··· 1027 goto out_err_noserver; 1028 } 1029 1030 + if (server->flags & NFS4_MOUNT_UNSHARED) 1031 + compare_super = NULL; 1032 + 1033 /* Get a superblock - note that we may end up sharing one that already exists */ 1034 + s = sget(&nfs_fs_type, compare_super, nfs_set_super, server); 1035 if (IS_ERR(s)) { 1036 error = PTR_ERR(s); 1037 goto out_err_nosb;
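The option parser added above is built on the kernel's match_table_t tables and the match_token()/match_int()/match_strdup() helpers from <linux/parser.h>: split the raw string on commas, match each word against a pattern table, then pull out any integer or string argument. The userspace sketch below mirrors that shape under stated assumptions — the token table and match_option() helper are invented stand-ins, not the kernel API. (One nit in the hunk itself: the Opt_port arm stores htonl(option) into sin_port, a 16-bit field, where htons() is the conversion that fits.)

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the match_table_t pattern. */
    enum { OPT_SOFT, OPT_HARD, OPT_PORT, OPT_ERR };

    static const struct { int token; const char *pattern; } tokens[] = {
        { OPT_SOFT, "soft" },
        { OPT_HARD, "hard" },
        { OPT_PORT, "port=%u" },
        { OPT_ERR,  NULL },
    };

    /* Match one comma-separated word against the table; for "port=%u"
     * style patterns, parse the integer argument as match_int() would. */
    static int match_option(char *opt, unsigned int *arg)
    {
        int i;

        for (i = 0; tokens[i].pattern != NULL; i++) {
            const char *eq = strchr(tokens[i].pattern, '=');

            if (eq == NULL) {
                if (strcmp(opt, tokens[i].pattern) == 0)
                    return tokens[i].token;
            } else {
                size_t n = (size_t)(eq - tokens[i].pattern) + 1;

                if (strncmp(opt, tokens[i].pattern, n) == 0 &&
                    sscanf(opt + n, "%u", arg) == 1)
                    return tokens[i].token;
            }
        }
        return OPT_ERR;
    }

    int main(void)
    {
        char raw[] = "hard,port=2049";
        unsigned int arg = 0;
        char *p, *rest = raw;

        while ((p = strsep(&rest, ",")) != NULL) {
            if (*p == '\0')
                continue;   /* skip empty options, as the kernel loop does */
            switch (match_option(p, &arg)) {
            case OPT_SOFT: printf("soft\n"); break;
            case OPT_HARD: printf("hard\n"); break;
            case OPT_PORT: printf("port=%u\n", arg); break;
            default: printf("unknown option: %s\n", p); return 1;
            }
        }
        return 0;
    }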
+77 -72
fs/nfs/write.c
··· 117 if (PagePrivate(page)) { 118 req = (struct nfs_page *)page_private(page); 119 if (req != NULL) 120 - atomic_inc(&req->wb_count); 121 } 122 return req; 123 } 124 125 static struct nfs_page *nfs_page_find_request(struct page *page) 126 { 127 struct nfs_page *req = NULL; 128 - spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; 129 130 - spin_lock(req_lock); 131 req = nfs_page_find_request_locked(page); 132 - spin_unlock(req_lock); 133 return req; 134 } 135 ··· 191 } 192 /* Update file length */ 193 nfs_grow_file(page, offset, count); 194 - /* Set the PG_uptodate flag? */ 195 - nfs_mark_uptodate(page, offset, count); 196 nfs_unlock_request(req); 197 return 0; 198 } ··· 251 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 252 struct page *page) 253 { 254 struct nfs_page *req; 255 - struct nfs_inode *nfsi = NFS_I(page->mapping->host); 256 - spinlock_t *req_lock = &nfsi->req_lock; 257 int ret; 258 259 - spin_lock(req_lock); 260 for(;;) { 261 req = nfs_page_find_request_locked(page); 262 if (req == NULL) { 263 - spin_unlock(req_lock); 264 return 1; 265 } 266 if (nfs_lock_request_dontget(req)) ··· 270 * succeed provided that someone hasn't already marked the 271 * request as dirty (in which case we don't care). 272 */ 273 - spin_unlock(req_lock); 274 ret = nfs_wait_on_request(req); 275 nfs_release_request(req); 276 if (ret != 0) 277 return ret; 278 - spin_lock(req_lock); 279 } 280 if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { 281 /* This request is marked for commit */ 282 - spin_unlock(req_lock); 283 nfs_unlock_request(req); 284 nfs_pageio_complete(pgio); 285 return 1; 286 } 287 if (nfs_set_page_writeback(page) != 0) { 288 - spin_unlock(req_lock); 289 BUG(); 290 } 291 radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, 292 - NFS_PAGE_TAG_WRITEBACK); 293 ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); 294 - spin_unlock(req_lock); 295 nfs_pageio_add_request(pgio, req); 296 return ret; 297 } ··· 398 if (PageDirty(req->wb_page)) 399 set_bit(PG_NEED_FLUSH, &req->wb_flags); 400 nfsi->npages++; 401 - atomic_inc(&req->wb_count); 402 return 0; 403 } 404 ··· 407 */ 408 static void nfs_inode_remove_request(struct nfs_page *req) 409 { 410 - struct inode *inode = req->wb_context->dentry->d_inode; 411 struct nfs_inode *nfsi = NFS_I(inode); 412 413 BUG_ON (!NFS_WBACK_BUSY(req)); 414 415 - spin_lock(&nfsi->req_lock); 416 set_page_private(req->wb_page, 0); 417 ClearPagePrivate(req->wb_page); 418 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); ··· 420 __set_page_dirty_nobuffers(req->wb_page); 421 nfsi->npages--; 422 if (!nfsi->npages) { 423 - spin_unlock(&nfsi->req_lock); 424 nfs_end_data_update(inode); 425 iput(inode); 426 } else 427 - spin_unlock(&nfsi->req_lock); 428 nfs_clear_request(req); 429 nfs_release_request(req); 430 } ··· 455 static void 456 nfs_mark_request_commit(struct nfs_page *req) 457 { 458 - struct inode *inode = req->wb_context->dentry->d_inode; 459 struct nfs_inode *nfsi = NFS_I(inode); 460 461 - spin_lock(&nfsi->req_lock); 462 - nfs_list_add_request(req, &nfsi->commit); 463 nfsi->ncommit++; 464 set_bit(PG_NEED_COMMIT, &(req)->wb_flags); 465 - spin_unlock(&nfsi->req_lock); 466 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 467 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 468 } ··· 526 idx_end = idx_start + npages - 1; 527 528 next = idx_start; 529 - while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) { 530 if (req->wb_index > idx_end) 531 break; 532 533 next = req->wb_index + 1; 534 
BUG_ON(!NFS_WBACK_BUSY(req)); 535 536 - atomic_inc(&req->wb_count); 537 - spin_unlock(&nfsi->req_lock); 538 error = nfs_wait_on_request(req); 539 nfs_release_request(req); 540 - spin_lock(&nfsi->req_lock); 541 if (error < 0) 542 return error; 543 res++; ··· 577 int res = 0; 578 579 if (nfsi->ncommit != 0) { 580 - res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages); 581 nfsi->ncommit -= res; 582 - if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit)) 583 - printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n"); 584 } 585 return res; 586 } ··· 602 { 603 struct address_space *mapping = page->mapping; 604 struct inode *inode = mapping->host; 605 - struct nfs_inode *nfsi = NFS_I(inode); 606 struct nfs_page *req, *new = NULL; 607 pgoff_t rqend, end; 608 ··· 611 /* Loop over all inode entries and see if we find 612 * A request for the page we wish to update 613 */ 614 - spin_lock(&nfsi->req_lock); 615 req = nfs_page_find_request_locked(page); 616 if (req) { 617 if (!nfs_lock_request_dontget(req)) { 618 int error; 619 620 - spin_unlock(&nfsi->req_lock); 621 error = nfs_wait_on_request(req); 622 nfs_release_request(req); 623 if (error < 0) { ··· 627 } 628 continue; 629 } 630 - spin_unlock(&nfsi->req_lock); 631 if (new) 632 nfs_release_request(new); 633 break; ··· 638 nfs_lock_request_dontget(new); 639 error = nfs_inode_add_request(inode, new); 640 if (error) { 641 - spin_unlock(&nfsi->req_lock); 642 nfs_unlock_request(new); 643 return ERR_PTR(error); 644 } 645 - spin_unlock(&nfsi->req_lock); 646 return new; 647 } 648 - spin_unlock(&nfsi->req_lock); 649 650 new = nfs_create_request(ctx, inode, page, offset, bytes); 651 if (IS_ERR(new)) ··· 749 static void nfs_writepage_release(struct nfs_page *req) 750 { 751 752 - if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { 753 nfs_end_page_writeback(req->wb_page); 754 nfs_inode_remove_request(req); 755 } else 756 nfs_end_page_writeback(req->wb_page); 757 - nfs_clear_page_writeback(req); 758 } 759 760 static inline int flush_task_priority(int how) ··· 789 * NB: take care not to mess about with data->commit et al. 
*/ 790 791 data->req = req; 792 - data->inode = inode = req->wb_context->dentry->d_inode; 793 data->cred = req->wb_context->cred; 794 795 data->args.fh = NFS_FH(inode); ··· 888 } 889 nfs_redirty_request(req); 890 nfs_end_page_writeback(req->wb_page); 891 - nfs_clear_page_writeback(req); 892 return -ENOMEM; 893 } 894 ··· 931 nfs_list_remove_request(req); 932 nfs_redirty_request(req); 933 nfs_end_page_writeback(req->wb_page); 934 - nfs_clear_page_writeback(req); 935 } 936 return -ENOMEM; 937 } ··· 957 struct page *page = req->wb_page; 958 959 dprintk("NFS: write (%s/%Ld %d@%Ld)", 960 - req->wb_context->dentry->d_inode->i_sb->s_id, 961 - (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 962 req->wb_bytes, 963 (long long)req_offset(req)); 964 ··· 973 } 974 975 if (nfs_write_need_commit(data)) { 976 - spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; 977 978 - spin_lock(req_lock); 979 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { 980 /* Do nothing we need to resend the writes */ 981 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) { ··· 986 clear_bit(PG_NEED_COMMIT, &req->wb_flags); 987 dprintk(" server reboot detected\n"); 988 } 989 - spin_unlock(req_lock); 990 } else 991 dprintk(" OK\n"); 992 ··· 1023 page = req->wb_page; 1024 1025 dprintk("NFS: write (%s/%Ld %d@%Ld)", 1026 - req->wb_context->dentry->d_inode->i_sb->s_id, 1027 - (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 1028 req->wb_bytes, 1029 (long long)req_offset(req)); 1030 ··· 1042 dprintk(" marked for commit\n"); 1043 goto next; 1044 } 1045 dprintk(" OK\n"); 1046 remove_request: 1047 nfs_end_page_writeback(page); 1048 nfs_inode_remove_request(req); 1049 next: 1050 - nfs_clear_page_writeback(req); 1051 } 1052 } 1053 ··· 1162 1163 list_splice_init(head, &data->pages); 1164 first = nfs_list_entry(data->pages.next); 1165 - inode = first->wb_context->dentry->d_inode; 1166 1167 data->inode = inode; 1168 data->cred = first->wb_context->cred; ··· 1212 nfs_list_remove_request(req); 1213 nfs_mark_request_commit(req); 1214 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 1215 - nfs_clear_page_writeback(req); 1216 } 1217 return -ENOMEM; 1218 } ··· 1239 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 1240 1241 dprintk("NFS: commit (%s/%Ld %d@%Ld)", 1242 - req->wb_context->dentry->d_inode->i_sb->s_id, 1243 - (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 1244 req->wb_bytes, 1245 (long long)req_offset(req)); 1246 if (task->tk_status < 0) { ··· 1254 * returned by the server against all stored verfs. 
*/ 1255 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) { 1256 /* We have a match */ 1257 nfs_inode_remove_request(req); 1258 dprintk(" OK\n"); 1259 goto next; ··· 1265 dprintk(" mismatch\n"); 1266 nfs_redirty_request(req); 1267 next: 1268 - nfs_clear_page_writeback(req); 1269 } 1270 } 1271 ··· 1276 1277 int nfs_commit_inode(struct inode *inode, int how) 1278 { 1279 - struct nfs_inode *nfsi = NFS_I(inode); 1280 LIST_HEAD(head); 1281 int res; 1282 1283 - spin_lock(&nfsi->req_lock); 1284 res = nfs_scan_commit(inode, &head, 0, 0); 1285 - spin_unlock(&nfsi->req_lock); 1286 if (res) { 1287 int error = nfs_commit_list(inode, &head, how); 1288 if (error < 0) ··· 1299 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) 1300 { 1301 struct inode *inode = mapping->host; 1302 - struct nfs_inode *nfsi = NFS_I(inode); 1303 pgoff_t idx_start, idx_end; 1304 unsigned int npages = 0; 1305 LIST_HEAD(head); ··· 1320 } 1321 } 1322 how &= ~FLUSH_NOCOMMIT; 1323 - spin_lock(&nfsi->req_lock); 1324 do { 1325 ret = nfs_wait_on_requests_locked(inode, idx_start, npages); 1326 if (ret != 0) ··· 1331 if (pages == 0) 1332 break; 1333 if (how & FLUSH_INVALIDATE) { 1334 - spin_unlock(&nfsi->req_lock); 1335 nfs_cancel_commit_list(&head); 1336 ret = pages; 1337 - spin_lock(&nfsi->req_lock); 1338 continue; 1339 } 1340 pages += nfs_scan_commit(inode, &head, 0, 0); 1341 - spin_unlock(&nfsi->req_lock); 1342 ret = nfs_commit_list(inode, &head, how); 1343 - spin_lock(&nfsi->req_lock); 1344 } while (ret >= 0); 1345 - spin_unlock(&nfsi->req_lock); 1346 return ret; 1347 } 1348 ··· 1437 { 1438 struct address_space *mapping = page->mapping; 1439 struct inode *inode; 1440 - spinlock_t *req_lock; 1441 struct nfs_page *req; 1442 int ret; 1443 ··· 1445 inode = mapping->host; 1446 if (!inode) 1447 goto out_raced; 1448 - req_lock = &NFS_I(inode)->req_lock; 1449 - spin_lock(req_lock); 1450 req = nfs_page_find_request_locked(page); 1451 if (req != NULL) { 1452 /* Mark any existing write requests for flushing */ 1453 ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags); 1454 - spin_unlock(req_lock); 1455 nfs_release_request(req); 1456 return ret; 1457 } 1458 ret = __set_page_dirty_nobuffers(page); 1459 - spin_unlock(req_lock); 1460 return ret; 1461 out_raced: 1462 return !TestSetPageDirty(page);
··· 117 if (PagePrivate(page)) { 118 req = (struct nfs_page *)page_private(page); 119 if (req != NULL) 120 + kref_get(&req->wb_kref); 121 } 122 return req; 123 } 124 125 static struct nfs_page *nfs_page_find_request(struct page *page) 126 { 127 + struct inode *inode = page->mapping->host; 128 struct nfs_page *req = NULL; 129 130 + spin_lock(&inode->i_lock); 131 req = nfs_page_find_request_locked(page); 132 + spin_unlock(&inode->i_lock); 133 return req; 134 } 135 ··· 191 } 192 /* Update file length */ 193 nfs_grow_file(page, offset, count); 194 nfs_unlock_request(req); 195 return 0; 196 } ··· 253 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 254 struct page *page) 255 { 256 + struct inode *inode = page->mapping->host; 257 + struct nfs_inode *nfsi = NFS_I(inode); 258 struct nfs_page *req; 259 int ret; 260 261 + spin_lock(&inode->i_lock); 262 for(;;) { 263 req = nfs_page_find_request_locked(page); 264 if (req == NULL) { 265 + spin_unlock(&inode->i_lock); 266 return 1; 267 } 268 if (nfs_lock_request_dontget(req)) ··· 272 * succeed provided that someone hasn't already marked the 273 * request as dirty (in which case we don't care). 274 */ 275 + spin_unlock(&inode->i_lock); 276 ret = nfs_wait_on_request(req); 277 nfs_release_request(req); 278 if (ret != 0) 279 return ret; 280 + spin_lock(&inode->i_lock); 281 } 282 if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { 283 /* This request is marked for commit */ 284 + spin_unlock(&inode->i_lock); 285 nfs_unlock_request(req); 286 nfs_pageio_complete(pgio); 287 return 1; 288 } 289 if (nfs_set_page_writeback(page) != 0) { 290 + spin_unlock(&inode->i_lock); 291 BUG(); 292 } 293 radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, 294 + NFS_PAGE_TAG_LOCKED); 295 ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); 296 + spin_unlock(&inode->i_lock); 297 nfs_pageio_add_request(pgio, req); 298 return ret; 299 } ··· 400 if (PageDirty(req->wb_page)) 401 set_bit(PG_NEED_FLUSH, &req->wb_flags); 402 nfsi->npages++; 403 + kref_get(&req->wb_kref); 404 return 0; 405 } 406 ··· 409 */ 410 static void nfs_inode_remove_request(struct nfs_page *req) 411 { 412 + struct inode *inode = req->wb_context->path.dentry->d_inode; 413 struct nfs_inode *nfsi = NFS_I(inode); 414 415 BUG_ON (!NFS_WBACK_BUSY(req)); 416 417 + spin_lock(&inode->i_lock); 418 set_page_private(req->wb_page, 0); 419 ClearPagePrivate(req->wb_page); 420 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); ··· 422 __set_page_dirty_nobuffers(req->wb_page); 423 nfsi->npages--; 424 if (!nfsi->npages) { 425 + spin_unlock(&inode->i_lock); 426 nfs_end_data_update(inode); 427 iput(inode); 428 } else 429 + spin_unlock(&inode->i_lock); 430 nfs_clear_request(req); 431 nfs_release_request(req); 432 } ··· 457 static void 458 nfs_mark_request_commit(struct nfs_page *req) 459 { 460 + struct inode *inode = req->wb_context->path.dentry->d_inode; 461 struct nfs_inode *nfsi = NFS_I(inode); 462 463 + spin_lock(&inode->i_lock); 464 nfsi->ncommit++; 465 set_bit(PG_NEED_COMMIT, &(req)->wb_flags); 466 + radix_tree_tag_set(&nfsi->nfs_page_tree, 467 + req->wb_index, 468 + NFS_PAGE_TAG_COMMIT); 469 + spin_unlock(&inode->i_lock); 470 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 471 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 472 } ··· 526 idx_end = idx_start + npages - 1; 527 528 next = idx_start; 529 + while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) { 530 if (req->wb_index > idx_end) 531 break; 532 533 next = req->wb_index + 1; 534 
BUG_ON(!NFS_WBACK_BUSY(req)); 535 536 + kref_get(&req->wb_kref); 537 + spin_unlock(&inode->i_lock); 538 error = nfs_wait_on_request(req); 539 nfs_release_request(req); 540 + spin_lock(&inode->i_lock); 541 if (error < 0) 542 return error; 543 res++; ··· 577 int res = 0; 578 579 if (nfsi->ncommit != 0) { 580 + res = nfs_scan_list(nfsi, dst, idx_start, npages, 581 + NFS_PAGE_TAG_COMMIT); 582 nfsi->ncommit -= res; 583 } 584 return res; 585 } ··· 603 { 604 struct address_space *mapping = page->mapping; 605 struct inode *inode = mapping->host; 606 struct nfs_page *req, *new = NULL; 607 pgoff_t rqend, end; 608 ··· 613 /* Loop over all inode entries and see if we find 614 * A request for the page we wish to update 615 */ 616 + spin_lock(&inode->i_lock); 617 req = nfs_page_find_request_locked(page); 618 if (req) { 619 if (!nfs_lock_request_dontget(req)) { 620 int error; 621 622 + spin_unlock(&inode->i_lock); 623 error = nfs_wait_on_request(req); 624 nfs_release_request(req); 625 if (error < 0) { ··· 629 } 630 continue; 631 } 632 + spin_unlock(&inode->i_lock); 633 if (new) 634 nfs_release_request(new); 635 break; ··· 640 nfs_lock_request_dontget(new); 641 error = nfs_inode_add_request(inode, new); 642 if (error) { 643 + spin_unlock(&inode->i_lock); 644 nfs_unlock_request(new); 645 return ERR_PTR(error); 646 } 647 + spin_unlock(&inode->i_lock); 648 return new; 649 } 650 + spin_unlock(&inode->i_lock); 651 652 new = nfs_create_request(ctx, inode, page, offset, bytes); 653 if (IS_ERR(new)) ··· 751 static void nfs_writepage_release(struct nfs_page *req) 752 { 753 754 + if (PageError(req->wb_page)) { 755 + nfs_end_page_writeback(req->wb_page); 756 + nfs_inode_remove_request(req); 757 + } else if (!nfs_reschedule_unstable_write(req)) { 758 + /* Set the PG_uptodate flag */ 759 + nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes); 760 nfs_end_page_writeback(req->wb_page); 761 nfs_inode_remove_request(req); 762 } else 763 nfs_end_page_writeback(req->wb_page); 764 + nfs_clear_page_tag_locked(req); 765 } 766 767 static inline int flush_task_priority(int how) ··· 786 * NB: take care not to mess about with data->commit et al. 
*/ 787 788 data->req = req; 789 + data->inode = inode = req->wb_context->path.dentry->d_inode; 790 data->cred = req->wb_context->cred; 791 792 data->args.fh = NFS_FH(inode); ··· 885 } 886 nfs_redirty_request(req); 887 nfs_end_page_writeback(req->wb_page); 888 + nfs_clear_page_tag_locked(req); 889 return -ENOMEM; 890 } 891 ··· 928 nfs_list_remove_request(req); 929 nfs_redirty_request(req); 930 nfs_end_page_writeback(req->wb_page); 931 + nfs_clear_page_tag_locked(req); 932 } 933 return -ENOMEM; 934 } ··· 954 struct page *page = req->wb_page; 955 956 dprintk("NFS: write (%s/%Ld %d@%Ld)", 957 + req->wb_context->path.dentry->d_inode->i_sb->s_id, 958 + (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 959 req->wb_bytes, 960 (long long)req_offset(req)); 961 ··· 970 } 971 972 if (nfs_write_need_commit(data)) { 973 + struct inode *inode = page->mapping->host; 974 975 + spin_lock(&inode->i_lock); 976 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { 977 /* Do nothing we need to resend the writes */ 978 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) { ··· 983 clear_bit(PG_NEED_COMMIT, &req->wb_flags); 984 dprintk(" server reboot detected\n"); 985 } 986 + spin_unlock(&inode->i_lock); 987 } else 988 dprintk(" OK\n"); 989 ··· 1020 page = req->wb_page; 1021 1022 dprintk("NFS: write (%s/%Ld %d@%Ld)", 1023 + req->wb_context->path.dentry->d_inode->i_sb->s_id, 1024 + (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 1025 req->wb_bytes, 1026 (long long)req_offset(req)); 1027 ··· 1039 dprintk(" marked for commit\n"); 1040 goto next; 1041 } 1042 + /* Set the PG_uptodate flag? */ 1043 + nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); 1044 dprintk(" OK\n"); 1045 remove_request: 1046 nfs_end_page_writeback(page); 1047 nfs_inode_remove_request(req); 1048 next: 1049 + nfs_clear_page_tag_locked(req); 1050 } 1051 } 1052 ··· 1157 1158 list_splice_init(head, &data->pages); 1159 first = nfs_list_entry(data->pages.next); 1160 + inode = first->wb_context->path.dentry->d_inode; 1161 1162 data->inode = inode; 1163 data->cred = first->wb_context->cred; ··· 1207 nfs_list_remove_request(req); 1208 nfs_mark_request_commit(req); 1209 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 1210 + nfs_clear_page_tag_locked(req); 1211 } 1212 return -ENOMEM; 1213 } ··· 1234 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 1235 1236 dprintk("NFS: commit (%s/%Ld %d@%Ld)", 1237 + req->wb_context->path.dentry->d_inode->i_sb->s_id, 1238 + (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 1239 req->wb_bytes, 1240 (long long)req_offset(req)); 1241 if (task->tk_status < 0) { ··· 1249 * returned by the server against all stored verfs. 
*/ 1250 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) { 1251 /* We have a match */ 1252 + /* Set the PG_uptodate flag */ 1253 + nfs_mark_uptodate(req->wb_page, req->wb_pgbase, 1254 + req->wb_bytes); 1255 nfs_inode_remove_request(req); 1256 dprintk(" OK\n"); 1257 goto next; ··· 1257 dprintk(" mismatch\n"); 1258 nfs_redirty_request(req); 1259 next: 1260 + nfs_clear_page_tag_locked(req); 1261 } 1262 } 1263 ··· 1268 1269 int nfs_commit_inode(struct inode *inode, int how) 1270 { 1271 LIST_HEAD(head); 1272 int res; 1273 1274 + spin_lock(&inode->i_lock); 1275 res = nfs_scan_commit(inode, &head, 0, 0); 1276 + spin_unlock(&inode->i_lock); 1277 if (res) { 1278 int error = nfs_commit_list(inode, &head, how); 1279 if (error < 0) ··· 1292 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) 1293 { 1294 struct inode *inode = mapping->host; 1295 pgoff_t idx_start, idx_end; 1296 unsigned int npages = 0; 1297 LIST_HEAD(head); ··· 1314 } 1315 } 1316 how &= ~FLUSH_NOCOMMIT; 1317 + spin_lock(&inode->i_lock); 1318 do { 1319 ret = nfs_wait_on_requests_locked(inode, idx_start, npages); 1320 if (ret != 0) ··· 1325 if (pages == 0) 1326 break; 1327 if (how & FLUSH_INVALIDATE) { 1328 + spin_unlock(&inode->i_lock); 1329 nfs_cancel_commit_list(&head); 1330 ret = pages; 1331 + spin_lock(&inode->i_lock); 1332 continue; 1333 } 1334 pages += nfs_scan_commit(inode, &head, 0, 0); 1335 + spin_unlock(&inode->i_lock); 1336 ret = nfs_commit_list(inode, &head, how); 1337 + spin_lock(&inode->i_lock); 1338 + 1339 } while (ret >= 0); 1340 + spin_unlock(&inode->i_lock); 1341 return ret; 1342 } 1343 ··· 1430 { 1431 struct address_space *mapping = page->mapping; 1432 struct inode *inode; 1433 struct nfs_page *req; 1434 int ret; 1435 ··· 1439 inode = mapping->host; 1440 if (!inode) 1441 goto out_raced; 1442 + spin_lock(&inode->i_lock); 1443 req = nfs_page_find_request_locked(page); 1444 if (req != NULL) { 1445 /* Mark any existing write requests for flushing */ 1446 ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags); 1447 + spin_unlock(&inode->i_lock); 1448 nfs_release_request(req); 1449 return ret; 1450 } 1451 ret = __set_page_dirty_nobuffers(page); 1452 + spin_unlock(&inode->i_lock); 1453 return ret; 1454 out_raced: 1455 return !TestSetPageDirty(page);
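Two threads run through the fs/nfs/write.c hunk above: the private per-inode req_lock gives way to the generic inode->i_lock, and the open-coded atomic_t wb_count becomes a struct kref (wb_kref), so every reference drop funnels through a single release path. A single-threaded userspace sketch of that kref pattern follows; struct nfs_page_sketch and nfs_page_free() are invented names, and the real kref uses atomic operations plus container_of() rather than a first-member cast.

    #include <stdio.h>
    #include <stdlib.h>

    /* The count lives inside the object and the last put runs a release
     * callback, so callers never free the object directly. */
    struct kref { int refcount; };

    struct nfs_page_sketch {
        struct kref wb_kref;        /* must stay the first member here */
        unsigned int wb_bytes;
    };

    static void kref_init(struct kref *k) { k->refcount = 1; }
    static void kref_get(struct kref *k)  { k->refcount++; }

    static void kref_put(struct kref *k, void (*release)(struct kref *))
    {
        if (--k->refcount == 0)
            release(k);
    }

    static void nfs_page_free(struct kref *k)
    {
        struct nfs_page_sketch *req = (struct nfs_page_sketch *)k;

        printf("freeing request of %u bytes\n", req->wb_bytes);
        free(req);
    }

    int main(void)
    {
        struct nfs_page_sketch *req = malloc(sizeof(*req));

        if (req == NULL)
            return 1;
        kref_init(&req->wb_kref);
        req->wb_bytes = 4096;

        kref_get(&req->wb_kref);                 /* e.g. nfs_inode_add_request */
        kref_put(&req->wb_kref, nfs_page_free);  /* e.g. nfs_release_request   */
        kref_put(&req->wb_kref, nfs_page_free);  /* last put frees the object  */
        return 0;
    }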
+3 -15
fs/nfsd/nfs4callback.c
··· 394 .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], 395 .rpc_argp = clp, 396 }; 397 - char clientname[16]; 398 int status; 399 400 if (atomic_read(&cb->cb_set)) ··· 416 memset(program->stats, 0, sizeof(cb->cb_stat)); 417 program->stats->program = program; 418 419 - /* Just here to make some printk's more useful: */ 420 - snprintf(clientname, sizeof(clientname), 421 - "%u.%u.%u.%u", NIPQUAD(addr.sin_addr)); 422 - args.servername = clientname; 423 - 424 /* Create RPC client */ 425 cb->cb_client = rpc_create(&args); 426 if (IS_ERR(cb->cb_client)) { ··· 423 goto out_err; 424 } 425 426 - /* Kick rpciod, put the call on the wire. */ 427 - if (rpciod_up() != 0) 428 - goto out_clnt; 429 - 430 /* the task holds a reference to the nfs4_client struct */ 431 atomic_inc(&clp->cl_count); 432 433 msg.rpc_cred = nfsd4_lookupcred(clp,0); 434 if (IS_ERR(msg.rpc_cred)) 435 - goto out_rpciod; 436 status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL); 437 put_rpccred(msg.rpc_cred); 438 439 if (status != 0) { 440 dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n"); 441 - goto out_rpciod; 442 } 443 return; 444 445 - out_rpciod: 446 atomic_dec(&clp->cl_count); 447 - rpciod_down(); 448 - out_clnt: 449 rpc_shutdown_client(cb->cb_client); 450 out_err: 451 cb->cb_client = NULL;
··· 394 .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], 395 .rpc_argp = clp, 396 }; 397 int status; 398 399 if (atomic_read(&cb->cb_set)) ··· 417 memset(program->stats, 0, sizeof(cb->cb_stat)); 418 program->stats->program = program; 419 420 /* Create RPC client */ 421 cb->cb_client = rpc_create(&args); 422 if (IS_ERR(cb->cb_client)) { ··· 429 goto out_err; 430 } 431 432 /* the task holds a reference to the nfs4_client struct */ 433 atomic_inc(&clp->cl_count); 434 435 msg.rpc_cred = nfsd4_lookupcred(clp,0); 436 if (IS_ERR(msg.rpc_cred)) 437 + goto out_release_clp; 438 status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL); 439 put_rpccred(msg.rpc_cred); 440 441 if (status != 0) { 442 dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n"); 443 + goto out_release_clp; 444 } 445 return; 446 447 + out_release_clp: 448 atomic_dec(&clp->cl_count); 449 rpc_shutdown_client(cb->cb_client); 450 out_err: 451 cb->cb_client = NULL;
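The callback-probe error path above collapses to one out_release_clp label once the rpciod_up()/rpciod_down() pairing drops out (consistent with the RPC client code now holding that reference itself — an assumption based on the paired fs/nfsd/nfs4state.c removal below). A minimal sketch of the cleanup-label idiom, with malloc()/free() standing in for the RPC client and credential:

    #include <stdlib.h>

    /* Each failure jumps to the label that releases exactly what has
     * been acquired so far; later labels fall through to earlier ones. */
    static int setup(void)
    {
        char *client;
        char *cred;

        client = malloc(32);        /* rpc_create() analogue */
        if (client == NULL)
            goto out_err;

        cred = malloc(16);          /* cred lookup analogue */
        if (cred == NULL)
            goto out_release_clp;

        free(cred);                 /* put_rpccred() analogue */
        free(client);               /* freed here; the real code keeps it */
        return 0;

    out_release_clp:
        free(client);               /* rpc_shutdown_client() analogue */
    out_err:
        return -1;
    }

    int main(void)
    {
        return setup() == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
    }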
-1
fs/nfsd/nfs4state.c
··· 378 if (clnt) { 379 clp->cl_callback.cb_client = NULL; 380 rpc_shutdown_client(clnt); 381 - rpciod_down(); 382 } 383 } 384
··· 378 if (clnt) { 379 clp->cl_callback.cb_client = NULL; 380 rpc_shutdown_client(clnt); 381 } 382 } 383
+1
include/linux/lockd/lockd.h
··· 39 struct nlm_host { 40 struct hlist_node h_hash; /* doubly linked list */ 41 struct sockaddr_in h_addr; /* peer address */ 42 struct rpc_clnt * h_rpcclnt; /* RPC client to talk to peer */ 43 char * h_name; /* remote hostname */ 44 u32 h_version; /* interface version */
··· 39 struct nlm_host { 40 struct hlist_node h_hash; /* doubly linked list */ 41 struct sockaddr_in h_addr; /* peer address */ 42 + struct sockaddr_in h_saddr; /* our address (optional) */ 43 struct rpc_clnt * h_rpcclnt; /* RPC client to talk to peer */ 44 char * h_name; /* remote hostname */ 45 u32 h_version; /* interface version */
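The new h_saddr field gives each nlm_host a preferred local address to pair with the peer address in h_addr. In plain socket terms that is the bind-before-connect pattern: claim a specific source address, then connect to the peer. A userspace sketch, with loopback addresses and port 2049 as placeholders:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_in saddr, daddr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        /* our address: the h_saddr analogue, bound before connecting */
        memset(&saddr, 0, sizeof(saddr));
        saddr.sin_family = AF_INET;
        saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        saddr.sin_port = 0;             /* any local port */
        if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)) < 0)
            perror("bind");

        /* peer address: the h_addr analogue */
        memset(&daddr, 0, sizeof(daddr));
        daddr.sin_family = AF_INET;
        daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        daddr.sin_port = htons(2049);
        if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)) < 0)
            perror("connect");          /* expected unless a listener exists */

        close(fd);
        return 0;
    }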
+1
include/linux/nfs4.h
··· 15 16 #include <linux/types.h> 17 18 #define NFS4_VERIFIER_SIZE 8 19 #define NFS4_STATEID_SIZE 16 20 #define NFS4_FHSIZE 128
··· 15 16 #include <linux/types.h> 17 18 + #define NFS4_BITMAP_SIZE 2 19 #define NFS4_VERIFIER_SIZE 8 20 #define NFS4_STATEID_SIZE 16 21 #define NFS4_FHSIZE 128
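NFS4_BITMAP_SIZE pins the attribute bitmap at two 32-bit words, enough for attribute numbers 0-63. A quick check of the word/bit arithmetic; the attribute numbers used (type = 1, owner = 36) are taken from RFC 3530:

    #include <assert.h>
    #include <stdint.h>

    #define NFS4_BITMAP_SIZE 2

    /* Set attribute number 'attr' in a multi-word NFSv4 bitmap. */
    static void bitmap_set(uint32_t *bm, unsigned int attr)
    {
        bm[attr / 32] |= 1u << (attr % 32);
    }

    int main(void)
    {
        uint32_t bm[NFS4_BITMAP_SIZE] = { 0, 0 };

        bitmap_set(bm, 1);              /* FATTR4_TYPE  -> word 0, bit 1 */
        bitmap_set(bm, 36);             /* FATTR4_OWNER -> word 1, bit 4 */
        assert(bm[0] == (1u << 1));
        assert(bm[1] == (1u << 4));
        return 0;
    }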
+2 -1
include/linux/nfs4_mount.h
··· 65 #define NFS4_MOUNT_NOCTO 0x0010 /* 1 */ 66 #define NFS4_MOUNT_NOAC 0x0020 /* 1 */ 67 #define NFS4_MOUNT_STRICTLOCK 0x1000 /* 1 */ 68 - #define NFS4_MOUNT_FLAGMASK 0xFFFF 69 70 #endif
··· 65 #define NFS4_MOUNT_NOCTO 0x0010 /* 1 */ 66 #define NFS4_MOUNT_NOAC 0x0020 /* 1 */ 67 #define NFS4_MOUNT_STRICTLOCK 0x1000 /* 1 */ 68 + #define NFS4_MOUNT_UNSHARED 0x8000 /* 1 */ 69 + #define NFS4_MOUNT_FLAGMASK 0x9033 70 71 #endif
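NFS4_MOUNT_FLAGMASK tightens from 0xFFFF to 0x9033 — exactly the OR of the defined flag bits, now including NFS4_MOUNT_UNSHARED. The check below reproduces that arithmetic; the SOFT (0x0001) and INTR (0x0002) values are assumed from the part of the header not shown in this hunk:

    #include <assert.h>
    #include <stdio.h>

    #define NFS4_MOUNT_SOFT       0x0001  /* assumed from earlier in header */
    #define NFS4_MOUNT_INTR       0x0002  /* assumed from earlier in header */
    #define NFS4_MOUNT_NOCTO      0x0010
    #define NFS4_MOUNT_NOAC       0x0020
    #define NFS4_MOUNT_STRICTLOCK 0x1000
    #define NFS4_MOUNT_UNSHARED   0x8000

    int main(void)
    {
        unsigned int mask = NFS4_MOUNT_SOFT | NFS4_MOUNT_INTR |
                            NFS4_MOUNT_NOCTO | NFS4_MOUNT_NOAC |
                            NFS4_MOUNT_STRICTLOCK | NFS4_MOUNT_UNSHARED;

        assert(mask == 0x9033);
        printf("NFS4_MOUNT_FLAGMASK = 0x%04x\n", mask);
        return 0;
    }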
+12 -16
include/linux/nfs_fs.h
··· 30 #ifdef __KERNEL__ 31 32 #include <linux/in.h> 33 #include <linux/mm.h> 34 #include <linux/pagemap.h> 35 #include <linux/rbtree.h> 36 #include <linux/rwsem.h> ··· 71 72 struct nfs4_state; 73 struct nfs_open_context { 74 - atomic_t count; 75 - struct vfsmount *vfsmnt; 76 - struct dentry *dentry; 77 struct rpc_cred *cred; 78 struct nfs4_state *state; 79 fl_owner_t lockowner; ··· 156 /* 157 * This is the list of dirty unwritten pages. 158 */ 159 - spinlock_t req_lock; 160 - struct list_head dirty; 161 - struct list_head commit; 162 struct radix_tree_root nfs_page_tree; 163 164 - unsigned int ndirty, 165 - ncommit, 166 npages; 167 168 /* Open contexts for shared mmap writes */ ··· 184 #define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ 185 #define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ 186 #define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ 187 188 /* 189 * Bit offsets in flags field ··· 494 495 /* 496 * linux/fs/mount_clnt.c 497 - * (Used only by nfsroot module) 498 */ 499 - extern int nfsroot_mount(struct sockaddr_in *, char *, struct nfs_fh *, 500 - int, int); 501 502 /* 503 * inline functions 504 */ 505 506 - static inline loff_t 507 - nfs_size_to_loff_t(__u64 size) 508 { 509 - loff_t maxsz = (((loff_t) ULONG_MAX) << PAGE_CACHE_SHIFT) + PAGE_CACHE_SIZE - 1; 510 - if (size > maxsz) 511 - return maxsz; 512 return (loff_t) size; 513 } 514 ··· 552 #define NFSDBG_ROOT 0x0080 553 #define NFSDBG_CALLBACK 0x0100 554 #define NFSDBG_CLIENT 0x0200 555 #define NFSDBG_ALL 0xFFFF 556 557 #ifdef __KERNEL__
··· 30 #ifdef __KERNEL__ 31 32 #include <linux/in.h> 33 + #include <linux/kref.h> 34 #include <linux/mm.h> 35 + #include <linux/namei.h> 36 #include <linux/pagemap.h> 37 #include <linux/rbtree.h> 38 #include <linux/rwsem.h> ··· 69 70 struct nfs4_state; 71 struct nfs_open_context { 72 + struct kref kref; 73 + struct path path; 74 struct rpc_cred *cred; 75 struct nfs4_state *state; 76 fl_owner_t lockowner; ··· 155 /* 156 * This is the list of dirty unwritten pages. 157 */ 158 struct radix_tree_root nfs_page_tree; 159 160 + unsigned long ncommit, 161 npages; 162 163 /* Open contexts for shared mmap writes */ ··· 187 #define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ 188 #define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ 189 #define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ 190 + #define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */ 191 192 /* 193 * Bit offsets in flags field ··· 496 497 /* 498 * linux/fs/mount_clnt.c 499 */ 500 + extern int nfs_mount(struct sockaddr *, size_t, char *, char *, 501 + int, int, struct nfs_fh *); 502 503 /* 504 * inline functions 505 */ 506 507 + static inline loff_t nfs_size_to_loff_t(__u64 size) 508 { 509 + if (size > (__u64) OFFSET_MAX - 1) 510 + return OFFSET_MAX - 1; 511 return (loff_t) size; 512 } 513 ··· 557 #define NFSDBG_ROOT 0x0080 558 #define NFSDBG_CALLBACK 0x0100 559 #define NFSDBG_CLIENT 0x0200 560 + #define NFSDBG_MOUNT 0x0400 561 #define NFSDBG_ALL 0xFFFF 562 563 #ifdef __KERNEL__
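nfs_size_to_loff_t() now clamps an unsigned 64-bit wire size against OFFSET_MAX - 1 instead of the old page-cache-derived ceiling, so an oversized value can never wrap a signed loff_t negative. A userspace sketch, with LLONG_MAX standing in for the kernel's OFFSET_MAX:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef long long loff_t_sketch;    /* loff_t stand-in */

    static loff_t_sketch nfs_size_to_loff_t(uint64_t size)
    {
        /* cap before the signed conversion, never after */
        if (size > (uint64_t)LLONG_MAX - 1)
            return LLONG_MAX - 1;
        return (loff_t_sketch)size;
    }

    int main(void)
    {
        printf("%lld\n", nfs_size_to_loff_t(4096));        /* 4096 */
        printf("%lld\n", nfs_size_to_loff_t(UINT64_MAX));  /* clamped */
        return 0;
    }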
+3 -5
include/linux/nfs_fs_sb.h
··· 16 #define NFS_CS_INITING 1 /* busy initialising */ 17 int cl_nfsversion; /* NFS protocol version */ 18 unsigned long cl_res_state; /* NFS resources state */ 19 - #define NFS_CS_RPCIOD 0 /* - rpciod started */ 20 #define NFS_CS_CALLBACK 1 /* - callback started */ 21 #define NFS_CS_IDMAP 2 /* - idmap started */ 22 #define NFS_CS_RENEWD 3 /* - renewd started */ ··· 34 nfs4_verifier cl_confirm; 35 unsigned long cl_state; 36 37 - u32 cl_lockowner_id; 38 39 /* 40 * The following rwsem ensures exclusive access to the server ··· 44 struct rw_semaphore cl_sem; 45 46 struct list_head cl_delegations; 47 - struct list_head cl_state_owners; 48 - struct list_head cl_unused; 49 - int cl_nunused; 50 spinlock_t cl_lock; 51 52 unsigned long cl_lease_time;
··· 16 #define NFS_CS_INITING 1 /* busy initialising */ 17 int cl_nfsversion; /* NFS protocol version */ 18 unsigned long cl_res_state; /* NFS resources state */ 19 #define NFS_CS_CALLBACK 1 /* - callback started */ 20 #define NFS_CS_IDMAP 2 /* - idmap started */ 21 #define NFS_CS_RENEWD 3 /* - renewd started */ ··· 35 nfs4_verifier cl_confirm; 36 unsigned long cl_state; 37 38 + struct rb_root cl_openowner_id; 39 + struct rb_root cl_lockowner_id; 40 41 /* 42 * The following rwsem ensures exclusive access to the server ··· 44 struct rw_semaphore cl_sem; 45 46 struct list_head cl_delegations; 47 + struct rb_root cl_state_owners; 48 spinlock_t cl_lock; 49 50 unsigned long cl_lease_time;
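Here the per-client NFSv4 state-owner bookkeeping moves from linked lists (plus the hand-maintained cl_unused/cl_nunused free pool) to red-black trees keyed by owner id, so lookups become O(log n) searches instead of list scans. An illustrative lookup in the usual kernel rbtree style (the struct is hypothetical, not the real nfs4_state_owner):

#include <linux/rbtree.h>
#include <linux/types.h>

struct owner {
	struct rb_node	node;
	u64		id;	/* search key */
};

static struct owner *owner_find(struct rb_root *root, u64 id)
{
	struct rb_node *n = root->rb_node;

	while (n != NULL) {
		struct owner *o = rb_entry(n, struct owner, node);

		if (id < o->id)
			n = n->rb_left;
		else if (id > o->id)
			n = n->rb_right;
		else
			return o;
	}
	return NULL;
}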
+2 -1
include/linux/nfs_mount.h
··· 37 int acdirmin; /* 1 */ 38 int acdirmax; /* 1 */ 39 struct sockaddr_in addr; /* 1 */ 40 - char hostname[256]; /* 1 */ 41 int namlen; /* 2 */ 42 unsigned int bsize; /* 3 */ 43 struct nfs3_fh root; /* 4 */ ··· 62 #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ 63 #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ 64 #define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */ 65 #define NFS_MOUNT_FLAGMASK 0xFFFF 66 67 #endif
··· 37 int acdirmin; /* 1 */ 38 int acdirmax; /* 1 */ 39 struct sockaddr_in addr; /* 1 */ 40 + char hostname[NFS_MAXNAMLEN + 1]; /* 1 */ 41 int namlen; /* 2 */ 42 unsigned int bsize; /* 3 */ 43 struct nfs3_fh root; /* 4 */ ··· 62 #define NFS_MOUNT_STRICTLOCK 0x1000 /* reserved for NFSv4 */ 63 #define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */ 64 #define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */ 65 + #define NFS_MOUNT_UNSHARED 0x8000 /* 5 */ 66 #define NFS_MOUNT_FLAGMASK 0xFFFF 67 68 #endif
+11 -14
include/linux/nfs_page.h
··· 16 #include <linux/sunrpc/auth.h> 17 #include <linux/nfs_xdr.h> 18 19 - #include <asm/atomic.h> 20 21 /* 22 * Valid flags for the radix tree 23 */ 24 - #define NFS_PAGE_TAG_WRITEBACK 0 25 26 /* 27 * Valid flags for a dirty buffer ··· 34 35 struct nfs_inode; 36 struct nfs_page { 37 - struct list_head wb_list, /* Defines state of page: */ 38 - *wb_list_head; /* read/write/commit */ 39 struct page *wb_page; /* page to read in/write out */ 40 struct nfs_open_context *wb_context; /* File state context info */ 41 atomic_t wb_complete; /* i/os we're waiting for */ ··· 42 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ 43 wb_pgbase, /* Start of page data */ 44 wb_bytes; /* Length of request */ 45 - atomic_t wb_count; /* reference count */ 46 unsigned long wb_flags; 47 struct nfs_writeverf wb_verf; /* Commit cookie */ 48 }; ··· 71 extern void nfs_release_request(struct nfs_page *req); 72 73 74 - extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst, 75 - pgoff_t idx_start, unsigned int npages); 76 extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, 77 struct inode *inode, 78 int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), ··· 84 extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); 85 extern int nfs_wait_on_request(struct nfs_page *); 86 extern void nfs_unlock_request(struct nfs_page *req); 87 - extern int nfs_set_page_writeback_locked(struct nfs_page *req); 88 - extern void nfs_clear_page_writeback(struct nfs_page *req); 89 90 91 /* 92 - * Lock the page of an asynchronous request without incrementing the wb_count 93 */ 94 static inline int 95 nfs_lock_request_dontget(struct nfs_page *req) ··· 97 } 98 99 /* 100 - * Lock the page of an asynchronous request 101 */ 102 static inline int 103 nfs_lock_request(struct nfs_page *req) 104 { 105 if (test_and_set_bit(PG_BUSY, &req->wb_flags)) 106 return 0; 107 - atomic_inc(&req->wb_count); 108 return 1; 109 } 110 ··· 117 nfs_list_add_request(struct nfs_page *req, struct list_head *head) 118 { 119 list_add_tail(&req->wb_list, head); 120 - req->wb_list_head = head; 121 } 122 123 ··· 130 if (list_empty(&req->wb_list)) 131 return; 132 list_del_init(&req->wb_list); 133 - req->wb_list_head = NULL; 134 } 135 136 static inline struct nfs_page *
··· 16 #include <linux/sunrpc/auth.h> 17 #include <linux/nfs_xdr.h> 18 19 + #include <linux/kref.h> 20 21 /* 22 * Valid flags for the radix tree 23 */ 24 + #define NFS_PAGE_TAG_LOCKED 0 25 + #define NFS_PAGE_TAG_COMMIT 1 26 27 /* 28 * Valid flags for a dirty buffer ··· 33 34 struct nfs_inode; 35 struct nfs_page { 36 + struct list_head wb_list; /* Defines state of page: */ 37 struct page *wb_page; /* page to read in/write out */ 38 struct nfs_open_context *wb_context; /* File state context info */ 39 atomic_t wb_complete; /* i/os we're waiting for */ ··· 42 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ 43 wb_pgbase, /* Start of page data */ 44 wb_bytes; /* Length of request */ 45 + struct kref wb_kref; /* reference count */ 46 unsigned long wb_flags; 47 struct nfs_writeverf wb_verf; /* Commit cookie */ 48 }; ··· 71 extern void nfs_release_request(struct nfs_page *req); 72 73 74 + extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *dst, 75 + pgoff_t idx_start, unsigned int npages, int tag); 76 extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, 77 struct inode *inode, 78 int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), ··· 84 extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); 85 extern int nfs_wait_on_request(struct nfs_page *); 86 extern void nfs_unlock_request(struct nfs_page *req); 87 + extern void nfs_clear_page_tag_locked(struct nfs_page *req); 88 89 90 /* 91 + * Lock the page of an asynchronous request without getting a new reference 92 */ 93 static inline int 94 nfs_lock_request_dontget(struct nfs_page *req) ··· 98 } 99 100 /* 101 + * Lock the page of an asynchronous request and take a reference 102 */ 103 static inline int 104 nfs_lock_request(struct nfs_page *req) 105 { 106 if (test_and_set_bit(PG_BUSY, &req->wb_flags)) 107 return 0; 108 + kref_get(&req->wb_kref); 109 return 1; 110 } 111 ··· 118 nfs_list_add_request(struct nfs_page *req, struct list_head *head) 119 { 120 list_add_tail(&req->wb_list, head); 121 } 122 123 ··· 132 if (list_empty(&req->wb_list)) 133 return; 134 list_del_init(&req->wb_list); 135 } 136 137 static inline struct nfs_page *
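In nfs_page.h the on-inode dirty/commit lists give way to radix-tree tags, NFS_PAGE_TAG_LOCKED and NFS_PAGE_TAG_COMMIT, so nfs_scan_list() now walks nfs_page_tree by tag and the wb_list_head back-pointer disappears. A sketch of the tag-based gang lookup this enables (locking and reference handling deliberately elided; TOY_TAG_COMMIT plays the role of NFS_PAGE_TAG_COMMIT):

#include <linux/radix-tree.h>

#define TOY_TAG_COMMIT	1

/* Gather up to max_items entries tagged for commit, starting at 'start';
 * returns how many were found. */
static unsigned int scan_commit_tagged(struct radix_tree_root *root,
				       void **results, unsigned long start,
				       unsigned int max_items)
{
	return radix_tree_gang_lookup_tag(root, results, start, max_items,
					  TOY_TAG_COMMIT);
}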
+3 -2
include/linux/nfs_xdr.h
··· 119 struct nfs_seqid * seqid; 120 int open_flags; 121 __u64 clientid; 122 - __u32 id; 123 union { 124 struct iattr * attrs; /* UNCHECKED, GUARDED */ 125 nfs4_verifier verifier; /* EXCLUSIVE */ ··· 144 nfs4_stateid delegation; 145 __u32 do_recall; 146 __u64 maxsize; 147 }; 148 149 /* ··· 181 * */ 182 struct nfs_lowner { 183 __u64 clientid; 184 - u32 id; 185 }; 186 187 struct nfs_lock_args {
··· 119 struct nfs_seqid * seqid; 120 int open_flags; 121 __u64 clientid; 122 + __u64 id; 123 union { 124 struct iattr * attrs; /* UNCHECKED, GUARDED */ 125 nfs4_verifier verifier; /* EXCLUSIVE */ ··· 144 nfs4_stateid delegation; 145 __u32 do_recall; 146 __u64 maxsize; 147 + __u32 attrset[NFS4_BITMAP_SIZE]; 148 }; 149 150 /* ··· 180 * */ 181 struct nfs_lowner { 182 __u64 clientid; 183 + __u64 id; 184 }; 185 186 struct nfs_lock_args {
+29 -19
include/linux/sunrpc/auth.h
··· 16 #include <linux/sunrpc/xdr.h> 17 18 #include <asm/atomic.h> 19 20 /* size of the nodename buffer */ 21 #define UNX_MAXNODENAME 32 ··· 31 /* 32 * Client user credentials 33 */ 34 struct rpc_cred { 35 struct hlist_node cr_hash; /* hash chain */ 36 - struct rpc_credops * cr_ops; 37 - unsigned long cr_expire; /* when to gc */ 38 - atomic_t cr_count; /* ref count */ 39 - unsigned short cr_flags; /* various flags */ 40 #ifdef RPC_DEBUG 41 unsigned long cr_magic; /* 0x0f4aa4f0 */ 42 #endif 43 44 uid_t cr_uid; 45 46 /* per-flavor data */ 47 }; 48 - #define RPCAUTH_CRED_NEW 0x0001 49 - #define RPCAUTH_CRED_UPTODATE 0x0002 50 51 #define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 52 ··· 63 #define RPC_CREDCACHE_MASK (RPC_CREDCACHE_NR - 1) 64 struct rpc_cred_cache { 65 struct hlist_head hashtable[RPC_CREDCACHE_NR]; 66 - unsigned long nextgc; /* next garbage collection */ 67 - unsigned long expire; /* cache expiry interval */ 68 }; 69 70 struct rpc_auth { 71 unsigned int au_cslack; /* call cred size estimate */ 72 /* guess at number of u32's auth adds before ··· 76 unsigned int au_verfsize; 77 78 unsigned int au_flags; /* various flags */ 79 - struct rpc_authops * au_ops; /* operations */ 80 rpc_authflavor_t au_flavor; /* pseudoflavor (note may 81 * differ from the flavor in 82 * au_ops->au_flavor in gss ··· 122 void *, __be32 *, void *); 123 }; 124 125 - extern struct rpc_authops authunix_ops; 126 - extern struct rpc_authops authnull_ops; 127 - #ifdef CONFIG_SUNRPC_SECURE 128 - extern struct rpc_authops authdes_ops; 129 - #endif 130 131 - int rpcauth_register(struct rpc_authops *); 132 - int rpcauth_unregister(struct rpc_authops *); 133 struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *); 134 - void rpcauth_destroy(struct rpc_auth *); 135 struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); 136 struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); 137 struct rpc_cred * rpcauth_bindcred(struct rpc_task *); 138 void rpcauth_holdcred(struct rpc_task *); ··· 147 int rpcauth_refreshcred(struct rpc_task *); 148 void rpcauth_invalcred(struct rpc_task *); 149 int rpcauth_uptodatecred(struct rpc_task *); 150 - int rpcauth_init_credcache(struct rpc_auth *, unsigned long); 151 - void rpcauth_free_credcache(struct rpc_auth *); 152 153 static inline 154 struct rpc_cred * get_rpccred(struct rpc_cred *cred)
··· 16 #include <linux/sunrpc/xdr.h> 17 18 #include <asm/atomic.h> 19 + #include <linux/rcupdate.h> 20 21 /* size of the nodename buffer */ 22 #define UNX_MAXNODENAME 32 ··· 30 /* 31 * Client user credentials 32 */ 33 + struct rpc_auth; 34 + struct rpc_credops; 35 struct rpc_cred { 36 struct hlist_node cr_hash; /* hash chain */ 37 + struct list_head cr_lru; /* lru garbage collection */ 38 + struct rcu_head cr_rcu; 39 + struct rpc_auth * cr_auth; 40 + const struct rpc_credops *cr_ops; 41 #ifdef RPC_DEBUG 42 unsigned long cr_magic; /* 0x0f4aa4f0 */ 43 #endif 44 + unsigned long cr_expire; /* when to gc */ 45 + unsigned long cr_flags; /* various flags */ 46 + atomic_t cr_count; /* ref count */ 47 48 uid_t cr_uid; 49 50 /* per-flavor data */ 51 }; 52 + #define RPCAUTH_CRED_NEW 0 53 + #define RPCAUTH_CRED_UPTODATE 1 54 + #define RPCAUTH_CRED_HASHED 2 55 56 #define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 57 ··· 56 #define RPC_CREDCACHE_MASK (RPC_CREDCACHE_NR - 1) 57 struct rpc_cred_cache { 58 struct hlist_head hashtable[RPC_CREDCACHE_NR]; 59 + spinlock_t lock; 60 }; 61 62 + struct rpc_authops; 63 struct rpc_auth { 64 unsigned int au_cslack; /* call cred size estimate */ 65 /* guess at number of u32's auth adds before ··· 69 unsigned int au_verfsize; 70 71 unsigned int au_flags; /* various flags */ 72 + const struct rpc_authops *au_ops; /* operations */ 73 rpc_authflavor_t au_flavor; /* pseudoflavor (note may 74 * differ from the flavor in 75 * au_ops->au_flavor in gss ··· 115 void *, __be32 *, void *); 116 }; 117 118 + extern const struct rpc_authops authunix_ops; 119 + extern const struct rpc_authops authnull_ops; 120 121 + void __init rpc_init_authunix(void); 122 + void __init rpcauth_init_module(void); 123 + void __exit rpcauth_remove_module(void); 124 + 125 + int rpcauth_register(const struct rpc_authops *); 126 + int rpcauth_unregister(const struct rpc_authops *); 127 struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *); 128 + void rpcauth_release(struct rpc_auth *); 129 struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); 130 + void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); 131 struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); 132 struct rpc_cred * rpcauth_bindcred(struct rpc_task *); 133 void rpcauth_holdcred(struct rpc_task *); ··· 138 int rpcauth_refreshcred(struct rpc_task *); 139 void rpcauth_invalcred(struct rpc_task *); 140 int rpcauth_uptodatecred(struct rpc_task *); 141 + int rpcauth_init_credcache(struct rpc_auth *); 142 + void rpcauth_destroy_credcache(struct rpc_auth *); 143 + void rpcauth_clear_credcache(struct rpc_cred_cache *); 144 145 static inline 146 struct rpc_cred * get_rpccred(struct rpc_cred *cred)
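Besides the LRU/RCU plumbing, note that the RPCAUTH_CRED_* constants quietly change meaning: they were bit masks (0x0001, 0x0002) in an unsigned short and are now bit numbers (0, 1, 2) in an unsigned long, which is what lets the reworked auth.c below manipulate cr_flags with atomic bitops. The difference in use, as a sketch:

#include <linux/bitops.h>
#include <linux/sunrpc/auth.h>

static void flags_example(unsigned long *cr_flags)
{
	/* old style: *cr_flags |= RPCAUTH_CRED_UPTODATE (mask 0x0002) */
	set_bit(RPCAUTH_CRED_UPTODATE, cr_flags);

	/* old style: if (*cr_flags & RPCAUTH_CRED_UPTODATE) ... */
	if (test_bit(RPCAUTH_CRED_UPTODATE, cr_flags))
		clear_bit(RPCAUTH_CRED_NEW, cr_flags);
}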
+1 -5
include/linux/sunrpc/auth_gss.h
··· 75 struct xdr_netobj gc_wire_ctx; 76 u32 gc_win; 77 unsigned long gc_expiry; 78 }; 79 80 struct gss_upcall_msg; ··· 85 struct gss_cl_ctx *gc_ctx; 86 struct gss_upcall_msg *gc_upcall; 87 }; 88 - 89 - #define gc_uid gc_base.cr_uid 90 - #define gc_count gc_base.cr_count 91 - #define gc_flags gc_base.cr_flags 92 - #define gc_expire gc_base.cr_expire 93 94 #endif /* __KERNEL__ */ 95 #endif /* _LINUX_SUNRPC_AUTH_GSS_H */
··· 75 struct xdr_netobj gc_wire_ctx; 76 u32 gc_win; 77 unsigned long gc_expiry; 78 + struct rcu_head gc_rcu; 79 }; 80 81 struct gss_upcall_msg; ··· 84 struct gss_cl_ctx *gc_ctx; 85 struct gss_upcall_msg *gc_upcall; 86 }; 87 88 #endif /* __KERNEL__ */ 89 #endif /* _LINUX_SUNRPC_AUTH_GSS_H */
+15 -18
include/linux/sunrpc/clnt.h
··· 24 * The high-level client handle 25 */ 26 struct rpc_clnt { 27 - atomic_t cl_count; /* Number of clones */ 28 - atomic_t cl_users; /* number of references */ 29 struct rpc_xprt * cl_xprt; /* transport */ 30 struct rpc_procinfo * cl_procinfo; /* procedure info */ 31 u32 cl_prog, /* RPC program number */ ··· 43 unsigned int cl_softrtry : 1,/* soft timeouts */ 44 cl_intr : 1,/* interruptible */ 45 cl_discrtry : 1,/* disconnect before retry */ 46 - cl_autobind : 1,/* use getport() */ 47 - cl_oneshot : 1,/* dispose after use */ 48 - cl_dead : 1;/* abandoned */ 49 50 struct rpc_rtt * cl_rtt; /* RTO estimator data */ 51 ··· 98 int protocol; 99 struct sockaddr *address; 100 size_t addrsize; 101 struct rpc_timeout *timeout; 102 char *servername; 103 struct rpc_program *program; ··· 111 #define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) 112 #define RPC_CLNT_CREATE_INTR (1UL << 1) 113 #define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) 114 - #define RPC_CLNT_CREATE_ONESHOT (1UL << 3) 115 - #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 4) 116 - #define RPC_CLNT_CREATE_NOPING (1UL << 5) 117 - #define RPC_CLNT_CREATE_DISCRTRY (1UL << 6) 118 119 struct rpc_clnt *rpc_create(struct rpc_create_args *args); 120 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 121 struct rpc_program *, int); 122 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 123 - int rpc_shutdown_client(struct rpc_clnt *); 124 - int rpc_destroy_client(struct rpc_clnt *); 125 void rpc_release_client(struct rpc_clnt *); 126 int rpcb_register(u32, u32, int, unsigned short, int *); 127 - void rpcb_getport(struct rpc_task *); 128 129 void rpc_call_setup(struct rpc_task *, struct rpc_message *, int); 130 ··· 133 void *calldata); 134 int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, 135 int flags); 136 void rpc_restart_call(struct rpc_task *); 137 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset); 138 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset); 139 void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); 140 size_t rpc_max_payload(struct rpc_clnt *); 141 void rpc_force_rebind(struct rpc_clnt *); 142 - int rpc_ping(struct rpc_clnt *clnt, int flags); 143 size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); 144 char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); 145 - 146 - /* 147 - * Helper function for NFSroot support 148 - */ 149 - int rpcb_getport_external(struct sockaddr_in *, __u32, __u32, int); 150 151 #endif /* __KERNEL__ */ 152 #endif /* _LINUX_SUNRPC_CLNT_H */
··· 24 * The high-level client handle 25 */ 26 struct rpc_clnt { 27 + struct kref cl_kref; /* Number of references */ 28 + struct list_head cl_clients; /* Global list of clients */ 29 + struct list_head cl_tasks; /* List of tasks */ 30 + spinlock_t cl_lock; /* spinlock */ 31 struct rpc_xprt * cl_xprt; /* transport */ 32 struct rpc_procinfo * cl_procinfo; /* procedure info */ 33 u32 cl_prog, /* RPC program number */ ··· 41 unsigned int cl_softrtry : 1,/* soft timeouts */ 42 cl_intr : 1,/* interruptible */ 43 cl_discrtry : 1,/* disconnect before retry */ 44 + cl_autobind : 1;/* use getport() */ 45 46 struct rpc_rtt * cl_rtt; /* RTO estimator data */ 47 ··· 98 int protocol; 99 struct sockaddr *address; 100 size_t addrsize; 101 + struct sockaddr *saddress; 102 struct rpc_timeout *timeout; 103 char *servername; 104 struct rpc_program *program; ··· 110 #define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) 111 #define RPC_CLNT_CREATE_INTR (1UL << 1) 112 #define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) 113 + #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) 114 + #define RPC_CLNT_CREATE_NOPING (1UL << 4) 115 + #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) 116 117 struct rpc_clnt *rpc_create(struct rpc_create_args *args); 118 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 119 struct rpc_program *, int); 120 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 121 + void rpc_shutdown_client(struct rpc_clnt *); 122 void rpc_release_client(struct rpc_clnt *); 123 + 124 int rpcb_register(u32, u32, int, unsigned short, int *); 125 + int rpcb_getport_sync(struct sockaddr_in *, __u32, __u32, int); 126 + void rpcb_getport_async(struct rpc_task *); 127 128 void rpc_call_setup(struct rpc_task *, struct rpc_message *, int); 129 ··· 132 void *calldata); 133 int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, 134 int flags); 135 + struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, 136 + int flags); 137 void rpc_restart_call(struct rpc_task *); 138 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset); 139 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset); 140 void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); 141 size_t rpc_max_payload(struct rpc_clnt *); 142 void rpc_force_rebind(struct rpc_clnt *); 143 size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); 144 char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); 145 146 #endif /* __KERNEL__ */ 147 #endif /* _LINUX_SUNRPC_CLNT_H */
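Three things land in clnt.h at once: rpc_create_args gains a saddress field so a client can be bound to a specific local address (used by lockd above to fix the NLM callback source address), rpc_shutdown_client() becomes void and subsumes the old cl_oneshot/cl_dead machinery, and rpcbind is split into sync and async entry points. A sketch of how a caller might use the new field (addresses, servername, and program are placeholders; version and authflavor are fields of the full struct that fall outside the hunk shown):

static struct rpc_clnt *make_bound_client(struct sockaddr_in *server,
					  struct sockaddr_in *local,
					  struct rpc_program *prog, u32 vers)
{
	struct rpc_create_args args = {
		.protocol	= IPPROTO_TCP,
		.address	= (struct sockaddr *)server,
		.addrsize	= sizeof(*server),
		.saddress	= (struct sockaddr *)local,	/* new field */
		.servername	= "example-server",		/* placeholder */
		.program	= prog,
		.version	= vers,
		.authflavor	= RPC_AUTH_UNIX,
	};
	return rpc_create(&args);
}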
+1 -1
include/linux/sunrpc/gss_api.h
··· 77 struct module *gm_owner; 78 struct xdr_netobj gm_oid; 79 char *gm_name; 80 - struct gss_api_ops *gm_ops; 81 /* pseudoflavors supported by this mechanism: */ 82 int gm_pf_num; 83 struct pf_desc * gm_pfs;
··· 77 struct module *gm_owner; 78 struct xdr_netobj gm_oid; 79 char *gm_name; 80 + const struct gss_api_ops *gm_ops; 81 /* pseudoflavors supported by this mechanism: */ 82 int gm_pf_num; 83 struct pf_desc * gm_pfs;
+2
include/linux/sunrpc/rpc_pipe_fs.h
··· 23 void *private; 24 struct list_head pipe; 25 struct list_head in_upcall; 26 int pipelen; 27 int nreaders; 28 int nwriters; 29 wait_queue_head_t waitq; 30 #define RPC_PIPE_WAIT_FOR_OPEN 1 31 int flags;
··· 23 void *private; 24 struct list_head pipe; 25 struct list_head in_upcall; 26 + struct list_head in_downcall; 27 int pipelen; 28 int nreaders; 29 int nwriters; 30 + int nkern_readwriters; 31 wait_queue_head_t waitq; 32 #define RPC_PIPE_WAIT_FOR_OPEN 1 33 int flags;
-6
include/linux/sunrpc/sched.h
··· 98 unsigned short tk_pid; /* debugging aid */ 99 #endif 100 }; 101 - #define tk_auth tk_client->cl_auth 102 #define tk_xprt tk_client->cl_xprt 103 104 /* support walking a list of tasks on a wait queue */ ··· 108 #define task_for_first(task, head) \ 109 if (!list_empty(head) && \ 110 ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1)) 111 - 112 - /* .. and walking list of all tasks */ 113 - #define alltask_for_each(task, pos, head) \ 114 - list_for_each(pos, head) \ 115 - if ((task=list_entry(pos, struct rpc_task, tk_task)),1) 116 117 typedef void (*rpc_action)(struct rpc_task *); 118
··· 98 unsigned short tk_pid; /* debugging aid */ 99 #endif 100 }; 101 #define tk_xprt tk_client->cl_xprt 102 103 /* support walking a list of tasks on a wait queue */ ··· 109 #define task_for_first(task, head) \ 110 if (!list_empty(head) && \ 111 ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1)) 112 113 typedef void (*rpc_action)(struct rpc_task *); 114
+1
include/linux/sunrpc/svcsock.h
··· 59 /* cache of various info for TCP sockets */ 60 void *sk_info_authunix; 61 62 struct sockaddr_storage sk_remote; /* remote peer's address */ 63 int sk_remotelen; /* length of address */ 64 };
··· 59 /* cache of various info for TCP sockets */ 60 void *sk_info_authunix; 61 62 + struct sockaddr_storage sk_local; /* local address */ 63 struct sockaddr_storage sk_remote; /* remote peer's address */ 64 int sk_remotelen; /* length of address */ 65 };
+12 -4
include/linux/sunrpc/xprt.h
··· 17 #include <linux/sunrpc/xdr.h> 18 #include <linux/sunrpc/msg_prot.h> 19 20 extern unsigned int xprt_udp_slot_table_entries; 21 extern unsigned int xprt_tcp_slot_table_entries; 22 ··· 196 char * address_strings[RPC_DISPLAY_MAX]; 197 }; 198 199 - #ifdef __KERNEL__ 200 201 /* 202 * Transport operations used by ULPs ··· 212 /* 213 * Generic internal transport functions 214 */ 215 - struct rpc_xprt * xprt_create_transport(int proto, struct sockaddr *addr, size_t size, struct rpc_timeout *toparms); 216 void xprt_connect(struct rpc_task *task); 217 void xprt_reserve(struct rpc_task *task); 218 int xprt_reserve_xprt(struct rpc_task *task); ··· 250 /* 251 * Socket transport setup operations 252 */ 253 - struct rpc_xprt * xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); 254 - struct rpc_xprt * xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); 255 int init_socket_xprt(void); 256 void cleanup_socket_xprt(void); 257
··· 17 #include <linux/sunrpc/xdr.h> 18 #include <linux/sunrpc/msg_prot.h> 19 20 + #ifdef __KERNEL__ 21 + 22 extern unsigned int xprt_udp_slot_table_entries; 23 extern unsigned int xprt_tcp_slot_table_entries; 24 ··· 194 char * address_strings[RPC_DISPLAY_MAX]; 195 }; 196 197 + struct rpc_xprtsock_create { 198 + int proto; /* IPPROTO_UDP or IPPROTO_TCP */ 199 + struct sockaddr * srcaddr; /* optional local address */ 200 + struct sockaddr * dstaddr; /* remote peer address */ 201 + size_t addrlen; 202 + struct rpc_timeout * timeout; /* optional timeout parameters */ 203 + }; 204 205 /* 206 * Transport operations used by ULPs ··· 204 /* 205 * Generic internal transport functions 206 */ 207 + struct rpc_xprt * xprt_create_transport(struct rpc_xprtsock_create *args); 208 void xprt_connect(struct rpc_task *task); 209 void xprt_reserve(struct rpc_task *task); 210 int xprt_reserve_xprt(struct rpc_task *task); ··· 242 /* 243 * Socket transport setup operations 244 */ 245 + struct rpc_xprt * xs_setup_udp(struct rpc_xprtsock_create *args); 246 + struct rpc_xprt * xs_setup_tcp(struct rpc_xprtsock_create *args); 247 int init_socket_xprt(void); 248 void cleanup_socket_xprt(void); 249
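Transport construction switches from a positional parameter list to a single rpc_xprtsock_create argument struct, which is what lets the new srcaddr (local bind address) ride along without touching every caller. A minimal sketch of a caller (values are placeholders; per the field comments above, srcaddr and timeout are optional):

static struct rpc_xprt *make_udp_xprt(struct sockaddr *dst, size_t len)
{
	struct rpc_xprtsock_create args = {
		.proto		= IPPROTO_UDP,
		.srcaddr	= NULL,	/* no explicit local bind */
		.dstaddr	= dst,
		.addrlen	= len,
		.timeout	= NULL,	/* fall back to transport defaults */
	};
	return xprt_create_transport(&args);
}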
+256 -114
net/sunrpc/auth.c
··· 13 #include <linux/errno.h> 14 #include <linux/sunrpc/clnt.h> 15 #include <linux/spinlock.h> 16 17 #ifdef RPC_DEBUG 18 # define RPCDBG_FACILITY RPCDBG_AUTH 19 #endif 20 21 - static struct rpc_authops * auth_flavors[RPC_AUTH_MAXFLAVOR] = { 22 &authnull_ops, /* AUTH_NULL */ 23 &authunix_ops, /* AUTH_UNIX */ 24 NULL, /* others can be loadable modules */ 25 }; 26 27 static u32 28 pseudoflavor_to_flavor(u32 flavor) { ··· 37 } 38 39 int 40 - rpcauth_register(struct rpc_authops *ops) 41 { 42 rpc_authflavor_t flavor; 43 44 if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) 45 return -EINVAL; 46 - if (auth_flavors[flavor] != NULL) 47 - return -EPERM; /* what else? */ 48 - auth_flavors[flavor] = ops; 49 - return 0; 50 } 51 52 int 53 - rpcauth_unregister(struct rpc_authops *ops) 54 { 55 rpc_authflavor_t flavor; 56 57 if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) 58 return -EINVAL; 59 - if (auth_flavors[flavor] != ops) 60 - return -EPERM; /* what else? */ 61 - auth_flavors[flavor] = NULL; 62 - return 0; 63 } 64 65 struct rpc_auth * 66 rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) 67 { 68 struct rpc_auth *auth; 69 - struct rpc_authops *ops; 70 u32 flavor = pseudoflavor_to_flavor(pseudoflavor); 71 72 auth = ERR_PTR(-EINVAL); 73 if (flavor >= RPC_AUTH_MAXFLAVOR) 74 goto out; 75 76 - /* FIXME - auth_flavors[] really needs an rw lock, 77 - * and module refcounting. */ 78 #ifdef CONFIG_KMOD 79 if ((ops = auth_flavors[flavor]) == NULL) 80 request_module("rpc-auth-%u", flavor); 81 #endif 82 - if ((ops = auth_flavors[flavor]) == NULL) 83 goto out; 84 auth = ops->create(clnt, pseudoflavor); 85 if (IS_ERR(auth)) 86 return auth; 87 if (clnt->cl_auth) 88 - rpcauth_destroy(clnt->cl_auth); 89 clnt->cl_auth = auth; 90 91 out: ··· 105 } 106 107 void 108 - rpcauth_destroy(struct rpc_auth *auth) 109 { 110 if (!atomic_dec_and_test(&auth->au_count)) 111 return; ··· 114 115 static DEFINE_SPINLOCK(rpc_credcache_lock); 116 117 /* 118 * Initialize RPC credential cache 119 */ 120 int 121 - rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) 122 { 123 struct rpc_cred_cache *new; 124 int i; ··· 148 return -ENOMEM; 149 for (i = 0; i < RPC_CREDCACHE_NR; i++) 150 INIT_HLIST_HEAD(&new->hashtable[i]); 151 - new->expire = expire; 152 - new->nextgc = jiffies + (expire >> 1); 153 auth->au_credcache = new; 154 return 0; 155 } ··· 157 * Destroy a list of credentials 158 */ 159 static inline 160 - void rpcauth_destroy_credlist(struct hlist_head *head) 161 { 162 struct rpc_cred *cred; 163 164 - while (!hlist_empty(head)) { 165 - cred = hlist_entry(head->first, struct rpc_cred, cr_hash); 166 - hlist_del_init(&cred->cr_hash); 167 put_rpccred(cred); 168 } 169 } ··· 173 * that are not referenced. 
174 */ 175 void 176 - rpcauth_free_credcache(struct rpc_auth *auth) 177 { 178 - struct rpc_cred_cache *cache = auth->au_credcache; 179 - HLIST_HEAD(free); 180 - struct hlist_node *pos, *next; 181 struct rpc_cred *cred; 182 int i; 183 184 spin_lock(&rpc_credcache_lock); 185 for (i = 0; i < RPC_CREDCACHE_NR; i++) { 186 - hlist_for_each_safe(pos, next, &cache->hashtable[i]) { 187 - cred = hlist_entry(pos, struct rpc_cred, cr_hash); 188 - __hlist_del(&cred->cr_hash); 189 - hlist_add_head(&cred->cr_hash, &free); 190 } 191 } 192 spin_unlock(&rpc_credcache_lock); 193 rpcauth_destroy_credlist(&free); 194 } 195 196 - static void 197 - rpcauth_prune_expired(struct rpc_auth *auth, struct rpc_cred *cred, struct hlist_head *free) 198 { 199 - if (atomic_read(&cred->cr_count) != 1) 200 - return; 201 - if (time_after(jiffies, cred->cr_expire + auth->au_credcache->expire)) 202 - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 203 - if (!(cred->cr_flags & RPCAUTH_CRED_UPTODATE)) { 204 - __hlist_del(&cred->cr_hash); 205 - hlist_add_head(&cred->cr_hash, free); 206 } 207 } 208 209 /* 210 * Remove stale credentials. Avoid sleeping inside the loop. 211 */ 212 - static void 213 - rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) 214 { 215 - struct rpc_cred_cache *cache = auth->au_credcache; 216 - struct hlist_node *pos, *next; 217 - struct rpc_cred *cred; 218 - int i; 219 220 - dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); 221 - for (i = 0; i < RPC_CREDCACHE_NR; i++) { 222 - hlist_for_each_safe(pos, next, &cache->hashtable[i]) { 223 - cred = hlist_entry(pos, struct rpc_cred, cr_hash); 224 - rpcauth_prune_expired(auth, cred, free); 225 } 226 } 227 - cache->nextgc = jiffies + cache->expire; 228 } 229 230 /* ··· 271 rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, 272 int flags) 273 { 274 struct rpc_cred_cache *cache = auth->au_credcache; 275 - HLIST_HEAD(free); 276 - struct hlist_node *pos, *next; 277 - struct rpc_cred *new = NULL, 278 - *cred = NULL; 279 int nr = 0; 280 281 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) 282 nr = acred->uid & RPC_CREDCACHE_MASK; 283 - retry: 284 - spin_lock(&rpc_credcache_lock); 285 - if (time_before(cache->nextgc, jiffies)) 286 - rpcauth_gc_credcache(auth, &free); 287 - hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { 288 - struct rpc_cred *entry; 289 - entry = hlist_entry(pos, struct rpc_cred, cr_hash); 290 - if (entry->cr_ops->crmatch(acred, entry, flags)) { 291 - hlist_del(&entry->cr_hash); 292 - cred = entry; 293 - break; 294 } 295 - rpcauth_prune_expired(auth, entry, &free); 296 } 297 - if (new) { 298 - if (cred) 299 - hlist_add_head(&new->cr_hash, &free); 300 - else 301 - cred = new; 302 - } 303 - if (cred) { 304 - hlist_add_head(&cred->cr_hash, &cache->hashtable[nr]); 305 - get_rpccred(cred); 306 - } 307 - spin_unlock(&rpc_credcache_lock); 308 309 - rpcauth_destroy_credlist(&free); 310 311 - if (!cred) { 312 - new = auth->au_ops->crcreate(auth, acred, flags); 313 - if (!IS_ERR(new)) { 314 - #ifdef RPC_DEBUG 315 - new->cr_magic = RPCAUTH_CRED_MAGIC; 316 - #endif 317 - goto retry; 318 - } else 319 - cred = new; 320 - } else if ((cred->cr_flags & RPCAUTH_CRED_NEW) 321 && cred->cr_ops->cr_init != NULL 322 && !(flags & RPCAUTH_LOOKUP_NEW)) { 323 int res = cred->cr_ops->cr_init(auth, cred); ··· 329 cred = ERR_PTR(res); 330 } 331 } 332 - 333 - return (struct rpc_cred *) cred; 334 } 335 336 struct rpc_cred * ··· 352 return ret; 353 } 354 355 struct rpc_cred * 356 rpcauth_bindcred(struct rpc_task *task) 357 { 358 - struct rpc_auth 
*auth = task->tk_auth; 359 struct auth_cred acred = { 360 .uid = current->fsuid, 361 .gid = current->fsgid, ··· 382 int flags = 0; 383 384 dprintk("RPC: %5u looking up %s cred\n", 385 - task->tk_pid, task->tk_auth->au_ops->au_name); 386 get_group_info(acred.group_info); 387 if (task->tk_flags & RPC_TASK_ROOTCREDS) 388 flags |= RPCAUTH_LOOKUP_ROOTCREDS; ··· 398 void 399 rpcauth_holdcred(struct rpc_task *task) 400 { 401 - dprintk("RPC: %5u holding %s cred %p\n", 402 - task->tk_pid, task->tk_auth->au_ops->au_name, 403 - task->tk_msg.rpc_cred); 404 - if (task->tk_msg.rpc_cred) 405 - get_rpccred(task->tk_msg.rpc_cred); 406 } 407 408 void 409 put_rpccred(struct rpc_cred *cred) 410 { 411 - cred->cr_expire = jiffies; 412 if (!atomic_dec_and_test(&cred->cr_count)) 413 return; 414 cred->cr_ops->crdestroy(cred); 415 } 416 ··· 443 struct rpc_cred *cred = task->tk_msg.rpc_cred; 444 445 dprintk("RPC: %5u releasing %s cred %p\n", 446 - task->tk_pid, task->tk_auth->au_ops->au_name, cred); 447 448 put_rpccred(cred); 449 task->tk_msg.rpc_cred = NULL; ··· 455 struct rpc_cred *cred = task->tk_msg.rpc_cred; 456 457 dprintk("RPC: %5u marshaling %s cred %p\n", 458 - task->tk_pid, task->tk_auth->au_ops->au_name, cred); 459 460 return cred->cr_ops->crmarshal(task, p); 461 } ··· 466 struct rpc_cred *cred = task->tk_msg.rpc_cred; 467 468 dprintk("RPC: %5u validating %s cred %p\n", 469 - task->tk_pid, task->tk_auth->au_ops->au_name, cred); 470 471 return cred->cr_ops->crvalidate(task, p); 472 } ··· 476 __be32 *data, void *obj) 477 { 478 struct rpc_cred *cred = task->tk_msg.rpc_cred; 479 480 dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", 481 task->tk_pid, cred->cr_ops->cr_name, cred); 482 if (cred->cr_ops->crwrap_req) 483 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); 484 /* By default, we encode the arguments normally. */ 485 - return encode(rqstp, data, obj); 486 } 487 488 int ··· 494 __be32 *data, void *obj) 495 { 496 struct rpc_cred *cred = task->tk_msg.rpc_cred; 497 498 dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", 499 task->tk_pid, cred->cr_ops->cr_name, cred); ··· 502 return cred->cr_ops->crunwrap_resp(task, decode, rqstp, 503 data, obj); 504 /* By default, we decode the arguments normally. */ 505 - return decode(rqstp, data, obj); 506 } 507 508 int ··· 515 int err; 516 517 dprintk("RPC: %5u refreshing %s cred %p\n", 518 - task->tk_pid, task->tk_auth->au_ops->au_name, cred); 519 520 err = cred->cr_ops->crrefresh(task); 521 if (err < 0) ··· 526 void 527 rpcauth_invalcred(struct rpc_task *task) 528 { 529 dprintk("RPC: %5u invalidating %s cred %p\n", 530 - task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); 531 - spin_lock(&rpc_credcache_lock); 532 - if (task->tk_msg.rpc_cred) 533 - task->tk_msg.rpc_cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 534 - spin_unlock(&rpc_credcache_lock); 535 } 536 537 int 538 rpcauth_uptodatecred(struct rpc_task *task) 539 { 540 - return !(task->tk_msg.rpc_cred) || 541 - (task->tk_msg.rpc_cred->cr_flags & RPCAUTH_CRED_UPTODATE); 542 }
··· 13 #include <linux/errno.h> 14 #include <linux/sunrpc/clnt.h> 15 #include <linux/spinlock.h> 16 + #include <linux/smp_lock.h> 17 18 #ifdef RPC_DEBUG 19 # define RPCDBG_FACILITY RPCDBG_AUTH 20 #endif 21 22 + static DEFINE_SPINLOCK(rpc_authflavor_lock); 23 + static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { 24 &authnull_ops, /* AUTH_NULL */ 25 &authunix_ops, /* AUTH_UNIX */ 26 NULL, /* others can be loadable modules */ 27 }; 28 + 29 + static LIST_HEAD(cred_unused); 30 + static unsigned long number_cred_unused; 31 32 static u32 33 pseudoflavor_to_flavor(u32 flavor) { ··· 32 } 33 34 int 35 + rpcauth_register(const struct rpc_authops *ops) 36 { 37 rpc_authflavor_t flavor; 38 + int ret = -EPERM; 39 40 if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) 41 return -EINVAL; 42 + spin_lock(&rpc_authflavor_lock); 43 + if (auth_flavors[flavor] == NULL) { 44 + auth_flavors[flavor] = ops; 45 + ret = 0; 46 + } 47 + spin_unlock(&rpc_authflavor_lock); 48 + return ret; 49 } 50 51 int 52 + rpcauth_unregister(const struct rpc_authops *ops) 53 { 54 rpc_authflavor_t flavor; 55 + int ret = -EPERM; 56 57 if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) 58 return -EINVAL; 59 + spin_lock(&rpc_authflavor_lock); 60 + if (auth_flavors[flavor] == ops) { 61 + auth_flavors[flavor] = NULL; 62 + ret = 0; 63 + } 64 + spin_unlock(&rpc_authflavor_lock); 65 + return ret; 66 } 67 68 struct rpc_auth * 69 rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) 70 { 71 struct rpc_auth *auth; 72 + const struct rpc_authops *ops; 73 u32 flavor = pseudoflavor_to_flavor(pseudoflavor); 74 75 auth = ERR_PTR(-EINVAL); 76 if (flavor >= RPC_AUTH_MAXFLAVOR) 77 goto out; 78 79 #ifdef CONFIG_KMOD 80 if ((ops = auth_flavors[flavor]) == NULL) 81 request_module("rpc-auth-%u", flavor); 82 #endif 83 + spin_lock(&rpc_authflavor_lock); 84 + ops = auth_flavors[flavor]; 85 + if (ops == NULL || !try_module_get(ops->owner)) { 86 + spin_unlock(&rpc_authflavor_lock); 87 goto out; 88 + } 89 + spin_unlock(&rpc_authflavor_lock); 90 auth = ops->create(clnt, pseudoflavor); 91 + module_put(ops->owner); 92 if (IS_ERR(auth)) 93 return auth; 94 if (clnt->cl_auth) 95 + rpcauth_release(clnt->cl_auth); 96 clnt->cl_auth = auth; 97 98 out: ··· 88 } 89 90 void 91 + rpcauth_release(struct rpc_auth *auth) 92 { 93 if (!atomic_dec_and_test(&auth->au_count)) 94 return; ··· 97 98 static DEFINE_SPINLOCK(rpc_credcache_lock); 99 100 + static void 101 + rpcauth_unhash_cred_locked(struct rpc_cred *cred) 102 + { 103 + hlist_del_rcu(&cred->cr_hash); 104 + smp_mb__before_clear_bit(); 105 + clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); 106 + } 107 + 108 + static void 109 + rpcauth_unhash_cred(struct rpc_cred *cred) 110 + { 111 + spinlock_t *cache_lock; 112 + 113 + cache_lock = &cred->cr_auth->au_credcache->lock; 114 + spin_lock(cache_lock); 115 + if (atomic_read(&cred->cr_count) == 0) 116 + rpcauth_unhash_cred_locked(cred); 117 + spin_unlock(cache_lock); 118 + } 119 + 120 /* 121 * Initialize RPC credential cache 122 */ 123 int 124 + rpcauth_init_credcache(struct rpc_auth *auth) 125 { 126 struct rpc_cred_cache *new; 127 int i; ··· 111 return -ENOMEM; 112 for (i = 0; i < RPC_CREDCACHE_NR; i++) 113 INIT_HLIST_HEAD(&new->hashtable[i]); 114 + spin_lock_init(&new->lock); 115 auth->au_credcache = new; 116 return 0; 117 } ··· 121 * Destroy a list of credentials 122 */ 123 static inline 124 + void rpcauth_destroy_credlist(struct list_head *head) 125 { 126 struct rpc_cred *cred; 127 128 + while (!list_empty(head)) { 129 + cred = list_entry(head->next, 
struct rpc_cred, cr_lru); 130 + list_del_init(&cred->cr_lru); 131 put_rpccred(cred); 132 } 133 } ··· 137 * that are not referenced. 138 */ 139 void 140 + rpcauth_clear_credcache(struct rpc_cred_cache *cache) 141 { 142 + LIST_HEAD(free); 143 + struct hlist_head *head; 144 struct rpc_cred *cred; 145 int i; 146 147 spin_lock(&rpc_credcache_lock); 148 + spin_lock(&cache->lock); 149 for (i = 0; i < RPC_CREDCACHE_NR; i++) { 150 + head = &cache->hashtable[i]; 151 + while (!hlist_empty(head)) { 152 + cred = hlist_entry(head->first, struct rpc_cred, cr_hash); 153 + get_rpccred(cred); 154 + if (!list_empty(&cred->cr_lru)) { 155 + list_del(&cred->cr_lru); 156 + number_cred_unused--; 157 + } 158 + list_add_tail(&cred->cr_lru, &free); 159 + rpcauth_unhash_cred_locked(cred); 160 } 161 } 162 + spin_unlock(&cache->lock); 163 spin_unlock(&rpc_credcache_lock); 164 rpcauth_destroy_credlist(&free); 165 } 166 167 + /* 168 + * Destroy the RPC credential cache 169 + */ 170 + void 171 + rpcauth_destroy_credcache(struct rpc_auth *auth) 172 { 173 + struct rpc_cred_cache *cache = auth->au_credcache; 174 + 175 + if (cache) { 176 + auth->au_credcache = NULL; 177 + rpcauth_clear_credcache(cache); 178 + kfree(cache); 179 } 180 } 181 182 /* 183 * Remove stale credentials. Avoid sleeping inside the loop. 184 */ 185 + static int 186 + rpcauth_prune_expired(struct list_head *free, int nr_to_scan) 187 { 188 + spinlock_t *cache_lock; 189 + struct rpc_cred *cred; 190 191 + while (!list_empty(&cred_unused)) { 192 + cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru); 193 + list_del_init(&cred->cr_lru); 194 + number_cred_unused--; 195 + if (atomic_read(&cred->cr_count) != 0) 196 + continue; 197 + cache_lock = &cred->cr_auth->au_credcache->lock; 198 + spin_lock(cache_lock); 199 + if (atomic_read(&cred->cr_count) == 0) { 200 + get_rpccred(cred); 201 + list_add_tail(&cred->cr_lru, free); 202 + rpcauth_unhash_cred_locked(cred); 203 + nr_to_scan--; 204 } 205 + spin_unlock(cache_lock); 206 + if (nr_to_scan == 0) 207 + break; 208 } 209 + return nr_to_scan; 210 + } 211 + 212 + /* 213 + * Run memory cache shrinker. 
214 + */ 215 + static int 216 + rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) 217 + { 218 + LIST_HEAD(free); 219 + int res; 220 + 221 + if (list_empty(&cred_unused)) 222 + return 0; 223 + spin_lock(&rpc_credcache_lock); 224 + nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan); 225 + res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure; 226 + spin_unlock(&rpc_credcache_lock); 227 + rpcauth_destroy_credlist(&free); 228 + return res; 229 } 230 231 /* ··· 198 rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, 199 int flags) 200 { 201 + LIST_HEAD(free); 202 struct rpc_cred_cache *cache = auth->au_credcache; 203 + struct hlist_node *pos; 204 + struct rpc_cred *cred = NULL, 205 + *entry, *new; 206 int nr = 0; 207 208 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) 209 nr = acred->uid & RPC_CREDCACHE_MASK; 210 + 211 + rcu_read_lock(); 212 + hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { 213 + if (!entry->cr_ops->crmatch(acred, entry, flags)) 214 + continue; 215 + spin_lock(&cache->lock); 216 + if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) { 217 + spin_unlock(&cache->lock); 218 + continue; 219 } 220 + cred = get_rpccred(entry); 221 + spin_unlock(&cache->lock); 222 + break; 223 } 224 + rcu_read_unlock(); 225 226 + if (cred != NULL) 227 + goto found; 228 229 + new = auth->au_ops->crcreate(auth, acred, flags); 230 + if (IS_ERR(new)) { 231 + cred = new; 232 + goto out; 233 + } 234 + 235 + spin_lock(&cache->lock); 236 + hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) { 237 + if (!entry->cr_ops->crmatch(acred, entry, flags)) 238 + continue; 239 + cred = get_rpccred(entry); 240 + break; 241 + } 242 + if (cred == NULL) { 243 + cred = new; 244 + set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); 245 + hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]); 246 + } else 247 + list_add_tail(&new->cr_lru, &free); 248 + spin_unlock(&cache->lock); 249 + found: 250 + if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) 251 && cred->cr_ops->cr_init != NULL 252 && !(flags & RPCAUTH_LOOKUP_NEW)) { 253 int res = cred->cr_ops->cr_init(auth, cred); ··· 253 cred = ERR_PTR(res); 254 } 255 } 256 + rpcauth_destroy_credlist(&free); 257 + out: 258 + return cred; 259 } 260 261 struct rpc_cred * ··· 275 return ret; 276 } 277 278 + void 279 + rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, 280 + struct rpc_auth *auth, const struct rpc_credops *ops) 281 + { 282 + INIT_HLIST_NODE(&cred->cr_hash); 283 + INIT_LIST_HEAD(&cred->cr_lru); 284 + atomic_set(&cred->cr_count, 1); 285 + cred->cr_auth = auth; 286 + cred->cr_ops = ops; 287 + cred->cr_expire = jiffies; 288 + #ifdef RPC_DEBUG 289 + cred->cr_magic = RPCAUTH_CRED_MAGIC; 290 + #endif 291 + cred->cr_uid = acred->uid; 292 + } 293 + EXPORT_SYMBOL(rpcauth_init_cred); 294 + 295 struct rpc_cred * 296 rpcauth_bindcred(struct rpc_task *task) 297 { 298 + struct rpc_auth *auth = task->tk_client->cl_auth; 299 struct auth_cred acred = { 300 .uid = current->fsuid, 301 .gid = current->fsgid, ··· 288 int flags = 0; 289 290 dprintk("RPC: %5u looking up %s cred\n", 291 + task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); 292 get_group_info(acred.group_info); 293 if (task->tk_flags & RPC_TASK_ROOTCREDS) 294 flags |= RPCAUTH_LOOKUP_ROOTCREDS; ··· 304 void 305 rpcauth_holdcred(struct rpc_task *task) 306 { 307 + struct rpc_cred *cred = task->tk_msg.rpc_cred; 308 + if (cred != NULL) { 309 + get_rpccred(cred); 310 + dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, 311 + 
cred->cr_auth->au_ops->au_name, cred); 312 + } 313 } 314 315 void 316 put_rpccred(struct rpc_cred *cred) 317 { 318 + /* Fast path for unhashed credentials */ 319 + if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) 320 + goto need_lock; 321 + 322 if (!atomic_dec_and_test(&cred->cr_count)) 323 return; 324 + goto out_destroy; 325 + need_lock: 326 + if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) 327 + return; 328 + if (!list_empty(&cred->cr_lru)) { 329 + number_cred_unused--; 330 + list_del_init(&cred->cr_lru); 331 + } 332 + if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) 333 + rpcauth_unhash_cred(cred); 334 + else if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { 335 + cred->cr_expire = jiffies; 336 + list_add_tail(&cred->cr_lru, &cred_unused); 337 + number_cred_unused++; 338 + spin_unlock(&rpc_credcache_lock); 339 + return; 340 + } 341 + spin_unlock(&rpc_credcache_lock); 342 + out_destroy: 343 cred->cr_ops->crdestroy(cred); 344 } 345 ··· 326 struct rpc_cred *cred = task->tk_msg.rpc_cred; 327 328 dprintk("RPC: %5u releasing %s cred %p\n", 329 + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); 330 331 put_rpccred(cred); 332 task->tk_msg.rpc_cred = NULL; ··· 338 struct rpc_cred *cred = task->tk_msg.rpc_cred; 339 340 dprintk("RPC: %5u marshaling %s cred %p\n", 341 + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); 342 343 return cred->cr_ops->crmarshal(task, p); 344 } ··· 349 struct rpc_cred *cred = task->tk_msg.rpc_cred; 350 351 dprintk("RPC: %5u validating %s cred %p\n", 352 + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); 353 354 return cred->cr_ops->crvalidate(task, p); 355 } ··· 359 __be32 *data, void *obj) 360 { 361 struct rpc_cred *cred = task->tk_msg.rpc_cred; 362 + int ret; 363 364 dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", 365 task->tk_pid, cred->cr_ops->cr_name, cred); 366 if (cred->cr_ops->crwrap_req) 367 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); 368 /* By default, we encode the arguments normally. */ 369 + lock_kernel(); 370 + ret = encode(rqstp, data, obj); 371 + unlock_kernel(); 372 + return ret; 373 } 374 375 int ··· 373 __be32 *data, void *obj) 374 { 375 struct rpc_cred *cred = task->tk_msg.rpc_cred; 376 + int ret; 377 378 dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", 379 task->tk_pid, cred->cr_ops->cr_name, cred); ··· 380 return cred->cr_ops->crunwrap_resp(task, decode, rqstp, 381 data, obj); 382 /* By default, we decode the arguments normally. 
*/ 383 + lock_kernel(); 384 + ret = decode(rqstp, data, obj); 385 + unlock_kernel(); 386 + return ret; 387 } 388 389 int ··· 390 int err; 391 392 dprintk("RPC: %5u refreshing %s cred %p\n", 393 + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); 394 395 err = cred->cr_ops->crrefresh(task); 396 if (err < 0) ··· 401 void 402 rpcauth_invalcred(struct rpc_task *task) 403 { 404 + struct rpc_cred *cred = task->tk_msg.rpc_cred; 405 + 406 dprintk("RPC: %5u invalidating %s cred %p\n", 407 + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); 408 + if (cred) 409 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 410 } 411 412 int 413 rpcauth_uptodatecred(struct rpc_task *task) 414 { 415 + struct rpc_cred *cred = task->tk_msg.rpc_cred; 416 + 417 + return cred == NULL || 418 + test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; 419 + } 420 + 421 + 422 + static struct shrinker *rpc_cred_shrinker; 423 + 424 + void __init rpcauth_init_module(void) 425 + { 426 + rpc_init_authunix(); 427 + rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker); 428 + } 429 + 430 + void __exit rpcauth_remove_module(void) 431 + { 432 + if (rpc_cred_shrinker != NULL) 433 + remove_shrinker(rpc_cred_shrinker); 434 }
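Two points on the auth.c rewrite above. First, the per-auth timed garbage collection (nextgc/expire) is replaced by a global cred_unused LRU trimmed under memory pressure via the shrinker interface of this era: set_shrinker() hands back a handle, and the callback is a bare function taking (nr_to_scan, gfp_mask). A toy sketch of that contract (the toy_* helpers are hypothetical):

static int toy_shrinker(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan)
		toy_free_unused(nr_to_scan);	/* hypothetical: drop entries */

	/* report remaining shrinkable entries, scaled as the VFS expects */
	return (toy_count_unused() / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker *toy_handle;

static int __init toy_init(void)
{
	toy_handle = set_shrinker(DEFAULT_SEEKS, toy_shrinker);
	return 0;
}

static void __exit toy_exit(void)
{
	if (toy_handle)
		remove_shrinker(toy_handle);
}

Second, the lock_kernel()/unlock_kernel() pairs now wrapping only the default encode/decode calls appear to correspond to the "sunrpc: drop BKL around wrap and unwrap" commit listed at the top of this merge: the BKL is pushed down from the callers to just the XDR callbacks.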
+227 -124
net/sunrpc/auth_gss/auth_gss.c
··· 54 #include <linux/sunrpc/gss_api.h> 55 #include <asm/uaccess.h> 56 57 - static struct rpc_authops authgss_ops; 58 59 - static struct rpc_credops gss_credops; 60 61 #ifdef RPC_DEBUG 62 # define RPCDBG_FACILITY RPCDBG_AUTH ··· 65 66 #define NFS_NGROUPS 16 67 68 - #define GSS_CRED_EXPIRE (60 * HZ) /* XXX: reasonable? */ 69 #define GSS_CRED_SLACK 1024 /* XXX: unused */ 70 /* length of a krb5 verifier (48), plus data added before arguments when 71 * using integrity (two 4-byte integers): */ ··· 79 /* dump the buffer in `emacs-hexl' style */ 80 #define isprint(c) ((c > 0x1f) && (c < 0x7f)) 81 82 - static DEFINE_RWLOCK(gss_ctx_lock); 83 - 84 struct gss_auth { 85 struct rpc_auth rpc_auth; 86 struct gss_api_mech *mech; 87 enum rpc_gss_svc service; 88 - struct list_head upcalls; 89 struct rpc_clnt *client; 90 struct dentry *dentry; 91 - spinlock_t lock; 92 }; 93 94 - static void gss_destroy_ctx(struct gss_cl_ctx *); 95 static struct rpc_pipe_ops gss_upcall_ops; 96 97 static inline struct gss_cl_ctx * ··· 102 gss_put_ctx(struct gss_cl_ctx *ctx) 103 { 104 if (atomic_dec_and_test(&ctx->count)) 105 - gss_destroy_ctx(ctx); 106 } 107 108 static void 109 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) 110 { 111 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 112 struct gss_cl_ctx *old; 113 - write_lock(&gss_ctx_lock); 114 old = gss_cred->gc_ctx; 115 - gss_cred->gc_ctx = ctx; 116 - cred->cr_flags |= RPCAUTH_CRED_UPTODATE; 117 - cred->cr_flags &= ~RPCAUTH_CRED_NEW; 118 - write_unlock(&gss_ctx_lock); 119 if (old) 120 gss_put_ctx(old); 121 } ··· 130 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 131 int res = 0; 132 133 - read_lock(&gss_ctx_lock); 134 - if ((cred->cr_flags & RPCAUTH_CRED_UPTODATE) && gss_cred->gc_ctx) 135 res = 1; 136 - read_unlock(&gss_ctx_lock); 137 return res; 138 } 139 ··· 172 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 173 struct gss_cl_ctx *ctx = NULL; 174 175 - read_lock(&gss_ctx_lock); 176 if (gss_cred->gc_ctx) 177 ctx = gss_get_ctx(gss_cred->gc_ctx); 178 - read_unlock(&gss_ctx_lock); 179 return ctx; 180 } 181 ··· 270 } 271 272 static struct gss_upcall_msg * 273 - __gss_find_upcall(struct gss_auth *gss_auth, uid_t uid) 274 { 275 struct gss_upcall_msg *pos; 276 - list_for_each_entry(pos, &gss_auth->upcalls, list) { 277 if (pos->uid != uid) 278 continue; 279 atomic_inc(&pos->count); ··· 291 static inline struct gss_upcall_msg * 292 gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) 293 { 294 struct gss_upcall_msg *old; 295 296 - spin_lock(&gss_auth->lock); 297 - old = __gss_find_upcall(gss_auth, gss_msg->uid); 298 if (old == NULL) { 299 atomic_inc(&gss_msg->count); 300 - list_add(&gss_msg->list, &gss_auth->upcalls); 301 } else 302 gss_msg = old; 303 - spin_unlock(&gss_auth->lock); 304 return gss_msg; 305 } 306 307 static void 308 __gss_unhash_msg(struct gss_upcall_msg *gss_msg) 309 { 310 - if (list_empty(&gss_msg->list)) 311 - return; 312 list_del_init(&gss_msg->list); 313 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); 314 wake_up_all(&gss_msg->waitqueue); ··· 319 gss_unhash_msg(struct gss_upcall_msg *gss_msg) 320 { 321 struct gss_auth *gss_auth = gss_msg->auth; 322 323 - spin_lock(&gss_auth->lock); 324 - __gss_unhash_msg(gss_msg); 325 - spin_unlock(&gss_auth->lock); 326 } 327 328 static void ··· 335 struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, 336 struct gss_cred, gc_base); 337 struct gss_upcall_msg *gss_msg = 
gss_cred->gc_upcall; 338 339 - BUG_ON(gss_msg == NULL); 340 if (gss_msg->ctx) 341 gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); 342 else 343 task->tk_status = gss_msg->msg.errno; 344 - spin_lock(&gss_msg->auth->lock); 345 gss_cred->gc_upcall = NULL; 346 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); 347 - spin_unlock(&gss_msg->auth->lock); 348 gss_release_msg(gss_msg); 349 } 350 ··· 391 gss_refresh_upcall(struct rpc_task *task) 392 { 393 struct rpc_cred *cred = task->tk_msg.rpc_cred; 394 - struct gss_auth *gss_auth = container_of(task->tk_client->cl_auth, 395 struct gss_auth, rpc_auth); 396 struct gss_cred *gss_cred = container_of(cred, 397 struct gss_cred, gc_base); 398 struct gss_upcall_msg *gss_msg; 399 int err = 0; 400 401 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, ··· 406 err = PTR_ERR(gss_msg); 407 goto out; 408 } 409 - spin_lock(&gss_auth->lock); 410 if (gss_cred->gc_upcall != NULL) 411 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); 412 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { ··· 417 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); 418 } else 419 err = gss_msg->msg.errno; 420 - spin_unlock(&gss_auth->lock); 421 gss_release_msg(gss_msg); 422 out: 423 dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", ··· 428 static inline int 429 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) 430 { 431 struct rpc_cred *cred = &gss_cred->gc_base; 432 struct gss_upcall_msg *gss_msg; 433 DEFINE_WAIT(wait); ··· 442 } 443 for (;;) { 444 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); 445 - spin_lock(&gss_auth->lock); 446 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { 447 - spin_unlock(&gss_auth->lock); 448 break; 449 } 450 - spin_unlock(&gss_auth->lock); 451 if (signalled()) { 452 err = -ERESTARTSYS; 453 goto out_intr; ··· 457 gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); 458 else 459 err = gss_msg->msg.errno; 460 out_intr: 461 finish_wait(&gss_msg->waitqueue, &wait); 462 gss_release_msg(gss_msg); ··· 496 const void *p, *end; 497 void *buf; 498 struct rpc_clnt *clnt; 499 - struct gss_auth *gss_auth; 500 - struct rpc_cred *cred; 501 struct gss_upcall_msg *gss_msg; 502 struct gss_cl_ctx *ctx; 503 uid_t uid; 504 - int err = -EFBIG; 505 506 if (mlen > MSG_BUF_MAXSIZE) 507 goto out; ··· 509 if (!buf) 510 goto out; 511 512 - clnt = RPC_I(filp->f_path.dentry->d_inode)->private; 513 err = -EFAULT; 514 if (copy_from_user(buf, src, mlen)) 515 goto err; ··· 525 ctx = gss_alloc_context(); 526 if (ctx == NULL) 527 goto err; 528 - err = 0; 529 - gss_auth = container_of(clnt->cl_auth, struct gss_auth, rpc_auth); 530 - p = gss_fill_context(p, end, ctx, gss_auth->mech); 531 if (IS_ERR(p)) { 532 err = PTR_ERR(p); 533 - if (err != -EACCES) 534 - goto err_put_ctx; 535 } 536 - spin_lock(&gss_auth->lock); 537 - gss_msg = __gss_find_upcall(gss_auth, uid); 538 - if (gss_msg) { 539 - if (err == 0 && gss_msg->ctx == NULL) 540 - gss_msg->ctx = gss_get_ctx(ctx); 541 - gss_msg->msg.errno = err; 542 - __gss_unhash_msg(gss_msg); 543 - spin_unlock(&gss_auth->lock); 544 - gss_release_msg(gss_msg); 545 - } else { 546 - struct auth_cred acred = { .uid = uid }; 547 - spin_unlock(&gss_auth->lock); 548 - cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW); 549 - if (IS_ERR(cred)) { 550 - err = PTR_ERR(cred); 551 - goto err_put_ctx; 552 - } 553 - gss_cred_set_ctx(cred, gss_get_ctx(ctx)); 554 - } 555 - gss_put_ctx(ctx); 556 - 
kfree(buf); 557 - dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen); 558 - return mlen; 559 err_put_ctx: 560 gss_put_ctx(ctx); 561 err: 562 kfree(buf); 563 out: 564 - dprintk("RPC: gss_pipe_downcall returning %d\n", err); 565 return err; 566 } 567 ··· 564 gss_pipe_release(struct inode *inode) 565 { 566 struct rpc_inode *rpci = RPC_I(inode); 567 - struct rpc_clnt *clnt; 568 - struct rpc_auth *auth; 569 - struct gss_auth *gss_auth; 570 571 - clnt = rpci->private; 572 - auth = clnt->cl_auth; 573 - gss_auth = container_of(auth, struct gss_auth, rpc_auth); 574 - spin_lock(&gss_auth->lock); 575 - while (!list_empty(&gss_auth->upcalls)) { 576 - struct gss_upcall_msg *gss_msg; 577 578 - gss_msg = list_entry(gss_auth->upcalls.next, 579 struct gss_upcall_msg, list); 580 gss_msg->msg.errno = -EPIPE; 581 atomic_inc(&gss_msg->count); 582 __gss_unhash_msg(gss_msg); 583 - spin_unlock(&gss_auth->lock); 584 gss_release_msg(gss_msg); 585 - spin_lock(&gss_auth->lock); 586 } 587 - spin_unlock(&gss_auth->lock); 588 } 589 590 static void ··· 632 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 633 if (gss_auth->service == 0) 634 goto err_put_mech; 635 - INIT_LIST_HEAD(&gss_auth->upcalls); 636 - spin_lock_init(&gss_auth->lock); 637 auth = &gss_auth->rpc_auth; 638 auth->au_cslack = GSS_CRED_SLACK >> 2; 639 auth->au_rslack = GSS_VERF_SLACK >> 2; 640 auth->au_ops = &authgss_ops; 641 auth->au_flavor = flavor; 642 atomic_set(&auth->au_count, 1); 643 - 644 - err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE); 645 - if (err) 646 - goto err_put_mech; 647 648 gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, 649 clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); ··· 647 goto err_put_mech; 648 } 649 650 return auth; 651 err_put_mech: 652 gss_mech_put(gss_auth->mech); 653 err_free: ··· 664 } 665 666 static void 667 gss_destroy(struct rpc_auth *auth) 668 { 669 struct gss_auth *gss_auth; ··· 690 dprintk("RPC: destroying GSS authenticator %p flavor %d\n", 691 auth, auth->au_flavor); 692 693 - gss_auth = container_of(auth, struct gss_auth, rpc_auth); 694 - rpc_unlink(gss_auth->dentry); 695 - gss_auth->dentry = NULL; 696 - gss_mech_put(gss_auth->mech); 697 698 - rpcauth_free_credcache(auth); 699 - kfree(gss_auth); 700 - module_put(THIS_MODULE); 701 } 702 703 - /* gss_destroy_cred (and gss_destroy_ctx) are used to clean up after failure 704 * to create a new cred or context, so they check that things have been 705 * allocated before freeing them. */ 706 static void 707 - gss_destroy_ctx(struct gss_cl_ctx *ctx) 708 { 709 - dprintk("RPC: gss_destroy_ctx\n"); 710 711 if (ctx->gc_gss_ctx) 712 gss_delete_sec_context(&ctx->gc_gss_ctx); ··· 744 } 745 746 static void 747 - gss_destroy_cred(struct rpc_cred *rc) 748 { 749 - struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base); 750 751 - dprintk("RPC: gss_destroy_cred \n"); 752 753 - if (cred->gc_ctx) 754 - gss_put_ctx(cred->gc_ctx); 755 - kfree(cred); 756 } 757 758 /* ··· 808 if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) 809 goto out_err; 810 811 - atomic_set(&cred->gc_count, 1); 812 - cred->gc_uid = acred->uid; 813 /* 814 * Note: in order to force a call to call_refresh(), we deliberately 815 * fail to flag the credential as RPCAUTH_CRED_UPTODATE. 
816 */ 817 - cred->gc_flags = 0; 818 - cred->gc_base.cr_ops = &gss_credops; 819 - cred->gc_base.cr_flags = RPCAUTH_CRED_NEW; 820 cred->gc_service = gss_auth->service; 821 return &cred->gc_base; 822 823 out_err: ··· 846 * we don't really care if the credential has expired or not, 847 * since the caller should be prepared to reinitialise it. 848 */ 849 - if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW)) 850 goto out; 851 /* Don't match with creds that have expired. */ 852 if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) ··· 902 mic.data = (u8 *)(p + 1); 903 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 904 if (maj_stat == GSS_S_CONTEXT_EXPIRED) { 905 - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 906 } else if (maj_stat != 0) { 907 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 908 goto out_put_ctx; ··· 925 if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred)) 926 return gss_refresh_upcall(task); 927 return 0; 928 } 929 930 static __be32 * ··· 962 963 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 964 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 965 - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 966 - if (maj_stat) 967 goto out_bad; 968 /* We leave it to unwrap to calculate au_rslack. For now we just 969 * calculate the length of the verifier: */ 970 - task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; 971 gss_put_ctx(ctx); 972 dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", 973 task->tk_pid); ··· 999 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1000 *p++ = htonl(rqstp->rq_seqno); 1001 1002 status = encode(rqstp, p, obj); 1003 if (status) 1004 return status; 1005 ··· 1021 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1022 status = -EIO; /* XXX? */ 1023 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1024 - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 1025 else if (maj_stat) 1026 return status; 1027 q = xdr_encode_opaque(p, NULL, mic.len); ··· 1095 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1096 *p++ = htonl(rqstp->rq_seqno); 1097 1098 status = encode(rqstp, p, obj); 1099 if (status) 1100 return status; 1101 ··· 1122 /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was 1123 * done anyway, so it's safe to put the request on the wire: */ 1124 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1125 - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 1126 else if (maj_stat) 1127 return status; 1128 ··· 1156 /* The spec seems a little ambiguous here, but I think that not 1157 * wrapping context destruction requests makes the most sense. 
1158 */ 1159 status = encode(rqstp, p, obj); 1160 goto out; 1161 } 1162 switch (gss_cred->gc_service) { 1163 case RPC_GSS_SVC_NONE: 1164 status = encode(rqstp, p, obj); 1165 break; 1166 case RPC_GSS_SVC_INTEGRITY: 1167 status = gss_wrap_req_integ(cred, ctx, encode, ··· 1213 1214 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1215 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1216 - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 1217 if (maj_stat != GSS_S_COMPLETE) 1218 return status; 1219 return 0; ··· 1238 1239 maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); 1240 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1241 - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; 1242 if (maj_stat != GSS_S_COMPLETE) 1243 return status; 1244 if (ntohl(*(*p)++) != rqstp->rq_seqno) ··· 1278 break; 1279 } 1280 /* take into account extra slack for integrity and privacy cases: */ 1281 - task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) 1282 + (savedlen - head->iov_len); 1283 out_decode: 1284 status = decode(rqstp, p, obj); 1285 out: 1286 gss_put_ctx(ctx); 1287 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, ··· 1291 return status; 1292 } 1293 1294 - static struct rpc_authops authgss_ops = { 1295 .owner = THIS_MODULE, 1296 .au_flavor = RPC_AUTH_GSS, 1297 #ifdef RPC_DEBUG ··· 1303 .crcreate = gss_create_cred 1304 }; 1305 1306 - static struct rpc_credops gss_credops = { 1307 .cr_name = "AUTH_GSS", 1308 .crdestroy = gss_destroy_cred, 1309 .cr_init = gss_cred_init, 1310 .crmatch = gss_match, 1311 .crmarshal = gss_marshal, 1312 .crrefresh = gss_refresh, 1313 .crvalidate = gss_validate, 1314 .crwrap_req = gss_wrap_req, 1315 .crunwrap_resp = gss_unwrap_resp,
··· 54 #include <linux/sunrpc/gss_api.h> 55 #include <asm/uaccess.h> 56 57 + static const struct rpc_authops authgss_ops; 58 59 + static const struct rpc_credops gss_credops; 60 + static const struct rpc_credops gss_nullops; 61 62 #ifdef RPC_DEBUG 63 # define RPCDBG_FACILITY RPCDBG_AUTH ··· 64 65 #define NFS_NGROUPS 16 66 67 #define GSS_CRED_SLACK 1024 /* XXX: unused */ 68 /* length of a krb5 verifier (48), plus data added before arguments when 69 * using integrity (two 4-byte integers): */ ··· 79 /* dump the buffer in `emacs-hexl' style */ 80 #define isprint(c) ((c > 0x1f) && (c < 0x7f)) 81 82 struct gss_auth { 83 + struct kref kref; 84 struct rpc_auth rpc_auth; 85 struct gss_api_mech *mech; 86 enum rpc_gss_svc service; 87 struct rpc_clnt *client; 88 struct dentry *dentry; 89 }; 90 91 + static void gss_free_ctx(struct gss_cl_ctx *); 92 static struct rpc_pipe_ops gss_upcall_ops; 93 94 static inline struct gss_cl_ctx * ··· 105 gss_put_ctx(struct gss_cl_ctx *ctx) 106 { 107 if (atomic_dec_and_test(&ctx->count)) 108 + gss_free_ctx(ctx); 109 } 110 111 + /* gss_cred_set_ctx: 112 + * called by gss_upcall_callback and gss_create_upcall in order 113 + * to set the gss context. The actual exchange of an old context 114 + * and a new one is protected by the inode->i_lock. 115 + */ 116 static void 117 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) 118 { 119 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 120 struct gss_cl_ctx *old; 121 + 122 old = gss_cred->gc_ctx; 123 + rcu_assign_pointer(gss_cred->gc_ctx, ctx); 124 + set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 125 + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); 126 if (old) 127 gss_put_ctx(old); 128 } ··· 129 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 130 int res = 0; 131 132 + rcu_read_lock(); 133 + if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx) 134 res = 1; 135 + rcu_read_unlock(); 136 return res; 137 } 138 ··· 171 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 172 struct gss_cl_ctx *ctx = NULL; 173 174 + rcu_read_lock(); 175 if (gss_cred->gc_ctx) 176 ctx = gss_get_ctx(gss_cred->gc_ctx); 177 + rcu_read_unlock(); 178 return ctx; 179 } 180 ··· 269 } 270 271 static struct gss_upcall_msg * 272 + __gss_find_upcall(struct rpc_inode *rpci, uid_t uid) 273 { 274 struct gss_upcall_msg *pos; 275 + list_for_each_entry(pos, &rpci->in_downcall, list) { 276 if (pos->uid != uid) 277 continue; 278 atomic_inc(&pos->count); ··· 290 static inline struct gss_upcall_msg * 291 gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) 292 { 293 + struct inode *inode = gss_auth->dentry->d_inode; 294 + struct rpc_inode *rpci = RPC_I(inode); 295 struct gss_upcall_msg *old; 296 297 + spin_lock(&inode->i_lock); 298 + old = __gss_find_upcall(rpci, gss_msg->uid); 299 if (old == NULL) { 300 atomic_inc(&gss_msg->count); 301 + list_add(&gss_msg->list, &rpci->in_downcall); 302 } else 303 gss_msg = old; 304 + spin_unlock(&inode->i_lock); 305 return gss_msg; 306 } 307 308 static void 309 __gss_unhash_msg(struct gss_upcall_msg *gss_msg) 310 { 311 list_del_init(&gss_msg->list); 312 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); 313 wake_up_all(&gss_msg->waitqueue); ··· 318 gss_unhash_msg(struct gss_upcall_msg *gss_msg) 319 { 320 struct gss_auth *gss_auth = gss_msg->auth; 321 + struct inode *inode = gss_auth->dentry->d_inode; 322 323 + if (list_empty(&gss_msg->list)) 324 + return; 325 + spin_lock(&inode->i_lock); 326 + if 
(!list_empty(&gss_msg->list)) 327 + __gss_unhash_msg(gss_msg); 328 + spin_unlock(&inode->i_lock); 329 } 330 331 static void ··· 330 struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, 331 struct gss_cred, gc_base); 332 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; 333 + struct inode *inode = gss_msg->auth->dentry->d_inode; 334 335 + spin_lock(&inode->i_lock); 336 if (gss_msg->ctx) 337 gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); 338 else 339 task->tk_status = gss_msg->msg.errno; 340 gss_cred->gc_upcall = NULL; 341 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); 342 + spin_unlock(&inode->i_lock); 343 gss_release_msg(gss_msg); 344 } 345 ··· 386 gss_refresh_upcall(struct rpc_task *task) 387 { 388 struct rpc_cred *cred = task->tk_msg.rpc_cred; 389 + struct gss_auth *gss_auth = container_of(cred->cr_auth, 390 struct gss_auth, rpc_auth); 391 struct gss_cred *gss_cred = container_of(cred, 392 struct gss_cred, gc_base); 393 struct gss_upcall_msg *gss_msg; 394 + struct inode *inode = gss_auth->dentry->d_inode; 395 int err = 0; 396 397 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, ··· 400 err = PTR_ERR(gss_msg); 401 goto out; 402 } 403 + spin_lock(&inode->i_lock); 404 if (gss_cred->gc_upcall != NULL) 405 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); 406 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { ··· 411 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); 412 } else 413 err = gss_msg->msg.errno; 414 + spin_unlock(&inode->i_lock); 415 gss_release_msg(gss_msg); 416 out: 417 dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", ··· 422 static inline int 423 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) 424 { 425 + struct inode *inode = gss_auth->dentry->d_inode; 426 struct rpc_cred *cred = &gss_cred->gc_base; 427 struct gss_upcall_msg *gss_msg; 428 DEFINE_WAIT(wait); ··· 435 } 436 for (;;) { 437 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); 438 + spin_lock(&inode->i_lock); 439 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { 440 break; 441 } 442 + spin_unlock(&inode->i_lock); 443 if (signalled()) { 444 err = -ERESTARTSYS; 445 goto out_intr; ··· 451 gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); 452 else 453 err = gss_msg->msg.errno; 454 + spin_unlock(&inode->i_lock); 455 out_intr: 456 finish_wait(&gss_msg->waitqueue, &wait); 457 gss_release_msg(gss_msg); ··· 489 const void *p, *end; 490 void *buf; 491 struct rpc_clnt *clnt; 492 struct gss_upcall_msg *gss_msg; 493 + struct inode *inode = filp->f_path.dentry->d_inode; 494 struct gss_cl_ctx *ctx; 495 uid_t uid; 496 + ssize_t err = -EFBIG; 497 498 if (mlen > MSG_BUF_MAXSIZE) 499 goto out; ··· 503 if (!buf) 504 goto out; 505 506 + clnt = RPC_I(inode)->private; 507 err = -EFAULT; 508 if (copy_from_user(buf, src, mlen)) 509 goto err; ··· 519 ctx = gss_alloc_context(); 520 if (ctx == NULL) 521 goto err; 522 + 523 + err = -ENOENT; 524 + /* Find a matching upcall */ 525 + spin_lock(&inode->i_lock); 526 + gss_msg = __gss_find_upcall(RPC_I(inode), uid); 527 + if (gss_msg == NULL) { 528 + spin_unlock(&inode->i_lock); 529 + goto err_put_ctx; 530 + } 531 + list_del_init(&gss_msg->list); 532 + spin_unlock(&inode->i_lock); 533 + 534 + p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); 535 if (IS_ERR(p)) { 536 err = PTR_ERR(p); 537 + gss_msg->msg.errno = (err == -EACCES) ? 
-EACCES : -EAGAIN; 538 + goto err_release_msg; 539 } 540 + gss_msg->ctx = gss_get_ctx(ctx); 541 + err = mlen; 542 + 543 + err_release_msg: 544 + spin_lock(&inode->i_lock); 545 + __gss_unhash_msg(gss_msg); 546 + spin_unlock(&inode->i_lock); 547 + gss_release_msg(gss_msg); 548 err_put_ctx: 549 gss_put_ctx(ctx); 550 err: 551 kfree(buf); 552 out: 553 + dprintk("RPC: gss_pipe_downcall returning %Zd\n", err); 554 return err; 555 } 556 ··· 563 gss_pipe_release(struct inode *inode) 564 { 565 struct rpc_inode *rpci = RPC_I(inode); 566 + struct gss_upcall_msg *gss_msg; 567 568 + spin_lock(&inode->i_lock); 569 + while (!list_empty(&rpci->in_downcall)) { 570 571 + gss_msg = list_entry(rpci->in_downcall.next, 572 struct gss_upcall_msg, list); 573 gss_msg->msg.errno = -EPIPE; 574 atomic_inc(&gss_msg->count); 575 __gss_unhash_msg(gss_msg); 576 + spin_unlock(&inode->i_lock); 577 gss_release_msg(gss_msg); 578 + spin_lock(&inode->i_lock); 579 } 580 + spin_unlock(&inode->i_lock); 581 } 582 583 static void ··· 637 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 638 if (gss_auth->service == 0) 639 goto err_put_mech; 640 auth = &gss_auth->rpc_auth; 641 auth->au_cslack = GSS_CRED_SLACK >> 2; 642 auth->au_rslack = GSS_VERF_SLACK >> 2; 643 auth->au_ops = &authgss_ops; 644 auth->au_flavor = flavor; 645 atomic_set(&auth->au_count, 1); 646 + kref_init(&gss_auth->kref); 647 648 gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, 649 clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); ··· 657 goto err_put_mech; 658 } 659 660 + err = rpcauth_init_credcache(auth); 661 + if (err) 662 + goto err_unlink_pipe; 663 + 664 return auth; 665 + err_unlink_pipe: 666 + rpc_unlink(gss_auth->dentry); 667 err_put_mech: 668 gss_mech_put(gss_auth->mech); 669 err_free: ··· 668 } 669 670 static void 671 + gss_free(struct gss_auth *gss_auth) 672 + { 673 + rpc_unlink(gss_auth->dentry); 674 + gss_auth->dentry = NULL; 675 + gss_mech_put(gss_auth->mech); 676 + 677 + kfree(gss_auth); 678 + module_put(THIS_MODULE); 679 + } 680 + 681 + static void 682 + gss_free_callback(struct kref *kref) 683 + { 684 + struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref); 685 + 686 + gss_free(gss_auth); 687 + } 688 + 689 + static void 690 gss_destroy(struct rpc_auth *auth) 691 { 692 struct gss_auth *gss_auth; ··· 675 dprintk("RPC: destroying GSS authenticator %p flavor %d\n", 676 auth, auth->au_flavor); 677 678 + rpcauth_destroy_credcache(auth); 679 680 + gss_auth = container_of(auth, struct gss_auth, rpc_auth); 681 + kref_put(&gss_auth->kref, gss_free_callback); 682 } 683 684 + /* 685 + * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call 686 + * to the server with the GSS control procedure field set to 687 + * RPC_GSS_PROC_DESTROY. This should normally cause the server to release 688 + * all RPCSEC_GSS state associated with that context. 
689 + */ 690 + static int 691 + gss_destroying_context(struct rpc_cred *cred) 692 + { 693 + struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 694 + struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 695 + struct rpc_task *task; 696 + 697 + if (gss_cred->gc_ctx == NULL || 698 + gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY) 699 + return 0; 700 + 701 + gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; 702 + cred->cr_ops = &gss_nullops; 703 + 704 + /* Take a reference to ensure the cred will be destroyed either 705 + * by the RPC call or by the put_rpccred() below */ 706 + get_rpccred(cred); 707 + 708 + task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC); 709 + if (!IS_ERR(task)) 710 + rpc_put_task(task); 711 + 712 + put_rpccred(cred); 713 + return 1; 714 + } 715 + 716 + /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure 717 * to create a new cred or context, so they check that things have been 718 * allocated before freeing them. */ 719 static void 720 + gss_do_free_ctx(struct gss_cl_ctx *ctx) 721 { 722 + dprintk("RPC: gss_free_ctx\n"); 723 724 if (ctx->gc_gss_ctx) 725 gss_delete_sec_context(&ctx->gc_gss_ctx); ··· 701 } 702 703 static void 704 + gss_free_ctx_callback(struct rcu_head *head) 705 { 706 + struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu); 707 + gss_do_free_ctx(ctx); 708 + } 709 710 + static void 711 + gss_free_ctx(struct gss_cl_ctx *ctx) 712 + { 713 + call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); 714 + } 715 716 + static void 717 + gss_free_cred(struct gss_cred *gss_cred) 718 + { 719 + dprintk("RPC: gss_free_cred %p\n", gss_cred); 720 + kfree(gss_cred); 721 + } 722 + 723 + static void 724 + gss_free_cred_callback(struct rcu_head *head) 725 + { 726 + struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu); 727 + gss_free_cred(gss_cred); 728 + } 729 + 730 + static void 731 + gss_destroy_cred(struct rpc_cred *cred) 732 + { 733 + struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 734 + struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 735 + struct gss_cl_ctx *ctx = gss_cred->gc_ctx; 736 + 737 + if (gss_destroying_context(cred)) 738 + return; 739 + rcu_assign_pointer(gss_cred->gc_ctx, NULL); 740 + call_rcu(&cred->cr_rcu, gss_free_cred_callback); 741 + if (ctx) 742 + gss_put_ctx(ctx); 743 + kref_put(&gss_auth->kref, gss_free_callback); 744 } 745 746 /* ··· 734 if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) 735 goto out_err; 736 737 + rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops); 738 /* 739 * Note: in order to force a call to call_refresh(), we deliberately 740 * fail to flag the credential as RPCAUTH_CRED_UPTODATE. 741 */ 742 + cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW; 743 cred->gc_service = gss_auth->service; 744 + kref_get(&gss_auth->kref); 745 return &cred->gc_base; 746 747 out_err: ··· 774 * we don't really care if the credential has expired or not, 775 * since the caller should be prepared to reinitialise it. 776 */ 777 + if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags)) 778 goto out; 779 /* Don't match with creds that have expired. 
*/ 780 if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) ··· 830 mic.data = (u8 *)(p + 1); 831 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 832 if (maj_stat == GSS_S_CONTEXT_EXPIRED) { 833 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 834 } else if (maj_stat != 0) { 835 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 836 goto out_put_ctx; ··· 853 if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred)) 854 return gss_refresh_upcall(task); 855 return 0; 856 + } 857 + 858 + /* Dummy refresh routine: used only when destroying the context */ 859 + static int 860 + gss_refresh_null(struct rpc_task *task) 861 + { 862 + return -EACCES; 863 } 864 865 static __be32 * ··· 883 884 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 885 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 886 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 887 + if (maj_stat) { 888 + dprintk("RPC: %5u gss_validate: gss_verify_mic returned" 889 + "error 0x%08x\n", task->tk_pid, maj_stat); 890 goto out_bad; 891 + } 892 /* We leave it to unwrap to calculate au_rslack. For now we just 893 * calculate the length of the verifier: */ 894 + cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; 895 gss_put_ctx(ctx); 896 dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", 897 task->tk_pid); ··· 917 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 918 *p++ = htonl(rqstp->rq_seqno); 919 920 + lock_kernel(); 921 status = encode(rqstp, p, obj); 922 + unlock_kernel(); 923 if (status) 924 return status; 925 ··· 937 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 938 status = -EIO; /* XXX? */ 939 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 940 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 941 else if (maj_stat) 942 return status; 943 q = xdr_encode_opaque(p, NULL, mic.len); ··· 1011 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1012 *p++ = htonl(rqstp->rq_seqno); 1013 1014 + lock_kernel(); 1015 status = encode(rqstp, p, obj); 1016 + unlock_kernel(); 1017 if (status) 1018 return status; 1019 ··· 1036 /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was 1037 * done anyway, so it's safe to put the request on the wire: */ 1038 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1039 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1040 else if (maj_stat) 1041 return status; 1042 ··· 1070 /* The spec seems a little ambiguous here, but I think that not 1071 * wrapping context destruction requests makes the most sense. 
1072 */ 1073 + lock_kernel(); 1074 status = encode(rqstp, p, obj); 1075 + unlock_kernel(); 1076 goto out; 1077 } 1078 switch (gss_cred->gc_service) { 1079 case RPC_GSS_SVC_NONE: 1080 + lock_kernel(); 1081 status = encode(rqstp, p, obj); 1082 + unlock_kernel(); 1083 break; 1084 case RPC_GSS_SVC_INTEGRITY: 1085 status = gss_wrap_req_integ(cred, ctx, encode, ··· 1123 1124 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); 1125 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1126 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1127 if (maj_stat != GSS_S_COMPLETE) 1128 return status; 1129 return 0; ··· 1148 1149 maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); 1150 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1151 + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1152 if (maj_stat != GSS_S_COMPLETE) 1153 return status; 1154 if (ntohl(*(*p)++) != rqstp->rq_seqno) ··· 1188 break; 1189 } 1190 /* take into account extra slack for integrity and privacy cases: */ 1191 + cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) 1192 + (savedlen - head->iov_len); 1193 out_decode: 1194 + lock_kernel(); 1195 status = decode(rqstp, p, obj); 1196 + unlock_kernel(); 1197 out: 1198 gss_put_ctx(ctx); 1199 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, ··· 1199 return status; 1200 } 1201 1202 + static const struct rpc_authops authgss_ops = { 1203 .owner = THIS_MODULE, 1204 .au_flavor = RPC_AUTH_GSS, 1205 #ifdef RPC_DEBUG ··· 1211 .crcreate = gss_create_cred 1212 }; 1213 1214 + static const struct rpc_credops gss_credops = { 1215 .cr_name = "AUTH_GSS", 1216 .crdestroy = gss_destroy_cred, 1217 .cr_init = gss_cred_init, 1218 .crmatch = gss_match, 1219 .crmarshal = gss_marshal, 1220 .crrefresh = gss_refresh, 1221 + .crvalidate = gss_validate, 1222 + .crwrap_req = gss_wrap_req, 1223 + .crunwrap_resp = gss_unwrap_resp, 1224 + }; 1225 + 1226 + static const struct rpc_credops gss_nullops = { 1227 + .cr_name = "AUTH_GSS", 1228 + .crdestroy = gss_destroy_cred, 1229 + .crmatch = gss_match, 1230 + .crmarshal = gss_marshal, 1231 + .crrefresh = gss_refresh_null, 1232 .crvalidate = gss_validate, 1233 .crwrap_req = gss_wrap_req, 1234 .crunwrap_resp = gss_unwrap_resp,
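The auth_gss.c rewrite above replaces the old ad-hoc accounting with two stock kernel lifetime tools: a kref on struct gss_auth, so that creds, pending upcalls, and the RPC_GSS_PROC_DESTROY call sent by gss_destroying_context() can keep the authenticator alive after rpcauth_destroy() returns, and RCU-deferred freeing of contexts and creds. A minimal sketch of the kref half, using illustrative names rather than anything from the patch:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_obj {
            struct kref kref;
            /* ... payload ... */
    };

    /* Runs exactly once, when the last reference is dropped. */
    static void my_obj_release(struct kref *kref)
    {
            struct my_obj *obj = container_of(kref, struct my_obj, kref);
            kfree(obj);
    }

    static struct my_obj *my_obj_alloc(void)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            if (obj)
                    kref_init(&obj->kref);  /* refcount starts at 1 */
            return obj;
    }

    static void my_obj_put(struct my_obj *obj)
    {
            kref_put(&obj->kref, my_obj_release);   /* pairs with kref_get() */
    }

In the patch, gss_create_cred() takes a reference with kref_get(&gss_auth->kref) and gss_destroy_cred() drops it, so gss_free_callback() fires only after the last cred using the authenticator is gone.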
+1 -1
net/sunrpc/auth_gss/gss_krb5_mech.c
··· 201 kfree(kctx); 202 } 203 204 - static struct gss_api_ops gss_kerberos_ops = { 205 .gss_import_sec_context = gss_import_sec_context_kerberos, 206 .gss_get_mic = gss_get_mic_kerberos, 207 .gss_verify_mic = gss_verify_mic_kerberos,
··· 201 kfree(kctx); 202 } 203 204 + static const struct gss_api_ops gss_kerberos_ops = { 205 .gss_import_sec_context = gss_import_sec_context_kerberos, 206 .gss_get_mic = gss_get_mic_kerberos, 207 .gss_verify_mic = gss_verify_mic_kerberos,
+1 -1
net/sunrpc/auth_gss/gss_spkm3_mech.c
··· 202 return err; 203 } 204 205 - static struct gss_api_ops gss_spkm3_ops = { 206 .gss_import_sec_context = gss_import_sec_context_spkm3, 207 .gss_get_mic = gss_get_mic_spkm3, 208 .gss_verify_mic = gss_verify_mic_spkm3,
··· 202 return err; 203 } 204 205 + static const struct gss_api_ops gss_spkm3_ops = { 206 .gss_import_sec_context = gss_import_sec_context_spkm3, 207 .gss_get_mic = gss_get_mic_spkm3, 208 .gss_verify_mic = gss_verify_mic_spkm3,
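These two one-line mechanism patches, like the authgss/authnull/authunix ops tables elsewhere in the series, only add a const qualifier: the method tables are filled in at build time and never written afterwards, so the compiler can place them in read-only data. The idiom in isolation (illustrative names):

    struct thing_ops {
            int (*frob)(int);
    };

    static int frob_impl(int x)
    {
            return x * 2;
    }

    /* const: the table lands in .rodata and cannot be
     * overwritten through a stray pointer at runtime. */
    static const struct thing_ops my_thing_ops = {
            .frob = frob_impl,
    };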
+6 -4
net/sunrpc/auth_null.c
··· 76 static int 77 nul_refresh(struct rpc_task *task) 78 { 79 - task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; 80 return 0; 81 } 82 ··· 101 return p; 102 } 103 104 - struct rpc_authops authnull_ops = { 105 .owner = THIS_MODULE, 106 .au_flavor = RPC_AUTH_NULL, 107 #ifdef RPC_DEBUG ··· 122 }; 123 124 static 125 - struct rpc_credops null_credops = { 126 .cr_name = "AUTH_NULL", 127 .crdestroy = nul_destroy_cred, 128 .crmatch = nul_match, ··· 133 134 static 135 struct rpc_cred null_cred = { 136 .cr_ops = &null_credops, 137 .cr_count = ATOMIC_INIT(1), 138 - .cr_flags = RPCAUTH_CRED_UPTODATE, 139 #ifdef RPC_DEBUG 140 .cr_magic = RPCAUTH_CRED_MAGIC, 141 #endif
··· 76 static int 77 nul_refresh(struct rpc_task *task) 78 { 79 + set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); 80 return 0; 81 } 82 ··· 101 return p; 102 } 103 104 + const struct rpc_authops authnull_ops = { 105 .owner = THIS_MODULE, 106 .au_flavor = RPC_AUTH_NULL, 107 #ifdef RPC_DEBUG ··· 122 }; 123 124 static 125 + const struct rpc_credops null_credops = { 126 .cr_name = "AUTH_NULL", 127 .crdestroy = nul_destroy_cred, 128 .crmatch = nul_match, ··· 133 134 static 135 struct rpc_cred null_cred = { 136 + .cr_lru = LIST_HEAD_INIT(null_cred.cr_lru), 137 + .cr_auth = &null_auth, 138 .cr_ops = &null_credops, 139 .cr_count = ATOMIC_INIT(1), 140 + .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, 141 #ifdef RPC_DEBUG 142 .cr_magic = RPCAUTH_CRED_MAGIC, 143 #endif
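Throughout this series cr_flags stops being a plain bitmask updated with |= and becomes an unsigned long driven by the atomic bitops, which is why the assignment in nul_refresh() turns into set_bit(RPCAUTH_CRED_UPTODATE, ...) and the static initializer becomes 1UL << RPCAUTH_CRED_UPTODATE (the constants are now bit numbers, not masks). A sketch of the idiom with made-up names:

    #include <linux/bitops.h>

    #define MY_CRED_UPTODATE        0       /* bit number, not a mask */

    struct my_cred {
            unsigned long flags;
    };

    /* Static initializers still need a mask built from the bit number. */
    static struct my_cred boot_cred = {
            .flags = 1UL << MY_CRED_UPTODATE,
    };

    static void mark_uptodate(struct my_cred *cred)
    {
            set_bit(MY_CRED_UPTODATE, &cred->flags);        /* atomic RMW */
    }

    static int is_uptodate(struct my_cred *cred)
    {
            return test_bit(MY_CRED_UPTODATE, &cred->flags);
    }

The payoff is that concurrent flag updates need no extra locking, which matters once creds are looked up under RCU.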
+32 -22
net/sunrpc/auth_unix.c
··· 20 gid_t uc_gids[NFS_NGROUPS]; 21 }; 22 #define uc_uid uc_base.cr_uid 23 - #define uc_count uc_base.cr_count 24 - #define uc_flags uc_base.cr_flags 25 - #define uc_expire uc_base.cr_expire 26 - 27 - #define UNX_CRED_EXPIRE (60 * HZ) 28 29 #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) 30 ··· 29 30 static struct rpc_auth unix_auth; 31 static struct rpc_cred_cache unix_cred_cache; 32 - static struct rpc_credops unix_credops; 33 34 static struct rpc_auth * 35 unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) 36 { 37 dprintk("RPC: creating UNIX authenticator for client %p\n", 38 clnt); 39 - if (atomic_inc_return(&unix_auth.au_count) == 0) 40 - unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); 41 return &unix_auth; 42 } 43 ··· 44 unx_destroy(struct rpc_auth *auth) 45 { 46 dprintk("RPC: destroying UNIX authenticator %p\n", auth); 47 - rpcauth_free_credcache(auth); 48 } 49 50 /* ··· 68 if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) 69 return ERR_PTR(-ENOMEM); 70 71 - atomic_set(&cred->uc_count, 1); 72 - cred->uc_flags = RPCAUTH_CRED_UPTODATE; 73 if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { 74 cred->uc_uid = 0; 75 cred->uc_gid = 0; ··· 79 if (groups > NFS_NGROUPS) 80 groups = NFS_NGROUPS; 81 82 - cred->uc_uid = acred->uid; 83 cred->uc_gid = acred->gid; 84 for (i = 0; i < groups; i++) 85 cred->uc_gids[i] = GROUP_AT(acred->group_info, i); 86 if (i < NFS_NGROUPS) 87 cred->uc_gids[i] = NOGROUP; 88 } 89 - cred->uc_base.cr_ops = &unix_credops; 90 91 - return (struct rpc_cred *) cred; 92 } 93 94 static void 95 unx_destroy_cred(struct rpc_cred *cred) 96 { 97 - kfree(cred); 98 } 99 100 /* ··· 117 static int 118 unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) 119 { 120 - struct unx_cred *cred = (struct unx_cred *) rcred; 121 int i; 122 123 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { ··· 148 unx_marshal(struct rpc_task *task, __be32 *p) 149 { 150 struct rpc_clnt *clnt = task->tk_client; 151 - struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred; 152 __be32 *base, *hold; 153 int i; 154 ··· 181 static int 182 unx_refresh(struct rpc_task *task) 183 { 184 - task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; 185 return 0; 186 } 187 ··· 204 printk("RPC: giant verf size: %u\n", size); 205 return NULL; 206 } 207 - task->tk_auth->au_rslack = (size >> 2) + 2; 208 p += (size >> 2); 209 210 return p; 211 } 212 213 - struct rpc_authops authunix_ops = { 214 .owner = THIS_MODULE, 215 .au_flavor = RPC_AUTH_UNIX, 216 #ifdef RPC_DEBUG ··· 229 230 static 231 struct rpc_cred_cache unix_cred_cache = { 232 - .expire = UNX_CRED_EXPIRE, 233 }; 234 235 static ··· 242 }; 243 244 static 245 - struct rpc_credops unix_credops = { 246 .cr_name = "AUTH_UNIX", 247 .crdestroy = unx_destroy_cred, 248 .crmatch = unx_match,
··· 20 gid_t uc_gids[NFS_NGROUPS]; 21 }; 22 #define uc_uid uc_base.cr_uid 23 24 #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) 25 ··· 34 35 static struct rpc_auth unix_auth; 36 static struct rpc_cred_cache unix_cred_cache; 37 + static const struct rpc_credops unix_credops; 38 39 static struct rpc_auth * 40 unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) 41 { 42 dprintk("RPC: creating UNIX authenticator for client %p\n", 43 clnt); 44 + atomic_inc(&unix_auth.au_count); 45 return &unix_auth; 46 } 47 ··· 50 unx_destroy(struct rpc_auth *auth) 51 { 52 dprintk("RPC: destroying UNIX authenticator %p\n", auth); 53 + rpcauth_clear_credcache(auth->au_credcache); 54 } 55 56 /* ··· 74 if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) 75 return ERR_PTR(-ENOMEM); 76 77 + rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops); 78 + cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; 79 if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { 80 cred->uc_uid = 0; 81 cred->uc_gid = 0; ··· 85 if (groups > NFS_NGROUPS) 86 groups = NFS_NGROUPS; 87 88 cred->uc_gid = acred->gid; 89 for (i = 0; i < groups; i++) 90 cred->uc_gids[i] = GROUP_AT(acred->group_info, i); 91 if (i < NFS_NGROUPS) 92 cred->uc_gids[i] = NOGROUP; 93 } 94 95 + return &cred->uc_base; 96 + } 97 + 98 + static void 99 + unx_free_cred(struct unx_cred *unx_cred) 100 + { 101 + dprintk("RPC: unx_free_cred %p\n", unx_cred); 102 + kfree(unx_cred); 103 + } 104 + 105 + static void 106 + unx_free_cred_callback(struct rcu_head *head) 107 + { 108 + struct unx_cred *unx_cred = container_of(head, struct unx_cred, uc_base.cr_rcu); 109 + unx_free_cred(unx_cred); 110 } 111 112 static void 113 unx_destroy_cred(struct rpc_cred *cred) 114 { 115 + call_rcu(&cred->cr_rcu, unx_free_cred_callback); 116 } 117 118 /* ··· 111 static int 112 unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) 113 { 114 + struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base); 115 int i; 116 117 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { ··· 142 unx_marshal(struct rpc_task *task, __be32 *p) 143 { 144 struct rpc_clnt *clnt = task->tk_client; 145 + struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base); 146 __be32 *base, *hold; 147 int i; 148 ··· 175 static int 176 unx_refresh(struct rpc_task *task) 177 { 178 + set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); 179 return 0; 180 } 181 ··· 198 printk("RPC: giant verf size: %u\n", size); 199 return NULL; 200 } 201 + task->tk_msg.rpc_cred->cr_auth->au_rslack = (size >> 2) + 2; 202 p += (size >> 2); 203 204 return p; 205 } 206 207 + void __init rpc_init_authunix(void) 208 + { 209 + spin_lock_init(&unix_cred_cache.lock); 210 + } 211 + 212 + const struct rpc_authops authunix_ops = { 213 .owner = THIS_MODULE, 214 .au_flavor = RPC_AUTH_UNIX, 215 #ifdef RPC_DEBUG ··· 218 219 static 220 struct rpc_cred_cache unix_cred_cache = { 221 }; 222 223 static ··· 232 }; 233 234 static 235 + const struct rpc_credops unix_credops = { 236 .cr_name = "AUTH_UNIX", 237 .crdestroy = unx_destroy_cred, 238 .crmatch = unx_match,
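unx_destroy_cred() no longer calls kfree() directly: credcache lookups in this series run under rcu_read_lock(), so a cred that has just been unhashed may still be visible to a concurrent reader, and its memory must outlive the current grace period. The free is therefore funnelled through the new cr_rcu head with call_rcu(). The general shape, with illustrative names:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_cred {
            int uid;
            struct rcu_head rcu;
    };

    static void my_cred_free_callback(struct rcu_head *head)
    {
            /* Runs after a grace period, when no rcu_read_lock()
             * section that began before call_rcu() can still be live. */
            kfree(container_of(head, struct my_cred, rcu));
    }

    static void my_cred_destroy(struct my_cred *cred)
    {
            call_rcu(&cred->rcu, my_cred_free_callback);
    }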
+240 -129
net/sunrpc/clnt.c
··· 44 dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ 45 __FUNCTION__, t->tk_status) 46 47 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); 48 49 ··· 72 static __be32 * call_header(struct rpc_task *task); 73 static __be32 * call_verify(struct rpc_task *task); 74 75 76 static int 77 rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) ··· 132 dprintk("RPC: creating %s client for %s (xprt %p)\n", 133 program->name, servname, xprt); 134 135 err = -EINVAL; 136 if (!xprt) 137 goto out_no_xprt; ··· 145 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 146 if (!clnt) 147 goto out_err; 148 - atomic_set(&clnt->cl_users, 0); 149 - atomic_set(&clnt->cl_count, 1); 150 clnt->cl_parent = clnt; 151 152 clnt->cl_server = clnt->cl_inline_name; ··· 170 if (clnt->cl_metrics == NULL) 171 goto out_no_stats; 172 clnt->cl_program = program; 173 174 if (!xprt_bound(clnt->cl_xprt)) 175 clnt->cl_autobind = 1; 176 177 clnt->cl_rtt = &clnt->cl_rtt_default; 178 rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); 179 180 err = rpc_setup_pipedir(clnt, program->pipe_dir_name); 181 if (err < 0) ··· 198 if (clnt->cl_nodelen > UNX_MAXNODENAME) 199 clnt->cl_nodelen = UNX_MAXNODENAME; 200 memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); 201 return clnt; 202 203 out_no_auth: ··· 215 out_err: 216 xprt_put(xprt); 217 out_no_xprt: 218 return ERR_PTR(err); 219 } 220 ··· 234 { 235 struct rpc_xprt *xprt; 236 struct rpc_clnt *clnt; 237 238 - xprt = xprt_create_transport(args->protocol, args->address, 239 - args->addrsize, args->timeout); 240 if (IS_ERR(xprt)) 241 return (struct rpc_clnt *)xprt; 242 243 /* 244 * By default, kernel RPC client connects from a reserved port. ··· 293 clnt->cl_intr = 1; 294 if (args->flags & RPC_CLNT_CREATE_AUTOBIND) 295 clnt->cl_autobind = 1; 296 - if (args->flags & RPC_CLNT_CREATE_ONESHOT) 297 - clnt->cl_oneshot = 1; 298 if (args->flags & RPC_CLNT_CREATE_DISCRTRY) 299 clnt->cl_discrtry = 1; 300 ··· 314 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); 315 if (!new) 316 goto out_no_clnt; 317 - atomic_set(&new->cl_count, 1); 318 - atomic_set(&new->cl_users, 0); 319 new->cl_metrics = rpc_alloc_iostats(clnt); 320 if (new->cl_metrics == NULL) 321 goto out_no_stats; 322 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 323 if (err != 0) 324 goto out_no_path; 325 - new->cl_parent = clnt; 326 - atomic_inc(&clnt->cl_count); 327 - new->cl_xprt = xprt_get(clnt->cl_xprt); 328 - /* Turn off autobind on clones */ 329 - new->cl_autobind = 0; 330 - new->cl_oneshot = 0; 331 - new->cl_dead = 0; 332 - rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 333 if (new->cl_auth) 334 atomic_inc(&new->cl_auth->au_count); 335 return new; 336 out_no_path: 337 rpc_free_iostats(new->cl_metrics); ··· 345 346 /* 347 * Properly shut down an RPC client, terminating all outstanding 348 - * requests. Note that we must be certain that cl_oneshot and 349 - * cl_dead are cleared, or else the client would be destroyed 350 - * when the last task releases it. 
351 */ 352 - int 353 - rpc_shutdown_client(struct rpc_clnt *clnt) 354 { 355 - dprintk("RPC: shutting down %s client for %s, tasks=%d\n", 356 - clnt->cl_protname, clnt->cl_server, 357 - atomic_read(&clnt->cl_users)); 358 359 - while (atomic_read(&clnt->cl_users) > 0) { 360 - /* Don't let rpc_release_client destroy us */ 361 - clnt->cl_oneshot = 0; 362 - clnt->cl_dead = 0; 363 rpc_killall_tasks(clnt); 364 wait_event_timeout(destroy_wait, 365 - !atomic_read(&clnt->cl_users), 1*HZ); 366 } 367 368 - if (atomic_read(&clnt->cl_users) < 0) { 369 - printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n", 370 - clnt, atomic_read(&clnt->cl_users)); 371 - #ifdef RPC_DEBUG 372 - rpc_show_tasks(); 373 - #endif 374 - BUG(); 375 - } 376 - 377 - return rpc_destroy_client(clnt); 378 } 379 380 /* 381 - * Delete an RPC client 382 */ 383 - int 384 - rpc_destroy_client(struct rpc_clnt *clnt) 385 { 386 - if (!atomic_dec_and_test(&clnt->cl_count)) 387 - return 1; 388 - BUG_ON(atomic_read(&clnt->cl_users) != 0); 389 390 dprintk("RPC: destroying %s client for %s\n", 391 clnt->cl_protname, clnt->cl_server); 392 - if (clnt->cl_auth) { 393 - rpcauth_destroy(clnt->cl_auth); 394 - clnt->cl_auth = NULL; 395 - } 396 if (!IS_ERR(clnt->cl_dentry)) { 397 rpc_rmdir(clnt->cl_dentry); 398 rpc_put_mount(); 399 } 400 if (clnt->cl_parent != clnt) { 401 - rpc_destroy_client(clnt->cl_parent); 402 goto out_free; 403 } 404 if (clnt->cl_server != clnt->cl_inline_name) 405 kfree(clnt->cl_server); 406 out_free: 407 rpc_free_iostats(clnt->cl_metrics); 408 clnt->cl_metrics = NULL; 409 xprt_put(clnt->cl_xprt); 410 kfree(clnt); 411 - return 0; 412 } 413 414 /* 415 - * Release an RPC client 416 */ 417 void 418 rpc_release_client(struct rpc_clnt *clnt) 419 { 420 - dprintk("RPC: rpc_release_client(%p, %d)\n", 421 - clnt, atomic_read(&clnt->cl_users)); 422 423 - if (!atomic_dec_and_test(&clnt->cl_users)) 424 - return; 425 - wake_up(&destroy_wait); 426 - if (clnt->cl_oneshot || clnt->cl_dead) 427 - rpc_destroy_client(clnt); 428 } 429 430 /** ··· 515 rpc_restore_sigmask(oldset); 516 } 517 518 - /* 519 - * New rpc_call implementation 520 */ 521 int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) 522 { 523 struct rpc_task *task; 524 - sigset_t oldset; 525 - int status; 526 - 527 - /* If this client is slain all further I/O fails */ 528 - if (clnt->cl_dead) 529 - return -EIO; 530 531 BUG_ON(flags & RPC_TASK_ASYNC); 532 533 - task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); 534 - if (task == NULL) 535 - return -ENOMEM; 536 - 537 - /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ 538 - rpc_task_sigmask(task, &oldset); 539 - 540 - /* Set up the call info struct and execute the task */ 541 - rpc_call_setup(task, msg, 0); 542 - if (task->tk_status == 0) { 543 - atomic_inc(&task->tk_count); 544 - rpc_execute(task); 545 - } 546 status = task->tk_status; 547 rpc_put_task(task); 548 - rpc_restore_sigmask(&oldset); 549 return status; 550 } 551 552 - /* 553 - * New rpc_call implementation 554 */ 555 int 556 rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, 557 const struct rpc_call_ops *tk_ops, void *data) 558 { 559 struct rpc_task *task; 560 - sigset_t oldset; 561 - int status; 562 563 - /* If this client is slain all further I/O fails */ 564 - status = -EIO; 565 - if (clnt->cl_dead) 566 - goto out_release; 567 - 568 - flags |= RPC_TASK_ASYNC; 569 - 570 - /* Create/initialize a new RPC task */ 571 - status = -ENOMEM; 572 - if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) 573 - goto 
out_release; 574 - 575 - /* Mask signals on GSS_AUTH upcalls */ 576 - rpc_task_sigmask(task, &oldset); 577 - 578 - rpc_call_setup(task, msg, 0); 579 - 580 - /* Set up the call info struct and execute the task */ 581 - status = task->tk_status; 582 - if (status == 0) 583 - rpc_execute(task); 584 - else 585 - rpc_put_task(task); 586 - 587 - rpc_restore_sigmask(&oldset); 588 - return status; 589 - out_release: 590 - rpc_release_calldata(tk_ops, data); 591 - return status; 592 } 593 594 595 void 596 rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) ··· 806 static void 807 call_allocate(struct rpc_task *task) 808 { 809 - unsigned int slack = task->tk_auth->au_cslack; 810 struct rpc_rqst *req = task->tk_rqstp; 811 struct rpc_xprt *xprt = task->tk_xprt; 812 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; ··· 904 if (encode == NULL) 905 return; 906 907 - lock_kernel(); 908 task->tk_status = rpcauth_wrap_req(task, encode, req, p, 909 task->tk_msg.rpc_argp); 910 - unlock_kernel(); 911 if (task->tk_status == -ENOMEM) { 912 /* XXX: Is this sane? */ 913 rpc_delay(task, 3*HZ); ··· 1236 task->tk_action = rpc_exit_task; 1237 1238 if (decode) { 1239 - lock_kernel(); 1240 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, 1241 task->tk_msg.rpc_resp); 1242 - unlock_kernel(); 1243 } 1244 dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, 1245 task->tk_status); ··· 1330 * - if it isn't pointer subtraction in the NFS client may give 1331 * undefined results 1332 */ 1333 - printk(KERN_WARNING 1334 - "call_verify: XDR representation not a multiple of" 1335 - " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len); 1336 goto out_eio; 1337 } 1338 if ((len -= 3) < 0) ··· 1340 p += 1; /* skip XID */ 1341 1342 if ((n = ntohl(*p++)) != RPC_REPLY) { 1343 - printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); 1344 goto out_garbage; 1345 } 1346 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { ··· 1392 "authentication.\n", task->tk_client->cl_server); 1393 break; 1394 default: 1395 - printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); 1396 error = -EIO; 1397 } 1398 dprintk("RPC: %5u %s: call rejected %d\n", ··· 1401 goto out_err; 1402 } 1403 if (!(p = rpcauth_checkverf(task, p))) { 1404 - printk(KERN_WARNING "call_verify: auth check failed\n"); 1405 goto out_garbage; /* bad verifier, retry */ 1406 } 1407 len = p - (__be32 *)iov->iov_base - 1; ··· 1441 task->tk_pid, __FUNCTION__); 1442 break; /* retry */ 1443 default: 1444 - printk(KERN_WARNING "call_verify: server accept status: %x\n", n); 1445 /* Also retry */ 1446 } 1447 ··· 1456 out_retry: 1457 return ERR_PTR(-EAGAIN); 1458 } 1459 - printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); 1460 out_eio: 1461 error = -EIO; 1462 out_err: 1463 rpc_exit(task, error); 1464 return ERR_PTR(error); 1465 out_overflow: 1466 - printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); 1467 goto out_garbage; 1468 } 1469 ··· 1484 .p_decode = rpcproc_decode_null, 1485 }; 1486 1487 - int rpc_ping(struct rpc_clnt *clnt, int flags) 1488 { 1489 struct rpc_message msg = { 1490 .rpc_proc = &rpcproc_null, ··· 1495 put_rpccred(msg.rpc_cred); 1496 return err; 1497 }
··· 44 dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ 45 __FUNCTION__, t->tk_status) 46 47 + /* 48 + * All RPC clients are linked into this list 49 + */ 50 + static LIST_HEAD(all_clients); 51 + static DEFINE_SPINLOCK(rpc_client_lock); 52 + 53 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); 54 55 ··· 66 static __be32 * call_header(struct rpc_task *task); 67 static __be32 * call_verify(struct rpc_task *task); 68 69 + static int rpc_ping(struct rpc_clnt *clnt, int flags); 70 + 71 + static void rpc_register_client(struct rpc_clnt *clnt) 72 + { 73 + spin_lock(&rpc_client_lock); 74 + list_add(&clnt->cl_clients, &all_clients); 75 + spin_unlock(&rpc_client_lock); 76 + } 77 + 78 + static void rpc_unregister_client(struct rpc_clnt *clnt) 79 + { 80 + spin_lock(&rpc_client_lock); 81 + list_del(&clnt->cl_clients); 82 + spin_unlock(&rpc_client_lock); 83 + } 84 85 static int 86 rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) ··· 111 dprintk("RPC: creating %s client for %s (xprt %p)\n", 112 program->name, servname, xprt); 113 114 + err = rpciod_up(); 115 + if (err) 116 + goto out_no_rpciod; 117 err = -EINVAL; 118 if (!xprt) 119 goto out_no_xprt; ··· 121 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 122 if (!clnt) 123 goto out_err; 124 clnt->cl_parent = clnt; 125 126 clnt->cl_server = clnt->cl_inline_name; ··· 148 if (clnt->cl_metrics == NULL) 149 goto out_no_stats; 150 clnt->cl_program = program; 151 + INIT_LIST_HEAD(&clnt->cl_tasks); 152 + spin_lock_init(&clnt->cl_lock); 153 154 if (!xprt_bound(clnt->cl_xprt)) 155 clnt->cl_autobind = 1; 156 157 clnt->cl_rtt = &clnt->cl_rtt_default; 158 rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); 159 + 160 + kref_init(&clnt->cl_kref); 161 162 err = rpc_setup_pipedir(clnt, program->pipe_dir_name); 163 if (err < 0) ··· 172 if (clnt->cl_nodelen > UNX_MAXNODENAME) 173 clnt->cl_nodelen = UNX_MAXNODENAME; 174 memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); 175 + rpc_register_client(clnt); 176 return clnt; 177 178 out_no_auth: ··· 188 out_err: 189 xprt_put(xprt); 190 out_no_xprt: 191 + rpciod_down(); 192 + out_no_rpciod: 193 return ERR_PTR(err); 194 } 195 ··· 205 { 206 struct rpc_xprt *xprt; 207 struct rpc_clnt *clnt; 208 + struct rpc_xprtsock_create xprtargs = { 209 + .proto = args->protocol, 210 + .srcaddr = args->saddress, 211 + .dstaddr = args->address, 212 + .addrlen = args->addrsize, 213 + .timeout = args->timeout 214 + }; 215 + char servername[20]; 216 217 + xprt = xprt_create_transport(&xprtargs); 218 if (IS_ERR(xprt)) 219 return (struct rpc_clnt *)xprt; 220 + 221 + /* 222 + * If the caller chooses not to specify a hostname, whip 223 + * up a string representation of the passed-in address. 224 + */ 225 + if (args->servername == NULL) { 226 + struct sockaddr_in *addr = 227 + (struct sockaddr_in *) &args->address; 228 + snprintf(servername, sizeof(servername), NIPQUAD_FMT, 229 + NIPQUAD(addr->sin_addr.s_addr)); 230 + args->servername = servername; 231 + } 232 233 /* 234 * By default, kernel RPC client connects from a reserved port. 
··· 245 clnt->cl_intr = 1; 246 if (args->flags & RPC_CLNT_CREATE_AUTOBIND) 247 clnt->cl_autobind = 1; 248 if (args->flags & RPC_CLNT_CREATE_DISCRTRY) 249 clnt->cl_discrtry = 1; 250 ··· 268 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); 269 if (!new) 270 goto out_no_clnt; 271 + new->cl_parent = clnt; 272 + /* Turn off autobind on clones */ 273 + new->cl_autobind = 0; 274 + INIT_LIST_HEAD(&new->cl_tasks); 275 + spin_lock_init(&new->cl_lock); 276 + rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 277 new->cl_metrics = rpc_alloc_iostats(clnt); 278 if (new->cl_metrics == NULL) 279 goto out_no_stats; 280 + kref_init(&new->cl_kref); 281 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 282 if (err != 0) 283 goto out_no_path; 284 if (new->cl_auth) 285 atomic_inc(&new->cl_auth->au_count); 286 + xprt_get(clnt->cl_xprt); 287 + kref_get(&clnt->cl_kref); 288 + rpc_register_client(new); 289 + rpciod_up(); 290 return new; 291 out_no_path: 292 rpc_free_iostats(new->cl_metrics); ··· 298 299 /* 300 * Properly shut down an RPC client, terminating all outstanding 301 + * requests. 302 */ 303 + void rpc_shutdown_client(struct rpc_clnt *clnt) 304 { 305 + dprintk("RPC: shutting down %s client for %s\n", 306 + clnt->cl_protname, clnt->cl_server); 307 308 + while (!list_empty(&clnt->cl_tasks)) { 309 rpc_killall_tasks(clnt); 310 wait_event_timeout(destroy_wait, 311 + list_empty(&clnt->cl_tasks), 1*HZ); 312 } 313 314 + rpc_release_client(clnt); 315 } 316 317 /* 318 + * Free an RPC client 319 */ 320 + static void 321 + rpc_free_client(struct kref *kref) 322 { 323 + struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); 324 325 dprintk("RPC: destroying %s client for %s\n", 326 clnt->cl_protname, clnt->cl_server); 327 if (!IS_ERR(clnt->cl_dentry)) { 328 rpc_rmdir(clnt->cl_dentry); 329 rpc_put_mount(); 330 } 331 if (clnt->cl_parent != clnt) { 332 + rpc_release_client(clnt->cl_parent); 333 goto out_free; 334 } 335 if (clnt->cl_server != clnt->cl_inline_name) 336 kfree(clnt->cl_server); 337 out_free: 338 + rpc_unregister_client(clnt); 339 rpc_free_iostats(clnt->cl_metrics); 340 clnt->cl_metrics = NULL; 341 xprt_put(clnt->cl_xprt); 342 + rpciod_down(); 343 kfree(clnt); 344 } 345 346 /* 347 + * Free an RPC client 348 + */ 349 + static void 350 + rpc_free_auth(struct kref *kref) 351 + { 352 + struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); 353 + 354 + if (clnt->cl_auth == NULL) { 355 + rpc_free_client(kref); 356 + return; 357 + } 358 + 359 + /* 360 + * Note: RPCSEC_GSS may need to send NULL RPC calls in order to 361 + * release remaining GSS contexts. This mechanism ensures 362 + * that it can do so safely. 
363 + */ 364 + kref_init(kref); 365 + rpcauth_release(clnt->cl_auth); 366 + clnt->cl_auth = NULL; 367 + kref_put(kref, rpc_free_client); 368 + } 369 + 370 + /* 371 + * Release reference to the RPC client 372 */ 373 void 374 rpc_release_client(struct rpc_clnt *clnt) 375 { 376 + dprintk("RPC: rpc_release_client(%p)\n", clnt); 377 378 + if (list_empty(&clnt->cl_tasks)) 379 + wake_up(&destroy_wait); 380 + kref_put(&clnt->cl_kref, rpc_free_auth); 381 } 382 383 /** ··· 468 rpc_restore_sigmask(oldset); 469 } 470 471 + static 472 + struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt, 473 + struct rpc_message *msg, 474 + int flags, 475 + const struct rpc_call_ops *ops, 476 + void *data) 477 + { 478 + struct rpc_task *task, *ret; 479 + sigset_t oldset; 480 + 481 + task = rpc_new_task(clnt, flags, ops, data); 482 + if (task == NULL) { 483 + rpc_release_calldata(ops, data); 484 + return ERR_PTR(-ENOMEM); 485 + } 486 + 487 + /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */ 488 + rpc_task_sigmask(task, &oldset); 489 + if (msg != NULL) { 490 + rpc_call_setup(task, msg, 0); 491 + if (task->tk_status != 0) { 492 + ret = ERR_PTR(task->tk_status); 493 + rpc_put_task(task); 494 + goto out; 495 + } 496 + } 497 + atomic_inc(&task->tk_count); 498 + rpc_execute(task); 499 + ret = task; 500 + out: 501 + rpc_restore_sigmask(&oldset); 502 + return ret; 503 + } 504 + 505 + /** 506 + * rpc_call_sync - Perform a synchronous RPC call 507 + * @clnt: pointer to RPC client 508 + * @msg: RPC call parameters 509 + * @flags: RPC call flags 510 */ 511 int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) 512 { 513 struct rpc_task *task; 514 + int status; 515 516 BUG_ON(flags & RPC_TASK_ASYNC); 517 518 + task = rpc_do_run_task(clnt, msg, flags, &rpc_default_ops, NULL); 519 + if (IS_ERR(task)) 520 + return PTR_ERR(task); 521 status = task->tk_status; 522 rpc_put_task(task); 523 return status; 524 } 525 526 + /** 527 + * rpc_call_async - Perform an asynchronous RPC call 528 + * @clnt: pointer to RPC client 529 + * @msg: RPC call parameters 530 + * @flags: RPC call flags 531 + * @ops: RPC call ops 532 + * @data: user call data 533 */ 534 int 535 rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, 536 const struct rpc_call_ops *tk_ops, void *data) 537 { 538 struct rpc_task *task; 539 540 + task = rpc_do_run_task(clnt, msg, flags|RPC_TASK_ASYNC, tk_ops, data); 541 + if (IS_ERR(task)) 542 + return PTR_ERR(task); 543 + rpc_put_task(task); 544 + return 0; 545 } 546 547 + /** 548 + * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it 549 + * @clnt: pointer to RPC client 550 + * @flags: RPC flags 551 + * @ops: RPC call ops 552 + * @data: user call data 553 + */ 554 + struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, 555 + const struct rpc_call_ops *tk_ops, 556 + void *data) 557 + { 558 + return rpc_do_run_task(clnt, NULL, flags, tk_ops, data); 559 + } 560 + EXPORT_SYMBOL(rpc_run_task); 561 562 void 563 rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) ··· 745 static void 746 call_allocate(struct rpc_task *task) 747 { 748 + unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack; 749 struct rpc_rqst *req = task->tk_rqstp; 750 struct rpc_xprt *xprt = task->tk_xprt; 751 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; ··· 843 if (encode == NULL) 844 return; 845 846 task->tk_status = rpcauth_wrap_req(task, encode, req, p, 847 task->tk_msg.rpc_argp); 848 if (task->tk_status == -ENOMEM) { 849 /* XXX: Is this 
sane? */ 850 rpc_delay(task, 3*HZ); ··· 1177 task->tk_action = rpc_exit_task; 1178 1179 if (decode) { 1180 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, 1181 task->tk_msg.rpc_resp); 1182 } 1183 dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, 1184 task->tk_status); ··· 1273 * - if it isn't pointer subtraction in the NFS client may give 1274 * undefined results 1275 */ 1276 + dprintk("RPC: %5u %s: XDR representation not a multiple of" 1277 + " 4 bytes: 0x%x\n", task->tk_pid, __FUNCTION__, 1278 + task->tk_rqstp->rq_rcv_buf.len); 1279 goto out_eio; 1280 } 1281 if ((len -= 3) < 0) ··· 1283 p += 1; /* skip XID */ 1284 1285 if ((n = ntohl(*p++)) != RPC_REPLY) { 1286 + dprintk("RPC: %5u %s: not an RPC reply: %x\n", 1287 + task->tk_pid, __FUNCTION__, n); 1288 goto out_garbage; 1289 } 1290 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { ··· 1334 "authentication.\n", task->tk_client->cl_server); 1335 break; 1336 default: 1337 + dprintk("RPC: %5u %s: unknown auth error: %x\n", 1338 + task->tk_pid, __FUNCTION__, n); 1339 error = -EIO; 1340 } 1341 dprintk("RPC: %5u %s: call rejected %d\n", ··· 1342 goto out_err; 1343 } 1344 if (!(p = rpcauth_checkverf(task, p))) { 1345 + dprintk("RPC: %5u %s: auth check failed\n", 1346 + task->tk_pid, __FUNCTION__); 1347 goto out_garbage; /* bad verifier, retry */ 1348 } 1349 len = p - (__be32 *)iov->iov_base - 1; ··· 1381 task->tk_pid, __FUNCTION__); 1382 break; /* retry */ 1383 default: 1384 + dprintk("RPC: %5u %s: server accept status: %x\n", 1385 + task->tk_pid, __FUNCTION__, n); 1386 /* Also retry */ 1387 } 1388 ··· 1395 out_retry: 1396 return ERR_PTR(-EAGAIN); 1397 } 1398 out_eio: 1399 error = -EIO; 1400 out_err: 1401 rpc_exit(task, error); 1402 + dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, 1403 + __FUNCTION__, error); 1404 return ERR_PTR(error); 1405 out_overflow: 1406 + dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, 1407 + __FUNCTION__); 1408 goto out_garbage; 1409 } 1410 ··· 1421 .p_decode = rpcproc_decode_null, 1422 }; 1423 1424 + static int rpc_ping(struct rpc_clnt *clnt, int flags) 1425 { 1426 struct rpc_message msg = { 1427 .rpc_proc = &rpcproc_null, ··· 1432 put_rpccred(msg.rpc_cred); 1433 return err; 1434 } 1435 + 1436 + struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) 1437 + { 1438 + struct rpc_message msg = { 1439 + .rpc_proc = &rpcproc_null, 1440 + .rpc_cred = cred, 1441 + }; 1442 + return rpc_do_run_task(clnt, &msg, flags, &rpc_default_ops, NULL); 1443 + } 1444 + EXPORT_SYMBOL(rpc_call_null); 1445 + 1446 + #ifdef RPC_DEBUG 1447 + void rpc_show_tasks(void) 1448 + { 1449 + struct rpc_clnt *clnt; 1450 + struct rpc_task *t; 1451 + 1452 + spin_lock(&rpc_client_lock); 1453 + if (list_empty(&all_clients)) 1454 + goto out; 1455 + printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " 1456 + "-rpcwait -action- ---ops--\n"); 1457 + list_for_each_entry(clnt, &all_clients, cl_clients) { 1458 + if (list_empty(&clnt->cl_tasks)) 1459 + continue; 1460 + spin_lock(&clnt->cl_lock); 1461 + list_for_each_entry(t, &clnt->cl_tasks, tk_task) { 1462 + const char *rpc_waitq = "none"; 1463 + 1464 + if (RPC_IS_QUEUED(t)) 1465 + rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); 1466 + 1467 + printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", 1468 + t->tk_pid, 1469 + (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), 1470 + t->tk_flags, t->tk_status, 1471 + t->tk_client, 1472 + (t->tk_client ? 
t->tk_client->cl_prog : 0), 1473 + t->tk_rqstp, t->tk_timeout, 1474 + rpc_waitq, 1475 + t->tk_action, t->tk_ops); 1476 + } 1477 + spin_unlock(&clnt->cl_lock); 1478 + } 1479 + out: 1480 + spin_unlock(&rpc_client_lock); 1481 + } 1482 + #endif
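Besides the kref conversion, clnt.c now threads every client onto a global all_clients list under rpc_client_lock; that is what lets the rebuilt rpc_show_tasks() walk each client's cl_tasks queue for the debug dump instead of relying on a global task list. The registry itself is the stock list-plus-spinlock idiom; a sketch with illustrative names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_client {
            struct list_head node;
            /* ... */
    };

    static LIST_HEAD(all_my_clients);
    static DEFINE_SPINLOCK(my_client_lock);

    static void my_client_register(struct my_client *clnt)
    {
            spin_lock(&my_client_lock);
            list_add(&clnt->node, &all_my_clients);
            spin_unlock(&my_client_lock);
    }

    static void my_client_unregister(struct my_client *clnt)
    {
            spin_lock(&my_client_lock);
            list_del(&clnt->node);
            spin_unlock(&my_client_lock);
    }

    static void my_client_for_each(void (*fn)(struct my_client *))
    {
            struct my_client *clnt;

            spin_lock(&my_client_lock);
            list_for_each_entry(clnt, &all_my_clients, node)
                    fn(clnt);       /* inspect each client under the lock */
            spin_unlock(&my_client_lock);
    }

Note the nesting rpc_show_tasks() uses: the per-client cl_lock is taken inside rpc_client_lock, never the other way around.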
+50 -30
net/sunrpc/rpc_pipe.c
··· 344 mutex_lock(&inode->i_mutex); 345 clnt = RPC_I(inode)->private; 346 if (clnt) { 347 - atomic_inc(&clnt->cl_users); 348 m->private = clnt; 349 } else { 350 single_release(inode, file); ··· 448 simple_release_fs(&rpc_mount, &rpc_mount_count); 449 } 450 451 static int 452 rpc_lookup_parent(char *path, struct nameidata *nd) 453 { ··· 515 * FIXME: This probably has races. 516 */ 517 static void 518 - rpc_depopulate(struct dentry *parent) 519 { 520 struct inode *dir = parent->d_inode; 521 struct list_head *pos, *next; ··· 527 spin_lock(&dcache_lock); 528 list_for_each_safe(pos, next, &parent->d_subdirs) { 529 dentry = list_entry(pos, struct dentry, d_u.d_child); 530 spin_lock(&dentry->d_lock); 531 if (!d_unhashed(dentry)) { 532 dget_locked(dentry); ··· 546 if (n) { 547 do { 548 dentry = dvec[--n]; 549 - if (dentry->d_inode) { 550 - rpc_close_pipes(dentry->d_inode); 551 simple_unlink(dir, dentry); 552 - } 553 - inode_dir_notify(dir, DN_DELETE); 554 dput(dentry); 555 } while (n); 556 goto repeat; ··· 573 dentry = d_alloc_name(parent, files[i].name); 574 if (!dentry) 575 goto out_bad; 576 mode = files[i].mode; 577 inode = rpc_get_inode(dir->i_sb, mode); 578 if (!inode) { ··· 621 __rpc_rmdir(struct inode *dir, struct dentry *dentry) 622 { 623 int error; 624 - 625 - shrink_dcache_parent(dentry); 626 - if (d_unhashed(dentry)) 627 - return 0; 628 - if ((error = simple_rmdir(dir, dentry)) != 0) 629 - return error; 630 - if (!error) { 631 - inode_dir_notify(dir, DN_DELETE); 632 - d_drop(dentry); 633 - } 634 - return 0; 635 } 636 637 static struct dentry * 638 - rpc_lookup_create(struct dentry *parent, const char *name, int len) 639 { 640 struct inode *dir = parent->d_inode; 641 struct dentry *dentry; ··· 637 dentry = lookup_one_len(name, parent, len); 638 if (IS_ERR(dentry)) 639 goto out_err; 640 - if (dentry->d_inode) { 641 dput(dentry); 642 dentry = ERR_PTR(-EEXIST); 643 goto out_err; ··· 658 659 if ((error = rpc_lookup_parent(path, nd)) != 0) 660 return ERR_PTR(error); 661 - dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len); 662 if (IS_ERR(dentry)) 663 rpc_release_path(nd); 664 return dentry; ··· 690 rpc_release_path(&nd); 691 return dentry; 692 err_depopulate: 693 - rpc_depopulate(dentry); 694 __rpc_rmdir(dir, dentry); 695 err_dput: 696 dput(dentry); ··· 710 parent = dget_parent(dentry); 711 dir = parent->d_inode; 712 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 713 - rpc_depopulate(dentry); 714 error = __rpc_rmdir(dir, dentry); 715 dput(dentry); 716 mutex_unlock(&dir->i_mutex); ··· 725 struct inode *dir, *inode; 726 struct rpc_inode *rpci; 727 728 - dentry = rpc_lookup_create(parent, name, strlen(name)); 729 if (IS_ERR(dentry)) 730 return dentry; 731 dir = parent->d_inode; 732 inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); 733 if (!inode) 734 goto err_dput; ··· 750 rpci->private = private; 751 rpci->flags = flags; 752 rpci->ops = ops; 753 inode_dir_notify(dir, DN_CREATE); 754 dget(dentry); 755 out: ··· 775 parent = dget_parent(dentry); 776 dir = parent->d_inode; 777 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 778 - if (!d_unhashed(dentry)) { 779 - d_drop(dentry); 780 - if (dentry->d_inode) { 781 - rpc_close_pipes(dentry->d_inode); 782 - error = simple_unlink(dir, dentry); 783 - } 784 - inode_dir_notify(dir, DN_DELETE); 785 } 786 dput(dentry); 787 mutex_unlock(&dir->i_mutex); ··· 852 rpci->nreaders = 0; 853 rpci->nwriters = 0; 854 INIT_LIST_HEAD(&rpci->in_upcall); 855 INIT_LIST_HEAD(&rpci->pipe); 856 rpci->pipelen = 0; 857 
init_waitqueue_head(&rpci->waitq);
··· 344 mutex_lock(&inode->i_mutex); 345 clnt = RPC_I(inode)->private; 346 if (clnt) { 347 + kref_get(&clnt->cl_kref); 348 m->private = clnt; 349 } else { 350 single_release(inode, file); ··· 448 simple_release_fs(&rpc_mount, &rpc_mount_count); 449 } 450 451 + static int rpc_delete_dentry(struct dentry *dentry) 452 + { 453 + return 1; 454 + } 455 + 456 + static struct dentry_operations rpc_dentry_operations = { 457 + .d_delete = rpc_delete_dentry, 458 + }; 459 + 460 static int 461 rpc_lookup_parent(char *path, struct nameidata *nd) 462 { ··· 506 * FIXME: This probably has races. 507 */ 508 static void 509 + rpc_depopulate(struct dentry *parent, int start, int eof) 510 { 511 struct inode *dir = parent->d_inode; 512 struct list_head *pos, *next; ··· 518 spin_lock(&dcache_lock); 519 list_for_each_safe(pos, next, &parent->d_subdirs) { 520 dentry = list_entry(pos, struct dentry, d_u.d_child); 521 + if (!dentry->d_inode || 522 + dentry->d_inode->i_ino < start || 523 + dentry->d_inode->i_ino >= eof) 524 + continue; 525 spin_lock(&dentry->d_lock); 526 if (!d_unhashed(dentry)) { 527 dget_locked(dentry); ··· 533 if (n) { 534 do { 535 dentry = dvec[--n]; 536 + if (S_ISREG(dentry->d_inode->i_mode)) 537 simple_unlink(dir, dentry); 538 + else if (S_ISDIR(dentry->d_inode->i_mode)) 539 + simple_rmdir(dir, dentry); 540 + d_delete(dentry); 541 dput(dentry); 542 } while (n); 543 goto repeat; ··· 560 dentry = d_alloc_name(parent, files[i].name); 561 if (!dentry) 562 goto out_bad; 563 + dentry->d_op = &rpc_dentry_operations; 564 mode = files[i].mode; 565 inode = rpc_get_inode(dir->i_sb, mode); 566 if (!inode) { ··· 607 __rpc_rmdir(struct inode *dir, struct dentry *dentry) 608 { 609 int error; 610 + error = simple_rmdir(dir, dentry); 611 + if (!error) 612 + d_delete(dentry); 613 + return error; 614 } 615 616 static struct dentry * 617 + rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive) 618 { 619 struct inode *dir = parent->d_inode; 620 struct dentry *dentry; ··· 630 dentry = lookup_one_len(name, parent, len); 631 if (IS_ERR(dentry)) 632 goto out_err; 633 + if (!dentry->d_inode) 634 + dentry->d_op = &rpc_dentry_operations; 635 + else if (exclusive) { 636 dput(dentry); 637 dentry = ERR_PTR(-EEXIST); 638 goto out_err; ··· 649 650 if ((error = rpc_lookup_parent(path, nd)) != 0) 651 return ERR_PTR(error); 652 + dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1); 653 if (IS_ERR(dentry)) 654 rpc_release_path(nd); 655 return dentry; ··· 681 rpc_release_path(&nd); 682 return dentry; 683 err_depopulate: 684 + rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); 685 __rpc_rmdir(dir, dentry); 686 err_dput: 687 dput(dentry); ··· 701 parent = dget_parent(dentry); 702 dir = parent->d_inode; 703 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 704 + rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); 705 error = __rpc_rmdir(dir, dentry); 706 dput(dentry); 707 mutex_unlock(&dir->i_mutex); ··· 716 struct inode *dir, *inode; 717 struct rpc_inode *rpci; 718 719 + dentry = rpc_lookup_create(parent, name, strlen(name), 0); 720 if (IS_ERR(dentry)) 721 return dentry; 722 dir = parent->d_inode; 723 + if (dentry->d_inode) { 724 + rpci = RPC_I(dentry->d_inode); 725 + if (rpci->private != private || 726 + rpci->ops != ops || 727 + rpci->flags != flags) { 728 + dput (dentry); 729 + dentry = ERR_PTR(-EBUSY); 730 + } 731 + rpci->nkern_readwriters++; 732 + goto out; 733 + } 734 inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); 735 if (!inode) 736 goto err_dput; ··· 730 rpci->private = private;
731 rpci->flags = flags; 732 rpci->ops = ops; 733 + rpci->nkern_readwriters = 1; 734 inode_dir_notify(dir, DN_CREATE); 735 dget(dentry); 736 out: ··· 754 parent = dget_parent(dentry); 755 dir = parent->d_inode; 756 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 757 + if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) { 758 + rpc_close_pipes(dentry->d_inode); 759 + error = simple_unlink(dir, dentry); 760 + if (!error) 761 + d_delete(dentry); 762 } 763 dput(dentry); 764 mutex_unlock(&dir->i_mutex); ··· 833 rpci->nreaders = 0; 834 rpci->nwriters = 0; 835 INIT_LIST_HEAD(&rpci->in_upcall); 836 + INIT_LIST_HEAD(&rpci->in_downcall); 837 INIT_LIST_HEAD(&rpci->pipe); 838 rpci->pipelen = 0; 839 init_waitqueue_head(&rpci->waitq);
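The rpc_pipe.c hunks above let several kernel users share one pipe dentry: rpc_mkpipe() now hands back an existing pipe when the private data, ops and flags all match (bumping nkern_readwriters), and rpc_unlink() only closes and removes the pipe once that count falls to zero. Below is a minimal userspace sketch of the same create-or-share lifetime pattern; every name in it (pipe_obj, pipe_get, pipe_put) is hypothetical, not kernel API.

#include <stdlib.h>
#include <string.h>

struct pipe_obj {
        char            name[32];
        void            *ops;           /* must match to share */
        int             flags;          /* must match to share */
        unsigned        nkern_readwriters;
        struct pipe_obj *next;
};

static struct pipe_obj *pipes;

/* Create-or-share: analogous to rpc_mkpipe() returning the existing
 * pipe when the parameters match, or refusing when they do not. */
struct pipe_obj *pipe_get(const char *name, void *ops, int flags)
{
        struct pipe_obj *p;

        for (p = pipes; p != NULL; p = p->next)
                if (strcmp(p->name, name) == 0) {
                        if (p->ops != ops || p->flags != flags)
                                return NULL;    /* kernel returns -EBUSY */
                        p->nkern_readwriters++;
                        return p;
                }

        p = calloc(1, sizeof(*p));
        if (p == NULL)
                return NULL;
        strncpy(p->name, name, sizeof(p->name) - 1);
        p->ops = ops;
        p->flags = flags;
        p->nkern_readwriters = 1;       /* first user */
        p->next = pipes;
        pipes = p;
        return p;
}

/* Teardown happens only on the last put, as in rpc_unlink(). */
void pipe_put(struct pipe_obj *p)
{
        struct pipe_obj **pp;

        if (--p->nkern_readwriters != 0)
                return;                 /* still shared */
        for (pp = &pipes; *pp != NULL; pp = &(*pp)->next)
                if (*pp == p) {
                        *pp = p->next;  /* unlink from the list */
                        break;
                }
        free(p);
}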
+35 -30
net/sunrpc/rpcb_clnt.c
··· 12 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> 13 */ 14 15 #include <linux/types.h> 16 #include <linux/socket.h> 17 #include <linux/kernel.h> ··· 186 .program = &rpcb_program, 187 .version = version, 188 .authflavor = RPC_AUTH_UNIX, 189 - .flags = (RPC_CLNT_CREATE_ONESHOT | 190 - RPC_CLNT_CREATE_NOPING), 191 }; 192 193 ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); ··· 240 241 error = rpc_call_sync(rpcb_clnt, &msg, 0); 242 243 if (error < 0) 244 printk(KERN_WARNING "RPC: failed to contact local rpcbind " 245 "server (errno %d).\n", -error); ··· 249 return error; 250 } 251 252 - #ifdef CONFIG_ROOT_NFS 253 /** 254 - * rpcb_getport_external - obtain the port for an RPC service on a given host 255 * @sin: address of remote peer 256 * @prog: RPC program number to bind 257 * @vers: RPC version number to bind 258 * @prot: transport protocol to use to make this request 259 * 260 * Called from outside the RPC client in a synchronous task context. 261 * 262 - * For now, this supports only version 2 queries, but is used only by 263 - * mount_clnt for NFS_ROOT. 264 */ 265 - int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, 266 - __u32 vers, int prot) 267 { 268 struct rpcbind_args map = { 269 .r_prog = prog, ··· 279 char hostname[40]; 280 int status; 281 282 - dprintk("RPC: rpcb_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", 283 - NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); 284 285 - sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); 286 rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); 287 if (IS_ERR(rpcb_clnt)) 288 return PTR_ERR(rpcb_clnt); 289 290 status = rpc_call_sync(rpcb_clnt, &msg, 0); 291 292 if (status >= 0) { 293 if (map.r_port != 0) ··· 297 } 298 return status; 299 } 300 - #endif 301 302 /** 303 - * rpcb_getport - obtain the port for a given RPC service on a given host 304 * @task: task that is waiting for portmapper request 305 * 306 * This one can be called for an ongoing RPC request, and can be used in 307 * an async (rpciod) context. 
308 */ 309 - void rpcb_getport(struct rpc_task *task) 310 { 311 struct rpc_clnt *clnt = task->tk_client; 312 int bind_version; ··· 317 struct sockaddr addr; 318 int status; 319 320 - dprintk("RPC: %5u rpcb_getport(%s, %u, %u, %d)\n", 321 - task->tk_pid, clnt->cl_server, 322 - clnt->cl_prog, clnt->cl_vers, xprt->prot); 323 324 /* Autobind on cloned rpc clients is discouraged */ 325 BUG_ON(clnt->cl_parent != clnt); 326 327 if (xprt_test_and_set_binding(xprt)) { 328 status = -EACCES; /* tell caller to check again */ 329 - dprintk("RPC: %5u rpcb_getport waiting for another binder\n", 330 - task->tk_pid); 331 goto bailout_nowake; 332 } 333 ··· 338 /* Someone else may have bound if we slept */ 339 if (xprt_bound(xprt)) { 340 status = 0; 341 - dprintk("RPC: %5u rpcb_getport already bound\n", task->tk_pid); 342 goto bailout_nofree; 343 } 344 345 if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { 346 xprt->bind_index = 0; 347 status = -EACCES; /* tell caller to try again later */ 348 - dprintk("RPC: %5u rpcb_getport no more getport versions " 349 - "available\n", task->tk_pid); 350 goto bailout_nofree; 351 } 352 bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; 353 354 - dprintk("RPC: %5u rpcb_getport trying rpcbind version %u\n", 355 - task->tk_pid, bind_version); 356 357 map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); 358 if (!map) { 359 status = -ENOMEM; 360 - dprintk("RPC: %5u rpcb_getport no memory available\n", 361 - task->tk_pid); 362 goto bailout_nofree; 363 } 364 map->r_prog = clnt->cl_prog; ··· 377 rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); 378 if (IS_ERR(rpcb_clnt)) { 379 status = PTR_ERR(rpcb_clnt); 380 - dprintk("RPC: %5u rpcb_getport rpcb_create failed, error %ld\n", 381 - task->tk_pid, PTR_ERR(rpcb_clnt)); 382 goto bailout; 383 } 384 385 child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); 386 if (IS_ERR(child)) { 387 status = -EIO; 388 - dprintk("RPC: %5u rpcb_getport rpc_run_task failed\n", 389 - task->tk_pid); 390 goto bailout_nofree; 391 } 392 rpc_put_task(child);
··· 12 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> 13 */ 14 15 + #include <linux/module.h> 16 + 17 #include <linux/types.h> 18 #include <linux/socket.h> 19 #include <linux/kernel.h> ··· 184 .program = &rpcb_program, 185 .version = version, 186 .authflavor = RPC_AUTH_UNIX, 187 + .flags = (RPC_CLNT_CREATE_NOPING | 188 + RPC_CLNT_CREATE_INTR), 189 }; 190 191 ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); ··· 238 239 error = rpc_call_sync(rpcb_clnt, &msg, 0); 240 241 + rpc_shutdown_client(rpcb_clnt); 242 if (error < 0) 243 printk(KERN_WARNING "RPC: failed to contact local rpcbind " 244 "server (errno %d).\n", -error); ··· 246 return error; 247 } 248 249 /** 250 + * rpcb_getport_sync - obtain the port for an RPC service on a given host 251 * @sin: address of remote peer 252 * @prog: RPC program number to bind 253 * @vers: RPC version number to bind 254 * @prot: transport protocol to use to make this request 255 * 256 * Called from outside the RPC client in a synchronous task context. 257 + * Uses default timeout parameters specified by underlying transport. 258 * 259 + * XXX: Needs to support IPv6, and rpcbind versions 3 and 4 260 */ 261 + int rpcb_getport_sync(struct sockaddr_in *sin, __u32 prog, 262 + __u32 vers, int prot) 263 { 264 struct rpcbind_args map = { 265 .r_prog = prog, ··· 277 char hostname[40]; 278 int status; 279 280 + dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n", 281 + __FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); 282 283 + sprintf(hostname, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr)); 284 rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); 285 if (IS_ERR(rpcb_clnt)) 286 return PTR_ERR(rpcb_clnt); 287 288 status = rpc_call_sync(rpcb_clnt, &msg, 0); 289 + rpc_shutdown_client(rpcb_clnt); 290 291 if (status >= 0) { 292 if (map.r_port != 0) ··· 294 } 295 return status; 296 } 297 + EXPORT_SYMBOL_GPL(rpcb_getport_sync); 298 299 /** 300 + * rpcb_getport_async - obtain the port for a given RPC service on a given host 301 * @task: task that is waiting for portmapper request 302 * 303 * This one can be called for an ongoing RPC request, and can be used in 304 * an async (rpciod) context. 
305 */ 306 + void rpcb_getport_async(struct rpc_task *task) 307 { 308 struct rpc_clnt *clnt = task->tk_client; 309 int bind_version; ··· 314 struct sockaddr addr; 315 int status; 316 317 + dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", 318 + task->tk_pid, __FUNCTION__, 319 + clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); 320 321 /* Autobind on cloned rpc clients is discouraged */ 322 BUG_ON(clnt->cl_parent != clnt); 323 324 if (xprt_test_and_set_binding(xprt)) { 325 status = -EACCES; /* tell caller to check again */ 326 + dprintk("RPC: %5u %s: waiting for another binder\n", 327 + task->tk_pid, __FUNCTION__); 328 goto bailout_nowake; 329 } 330 ··· 335 /* Someone else may have bound if we slept */ 336 if (xprt_bound(xprt)) { 337 status = 0; 338 + dprintk("RPC: %5u %s: already bound\n", 339 + task->tk_pid, __FUNCTION__); 340 goto bailout_nofree; 341 } 342 343 if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { 344 xprt->bind_index = 0; 345 status = -EACCES; /* tell caller to try again later */ 346 + dprintk("RPC: %5u %s: no more getport versions available\n", 347 + task->tk_pid, __FUNCTION__); 348 goto bailout_nofree; 349 } 350 bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; 351 352 + dprintk("RPC: %5u %s: trying rpcbind version %u\n", 353 + task->tk_pid, __FUNCTION__, bind_version); 354 355 map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); 356 if (!map) { 357 status = -ENOMEM; 358 + dprintk("RPC: %5u %s: no memory available\n", 359 + task->tk_pid, __FUNCTION__); 360 goto bailout_nofree; 361 } 362 map->r_prog = clnt->cl_prog; ··· 373 rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); 374 if (IS_ERR(rpcb_clnt)) { 375 status = PTR_ERR(rpcb_clnt); 376 + dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", 377 + task->tk_pid, __FUNCTION__, PTR_ERR(rpcb_clnt)); 378 goto bailout; 379 } 380 381 child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); 382 + rpc_release_client(rpcb_clnt); 383 if (IS_ERR(child)) { 384 status = -EIO; 385 + dprintk("RPC: %5u %s: rpc_run_task failed\n", 386 + task->tk_pid, __FUNCTION__); 387 goto bailout_nofree; 388 } 389 rpc_put_task(child);
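rpcb_getport_sync() above is the renamed synchronous interface, now exported with EXPORT_SYMBOL_GPL; on success it returns the discovered port as a positive value. A hypothetical in-kernel caller might look like the sketch below; probe_nfs_port() and its error handling are illustrative only and not part of this merge.

/* Hypothetical caller: ask the remote rpcbind for the NFSv3 TCP port
 * and fill it into the server address on success. */
static int probe_nfs_port(struct sockaddr_in *server)
{
        int port;

        port = rpcb_getport_sync(server, NFS_PROGRAM, 3, IPPROTO_TCP);
        if (port < 0)
                return port;            /* rpcbind query failed */

        server->sin_port = htons(port); /* record the discovered port */
        return 0;
}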
+60 -149
net/sunrpc/sched.c
··· 25 #ifdef RPC_DEBUG 26 #define RPCDBG_FACILITY RPCDBG_SCHED 27 #define RPC_TASK_MAGIC_ID 0xf00baa 28 - static int rpc_task_id; 29 #endif 30 31 /* ··· 39 static mempool_t *rpc_buffer_mempool __read_mostly; 40 41 static void __rpc_default_timer(struct rpc_task *task); 42 - static void rpciod_killall(void); 43 static void rpc_async_schedule(struct work_struct *); 44 static void rpc_release_task(struct rpc_task *task); 45 ··· 48 static RPC_WAITQ(delay_queue, "delayq"); 49 50 /* 51 - * All RPC tasks are linked into this list 52 - */ 53 - static LIST_HEAD(all_tasks); 54 - 55 - /* 56 * rpciod-related stuff 57 */ 58 static DEFINE_MUTEX(rpciod_mutex); 59 - static unsigned int rpciod_users; 60 struct workqueue_struct *rpciod_workqueue; 61 - 62 - /* 63 - * Spinlock for other critical sections of code. 64 - */ 65 - static DEFINE_SPINLOCK(rpc_sched_lock); 66 67 /* 68 * Disable the timer for a given RPC task. Should be called with ··· 255 return 0; 256 } 257 258 static void rpc_set_active(struct rpc_task *task) 259 { 260 if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) 261 return; 262 - spin_lock(&rpc_sched_lock); 263 - #ifdef RPC_DEBUG 264 - task->tk_magic = RPC_TASK_MAGIC_ID; 265 - task->tk_pid = rpc_task_id++; 266 - #endif 267 /* Add to global list of all tasks */ 268 - list_add_tail(&task->tk_task, &all_tasks); 269 - spin_unlock(&rpc_sched_lock); 270 } 271 272 /* ··· 821 if (tk_ops->rpc_call_prepare != NULL) 822 task->tk_action = rpc_prepare_task; 823 task->tk_calldata = calldata; 824 825 /* Initialize retry counters */ 826 task->tk_garb_retry = 2; ··· 834 task->tk_workqueue = rpciod_workqueue; 835 836 if (clnt) { 837 - atomic_inc(&clnt->cl_users); 838 if (clnt->cl_softrtry) 839 task->tk_flags |= RPC_TASK_SOFT; 840 if (!clnt->cl_intr) ··· 864 } 865 866 /* 867 - * Create a new task for the specified client. We have to 868 - * clean up after an allocation failure, as the client may 869 - * have specified "oneshot". 870 */ 871 struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) 872 { ··· 872 873 task = rpc_alloc_task(); 874 if (!task) 875 - goto cleanup; 876 877 rpc_init_task(task, clnt, flags, tk_ops, calldata); 878 ··· 880 task->tk_flags |= RPC_TASK_DYNAMIC; 881 out: 882 return task; 883 - 884 - cleanup: 885 - /* Check whether to release the client */ 886 - if (clnt) { 887 - printk("rpc_new_task: failed, users=%d, oneshot=%d\n", 888 - atomic_read(&clnt->cl_users), clnt->cl_oneshot); 889 - atomic_inc(&clnt->cl_users); /* pretend we were used ... */
890 - rpc_release_client(clnt); 891 - } 892 - goto out; 893 } 894 895 ··· 912 #endif 913 dprintk("RPC: %5u release task\n", task->tk_pid); 914 915 - /* Remove from global task list */ 916 - spin_lock(&rpc_sched_lock); 917 - list_del(&task->tk_task); 918 - spin_unlock(&rpc_sched_lock); 919 - 920 BUG_ON (RPC_IS_QUEUED(task)); 921 922 /* Synchronously delete any running timer */ ··· 933 rpc_put_task(task); 934 } 935 936 - /** 937 - * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it 938 - * @clnt: pointer to RPC client 939 - * @flags: RPC flags 940 - * @ops: RPC call ops 941 - * @data: user call data 942 - */ 943 - struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, 944 - const struct rpc_call_ops *ops, 945 - void *data) 946 - { 947 - struct rpc_task *task; 948 - task = rpc_new_task(clnt, flags, ops, data); 949 - if (task == NULL) { 950 - rpc_release_calldata(ops, data); 951 - return ERR_PTR(-ENOMEM); 952 - } 953 - atomic_inc(&task->tk_count); 954 - rpc_execute(task); 955 - return task; 956 - } 957 - EXPORT_SYMBOL(rpc_run_task); 958 - 959 /* 960 * Kill all tasks for the given client. 961 * XXX: kill their descendants as well? ··· 940 void rpc_killall_tasks(struct rpc_clnt *clnt) 941 { 942 struct rpc_task *rovr; 943 - struct list_head *le; 944 945 dprintk("RPC: killing all tasks for client %p\n", clnt); 946 - 947 /* 948 * Spin lock all_tasks to prevent changes... 949 */ 950 - spin_lock(&rpc_sched_lock); 951 - alltask_for_each(rovr, le, &all_tasks) { 952 if (! RPC_IS_ACTIVATED(rovr)) 953 continue; 954 - if (!clnt || rovr->tk_client == clnt) { 955 rovr->tk_flags |= RPC_TASK_KILLED; 956 rpc_exit(rovr, -EIO); 957 rpc_wake_up_task(rovr); 958 } 959 } 960 - spin_unlock(&rpc_sched_lock); 961 - } 962 - 963 - static void rpciod_killall(void) 964 - { 965 - unsigned long flags; 966 - 967 - while (!list_empty(&all_tasks)) { 968 - clear_thread_flag(TIF_SIGPENDING); 969 - rpc_killall_tasks(NULL); 970 - flush_workqueue(rpciod_workqueue); 971 - if (!list_empty(&all_tasks)) { 972 - dprintk("RPC: rpciod_killall: waiting for tasks " 973 - "to exit\n"); 974 - yield(); 975 - } 976 - } 977 - 978 - spin_lock_irqsave(&current->sighand->siglock, flags); 979 - recalc_sigpending(); 980 - spin_unlock_irqrestore(&current->sighand->siglock, flags); 981 } 982 983 /* ··· 970 struct workqueue_struct *wq; 971 int error = 0; 972 973 mutex_lock(&rpciod_mutex); 974 - dprintk("RPC: rpciod_up: users %u\n", rpciod_users); 975 - rpciod_users++; 976 - if (rpciod_workqueue) 977 - goto out; 978 - /* 979 - * If there's no pid, we should be the first user. 980 - */ 981 - if (rpciod_users > 1) 982 - printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); 983 /* 984 * Create the rpciod thread and wait for it to start. 
985 */ 986 error = -ENOMEM; 987 wq = create_workqueue("rpciod"); 988 - if (wq == NULL) { 989 - printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); 990 - rpciod_users--; 991 goto out; 992 - } 993 rpciod_workqueue = wq; 994 error = 0; 995 out: 996 mutex_unlock(&rpciod_mutex); 997 return error; ··· 999 void 1000 rpciod_down(void) 1001 { 1002 mutex_lock(&rpciod_mutex); 1003 - dprintk("RPC: rpciod_down sema %u\n", rpciod_users); 1004 - if (rpciod_users) { 1005 - if (--rpciod_users) 1006 - goto out; 1007 - } else 1008 - printk(KERN_WARNING "rpciod_down: no users??\n"); 1009 1010 - if (!rpciod_workqueue) { 1011 - dprintk("RPC: rpciod_down: Nothing to do!\n"); 1012 - goto out; 1013 } 1014 - rpciod_killall(); 1015 - 1016 - destroy_workqueue(rpciod_workqueue); 1017 - rpciod_workqueue = NULL; 1018 - out: 1019 mutex_unlock(&rpciod_mutex); 1020 } 1021 - 1022 - #ifdef RPC_DEBUG 1023 - void rpc_show_tasks(void) 1024 - { 1025 - struct list_head *le; 1026 - struct rpc_task *t; 1027 - 1028 - spin_lock(&rpc_sched_lock); 1029 - if (list_empty(&all_tasks)) { 1030 - spin_unlock(&rpc_sched_lock); 1031 - return; 1032 - } 1033 - printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " 1034 - "-rpcwait -action- ---ops--\n"); 1035 - alltask_for_each(t, le, &all_tasks) { 1036 - const char *rpc_waitq = "none"; 1037 - 1038 - if (RPC_IS_QUEUED(t)) 1039 - rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); 1040 - 1041 - printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", 1042 - t->tk_pid, 1043 - (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), 1044 - t->tk_flags, t->tk_status, 1045 - t->tk_client, 1046 - (t->tk_client ? t->tk_client->cl_prog : 0), 1047 - t->tk_rqstp, t->tk_timeout, 1048 - rpc_waitq, 1049 - t->tk_action, t->tk_ops); 1050 - } 1051 - spin_unlock(&rpc_sched_lock); 1052 - } 1053 - #endif 1054 1055 void 1056 rpc_destroy_mempool(void)
··· 25 #ifdef RPC_DEBUG 26 #define RPCDBG_FACILITY RPCDBG_SCHED 27 #define RPC_TASK_MAGIC_ID 0xf00baa 28 #endif 29 30 /* ··· 40 static mempool_t *rpc_buffer_mempool __read_mostly; 41 42 static void __rpc_default_timer(struct rpc_task *task); 43 static void rpc_async_schedule(struct work_struct *); 44 static void rpc_release_task(struct rpc_task *task); 45 ··· 50 static RPC_WAITQ(delay_queue, "delayq"); 51 52 /* 53 * rpciod-related stuff 54 */ 55 static DEFINE_MUTEX(rpciod_mutex); 56 + static atomic_t rpciod_users = ATOMIC_INIT(0); 57 struct workqueue_struct *rpciod_workqueue; 58 59 /* 60 * Disable the timer for a given RPC task. Should be called with ··· 267 return 0; 268 } 269 270 + #ifdef RPC_DEBUG 271 + static void rpc_task_set_debuginfo(struct rpc_task *task) 272 + { 273 + static atomic_t rpc_pid; 274 + 275 + task->tk_magic = RPC_TASK_MAGIC_ID; 276 + task->tk_pid = atomic_inc_return(&rpc_pid); 277 + } 278 + #else 279 + static inline void rpc_task_set_debuginfo(struct rpc_task *task) 280 + { 281 + } 282 + #endif 283 + 284 static void rpc_set_active(struct rpc_task *task) 285 { 286 + struct rpc_clnt *clnt; 287 if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) 288 return; 289 + rpc_task_set_debuginfo(task); 290 /* Add to global list of all tasks */ 291 + clnt = task->tk_client; 292 + if (clnt != NULL) { 293 + spin_lock(&clnt->cl_lock); 294 + list_add_tail(&task->tk_task, &clnt->cl_tasks); 295 + spin_unlock(&clnt->cl_lock); 296 + } 297 } 298 299 /* ··· 818 if (tk_ops->rpc_call_prepare != NULL) 819 task->tk_action = rpc_prepare_task; 820 task->tk_calldata = calldata; 821 + INIT_LIST_HEAD(&task->tk_task); 822 823 /* Initialize retry counters */ 824 task->tk_garb_retry = 2; ··· 830 task->tk_workqueue = rpciod_workqueue; 831 832 if (clnt) { 833 + kref_get(&clnt->cl_kref); 834 if (clnt->cl_softrtry) 835 task->tk_flags |= RPC_TASK_SOFT; 836 if (!clnt->cl_intr) ··· 860 } 861 862 /* 863 + * Create a new task for the specified client. 864 */ 865 struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) 866 { ··· 870 871 task = rpc_alloc_task(); 872 if (!task) 873 + goto out; 874 875 rpc_init_task(task, clnt, flags, tk_ops, calldata); 876 ··· 878 task->tk_flags |= RPC_TASK_DYNAMIC; 879 out: 880 return task; 881 } 882 883 ··· 920 #endif 921 dprintk("RPC: %5u release task\n", task->tk_pid); 922 923 + if (!list_empty(&task->tk_task)) { 924 + struct rpc_clnt *clnt = task->tk_client; 925 + /* Remove from client task list */ 926 + spin_lock(&clnt->cl_lock); 927 + list_del(&task->tk_task); 928 + spin_unlock(&clnt->cl_lock); 929 + } 930 BUG_ON (RPC_IS_QUEUED(task)); 931 932 /* Synchronously delete any running timer */ ··· 939 rpc_put_task(task); 940 } 941 942 /* 943 * Kill all tasks for the given client. 944 * XXX: kill their descendants as well? ··· 969 void rpc_killall_tasks(struct rpc_clnt *clnt) 970 { 971 struct rpc_task *rovr; 972 973 + 974 + if (list_empty(&clnt->cl_tasks)) 975 + return; 976 dprintk("RPC: killing all tasks for client %p\n", clnt); 977 /* 978 * Spin lock all_tasks to prevent changes... 979 */ 980 + spin_lock(&clnt->cl_lock); 981 + list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { 982 if (! RPC_IS_ACTIVATED(rovr))
983 continue; 984 + if (!(rovr->tk_flags & RPC_TASK_KILLED)) { 985 rovr->tk_flags |= RPC_TASK_KILLED; 986 rpc_exit(rovr, -EIO); 987 rpc_wake_up_task(rovr); 988 } 989 } 990 + spin_unlock(&clnt->cl_lock); 991 } 992 993 /* ··· 1018 struct workqueue_struct *wq; 1019 int error = 0; 1020 1021 + if (atomic_inc_not_zero(&rpciod_users)) 1022 + return 0; 1023 + 1024 mutex_lock(&rpciod_mutex); 1025 + 1026 + /* Guard against races with rpciod_down() */ 1027 + if (rpciod_workqueue != NULL) 1028 + goto out_ok; 1029 /* 1030 * Create the rpciod thread and wait for it to start. 1031 */ 1032 + dprintk("RPC: creating workqueue rpciod\n"); 1033 error = -ENOMEM; 1034 wq = create_workqueue("rpciod"); 1035 + if (wq == NULL) 1036 goto out; 1037 + 1038 rpciod_workqueue = wq; 1039 error = 0; 1040 + out_ok: 1041 + atomic_inc(&rpciod_users); 1042 out: 1043 mutex_unlock(&rpciod_mutex); 1044 return error; ··· 1048 void 1049 rpciod_down(void) 1050 { 1051 + if (!atomic_dec_and_test(&rpciod_users)) 1052 + return; 1053 + 1054 mutex_lock(&rpciod_mutex); 1055 + dprintk("RPC: destroying workqueue rpciod\n"); 1056 1057 + if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) { 1058 + destroy_workqueue(rpciod_workqueue); 1059 + rpciod_workqueue = NULL; 1060 } 1061 mutex_unlock(&rpciod_mutex); 1062 } 1063 1064 void 1065 rpc_destroy_mempool(void)
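Two patterns in the sched.c hunks deserve a closer look: the global all_tasks list becomes a per-client list guarded by cl_lock, and rpciod_users becomes an atomic_t so rpciod_up() can take a lock-free fast path (atomic_inc_not_zero) whenever the workqueue already exists. Here is a minimal userspace sketch of that refcounted-singleton pattern, assuming POSIX threads and C11 atomics; all names are illustrative, with malloc()/free() standing in for create_workqueue()/destroy_workqueue().

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static atomic_int users;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *resource;

int resource_up(void)
{
        int old = atomic_load(&users);

        /* Fast path: the moral equivalent of atomic_inc_not_zero() */
        while (old != 0)
                if (atomic_compare_exchange_weak(&users, &old, old + 1))
                        return 0;

        pthread_mutex_lock(&lock);
        if (resource == NULL) {
                resource = malloc(64);          /* "create_workqueue()" */
                if (resource == NULL) {
                        pthread_mutex_unlock(&lock);
                        return -1;
                }
        }
        atomic_fetch_add(&users, 1);
        pthread_mutex_unlock(&lock);
        return 0;
}

void resource_down(void)
{
        if (atomic_fetch_sub(&users, 1) != 1)
                return;                         /* not the last user */

        pthread_mutex_lock(&lock);
        if (atomic_load(&users) == 0 && resource != NULL) {
                free(resource);                 /* "destroy_workqueue()" */
                resource = NULL;
        }
        pthread_mutex_unlock(&lock);
}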
+3 -5
net/sunrpc/sunrpc_syms.c
··· 28 EXPORT_SYMBOL(rpc_sleep_on); 29 EXPORT_SYMBOL(rpc_wake_up_next); 30 EXPORT_SYMBOL(rpc_wake_up_task); 31 - EXPORT_SYMBOL(rpciod_down); 32 - EXPORT_SYMBOL(rpciod_up); 33 - EXPORT_SYMBOL(rpc_new_task); 34 EXPORT_SYMBOL(rpc_wake_up_status); 35 36 /* RPC client functions */ 37 EXPORT_SYMBOL(rpc_clone_client); 38 EXPORT_SYMBOL(rpc_bind_new_program); 39 - EXPORT_SYMBOL(rpc_destroy_client); 40 EXPORT_SYMBOL(rpc_shutdown_client); 41 EXPORT_SYMBOL(rpc_killall_tasks); 42 EXPORT_SYMBOL(rpc_call_sync); ··· 57 EXPORT_SYMBOL(rpcauth_create); 58 EXPORT_SYMBOL(rpcauth_lookupcred); 59 EXPORT_SYMBOL(rpcauth_lookup_credcache); 60 - EXPORT_SYMBOL(rpcauth_free_credcache); 61 EXPORT_SYMBOL(rpcauth_init_credcache); 62 EXPORT_SYMBOL(put_rpccred); 63 ··· 152 cache_register(&ip_map_cache); 153 cache_register(&unix_gid_cache); 154 init_socket_xprt(); 155 out: 156 return err; 157 } ··· 160 static void __exit 161 cleanup_sunrpc(void) 162 { 163 cleanup_socket_xprt(); 164 unregister_rpc_pipefs(); 165 rpc_destroy_mempool();
··· 28 EXPORT_SYMBOL(rpc_sleep_on); 29 EXPORT_SYMBOL(rpc_wake_up_next); 30 EXPORT_SYMBOL(rpc_wake_up_task); 31 EXPORT_SYMBOL(rpc_wake_up_status); 32 33 /* RPC client functions */ 34 EXPORT_SYMBOL(rpc_clone_client); 35 EXPORT_SYMBOL(rpc_bind_new_program); 36 EXPORT_SYMBOL(rpc_shutdown_client); 37 EXPORT_SYMBOL(rpc_killall_tasks); 38 EXPORT_SYMBOL(rpc_call_sync); ··· 61 EXPORT_SYMBOL(rpcauth_create); 62 EXPORT_SYMBOL(rpcauth_lookupcred); 63 EXPORT_SYMBOL(rpcauth_lookup_credcache); 64 + EXPORT_SYMBOL(rpcauth_destroy_credcache); 65 EXPORT_SYMBOL(rpcauth_init_credcache); 66 EXPORT_SYMBOL(put_rpccred); 67 ··· 156 cache_register(&ip_map_cache); 157 cache_register(&unix_gid_cache); 158 init_socket_xprt(); 159 + rpcauth_init_module(); 160 out: 161 return err; 162 } ··· 163 static void __exit 164 cleanup_sunrpc(void) 165 { 166 + rpcauth_remove_module(); 167 cleanup_socket_xprt(); 168 unregister_rpc_pipefs(); 169 rpc_destroy_mempool();
+20
net/sunrpc/svcsock.c
··· 644 struct msghdr msg = { 645 .msg_flags = MSG_DONTWAIT, 646 }; 647 int len; 648 649 len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, ··· 654 */ 655 memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); 656 rqstp->rq_addrlen = svsk->sk_remotelen; 657 658 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", 659 svsk, iov[0].iov_base, iov[0].iov_len, len); ··· 1078 goto failed; 1079 memcpy(&newsvsk->sk_remote, sin, slen); 1080 newsvsk->sk_remotelen = slen; 1081 1082 svc_sock_received(newsvsk); 1083
··· 644 struct msghdr msg = { 645 .msg_flags = MSG_DONTWAIT, 646 }; 647 + struct sockaddr *sin; 648 int len; 649 650 len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, ··· 653 */ 654 memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); 655 rqstp->rq_addrlen = svsk->sk_remotelen; 656 + 657 + /* Destination address in request is needed for binding the 658 + * source address in RPC callbacks later. 659 + */ 660 + sin = (struct sockaddr *)&svsk->sk_local; 661 + switch (sin->sa_family) { 662 + case AF_INET: 663 + rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr; 664 + break; 665 + case AF_INET6: 666 + rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr; 667 + break; 668 + } 669 670 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", 671 svsk, iov[0].iov_base, iov[0].iov_len, len); ··· 1064 goto failed; 1065 memcpy(&newsvsk->sk_remote, sin, slen); 1066 newsvsk->sk_remotelen = slen; 1067 + err = kernel_getsockname(newsock, sin, &slen); 1068 + if (unlikely(err < 0)) { 1069 + dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err); 1070 + slen = offsetof(struct sockaddr, sa_data); 1071 + } 1072 + memcpy(&newsvsk->sk_local, sin, slen); 1073 1074 svc_sock_received(newsvsk); 1075
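The svcsock.c hunks record each request's destination address: the local address is captured with kernel_getsockname() when the TCP connection is accepted, stashed in sk_local, and copied into rq_daddr per request so later NLM callbacks can bind to the address the client actually reached. A userspace sketch of the same idea using getsockname() after accept(); record_local_addr() and union daddr are hypothetical helpers.

#include <netinet/in.h>
#include <sys/socket.h>

/* Mirrors the rq_daddr union in spirit: one slot per address family. */
union daddr {
        struct in_addr  addr;           /* AF_INET */
        struct in6_addr addr6;          /* AF_INET6 */
};

static int record_local_addr(int fd, union daddr *out)
{
        struct sockaddr_storage ss;
        socklen_t len = sizeof(ss);

        if (getsockname(fd, (struct sockaddr *)&ss, &len) < 0)
                return -1;

        switch (ss.ss_family) {
        case AF_INET:
                out->addr = ((struct sockaddr_in *)&ss)->sin_addr;
                break;
        case AF_INET6:
                out->addr6 = ((struct sockaddr_in6 *)&ss)->sin6_addr;
                break;
        }
        return 0;
}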
+8 -11
net/sunrpc/xprt.c
··· 127 clear_bit(XPRT_LOCKED, &xprt->state); 128 smp_mb__after_clear_bit(); 129 } else 130 - schedule_work(&xprt->task_cleanup); 131 } 132 133 /* ··· 515 if (xprt_connecting(xprt)) 516 xprt_release_write(xprt, NULL); 517 else 518 - schedule_work(&xprt->task_cleanup); 519 return; 520 out_abort: 521 spin_unlock(&xprt->transport_lock); ··· 886 887 /** 888 * xprt_create_transport - create an RPC transport 889 - * @proto: requested transport protocol 890 - * @ap: remote peer address 891 - * @size: length of address 892 - * @to: timeout parameters 893 * 894 */ 895 - struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to) 896 { 897 struct rpc_xprt *xprt; 898 struct rpc_rqst *req; 899 900 - switch (proto) { 901 case IPPROTO_UDP: 902 - xprt = xs_setup_udp(ap, size, to); 903 break; 904 case IPPROTO_TCP: 905 - xprt = xs_setup_tcp(ap, size, to); 906 break; 907 default: 908 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", 909 - proto); 910 return ERR_PTR(-EIO); 911 } 912 if (IS_ERR(xprt)) {
··· 127 clear_bit(XPRT_LOCKED, &xprt->state); 128 smp_mb__after_clear_bit(); 129 } else 130 + queue_work(rpciod_workqueue, &xprt->task_cleanup); 131 } 132 133 /* ··· 515 if (xprt_connecting(xprt)) 516 xprt_release_write(xprt, NULL); 517 else 518 + queue_work(rpciod_workqueue, &xprt->task_cleanup); 519 return; 520 out_abort: 521 spin_unlock(&xprt->transport_lock); ··· 886 887 /** 888 * xprt_create_transport - create an RPC transport 889 + * @args: rpc transport creation arguments 890 * 891 */ 892 + struct rpc_xprt *xprt_create_transport(struct rpc_xprtsock_create *args) 893 { 894 struct rpc_xprt *xprt; 895 struct rpc_rqst *req; 896 897 + switch (args->proto) { 898 case IPPROTO_UDP: 899 + xprt = xs_setup_udp(args); 900 break; 901 case IPPROTO_TCP: 902 + xprt = xs_setup_tcp(args); 903 break; 904 default: 905 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", 906 + args->proto); 907 return ERR_PTR(-EIO); 908 } 909 if (IS_ERR(xprt)) {
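xprt_create_transport() now takes a single argument structure rather than a growing parameter list. The sketch below shows a caller filling it in, using only the fields visible in these hunks (proto, srcaddr, dstaddr, addrlen, timeout); the wrapper name is hypothetical, and a NULL srcaddr preserves the old bind-to-any behaviour.

/* Hypothetical wrapper: build a TCP transport to the given server. */
static struct rpc_xprt *make_tcp_transport(struct sockaddr_in *server)
{
        struct rpc_xprtsock_create args = {
                .proto   = IPPROTO_TCP,
                .srcaddr = NULL,        /* no explicit source binding */
                .dstaddr = (struct sockaddr *)server,
                .addrlen = sizeof(*server),
                .timeout = NULL,        /* use the transport defaults */
        };

        return xprt_create_transport(&args);
}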
+41 -40
net/sunrpc/xprtsock.c
··· 235 * Connection of transports 236 */ 237 struct delayed_work connect_worker; 238 unsigned short port; 239 240 /* ··· 654 655 dprintk("RPC: xs_destroy xprt %p\n", xprt); 656 657 - cancel_delayed_work(&transport->connect_worker); 658 - flush_scheduled_work(); 659 660 xprt_disconnect(xprt); 661 xs_close(xprt); ··· 1001 /* Try to schedule an autoclose RPC calls */ 1002 set_bit(XPRT_CLOSE_WAIT, &xprt->state); 1003 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 1004 - schedule_work(&xprt->task_cleanup); 1005 default: 1006 xprt_disconnect(xprt); 1007 } ··· 1146 sap->sin_port = htons(port); 1147 } 1148 1149 - static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) 1150 { 1151 struct sockaddr_in myaddr = { 1152 .sin_family = AF_INET, 1153 }; 1154 int err; 1155 unsigned short port = transport->port; 1156 1157 do { 1158 myaddr.sin_port = htons(port); 1159 err = kernel_bind(sock, (struct sockaddr *) &myaddr, 1160 sizeof(myaddr)); 1161 if (err == 0) { 1162 transport->port = port; 1163 - dprintk("RPC: xs_bindresvport bound to port %u\n", 1164 - port); 1165 - return 0; 1166 } 1167 if (port <= xprt_min_resvport) 1168 port = xprt_max_resvport; 1169 else 1170 port--; 1171 } while (err == -EADDRINUSE && port != transport->port); 1172 - 1173 - dprintk("RPC: can't bind to reserved port (%d).\n", -err); 1174 return err; 1175 } 1176 ··· 1234 } 1235 xs_reclassify_socket(sock); 1236 1237 - if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { 1238 sock_release(sock); 1239 goto out; 1240 } ··· 1321 } 1322 xs_reclassify_socket(sock); 1323 1324 - if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { 1325 sock_release(sock); 1326 goto out; 1327 } ··· 1415 dprintk("RPC: xs_connect delayed xprt %p for %lu " 1416 "seconds\n", 1417 xprt, xprt->reestablish_timeout / HZ); 1418 - schedule_delayed_work(&transport->connect_worker, 1419 - xprt->reestablish_timeout); 1420 xprt->reestablish_timeout <<= 1; 1421 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 1422 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 1423 } else { 1424 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 1425 - schedule_delayed_work(&transport->connect_worker, 0); 1426 - 1427 - /* flush_scheduled_work can sleep... */
1428 - if (!RPC_IS_ASYNC(task)) 1429 - flush_scheduled_work(); 1430 } 1431 } 1432 ··· 1479 .set_buffer_size = xs_udp_set_buffer_size, 1480 .reserve_xprt = xprt_reserve_xprt_cong, 1481 .release_xprt = xprt_release_xprt_cong, 1482 - .rpcbind = rpcb_getport, 1483 .set_port = xs_set_port, 1484 .connect = xs_connect, 1485 .buf_alloc = rpc_malloc, ··· 1496 static struct rpc_xprt_ops xs_tcp_ops = { 1497 .reserve_xprt = xprt_reserve_xprt, 1498 .release_xprt = xs_tcp_release_xprt, 1499 - .rpcbind = rpcb_getport, 1500 .set_port = xs_set_port, 1501 .connect = xs_connect, 1502 .buf_alloc = rpc_malloc, ··· 1508 .print_stats = xs_tcp_print_stats, 1509 }; 1510 1511 - static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size) 1512 { 1513 struct rpc_xprt *xprt; 1514 struct sock_xprt *new; 1515 1516 - if (addrlen > sizeof(xprt->addr)) { 1517 dprintk("RPC: xs_setup_xprt: address too large\n"); 1518 return ERR_PTR(-EBADF); 1519 } ··· 1535 return ERR_PTR(-ENOMEM); 1536 } 1537 1538 - memcpy(&xprt->addr, addr, addrlen); 1539 - xprt->addrlen = addrlen; 1540 new->port = xs_get_random_port(); 1541 1542 return xprt; ··· 1546 1547 /** 1548 * xs_setup_udp - Set up transport to use a UDP socket 1549 - * @addr: address of remote server 1550 - * @addrlen: length of address in bytes 1551 - * @to: timeout parameters 1552 * 1553 */ 1554 - struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) 1555 { 1556 struct rpc_xprt *xprt; 1557 struct sock_xprt *transport; 1558 1559 - xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries); 1560 if (IS_ERR(xprt)) 1561 return xprt; 1562 transport = container_of(xprt, struct sock_xprt, xprt); 1563 1564 - if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) 1565 xprt_set_bound(xprt); 1566 1567 xprt->prot = IPPROTO_UDP; ··· 1575 1576 xprt->ops = &xs_udp_ops; 1577 1578 - if (to) 1579 - xprt->timeout = *to; 1580 else 1581 xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); 1582 ··· 1589 1590 /** 1591 * xs_setup_tcp - Set up transport to use a TCP socket 1592 - * @addr: address of remote server 1593 - * @addrlen: length of address in bytes 1594 - * @to: timeout parameters 1595 * 1596 */ 1597 - struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) 1598 { 1599 struct rpc_xprt *xprt; 1600 struct sock_xprt *transport; 1601 1602 - xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries); 1603 if (IS_ERR(xprt)) 1604 return xprt; 1605 transport = container_of(xprt, struct sock_xprt, xprt); 1606 1607 - if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) 1608 xprt_set_bound(xprt); 1609 1610 xprt->prot = IPPROTO_TCP; ··· 1617 1618 xprt->ops = &xs_tcp_ops; 1619 1620 - if (to) 1621 - xprt->timeout = *to; 1622 else 1623 xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); 1624
··· 235 * Connection of transports 236 */ 237 struct delayed_work connect_worker; 238 + struct sockaddr_storage addr; 239 unsigned short port; 240 241 /* ··· 653 654 dprintk("RPC: xs_destroy xprt %p\n", xprt); 655 656 + cancel_rearming_delayed_work(&transport->connect_worker); 657 658 xprt_disconnect(xprt); 659 xs_close(xprt); ··· 1001 /* Try to schedule an autoclose RPC calls */ 1002 set_bit(XPRT_CLOSE_WAIT, &xprt->state); 1003 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 1004 + queue_work(rpciod_workqueue, &xprt->task_cleanup); 1005 default: 1006 xprt_disconnect(xprt); 1007 } ··· 1146 sap->sin_port = htons(port); 1147 } 1148 1149 + static int xs_bind(struct sock_xprt *transport, struct socket *sock) 1150 { 1151 struct sockaddr_in myaddr = { 1152 .sin_family = AF_INET, 1153 }; 1154 + struct sockaddr_in *sa; 1155 int err; 1156 unsigned short port = transport->port; 1157 1158 + if (!transport->xprt.resvport) 1159 + port = 0; 1160 + sa = (struct sockaddr_in *)&transport->addr; 1161 + myaddr.sin_addr = sa->sin_addr; 1162 do { 1163 myaddr.sin_port = htons(port); 1164 err = kernel_bind(sock, (struct sockaddr *) &myaddr, 1165 sizeof(myaddr)); 1166 + if (!transport->xprt.resvport) 1167 + break; 1168 if (err == 0) { 1169 transport->port = port; 1170 + break; 1171 } 1172 if (port <= xprt_min_resvport) 1173 port = xprt_max_resvport; 1174 else 1175 port--; 1176 } while (err == -EADDRINUSE && port != transport->port); 1177 + dprintk("RPC: xs_bind "NIPQUAD_FMT":%u: %s (%d)\n", 1178 + NIPQUAD(myaddr.sin_addr), port, err ? "failed" : "ok", err); 1179 return err; 1180 } 1181 ··· 1229 } 1230 xs_reclassify_socket(sock); 1231 1232 + if (xs_bind(transport, sock)) { 1233 sock_release(sock); 1234 goto out; 1235 } ··· 1316 } 1317 xs_reclassify_socket(sock); 1318 1319 + if (xs_bind(transport, sock)) { 1320 sock_release(sock); 1321 goto out; 1322 } ··· 1410 dprintk("RPC: xs_connect delayed xprt %p for %lu " 1411 "seconds\n", 1412 xprt, xprt->reestablish_timeout / HZ); 1413 + queue_delayed_work(rpciod_workqueue, 1414 + &transport->connect_worker, 1415 + xprt->reestablish_timeout); 1416 xprt->reestablish_timeout <<= 1; 1417 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 1418 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 1419 } else { 1420 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 1421 + queue_delayed_work(rpciod_workqueue, 1422 + &transport->connect_worker, 0); 1423 } 1424 } 1425 ··· 1476 .set_buffer_size = xs_udp_set_buffer_size, 1477 .reserve_xprt = xprt_reserve_xprt_cong, 1478 .release_xprt = xprt_release_xprt_cong, 1479 + .rpcbind = rpcb_getport_async, 1480 .set_port = xs_set_port, 1481 .connect = xs_connect, 1482 .buf_alloc = rpc_malloc, ··· 1493 static struct rpc_xprt_ops xs_tcp_ops = { 1494 .reserve_xprt = xprt_reserve_xprt, 1495 .release_xprt = xs_tcp_release_xprt, 1496 + .rpcbind = rpcb_getport_async, 1497 .set_port = xs_set_port, 1498 .connect = xs_connect, 1499 .buf_alloc = rpc_malloc, ··· 1505 .print_stats = xs_tcp_print_stats, 1506 }; 1507 1508 + static struct rpc_xprt *xs_setup_xprt(struct rpc_xprtsock_create *args, unsigned int slot_table_size) 1509 { 1510 struct rpc_xprt *xprt; 1511 struct sock_xprt *new; 1512 1513 + if (args->addrlen > sizeof(xprt->addr)) { 1514 dprintk("RPC: xs_setup_xprt: address too large\n"); 1515 return ERR_PTR(-EBADF); 1516 } ··· 1532 return ERR_PTR(-ENOMEM); 1533 } 1534 1535 + memcpy(&xprt->addr, args->dstaddr, args->addrlen); 1536 + xprt->addrlen = args->addrlen; 1537 + if (args->srcaddr) 1538 + memcpy(&new->addr, args->srcaddr, args->addrlen); 
1539 new->port = xs_get_random_port(); 1540 1541 return xprt; ··· 1541 1542 /** 1543 * xs_setup_udp - Set up transport to use a UDP socket 1544 + * @args: rpc transport creation arguments 1545 * 1546 */ 1547 + struct rpc_xprt *xs_setup_udp(struct rpc_xprtsock_create *args) 1548 { 1549 struct rpc_xprt *xprt; 1550 struct sock_xprt *transport; 1551 1552 + xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); 1553 if (IS_ERR(xprt)) 1554 return xprt; 1555 transport = container_of(xprt, struct sock_xprt, xprt); 1556 1557 + if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) 1558 xprt_set_bound(xprt); 1559 1560 xprt->prot = IPPROTO_UDP; ··· 1572 1573 xprt->ops = &xs_udp_ops; 1574 1575 + if (args->timeout) 1576 + xprt->timeout = *args->timeout; 1577 else 1578 xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); 1579 ··· 1586 1587 /** 1588 * xs_setup_tcp - Set up transport to use a TCP socket 1589 + * @args: rpc transport creation arguments 1590 * 1591 */ 1592 + struct rpc_xprt *xs_setup_tcp(struct rpc_xprtsock_create *args) 1593 { 1594 struct rpc_xprt *xprt; 1595 struct sock_xprt *transport; 1596 1597 + xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 1598 if (IS_ERR(xprt)) 1599 return xprt; 1600 transport = container_of(xprt, struct sock_xprt, xprt); 1601 1602 + if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) 1603 xprt_set_bound(xprt); 1604 1605 xprt->prot = IPPROTO_TCP; ··· 1616 1617 xprt->ops = &xs_tcp_ops; 1618 1619 + if (args->timeout) 1620 + xprt->timeout = *args->timeout; 1621 else 1622 xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); 1623
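The reworked xs_bind() above binds to a caller-supplied source address and, when a reserved port is wanted, searches downward from the transport's starting port, wrapping at the bottom of the privileged range, until the bind succeeds or the search comes full circle. A userspace sketch of that loop follows; the range constants are illustrative stand-ins for the xprt_min_resvport/xprt_max_resvport sysctls, and bind_resvport() is a hypothetical helper.

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

#define MIN_RESVPORT    665             /* stand-in for xprt_min_resvport */
#define MAX_RESVPORT    1023            /* stand-in for xprt_max_resvport */

/* Bind fd to the given source address on a reserved port, walking
 * downward from 'start' and wrapping once, as xs_bind() does.
 * Returns the bound port, or -1 if the whole range is in use. */
static int bind_resvport(int fd, struct in_addr src, unsigned short start)
{
        struct sockaddr_in me;
        unsigned short port = start;
        int err;

        memset(&me, 0, sizeof(me));
        me.sin_family = AF_INET;
        me.sin_addr = src;              /* requested source address */

        do {
                me.sin_port = htons(port);
                err = bind(fd, (struct sockaddr *)&me, sizeof(me));
                if (err == 0)
                        return port;    /* bound successfully */
                if (port <= MIN_RESVPORT)
                        port = MAX_RESVPORT;
                else
                        port--;
        } while (errno == EADDRINUSE && port != start);

        return -1;
}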