Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nfs-for-4.9-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
"Highlights include:

Stable bugfixes:
- sunrpc: fix write space race causing stalls
- NFS: Fix inode corruption in nfs_prime_dcache()
- NFSv4: Don't report revoked delegations as valid in nfs_have_delegation()
- NFSv4: nfs4_copy_delegation_stateid() must fail if the delegation is invalid
- NFSv4: Open state recovery must account for file permission changes
- NFSv4.2: Fix a reference leak in nfs42_proc_layoutstats_generic

Features:
- Add support for tracking multiple layout types with an ordered list
- Add support for using multiple backchannel threads on the client
- Add support for pNFS file layout session trunking
- Delay xprtrdma use of DMA API (for device driver removal)
- Add support for xprtrdma remote invalidation
- Add support for larger xprtrdma inline thresholds
- Use a scatter/gather list for sending xprtrdma RPC calls
- Add support for the CB_NOTIFY_LOCK callback
- Improve hashing sunrpc auth_creds by using both uid and gid

Bugfixes:
- Fix xprtrdma use of DMA API
- Validate filenames before adding to the dcache
- Fix corruption of xdr->nwords in xdr_copy_to_scratch
- Fix setting buffer length in xdr_set_next_buffer()
- Don't deadlock the state manager on the SEQUENCE status flags
- Various delegation and stateid related fixes
- Retry operations if an interrupted slot receives EREMOTEIO
- Make nfs boot time y2038 safe"

* tag 'nfs-for-4.9-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (100 commits)
NFSv4.2: Fix a reference leak in nfs42_proc_layoutstats_generic
fs: nfs: Make nfs boot time y2038 safe
sunrpc: replace generic auth_cred hash with auth-specific function
sunrpc: add RPCSEC_GSS hash_cred() function
sunrpc: add auth_unix hash_cred() function
sunrpc: add generic_auth hash_cred() function
sunrpc: add hash_cred() function to rpc_authops struct
Retry operation on EREMOTEIO on an interrupted slot
pNFS: Fix atime updates on pNFS clients
sunrpc: queue work on system_power_efficient_wq
NFSv4.1: Even if the stateid is OK, we may need to recover the open modes
NFSv4: If recovery failed for a specific open stateid, then don't retry
NFSv4: Fix retry issues with nfs41_test/free_stateid
NFSv4: Open state recovery must account for file permission changes
NFSv4: Mark the lock and open stateids as invalid after freeing them
NFSv4: Don't test open_stateid unless it is set
NFSv4: nfs4_do_handle_exception() handle revoke/expiry of a single stateid
NFS: Always call nfs_inode_find_state_and_recover() when revoking a delegation
NFSv4: Fix a race when updating an open_stateid
NFSv4: Fix a race in nfs_inode_reclaim_delegation()
...

+2217 -986
+12
Documentation/kernel-parameters.txt
··· 2470 2470 nfsrootdebug [NFS] enable nfsroot debugging messages. 2471 2471 See Documentation/filesystems/nfs/nfsroot.txt. 2472 2472 2473 + nfs.callback_nr_threads= 2474 + [NFSv4] set the total number of threads that the 2475 + NFS client will assign to service NFSv4 callback 2476 + requests. 2477 + 2473 2478 nfs.callback_tcpport= 2474 2479 [NFS] set the TCP port on which the NFSv4 callback 2475 2480 channel should listen. ··· 2497 2492 number for the readdir() and stat() syscalls instead 2498 2493 of returning the full 64-bit number. 2499 2494 The default is to return 64-bit inode numbers. 2495 + 2496 + nfs.max_session_cb_slots= 2497 + [NFSv4.1] Sets the maximum number of session 2498 + slots the client will assign to the callback 2499 + channel. This determines the maximum number of 2500 + callbacks the client will process in parallel for 2501 + a particular server. 2500 2502 2501 2503 nfs.max_session_slots= 2502 2504 [NFSv4.1] Sets the maximum number of session slots
+1 -1
fs/nfs/cache_lib.c
··· 76 76 77 77 dreq = container_of(d, struct nfs_cache_defer_req, deferred_req); 78 78 79 - complete_all(&dreq->completion); 79 + complete(&dreq->completion); 80 80 nfs_cache_defer_req_put(dreq); 81 81 } 82 82
+54 -82
fs/nfs/callback.c
··· 31 31 struct nfs_callback_data { 32 32 unsigned int users; 33 33 struct svc_serv *serv; 34 - struct svc_rqst *rqst; 35 - struct task_struct *task; 36 34 }; 37 35 38 36 static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1]; ··· 87 89 return 0; 88 90 } 89 91 90 - /* 91 - * Prepare to bring up the NFSv4 callback service 92 - */ 93 - static struct svc_rqst * 94 - nfs4_callback_up(struct svc_serv *serv) 95 - { 96 - return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE); 97 - } 98 - 99 92 #if defined(CONFIG_NFS_V4_1) 100 93 /* 101 94 * The callback service for NFSv4.1 callbacks ··· 128 139 return 0; 129 140 } 130 141 131 - /* 132 - * Bring up the NFSv4.1 callback service 133 - */ 134 - static struct svc_rqst * 135 - nfs41_callback_up(struct svc_serv *serv) 136 - { 137 - struct svc_rqst *rqstp; 138 - 139 - INIT_LIST_HEAD(&serv->sv_cb_list); 140 - spin_lock_init(&serv->sv_cb_lock); 141 - init_waitqueue_head(&serv->sv_cb_waitq); 142 - rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE); 143 - dprintk("--> %s return %d\n", __func__, PTR_ERR_OR_ZERO(rqstp)); 144 - return rqstp; 145 - } 146 - 147 - static void nfs_minorversion_callback_svc_setup(struct svc_serv *serv, 148 - struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp)) 149 - { 150 - *rqstpp = nfs41_callback_up(serv); 151 - *callback_svc = nfs41_callback_svc; 152 - } 153 - 154 142 static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, 155 143 struct svc_serv *serv) 156 144 { ··· 139 173 xprt->bc_serv = serv; 140 174 } 141 175 #else 142 - static void nfs_minorversion_callback_svc_setup(struct svc_serv *serv, 143 - struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp)) 144 - { 145 - *rqstpp = ERR_PTR(-ENOTSUPP); 146 - *callback_svc = ERR_PTR(-ENOTSUPP); 147 - } 148 - 149 176 static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, 150 177 struct svc_serv *serv) 151 178 { ··· 148 189 static int 
nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt, 149 190 struct svc_serv *serv) 150 191 { 151 - struct svc_rqst *rqstp; 152 - int (*callback_svc)(void *vrqstp); 153 - struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; 192 + int nrservs = nfs_callback_nr_threads; 154 193 int ret; 155 194 156 195 nfs_callback_bc_serv(minorversion, xprt, serv); 157 196 158 - if (cb_info->task) 197 + if (nrservs < NFS4_MIN_NR_CALLBACK_THREADS) 198 + nrservs = NFS4_MIN_NR_CALLBACK_THREADS; 199 + 200 + if (serv->sv_nrthreads-1 == nrservs) 159 201 return 0; 160 202 161 - switch (minorversion) { 162 - case 0: 163 - /* v4.0 callback setup */ 164 - rqstp = nfs4_callback_up(serv); 165 - callback_svc = nfs4_callback_svc; 166 - break; 167 - default: 168 - nfs_minorversion_callback_svc_setup(serv, 169 - &rqstp, &callback_svc); 170 - } 171 - 172 - if (IS_ERR(rqstp)) 173 - return PTR_ERR(rqstp); 174 - 175 - svc_sock_update_bufs(serv); 176 - 177 - cb_info->serv = serv; 178 - cb_info->rqst = rqstp; 179 - cb_info->task = kthread_create(callback_svc, cb_info->rqst, 180 - "nfsv4.%u-svc", minorversion); 181 - if (IS_ERR(cb_info->task)) { 182 - ret = PTR_ERR(cb_info->task); 183 - svc_exit_thread(cb_info->rqst); 184 - cb_info->rqst = NULL; 185 - cb_info->task = NULL; 203 + ret = serv->sv_ops->svo_setup(serv, NULL, nrservs); 204 + if (ret) { 205 + serv->sv_ops->svo_setup(serv, NULL, 0); 186 206 return ret; 187 207 } 188 - rqstp->rq_task = cb_info->task; 189 - wake_up_process(cb_info->task); 190 208 dprintk("nfs_callback_up: service started\n"); 191 209 return 0; 192 210 } ··· 217 281 return ret; 218 282 } 219 283 220 - static struct svc_serv_ops nfs_cb_sv_ops = { 284 + static struct svc_serv_ops nfs40_cb_sv_ops = { 285 + .svo_function = nfs4_callback_svc, 221 286 .svo_enqueue_xprt = svc_xprt_do_enqueue, 287 + .svo_setup = svc_set_num_threads, 288 + .svo_module = THIS_MODULE, 222 289 }; 290 + #if defined(CONFIG_NFS_V4_1) 291 + static struct svc_serv_ops nfs41_cb_sv_ops = { 
292 + .svo_function = nfs41_callback_svc, 293 + .svo_enqueue_xprt = svc_xprt_do_enqueue, 294 + .svo_setup = svc_set_num_threads, 295 + .svo_module = THIS_MODULE, 296 + }; 297 + 298 + struct svc_serv_ops *nfs4_cb_sv_ops[] = { 299 + [0] = &nfs40_cb_sv_ops, 300 + [1] = &nfs41_cb_sv_ops, 301 + }; 302 + #else 303 + struct svc_serv_ops *nfs4_cb_sv_ops[] = { 304 + [0] = &nfs40_cb_sv_ops, 305 + [1] = NULL, 306 + }; 307 + #endif 223 308 224 309 static struct svc_serv *nfs_callback_create_svc(int minorversion) 225 310 { 226 311 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; 227 312 struct svc_serv *serv; 313 + struct svc_serv_ops *sv_ops; 228 314 229 315 /* 230 316 * Check whether we're already up and running. 231 317 */ 232 - if (cb_info->task) { 318 + if (cb_info->serv) { 233 319 /* 234 320 * Note: increase service usage, because later in case of error 235 321 * svc_destroy() will be called. ··· 259 301 svc_get(cb_info->serv); 260 302 return cb_info->serv; 261 303 } 304 + 305 + switch (minorversion) { 306 + case 0: 307 + sv_ops = nfs4_cb_sv_ops[0]; 308 + break; 309 + default: 310 + sv_ops = nfs4_cb_sv_ops[1]; 311 + } 312 + 313 + if (sv_ops == NULL) 314 + return ERR_PTR(-ENOTSUPP); 262 315 263 316 /* 264 317 * Sanity check: if there's no task, ··· 279 310 printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n", 280 311 cb_info->users); 281 312 282 - serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, &nfs_cb_sv_ops); 313 + serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); 283 314 if (!serv) { 284 315 printk(KERN_ERR "nfs_callback_create_svc: create service failed\n"); 285 316 return ERR_PTR(-ENOMEM); 286 317 } 318 + cb_info->serv = serv; 287 319 /* As there is only one thread we need to over-ride the 288 320 * default maximum of 80 connections 289 321 */ ··· 327 357 * thread exits. 
328 358 */ 329 359 err_net: 360 + if (!cb_info->users) 361 + cb_info->serv = NULL; 330 362 svc_destroy(serv); 331 363 err_create: 332 364 mutex_unlock(&nfs_callback_mutex); ··· 346 374 void nfs_callback_down(int minorversion, struct net *net) 347 375 { 348 376 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; 377 + struct svc_serv *serv; 349 378 350 379 mutex_lock(&nfs_callback_mutex); 351 - nfs_callback_down_net(minorversion, cb_info->serv, net); 380 + serv = cb_info->serv; 381 + nfs_callback_down_net(minorversion, serv, net); 352 382 cb_info->users--; 353 - if (cb_info->users == 0 && cb_info->task != NULL) { 354 - kthread_stop(cb_info->task); 355 - dprintk("nfs_callback_down: service stopped\n"); 356 - svc_exit_thread(cb_info->rqst); 383 + if (cb_info->users == 0) { 384 + svc_get(serv); 385 + serv->sv_ops->svo_setup(serv, NULL, 0); 386 + svc_destroy(serv); 357 387 dprintk("nfs_callback_down: service destroyed\n"); 358 388 cb_info->serv = NULL; 359 - cb_info->rqst = NULL; 360 - cb_info->task = NULL; 361 389 } 362 390 mutex_unlock(&nfs_callback_mutex); 363 391 }
+12
fs/nfs/callback.h
··· 179 179 struct cb_devicenotifyargs *args, 180 180 void *dummy, struct cb_process_state *cps); 181 181 182 + struct cb_notify_lock_args { 183 + struct nfs_fh cbnl_fh; 184 + struct nfs_lowner cbnl_owner; 185 + bool cbnl_valid; 186 + }; 187 + 188 + extern __be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, 189 + void *dummy, 190 + struct cb_process_state *cps); 182 191 #endif /* CONFIG_NFS_V4_1 */ 183 192 extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *); 184 193 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, ··· 207 198 #define NFS41_BC_MIN_CALLBACKS 1 208 199 #define NFS41_BC_MAX_CALLBACKS 1 209 200 201 + #define NFS4_MIN_NR_CALLBACK_THREADS 1 202 + 210 203 extern unsigned int nfs_callback_set_tcpport; 204 + extern unsigned short nfs_callback_nr_threads; 211 205 212 206 #endif /* __LINUX_FS_NFS_CALLBACK_H */
+16
fs/nfs/callback_proc.c
··· 628 628 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 629 629 return status; 630 630 } 631 + 632 + __be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, void *dummy, 633 + struct cb_process_state *cps) 634 + { 635 + if (!cps->clp) /* set in cb_sequence */ 636 + return htonl(NFS4ERR_OP_NOT_IN_SESSION); 637 + 638 + dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n", 639 + rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); 640 + 641 + /* Don't wake anybody if the string looked bogus */ 642 + if (args->cbnl_valid) 643 + __wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args); 644 + 645 + return htonl(NFS4_OK); 646 + } 631 647 #endif /* CONFIG_NFS_V4_1 */
+51 -2
fs/nfs/callback_xdr.c
··· 35 35 (1 + 3) * 4) // seqid, 3 slotids 36 36 #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 37 37 #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 38 + #define CB_OP_NOTIFY_LOCK_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 38 39 #endif /* CONFIG_NFS_V4_1 */ 39 40 40 41 #define NFSDBG_FACILITY NFSDBG_CALLBACK ··· 73 72 return xdr_ressize_check(rqstp, p); 74 73 } 75 74 76 - static __be32 *read_buf(struct xdr_stream *xdr, int nbytes) 75 + static __be32 *read_buf(struct xdr_stream *xdr, size_t nbytes) 77 76 { 78 77 __be32 *p; 79 78 ··· 535 534 return 0; 536 535 } 537 536 537 + static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_args *args) 538 + { 539 + __be32 *p; 540 + unsigned int len; 541 + 542 + p = read_buf(xdr, 12); 543 + if (unlikely(p == NULL)) 544 + return htonl(NFS4ERR_BADXDR); 545 + 546 + p = xdr_decode_hyper(p, &args->cbnl_owner.clientid); 547 + len = be32_to_cpu(*p); 548 + 549 + p = read_buf(xdr, len); 550 + if (unlikely(p == NULL)) 551 + return htonl(NFS4ERR_BADXDR); 552 + 553 + /* Only try to decode if the length is right */ 554 + if (len == 20) { 555 + p += 2; /* skip "lock id:" */ 556 + args->cbnl_owner.s_dev = be32_to_cpu(*p++); 557 + xdr_decode_hyper(p, &args->cbnl_owner.id); 558 + args->cbnl_valid = true; 559 + } else { 560 + args->cbnl_owner.s_dev = 0; 561 + args->cbnl_owner.id = 0; 562 + args->cbnl_valid = false; 563 + } 564 + return 0; 565 + } 566 + 567 + static __be32 decode_notify_lock_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_notify_lock_args *args) 568 + { 569 + __be32 status; 570 + 571 + status = decode_fh(xdr, &args->cbnl_fh); 572 + if (unlikely(status != 0)) 573 + goto out; 574 + status = decode_lockowner(xdr, args); 575 + out: 576 + dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 577 + return status; 578 + } 579 + 538 580 #endif /* CONFIG_NFS_V4_1 */ 539 581 540 582 static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) ··· 790 746 case 
OP_CB_RECALL_SLOT: 791 747 case OP_CB_LAYOUTRECALL: 792 748 case OP_CB_NOTIFY_DEVICEID: 749 + case OP_CB_NOTIFY_LOCK: 793 750 *op = &callback_ops[op_nr]; 794 751 break; 795 752 ··· 798 753 case OP_CB_PUSH_DELEG: 799 754 case OP_CB_RECALLABLE_OBJ_AVAIL: 800 755 case OP_CB_WANTS_CANCELLED: 801 - case OP_CB_NOTIFY_LOCK: 802 756 return htonl(NFS4ERR_NOTSUPP); 803 757 804 758 default: ··· 1049 1005 .process_op = (callback_process_op_t)nfs4_callback_recallslot, 1050 1006 .decode_args = (callback_decode_arg_t)decode_recallslot_args, 1051 1007 .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ, 1008 + }, 1009 + [OP_CB_NOTIFY_LOCK] = { 1010 + .process_op = (callback_process_op_t)nfs4_callback_notify_lock, 1011 + .decode_args = (callback_decode_arg_t)decode_notify_lock_args, 1012 + .res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ, 1052 1013 }, 1053 1014 #endif /* CONFIG_NFS_V4_1 */ 1054 1015 };
+7 -3
fs/nfs/client.c
··· 313 313 continue; 314 314 /* Match the full socket address */ 315 315 if (!rpc_cmp_addr_port(sap, clap)) 316 - continue; 316 + /* Match all xprt_switch full socket addresses */ 317 + if (!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient, 318 + sap)) 319 + continue; 317 320 318 321 atomic_inc(&clp->cl_count); 319 322 return clp; ··· 788 785 } 789 786 790 787 fsinfo.fattr = fattr; 791 - fsinfo.layouttype = 0; 788 + fsinfo.nlayouttypes = 0; 789 + memset(fsinfo.layouttype, 0, sizeof(fsinfo.layouttype)); 792 790 error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo); 793 791 if (error < 0) 794 792 goto out_error; ··· 1082 1078 idr_init(&nn->cb_ident_idr); 1083 1079 #endif 1084 1080 spin_lock_init(&nn->nfs_client_lock); 1085 - nn->boot_time = CURRENT_TIME; 1081 + nn->boot_time = ktime_get_real(); 1086 1082 } 1087 1083 1088 1084 #ifdef CONFIG_PROC_FS
+196 -25
fs/nfs/delegation.c
··· 41 41 set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); 42 42 } 43 43 44 + static bool 45 + nfs4_is_valid_delegation(const struct nfs_delegation *delegation, 46 + fmode_t flags) 47 + { 48 + if (delegation != NULL && (delegation->type & flags) == flags && 49 + !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) && 50 + !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 51 + return true; 52 + return false; 53 + } 54 + 44 55 static int 45 56 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) 46 57 { ··· 61 50 flags &= FMODE_READ|FMODE_WRITE; 62 51 rcu_read_lock(); 63 52 delegation = rcu_dereference(NFS_I(inode)->delegation); 64 - if (delegation != NULL && (delegation->type & flags) == flags && 65 - !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { 53 + if (nfs4_is_valid_delegation(delegation, flags)) { 66 54 if (mark) 67 55 nfs_mark_delegation_referenced(delegation); 68 56 ret = 1; ··· 195 185 rcu_read_unlock(); 196 186 put_rpccred(oldcred); 197 187 trace_nfs4_reclaim_delegation(inode, res->delegation_type); 198 - } else { 199 - /* We appear to have raced with a delegation return. */ 200 - spin_unlock(&delegation->lock); 201 - rcu_read_unlock(); 202 - nfs_inode_set_delegation(inode, cred, res); 188 + return; 203 189 } 204 - } else { 205 - rcu_read_unlock(); 190 + /* We appear to have raced with a delegation return. 
*/ 191 + spin_unlock(&delegation->lock); 206 192 } 193 + rcu_read_unlock(); 194 + nfs_inode_set_delegation(inode, cred, res); 207 195 } 208 196 209 197 static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync) ··· 650 642 rcu_read_unlock(); 651 643 } 652 644 653 - static void nfs_revoke_delegation(struct inode *inode) 645 + static void nfs_mark_delegation_revoked(struct nfs_server *server, 646 + struct nfs_delegation *delegation) 654 647 { 655 - struct nfs_delegation *delegation; 656 - rcu_read_lock(); 657 - delegation = rcu_dereference(NFS_I(inode)->delegation); 658 - if (delegation != NULL) { 659 - set_bit(NFS_DELEGATION_REVOKED, &delegation->flags); 660 - nfs_mark_return_delegation(NFS_SERVER(inode), delegation); 661 - } 662 - rcu_read_unlock(); 648 + set_bit(NFS_DELEGATION_REVOKED, &delegation->flags); 649 + delegation->stateid.type = NFS4_INVALID_STATEID_TYPE; 650 + nfs_mark_return_delegation(server, delegation); 663 651 } 664 652 665 - void nfs_remove_bad_delegation(struct inode *inode) 653 + static bool nfs_revoke_delegation(struct inode *inode, 654 + const nfs4_stateid *stateid) 655 + { 656 + struct nfs_delegation *delegation; 657 + nfs4_stateid tmp; 658 + bool ret = false; 659 + 660 + rcu_read_lock(); 661 + delegation = rcu_dereference(NFS_I(inode)->delegation); 662 + if (delegation == NULL) 663 + goto out; 664 + if (stateid == NULL) { 665 + nfs4_stateid_copy(&tmp, &delegation->stateid); 666 + stateid = &tmp; 667 + } else if (!nfs4_stateid_match(stateid, &delegation->stateid)) 668 + goto out; 669 + nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation); 670 + ret = true; 671 + out: 672 + rcu_read_unlock(); 673 + if (ret) 674 + nfs_inode_find_state_and_recover(inode, stateid); 675 + return ret; 676 + } 677 + 678 + void nfs_remove_bad_delegation(struct inode *inode, 679 + const nfs4_stateid *stateid) 666 680 { 667 681 struct nfs_delegation *delegation; 668 682 669 - nfs_revoke_delegation(inode); 683 + if 
(!nfs_revoke_delegation(inode, stateid)) 684 + return; 670 685 delegation = nfs_inode_detach_delegation(inode); 671 - if (delegation) { 672 - nfs_inode_find_state_and_recover(inode, &delegation->stateid); 686 + if (delegation) 673 687 nfs_free_delegation(delegation); 674 - } 675 688 } 676 689 EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation); 677 690 ··· 815 786 { 816 787 struct nfs_delegation *delegation; 817 788 818 - list_for_each_entry_rcu(delegation, &server->delegations, super_list) 789 + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 790 + /* 791 + * If the delegation may have been admin revoked, then we 792 + * cannot reclaim it. 793 + */ 794 + if (test_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) 795 + continue; 819 796 set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 797 + } 820 798 } 821 799 822 800 /** ··· 887 851 rcu_read_unlock(); 888 852 } 889 853 854 + static inline bool nfs4_server_rebooted(const struct nfs_client *clp) 855 + { 856 + return (clp->cl_state & (BIT(NFS4CLNT_CHECK_LEASE) | 857 + BIT(NFS4CLNT_LEASE_EXPIRED) | 858 + BIT(NFS4CLNT_SESSION_RESET))) != 0; 859 + } 860 + 861 + static void nfs_mark_test_expired_delegation(struct nfs_server *server, 862 + struct nfs_delegation *delegation) 863 + { 864 + if (delegation->stateid.type == NFS4_INVALID_STATEID_TYPE) 865 + return; 866 + clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 867 + set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 868 + set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state); 869 + } 870 + 871 + static void nfs_inode_mark_test_expired_delegation(struct nfs_server *server, 872 + struct inode *inode) 873 + { 874 + struct nfs_delegation *delegation; 875 + 876 + rcu_read_lock(); 877 + delegation = rcu_dereference(NFS_I(inode)->delegation); 878 + if (delegation) 879 + nfs_mark_test_expired_delegation(server, delegation); 880 + rcu_read_unlock(); 881 + 882 + } 883 + 884 + static void 
nfs_delegation_mark_test_expired_server(struct nfs_server *server) 885 + { 886 + struct nfs_delegation *delegation; 887 + 888 + list_for_each_entry_rcu(delegation, &server->delegations, super_list) 889 + nfs_mark_test_expired_delegation(server, delegation); 890 + } 891 + 892 + /** 893 + * nfs_mark_test_expired_all_delegations - mark all delegations for testing 894 + * @clp: nfs_client to process 895 + * 896 + * Iterates through all the delegations associated with this server and 897 + * marks them as needing to be checked for validity. 898 + */ 899 + void nfs_mark_test_expired_all_delegations(struct nfs_client *clp) 900 + { 901 + struct nfs_server *server; 902 + 903 + rcu_read_lock(); 904 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) 905 + nfs_delegation_mark_test_expired_server(server); 906 + rcu_read_unlock(); 907 + } 908 + 909 + /** 910 + * nfs_reap_expired_delegations - reap expired delegations 911 + * @clp: nfs_client to process 912 + * 913 + * Iterates through all the delegations associated with this server and 914 + * checks if they have may have been revoked. This function is usually 915 + * expected to be called in cases where the server may have lost its 916 + * lease. 
917 + */ 918 + void nfs_reap_expired_delegations(struct nfs_client *clp) 919 + { 920 + const struct nfs4_minor_version_ops *ops = clp->cl_mvops; 921 + struct nfs_delegation *delegation; 922 + struct nfs_server *server; 923 + struct inode *inode; 924 + struct rpc_cred *cred; 925 + nfs4_stateid stateid; 926 + 927 + restart: 928 + rcu_read_lock(); 929 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 930 + list_for_each_entry_rcu(delegation, &server->delegations, 931 + super_list) { 932 + if (test_bit(NFS_DELEGATION_RETURNING, 933 + &delegation->flags)) 934 + continue; 935 + if (test_bit(NFS_DELEGATION_TEST_EXPIRED, 936 + &delegation->flags) == 0) 937 + continue; 938 + if (!nfs_sb_active(server->super)) 939 + continue; 940 + inode = nfs_delegation_grab_inode(delegation); 941 + if (inode == NULL) { 942 + rcu_read_unlock(); 943 + nfs_sb_deactive(server->super); 944 + goto restart; 945 + } 946 + cred = get_rpccred_rcu(delegation->cred); 947 + nfs4_stateid_copy(&stateid, &delegation->stateid); 948 + clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); 949 + rcu_read_unlock(); 950 + if (cred != NULL && 951 + ops->test_and_free_expired(server, &stateid, cred) < 0) { 952 + nfs_revoke_delegation(inode, &stateid); 953 + nfs_inode_find_state_and_recover(inode, &stateid); 954 + } 955 + put_rpccred(cred); 956 + if (nfs4_server_rebooted(clp)) { 957 + nfs_inode_mark_test_expired_delegation(server,inode); 958 + iput(inode); 959 + nfs_sb_deactive(server->super); 960 + return; 961 + } 962 + iput(inode); 963 + nfs_sb_deactive(server->super); 964 + goto restart; 965 + } 966 + } 967 + rcu_read_unlock(); 968 + } 969 + 970 + void nfs_inode_find_delegation_state_and_recover(struct inode *inode, 971 + const nfs4_stateid *stateid) 972 + { 973 + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 974 + struct nfs_delegation *delegation; 975 + bool found = false; 976 + 977 + rcu_read_lock(); 978 + delegation = rcu_dereference(NFS_I(inode)->delegation); 979 + if 
(delegation && 980 + nfs4_stateid_match_other(&delegation->stateid, stateid)) { 981 + nfs_mark_test_expired_delegation(NFS_SERVER(inode), delegation); 982 + found = true; 983 + } 984 + rcu_read_unlock(); 985 + if (found) 986 + nfs4_schedule_state_manager(clp); 987 + } 988 + 890 989 /** 891 990 * nfs_delegations_present - check for existence of delegations 892 991 * @clp: client state handle ··· 1064 893 flags &= FMODE_READ|FMODE_WRITE; 1065 894 rcu_read_lock(); 1066 895 delegation = rcu_dereference(nfsi->delegation); 1067 - ret = (delegation != NULL && (delegation->type & flags) == flags); 896 + ret = nfs4_is_valid_delegation(delegation, flags); 1068 897 if (ret) { 1069 898 nfs4_stateid_copy(dst, &delegation->stateid); 1070 899 nfs_mark_delegation_referenced(delegation);
+7 -1
fs/nfs/delegation.h
··· 32 32 NFS_DELEGATION_REFERENCED, 33 33 NFS_DELEGATION_RETURNING, 34 34 NFS_DELEGATION_REVOKED, 35 + NFS_DELEGATION_TEST_EXPIRED, 35 36 }; 36 37 37 38 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); ··· 48 47 void nfs_expire_unreferenced_delegations(struct nfs_client *clp); 49 48 int nfs_client_return_marked_delegations(struct nfs_client *clp); 50 49 int nfs_delegations_present(struct nfs_client *clp); 51 - void nfs_remove_bad_delegation(struct inode *inode); 50 + void nfs_remove_bad_delegation(struct inode *inode, const nfs4_stateid *stateid); 52 51 53 52 void nfs_delegation_mark_reclaim(struct nfs_client *clp); 54 53 void nfs_delegation_reap_unclaimed(struct nfs_client *clp); 54 + 55 + void nfs_mark_test_expired_all_delegations(struct nfs_client *clp); 56 + void nfs_reap_expired_delegations(struct nfs_client *clp); 55 57 56 58 /* NFSv4 delegation-related procedures */ 57 59 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync); ··· 66 62 int nfs4_have_delegation(struct inode *inode, fmode_t flags); 67 63 int nfs4_check_delegation(struct inode *inode, fmode_t flags); 68 64 bool nfs4_delegation_flush_on_close(const struct inode *inode); 65 + void nfs_inode_find_delegation_state_and_recover(struct inode *inode, 66 + const nfs4_stateid *stateid); 69 67 70 68 #endif 71 69
+19 -5
fs/nfs/dir.c
··· 435 435 return 0; 436 436 437 437 nfsi = NFS_I(inode); 438 - if (entry->fattr->fileid == nfsi->fileid) 439 - return 1; 440 - if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0) 441 - return 1; 442 - return 0; 438 + if (entry->fattr->fileid != nfsi->fileid) 439 + return 0; 440 + if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0) 441 + return 0; 442 + return 1; 443 443 } 444 444 445 445 static ··· 496 496 return; 497 497 if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID)) 498 498 return; 499 + if (filename.len == 0) 500 + return; 501 + /* Validate that the name doesn't contain any illegal '\0' */ 502 + if (strnlen(filename.name, filename.len) != filename.len) 503 + return; 504 + /* ...or '/' */ 505 + if (strnchr(filename.name, filename.len, '/')) 506 + return; 499 507 if (filename.name[0] == '.') { 500 508 if (filename.len == 1) 501 509 return; ··· 525 517 &entry->fattr->fsid)) 526 518 goto out; 527 519 if (nfs_same_file(dentry, entry)) { 520 + if (!entry->fh->size) 521 + goto out; 528 522 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 529 523 status = nfs_refresh_inode(d_inode(dentry), entry->fattr); 530 524 if (!status) ··· 538 528 dentry = NULL; 539 529 goto again; 540 530 } 531 + } 532 + if (!entry->fh->size) { 533 + d_lookup_done(dentry); 534 + goto out; 541 535 } 542 536 543 537 inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label);
+1 -1
fs/nfs/direct.c
··· 387 387 dreq->iocb->ki_complete(dreq->iocb, res, 0); 388 388 } 389 389 390 - complete_all(&dreq->completion); 390 + complete(&dreq->completion); 391 391 392 392 nfs_direct_req_release(dreq); 393 393 }
+4 -7
fs/nfs/file.c
··· 520 520 .invalidatepage = nfs_invalidate_page, 521 521 .releasepage = nfs_release_page, 522 522 .direct_IO = nfs_direct_IO, 523 + #ifdef CONFIG_MIGRATION 523 524 .migratepage = nfs_migrate_page, 525 + #endif 524 526 .launder_page = nfs_launder_page, 525 527 .is_dirty_writeback = nfs_check_dirty_writeback, 526 528 .error_remove_page = generic_error_remove_page, ··· 687 685 goto out; 688 686 } 689 687 690 - static int do_vfs_lock(struct file *file, struct file_lock *fl) 691 - { 692 - return locks_lock_file_wait(file, fl); 693 - } 694 - 695 688 static int 696 689 do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) 697 690 { ··· 719 722 if (!is_local) 720 723 status = NFS_PROTO(inode)->lock(filp, cmd, fl); 721 724 else 722 - status = do_vfs_lock(filp, fl); 725 + status = locks_lock_file_wait(filp, fl); 723 726 return status; 724 727 } 725 728 ··· 744 747 if (!is_local) 745 748 status = NFS_PROTO(inode)->lock(filp, cmd, fl); 746 749 else 747 - status = do_vfs_lock(filp, fl); 750 + status = locks_lock_file_wait(filp, fl); 748 751 if (status < 0) 749 752 goto out; 750 753
+1 -1
fs/nfs/flexfilelayout/flexfilelayout.c
··· 1080 1080 case -NFS4ERR_BAD_STATEID: 1081 1081 if (state == NULL) 1082 1082 break; 1083 - nfs_remove_bad_delegation(state->inode); 1083 + nfs_remove_bad_delegation(state->inode, NULL); 1084 1084 case -NFS4ERR_OPENMODE: 1085 1085 if (state == NULL) 1086 1086 break;
+3 -4
fs/nfs/internal.h
··· 534 534 } 535 535 #endif 536 536 537 - 538 537 #ifdef CONFIG_MIGRATION 539 538 extern int nfs_migrate_page(struct address_space *, 540 539 struct page *, struct page *, enum migrate_mode); 541 - #else 542 - #define nfs_migrate_page NULL 543 540 #endif 544 541 545 542 static inline int ··· 559 562 extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq); 560 563 561 564 /* nfs4proc.c */ 562 - extern void __nfs4_read_done_cb(struct nfs_pgio_header *); 563 565 extern struct nfs_client *nfs4_init_client(struct nfs_client *clp, 564 566 const struct nfs_client_initdata *); 565 567 extern int nfs40_walk_client_list(struct nfs_client *clp, ··· 567 571 extern int nfs41_walk_client_list(struct nfs_client *clp, 568 572 struct nfs_client **result, 569 573 struct rpc_cred *cred); 574 + extern int nfs4_test_session_trunk(struct rpc_clnt *, 575 + struct rpc_xprt *, 576 + void *); 570 577 571 578 static inline struct inode *nfs_igrab_and_active(struct inode *inode) 572 579 {
+1 -1
fs/nfs/netns.h
··· 29 29 int cb_users[NFS4_MAX_MINOR_VERSION + 1]; 30 30 #endif 31 31 spinlock_t nfs_client_lock; 32 - struct timespec boot_time; 32 + ktime_t boot_time; 33 33 #ifdef CONFIG_PROC_FS 34 34 struct proc_dir_entry *proc_nfsfs; 35 35 #endif
+1
fs/nfs/nfs42proc.c
··· 443 443 task = rpc_run_task(&task_setup); 444 444 if (IS_ERR(task)) 445 445 return PTR_ERR(task); 446 + rpc_put_task(task); 446 447 return 0; 447 448 } 448 449
+14 -1
fs/nfs/nfs4_fs.h
··· 39 39 NFS4CLNT_BIND_CONN_TO_SESSION, 40 40 NFS4CLNT_MOVED, 41 41 NFS4CLNT_LEASE_MOVED, 42 + NFS4CLNT_DELEGATION_EXPIRED, 42 43 }; 43 44 44 45 #define NFS4_RENEW_TIMEOUT 0x01 ··· 58 57 struct nfs_fsinfo *); 59 58 void (*free_lock_state)(struct nfs_server *, 60 59 struct nfs4_lock_state *); 60 + int (*test_and_free_expired)(struct nfs_server *, 61 + nfs4_stateid *, struct rpc_cred *); 61 62 struct nfs_seqid * 62 63 (*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 64 + int (*session_trunk)(struct rpc_clnt *, struct rpc_xprt *, void *); 63 65 const struct rpc_call_ops *call_sync_ops; 64 66 const struct nfs4_state_recovery_ops *reboot_recovery_ops; 65 67 const struct nfs4_state_recovery_ops *nograce_recovery_ops; ··· 160 156 NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ 161 157 NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */ 162 158 NFS_STATE_RECOVERY_FAILED, /* OPEN stateid state recovery failed */ 159 + NFS_STATE_MAY_NOTIFY_LOCK, /* server may CB_NOTIFY_LOCK */ 163 160 }; 164 161 165 162 struct nfs4_state { ··· 206 201 int (*reclaim_complete)(struct nfs_client *, struct rpc_cred *); 207 202 int (*detect_trunking)(struct nfs_client *, struct nfs_client **, 208 203 struct rpc_cred *); 204 + }; 205 + 206 + struct nfs4_add_xprt_data { 207 + struct nfs_client *clp; 208 + struct rpc_cred *cred; 209 209 }; 210 210 211 211 struct nfs4_state_maintenance_ops { ··· 288 278 struct nfs_fsinfo *fsinfo); 289 279 extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, 290 280 bool sync); 281 + extern int nfs4_detect_session_trunking(struct nfs_client *clp, 282 + struct nfs41_exchange_id_res *res, struct rpc_xprt *xprt); 291 283 292 284 static inline bool 293 285 is_ds_only_client(struct nfs_client *clp) ··· 451 439 extern int nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); 452 440 extern int nfs4_schedule_migration_recovery(const struct nfs_server *); 453 441 extern void 
nfs4_schedule_lease_moved_recovery(struct nfs_client *); 454 - extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 442 + extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags, bool); 455 443 extern void nfs41_handle_server_scope(struct nfs_client *, 456 444 struct nfs41_server_scope **); 457 445 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); ··· 483 471 struct dentry *nfs4_try_mount(int, const char *, struct nfs_mount_info *, struct nfs_subversion *); 484 472 extern bool nfs4_disable_idmapping; 485 473 extern unsigned short max_session_slots; 474 + extern unsigned short max_session_cb_slots; 486 475 extern unsigned short send_implementation_id; 487 476 extern bool recover_lost_locks; 488 477
+107 -11
fs/nfs/nfs4client.c
··· 199 199 clp->cl_minorversion = cl_init->minorversion; 200 200 clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion]; 201 201 clp->cl_mig_gen = 1; 202 + #if IS_ENABLED(CONFIG_NFS_V4_1) 203 + init_waitqueue_head(&clp->cl_lock_waitq); 204 + #endif 202 205 return clp; 203 206 204 207 error: ··· 565 562 /* 566 563 * Returns true if the client IDs match 567 564 */ 568 - static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b) 565 + static bool nfs4_match_clientids(u64 a, u64 b) 569 566 { 570 - if (a->cl_clientid != b->cl_clientid) { 567 + if (a != b) { 571 568 dprintk("NFS: --> %s client ID %llx does not match %llx\n", 572 - __func__, a->cl_clientid, b->cl_clientid); 569 + __func__, a, b); 573 570 return false; 574 571 } 575 572 dprintk("NFS: --> %s client ID %llx matches %llx\n", 576 - __func__, a->cl_clientid, b->cl_clientid); 573 + __func__, a, b); 577 574 return true; 578 575 } 579 576 ··· 581 578 * Returns true if the server major ids match 582 579 */ 583 580 static bool 584 - nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b) 581 + nfs4_check_serverowner_major_id(struct nfs41_server_owner *o1, 582 + struct nfs41_server_owner *o2) 585 583 { 586 - struct nfs41_server_owner *o1 = a->cl_serverowner; 587 - struct nfs41_server_owner *o2 = b->cl_serverowner; 588 - 589 584 if (o1->major_id_sz != o2->major_id_sz) 590 585 goto out_major_mismatch; 591 586 if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0) 592 587 goto out_major_mismatch; 593 588 594 - dprintk("NFS: --> %s server owners match\n", __func__); 589 + dprintk("NFS: --> %s server owner major IDs match\n", __func__); 595 590 return true; 596 591 597 592 out_major_mismatch: 598 593 dprintk("NFS: --> %s server owner major IDs do not match\n", 599 594 __func__); 600 595 return false; 596 + } 597 + 598 + /* 599 + * Returns true if server minor ids match 600 + */ 601 + static bool 602 + nfs4_check_serverowner_minor_id(struct nfs41_server_owner *o1, 603 + struct 
nfs41_server_owner *o2) 604 + { 605 + /* Check eir_server_owner so_minor_id */ 606 + if (o1->minor_id != o2->minor_id) 607 + goto out_minor_mismatch; 608 + 609 + dprintk("NFS: --> %s server owner minor IDs match\n", __func__); 610 + return true; 611 + 612 + out_minor_mismatch: 613 + dprintk("NFS: --> %s server owner minor IDs do not match\n", __func__); 614 + return false; 615 + } 616 + 617 + /* 618 + * Returns true if the server scopes match 619 + */ 620 + static bool 621 + nfs4_check_server_scope(struct nfs41_server_scope *s1, 622 + struct nfs41_server_scope *s2) 623 + { 624 + if (s1->server_scope_sz != s2->server_scope_sz) 625 + goto out_scope_mismatch; 626 + if (memcmp(s1->server_scope, s2->server_scope, 627 + s1->server_scope_sz) != 0) 628 + goto out_scope_mismatch; 629 + 630 + dprintk("NFS: --> %s server scopes match\n", __func__); 631 + return true; 632 + 633 + out_scope_mismatch: 634 + dprintk("NFS: --> %s server scopes do not match\n", 635 + __func__); 636 + return false; 637 + } 638 + 639 + /** 640 + * nfs4_detect_session_trunking - Checks for session trunking. 641 + * 642 + * Called after a successful EXCHANGE_ID on a multi-addr connection. 643 + * Upon success, add the transport. 644 + * 645 + * @clp: original mount nfs_client 646 + * @res: result structure from an exchange_id using the original mount 647 + * nfs_client with a new multi_addr transport 648 + * 649 + * Returns zero on success, otherwise -EINVAL 650 + * 651 + * Note: since the exchange_id for the new multi_addr transport uses the 652 + * same nfs_client from the original mount, the cl_owner_id is reused, 653 + * so eir_clientowner is the same. 
654 + */ 655 + int nfs4_detect_session_trunking(struct nfs_client *clp, 656 + struct nfs41_exchange_id_res *res, 657 + struct rpc_xprt *xprt) 658 + { 659 + /* Check eir_clientid */ 660 + if (!nfs4_match_clientids(clp->cl_clientid, res->clientid)) 661 + goto out_err; 662 + 663 + /* Check eir_server_owner so_major_id */ 664 + if (!nfs4_check_serverowner_major_id(clp->cl_serverowner, 665 + res->server_owner)) 666 + goto out_err; 667 + 668 + /* Check eir_server_owner so_minor_id */ 669 + if (!nfs4_check_serverowner_minor_id(clp->cl_serverowner, 670 + res->server_owner)) 671 + goto out_err; 672 + 673 + /* Check eir_server_scope */ 674 + if (!nfs4_check_server_scope(clp->cl_serverscope, res->server_scope)) 675 + goto out_err; 676 + 677 + /* Session trunking passed, add the xprt */ 678 + rpc_clnt_xprt_switch_add_xprt(clp->cl_rpcclient, xprt); 679 + 680 + pr_info("NFS: %s: Session trunking succeeded for %s\n", 681 + clp->cl_hostname, 682 + xprt->address_strings[RPC_DISPLAY_ADDR]); 683 + 684 + return 0; 685 + out_err: 686 + pr_info("NFS: %s: Session trunking failed for %s\n", clp->cl_hostname, 687 + xprt->address_strings[RPC_DISPLAY_ADDR]); 688 + 689 + return -EINVAL; 601 690 } 602 691 603 692 /** ··· 745 650 if (pos->cl_cons_state != NFS_CS_READY) 746 651 continue; 747 652 748 - if (!nfs4_match_clientids(pos, new)) 653 + if (!nfs4_match_clientids(pos->cl_clientid, new->cl_clientid)) 749 654 continue; 750 655 751 656 /* ··· 753 658 * client id trunking. In either case, we want to fall back 754 659 * to using the existing nfs_client. 755 660 */ 756 - if (!nfs4_check_clientid_trunking(pos, new)) 661 + if (!nfs4_check_serverowner_major_id(pos->cl_serverowner, 662 + new->cl_serverowner)) 757 663 continue; 758 664 759 665 /* Unlike NFSv4.0, we know that NFSv4.1 always uses the
+649 -284
fs/nfs/nfs4proc.c
··· 99 99 #ifdef CONFIG_NFS_V4_1 100 100 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *, 101 101 struct rpc_cred *); 102 - static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *, 103 - struct rpc_cred *); 102 + static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *, 103 + struct rpc_cred *, bool); 104 104 #endif 105 105 106 106 #ifdef CONFIG_NFS_V4_SECURITY_LABEL ··· 328 328 kunmap_atomic(start); 329 329 } 330 330 331 + static void nfs4_test_and_free_stateid(struct nfs_server *server, 332 + nfs4_stateid *stateid, 333 + struct rpc_cred *cred) 334 + { 335 + const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops; 336 + 337 + ops->test_and_free_expired(server, stateid, cred); 338 + } 339 + 340 + static void __nfs4_free_revoked_stateid(struct nfs_server *server, 341 + nfs4_stateid *stateid, 342 + struct rpc_cred *cred) 343 + { 344 + stateid->type = NFS4_REVOKED_STATEID_TYPE; 345 + nfs4_test_and_free_stateid(server, stateid, cred); 346 + } 347 + 348 + static void nfs4_free_revoked_stateid(struct nfs_server *server, 349 + const nfs4_stateid *stateid, 350 + struct rpc_cred *cred) 351 + { 352 + nfs4_stateid tmp; 353 + 354 + nfs4_stateid_copy(&tmp, stateid); 355 + __nfs4_free_revoked_stateid(server, &tmp, cred); 356 + } 357 + 331 358 static long nfs4_update_delay(long *timeout) 332 359 { 333 360 long ret; ··· 397 370 exception->delay = 0; 398 371 exception->recovering = 0; 399 372 exception->retry = 0; 373 + 374 + if (stateid == NULL && state != NULL) 375 + stateid = &state->stateid; 376 + 400 377 switch(errorcode) { 401 378 case 0: 402 379 return 0; 403 - case -NFS4ERR_OPENMODE: 404 380 case -NFS4ERR_DELEG_REVOKED: 405 381 case -NFS4ERR_ADMIN_REVOKED: 382 + case -NFS4ERR_EXPIRED: 406 383 case -NFS4ERR_BAD_STATEID: 384 + if (inode != NULL && stateid != NULL) { 385 + nfs_inode_find_state_and_recover(inode, 386 + stateid); 387 + goto wait_on_recovery; 388 + } 389 + case -NFS4ERR_OPENMODE: 407 390 if (inode) { 408 
391 int err; 409 392 ··· 432 395 if (ret < 0) 433 396 break; 434 397 goto wait_on_recovery; 435 - case -NFS4ERR_EXPIRED: 436 - if (state != NULL) { 437 - ret = nfs4_schedule_stateid_recovery(server, state); 438 - if (ret < 0) 439 - break; 440 - } 441 398 case -NFS4ERR_STALE_STATEID: 442 399 case -NFS4ERR_STALE_CLIENTID: 443 400 nfs4_schedule_lease_recovery(clp); ··· 647 616 } 648 617 spin_unlock(&tbl->slot_tbl_lock); 649 618 619 + slot->privileged = args->sa_privileged ? 1 : 0; 650 620 args->sa_slot = slot; 651 621 res->sr_slot = slot; 652 622 ··· 755 723 /* Check the SEQUENCE operation status */ 756 724 switch (res->sr_status) { 757 725 case 0: 726 + /* If previous op on slot was interrupted and we reused 727 + * the seq# and got a reply from the cache, then retry 728 + */ 729 + if (task->tk_status == -EREMOTEIO && interrupted) { 730 + ++slot->seq_nr; 731 + goto retry_nowait; 732 + } 758 733 /* Update the slot's sequence and clientid lease timer */ 759 734 slot->seq_done = 1; 760 735 clp = session->clp; 761 736 do_renew_lease(clp, res->sr_timestamp); 762 737 /* Check sequence flags */ 763 - nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 738 + nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, 739 + !!slot->privileged); 764 740 nfs41_update_target_slotid(slot->table, slot, res); 765 741 break; 766 742 case 1: ··· 915 875 } 916 876 spin_unlock(&tbl->slot_tbl_lock); 917 877 878 + slot->privileged = args->sa_privileged ? 
1 : 0; 918 879 args->sa_slot = slot; 919 880 920 881 dprintk("<-- %s slotid=%u seqid=%u\n", __func__, ··· 1394 1353 nfs4_state_set_mode_locked(state, state->state | fmode); 1395 1354 } 1396 1355 1356 + #ifdef CONFIG_NFS_V4_1 1357 + static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state) 1358 + { 1359 + if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags)) 1360 + return true; 1361 + if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags)) 1362 + return true; 1363 + if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags)) 1364 + return true; 1365 + return false; 1366 + } 1367 + #endif /* CONFIG_NFS_V4_1 */ 1368 + 1397 1369 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) 1398 1370 { 1399 1371 struct nfs_client *clp = state->owner->so_server->nfs_client; ··· 1423 1369 } 1424 1370 1425 1371 static bool nfs_need_update_open_stateid(struct nfs4_state *state, 1426 - nfs4_stateid *stateid) 1372 + const nfs4_stateid *stateid, nfs4_stateid *freeme) 1427 1373 { 1428 1374 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0) 1429 1375 return true; 1430 1376 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1377 + nfs4_stateid_copy(freeme, &state->open_stateid); 1431 1378 nfs_test_and_clear_all_open_stateid(state); 1432 1379 return true; 1433 1380 } ··· 1492 1437 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1493 1438 } 1494 1439 1495 - static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 1440 + static void nfs_set_open_stateid_locked(struct nfs4_state *state, 1441 + const nfs4_stateid *stateid, fmode_t fmode, 1442 + nfs4_stateid *freeme) 1496 1443 { 1497 1444 switch (fmode) { 1498 1445 case FMODE_READ: ··· 1506 1449 case FMODE_READ|FMODE_WRITE: 1507 1450 set_bit(NFS_O_RDWR_STATE, &state->flags); 1508 1451 } 1509 - if (!nfs_need_update_open_stateid(state, stateid)) 1452 + if (!nfs_need_update_open_stateid(state, 
stateid, freeme)) 1510 1453 return; 1511 1454 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1512 1455 nfs4_stateid_copy(&state->stateid, stateid); 1513 1456 nfs4_stateid_copy(&state->open_stateid, stateid); 1514 1457 } 1515 1458 1516 - static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) 1459 + static void __update_open_stateid(struct nfs4_state *state, 1460 + const nfs4_stateid *open_stateid, 1461 + const nfs4_stateid *deleg_stateid, 1462 + fmode_t fmode, 1463 + nfs4_stateid *freeme) 1517 1464 { 1518 1465 /* 1519 1466 * Protect the call to nfs4_state_set_mode_locked and ··· 1530 1469 set_bit(NFS_DELEGATED_STATE, &state->flags); 1531 1470 } 1532 1471 if (open_stateid != NULL) 1533 - nfs_set_open_stateid_locked(state, open_stateid, fmode); 1472 + nfs_set_open_stateid_locked(state, open_stateid, fmode, freeme); 1534 1473 write_sequnlock(&state->seqlock); 1535 1474 update_open_stateflags(state, fmode); 1536 1475 spin_unlock(&state->owner->so_lock); 1537 1476 } 1538 1477 1539 - static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode) 1478 + static int update_open_stateid(struct nfs4_state *state, 1479 + const nfs4_stateid *open_stateid, 1480 + const nfs4_stateid *delegation, 1481 + fmode_t fmode) 1540 1482 { 1483 + struct nfs_server *server = NFS_SERVER(state->inode); 1484 + struct nfs_client *clp = server->nfs_client; 1541 1485 struct nfs_inode *nfsi = NFS_I(state->inode); 1542 1486 struct nfs_delegation *deleg_cur; 1487 + nfs4_stateid freeme = {0}; 1543 1488 int ret = 0; 1544 1489 1545 1490 fmode &= (FMODE_READ|FMODE_WRITE); ··· 1567 1500 goto no_delegation_unlock; 1568 1501 1569 1502 nfs_mark_delegation_referenced(deleg_cur); 1570 - __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); 1503 + __update_open_stateid(state, open_stateid, &deleg_cur->stateid, 1504 + fmode, &freeme); 1571 1505 
ret = 1; 1572 1506 no_delegation_unlock: 1573 1507 spin_unlock(&deleg_cur->lock); ··· 1576 1508 rcu_read_unlock(); 1577 1509 1578 1510 if (!ret && open_stateid != NULL) { 1579 - __update_open_stateid(state, open_stateid, NULL, fmode); 1511 + __update_open_stateid(state, open_stateid, NULL, fmode, &freeme); 1580 1512 ret = 1; 1581 1513 } 1582 1514 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1583 - nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1515 + nfs4_schedule_state_manager(clp); 1516 + if (freeme.type != 0) 1517 + nfs4_test_and_free_stateid(server, &freeme, 1518 + state->owner->so_cred); 1584 1519 1585 1520 return ret; 1586 1521 } ··· 1960 1889 case -NFS4ERR_STALE_CLIENTID: 1961 1890 case -NFS4ERR_STALE_STATEID: 1962 1891 set_bit(NFS_DELEGATED_STATE, &state->flags); 1963 - case -NFS4ERR_EXPIRED: 1964 1892 /* Don't recall a delegation if it was lost */ 1965 1893 nfs4_schedule_lease_recovery(server->nfs_client); 1966 1894 return -EAGAIN; ··· 1971 1901 return -EAGAIN; 1972 1902 case -NFS4ERR_DELEG_REVOKED: 1973 1903 case -NFS4ERR_ADMIN_REVOKED: 1904 + case -NFS4ERR_EXPIRED: 1974 1905 case -NFS4ERR_BAD_STATEID: 1975 1906 case -NFS4ERR_OPENMODE: 1976 1907 nfs_inode_find_state_and_recover(state->inode, ··· 2453 2382 return ret; 2454 2383 } 2455 2384 2456 - static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2385 + static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2386 + const nfs4_stateid *stateid) 2457 2387 { 2458 - nfs_remove_bad_delegation(state->inode); 2388 + nfs_remove_bad_delegation(state->inode, stateid); 2459 2389 write_seqlock(&state->seqlock); 2460 2390 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2461 2391 write_sequnlock(&state->seqlock); ··· 2466 2394 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2467 2395 { 2468 2396 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2469 - nfs_finish_clear_delegation_stateid(state); 2397 + 
nfs_finish_clear_delegation_stateid(state, NULL); 2470 2398 } 2471 2399 2472 2400 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) ··· 2476 2404 return nfs4_open_expired(sp, state); 2477 2405 } 2478 2406 2407 + static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2408 + nfs4_stateid *stateid, 2409 + struct rpc_cred *cred) 2410 + { 2411 + return -NFS4ERR_BAD_STATEID; 2412 + } 2413 + 2479 2414 #if defined(CONFIG_NFS_V4_1) 2415 + static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2416 + nfs4_stateid *stateid, 2417 + struct rpc_cred *cred) 2418 + { 2419 + int status; 2420 + 2421 + switch (stateid->type) { 2422 + default: 2423 + break; 2424 + case NFS4_INVALID_STATEID_TYPE: 2425 + case NFS4_SPECIAL_STATEID_TYPE: 2426 + return -NFS4ERR_BAD_STATEID; 2427 + case NFS4_REVOKED_STATEID_TYPE: 2428 + goto out_free; 2429 + } 2430 + 2431 + status = nfs41_test_stateid(server, stateid, cred); 2432 + switch (status) { 2433 + case -NFS4ERR_EXPIRED: 2434 + case -NFS4ERR_ADMIN_REVOKED: 2435 + case -NFS4ERR_DELEG_REVOKED: 2436 + break; 2437 + default: 2438 + return status; 2439 + } 2440 + out_free: 2441 + /* Ack the revoked state to the server */ 2442 + nfs41_free_stateid(server, stateid, cred, true); 2443 + return -NFS4ERR_EXPIRED; 2444 + } 2445 + 2480 2446 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2481 2447 { 2482 2448 struct nfs_server *server = NFS_SERVER(state->inode); ··· 2532 2422 } 2533 2423 2534 2424 nfs4_stateid_copy(&stateid, &delegation->stateid); 2535 - cred = get_rpccred(delegation->cred); 2536 - rcu_read_unlock(); 2537 - status = nfs41_test_stateid(server, &stateid, cred); 2538 - trace_nfs4_test_delegation_stateid(state, NULL, status); 2539 - 2540 - if (status != NFS_OK) { 2541 - /* Free the stateid unless the server explicitly 2542 - * informs us the stateid is unrecognized. 
*/ 2543 - if (status != -NFS4ERR_BAD_STATEID) 2544 - nfs41_free_stateid(server, &stateid, cred); 2545 - nfs_finish_clear_delegation_stateid(state); 2425 + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 2426 + rcu_read_unlock(); 2427 + nfs_finish_clear_delegation_stateid(state, &stateid); 2428 + return; 2546 2429 } 2547 2430 2431 + if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) { 2432 + rcu_read_unlock(); 2433 + return; 2434 + } 2435 + 2436 + cred = get_rpccred(delegation->cred); 2437 + rcu_read_unlock(); 2438 + status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2439 + trace_nfs4_test_delegation_stateid(state, NULL, status); 2440 + if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2441 + nfs_finish_clear_delegation_stateid(state, &stateid); 2442 + 2548 2443 put_rpccred(cred); 2444 + } 2445 + 2446 + /** 2447 + * nfs41_check_expired_locks - possibly free a lock stateid 2448 + * 2449 + * @state: NFSv4 state for an inode 2450 + * 2451 + * Returns NFS_OK if recovery for this stateid is now finished. 2452 + * Otherwise a negative NFS4ERR value is returned. 
2453 + */ 2454 + static int nfs41_check_expired_locks(struct nfs4_state *state) 2455 + { 2456 + int status, ret = NFS_OK; 2457 + struct nfs4_lock_state *lsp; 2458 + struct nfs_server *server = NFS_SERVER(state->inode); 2459 + 2460 + if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2461 + goto out; 2462 + list_for_each_entry(lsp, &state->lock_states, ls_locks) { 2463 + if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 2464 + struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 2465 + 2466 + status = nfs41_test_and_free_expired_stateid(server, 2467 + &lsp->ls_stateid, 2468 + cred); 2469 + trace_nfs4_test_lock_stateid(state, lsp, status); 2470 + if (status == -NFS4ERR_EXPIRED || 2471 + status == -NFS4ERR_BAD_STATEID) { 2472 + clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 2473 + lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 2474 + if (!recover_lost_locks) 2475 + set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2476 + } else if (status != NFS_OK) { 2477 + ret = status; 2478 + break; 2479 + } 2480 + } 2481 + }; 2482 + out: 2483 + return ret; 2549 2484 } 2550 2485 2551 2486 /** ··· 2608 2453 struct rpc_cred *cred = state->owner->so_cred; 2609 2454 int status; 2610 2455 2611 - /* If a state reset has been done, test_stateid is unneeded */ 2612 - if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2613 - (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2614 - (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2456 + if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) { 2457 + if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) { 2458 + if (nfs4_have_delegation(state->inode, state->state)) 2459 + return NFS_OK; 2460 + return -NFS4ERR_OPENMODE; 2461 + } 2615 2462 return -NFS4ERR_BAD_STATEID; 2616 - 2617 - status = nfs41_test_stateid(server, stateid, cred); 2463 + } 2464 + status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 2618 2465 trace_nfs4_test_open_stateid(state, NULL, status); 2619 - if (status != NFS_OK) { 2620 - /* Free the stateid unless 
the server explicitly 2621 - * informs us the stateid is unrecognized. */ 2622 - if (status != -NFS4ERR_BAD_STATEID) 2623 - nfs41_free_stateid(server, stateid, cred); 2624 - 2466 + if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 2625 2467 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2626 2468 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2627 2469 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2628 2470 clear_bit(NFS_OPEN_STATE, &state->flags); 2471 + stateid->type = NFS4_INVALID_STATEID_TYPE; 2629 2472 } 2630 - return status; 2473 + if (status != NFS_OK) 2474 + return status; 2475 + if (nfs_open_stateid_recover_openmode(state)) 2476 + return -NFS4ERR_OPENMODE; 2477 + return NFS_OK; 2631 2478 } 2632 2479 2633 2480 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) ··· 2637 2480 int status; 2638 2481 2639 2482 nfs41_check_delegation_stateid(state); 2483 + status = nfs41_check_expired_locks(state); 2484 + if (status != NFS_OK) 2485 + return status; 2640 2486 status = nfs41_check_open_stateid(state); 2641 2487 if (status != NFS_OK) 2642 2488 status = nfs4_open_expired(sp, state); ··· 2697 2537 goto out; 2698 2538 if (server->caps & NFS_CAP_POSIX_LOCK) 2699 2539 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2540 + if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 2541 + set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 2700 2542 2701 2543 dentry = opendata->dentry; 2702 2544 if (d_really_is_negative(dentry)) { ··· 3061 2899 break; 3062 2900 case -NFS4ERR_ADMIN_REVOKED: 3063 2901 case -NFS4ERR_STALE_STATEID: 2902 + case -NFS4ERR_EXPIRED: 2903 + nfs4_free_revoked_stateid(server, 2904 + &calldata->arg.stateid, 2905 + task->tk_msg.rpc_cred); 3064 2906 case -NFS4ERR_OLD_STATEID: 3065 2907 case -NFS4ERR_BAD_STATEID: 3066 - case -NFS4ERR_EXPIRED: 3067 2908 if (!nfs4_stateid_match(&calldata->arg.stateid, 3068 2909 &state->open_stateid)) { 3069 2910 rpc_restart_call_prepare(task); ··· 4477 4312 if (error == 0) { 
4478 4313 /* block layout checks this! */ 4479 4314 server->pnfs_blksize = fsinfo->blksize; 4480 - set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 4315 + set_pnfs_layoutdriver(server, fhandle, fsinfo); 4481 4316 } 4482 4317 4483 4318 return error; ··· 4564 4399 return false; 4565 4400 } 4566 4401 4567 - void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4568 - { 4569 - nfs_invalidate_atime(hdr->inode); 4570 - } 4571 - 4572 4402 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4573 4403 { 4574 4404 struct nfs_server *server = NFS_SERVER(hdr->inode); 4575 4405 4576 4406 trace_nfs4_read(hdr, task->tk_status); 4577 - if (nfs4_async_handle_error(task, server, 4578 - hdr->args.context->state, 4579 - NULL) == -EAGAIN) { 4580 - rpc_restart_call_prepare(task); 4581 - return -EAGAIN; 4407 + if (task->tk_status < 0) { 4408 + struct nfs4_exception exception = { 4409 + .inode = hdr->inode, 4410 + .state = hdr->args.context->state, 4411 + .stateid = &hdr->args.stateid, 4412 + }; 4413 + task->tk_status = nfs4_async_handle_exception(task, 4414 + server, task->tk_status, &exception); 4415 + if (exception.retry) { 4416 + rpc_restart_call_prepare(task); 4417 + return -EAGAIN; 4418 + } 4582 4419 } 4583 4420 4584 - __nfs4_read_done_cb(hdr); 4585 4421 if (task->tk_status > 0) 4586 4422 renew_lease(server, hdr->timestamp); 4587 4423 return 0; ··· 4611 4445 return -EAGAIN; 4612 4446 if (nfs4_read_stateid_changed(task, &hdr->args)) 4613 4447 return -EAGAIN; 4448 + if (task->tk_status > 0) 4449 + nfs_invalidate_atime(hdr->inode); 4614 4450 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4615 4451 nfs4_read_done_cb(task, hdr); 4616 4452 } ··· 4650 4482 struct inode *inode = hdr->inode; 4651 4483 4652 4484 trace_nfs4_write(hdr, task->tk_status); 4653 - if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4654 - hdr->args.context->state, 4655 - NULL) == -EAGAIN) { 4656 - rpc_restart_call_prepare(task); 4657 - return -EAGAIN; 4485 + if (task->tk_status < 0) { 4486 + struct nfs4_exception exception = { 4487 + .inode = hdr->inode, 4488 + .state = hdr->args.context->state, 4489 + .stateid = &hdr->args.stateid, 4490 + }; 4491 + task->tk_status = nfs4_async_handle_exception(task, 4492 + NFS_SERVER(inode), task->tk_status, 4493 + &exception); 4494 + if (exception.retry) { 4495 + rpc_restart_call_prepare(task); 4496 + return -EAGAIN; 4497 + } 4658 4498 } 4659 4499 if (task->tk_status >= 0) { 4660 4500 renew_lease(NFS_SERVER(inode), hdr->timestamp); ··· 5299 5123 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 5300 5124 /* An impossible timestamp guarantees this value 5301 5125 * will never match a generated boot time. 
*/ 5302 - verf[0] = 0; 5303 - verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 5126 + verf[0] = cpu_to_be32(U32_MAX); 5127 + verf[1] = cpu_to_be32(U32_MAX); 5304 5128 } else { 5305 5129 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 5306 - verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 5307 - verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 5130 + u64 ns = ktime_to_ns(nn->boot_time); 5131 + 5132 + verf[0] = cpu_to_be32(ns >> 32); 5133 + verf[1] = cpu_to_be32(ns); 5308 5134 } 5309 5135 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 5310 5136 } ··· 5571 5393 renew_lease(data->res.server, data->timestamp); 5572 5394 case -NFS4ERR_ADMIN_REVOKED: 5573 5395 case -NFS4ERR_DELEG_REVOKED: 5396 + case -NFS4ERR_EXPIRED: 5397 + nfs4_free_revoked_stateid(data->res.server, 5398 + data->args.stateid, 5399 + task->tk_msg.rpc_cred); 5574 5400 case -NFS4ERR_BAD_STATEID: 5575 5401 case -NFS4ERR_OLD_STATEID: 5576 5402 case -NFS4ERR_STALE_STATEID: 5577 - case -NFS4ERR_EXPIRED: 5578 5403 task->tk_status = 0; 5579 5404 if (data->roc) 5580 5405 pnfs_roc_set_barrier(data->inode, data->roc_barrier); ··· 5709 5528 return err; 5710 5529 } 5711 5530 5712 - #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5713 - #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5714 - 5715 - /* 5716 - * sleep, with exponential backoff, and retry the LOCK operation. 
5717 - */ 5718 - static unsigned long 5719 - nfs4_set_lock_task_retry(unsigned long timeout) 5720 - { 5721 - freezable_schedule_timeout_killable_unsafe(timeout); 5722 - timeout <<= 1; 5723 - if (timeout > NFS4_LOCK_MAXTIMEOUT) 5724 - return NFS4_LOCK_MAXTIMEOUT; 5725 - return timeout; 5726 - } 5727 - 5728 5531 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5729 5532 { 5730 5533 struct inode *inode = state->inode; ··· 5765 5600 return err; 5766 5601 } 5767 5602 5768 - static int do_vfs_lock(struct inode *inode, struct file_lock *fl) 5769 - { 5770 - return locks_lock_inode_wait(inode, fl); 5771 - } 5772 - 5773 5603 struct nfs4_unlockdata { 5774 5604 struct nfs_locku_args arg; 5775 5605 struct nfs_locku_res res; ··· 5817 5657 switch (task->tk_status) { 5818 5658 case 0: 5819 5659 renew_lease(calldata->server, calldata->timestamp); 5820 - do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); 5660 + locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 5821 5661 if (nfs4_update_lock_stateid(calldata->lsp, 5822 5662 &calldata->res.stateid)) 5823 5663 break; 5664 + case -NFS4ERR_ADMIN_REVOKED: 5665 + case -NFS4ERR_EXPIRED: 5666 + nfs4_free_revoked_stateid(calldata->server, 5667 + &calldata->arg.stateid, 5668 + task->tk_msg.rpc_cred); 5824 5669 case -NFS4ERR_BAD_STATEID: 5825 5670 case -NFS4ERR_OLD_STATEID: 5826 5671 case -NFS4ERR_STALE_STATEID: 5827 - case -NFS4ERR_EXPIRED: 5828 5672 if (!nfs4_stateid_match(&calldata->arg.stateid, 5829 5673 &calldata->lsp->ls_stateid)) 5830 5674 rpc_restart_call_prepare(task); ··· 5929 5765 mutex_lock(&sp->so_delegreturn_mutex); 5930 5766 /* Exclude nfs4_reclaim_open_stateid() - note nesting! 
*/ 5931 5767 down_read(&nfsi->rwsem); 5932 - if (do_vfs_lock(inode, request) == -ENOENT) { 5768 + if (locks_lock_inode_wait(inode, request) == -ENOENT) { 5933 5769 up_read(&nfsi->rwsem); 5934 5770 mutex_unlock(&sp->so_delegreturn_mutex); 5935 5771 goto out; ··· 6070 5906 data->timestamp); 6071 5907 if (data->arg.new_lock) { 6072 5908 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 6073 - if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) { 5909 + if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) { 6074 5910 rpc_restart_call_prepare(task); 6075 5911 break; 6076 5912 } ··· 6129 5965 { 6130 5966 switch (error) { 6131 5967 case -NFS4ERR_ADMIN_REVOKED: 5968 + case -NFS4ERR_EXPIRED: 6132 5969 case -NFS4ERR_BAD_STATEID: 6133 5970 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 6134 5971 if (new_lock_owner != 0 || ··· 6138 5973 break; 6139 5974 case -NFS4ERR_STALE_STATEID: 6140 5975 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 6141 - case -NFS4ERR_EXPIRED: 6142 5976 nfs4_schedule_lease_recovery(server->nfs_client); 6143 5977 }; 6144 5978 } ··· 6247 6083 } 6248 6084 6249 6085 #if defined(CONFIG_NFS_V4_1) 6250 - /** 6251 - * nfs41_check_expired_locks - possibly free a lock stateid 6252 - * 6253 - * @state: NFSv4 state for an inode 6254 - * 6255 - * Returns NFS_OK if recovery for this stateid is now finished. 6256 - * Otherwise a negative NFS4ERR value is returned. 
6257 - */ 6258 - static int nfs41_check_expired_locks(struct nfs4_state *state) 6259 - { 6260 - int status, ret = -NFS4ERR_BAD_STATEID; 6261 - struct nfs4_lock_state *lsp; 6262 - struct nfs_server *server = NFS_SERVER(state->inode); 6263 - 6264 - list_for_each_entry(lsp, &state->lock_states, ls_locks) { 6265 - if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 6266 - struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 6267 - 6268 - status = nfs41_test_stateid(server, 6269 - &lsp->ls_stateid, 6270 - cred); 6271 - trace_nfs4_test_lock_stateid(state, lsp, status); 6272 - if (status != NFS_OK) { 6273 - /* Free the stateid unless the server 6274 - * informs us the stateid is unrecognized. */ 6275 - if (status != -NFS4ERR_BAD_STATEID) 6276 - nfs41_free_stateid(server, 6277 - &lsp->ls_stateid, 6278 - cred); 6279 - clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 6280 - ret = status; 6281 - } 6282 - } 6283 - }; 6284 - 6285 - return ret; 6286 - } 6287 - 6288 6086 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 6289 6087 { 6290 - int status = NFS_OK; 6088 + struct nfs4_lock_state *lsp; 6089 + int status; 6291 6090 6292 - if (test_bit(LK_STATE_IN_USE, &state->flags)) 6293 - status = nfs41_check_expired_locks(state); 6294 - if (status != NFS_OK) 6295 - status = nfs4_lock_expired(state, request); 6091 + status = nfs4_set_lock_state(state, request); 6092 + if (status != 0) 6093 + return status; 6094 + lsp = request->fl_u.nfs4_fl.owner; 6095 + if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 6096 + test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 6097 + return 0; 6098 + status = nfs4_lock_expired(state, request); 6296 6099 return status; 6297 6100 } 6298 6101 #endif ··· 6269 6138 struct nfs_inode *nfsi = NFS_I(state->inode); 6270 6139 struct nfs4_state_owner *sp = state->owner; 6271 6140 unsigned char fl_flags = request->fl_flags; 6272 - int status = -ENOLCK; 6141 + int status; 6273 6142 6274 - if ((fl_flags & FL_POSIX) && 6275 - 
!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6276 - goto out; 6277 - /* Is this a delegated open? */ 6278 - status = nfs4_set_lock_state(state, request); 6279 - if (status != 0) 6280 - goto out; 6281 6143 request->fl_flags |= FL_ACCESS; 6282 - status = do_vfs_lock(state->inode, request); 6144 + status = locks_lock_inode_wait(state->inode, request); 6283 6145 if (status < 0) 6284 6146 goto out; 6285 6147 mutex_lock(&sp->so_delegreturn_mutex); ··· 6281 6157 /* Yes: cache locks! */ 6282 6158 /* ...but avoid races with delegation recall... */ 6283 6159 request->fl_flags = fl_flags & ~FL_SLEEP; 6284 - status = do_vfs_lock(state->inode, request); 6160 + status = locks_lock_inode_wait(state->inode, request); 6285 6161 up_read(&nfsi->rwsem); 6286 6162 mutex_unlock(&sp->so_delegreturn_mutex); 6287 6163 goto out; ··· 6312 6188 return err; 6313 6189 } 6314 6190 6191 + #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 6192 + #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 6193 + 6194 + static int 6195 + nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 6196 + struct file_lock *request) 6197 + { 6198 + int status = -ERESTARTSYS; 6199 + unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6200 + 6201 + while(!signalled()) { 6202 + status = nfs4_proc_setlk(state, cmd, request); 6203 + if ((status != -EAGAIN) || IS_SETLK(cmd)) 6204 + break; 6205 + freezable_schedule_timeout_interruptible(timeout); 6206 + timeout *= 2; 6207 + timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 6208 + status = -ERESTARTSYS; 6209 + } 6210 + return status; 6211 + } 6212 + 6213 + #ifdef CONFIG_NFS_V4_1 6214 + struct nfs4_lock_waiter { 6215 + struct task_struct *task; 6216 + struct inode *inode; 6217 + struct nfs_lowner *owner; 6218 + bool notified; 6219 + }; 6220 + 6221 + static int 6222 + nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key) 6223 + { 6224 + int ret; 6225 + struct cb_notify_lock_args *cbnl = key; 6226 + struct nfs4_lock_waiter *waiter = wait->private; 6227 + 
struct nfs_lowner *lowner = &cbnl->cbnl_owner, 6228 + *wowner = waiter->owner; 6229 + 6230 + /* Only wake if the callback was for the same owner */ 6231 + if (lowner->clientid != wowner->clientid || 6232 + lowner->id != wowner->id || 6233 + lowner->s_dev != wowner->s_dev) 6234 + return 0; 6235 + 6236 + /* Make sure it's for the right inode */ 6237 + if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 6238 + return 0; 6239 + 6240 + waiter->notified = true; 6241 + 6242 + /* override "private" so we can use default_wake_function */ 6243 + wait->private = waiter->task; 6244 + ret = autoremove_wake_function(wait, mode, flags, key); 6245 + wait->private = waiter; 6246 + return ret; 6247 + } 6248 + 6249 + static int 6250 + nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6251 + { 6252 + int status = -ERESTARTSYS; 6253 + unsigned long flags; 6254 + struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 6255 + struct nfs_server *server = NFS_SERVER(state->inode); 6256 + struct nfs_client *clp = server->nfs_client; 6257 + wait_queue_head_t *q = &clp->cl_lock_waitq; 6258 + struct nfs_lowner owner = { .clientid = clp->cl_clientid, 6259 + .id = lsp->ls_seqid.owner_id, 6260 + .s_dev = server->s_dev }; 6261 + struct nfs4_lock_waiter waiter = { .task = current, 6262 + .inode = state->inode, 6263 + .owner = &owner, 6264 + .notified = false }; 6265 + wait_queue_t wait; 6266 + 6267 + /* Don't bother with waitqueue if we don't expect a callback */ 6268 + if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 6269 + return nfs4_retry_setlk_simple(state, cmd, request); 6270 + 6271 + init_wait(&wait); 6272 + wait.private = &waiter; 6273 + wait.func = nfs4_wake_lock_waiter; 6274 + add_wait_queue(q, &wait); 6275 + 6276 + while(!signalled()) { 6277 + status = nfs4_proc_setlk(state, cmd, request); 6278 + if ((status != -EAGAIN) || IS_SETLK(cmd)) 6279 + break; 6280 + 6281 + status = -ERESTARTSYS; 6282 + spin_lock_irqsave(&q->lock, flags); 6283 + if 
(waiter.notified) { 6284 + spin_unlock_irqrestore(&q->lock, flags); 6285 + continue; 6286 + } 6287 + set_current_state(TASK_INTERRUPTIBLE); 6288 + spin_unlock_irqrestore(&q->lock, flags); 6289 + 6290 + freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT); 6291 + } 6292 + 6293 + finish_wait(q, &wait); 6294 + return status; 6295 + } 6296 + #else /* !CONFIG_NFS_V4_1 */ 6297 + static inline int 6298 + nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6299 + { 6300 + return nfs4_retry_setlk_simple(state, cmd, request); 6301 + } 6302 + #endif 6303 + 6315 6304 static int 6316 6305 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 6317 6306 { 6318 6307 struct nfs_open_context *ctx; 6319 6308 struct nfs4_state *state; 6320 - unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6321 6309 int status; 6322 6310 6323 6311 /* verify open state */ ··· 6456 6220 6457 6221 if (state == NULL) 6458 6222 return -ENOLCK; 6223 + 6224 + if ((request->fl_flags & FL_POSIX) && 6225 + !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6226 + return -ENOLCK; 6227 + 6459 6228 /* 6460 6229 * Don't rely on the VFS having checked the file open mode, 6461 6230 * since it won't do this for flock() locks. 
··· 6475 6234 return -EBADF; 6476 6235 } 6477 6236 6478 - do { 6479 - status = nfs4_proc_setlk(state, cmd, request); 6480 - if ((status != -EAGAIN) || IS_SETLK(cmd)) 6481 - break; 6482 - timeout = nfs4_set_lock_task_retry(timeout); 6483 - status = -ERESTARTSYS; 6484 - if (signalled()) 6485 - break; 6486 - } while(status < 0); 6487 - return status; 6237 + status = nfs4_set_lock_state(state, request); 6238 + if (status != 0) 6239 + return status; 6240 + 6241 + return nfs4_retry_setlk(state, cmd, request); 6488 6242 } 6489 6243 6490 6244 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) ··· 7340 7104 return 0; 7341 7105 } 7342 7106 7343 - /* 7344 - * _nfs4_proc_exchange_id() 7345 - * 7346 - * Wrapper for EXCHANGE_ID operation. 7347 - */ 7348 - static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 7349 - u32 sp4_how) 7107 + struct nfs41_exchange_id_data { 7108 + struct nfs41_exchange_id_res res; 7109 + struct nfs41_exchange_id_args args; 7110 + struct rpc_xprt *xprt; 7111 + int rpc_status; 7112 + }; 7113 + 7114 + static void nfs4_exchange_id_done(struct rpc_task *task, void *data) 7350 7115 { 7351 - nfs4_verifier verifier; 7352 - struct nfs41_exchange_id_args args = { 7353 - .verifier = &verifier, 7354 - .client = clp, 7355 - #ifdef CONFIG_NFS_V4_1_MIGRATION 7356 - .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7357 - EXCHGID4_FLAG_BIND_PRINC_STATEID | 7358 - EXCHGID4_FLAG_SUPP_MOVED_MIGR, 7359 - #else 7360 - .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7361 - EXCHGID4_FLAG_BIND_PRINC_STATEID, 7362 - #endif 7363 - }; 7364 - struct nfs41_exchange_id_res res = { 7365 - 0 7366 - }; 7367 - int status; 7368 - struct rpc_message msg = { 7369 - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 7370 - .rpc_argp = &args, 7371 - .rpc_resp = &res, 7372 - .rpc_cred = cred, 7373 - }; 7116 + struct nfs41_exchange_id_data *cdata = 7117 + (struct nfs41_exchange_id_data *)data; 7118 + struct nfs_client 
*clp = cdata->args.client; 7119 + int status = task->tk_status; 7374 7120 7375 - nfs4_init_boot_verifier(clp, &verifier); 7376 - 7377 - status = nfs4_init_uniform_client_string(clp); 7378 - if (status) 7379 - goto out; 7380 - 7381 - dprintk("NFS call exchange_id auth=%s, '%s'\n", 7382 - clp->cl_rpcclient->cl_auth->au_ops->au_name, 7383 - clp->cl_owner_id); 7384 - 7385 - res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 7386 - GFP_NOFS); 7387 - if (unlikely(res.server_owner == NULL)) { 7388 - status = -ENOMEM; 7389 - goto out; 7390 - } 7391 - 7392 - res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 7393 - GFP_NOFS); 7394 - if (unlikely(res.server_scope == NULL)) { 7395 - status = -ENOMEM; 7396 - goto out_server_owner; 7397 - } 7398 - 7399 - res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 7400 - if (unlikely(res.impl_id == NULL)) { 7401 - status = -ENOMEM; 7402 - goto out_server_scope; 7403 - } 7404 - 7405 - switch (sp4_how) { 7406 - case SP4_NONE: 7407 - args.state_protect.how = SP4_NONE; 7408 - break; 7409 - 7410 - case SP4_MACH_CRED: 7411 - args.state_protect = nfs4_sp4_mach_cred_request; 7412 - break; 7413 - 7414 - default: 7415 - /* unsupported! 
*/ 7416 - WARN_ON_ONCE(1); 7417 - status = -EINVAL; 7418 - goto out_impl_id; 7419 - } 7420 - 7421 - status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7422 7121 trace_nfs4_exchange_id(clp, status); 7423 - if (status == 0) 7424 - status = nfs4_check_cl_exchange_flags(res.flags); 7425 7122 7426 7123 if (status == 0) 7427 - status = nfs4_sp4_select_mode(clp, &res.state_protect); 7124 + status = nfs4_check_cl_exchange_flags(cdata->res.flags); 7125 + 7126 + if (cdata->xprt && status == 0) { 7127 + status = nfs4_detect_session_trunking(clp, &cdata->res, 7128 + cdata->xprt); 7129 + goto out; 7130 + } 7131 + 7132 + if (status == 0) 7133 + status = nfs4_sp4_select_mode(clp, &cdata->res.state_protect); 7428 7134 7429 7135 if (status == 0) { 7430 - clp->cl_clientid = res.clientid; 7431 - clp->cl_exchange_flags = res.flags; 7136 + clp->cl_clientid = cdata->res.clientid; 7137 + clp->cl_exchange_flags = cdata->res.flags; 7432 7138 /* Client ID is not confirmed */ 7433 - if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7139 + if (!(cdata->res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7434 7140 clear_bit(NFS4_SESSION_ESTABLISHED, 7435 - &clp->cl_session->session_state); 7436 - clp->cl_seqid = res.seqid; 7141 + &clp->cl_session->session_state); 7142 + clp->cl_seqid = cdata->res.seqid; 7437 7143 } 7438 7144 7439 7145 kfree(clp->cl_serverowner); 7440 - clp->cl_serverowner = res.server_owner; 7441 - res.server_owner = NULL; 7146 + clp->cl_serverowner = cdata->res.server_owner; 7147 + cdata->res.server_owner = NULL; 7442 7148 7443 7149 /* use the most recent implementation id */ 7444 7150 kfree(clp->cl_implid); 7445 - clp->cl_implid = res.impl_id; 7446 - res.impl_id = NULL; 7151 + clp->cl_implid = cdata->res.impl_id; 7152 + cdata->res.impl_id = NULL; 7447 7153 7448 7154 if (clp->cl_serverscope != NULL && 7449 7155 !nfs41_same_server_scope(clp->cl_serverscope, 7450 - res.server_scope)) { 7156 + cdata->res.server_scope)) { 7451 7157 dprintk("%s: server_scope mismatch 
detected\n", 7452 7158 __func__); 7453 7159 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); ··· 7398 7220 } 7399 7221 7400 7222 if (clp->cl_serverscope == NULL) { 7401 - clp->cl_serverscope = res.server_scope; 7402 - res.server_scope = NULL; 7223 + clp->cl_serverscope = cdata->res.server_scope; 7224 + cdata->res.server_scope = NULL; 7403 7225 } 7226 + /* Save the EXCHANGE_ID verifier session trunk tests */ 7227 + memcpy(clp->cl_confirm.data, cdata->args.verifier->data, 7228 + sizeof(clp->cl_confirm.data)); 7229 + } 7230 + out: 7231 + cdata->rpc_status = status; 7232 + return; 7233 + } 7234 + 7235 + static void nfs4_exchange_id_release(void *data) 7236 + { 7237 + struct nfs41_exchange_id_data *cdata = 7238 + (struct nfs41_exchange_id_data *)data; 7239 + 7240 + nfs_put_client(cdata->args.client); 7241 + if (cdata->xprt) { 7242 + xprt_put(cdata->xprt); 7243 + rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); 7244 + } 7245 + kfree(cdata->res.impl_id); 7246 + kfree(cdata->res.server_scope); 7247 + kfree(cdata->res.server_owner); 7248 + kfree(cdata); 7249 + } 7250 + 7251 + static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 7252 + .rpc_call_done = nfs4_exchange_id_done, 7253 + .rpc_release = nfs4_exchange_id_release, 7254 + }; 7255 + 7256 + /* 7257 + * _nfs4_proc_exchange_id() 7258 + * 7259 + * Wrapper for EXCHANGE_ID operation. 
7260 + */ 7261 + static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 7262 + u32 sp4_how, struct rpc_xprt *xprt) 7263 + { 7264 + nfs4_verifier verifier; 7265 + struct rpc_message msg = { 7266 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 7267 + .rpc_cred = cred, 7268 + }; 7269 + struct rpc_task_setup task_setup_data = { 7270 + .rpc_client = clp->cl_rpcclient, 7271 + .callback_ops = &nfs4_exchange_id_call_ops, 7272 + .rpc_message = &msg, 7273 + .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7274 + }; 7275 + struct nfs41_exchange_id_data *calldata; 7276 + struct rpc_task *task; 7277 + int status = -EIO; 7278 + 7279 + if (!atomic_inc_not_zero(&clp->cl_count)) 7280 + goto out; 7281 + 7282 + status = -ENOMEM; 7283 + calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7284 + if (!calldata) 7285 + goto out; 7286 + 7287 + if (!xprt) 7288 + nfs4_init_boot_verifier(clp, &verifier); 7289 + 7290 + status = nfs4_init_uniform_client_string(clp); 7291 + if (status) 7292 + goto out_calldata; 7293 + 7294 + dprintk("NFS call exchange_id auth=%s, '%s'\n", 7295 + clp->cl_rpcclient->cl_auth->au_ops->au_name, 7296 + clp->cl_owner_id); 7297 + 7298 + calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 7299 + GFP_NOFS); 7300 + status = -ENOMEM; 7301 + if (unlikely(calldata->res.server_owner == NULL)) 7302 + goto out_calldata; 7303 + 7304 + calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 7305 + GFP_NOFS); 7306 + if (unlikely(calldata->res.server_scope == NULL)) 7307 + goto out_server_owner; 7308 + 7309 + calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 7310 + if (unlikely(calldata->res.impl_id == NULL)) 7311 + goto out_server_scope; 7312 + 7313 + switch (sp4_how) { 7314 + case SP4_NONE: 7315 + calldata->args.state_protect.how = SP4_NONE; 7316 + break; 7317 + 7318 + case SP4_MACH_CRED: 7319 + calldata->args.state_protect = nfs4_sp4_mach_cred_request; 7320 + break; 7321 + 7322 + default: 
7323 + /* unsupported! */ 7324 + WARN_ON_ONCE(1); 7325 + status = -EINVAL; 7326 + goto out_impl_id; 7327 + } 7328 + if (xprt) { 7329 + calldata->xprt = xprt; 7330 + task_setup_data.rpc_xprt = xprt; 7331 + task_setup_data.flags = 7332 + RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC; 7333 + calldata->args.verifier = &clp->cl_confirm; 7334 + } else { 7335 + calldata->args.verifier = &verifier; 7336 + } 7337 + calldata->args.client = clp; 7338 + #ifdef CONFIG_NFS_V4_1_MIGRATION 7339 + calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7340 + EXCHGID4_FLAG_BIND_PRINC_STATEID | 7341 + EXCHGID4_FLAG_SUPP_MOVED_MIGR, 7342 + #else 7343 + calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7344 + EXCHGID4_FLAG_BIND_PRINC_STATEID, 7345 + #endif 7346 + msg.rpc_argp = &calldata->args; 7347 + msg.rpc_resp = &calldata->res; 7348 + task_setup_data.callback_data = calldata; 7349 + 7350 + task = rpc_run_task(&task_setup_data); 7351 + if (IS_ERR(task)) { 7352 + status = PTR_ERR(task); 7353 + goto out_impl_id; 7404 7354 } 7405 7355 7406 - out_impl_id: 7407 - kfree(res.impl_id); 7408 - out_server_scope: 7409 - kfree(res.server_scope); 7410 - out_server_owner: 7411 - kfree(res.server_owner); 7356 + if (!xprt) { 7357 + status = rpc_wait_for_completion_task(task); 7358 + if (!status) 7359 + status = calldata->rpc_status; 7360 + } else /* session trunking test */ 7361 + status = calldata->rpc_status; 7362 + 7363 + rpc_put_task(task); 7412 7364 out: 7413 7365 if (clp->cl_implid != NULL) 7414 7366 dprintk("NFS reply exchange_id: Server Implementation ID: " ··· 7548 7240 clp->cl_implid->date.nseconds); 7549 7241 dprintk("NFS reply exchange_id: %d\n", status); 7550 7242 return status; 7243 + 7244 + out_impl_id: 7245 + kfree(calldata->res.impl_id); 7246 + out_server_scope: 7247 + kfree(calldata->res.server_scope); 7248 + out_server_owner: 7249 + kfree(calldata->res.server_owner); 7250 + out_calldata: 7251 + kfree(calldata); 7252 + goto out; 7551 7253 } 7552 7254 7553 7255 /* ··· 7580 
7262 /* try SP4_MACH_CRED if krb5i/p */ 7581 7263 if (authflavor == RPC_AUTH_GSS_KRB5I || 7582 7264 authflavor == RPC_AUTH_GSS_KRB5P) { 7583 - status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 7265 + status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED, NULL); 7584 7266 if (!status) 7585 7267 return 0; 7586 7268 } 7587 7269 7588 7270 /* try SP4_NONE */ 7589 - return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 7271 + return _nfs4_proc_exchange_id(clp, cred, SP4_NONE, NULL); 7590 7272 } 7273 + 7274 + /** 7275 + * nfs4_test_session_trunk 7276 + * 7277 + * This is an add_xprt_test() test function called from 7278 + * rpc_clnt_setup_test_and_add_xprt. 7279 + * 7280 + * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt 7281 + * and is dereferrenced in nfs4_exchange_id_release 7282 + * 7283 + * Upon success, add the new transport to the rpc_clnt 7284 + * 7285 + * @clnt: struct rpc_clnt to get new transport 7286 + * @xprt: the rpc_xprt to test 7287 + * @data: call data for _nfs4_proc_exchange_id. 7288 + */ 7289 + int nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 7290 + void *data) 7291 + { 7292 + struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data; 7293 + u32 sp4_how; 7294 + 7295 + dprintk("--> %s try %s\n", __func__, 7296 + xprt->address_strings[RPC_DISPLAY_ADDR]); 7297 + 7298 + sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 7299 + 7300 + /* Test connection for session trunking. 
Async exchange_id call */ 7301 + return _nfs4_proc_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 7302 + } 7303 + EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 7591 7304 7592 7305 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 7593 7306 struct rpc_cred *cred) ··· 7812 7463 args->bc_attrs.max_resp_sz = max_bc_payload; 7813 7464 args->bc_attrs.max_resp_sz_cached = 0; 7814 7465 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7815 - args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS; 7466 + args->bc_attrs.max_reqs = min_t(unsigned short, max_session_cb_slots, 1); 7816 7467 7817 7468 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7818 7469 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", ··· 7859 7510 return -EINVAL; 7860 7511 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7861 7512 return -EINVAL; 7862 - /* These would render the backchannel useless: */ 7863 - if (rcvd->max_ops != sent->max_ops) 7513 + if (rcvd->max_ops > sent->max_ops) 7864 7514 return -EINVAL; 7865 - if (rcvd->max_reqs != sent->max_reqs) 7515 + if (rcvd->max_reqs > sent->max_reqs) 7866 7516 return -EINVAL; 7867 7517 out: 7868 7518 return 0; ··· 8330 7982 case -NFS4ERR_RECALLCONFLICT: 8331 7983 status = -ERECALLCONFLICT; 8332 7984 break; 7985 + case -NFS4ERR_DELEG_REVOKED: 7986 + case -NFS4ERR_ADMIN_REVOKED: 8333 7987 case -NFS4ERR_EXPIRED: 8334 7988 case -NFS4ERR_BAD_STATEID: 8335 7989 exception->timeout = 0; ··· 8343 7993 &lgp->args.ctx->state->stateid)) { 8344 7994 spin_unlock(&inode->i_lock); 8345 7995 exception->state = lgp->args.ctx->state; 7996 + exception->stateid = &lgp->args.stateid; 8346 7997 break; 8347 7998 } 8348 7999 ··· 8942 8591 return -res.status; 8943 8592 } 8944 8593 8594 + static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 8595 + int err, struct nfs4_exception *exception) 8596 + { 8597 + exception->retry = 0; 8598 + switch(err) { 8599 + case -NFS4ERR_DELAY: 8600 + case -NFS4ERR_RETRY_UNCACHED_REP: 8601 + 
nfs4_handle_exception(server, err, exception); 8602 + break; 8603 + case -NFS4ERR_BADSESSION: 8604 + case -NFS4ERR_BADSLOT: 8605 + case -NFS4ERR_BAD_HIGH_SLOT: 8606 + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 8607 + case -NFS4ERR_DEADSESSION: 8608 + nfs4_do_handle_exception(server, err, exception); 8609 + } 8610 + } 8611 + 8945 8612 /** 8946 8613 * nfs41_test_stateid - perform a TEST_STATEID operation 8947 8614 * ··· 8979 8610 int err; 8980 8611 do { 8981 8612 err = _nfs41_test_stateid(server, stateid, cred); 8982 - if (err != -NFS4ERR_DELAY) 8983 - break; 8984 - nfs4_handle_exception(server, err, &exception); 8613 + nfs4_handle_delay_or_session_error(server, err, &exception); 8985 8614 } while (exception.retry); 8986 8615 return err; 8987 8616 } ··· 9024 8657 }; 9025 8658 9026 8659 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 9027 - nfs4_stateid *stateid, 8660 + const nfs4_stateid *stateid, 9028 8661 struct rpc_cred *cred, 9029 8662 bool privileged) 9030 8663 { ··· 9054 8687 9055 8688 msg.rpc_argp = &data->args; 9056 8689 msg.rpc_resp = &data->res; 9057 - nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8690 + nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 9058 8691 if (privileged) 9059 8692 nfs4_set_sequence_privileged(&data->args.seq_args); 9060 8693 ··· 9067 8700 * @server: server / transport on which to perform the operation 9068 8701 * @stateid: state ID to release 9069 8702 * @cred: credential 8703 + * @is_recovery: set to true if this call needs to be privileged 9070 8704 * 9071 - * Returns NFS_OK if the server freed "stateid". Otherwise a 9072 - * negative NFS4ERR value is returned. 8705 + * Note: this function is always asynchronous. 
9073 8706 */ 9074 8707 static int nfs41_free_stateid(struct nfs_server *server, 9075 - nfs4_stateid *stateid, 9076 - struct rpc_cred *cred) 8708 + const nfs4_stateid *stateid, 8709 + struct rpc_cred *cred, 8710 + bool is_recovery) 9077 8711 { 9078 8712 struct rpc_task *task; 9079 - int ret; 9080 8713 9081 - task = _nfs41_free_stateid(server, stateid, cred, true); 8714 + task = _nfs41_free_stateid(server, stateid, cred, is_recovery); 9082 8715 if (IS_ERR(task)) 9083 8716 return PTR_ERR(task); 9084 - ret = rpc_wait_for_completion_task(task); 9085 - if (!ret) 9086 - ret = task->tk_status; 9087 8717 rpc_put_task(task); 9088 - return ret; 8718 + return 0; 9089 8719 } 9090 8720 9091 8721 static void 9092 8722 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 9093 8723 { 9094 - struct rpc_task *task; 9095 8724 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 9096 8725 9097 - task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8726 + nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 9098 8727 nfs4_free_lock_state(server, lsp); 9099 - if (IS_ERR(task)) 9100 - return; 9101 - rpc_put_task(task); 9102 8728 } 9103 8729 9104 8730 static bool nfs41_match_stateid(const nfs4_stateid *s1, ··· 9195 8835 .match_stateid = nfs4_match_stateid, 9196 8836 .find_root_sec = nfs4_find_root_sec, 9197 8837 .free_lock_state = nfs4_release_lockowner, 8838 + .test_and_free_expired = nfs40_test_and_free_expired_stateid, 9198 8839 .alloc_seqid = nfs_alloc_seqid, 9199 8840 .call_sync_ops = &nfs40_call_sync_ops, 9200 8841 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, ··· 9223 8862 .match_stateid = nfs41_match_stateid, 9224 8863 .find_root_sec = nfs41_find_root_sec, 9225 8864 .free_lock_state = nfs41_free_lock_state, 8865 + .test_and_free_expired = nfs41_test_and_free_expired_stateid, 9226 8866 .alloc_seqid = nfs_alloc_no_seqid, 8867 + .session_trunk = nfs4_test_session_trunk, 9227 8868 .call_sync_ops = &nfs41_call_sync_ops, 9228 8869 
.reboot_recovery_ops = &nfs41_reboot_recovery_ops, 9229 8870 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, ··· 9254 8891 .find_root_sec = nfs41_find_root_sec, 9255 8892 .free_lock_state = nfs41_free_lock_state, 9256 8893 .call_sync_ops = &nfs41_call_sync_ops, 8894 + .test_and_free_expired = nfs41_test_and_free_expired_stateid, 9257 8895 .alloc_seqid = nfs_alloc_no_seqid, 8896 + .session_trunk = nfs4_test_session_trunk, 9258 8897 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 9259 8898 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 9260 8899 .state_renewal_ops = &nfs41_state_renewal_ops,
+2
fs/nfs/nfs4session.h
··· 9 9 10 10 /* maximum number of slots to use */ 11 11 #define NFS4_DEF_SLOT_TABLE_SIZE (64U) 12 + #define NFS4_DEF_CB_SLOT_TABLE_SIZE (1U) 12 13 #define NFS4_MAX_SLOT_TABLE (1024U) 13 14 #define NFS4_NO_SLOT ((u32)-1) 14 15 ··· 23 22 u32 slot_nr; 24 23 u32 seq_nr; 25 24 unsigned int interrupted : 1, 25 + privileged : 1, 26 26 seq_done : 1; 27 27 }; 28 28
+68 -16
fs/nfs/nfs4state.c
··· 991 991 { 992 992 int ret; 993 993 994 + if (!nfs4_valid_open_stateid(state)) 995 + return -EIO; 994 996 if (cred != NULL) 995 997 *cred = NULL; 996 998 ret = nfs4_copy_lock_stateid(dst, state, lockowner); ··· 1305 1303 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 1306 1304 { 1307 1305 1306 + if (!nfs4_valid_open_stateid(state)) 1307 + return 0; 1308 1308 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1309 1309 /* Don't recover state that expired before the reboot */ 1310 1310 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) { ··· 1320 1316 1321 1317 int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) 1322 1318 { 1319 + if (!nfs4_valid_open_stateid(state)) 1320 + return 0; 1323 1321 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); 1324 1322 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1325 1323 set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags); ··· 1333 1327 { 1334 1328 struct nfs_client *clp = server->nfs_client; 1335 1329 1336 - if (!nfs4_valid_open_stateid(state)) 1330 + if (!nfs4_state_mark_reclaim_nograce(clp, state)) 1337 1331 return -EBADF; 1338 - nfs4_state_mark_reclaim_nograce(clp, state); 1339 1332 dprintk("%s: scheduling stateid recovery for server %s\n", __func__, 1340 1333 clp->cl_hostname); 1341 1334 nfs4_schedule_state_manager(clp); 1342 1335 return 0; 1343 1336 } 1344 1337 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); 1338 + 1339 + static struct nfs4_lock_state * 1340 + nfs_state_find_lock_state_by_stateid(struct nfs4_state *state, 1341 + const nfs4_stateid *stateid) 1342 + { 1343 + struct nfs4_lock_state *pos; 1344 + 1345 + list_for_each_entry(pos, &state->lock_states, ls_locks) { 1346 + if (!test_bit(NFS_LOCK_INITIALIZED, &pos->ls_flags)) 1347 + continue; 1348 + if (nfs4_stateid_match_other(&pos->ls_stateid, stateid)) 1349 + return pos; 1350 + } 1351 + return NULL; 1352 + } 1353 + 1354 + static bool 
nfs_state_lock_state_matches_stateid(struct nfs4_state *state, 1355 + const nfs4_stateid *stateid) 1356 + { 1357 + bool found = false; 1358 + 1359 + if (test_bit(LK_STATE_IN_USE, &state->flags)) { 1360 + spin_lock(&state->state_lock); 1361 + if (nfs_state_find_lock_state_by_stateid(state, stateid)) 1362 + found = true; 1363 + spin_unlock(&state->state_lock); 1364 + } 1365 + return found; 1366 + } 1345 1367 1346 1368 void nfs_inode_find_state_and_recover(struct inode *inode, 1347 1369 const nfs4_stateid *stateid) ··· 1385 1351 state = ctx->state; 1386 1352 if (state == NULL) 1387 1353 continue; 1388 - if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) 1354 + if (nfs4_stateid_match_other(&state->stateid, stateid) && 1355 + nfs4_state_mark_reclaim_nograce(clp, state)) { 1356 + found = true; 1389 1357 continue; 1390 - if (!nfs4_stateid_match(&state->stateid, stateid)) 1391 - continue; 1392 - nfs4_state_mark_reclaim_nograce(clp, state); 1393 - found = true; 1358 + } 1359 + if (nfs_state_lock_state_matches_stateid(state, stateid) && 1360 + nfs4_state_mark_reclaim_nograce(clp, state)) 1361 + found = true; 1394 1362 } 1395 1363 spin_unlock(&inode->i_lock); 1364 + 1365 + nfs_inode_find_delegation_state_and_recover(inode, stateid); 1396 1366 if (found) 1397 1367 nfs4_schedule_state_manager(clp); 1398 1368 } ··· 1536 1498 __func__, status); 1537 1499 case -ENOENT: 1538 1500 case -ENOMEM: 1501 + case -EACCES: 1502 + case -EROFS: 1503 + case -EIO: 1539 1504 case -ESTALE: 1540 1505 /* Open state on this file cannot be recovered */ 1541 1506 nfs4_state_mark_recovery_failed(state, status); ··· 1697 1656 put_rpccred(cred); 1698 1657 } 1699 1658 1700 - static void nfs_delegation_clear_all(struct nfs_client *clp) 1701 - { 1702 - nfs_delegation_mark_reclaim(clp); 1703 - nfs_delegation_reap_unclaimed(clp); 1704 - } 1705 - 1706 1659 static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp) 1707 1660 { 1708 - nfs_delegation_clear_all(clp); 1661 + 
nfs_mark_test_expired_all_delegations(clp); 1709 1662 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); 1710 1663 } 1711 1664 ··· 2230 2195 2231 2196 static void nfs41_handle_some_state_revoked(struct nfs_client *clp) 2232 2197 { 2233 - nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); 2198 + nfs4_state_start_reclaim_nograce(clp); 2234 2199 nfs4_schedule_state_manager(clp); 2235 2200 2236 2201 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); ··· 2262 2227 nfs4_schedule_state_manager(clp); 2263 2228 } 2264 2229 2265 - void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) 2230 + void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags, 2231 + bool recovery) 2266 2232 { 2267 2233 if (!flags) 2268 2234 return; 2269 2235 2270 2236 dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n", 2271 2237 __func__, clp->cl_hostname, clp->cl_clientid, flags); 2238 + /* 2239 + * If we're called from the state manager thread, then assume we're 2240 + * already handling the RECLAIM_NEEDED and/or STATE_REVOKED. 2241 + * Those flags are expected to remain set until we're done 2242 + * recovering (see RFC5661, section 18.46.3). 2243 + */ 2244 + if (recovery) 2245 + goto out_recovery; 2272 2246 2273 2247 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 2274 2248 nfs41_handle_server_reboot(clp); ··· 2290 2246 nfs4_schedule_lease_moved_recovery(clp); 2291 2247 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 2292 2248 nfs41_handle_recallable_state_revoked(clp); 2249 + out_recovery: 2293 2250 if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT) 2294 2251 nfs41_handle_backchannel_fault(clp); 2295 2252 else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | ··· 2453 2408 if (status < 0) 2454 2409 goto out_error; 2455 2410 nfs4_state_end_reclaim_reboot(clp); 2411 + } 2412 + 2413 + /* Detect expired delegations... 
*/ 2414 + if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) { 2415 + section = "detect expired delegations"; 2416 + nfs_reap_expired_delegations(clp); 2417 + continue; 2456 2418 } 2457 2419 2458 2420 /* Now recover expired state... */
+22 -20
fs/nfs/nfs4xdr.c
··· 1850 1850 *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */ 1851 1851 1852 1852 /* authsys_parms rfc1831 */ 1853 - *p++ = cpu_to_be32(nn->boot_time.tv_nsec); /* stamp */ 1853 + *p++ = cpu_to_be32(ktime_to_ns(nn->boot_time)); /* stamp */ 1854 1854 p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen); 1855 1855 *p++ = cpu_to_be32(0); /* UID */ 1856 1856 *p++ = cpu_to_be32(0); /* GID */ ··· 4725 4725 } 4726 4726 4727 4727 /* 4728 - * Decode potentially multiple layout types. Currently we only support 4729 - * one layout driver per file system. 4728 + * Decode potentially multiple layout types. 4730 4729 */ 4731 - static int decode_first_pnfs_layout_type(struct xdr_stream *xdr, 4732 - uint32_t *layouttype) 4730 + static int decode_pnfs_layout_types(struct xdr_stream *xdr, 4731 + struct nfs_fsinfo *fsinfo) 4733 4732 { 4734 4733 __be32 *p; 4735 - int num; 4734 + uint32_t i; 4736 4735 4737 4736 p = xdr_inline_decode(xdr, 4); 4738 4737 if (unlikely(!p)) 4739 4738 goto out_overflow; 4740 - num = be32_to_cpup(p); 4739 + fsinfo->nlayouttypes = be32_to_cpup(p); 4741 4740 4742 4741 /* pNFS is not supported by the underlying file system */ 4743 - if (num == 0) { 4744 - *layouttype = 0; 4742 + if (fsinfo->nlayouttypes == 0) 4745 4743 return 0; 4746 - } 4747 - if (num > 1) 4748 - printk(KERN_INFO "NFS: %s: Warning: Multiple pNFS layout " 4749 - "drivers per filesystem not supported\n", __func__); 4750 4744 4751 4745 /* Decode and set first layout type, move xdr->p past unused types */ 4752 - p = xdr_inline_decode(xdr, num * 4); 4746 + p = xdr_inline_decode(xdr, fsinfo->nlayouttypes * 4); 4753 4747 if (unlikely(!p)) 4754 4748 goto out_overflow; 4755 - *layouttype = be32_to_cpup(p); 4749 + 4750 + /* If we get too many, then just cap it at the max */ 4751 + if (fsinfo->nlayouttypes > NFS_MAX_LAYOUT_TYPES) { 4752 + printk(KERN_INFO "NFS: %s: Warning: Too many (%u) pNFS layout types\n", 4753 + __func__, fsinfo->nlayouttypes); 4754 + fsinfo->nlayouttypes = 
NFS_MAX_LAYOUT_TYPES; 4755 + } 4756 + 4757 + for(i = 0; i < fsinfo->nlayouttypes; ++i) 4758 + fsinfo->layouttype[i] = be32_to_cpup(p++); 4756 4759 return 0; 4757 4760 out_overflow: 4758 4761 print_overflow_msg(__func__, xdr); ··· 4767 4764 * Note we must ensure that layouttype is set in any non-error case. 4768 4765 */ 4769 4766 static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap, 4770 - uint32_t *layouttype) 4767 + struct nfs_fsinfo *fsinfo) 4771 4768 { 4772 4769 int status = 0; 4773 4770 ··· 4775 4772 if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U))) 4776 4773 return -EIO; 4777 4774 if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) { 4778 - status = decode_first_pnfs_layout_type(xdr, layouttype); 4775 + status = decode_pnfs_layout_types(xdr, fsinfo); 4779 4776 bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES; 4780 - } else 4781 - *layouttype = 0; 4777 + } 4782 4778 return status; 4783 4779 } 4784 4780 ··· 4858 4856 status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta); 4859 4857 if (status != 0) 4860 4858 goto xdr_error; 4861 - status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype); 4859 + status = decode_attr_pnfstype(xdr, bitmap, fsinfo); 4862 4860 if (status != 0) 4863 4861 goto xdr_error; 4864 4862
+58 -15
fs/nfs/pnfs.c
··· 30 30 #include <linux/nfs_fs.h> 31 31 #include <linux/nfs_page.h> 32 32 #include <linux/module.h> 33 + #include <linux/sort.h> 33 34 #include "internal.h" 34 35 #include "pnfs.h" 35 36 #include "iostat.h" ··· 100 99 } 101 100 102 101 /* 102 + * When the server sends a list of layout types, we choose one in the order 103 + * given in the list below. 104 + * 105 + * FIXME: should this list be configurable in some fashion? module param? 106 + * mount option? something else? 107 + */ 108 + static const u32 ld_prefs[] = { 109 + LAYOUT_SCSI, 110 + LAYOUT_BLOCK_VOLUME, 111 + LAYOUT_OSD2_OBJECTS, 112 + LAYOUT_FLEX_FILES, 113 + LAYOUT_NFSV4_1_FILES, 114 + 0 115 + }; 116 + 117 + static int 118 + ld_cmp(const void *e1, const void *e2) 119 + { 120 + u32 ld1 = *((u32 *)e1); 121 + u32 ld2 = *((u32 *)e2); 122 + int i; 123 + 124 + for (i = 0; ld_prefs[i] != 0; i++) { 125 + if (ld1 == ld_prefs[i]) 126 + return -1; 127 + 128 + if (ld2 == ld_prefs[i]) 129 + return 1; 130 + } 131 + return 0; 132 + } 133 + 134 + /* 103 135 * Try to set the server's pnfs module to the pnfs layout type specified by id. 104 136 * Currently only one pNFS layout driver per filesystem is supported. 105 137 * 106 - * @id layout type. Zero (illegal layout type) indicates pNFS not in use. 138 + * @ids array of layout types supported by MDS. 
107 139 */ 108 140 void 109 141 set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, 110 - u32 id) 142 + struct nfs_fsinfo *fsinfo) 111 143 { 112 144 struct pnfs_layoutdriver_type *ld_type = NULL; 145 + u32 id; 146 + int i; 113 147 114 - if (id == 0) 115 - goto out_no_driver; 116 148 if (!(server->nfs_client->cl_exchange_flags & 117 149 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) { 118 - printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n", 119 - __func__, id, server->nfs_client->cl_exchange_flags); 150 + printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n", 151 + __func__, server->nfs_client->cl_exchange_flags); 120 152 goto out_no_driver; 121 153 } 122 - ld_type = find_pnfs_driver(id); 123 - if (!ld_type) { 124 - request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id); 154 + 155 + sort(fsinfo->layouttype, fsinfo->nlayouttypes, 156 + sizeof(*fsinfo->layouttype), ld_cmp, NULL); 157 + 158 + for (i = 0; i < fsinfo->nlayouttypes; i++) { 159 + id = fsinfo->layouttype[i]; 125 160 ld_type = find_pnfs_driver(id); 126 161 if (!ld_type) { 127 - dprintk("%s: No pNFS module found for %u.\n", 128 - __func__, id); 129 - goto out_no_driver; 162 + request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, 163 + id); 164 + ld_type = find_pnfs_driver(id); 130 165 } 166 + if (ld_type) 167 + break; 131 168 } 169 + 170 + if (!ld_type) { 171 + dprintk("%s: No pNFS module found!\n", __func__); 172 + goto out_no_driver; 173 + } 174 + 132 175 server->pnfs_curr_ld = ld_type; 133 176 if (ld_type->set_layoutdriver 134 177 && ld_type->set_layoutdriver(server, mntfh)) { ··· 2230 2185 */ 2231 2186 void pnfs_ld_read_done(struct nfs_pgio_header *hdr) 2232 2187 { 2233 - if (likely(!hdr->pnfs_error)) { 2234 - __nfs4_read_done_cb(hdr); 2188 + if (likely(!hdr->pnfs_error)) 2235 2189 hdr->mds_ops->rpc_call_done(&hdr->task, hdr); 2236 - } 2237 2190 trace_nfs4_pnfs_read(hdr, hdr->pnfs_error); 2238 2191 if (unlikely(hdr->pnfs_error)) 2239 2192 
pnfs_ld_handle_read_error(hdr);
+3 -2
fs/nfs/pnfs.h
··· 236 236 void pnfs_put_lseg(struct pnfs_layout_segment *lseg); 237 237 void pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg); 238 238 239 - void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32); 239 + void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *); 240 240 void unset_pnfs_layoutdriver(struct nfs_server *); 241 241 void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *); 242 242 int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc); ··· 657 657 } 658 658 659 659 static inline void set_pnfs_layoutdriver(struct nfs_server *s, 660 - const struct nfs_fh *mntfh, u32 id) 660 + const struct nfs_fh *mntfh, 661 + struct nfs_fsinfo *fsinfo) 661 662 { 662 663 } 663 664
+44 -14
fs/nfs/pnfs_nfs.c
··· 690 690 dprintk("%s: DS %s: trying address %s\n", 691 691 __func__, ds->ds_remotestr, da->da_remotestr); 692 692 693 - clp = nfs4_set_ds_client(mds_srv, 694 - (struct sockaddr *)&da->da_addr, 695 - da->da_addrlen, IPPROTO_TCP, 696 - timeo, retrans, minor_version, 697 - au_flavor); 698 - if (!IS_ERR(clp)) 699 - break; 693 + if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) { 694 + struct xprt_create xprt_args = { 695 + .ident = XPRT_TRANSPORT_TCP, 696 + .net = clp->cl_net, 697 + .dstaddr = (struct sockaddr *)&da->da_addr, 698 + .addrlen = da->da_addrlen, 699 + .servername = clp->cl_hostname, 700 + }; 701 + struct nfs4_add_xprt_data xprtdata = { 702 + .clp = clp, 703 + .cred = nfs4_get_clid_cred(clp), 704 + }; 705 + struct rpc_add_xprt_test rpcdata = { 706 + .add_xprt_test = clp->cl_mvops->session_trunk, 707 + .data = &xprtdata, 708 + }; 709 + 710 + /** 711 + * Test this address for session trunking and 712 + * add as an alias 713 + */ 714 + rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 715 + rpc_clnt_setup_test_and_add_xprt, 716 + &rpcdata); 717 + if (xprtdata.cred) 718 + put_rpccred(xprtdata.cred); 719 + } else { 720 + clp = nfs4_set_ds_client(mds_srv, 721 + (struct sockaddr *)&da->da_addr, 722 + da->da_addrlen, IPPROTO_TCP, 723 + timeo, retrans, minor_version, 724 + au_flavor); 725 + if (IS_ERR(clp)) 726 + continue; 727 + 728 + status = nfs4_init_ds_session(clp, 729 + mds_srv->nfs_client->cl_lease_time); 730 + if (status) { 731 + nfs_put_client(clp); 732 + clp = ERR_PTR(-EIO); 733 + continue; 734 + } 735 + 736 + } 700 737 } 701 738 702 739 if (IS_ERR(clp)) { ··· 741 704 goto out; 742 705 } 743 706 744 - status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time); 745 - if (status) 746 - goto out_put; 747 - 748 707 smp_wmb(); 749 708 ds->ds_clp = clp; 750 709 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); 751 710 out: 752 711 return status; 753 - out_put: 754 - nfs_put_client(clp); 755 - goto out; 756 712 } 757 713 758 714 /*
+10
fs/nfs/super.c
··· 2848 2848 * NFS client for backwards compatibility 2849 2849 */ 2850 2850 unsigned int nfs_callback_set_tcpport; 2851 + unsigned short nfs_callback_nr_threads; 2851 2852 /* Default cache timeout is 10 minutes */ 2852 2853 unsigned int nfs_idmap_cache_timeout = 600; 2853 2854 /* Turn off NFSv4 uid/gid mapping when using AUTH_SYS */ 2854 2855 bool nfs4_disable_idmapping = true; 2855 2856 unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE; 2857 + unsigned short max_session_cb_slots = NFS4_DEF_CB_SLOT_TABLE_SIZE; 2856 2858 unsigned short send_implementation_id = 1; 2857 2859 char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN] = ""; 2858 2860 bool recover_lost_locks = false; 2859 2861 2862 + EXPORT_SYMBOL_GPL(nfs_callback_nr_threads); 2860 2863 EXPORT_SYMBOL_GPL(nfs_callback_set_tcpport); 2861 2864 EXPORT_SYMBOL_GPL(nfs_idmap_cache_timeout); 2862 2865 EXPORT_SYMBOL_GPL(nfs4_disable_idmapping); 2863 2866 EXPORT_SYMBOL_GPL(max_session_slots); 2867 + EXPORT_SYMBOL_GPL(max_session_cb_slots); 2864 2868 EXPORT_SYMBOL_GPL(send_implementation_id); 2865 2869 EXPORT_SYMBOL_GPL(nfs4_client_id_uniquifier); 2866 2870 EXPORT_SYMBOL_GPL(recover_lost_locks); ··· 2891 2887 #define param_check_portnr(name, p) __param_check(name, p, unsigned int); 2892 2888 2893 2889 module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644); 2890 + module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644); 2891 + MODULE_PARM_DESC(callback_nr_threads, "Number of threads that will be " 2892 + "assigned to the NFSv4 callback channels."); 2894 2893 module_param(nfs_idmap_cache_timeout, int, 0644); 2895 2894 module_param(nfs4_disable_idmapping, bool, 0644); 2896 2895 module_param_string(nfs4_unique_id, nfs4_client_id_uniquifier, ··· 2903 2896 module_param(max_session_slots, ushort, 0644); 2904 2897 MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 " 2905 2898 "requests the client will negotiate"); 2899 + 
module_param(max_session_cb_slots, ushort, 0644); 2900 + MODULE_PARM_DESC(max_session_slots, "Maximum number of parallel NFSv4.1 " 2901 + "callbacks the client will process for a given server"); 2906 2902 module_param(send_implementation_id, ushort, 0644); 2907 2903 MODULE_PARM_DESC(send_implementation_id, 2908 2904 "Send implementation ID with NFSv4.1 exchange_id");
+1
include/linux/nfs4.h
··· 67 67 NFS4_DELEGATION_STATEID_TYPE, 68 68 NFS4_LAYOUT_STATEID_TYPE, 69 69 NFS4_PNFS_DS_STATEID_TYPE, 70 + NFS4_REVOKED_STATEID_TYPE, 70 71 } type; 71 72 }; 72 73
+3
include/linux/nfs_fs_sb.h
··· 103 103 #define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */ 104 104 #define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */ 105 105 #define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */ 106 + #if IS_ENABLED(CONFIG_NFS_V4_1) 107 + wait_queue_head_t cl_lock_waitq; 108 + #endif /* CONFIG_NFS_V4_1 */ 106 109 #endif /* CONFIG_NFS_V4 */ 107 110 108 111 /* Our own IP address, as a null-terminated string.
+7 -1
include/linux/nfs_xdr.h
··· 125 125 | NFS_ATTR_FATTR_V4_SECURITY_LABEL) 126 126 127 127 /* 128 + * Maximal number of supported layout drivers. 129 + */ 130 + #define NFS_MAX_LAYOUT_TYPES 8 131 + 132 + /* 128 133 * Info on the file system 129 134 */ 130 135 struct nfs_fsinfo { ··· 144 139 __u64 maxfilesize; 145 140 struct timespec time_delta; /* server time granularity */ 146 141 __u32 lease_time; /* in seconds */ 147 - __u32 layouttype; /* supported pnfs layout driver */ 142 + __u32 nlayouttypes; /* number of layouttypes */ 143 + __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ 148 144 __u32 blksize; /* preferred pnfs io block size */ 149 145 __u32 clone_blksize; /* granularity of a CLONE operation */ 150 146 };
+1
include/linux/sunrpc/auth.h
··· 131 131 struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *); 132 132 void (*destroy)(struct rpc_auth *); 133 133 134 + int (*hash_cred)(struct auth_cred *, unsigned int); 134 135 struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); 135 136 struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t); 136 137 int (*list_pseudoflavors)(rpc_authflavor_t *, int);
+17
include/linux/sunrpc/clnt.h
··· 125 125 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ 126 126 }; 127 127 128 + struct rpc_add_xprt_test { 129 + int (*add_xprt_test)(struct rpc_clnt *, 130 + struct rpc_xprt *, 131 + void *calldata); 132 + void *data; 133 + }; 134 + 128 135 /* Values for "flags" field */ 129 136 #define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) 130 137 #define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) ··· 205 198 void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, 206 199 unsigned long timeo); 207 200 201 + int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, 202 + struct rpc_xprt_switch *, 203 + struct rpc_xprt *, 204 + void *); 205 + 208 206 const char *rpc_proc_name(const struct rpc_task *task); 207 + 208 + void rpc_clnt_xprt_switch_put(struct rpc_clnt *); 209 + void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); 210 + bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 211 + const struct sockaddr *sap); 209 212 #endif /* __KERNEL__ */ 210 213 #endif /* _LINUX_SUNRPC_CLNT_H */
+4
include/linux/sunrpc/rpc_rdma.h
··· 46 46 #define RPCRDMA_VERSION 1 47 47 #define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION) 48 48 49 + enum { 50 + RPCRDMA_V1_DEF_INLINE_SIZE = 1024, 51 + }; 52 + 49 53 struct rpcrdma_segment { 50 54 __be32 rs_handle; /* Registered memory handle */ 51 55 __be32 rs_length; /* Length of the chunk in bytes */
+2 -2
include/linux/sunrpc/sched.h
··· 239 239 void *); 240 240 void rpc_wake_up_status(struct rpc_wait_queue *, int); 241 241 void rpc_delay(struct rpc_task *, unsigned long); 242 - void * rpc_malloc(struct rpc_task *, size_t); 243 - void rpc_free(void *); 242 + int rpc_malloc(struct rpc_task *); 243 + void rpc_free(struct rpc_task *); 244 244 int rpciod_up(void); 245 245 void rpciod_down(void); 246 246 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
+12
include/linux/sunrpc/xdr.h
··· 67 67 len; /* Length of XDR encoded message */ 68 68 }; 69 69 70 + static inline void 71 + xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) 72 + { 73 + buf->head[0].iov_base = start; 74 + buf->head[0].iov_len = len; 75 + buf->tail[0].iov_len = 0; 76 + buf->page_len = 0; 77 + buf->flags = 0; 78 + buf->len = 0; 79 + buf->buflen = len; 80 + } 81 + 70 82 /* 71 83 * pre-xdr'ed macros. 72 84 */
+7 -5
include/linux/sunrpc/xprt.h
··· 83 83 void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ 84 84 struct list_head rq_list; 85 85 86 - __u32 * rq_buffer; /* XDR encode buffer */ 87 - size_t rq_callsize, 88 - rq_rcvsize; 86 + void *rq_xprtdata; /* Per-xprt private data */ 87 + void *rq_buffer; /* Call XDR encode buffer */ 88 + size_t rq_callsize; 89 + void *rq_rbuffer; /* Reply XDR decode buffer */ 90 + size_t rq_rcvsize; 89 91 size_t rq_xmit_bytes_sent; /* total bytes sent */ 90 92 size_t rq_reply_bytes_recvd; /* total reply bytes */ 91 93 /* received */ ··· 129 127 void (*rpcbind)(struct rpc_task *task); 130 128 void (*set_port)(struct rpc_xprt *xprt, unsigned short port); 131 129 void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); 132 - void * (*buf_alloc)(struct rpc_task *task, size_t size); 133 - void (*buf_free)(void *buffer); 130 + int (*buf_alloc)(struct rpc_task *task); 131 + void (*buf_free)(struct rpc_task *task); 134 132 int (*send_request)(struct rpc_task *task); 135 133 void (*set_retrans_timeout)(struct rpc_task *task); 136 134 void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
+2
include/linux/sunrpc/xprtmultipath.h
··· 66 66 extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi); 67 67 extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi); 68 68 69 + extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, 70 + const struct sockaddr *sap); 69 71 #endif
+2 -2
include/linux/sunrpc/xprtrdma.h
··· 53 53 #define RPCRDMA_MAX_SLOT_TABLE (256U) 54 54 55 55 #define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */ 56 - #define RPCRDMA_DEF_INLINE (1024) /* default inline thresh */ 57 - #define RPCRDMA_MAX_INLINE (3068) /* max inline thresh */ 56 + #define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */ 57 + #define RPCRDMA_MAX_INLINE (65536) /* max inline thresh */ 58 58 59 59 /* Memory registration strategies, by number. 60 60 * This is part of a kernel / user space API. Do not remove. */
+1 -1
net/sunrpc/auth.c
··· 551 551 *entry, *new; 552 552 unsigned int nr; 553 553 554 - nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits); 554 + nr = auth->au_ops->hash_cred(acred, cache->hashbits); 555 555 556 556 rcu_read_lock(); 557 557 hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
+9
net/sunrpc/auth_generic.c
··· 78 78 return auth->au_ops->lookup_cred(auth, acred, lookupflags); 79 79 } 80 80 81 + static int 82 + generic_hash_cred(struct auth_cred *acred, unsigned int hashbits) 83 + { 84 + return hash_64(from_kgid(&init_user_ns, acred->gid) | 85 + ((u64)from_kuid(&init_user_ns, acred->uid) << 86 + (sizeof(gid_t) * 8)), hashbits); 87 + } 88 + 81 89 /* 82 90 * Lookup generic creds for current process 83 91 */ ··· 266 258 static const struct rpc_authops generic_auth_ops = { 267 259 .owner = THIS_MODULE, 268 260 .au_name = "Generic", 261 + .hash_cred = generic_hash_cred, 269 262 .lookup_cred = generic_lookup_cred, 270 263 .crcreate = generic_create_cred, 271 264 .key_timeout = generic_key_timeout,
+7
net/sunrpc/auth_gss/auth_gss.c
··· 1298 1298 gss_destroy_nullcred(cred); 1299 1299 } 1300 1300 1301 + static int 1302 + gss_hash_cred(struct auth_cred *acred, unsigned int hashbits) 1303 + { 1304 + return hash_64(from_kuid(&init_user_ns, acred->uid), hashbits); 1305 + } 1306 + 1301 1307 /* 1302 1308 * Lookup RPCSEC_GSS cred for the current process 1303 1309 */ ··· 1988 1982 .au_name = "RPCSEC_GSS", 1989 1983 .create = gss_create, 1990 1984 .destroy = gss_destroy, 1985 + .hash_cred = gss_hash_cred, 1991 1986 .lookup_cred = gss_lookup_cred, 1992 1987 .crcreate = gss_create_cred, 1993 1988 .list_pseudoflavors = gss_mech_list_pseudoflavors,
+9
net/sunrpc/auth_unix.c
··· 46 46 rpcauth_clear_credcache(auth->au_credcache); 47 47 } 48 48 49 + static int 50 + unx_hash_cred(struct auth_cred *acred, unsigned int hashbits) 51 + { 52 + return hash_64(from_kgid(&init_user_ns, acred->gid) | 53 + ((u64)from_kuid(&init_user_ns, acred->uid) << 54 + (sizeof(gid_t) * 8)), hashbits); 55 + } 56 + 49 57 /* 50 58 * Lookup AUTH_UNIX creds for current process 51 59 */ ··· 228 220 .au_name = "UNIX", 229 221 .create = unx_create, 230 222 .destroy = unx_destroy, 223 + .hash_cred = unx_hash_cred, 231 224 .lookup_cred = unx_lookup_cred, 232 225 .crcreate = unx_create_cred, 233 226 };
+1 -7
net/sunrpc/backchannel_rqst.c
··· 76 76 page = alloc_page(gfp_flags); 77 77 if (page == NULL) 78 78 return -ENOMEM; 79 - buf->head[0].iov_base = page_address(page); 80 - buf->head[0].iov_len = PAGE_SIZE; 81 - buf->tail[0].iov_base = NULL; 82 - buf->tail[0].iov_len = 0; 83 - buf->page_len = 0; 84 - buf->len = 0; 85 - buf->buflen = PAGE_SIZE; 79 + xdr_buf_init(buf, page_address(page), PAGE_SIZE); 86 80 return 0; 87 81 } 88 82
+3 -2
net/sunrpc/cache.c
··· 353 353 spin_unlock(&cache_list_lock); 354 354 355 355 /* start the cleaning process */ 356 - schedule_delayed_work(&cache_cleaner, 0); 356 + queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0); 357 357 } 358 358 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail); 359 359 ··· 476 476 delay = 0; 477 477 478 478 if (delay) 479 - schedule_delayed_work(&cache_cleaner, delay); 479 + queue_delayed_work(system_power_efficient_wq, 480 + &cache_cleaner, delay); 480 481 } 481 482 482 483
+107 -25
net/sunrpc/clnt.c
··· 184 184 struct super_block *sb) 185 185 { 186 186 struct dentry *dentry; 187 - int err = 0; 188 187 189 188 switch (event) { 190 189 case RPC_PIPEFS_MOUNT: ··· 200 201 printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); 201 202 return -ENOTSUPP; 202 203 } 203 - return err; 204 + return 0; 204 205 } 205 206 206 207 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, ··· 987 988 { 988 989 989 990 if (clnt != NULL) { 990 - rpc_task_release_client(task); 991 991 if (task->tk_xprt == NULL) 992 992 task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi); 993 993 task->tk_client = clnt; ··· 1691 1693 struct rpc_rqst *req = task->tk_rqstp; 1692 1694 struct rpc_xprt *xprt = req->rq_xprt; 1693 1695 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1696 + int status; 1694 1697 1695 1698 dprint_status(task); 1696 1699 ··· 1717 1718 req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen; 1718 1719 req->rq_rcvsize <<= 2; 1719 1720 1720 - req->rq_buffer = xprt->ops->buf_alloc(task, 1721 - req->rq_callsize + req->rq_rcvsize); 1722 - if (req->rq_buffer != NULL) 1723 - return; 1721 + status = xprt->ops->buf_alloc(task); 1724 1722 xprt_inject_disconnect(xprt); 1723 + if (status == 0) 1724 + return; 1725 + if (status != -ENOMEM) { 1726 + rpc_exit(task, status); 1727 + return; 1728 + } 1725 1729 1726 1730 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); 1727 1731 ··· 1750 1748 task->tk_rqstp->rq_bytes_sent = 0; 1751 1749 } 1752 1750 1753 - static inline void 1754 - rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) 1755 - { 1756 - buf->head[0].iov_base = start; 1757 - buf->head[0].iov_len = len; 1758 - buf->tail[0].iov_len = 0; 1759 - buf->page_len = 0; 1760 - buf->flags = 0; 1761 - buf->len = 0; 1762 - buf->buflen = len; 1763 - } 1764 - 1765 1751 /* 1766 1752 * 3. 
Encode arguments of an RPC call 1767 1753 */ ··· 1762 1772 1763 1773 dprint_status(task); 1764 1774 1765 - rpc_xdr_buf_init(&req->rq_snd_buf, 1766 - req->rq_buffer, 1767 - req->rq_callsize); 1768 - rpc_xdr_buf_init(&req->rq_rcv_buf, 1769 - (char *)req->rq_buffer + req->rq_callsize, 1770 - req->rq_rcvsize); 1775 + xdr_buf_init(&req->rq_snd_buf, 1776 + req->rq_buffer, 1777 + req->rq_callsize); 1778 + xdr_buf_init(&req->rq_rcv_buf, 1779 + req->rq_rbuffer, 1780 + req->rq_rcvsize); 1771 1781 1772 1782 p = rpc_encode_header(task); 1773 1783 if (p == NULL) { ··· 2606 2616 EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt); 2607 2617 2608 2618 /** 2619 + * rpc_clnt_setup_test_and_add_xprt() 2620 + * 2621 + * This is an rpc_clnt_add_xprt setup() function which returns 1 so: 2622 + * 1) caller of the test function must dereference the rpc_xprt_switch 2623 + * and the rpc_xprt. 2624 + * 2) test function must call rpc_xprt_switch_add_xprt, usually in 2625 + * the rpc_call_done routine. 2626 + * 2627 + * Upon success (return of 1), the test function adds the new 2628 + * transport to the rpc_clnt xprt switch 2629 + * 2630 + * @clnt: struct rpc_clnt to get the new transport 2631 + * @xps: the rpc_xprt_switch to hold the new transport 2632 + * @xprt: the rpc_xprt to test 2633 + * @data: a struct rpc_add_xprt_test pointer that holds the test function 2634 + * and test function call data 2635 + */ 2636 + int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, 2637 + struct rpc_xprt_switch *xps, 2638 + struct rpc_xprt *xprt, 2639 + void *data) 2640 + { 2641 + struct rpc_cred *cred; 2642 + struct rpc_task *task; 2643 + struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data; 2644 + int status = -EADDRINUSE; 2645 + 2646 + xprt = xprt_get(xprt); 2647 + xprt_switch_get(xps); 2648 + 2649 + if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr)) 2650 + goto out_err; 2651 + 2652 + /* Test the connection */ 2653 + cred = authnull_ops.lookup_cred(NULL, NULL, 0); 2654 + 
task = rpc_call_null_helper(clnt, xprt, cred, 2655 + RPC_TASK_SOFT | RPC_TASK_SOFTCONN, 2656 + NULL, NULL); 2657 + put_rpccred(cred); 2658 + if (IS_ERR(task)) { 2659 + status = PTR_ERR(task); 2660 + goto out_err; 2661 + } 2662 + status = task->tk_status; 2663 + rpc_put_task(task); 2664 + 2665 + if (status < 0) 2666 + goto out_err; 2667 + 2668 + /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */ 2669 + xtest->add_xprt_test(clnt, xprt, xtest->data); 2670 + 2671 + /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */ 2672 + return 1; 2673 + out_err: 2674 + xprt_put(xprt); 2675 + xprt_switch_put(xps); 2676 + pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not added\n", 2677 + status, xprt->address_strings[RPC_DISPLAY_ADDR]); 2678 + return status; 2679 + } 2680 + EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt); 2681 + 2682 + /** 2609 2683 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt 2610 2684 * @clnt: pointer to struct rpc_clnt 2611 2685 * @xprtargs: pointer to struct xprt_create ··· 2750 2696 &timeo); 2751 2697 } 2752 2698 EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout); 2699 + 2700 + void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt) 2701 + { 2702 + xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); 2703 + } 2704 + EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put); 2705 + 2706 + void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) 2707 + { 2708 + rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), 2709 + xprt); 2710 + } 2711 + EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); 2712 + 2713 + bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 2714 + const struct sockaddr *sap) 2715 + { 2716 + struct rpc_xprt_switch *xps; 2717 + bool ret; 2718 + 2719 + xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); 2720 + 2721 + rcu_read_lock(); 2722 + ret = rpc_xprt_switch_has_addr(xps, sap); 2723 + rcu_read_unlock(); 2724 + return ret; 2725 + } 2726 + 
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); 2753 2727 2754 2728 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 2755 2729 static void rpc_show_header(void)
+20 -15
net/sunrpc/sched.c
··· 849 849 } 850 850 851 851 /** 852 - * rpc_malloc - allocate an RPC buffer 853 - * @task: RPC task that will use this buffer 854 - * @size: requested byte size 852 + * rpc_malloc - allocate RPC buffer resources 853 + * @task: RPC task 854 + * 855 + * A single memory region is allocated, which is split between the 856 + * RPC call and RPC reply that this task is being used for. When 857 + * this RPC is retired, the memory is released by calling rpc_free. 855 858 * 856 859 * To prevent rpciod from hanging, this allocator never sleeps, 857 - * returning NULL and suppressing warning if the request cannot be serviced 858 - * immediately. 859 - * The caller can arrange to sleep in a way that is safe for rpciod. 860 + * returning -ENOMEM and suppressing warning if the request cannot 861 + * be serviced immediately. The caller can arrange to sleep in a 862 + * way that is safe for rpciod. 860 863 * 861 864 * Most requests are 'small' (under 2KiB) and can be serviced from a 862 865 * mempool, ensuring that NFS reads and writes can always proceed, ··· 868 865 * In order to avoid memory starvation triggering more writebacks of 869 866 * NFS requests, we avoid using GFP_KERNEL. 
870 867 */ 871 - void *rpc_malloc(struct rpc_task *task, size_t size) 868 + int rpc_malloc(struct rpc_task *task) 872 869 { 870 + struct rpc_rqst *rqst = task->tk_rqstp; 871 + size_t size = rqst->rq_callsize + rqst->rq_rcvsize; 873 872 struct rpc_buffer *buf; 874 873 gfp_t gfp = GFP_NOIO | __GFP_NOWARN; 875 874 ··· 885 880 buf = kmalloc(size, gfp); 886 881 887 882 if (!buf) 888 - return NULL; 883 + return -ENOMEM; 889 884 890 885 buf->len = size; 891 886 dprintk("RPC: %5u allocated buffer of size %zu at %p\n", 892 887 task->tk_pid, size, buf); 893 - return &buf->data; 888 + rqst->rq_buffer = buf->data; 889 + rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; 890 + return 0; 894 891 } 895 892 EXPORT_SYMBOL_GPL(rpc_malloc); 896 893 897 894 /** 898 - * rpc_free - free buffer allocated via rpc_malloc 899 - * @buffer: buffer to free 895 + * rpc_free - free RPC buffer resources allocated via rpc_malloc 896 + * @task: RPC task 900 897 * 901 898 */ 902 - void rpc_free(void *buffer) 899 + void rpc_free(struct rpc_task *task) 903 900 { 901 + void *buffer = task->tk_rqstp->rq_buffer; 904 902 size_t size; 905 903 struct rpc_buffer *buf; 906 - 907 - if (!buffer) 908 - return; 909 904 910 905 buf = container_of(buffer, struct rpc_buffer, data); 911 906 size = buf->len;
+17
net/sunrpc/svc.c
··· 401 401 } 402 402 EXPORT_SYMBOL_GPL(svc_bind); 403 403 404 + #if defined(CONFIG_SUNRPC_BACKCHANNEL) 405 + static void 406 + __svc_init_bc(struct svc_serv *serv) 407 + { 408 + INIT_LIST_HEAD(&serv->sv_cb_list); 409 + spin_lock_init(&serv->sv_cb_lock); 410 + init_waitqueue_head(&serv->sv_cb_waitq); 411 + } 412 + #else 413 + static void 414 + __svc_init_bc(struct svc_serv *serv) 415 + { 416 + } 417 + #endif 418 + 404 419 /* 405 420 * Create an RPC service 406 421 */ ··· 457 442 INIT_LIST_HEAD(&serv->sv_permsocks); 458 443 init_timer(&serv->sv_temptimer); 459 444 spin_lock_init(&serv->sv_lock); 445 + 446 + __svc_init_bc(serv); 460 447 461 448 serv->sv_nrpools = npools; 462 449 serv->sv_pools =
+7 -4
net/sunrpc/xdr.c
··· 767 767 newbase -= xdr->buf->page_base; 768 768 769 769 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0) 770 - xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len); 770 + xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2); 771 771 } 772 772 773 773 static bool xdr_set_next_buffer(struct xdr_stream *xdr) ··· 776 776 xdr_set_next_page(xdr); 777 777 else if (xdr->iov == xdr->buf->head) { 778 778 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0) 779 - xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len); 779 + xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2); 780 780 } 781 781 return xdr->p != xdr->end; 782 782 } ··· 859 859 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes) 860 860 { 861 861 __be32 *p; 862 - void *cpdest = xdr->scratch.iov_base; 862 + char *cpdest = xdr->scratch.iov_base; 863 863 size_t cplen = (char *)xdr->end - (char *)xdr->p; 864 864 865 865 if (nbytes > xdr->scratch.iov_len) 866 866 return NULL; 867 - memcpy(cpdest, xdr->p, cplen); 867 + p = __xdr_inline_decode(xdr, cplen); 868 + if (p == NULL) 869 + return NULL; 870 + memcpy(cpdest, p, cplen); 868 871 cpdest += cplen; 869 872 nbytes -= cplen; 870 873 if (!xdr_set_next_buffer(xdr))
+1 -1
net/sunrpc/xprt.c
··· 1295 1295 xprt_schedule_autodisconnect(xprt); 1296 1296 spin_unlock_bh(&xprt->transport_lock); 1297 1297 if (req->rq_buffer) 1298 - xprt->ops->buf_free(req->rq_buffer); 1298 + xprt->ops->buf_free(task); 1299 1299 xprt_inject_disconnect(xprt); 1300 1300 if (req->rq_cred != NULL) 1301 1301 put_rpccred(req->rq_cred);
+23 -1
net/sunrpc/xprtmultipath.c
··· 15 15 #include <asm/cmpxchg.h> 16 16 #include <linux/spinlock.h> 17 17 #include <linux/sunrpc/xprt.h> 18 + #include <linux/sunrpc/addr.h> 18 19 #include <linux/sunrpc/xprtmultipath.h> 19 20 20 21 typedef struct rpc_xprt *(*xprt_switch_find_xprt_t)(struct list_head *head, ··· 50 49 if (xprt == NULL) 51 50 return; 52 51 spin_lock(&xps->xps_lock); 53 - if (xps->xps_net == xprt->xprt_net || xps->xps_net == NULL) 52 + if ((xps->xps_net == xprt->xprt_net || xps->xps_net == NULL) && 53 + !rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr)) 54 54 xprt_switch_add_xprt_locked(xps, xprt); 55 55 spin_unlock(&xps->xps_lock); 56 56 } ··· 232 230 if (xpi->xpi_cursor == NULL || xps->xps_nxprts < 2) 233 231 return xprt_switch_find_first_entry(head); 234 232 return xprt_switch_find_current_entry(head, xpi->xpi_cursor); 233 + } 234 + 235 + bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, 236 + const struct sockaddr *sap) 237 + { 238 + struct list_head *head; 239 + struct rpc_xprt *pos; 240 + 241 + if (xps == NULL || sap == NULL) 242 + return false; 243 + 244 + head = &xps->xps_xprt_list; 245 + list_for_each_entry_rcu(pos, head, xprt_switch) { 246 + if (rpc_cmp_addr_port(sap, (struct sockaddr *)&pos->addr)) { 247 + pr_info("RPC: addr %s already in xprt switch\n", 248 + pos->address_strings[RPC_DISPLAY_ADDR]); 249 + return true; 250 + } 251 + } 252 + return false; 235 253 } 236 254 237 255 static
+11 -42
net/sunrpc/xprtrdma/backchannel.c
··· 27 27 list_del(&req->rl_all); 28 28 spin_unlock(&buf->rb_reqslock); 29 29 30 - rpcrdma_destroy_req(&r_xprt->rx_ia, req); 30 + rpcrdma_destroy_req(req); 31 31 32 32 kfree(rqst); 33 33 } ··· 35 35 static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt, 36 36 struct rpc_rqst *rqst) 37 37 { 38 - struct rpcrdma_ia *ia = &r_xprt->rx_ia; 39 38 struct rpcrdma_regbuf *rb; 40 39 struct rpcrdma_req *req; 41 - struct xdr_buf *buf; 42 40 size_t size; 43 41 44 42 req = rpcrdma_create_req(r_xprt); ··· 44 46 return PTR_ERR(req); 45 47 req->rl_backchannel = true; 46 48 47 - size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst); 48 - rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL); 49 + rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE, 50 + DMA_TO_DEVICE, GFP_KERNEL); 49 51 if (IS_ERR(rb)) 50 52 goto out_fail; 51 53 req->rl_rdmabuf = rb; 52 54 53 - size += RPCRDMA_INLINE_READ_THRESHOLD(rqst); 54 - rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL); 55 + size = r_xprt->rx_data.inline_rsize; 56 + rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL); 55 57 if (IS_ERR(rb)) 56 58 goto out_fail; 57 - rb->rg_owner = req; 58 59 req->rl_sendbuf = rb; 59 - /* so that rpcr_to_rdmar works when receiving a request */ 60 - rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base; 61 - 62 - buf = &rqst->rq_snd_buf; 63 - buf->head[0].iov_base = rqst->rq_buffer; 64 - buf->head[0].iov_len = 0; 65 - buf->tail[0].iov_base = NULL; 66 - buf->tail[0].iov_len = 0; 67 - buf->page_len = 0; 68 - buf->len = 0; 69 - buf->buflen = size; 70 - 60 + xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size); 61 + rpcrdma_set_xprtdata(rqst, req); 71 62 return 0; 72 63 73 64 out_fail: ··· 206 219 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 207 220 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 208 221 struct rpcrdma_msg *headerp; 209 - size_t rpclen; 210 222 211 223 headerp = rdmab_to_msg(req->rl_rdmabuf); 212 224 headerp->rm_xid = rqst->rq_xid; ··· 217 231 headerp->rm_body.rm_chunks[1] = xdr_zero; 218 232 
headerp->rm_body.rm_chunks[2] = xdr_zero; 219 233 220 - rpclen = rqst->rq_svec[0].iov_len; 221 - 222 - #ifdef RPCRDMA_BACKCHANNEL_DEBUG 223 - pr_info("RPC: %s: rpclen %zd headerp 0x%p lkey 0x%x\n", 224 - __func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf)); 225 - pr_info("RPC: %s: RPC/RDMA: %*ph\n", 226 - __func__, (int)RPCRDMA_HDRLEN_MIN, headerp); 227 - pr_info("RPC: %s: RPC: %*ph\n", 228 - __func__, (int)rpclen, rqst->rq_svec[0].iov_base); 229 - #endif 230 - 231 - req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf); 232 - req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN; 233 - req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf); 234 - 235 - req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf); 236 - req->rl_send_iov[1].length = rpclen; 237 - req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf); 238 - 239 - req->rl_niovs = 2; 234 + if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN, 235 + &rqst->rq_snd_buf, rpcrdma_noch)) 236 + return -EIO; 240 237 return 0; 241 238 } 242 239 ··· 371 402 out_short: 372 403 pr_warn("RPC/RDMA short backward direction call\n"); 373 404 374 - if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep)) 405 + if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep)) 375 406 xprt_disconnect_done(xprt); 376 407 else 377 408 pr_warn("RPC: %s: reposting rep %p\n",
+4 -3
net/sunrpc/xprtrdma/fmr_ops.c
··· 160 160 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, 161 161 struct rpcrdma_create_data_internal *cdata) 162 162 { 163 - rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1, 164 - RPCRDMA_MAX_DATA_SEGS / 165 - RPCRDMA_MAX_FMR_SGES)); 163 + ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS / 164 + RPCRDMA_MAX_FMR_SGES); 166 165 return 0; 167 166 } 168 167 ··· 273 274 */ 274 275 list_for_each_entry(mw, &req->rl_registered, mw_list) 275 276 list_add_tail(&mw->fmr.fm_mr->list, &unmap_list); 277 + r_xprt->rx_stats.local_inv_needed++; 276 278 rc = ib_unmap_fmr(&unmap_list); 277 279 if (rc) 278 280 goto out_reset; ··· 331 331 .ro_init_mr = fmr_op_init_mr, 332 332 .ro_release_mr = fmr_op_release_mr, 333 333 .ro_displayname = "fmr", 334 + .ro_send_w_inv_ok = 0, 334 335 };
+22 -6
net/sunrpc/xprtrdma/frwr_ops.c
··· 67 67 * pending send queue WRs before the transport is reconnected. 68 68 */ 69 69 70 + #include <linux/sunrpc/rpc_rdma.h> 71 + 70 72 #include "xprt_rdma.h" 71 73 72 74 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) ··· 163 161 return PTR_ERR(f->fr_mr); 164 162 } 165 163 166 - dprintk("RPC: %s: recovered FRMR %p\n", __func__, r); 164 + dprintk("RPC: %s: recovered FRMR %p\n", __func__, f); 167 165 f->fr_state = FRMR_IS_INVALID; 168 166 return 0; 169 167 } ··· 244 242 depth; 245 243 } 246 244 247 - rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1, 248 - RPCRDMA_MAX_DATA_SEGS / 249 - ia->ri_max_frmr_depth)); 245 + ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS / 246 + ia->ri_max_frmr_depth); 250 247 return 0; 251 248 } 252 249 ··· 330 329 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 331 330 if (wc->status != IB_WC_SUCCESS) 332 331 __frwr_sendcompletion_flush(wc, frmr, "localinv"); 333 - complete_all(&frmr->fr_linv_done); 332 + complete(&frmr->fr_linv_done); 334 333 } 335 334 336 335 /* Post a REG_MR Work Request to register a memory region ··· 397 396 goto out_mapmr_err; 398 397 399 398 dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n", 400 - __func__, mw, mw->mw_nents, mr->length); 399 + __func__, frmr, mw->mw_nents, mr->length); 401 400 402 401 key = (u8)(mr->rkey & 0x000000FF); 403 402 ib_update_fast_reg_key(mr, ++key); ··· 450 449 struct rpcrdma_frmr *f = &mw->frmr; 451 450 struct ib_send_wr *invalidate_wr; 452 451 452 + dprintk("RPC: %s: invalidating frmr %p\n", __func__, f); 453 + 453 454 f->fr_state = FRMR_IS_INVALID; 454 455 invalidate_wr = &f->fr_invwr; 455 456 ··· 475 472 frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) 476 473 { 477 474 struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr; 475 + struct rpcrdma_rep *rep = req->rl_reply; 478 476 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 479 477 struct rpcrdma_mw *mw, *tmp; 480 478 struct rpcrdma_frmr *f; ··· 491 487 f = NULL; 492 488 
invalidate_wrs = pos = prev = NULL; 493 489 list_for_each_entry(mw, &req->rl_registered, mw_list) { 490 + if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) && 491 + (mw->mw_handle == rep->rr_inv_rkey)) { 492 + mw->frmr.fr_state = FRMR_IS_INVALID; 493 + continue; 494 + } 495 + 494 496 pos = __frwr_prepare_linv_wr(mw); 495 497 496 498 if (!invalidate_wrs) ··· 506 496 prev = pos; 507 497 f = &mw->frmr; 508 498 } 499 + if (!f) 500 + goto unmap; 509 501 510 502 /* Strong send queue ordering guarantees that when the 511 503 * last WR in the chain completes, all WRs in the chain ··· 522 510 * replaces the QP. The RPC reply handler won't call us 523 511 * unless ri_id->qp is a valid pointer. 524 512 */ 513 + r_xprt->rx_stats.local_inv_needed++; 525 514 rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr); 526 515 if (rc) 527 516 goto reset_mrs; ··· 534 521 */ 535 522 unmap: 536 523 list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) { 524 + dprintk("RPC: %s: unmapping frmr %p\n", 525 + __func__, &mw->frmr); 537 526 list_del_init(&mw->mw_list); 538 527 ib_dma_unmap_sg(ia->ri_device, 539 528 mw->mw_sg, mw->mw_nents, mw->mw_dir); ··· 591 576 .ro_init_mr = frwr_op_init_mr, 592 577 .ro_release_mr = frwr_op_release_mr, 593 578 .ro_displayname = "frwr", 579 + .ro_send_w_inv_ok = RPCRDMA_CMP_F_SND_W_INV_OK, 594 580 };
+193 -130
net/sunrpc/xprtrdma/rpc_rdma.c
··· 53 53 # define RPCDBG_FACILITY RPCDBG_TRANS 54 54 #endif 55 55 56 - enum rpcrdma_chunktype { 57 - rpcrdma_noch = 0, 58 - rpcrdma_readch, 59 - rpcrdma_areadch, 60 - rpcrdma_writech, 61 - rpcrdma_replych 62 - }; 63 - 64 56 static const char transfertypes[][12] = { 65 57 "inline", /* no chunks */ 66 58 "read list", /* some argument via rdma read */ ··· 110 118 return size; 111 119 } 112 120 113 - void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *ia, 114 - struct rpcrdma_create_data_internal *cdata, 115 - unsigned int maxsegs) 121 + void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt) 116 122 { 123 + struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; 124 + struct rpcrdma_ia *ia = &r_xprt->rx_ia; 125 + unsigned int maxsegs = ia->ri_max_segs; 126 + 117 127 ia->ri_max_inline_write = cdata->inline_wsize - 118 128 rpcrdma_max_call_header_size(maxsegs); 119 129 ia->ri_max_inline_read = cdata->inline_rsize - ··· 147 153 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 148 154 149 155 return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read; 150 - } 151 - 152 - static int 153 - rpcrdma_tail_pullup(struct xdr_buf *buf) 154 - { 155 - size_t tlen = buf->tail[0].iov_len; 156 - size_t skip = tlen & 3; 157 - 158 - /* Do not include the tail if it is only an XDR pad */ 159 - if (tlen < 4) 160 - return 0; 161 - 162 - /* xdr_write_pages() adds a pad at the beginning of the tail 163 - * if the content in "buf->pages" is unaligned. Force the 164 - * tail's actual content to land at the next XDR position 165 - * after the head instead. 
166 - */ 167 - if (skip) { 168 - unsigned char *src, *dst; 169 - unsigned int count; 170 - 171 - src = buf->tail[0].iov_base; 172 - dst = buf->head[0].iov_base; 173 - dst += buf->head[0].iov_len; 174 - 175 - src += skip; 176 - tlen -= skip; 177 - 178 - dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n", 179 - __func__, skip, dst, src, tlen); 180 - 181 - for (count = tlen; count; count--) 182 - *dst++ = *src++; 183 - } 184 - 185 - return tlen; 186 156 } 187 157 188 158 /* Split "vec" on page boundaries into segments. FMR registers pages, ··· 187 229 188 230 static int 189 231 rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, 190 - enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg) 232 + enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, 233 + bool reminv_expected) 191 234 { 192 235 int len, n, p, page_base; 193 236 struct page **ppages; ··· 228 269 229 270 /* When encoding the read list, the tail is always sent inline */ 230 271 if (type == rpcrdma_readch) 272 + return n; 273 + 274 + /* When encoding the Write list, some servers need to see an extra 275 + * segment for odd-length Write chunks. The upper layer provides 276 + * space in the tail iovec for this purpose. 
277 + */ 278 + if (type == rpcrdma_writech && reminv_expected) 231 279 return n; 232 280 233 281 if (xdrbuf->tail[0].iov_len) { ··· 293 327 if (rtype == rpcrdma_areadch) 294 328 pos = 0; 295 329 seg = req->rl_segments; 296 - nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg); 330 + nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg, false); 297 331 if (nsegs < 0) 298 332 return ERR_PTR(nsegs); 299 333 ··· 357 391 seg = req->rl_segments; 358 392 nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 359 393 rqst->rq_rcv_buf.head[0].iov_len, 360 - wtype, seg); 394 + wtype, seg, 395 + r_xprt->rx_ia.ri_reminv_expected); 361 396 if (nsegs < 0) 362 397 return ERR_PTR(nsegs); 363 398 ··· 423 456 } 424 457 425 458 seg = req->rl_segments; 426 - nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg); 459 + nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg, 460 + r_xprt->rx_ia.ri_reminv_expected); 427 461 if (nsegs < 0) 428 462 return ERR_PTR(nsegs); 429 463 ··· 459 491 return iptr; 460 492 } 461 493 462 - /* 463 - * Copy write data inline. 464 - * This function is used for "small" requests. Data which is passed 465 - * to RPC via iovecs (or page list) is copied directly into the 466 - * pre-registered memory buffer for this request. For small amounts 467 - * of data, this is efficient. The cutoff value is tunable. 494 + /* Prepare the RPC-over-RDMA header SGE. 
468 495 */ 469 - static void rpcrdma_inline_pullup(struct rpc_rqst *rqst) 496 + static bool 497 + rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req, 498 + u32 len) 470 499 { 471 - int i, npages, curlen; 472 - int copy_len; 473 - unsigned char *srcp, *destp; 474 - struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); 475 - int page_base; 476 - struct page **ppages; 500 + struct rpcrdma_regbuf *rb = req->rl_rdmabuf; 501 + struct ib_sge *sge = &req->rl_send_sge[0]; 477 502 478 - destp = rqst->rq_svec[0].iov_base; 479 - curlen = rqst->rq_svec[0].iov_len; 480 - destp += curlen; 503 + if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) { 504 + if (!__rpcrdma_dma_map_regbuf(ia, rb)) 505 + return false; 506 + sge->addr = rdmab_addr(rb); 507 + sge->lkey = rdmab_lkey(rb); 508 + } 509 + sge->length = len; 481 510 482 - dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n", 483 - __func__, destp, rqst->rq_slen, curlen); 511 + ib_dma_sync_single_for_device(ia->ri_device, sge->addr, 512 + sge->length, DMA_TO_DEVICE); 513 + req->rl_send_wr.num_sge++; 514 + return true; 515 + } 484 516 485 - copy_len = rqst->rq_snd_buf.page_len; 517 + /* Prepare the Send SGEs. The head and tail iovec, and each entry 518 + * in the page list, gets its own SGE. 
519 + */ 520 + static bool 521 + rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req, 522 + struct xdr_buf *xdr, enum rpcrdma_chunktype rtype) 523 + { 524 + unsigned int sge_no, page_base, len, remaining; 525 + struct rpcrdma_regbuf *rb = req->rl_sendbuf; 526 + struct ib_device *device = ia->ri_device; 527 + struct ib_sge *sge = req->rl_send_sge; 528 + u32 lkey = ia->ri_pd->local_dma_lkey; 529 + struct page *page, **ppages; 486 530 487 - if (rqst->rq_snd_buf.tail[0].iov_len) { 488 - curlen = rqst->rq_snd_buf.tail[0].iov_len; 489 - if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) { 490 - memmove(destp + copy_len, 491 - rqst->rq_snd_buf.tail[0].iov_base, curlen); 492 - r_xprt->rx_stats.pullup_copy_count += curlen; 531 + /* The head iovec is straightforward, as it is already 532 + * DMA-mapped. Sync the content that has changed. 533 + */ 534 + if (!rpcrdma_dma_map_regbuf(ia, rb)) 535 + return false; 536 + sge_no = 1; 537 + sge[sge_no].addr = rdmab_addr(rb); 538 + sge[sge_no].length = xdr->head[0].iov_len; 539 + sge[sge_no].lkey = rdmab_lkey(rb); 540 + ib_dma_sync_single_for_device(device, sge[sge_no].addr, 541 + sge[sge_no].length, DMA_TO_DEVICE); 542 + 543 + /* If there is a Read chunk, the page list is being handled 544 + * via explicit RDMA, and thus is skipped here. However, the 545 + * tail iovec may include an XDR pad for the page list, as 546 + * well as additional content, and may not reside in the 547 + * same page as the head iovec. 548 + */ 549 + if (rtype == rpcrdma_readch) { 550 + len = xdr->tail[0].iov_len; 551 + 552 + /* Do not include the tail if it is only an XDR pad */ 553 + if (len < 4) 554 + goto out; 555 + 556 + page = virt_to_page(xdr->tail[0].iov_base); 557 + page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK; 558 + 559 + /* If the content in the page list is an odd length, 560 + * xdr_write_pages() has added a pad at the beginning 561 + * of the tail iovec. 
Force the tail's non-pad content 562 + * to land at the next XDR position in the Send message. 563 + */ 564 + page_base += len & 3; 565 + len -= len & 3; 566 + goto map_tail; 567 + } 568 + 569 + /* If there is a page list present, temporarily DMA map 570 + * and prepare an SGE for each page to be sent. 571 + */ 572 + if (xdr->page_len) { 573 + ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); 574 + page_base = xdr->page_base & ~PAGE_MASK; 575 + remaining = xdr->page_len; 576 + while (remaining) { 577 + sge_no++; 578 + if (sge_no > RPCRDMA_MAX_SEND_SGES - 2) 579 + goto out_mapping_overflow; 580 + 581 + len = min_t(u32, PAGE_SIZE - page_base, remaining); 582 + sge[sge_no].addr = ib_dma_map_page(device, *ppages, 583 + page_base, len, 584 + DMA_TO_DEVICE); 585 + if (ib_dma_mapping_error(device, sge[sge_no].addr)) 586 + goto out_mapping_err; 587 + sge[sge_no].length = len; 588 + sge[sge_no].lkey = lkey; 589 + 590 + req->rl_mapped_sges++; 591 + ppages++; 592 + remaining -= len; 593 + page_base = 0; 493 594 } 494 - dprintk("RPC: %s: tail destp 0x%p len %d\n", 495 - __func__, destp + copy_len, curlen); 496 - rqst->rq_svec[0].iov_len += curlen; 497 595 } 498 - r_xprt->rx_stats.pullup_copy_count += copy_len; 499 596 500 - page_base = rqst->rq_snd_buf.page_base; 501 - ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT); 502 - page_base &= ~PAGE_MASK; 503 - npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT; 504 - for (i = 0; copy_len && i < npages; i++) { 505 - curlen = PAGE_SIZE - page_base; 506 - if (curlen > copy_len) 507 - curlen = copy_len; 508 - dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n", 509 - __func__, i, destp, copy_len, curlen); 510 - srcp = kmap_atomic(ppages[i]); 511 - memcpy(destp, srcp+page_base, curlen); 512 - kunmap_atomic(srcp); 513 - rqst->rq_svec[0].iov_len += curlen; 514 - destp += curlen; 515 - copy_len -= curlen; 516 - page_base = 0; 597 + /* The tail iovec is not always constructed in the same 598 + * page where the head 
iovec resides (see, for example, 599 + * gss_wrap_req_priv). To neatly accommodate that case, 600 + * DMA map it separately. 601 + */ 602 + if (xdr->tail[0].iov_len) { 603 + page = virt_to_page(xdr->tail[0].iov_base); 604 + page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK; 605 + len = xdr->tail[0].iov_len; 606 + 607 + map_tail: 608 + sge_no++; 609 + sge[sge_no].addr = ib_dma_map_page(device, page, 610 + page_base, len, 611 + DMA_TO_DEVICE); 612 + if (ib_dma_mapping_error(device, sge[sge_no].addr)) 613 + goto out_mapping_err; 614 + sge[sge_no].length = len; 615 + sge[sge_no].lkey = lkey; 616 + req->rl_mapped_sges++; 517 617 } 518 - /* header now contains entire send message */ 618 + 619 + out: 620 + req->rl_send_wr.num_sge = sge_no + 1; 621 + return true; 622 + 623 + out_mapping_overflow: 624 + pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no); 625 + return false; 626 + 627 + out_mapping_err: 628 + pr_err("rpcrdma: Send mapping error\n"); 629 + return false; 630 + } 631 + 632 + bool 633 + rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req, 634 + u32 hdrlen, struct xdr_buf *xdr, 635 + enum rpcrdma_chunktype rtype) 636 + { 637 + req->rl_send_wr.num_sge = 0; 638 + req->rl_mapped_sges = 0; 639 + 640 + if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen)) 641 + goto out_map; 642 + 643 + if (rtype != rpcrdma_areadch) 644 + if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype)) 645 + goto out_map; 646 + 647 + return true; 648 + 649 + out_map: 650 + pr_err("rpcrdma: failed to DMA map a Send buffer\n"); 651 + return false; 652 + } 653 + 654 + void 655 + rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req) 656 + { 657 + struct ib_device *device = ia->ri_device; 658 + struct ib_sge *sge; 659 + int count; 660 + 661 + sge = &req->rl_send_sge[2]; 662 + for (count = req->rl_mapped_sges; count--; sge++) 663 + ib_dma_unmap_page(device, sge->addr, sge->length, 664 + DMA_TO_DEVICE); 665 + req->rl_mapped_sges = 0; 519 666 } 520 667 521 668 /* 
522 669 * Marshal a request: the primary job of this routine is to choose 523 670 * the transfer modes. See comments below. 524 - * 525 - * Prepares up to two IOVs per Call message: 526 - * 527 - * [0] -- RPC RDMA header 528 - * [1] -- the RPC header/data 529 671 * 530 672 * Returns zero on success, otherwise a negative errno. 531 673 */ ··· 704 626 */ 705 627 if (rpcrdma_args_inline(r_xprt, rqst)) { 706 628 rtype = rpcrdma_noch; 707 - rpcrdma_inline_pullup(rqst); 708 - rpclen = rqst->rq_svec[0].iov_len; 629 + rpclen = rqst->rq_snd_buf.len; 709 630 } else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) { 710 631 rtype = rpcrdma_readch; 711 - rpclen = rqst->rq_svec[0].iov_len; 712 - rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf); 632 + rpclen = rqst->rq_snd_buf.head[0].iov_len + 633 + rqst->rq_snd_buf.tail[0].iov_len; 713 634 } else { 714 635 r_xprt->rx_stats.nomsg_call_count++; 715 636 headerp->rm_type = htonl(RDMA_NOMSG); ··· 750 673 goto out_unmap; 751 674 hdrlen = (unsigned char *)iptr - (unsigned char *)headerp; 752 675 753 - if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) 754 - goto out_overflow; 755 - 756 676 dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n", 757 677 rqst->rq_task->tk_pid, __func__, 758 678 transfertypes[rtype], transfertypes[wtype], 759 679 hdrlen, rpclen); 760 680 761 - req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf); 762 - req->rl_send_iov[0].length = hdrlen; 763 - req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf); 764 - 765 - req->rl_niovs = 1; 766 - if (rtype == rpcrdma_areadch) 767 - return 0; 768 - 769 - req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf); 770 - req->rl_send_iov[1].length = rpclen; 771 - req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf); 772 - 773 - req->rl_niovs = 2; 681 + if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen, 682 + &rqst->rq_snd_buf, rtype)) { 683 + iptr = ERR_PTR(-EIO); 684 + goto out_unmap; 685 + } 774 686 return 0; 775 - 776 - out_overflow: 777 
- pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s/%s\n", 778 - hdrlen, rpclen, transfertypes[rtype], transfertypes[wtype]); 779 - iptr = ERR_PTR(-EIO); 780 687 781 688 out_unmap: 782 689 r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false); ··· 977 916 * allowed to timeout, to discover the errors at that time. 978 917 */ 979 918 void 980 - rpcrdma_reply_handler(struct rpcrdma_rep *rep) 919 + rpcrdma_reply_handler(struct work_struct *work) 981 920 { 921 + struct rpcrdma_rep *rep = 922 + container_of(work, struct rpcrdma_rep, rr_work); 982 923 struct rpcrdma_msg *headerp; 983 924 struct rpcrdma_req *req; 984 925 struct rpc_rqst *rqst; ··· 1195 1132 1196 1133 repost: 1197 1134 r_xprt->rx_stats.bad_reply_count++; 1198 - if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep)) 1135 + if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep)) 1199 1136 rpcrdma_recv_buffer_put(rep); 1200 1137 }
+10 -9
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
··· 159 159 /* Server-side transport endpoint wants a whole page for its send 160 160 * buffer. The client RPC code constructs the RPC header in this 161 161 * buffer before it invokes ->send_request. 162 - * 163 - * Returns NULL if there was a temporary allocation failure. 164 162 */ 165 - static void * 166 - xprt_rdma_bc_allocate(struct rpc_task *task, size_t size) 163 + static int 164 + xprt_rdma_bc_allocate(struct rpc_task *task) 167 165 { 168 166 struct rpc_rqst *rqst = task->tk_rqstp; 169 167 struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt; 168 + size_t size = rqst->rq_callsize; 170 169 struct svcxprt_rdma *rdma; 171 170 struct page *page; 172 171 173 172 rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt); 174 173 175 - /* Prevent an infinite loop: try to make this case work */ 176 - if (size > PAGE_SIZE) 174 + if (size > PAGE_SIZE) { 177 175 WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n", 178 176 size); 177 + return -EINVAL; 178 + } 179 179 180 180 page = alloc_page(RPCRDMA_DEF_GFP); 181 181 if (!page) 182 - return NULL; 182 + return -ENOMEM; 183 183 184 - return page_address(page); 184 + rqst->rq_buffer = page_address(page); 185 + return 0; 185 186 } 186 187 187 188 static void 188 - xprt_rdma_bc_free(void *buffer) 189 + xprt_rdma_bc_free(struct rpc_task *task) 189 190 { 190 191 /* No-op: ctxt and page have already been freed. */ 191 192 }
+122 -84
net/sunrpc/xprtrdma/transport.c
··· 97 97 .data = &xprt_rdma_max_inline_read, 98 98 .maxlen = sizeof(unsigned int), 99 99 .mode = 0644, 100 - .proc_handler = proc_dointvec, 100 + .proc_handler = proc_dointvec_minmax, 101 101 .extra1 = &min_inline_size, 102 102 .extra2 = &max_inline_size, 103 103 }, ··· 106 106 .data = &xprt_rdma_max_inline_write, 107 107 .maxlen = sizeof(unsigned int), 108 108 .mode = 0644, 109 - .proc_handler = proc_dointvec, 109 + .proc_handler = proc_dointvec_minmax, 110 110 .extra1 = &min_inline_size, 111 111 .extra2 = &max_inline_size, 112 112 }, ··· 477 477 } 478 478 } 479 479 480 - /* 481 - * The RDMA allocate/free functions need the task structure as a place 482 - * to hide the struct rpcrdma_req, which is necessary for the actual send/recv 483 - * sequence. 484 - * 485 - * The RPC layer allocates both send and receive buffers in the same call 486 - * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer). 487 - * We may register rq_rcv_buf when using reply chunks. 480 + /* Allocate a fixed-size buffer in which to construct and send the 481 + * RPC-over-RDMA header for this request. 
488 482 */ 489 - static void * 490 - xprt_rdma_allocate(struct rpc_task *task, size_t size) 483 + static bool 484 + rpcrdma_get_rdmabuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, 485 + gfp_t flags) 491 486 { 492 - struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 493 - struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 487 + size_t size = RPCRDMA_HDRBUF_SIZE; 494 488 struct rpcrdma_regbuf *rb; 489 + 490 + if (req->rl_rdmabuf) 491 + return true; 492 + 493 + rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags); 494 + if (IS_ERR(rb)) 495 + return false; 496 + 497 + r_xprt->rx_stats.hardway_register_count += size; 498 + req->rl_rdmabuf = rb; 499 + return true; 500 + } 501 + 502 + static bool 503 + rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, 504 + size_t size, gfp_t flags) 505 + { 506 + struct rpcrdma_regbuf *rb; 507 + 508 + if (req->rl_sendbuf && rdmab_length(req->rl_sendbuf) >= size) 509 + return true; 510 + 511 + rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags); 512 + if (IS_ERR(rb)) 513 + return false; 514 + 515 + rpcrdma_free_regbuf(req->rl_sendbuf); 516 + r_xprt->rx_stats.hardway_register_count += size; 517 + req->rl_sendbuf = rb; 518 + return true; 519 + } 520 + 521 + /* The rq_rcv_buf is used only if a Reply chunk is necessary. 522 + * The decision to use a Reply chunk is made later in 523 + * rpcrdma_marshal_req. This buffer is registered at that time. 524 + * 525 + * Otherwise, the associated RPC Reply arrives in a separate 526 + * Receive buffer, arbitrarily chosen by the HCA. The buffer 527 + * allocated here for the RPC Reply is not utilized in that 528 + * case. See rpcrdma_inline_fixup. 529 + * 530 + * A regbuf is used here to remember the buffer size. 
531 + */ 532 + static bool 533 + rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, 534 + size_t size, gfp_t flags) 535 + { 536 + struct rpcrdma_regbuf *rb; 537 + 538 + if (req->rl_recvbuf && rdmab_length(req->rl_recvbuf) >= size) 539 + return true; 540 + 541 + rb = rpcrdma_alloc_regbuf(size, DMA_NONE, flags); 542 + if (IS_ERR(rb)) 543 + return false; 544 + 545 + rpcrdma_free_regbuf(req->rl_recvbuf); 546 + r_xprt->rx_stats.hardway_register_count += size; 547 + req->rl_recvbuf = rb; 548 + return true; 549 + } 550 + 551 + /** 552 + * xprt_rdma_allocate - allocate transport resources for an RPC 553 + * @task: RPC task 554 + * 555 + * Return values: 556 + * 0: Success; rq_buffer points to RPC buffer to use 557 + * ENOMEM: Out of memory, call again later 558 + * EIO: A permanent error occurred, do not retry 559 + * 560 + * The RDMA allocate/free functions need the task structure as a place 561 + * to hide the struct rpcrdma_req, which is necessary for the actual 562 + * send/recv sequence. 563 + * 564 + * xprt_rdma_allocate provides buffers that are already mapped for 565 + * DMA, and a local DMA lkey is provided for each. 
566 + */ 567 + static int 568 + xprt_rdma_allocate(struct rpc_task *task) 569 + { 570 + struct rpc_rqst *rqst = task->tk_rqstp; 571 + struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); 495 572 struct rpcrdma_req *req; 496 - size_t min_size; 497 573 gfp_t flags; 498 574 499 575 req = rpcrdma_buffer_get(&r_xprt->rx_buf); 500 576 if (req == NULL) 501 - return NULL; 577 + return -ENOMEM; 502 578 503 579 flags = RPCRDMA_DEF_GFP; 504 580 if (RPC_IS_SWAPPER(task)) 505 581 flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; 506 582 507 - if (req->rl_rdmabuf == NULL) 508 - goto out_rdmabuf; 509 - if (req->rl_sendbuf == NULL) 510 - goto out_sendbuf; 511 - if (size > req->rl_sendbuf->rg_size) 512 - goto out_sendbuf; 583 + if (!rpcrdma_get_rdmabuf(r_xprt, req, flags)) 584 + goto out_fail; 585 + if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags)) 586 + goto out_fail; 587 + if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags)) 588 + goto out_fail; 513 589 514 - out: 515 - dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req); 590 + dprintk("RPC: %5u %s: send size = %zd, recv size = %zd, req = %p\n", 591 + task->tk_pid, __func__, rqst->rq_callsize, 592 + rqst->rq_rcvsize, req); 593 + 516 594 req->rl_connect_cookie = 0; /* our reserved value */ 517 - req->rl_task = task; 518 - return req->rl_sendbuf->rg_base; 519 - 520 - out_rdmabuf: 521 - min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp); 522 - rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags); 523 - if (IS_ERR(rb)) 524 - goto out_fail; 525 - req->rl_rdmabuf = rb; 526 - 527 - out_sendbuf: 528 - /* XDR encoding and RPC/RDMA marshaling of this request has not 529 - * yet occurred. Thus a lower bound is needed to prevent buffer 530 - * overrun during marshaling. 531 - * 532 - * RPC/RDMA marshaling may choose to send payload bearing ops 533 - * inline, if the result is smaller than the inline threshold. 
534 - * The value of the "size" argument accounts for header 535 - * requirements but not for the payload in these cases. 536 - * 537 - * Likewise, allocate enough space to receive a reply up to the 538 - * size of the inline threshold. 539 - * 540 - * It's unlikely that both the send header and the received 541 - * reply will be large, but slush is provided here to allow 542 - * flexibility when marshaling. 543 - */ 544 - min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp); 545 - min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp); 546 - if (size < min_size) 547 - size = min_size; 548 - 549 - rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags); 550 - if (IS_ERR(rb)) 551 - goto out_fail; 552 - rb->rg_owner = req; 553 - 554 - r_xprt->rx_stats.hardway_register_count += size; 555 - rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf); 556 - req->rl_sendbuf = rb; 557 - goto out; 595 + rpcrdma_set_xprtdata(rqst, req); 596 + rqst->rq_buffer = req->rl_sendbuf->rg_base; 597 + rqst->rq_rbuffer = req->rl_recvbuf->rg_base; 598 + return 0; 558 599 559 600 out_fail: 560 601 rpcrdma_buffer_put(req); 561 - return NULL; 602 + return -ENOMEM; 562 603 } 563 604 564 - /* 565 - * This function returns all RDMA resources to the pool. 605 + /** 606 + * xprt_rdma_free - release resources allocated by xprt_rdma_allocate 607 + * @task: RPC task 608 + * 609 + * Caller guarantees rqst->rq_buffer is non-NULL. 
566 610 */ 567 611 static void 568 - xprt_rdma_free(void *buffer) 612 + xprt_rdma_free(struct rpc_task *task) 569 613 { 570 - struct rpcrdma_req *req; 571 - struct rpcrdma_xprt *r_xprt; 572 - struct rpcrdma_regbuf *rb; 614 + struct rpc_rqst *rqst = task->tk_rqstp; 615 + struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); 616 + struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 617 + struct rpcrdma_ia *ia = &r_xprt->rx_ia; 573 618 574 - if (buffer == NULL) 575 - return; 576 - 577 - rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]); 578 - req = rb->rg_owner; 579 619 if (req->rl_backchannel) 580 620 return; 581 621 582 - r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf); 583 - 584 622 dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply); 585 623 586 - r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, 587 - !RPC_IS_ASYNC(req->rl_task)); 588 - 624 + ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task)); 625 + rpcrdma_unmap_sges(ia, req); 589 626 rpcrdma_buffer_put(req); 590 627 } 591 628 ··· 722 685 r_xprt->rx_stats.failed_marshal_count, 723 686 r_xprt->rx_stats.bad_reply_count, 724 687 r_xprt->rx_stats.nomsg_call_count); 725 - seq_printf(seq, "%lu %lu %lu\n", 688 + seq_printf(seq, "%lu %lu %lu %lu\n", 726 689 r_xprt->rx_stats.mrs_recovered, 727 690 r_xprt->rx_stats.mrs_orphaned, 728 - r_xprt->rx_stats.mrs_allocated); 691 + r_xprt->rx_stats.mrs_allocated, 692 + r_xprt->rx_stats.local_inv_needed); 729 693 } 730 694 731 695 static int
+133 -104
net/sunrpc/xprtrdma/verbs.c
··· 129 129 wc->status, wc->vendor_err); 130 130 } 131 131 132 - static void 133 - rpcrdma_receive_worker(struct work_struct *work) 134 - { 135 - struct rpcrdma_rep *rep = 136 - container_of(work, struct rpcrdma_rep, rr_work); 137 - 138 - rpcrdma_reply_handler(rep); 139 - } 140 - 141 132 /* Perform basic sanity checking to avoid using garbage 142 133 * to update the credit grant value. 143 134 */ ··· 152 161 } 153 162 154 163 /** 155 - * rpcrdma_receive_wc - Invoked by RDMA provider for each polled Receive WC 164 + * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC 156 165 * @cq: completion queue (ignored) 157 166 * @wc: completed WR 158 167 * 159 168 */ 160 169 static void 161 - rpcrdma_receive_wc(struct ib_cq *cq, struct ib_wc *wc) 170 + rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) 162 171 { 163 172 struct ib_cqe *cqe = wc->wr_cqe; 164 173 struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep, ··· 176 185 __func__, rep, wc->byte_len); 177 186 178 187 rep->rr_len = wc->byte_len; 188 + rep->rr_wc_flags = wc->wc_flags; 189 + rep->rr_inv_rkey = wc->ex.invalidate_rkey; 190 + 179 191 ib_dma_sync_single_for_cpu(rep->rr_device, 180 192 rdmab_addr(rep->rr_rdmabuf), 181 193 rep->rr_len, DMA_FROM_DEVICE); ··· 196 202 wc->status, wc->vendor_err); 197 203 rep->rr_len = RPCRDMA_BAD_LEN; 198 204 goto out_schedule; 205 + } 206 + 207 + static void 208 + rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, 209 + struct rdma_conn_param *param) 210 + { 211 + struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; 212 + const struct rpcrdma_connect_private *pmsg = param->private_data; 213 + unsigned int rsize, wsize; 214 + 215 + /* Default settings for RPC-over-RDMA Version One */ 216 + r_xprt->rx_ia.ri_reminv_expected = false; 217 + rsize = RPCRDMA_V1_DEF_INLINE_SIZE; 218 + wsize = RPCRDMA_V1_DEF_INLINE_SIZE; 219 + 220 + if (pmsg && 221 + pmsg->cp_magic == rpcrdma_cmp_magic && 222 + pmsg->cp_version == RPCRDMA_CMP_VERSION) { 
223 + r_xprt->rx_ia.ri_reminv_expected = true; 224 + rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); 225 + wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); 226 + } 227 + 228 + if (rsize < cdata->inline_rsize) 229 + cdata->inline_rsize = rsize; 230 + if (wsize < cdata->inline_wsize) 231 + cdata->inline_wsize = wsize; 232 + pr_info("rpcrdma: max send %u, max recv %u\n", 233 + cdata->inline_wsize, cdata->inline_rsize); 234 + rpcrdma_set_max_header_sizes(r_xprt); 199 235 } 200 236 201 237 static int ··· 268 244 " (%d initiator)\n", 269 245 __func__, attr->max_dest_rd_atomic, 270 246 attr->max_rd_atomic); 247 + rpcrdma_update_connect_private(xprt, &event->param.conn); 271 248 goto connected; 272 249 case RDMA_CM_EVENT_CONNECT_ERROR: 273 250 connstate = -ENOTCONN; ··· 479 454 rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, 480 455 struct rpcrdma_create_data_internal *cdata) 481 456 { 457 + struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private; 482 458 struct ib_cq *sendcq, *recvcq; 483 459 unsigned int max_qp_wr; 484 460 int rc; 485 461 486 - if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) { 462 + if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_SEND_SGES) { 487 463 dprintk("RPC: %s: insufficient sge's available\n", 488 464 __func__); 489 465 return -ENOMEM; ··· 513 487 ep->rep_attr.cap.max_recv_wr = cdata->max_requests; 514 488 ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; 515 489 ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */ 516 - ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS; 490 + ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_SEND_SGES; 517 491 ep->rep_attr.cap.max_recv_sge = 1; 518 492 ep->rep_attr.cap.max_inline_data = 0; 519 493 ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; ··· 562 536 /* Initialize cma parameters */ 563 537 memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma)); 564 538 565 - /* RPC/RDMA does not use private data */ 566 - ep->rep_remote_cma.private_data = NULL; 567 - 
ep->rep_remote_cma.private_data_len = 0; 539 + /* Prepare RDMA-CM private message */ 540 + pmsg->cp_magic = rpcrdma_cmp_magic; 541 + pmsg->cp_version = RPCRDMA_CMP_VERSION; 542 + pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok; 543 + pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize); 544 + pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize); 545 + ep->rep_remote_cma.private_data = pmsg; 546 + ep->rep_remote_cma.private_data_len = sizeof(*pmsg); 568 547 569 548 /* Client offers RDMA Read but does not initiate */ 570 549 ep->rep_remote_cma.initiator_depth = 0; ··· 880 849 req->rl_cqe.done = rpcrdma_wc_send; 881 850 req->rl_buffer = &r_xprt->rx_buf; 882 851 INIT_LIST_HEAD(&req->rl_registered); 852 + req->rl_send_wr.next = NULL; 853 + req->rl_send_wr.wr_cqe = &req->rl_cqe; 854 + req->rl_send_wr.sg_list = req->rl_send_sge; 855 + req->rl_send_wr.opcode = IB_WR_SEND; 883 856 return req; 884 857 } 885 858 ··· 900 865 if (rep == NULL) 901 866 goto out; 902 867 903 - rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize, 904 - GFP_KERNEL); 868 + rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize, 869 + DMA_FROM_DEVICE, GFP_KERNEL); 905 870 if (IS_ERR(rep->rr_rdmabuf)) { 906 871 rc = PTR_ERR(rep->rr_rdmabuf); 907 872 goto out_free; 908 873 } 909 874 910 875 rep->rr_device = ia->ri_device; 911 - rep->rr_cqe.done = rpcrdma_receive_wc; 876 + rep->rr_cqe.done = rpcrdma_wc_receive; 912 877 rep->rr_rxprt = r_xprt; 913 - INIT_WORK(&rep->rr_work, rpcrdma_receive_worker); 878 + INIT_WORK(&rep->rr_work, rpcrdma_reply_handler); 879 + rep->rr_recv_wr.next = NULL; 880 + rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; 881 + rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; 882 + rep->rr_recv_wr.num_sge = 1; 914 883 return rep; 915 884 916 885 out_free: ··· 1005 966 } 1006 967 1007 968 static void 1008 - rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep) 969 + rpcrdma_destroy_rep(struct rpcrdma_rep *rep) 1009 970 { 1010 - 
rpcrdma_free_regbuf(ia, rep->rr_rdmabuf); 971 + rpcrdma_free_regbuf(rep->rr_rdmabuf); 1011 972 kfree(rep); 1012 973 } 1013 974 1014 975 void 1015 - rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req) 976 + rpcrdma_destroy_req(struct rpcrdma_req *req) 1016 977 { 1017 - rpcrdma_free_regbuf(ia, req->rl_sendbuf); 1018 - rpcrdma_free_regbuf(ia, req->rl_rdmabuf); 978 + rpcrdma_free_regbuf(req->rl_recvbuf); 979 + rpcrdma_free_regbuf(req->rl_sendbuf); 980 + rpcrdma_free_regbuf(req->rl_rdmabuf); 1019 981 kfree(req); 1020 982 } 1021 983 ··· 1049 1009 void 1050 1010 rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) 1051 1011 { 1052 - struct rpcrdma_ia *ia = rdmab_to_ia(buf); 1053 - 1054 1012 cancel_delayed_work_sync(&buf->rb_recovery_worker); 1055 1013 1056 1014 while (!list_empty(&buf->rb_recv_bufs)) { 1057 1015 struct rpcrdma_rep *rep; 1058 1016 1059 1017 rep = rpcrdma_buffer_get_rep_locked(buf); 1060 - rpcrdma_destroy_rep(ia, rep); 1018 + rpcrdma_destroy_rep(rep); 1061 1019 } 1062 1020 buf->rb_send_count = 0; 1063 1021 ··· 1068 1030 list_del(&req->rl_all); 1069 1031 1070 1032 spin_unlock(&buf->rb_reqslock); 1071 - rpcrdma_destroy_req(ia, req); 1033 + rpcrdma_destroy_req(req); 1072 1034 spin_lock(&buf->rb_reqslock); 1073 1035 } 1074 1036 spin_unlock(&buf->rb_reqslock); ··· 1167 1129 struct rpcrdma_buffer *buffers = req->rl_buffer; 1168 1130 struct rpcrdma_rep *rep = req->rl_reply; 1169 1131 1170 - req->rl_niovs = 0; 1132 + req->rl_send_wr.num_sge = 0; 1171 1133 req->rl_reply = NULL; 1172 1134 1173 1135 spin_lock(&buffers->rb_lock); ··· 1209 1171 spin_unlock(&buffers->rb_lock); 1210 1172 } 1211 1173 1212 - /* 1213 - * Wrappers for internal-use kmalloc memory registration, used by buffer code. 
1214 - */ 1215 - 1216 1174 /** 1217 - * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers 1218 - * @ia: controlling rpcrdma_ia 1175 + * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers 1219 1176 * @size: size of buffer to be allocated, in bytes 1177 + * @direction: direction of data movement 1220 1178 * @flags: GFP flags 1221 1179 * 1222 - * Returns pointer to private header of an area of internally 1223 - * registered memory, or an ERR_PTR. The registered buffer follows 1224 - * the end of the private header. 1180 + * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that 1181 + * can be persistently DMA-mapped for I/O. 1225 1182 * 1226 1183 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for 1227 - * receiving the payload of RDMA RECV operations. regbufs are not 1228 - * used for RDMA READ/WRITE operations, thus are registered only for 1229 - * LOCAL access. 1184 + * receiving the payload of RDMA RECV operations. During Long Calls 1185 + * or Replies they may be registered externally via ro_map. 
1230 1186 */ 1231 1187 struct rpcrdma_regbuf * 1232 - rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) 1188 + rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction, 1189 + gfp_t flags) 1233 1190 { 1234 1191 struct rpcrdma_regbuf *rb; 1235 - struct ib_sge *iov; 1236 1192 1237 1193 rb = kmalloc(sizeof(*rb) + size, flags); 1238 1194 if (rb == NULL) 1239 - goto out; 1195 + return ERR_PTR(-ENOMEM); 1240 1196 1241 - iov = &rb->rg_iov; 1242 - iov->addr = ib_dma_map_single(ia->ri_device, 1243 - (void *)rb->rg_base, size, 1244 - DMA_BIDIRECTIONAL); 1245 - if (ib_dma_mapping_error(ia->ri_device, iov->addr)) 1246 - goto out_free; 1197 + rb->rg_device = NULL; 1198 + rb->rg_direction = direction; 1199 + rb->rg_iov.length = size; 1247 1200 1248 - iov->length = size; 1249 - iov->lkey = ia->ri_pd->local_dma_lkey; 1250 - rb->rg_size = size; 1251 - rb->rg_owner = NULL; 1252 1201 return rb; 1202 + } 1253 1203 1254 - out_free: 1255 - kfree(rb); 1256 - out: 1257 - return ERR_PTR(-ENOMEM); 1204 + /** 1205 + * __rpcrdma_map_regbuf - DMA-map a regbuf 1206 + * @ia: controlling rpcrdma_ia 1207 + * @rb: regbuf to be mapped 1208 + */ 1209 + bool 1210 + __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) 1211 + { 1212 + if (rb->rg_direction == DMA_NONE) 1213 + return false; 1214 + 1215 + rb->rg_iov.addr = ib_dma_map_single(ia->ri_device, 1216 + (void *)rb->rg_base, 1217 + rdmab_length(rb), 1218 + rb->rg_direction); 1219 + if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb))) 1220 + return false; 1221 + 1222 + rb->rg_device = ia->ri_device; 1223 + rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey; 1224 + return true; 1225 + } 1226 + 1227 + static void 1228 + rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) 1229 + { 1230 + if (!rpcrdma_regbuf_is_mapped(rb)) 1231 + return; 1232 + 1233 + ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), 1234 + rdmab_length(rb), rb->rg_direction); 1235 + rb->rg_device = NULL; 1258 1236 } 1259 1237 1260 1238 
/** 1261 1239 * rpcrdma_free_regbuf - deregister and free registered buffer 1262 - * @ia: controlling rpcrdma_ia 1263 1240 * @rb: regbuf to be deregistered and freed 1264 1241 */ 1265 1242 void 1266 - rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) 1243 + rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb) 1267 1244 { 1268 - struct ib_sge *iov; 1269 - 1270 1245 if (!rb) 1271 1246 return; 1272 1247 1273 - iov = &rb->rg_iov; 1274 - ib_dma_unmap_single(ia->ri_device, 1275 - iov->addr, iov->length, DMA_BIDIRECTIONAL); 1248 + rpcrdma_dma_unmap_regbuf(rb); 1276 1249 kfree(rb); 1277 1250 } 1278 1251 ··· 1297 1248 struct rpcrdma_ep *ep, 1298 1249 struct rpcrdma_req *req) 1299 1250 { 1300 - struct ib_device *device = ia->ri_device; 1301 - struct ib_send_wr send_wr, *send_wr_fail; 1302 - struct rpcrdma_rep *rep = req->rl_reply; 1303 - struct ib_sge *iov = req->rl_send_iov; 1304 - int i, rc; 1251 + struct ib_send_wr *send_wr = &req->rl_send_wr; 1252 + struct ib_send_wr *send_wr_fail; 1253 + int rc; 1305 1254 1306 - if (rep) { 1307 - rc = rpcrdma_ep_post_recv(ia, ep, rep); 1255 + if (req->rl_reply) { 1256 + rc = rpcrdma_ep_post_recv(ia, req->rl_reply); 1308 1257 if (rc) 1309 1258 return rc; 1310 1259 req->rl_reply = NULL; 1311 1260 } 1312 1261 1313 - send_wr.next = NULL; 1314 - send_wr.wr_cqe = &req->rl_cqe; 1315 - send_wr.sg_list = iov; 1316 - send_wr.num_sge = req->rl_niovs; 1317 - send_wr.opcode = IB_WR_SEND; 1318 - 1319 - for (i = 0; i < send_wr.num_sge; i++) 1320 - ib_dma_sync_single_for_device(device, iov[i].addr, 1321 - iov[i].length, DMA_TO_DEVICE); 1322 1262 dprintk("RPC: %s: posting %d s/g entries\n", 1323 - __func__, send_wr.num_sge); 1263 + __func__, send_wr->num_sge); 1324 1264 1325 1265 if (DECR_CQCOUNT(ep) > 0) 1326 - send_wr.send_flags = 0; 1266 + send_wr->send_flags = 0; 1327 1267 else { /* Provider must take a send completion every now and then */ 1328 1268 INIT_CQCOUNT(ep); 1329 - send_wr.send_flags = IB_SEND_SIGNALED; 1269 + 
send_wr->send_flags = IB_SEND_SIGNALED; 1330 1270 } 1331 1271 1332 - rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail); 1272 + rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail); 1333 1273 if (rc) 1334 1274 goto out_postsend_err; 1335 1275 return 0; ··· 1328 1290 return -ENOTCONN; 1329 1291 } 1330 1292 1331 - /* 1332 - * (Re)post a receive buffer. 1333 - */ 1334 1293 int 1335 1294 rpcrdma_ep_post_recv(struct rpcrdma_ia *ia, 1336 - struct rpcrdma_ep *ep, 1337 1295 struct rpcrdma_rep *rep) 1338 1296 { 1339 - struct ib_recv_wr recv_wr, *recv_wr_fail; 1297 + struct ib_recv_wr *recv_wr_fail; 1340 1298 int rc; 1341 1299 1342 - recv_wr.next = NULL; 1343 - recv_wr.wr_cqe = &rep->rr_cqe; 1344 - recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; 1345 - recv_wr.num_sge = 1; 1346 - 1347 - ib_dma_sync_single_for_cpu(ia->ri_device, 1348 - rdmab_addr(rep->rr_rdmabuf), 1349 - rdmab_length(rep->rr_rdmabuf), 1350 - DMA_BIDIRECTIONAL); 1351 - 1352 - rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); 1300 + if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf)) 1301 + goto out_map; 1302 + rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail); 1353 1303 if (rc) 1354 1304 goto out_postrecv; 1355 1305 return 0; 1306 + 1307 + out_map: 1308 + pr_err("rpcrdma: failed to DMA map the Receive buffer\n"); 1309 + return -EIO; 1356 1310 1357 1311 out_postrecv: 1358 1312 pr_err("rpcrdma: ib_post_recv returned %i\n", rc); ··· 1363 1333 { 1364 1334 struct rpcrdma_buffer *buffers = &r_xprt->rx_buf; 1365 1335 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 1366 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 1367 1336 struct rpcrdma_rep *rep; 1368 1337 int rc; 1369 1338 ··· 1373 1344 rep = rpcrdma_buffer_get_rep_locked(buffers); 1374 1345 spin_unlock(&buffers->rb_lock); 1375 1346 1376 - rc = rpcrdma_ep_post_recv(ia, ep, rep); 1347 + rc = rpcrdma_ep_post_recv(ia, rep); 1377 1348 if (rc) 1378 1349 goto out_rc; 1379 1350 }
+73 -35
net/sunrpc/xprtrdma/xprt_rdma.h
··· 70 70 struct ib_pd *ri_pd; 71 71 struct completion ri_done; 72 72 int ri_async_rc; 73 + unsigned int ri_max_segs; 73 74 unsigned int ri_max_frmr_depth; 74 75 unsigned int ri_max_inline_write; 75 76 unsigned int ri_max_inline_read; 77 + bool ri_reminv_expected; 76 78 struct ib_qp_attr ri_qp_attr; 77 79 struct ib_qp_init_attr ri_qp_init_attr; 78 80 }; ··· 89 87 int rep_connected; 90 88 struct ib_qp_init_attr rep_attr; 91 89 wait_queue_head_t rep_connect_wait; 90 + struct rpcrdma_connect_private rep_cm_private; 92 91 struct rdma_conn_param rep_remote_cma; 93 92 struct sockaddr_storage rep_remote_addr; 94 93 struct delayed_work rep_connect_worker; ··· 115 112 */ 116 113 117 114 struct rpcrdma_regbuf { 118 - size_t rg_size; 119 - struct rpcrdma_req *rg_owner; 120 115 struct ib_sge rg_iov; 116 + struct ib_device *rg_device; 117 + enum dma_data_direction rg_direction; 121 118 __be32 rg_base[0] __attribute__ ((aligned(256))); 122 119 }; 123 120 ··· 165 162 * The smallest inline threshold is 1024 bytes, ensuring that 166 163 * at least 750 bytes are available for RPC messages. 
167 164 */ 168 - #define RPCRDMA_MAX_HDR_SEGS (8) 165 + enum { 166 + RPCRDMA_MAX_HDR_SEGS = 8, 167 + RPCRDMA_HDRBUF_SIZE = 256, 168 + }; 169 169 170 170 /* 171 171 * struct rpcrdma_rep -- this structure encapsulates state required to recv ··· 188 182 struct rpcrdma_rep { 189 183 struct ib_cqe rr_cqe; 190 184 unsigned int rr_len; 185 + int rr_wc_flags; 186 + u32 rr_inv_rkey; 191 187 struct ib_device *rr_device; 192 188 struct rpcrdma_xprt *rr_rxprt; 193 189 struct work_struct rr_work; 194 190 struct list_head rr_list; 191 + struct ib_recv_wr rr_recv_wr; 195 192 struct rpcrdma_regbuf *rr_rdmabuf; 196 193 }; 197 194 ··· 285 276 char *mr_offset; /* kva if no page, else offset */ 286 277 }; 287 278 288 - #define RPCRDMA_MAX_IOVS (2) 279 + /* Reserve enough Send SGEs to send a maximum size inline request: 280 + * - RPC-over-RDMA header 281 + * - xdr_buf head iovec 282 + * - RPCRDMA_MAX_INLINE bytes, possibly unaligned, in pages 283 + * - xdr_buf tail iovec 284 + */ 285 + enum { 286 + RPCRDMA_MAX_SEND_PAGES = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1, 287 + RPCRDMA_MAX_PAGE_SGES = (RPCRDMA_MAX_SEND_PAGES >> PAGE_SHIFT) + 1, 288 + RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1, 289 + }; 289 290 290 291 struct rpcrdma_buffer; 291 292 struct rpcrdma_req { 292 293 struct list_head rl_free; 293 - unsigned int rl_niovs; 294 + unsigned int rl_mapped_sges; 294 295 unsigned int rl_connect_cookie; 295 - struct rpc_task *rl_task; 296 296 struct rpcrdma_buffer *rl_buffer; 297 - struct rpcrdma_rep *rl_reply;/* holder for reply buffer */ 298 - struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS]; 299 - struct rpcrdma_regbuf *rl_rdmabuf; 300 - struct rpcrdma_regbuf *rl_sendbuf; 297 + struct rpcrdma_rep *rl_reply; 298 + struct ib_send_wr rl_send_wr; 299 + struct ib_sge rl_send_sge[RPCRDMA_MAX_SEND_SGES]; 300 + struct rpcrdma_regbuf *rl_rdmabuf; /* xprt header */ 301 + struct rpcrdma_regbuf *rl_sendbuf; /* rq_snd_buf */ 302 + struct rpcrdma_regbuf *rl_recvbuf; /* rq_rcv_buf */ 301 303 302 304 
struct ib_cqe rl_cqe; 303 305 struct list_head rl_all; ··· 318 298 struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; 319 299 }; 320 300 301 + static inline void 302 + rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req) 303 + { 304 + rqst->rq_xprtdata = req; 305 + } 306 + 321 307 static inline struct rpcrdma_req * 322 308 rpcr_to_rdmar(struct rpc_rqst *rqst) 323 309 { 324 - void *buffer = rqst->rq_buffer; 325 - struct rpcrdma_regbuf *rb; 326 - 327 - rb = container_of(buffer, struct rpcrdma_regbuf, rg_base); 328 - return rb->rg_owner; 310 + return rqst->rq_xprtdata; 329 311 } 330 312 331 313 /* ··· 378 356 unsigned int padding; /* non-rdma write header padding */ 379 357 }; 380 358 381 - #define RPCRDMA_INLINE_READ_THRESHOLD(rq) \ 382 - (rpcx_to_rdmad(rq->rq_xprt).inline_rsize) 383 - 384 - #define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\ 385 - (rpcx_to_rdmad(rq->rq_xprt).inline_wsize) 386 - 387 - #define RPCRDMA_INLINE_PAD_VALUE(rq)\ 388 - rpcx_to_rdmad(rq->rq_xprt).padding 389 - 390 359 /* 391 360 * Statistics for RPCRDMA 392 361 */ ··· 399 386 unsigned long mrs_recovered; 400 387 unsigned long mrs_orphaned; 401 388 unsigned long mrs_allocated; 389 + unsigned long local_inv_needed; 402 390 }; 403 391 404 392 /* ··· 423 409 struct rpcrdma_mw *); 424 410 void (*ro_release_mr)(struct rpcrdma_mw *); 425 411 const char *ro_displayname; 412 + const int ro_send_w_inv_ok; 426 413 }; 427 414 428 415 extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops; ··· 476 461 477 462 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *, 478 463 struct rpcrdma_req *); 479 - int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *, 480 - struct rpcrdma_rep *); 464 + int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *); 481 465 482 466 /* 483 467 * Buffer calls - xprtrdma/verbs.c 484 468 */ 485 469 struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *); 486 470 struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *); 487 
- void rpcrdma_destroy_req(struct rpcrdma_ia *, struct rpcrdma_req *); 471 + void rpcrdma_destroy_req(struct rpcrdma_req *); 488 472 int rpcrdma_buffer_create(struct rpcrdma_xprt *); 489 473 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *); 490 474 ··· 496 482 497 483 void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *); 498 484 499 - struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *, 500 - size_t, gfp_t); 501 - void rpcrdma_free_regbuf(struct rpcrdma_ia *, 502 - struct rpcrdma_regbuf *); 485 + struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction, 486 + gfp_t); 487 + bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *); 488 + void rpcrdma_free_regbuf(struct rpcrdma_regbuf *); 489 + 490 + static inline bool 491 + rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb) 492 + { 493 + return rb->rg_device != NULL; 494 + } 495 + 496 + static inline bool 497 + rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) 498 + { 499 + if (likely(rpcrdma_regbuf_is_mapped(rb))) 500 + return true; 501 + return __rpcrdma_dma_map_regbuf(ia, rb); 502 + } 503 503 504 504 int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int); 505 505 ··· 535 507 */ 536 508 void rpcrdma_connect_worker(struct work_struct *); 537 509 void rpcrdma_conn_func(struct rpcrdma_ep *); 538 - void rpcrdma_reply_handler(struct rpcrdma_rep *); 510 + void rpcrdma_reply_handler(struct work_struct *); 539 511 540 512 /* 541 513 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c 542 514 */ 515 + 516 + enum rpcrdma_chunktype { 517 + rpcrdma_noch = 0, 518 + rpcrdma_readch, 519 + rpcrdma_areadch, 520 + rpcrdma_writech, 521 + rpcrdma_replych 522 + }; 523 + 524 + bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *, 525 + u32, struct xdr_buf *, enum rpcrdma_chunktype); 526 + void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *); 543 527 int rpcrdma_marshal_req(struct rpc_rqst *); 544 - void 
rpcrdma_set_max_header_sizes(struct rpcrdma_ia *, 545 - struct rpcrdma_create_data_internal *, 546 - unsigned int); 528 + void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *); 547 529 548 530 /* RPC/RDMA module init - xprtrdma/transport.c 549 531 */
+23 -11
net/sunrpc/xprtsock.c
··· 473 473 spin_unlock_bh(&xprt->transport_lock); 474 474 475 475 /* Race breaker in case memory is freed before above code is called */ 476 - sk->sk_write_space(sk); 476 + if (ret == -EAGAIN) { 477 + struct socket_wq *wq; 478 + 479 + rcu_read_lock(); 480 + wq = rcu_dereference(sk->sk_wq); 481 + set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); 482 + rcu_read_unlock(); 483 + 484 + sk->sk_write_space(sk); 485 + } 477 486 return ret; 478 487 } 479 488 ··· 2542 2533 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2543 2534 * to use the server side send routines. 2544 2535 */ 2545 - static void *bc_malloc(struct rpc_task *task, size_t size) 2536 + static int bc_malloc(struct rpc_task *task) 2546 2537 { 2538 + struct rpc_rqst *rqst = task->tk_rqstp; 2539 + size_t size = rqst->rq_callsize; 2547 2540 struct page *page; 2548 2541 struct rpc_buffer *buf; 2549 2542 2550 - WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer)); 2551 - if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) 2552 - return NULL; 2543 + if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) { 2544 + WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n", 2545 + size); 2546 + return -EINVAL; 2547 + } 2553 2548 2554 2549 page = alloc_page(GFP_KERNEL); 2555 2550 if (!page) 2556 - return NULL; 2551 + return -ENOMEM; 2557 2552 2558 2553 buf = page_address(page); 2559 2554 buf->len = PAGE_SIZE; 2560 2555 2561 - return buf->data; 2556 + rqst->rq_buffer = buf->data; 2557 + return 0; 2562 2558 } 2563 2559 2564 2560 /* 2565 2561 * Free the space allocated in the bc_alloc routine 2566 2562 */ 2567 - static void bc_free(void *buffer) 2563 + static void bc_free(struct rpc_task *task) 2568 2564 { 2565 + void *buffer = task->tk_rqstp->rq_buffer; 2569 2566 struct rpc_buffer *buf; 2570 - 2571 - if (!buffer) 2572 - return; 2573 2567 2574 2568 buf = container_of(buffer, struct rpc_buffer, data); 2575 2569 free_page((unsigned long)buf);