Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

NFSv4.1: Use the nfs_client's rpc timeouts for backchannel

For backchannel requests that lookup the appropriate nfs_client, use the
state-management rpc_clnt's rpc_timeout parameters for the backchannel's
response. When the nfs_client cannot be found, fall back to using the
xprt's default timeout parameters.

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
Tested-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Authored by Benjamin Coddington; committed by Anna Schumaker.
57331a59 e6f533b6

+45 -19
+5
fs/nfs/callback_xdr.c
··· 967 967 nops--; 968 968 } 969 969 970 + if (svc_is_backchannel(rqstp) && cps.clp) { 971 + rqstp->bc_to_initval = cps.clp->cl_rpcclient->cl_timeout->to_initval; 972 + rqstp->bc_to_retries = cps.clp->cl_rpcclient->cl_timeout->to_retries; 973 + } 974 + 970 975 *hdr_res.status = status; 971 976 *hdr_res.nops = htonl(nops); 972 977 nfs4_cb_free_slot(&cps);
+2 -1
include/linux/sunrpc/bc_xprt.h
··· 20 20 #ifdef CONFIG_SUNRPC_BACKCHANNEL 21 21 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid); 22 22 void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); 23 - void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task); 23 + void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task, 24 + const struct rpc_timeout *to); 24 25 void xprt_free_bc_request(struct rpc_rqst *req); 25 26 int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); 26 27 void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
+13 -1
include/linux/sunrpc/sched.h
··· 38 38 }; 39 39 40 40 /* 41 + * This describes a timeout strategy 42 + */ 43 + struct rpc_timeout { 44 + unsigned long to_initval, /* initial timeout */ 45 + to_maxval, /* max timeout */ 46 + to_increment; /* if !exponential */ 47 + unsigned int to_retries; /* max # of retries */ 48 + unsigned char to_exponential; 49 + }; 50 + 51 + /* 41 52 * This is the RPC task struct 42 53 */ 43 54 struct rpc_task { ··· 216 205 */ 217 206 struct rpc_task *rpc_new_task(const struct rpc_task_setup *); 218 207 struct rpc_task *rpc_run_task(const struct rpc_task_setup *); 219 - struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); 208 + struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, 209 + struct rpc_timeout *timeout); 220 210 void rpc_put_task(struct rpc_task *); 221 211 void rpc_put_task_async(struct rpc_task *); 222 212 bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
+2
include/linux/sunrpc/svc.h
··· 250 250 struct net *rq_bc_net; /* pointer to backchannel's 251 251 * net namespace 252 252 */ 253 + unsigned long bc_to_initval; 254 + unsigned int bc_to_retries; 253 255 void ** rq_lease_breaker; /* The v4 client breaking a lease */ 254 256 unsigned int rq_status_counter; /* RPC processing counter */ 255 257 };
-11
include/linux/sunrpc/xprt.h
··· 30 30 #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) 31 31 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) 32 32 33 - /* 34 - * This describes a timeout strategy 35 - */ 36 - struct rpc_timeout { 37 - unsigned long to_initval, /* initial timeout */ 38 - to_maxval, /* max timeout */ 39 - to_increment; /* if !exponential */ 40 - unsigned int to_retries; /* max # of retries */ 41 - unsigned char to_exponential; 42 - }; 43 - 44 33 enum rpc_display_format_t { 45 34 RPC_DISPLAY_ADDR = 0, 46 35 RPC_DISPLAY_PORT,
+4 -2
net/sunrpc/clnt.c
··· 1311 1311 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run 1312 1312 * rpc_execute against it 1313 1313 * @req: RPC request 1314 + * @timeout: timeout values to use for this task 1314 1315 */ 1315 - struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req) 1316 + struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, 1317 + struct rpc_timeout *timeout) 1316 1318 { 1317 1319 struct rpc_task *task; 1318 1320 struct rpc_task_setup task_setup_data = { ··· 1333 1331 return task; 1334 1332 } 1335 1333 1336 - xprt_init_bc_request(req, task); 1334 + xprt_init_bc_request(req, task, timeout); 1337 1335 1338 1336 task->tk_action = call_bc_encode; 1339 1337 atomic_inc(&task->tk_count);
+10 -1
net/sunrpc/svc.c
··· 1557 1557 { 1558 1558 struct rpc_task *task; 1559 1559 int proc_error; 1560 + struct rpc_timeout timeout; 1560 1561 1561 1562 /* Build the svc_rqst used by the common processing routine */ 1562 1563 rqstp->rq_xid = req->rq_xid; ··· 1603 1602 return; 1604 1603 } 1605 1604 /* Finally, send the reply synchronously */ 1605 + if (rqstp->bc_to_initval > 0) { 1606 + timeout.to_initval = rqstp->bc_to_initval; 1607 + timeout.to_retries = rqstp->bc_to_retries; 1608 + } else { 1609 + timeout.to_initval = req->rq_xprt->timeout->to_initval; 1610 + timeout.to_retries = req->rq_xprt->timeout->to_retries; 1611 + } 1606 1612 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); 1607 - task = rpc_run_bc_task(req); 1613 + task = rpc_run_bc_task(req, &timeout); 1614 + 1608 1615 if (IS_ERR(task)) 1609 1616 return; 1610 1617
+9 -3
net/sunrpc/xprt.c
··· 1986 1986 1987 1987 #ifdef CONFIG_SUNRPC_BACKCHANNEL 1988 1988 void 1989 - xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task) 1989 + xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task, 1990 + const struct rpc_timeout *to) 1990 1991 { 1991 1992 struct xdr_buf *xbufp = &req->rq_snd_buf; 1992 1993 ··· 2000 1999 */ 2001 2000 xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + 2002 2001 xbufp->tail[0].iov_len; 2003 - 2004 - xprt_init_majortimeo(task, req, req->rq_xprt->timeout); 2002 + /* 2003 + * Backchannel Replies are sent with !RPC_TASK_SOFT and 2004 + * RPC_TASK_NO_RETRANS_TIMEOUT. The major timeout setting 2005 + * affects only how long each Reply waits to be sent when 2006 + * a transport connection cannot be established. 2007 + */ 2008 + xprt_init_majortimeo(task, req, to); 2005 2009 } 2006 2010 #endif 2007 2011