Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sunrpc: implement rfc2203 rpcsec_gss seqnum cache

This implements a sequence number cache of the last three (right now
hardcoded) sent sequence numbers for a given XID, as suggested by the
RFC.

From RFC2203 5.3.3.1:

"Note that the sequence number algorithm requires that the client
increment the sequence number even if it is retrying a request with
the same RPC transaction identifier. It is not infrequent for
clients to get into a situation where they send two or more attempts
and a slow server sends the reply for the first attempt. With
RPCSEC_GSS, each request and reply will have a unique sequence
number. If the client wishes to improve turn around time on the RPC
call, it can cache the RPCSEC_GSS sequence number of each request it
sends. Then when it receives a response with a matching RPC
transaction identifier, it can compute the checksum of each sequence
number in the cache to try to match the checksum in the reply's
verifier."

Signed-off-by: Nikhil Jha <njha@janestreet.com>
Acked-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>

authored by

Nikhil Jha and committed by
Anna Schumaker
08d6ee6d a5806cd5

+57 -28
+16 -1
include/linux/sunrpc/xprt.h
··· 30 30 #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) 31 31 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) 32 32 33 + #define RPC_GSS_SEQNO_ARRAY_SIZE 3U 34 + 33 35 enum rpc_display_format_t { 34 36 RPC_DISPLAY_ADDR = 0, 35 37 RPC_DISPLAY_PORT, ··· 68 66 struct rpc_cred * rq_cred; /* Bound cred */ 69 67 __be32 rq_xid; /* request XID */ 70 68 int rq_cong; /* has incremented xprt->cong */ 71 - u32 rq_seqno; /* gss seq no. used on req. */ 69 + u32 rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE]; /* past gss req seq nos. */ 70 + unsigned int rq_seqno_count; /* number of entries in rq_seqnos */ 72 71 int rq_enc_pages_num; 73 72 struct page **rq_enc_pages; /* scratch pages for use by 74 73 gss privacy code */ ··· 121 118 }; 122 119 #define rq_svec rq_snd_buf.head 123 120 #define rq_slen rq_snd_buf.len 121 + 122 + static inline int xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno) 123 + { 124 + if (likely(req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE)) 125 + req->rq_seqno_count++; 126 + 127 + /* Shift array to make room for the newest element at the beginning */ 128 + memmove(&req->rq_seqnos[1], &req->rq_seqnos[0], 129 + (RPC_GSS_SEQNO_ARRAY_SIZE - 1) * sizeof(req->rq_seqnos[0])); 130 + req->rq_seqnos[0] = seqno; 131 + return 0; 132 + } 124 133 125 134 /* RPC transport layer security policies */ 126 135 enum xprtsec_policies {
+2 -2
include/trace/events/rpcgss.h
··· 409 409 __entry->task_id = task->tk_pid; 410 410 __entry->client_id = task->tk_client->cl_clid; 411 411 __entry->xid = be32_to_cpu(rqst->rq_xid); 412 - __entry->seqno = rqst->rq_seqno; 412 + __entry->seqno = *rqst->rq_seqnos; 413 413 ), 414 414 415 415 TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u", ··· 440 440 __entry->client_id = task->tk_client->cl_clid; 441 441 __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); 442 442 __entry->seq_xmit = seq_xmit; 443 - __entry->seqno = task->tk_rqstp->rq_seqno; 443 + __entry->seqno = *task->tk_rqstp->rq_seqnos; 444 444 __entry->ret = ret; 445 445 ), 446 446
+1 -1
include/trace/events/sunrpc.h
··· 1100 1100 __entry->client_id = rqst->rq_task->tk_client ? 1101 1101 rqst->rq_task->tk_client->cl_clid : -1; 1102 1102 __entry->xid = be32_to_cpu(rqst->rq_xid); 1103 - __entry->seqno = rqst->rq_seqno; 1103 + __entry->seqno = *rqst->rq_seqnos; 1104 1104 __entry->status = status; 1105 1105 ), 1106 1106
+36 -23
net/sunrpc/auth_gss/auth_gss.c
··· 1545 1545 struct kvec iov; 1546 1546 struct xdr_buf verf_buf; 1547 1547 int status; 1548 + u32 seqno; 1548 1549 1549 1550 /* Credential */ 1550 1551 ··· 1557 1556 cred_len = p++; 1558 1557 1559 1558 spin_lock(&ctx->gc_seq_lock); 1560 - req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; 1559 + seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; 1560 + xprt_rqst_add_seqno(req, seqno); 1561 1561 spin_unlock(&ctx->gc_seq_lock); 1562 - if (req->rq_seqno == MAXSEQ) 1562 + if (*req->rq_seqnos == MAXSEQ) 1563 1563 goto expired; 1564 1564 trace_rpcgss_seqno(task); 1565 1565 1566 1566 *p++ = cpu_to_be32(RPC_GSS_VERSION); 1567 1567 *p++ = cpu_to_be32(ctx->gc_proc); 1568 - *p++ = cpu_to_be32(req->rq_seqno); 1568 + *p++ = cpu_to_be32(*req->rq_seqnos); 1569 1569 *p++ = cpu_to_be32(gss_cred->gc_service); 1570 1570 p = xdr_encode_netobj(p, &ctx->gc_wire_ctx); 1571 1571 *cred_len = cpu_to_be32((p - (cred_len + 1)) << 2); ··· 1680 1678 return 0; 1681 1679 } 1682 1680 1681 + static u32 1682 + gss_validate_seqno_mic(struct gss_cl_ctx *ctx, u32 seqno, __be32 *seq, __be32 *p, u32 len) 1683 + { 1684 + struct kvec iov; 1685 + struct xdr_buf verf_buf; 1686 + struct xdr_netobj mic; 1687 + 1688 + *seq = cpu_to_be32(seqno); 1689 + iov.iov_base = seq; 1690 + iov.iov_len = 4; 1691 + xdr_buf_from_iov(&iov, &verf_buf); 1692 + mic.data = (u8 *)p; 1693 + mic.len = len; 1694 + return gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1695 + } 1696 + 1683 1697 static int 1684 1698 gss_validate(struct rpc_task *task, struct xdr_stream *xdr) 1685 1699 { 1686 1700 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1687 1701 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); 1688 1702 __be32 *p, *seq = NULL; 1689 - struct kvec iov; 1690 - struct xdr_buf verf_buf; 1691 - struct xdr_netobj mic; 1692 1703 u32 len, maj_stat; 1693 1704 int status; 1705 + int i = 1; /* don't recheck the first item */ 1694 1706 1695 1707 p = xdr_inline_decode(xdr, 2 * sizeof(*p)); 1696 1708 if (!p) ··· 1721 
1705 seq = kmalloc(4, GFP_KERNEL); 1722 1706 if (!seq) 1723 1707 goto validate_failed; 1724 - *seq = cpu_to_be32(task->tk_rqstp->rq_seqno); 1725 - iov.iov_base = seq; 1726 - iov.iov_len = 4; 1727 - xdr_buf_from_iov(&iov, &verf_buf); 1728 - mic.data = (u8 *)p; 1729 - mic.len = len; 1730 - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1708 + maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[0], seq, p, len); 1709 + /* RFC 2203 5.3.3.1 - compute the checksum of each sequence number in the cache */ 1710 + while (unlikely(maj_stat == GSS_S_BAD_SIG && i < task->tk_rqstp->rq_seqno_count)) 1711 + maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i++], seq, p, len); 1731 1712 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 1732 1713 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1733 1714 if (maj_stat) ··· 1763 1750 if (!p) 1764 1751 goto wrap_failed; 1765 1752 integ_len = p++; 1766 - *p = cpu_to_be32(rqstp->rq_seqno); 1753 + *p = cpu_to_be32(*rqstp->rq_seqnos); 1767 1754 1768 1755 if (rpcauth_wrap_req_encode(task, xdr)) 1769 1756 goto wrap_failed; ··· 1860 1847 if (!p) 1861 1848 goto wrap_failed; 1862 1849 opaque_len = p++; 1863 - *p = cpu_to_be32(rqstp->rq_seqno); 1850 + *p = cpu_to_be32(*rqstp->rq_seqnos); 1864 1851 1865 1852 if (rpcauth_wrap_req_encode(task, xdr)) 1866 1853 goto wrap_failed; ··· 2014 2001 offset = rcv_buf->len - xdr_stream_remaining(xdr); 2015 2002 if (xdr_stream_decode_u32(xdr, &seqno)) 2016 2003 goto unwrap_failed; 2017 - if (seqno != rqstp->rq_seqno) 2004 + if (seqno != *rqstp->rq_seqnos) 2018 2005 goto bad_seqno; 2019 2006 if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len)) 2020 2007 goto unwrap_failed; ··· 2058 2045 trace_rpcgss_unwrap_failed(task); 2059 2046 goto out; 2060 2047 bad_seqno: 2061 - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno); 2048 + trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, seqno); 2062 2049 goto out; 2063 2050 bad_mic: 2064 2051 trace_rpcgss_verify_mic(task, maj_stat); ···
2090 2077 if (maj_stat != GSS_S_COMPLETE) 2091 2078 goto bad_unwrap; 2092 2079 /* gss_unwrap decrypted the sequence number */ 2093 - if (be32_to_cpup(p++) != rqstp->rq_seqno) 2080 + if (be32_to_cpup(p++) != *rqstp->rq_seqnos) 2094 2081 goto bad_seqno; 2095 2082 2096 2083 /* gss_unwrap redacts the opaque blob from the head iovec. ··· 2106 2093 trace_rpcgss_unwrap_failed(task); 2107 2094 return -EIO; 2108 2095 bad_seqno: 2109 - trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p)); 2096 + trace_rpcgss_bad_seqno(task, *rqstp->rq_seqnos, be32_to_cpup(--p)); 2110 2097 return -EIO; 2111 2098 bad_unwrap: 2112 2099 trace_rpcgss_unwrap(task, maj_stat); ··· 2131 2118 if (!ctx) 2132 2119 goto out; 2133 2120 2134 - if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq))) 2121 + if (gss_seq_is_newer(*req->rq_seqnos, READ_ONCE(ctx->gc_seq))) 2135 2122 goto out_ctx; 2136 2123 2137 2124 seq_xmit = READ_ONCE(ctx->gc_seq_xmit); 2138 - while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) { 2125 + while (gss_seq_is_newer(*req->rq_seqnos, seq_xmit)) { 2139 2126 u32 tmp = seq_xmit; 2140 2127 2141 - seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno); 2128 + seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, *req->rq_seqnos); 2142 2129 if (seq_xmit == tmp) { 2143 2130 ret = false; 2144 2131 goto out_ctx; ··· 2147 2134 2148 2135 win = ctx->gc_win; 2149 2136 if (win > 0) 2150 - ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win); 2137 + ret = !gss_seq_is_newer(*req->rq_seqnos, seq_xmit - win); 2151 2138 2152 2139 out_ctx: 2153 2140 gss_put_ctx(ctx);
+2 -1
net/sunrpc/xprt.c
··· 1365 1365 INIT_LIST_HEAD(&req->rq_xmit2); 1366 1366 goto out; 1367 1367 } 1368 - } else if (!req->rq_seqno) { 1368 + } else if (req->rq_seqno_count == 0) { 1369 1369 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { 1370 1370 if (pos->rq_task->tk_owner != task->tk_owner) 1371 1371 continue; ··· 1898 1898 req->rq_snd_buf.bvec = NULL; 1899 1899 req->rq_rcv_buf.bvec = NULL; 1900 1900 req->rq_release_snd_buf = NULL; 1901 + req->rq_seqno_count = 0; 1901 1902 xprt_init_majortimeo(task, req, task->tk_client->cl_timeout); 1902 1903 1903 1904 trace_xprt_reserve(req);