Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

svcrdma: Use llist for managing cache of recv_ctxts

Use a wait-free mechanism for managing the svc_rdma_recv_ctxts free
list. Subsequently, sc_recv_lock can be eliminated.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>

Authored by Chuck Lever; committed by J. Bruce Fields.
4866073e d6dfe43e

Total: +14 -18
+3 -2
include/linux/sunrpc/svc_rdma.h
···
  #ifndef SVC_RDMA_H
  #define SVC_RDMA_H
+ #include <linux/llist.h>
  #include <linux/sunrpc/xdr.h>
  #include <linux/sunrpc/svcsock.h>
  #include <linux/sunrpc/rpc_rdma.h>
···
  	struct list_head     sc_read_complete_q;
  	struct work_struct   sc_work;

- 	spinlock_t           sc_recv_lock;
- 	struct list_head     sc_recv_ctxts;
+ 	struct llist_head    sc_recv_ctxts;
  };
  /* sc_flags */
  #define RDMAXPRT_CONN_PENDING	3
···
  #define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD

  struct svc_rdma_recv_ctxt {
+ 	struct llist_node	rc_node;
  	struct list_head	rc_list;
  	struct ib_recv_wr	rc_recv_wr;
  	struct ib_cqe		rc_cqe;
+10 -14
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
···
  void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
  {
  	struct svc_rdma_recv_ctxt *ctxt;
+ 	struct llist_node *node;

- 	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
- 		list_del(&ctxt->rc_list);
+ 	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
+ 		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
  		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
  	}
  }
···
  svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
  {
  	struct svc_rdma_recv_ctxt *ctxt;
+ 	struct llist_node *node;

- 	spin_lock(&rdma->sc_recv_lock);
- 	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
- 	if (!ctxt)
+ 	node = llist_del_first(&rdma->sc_recv_ctxts);
+ 	if (!node)
  		goto out_empty;
- 	list_del(&ctxt->rc_list);
- 	spin_unlock(&rdma->sc_recv_lock);
+ 	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

  out:
  	ctxt->rc_page_count = 0;
  	return ctxt;

  out_empty:
- 	spin_unlock(&rdma->sc_recv_lock);
-
  	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
  	if (!ctxt)
  		return NULL;
···
  	for (i = 0; i < ctxt->rc_page_count; i++)
  		put_page(ctxt->rc_pages[i]);

- 	if (!ctxt->rc_temp) {
- 		spin_lock(&rdma->sc_recv_lock);
- 		list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
- 		spin_unlock(&rdma->sc_recv_lock);
- 	} else
+ 	if (!ctxt->rc_temp)
+ 		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
+ 	else
  		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
  }
+1 -2
net/sunrpc/xprtrdma/svc_rdma_transport.c
···
  	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
  	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
  	INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
- 	INIT_LIST_HEAD(&cma_xprt->sc_recv_ctxts);
+ 	init_llist_head(&cma_xprt->sc_recv_ctxts);
  	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
  	init_waitqueue_head(&cma_xprt->sc_send_wait);

  	spin_lock_init(&cma_xprt->sc_lock);
  	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
  	spin_lock_init(&cma_xprt->sc_send_lock);
- 	spin_lock_init(&cma_xprt->sc_recv_lock);
  	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

  	/*