Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xprtrdma: Saving IRQs no longer needed for rb_lock

Now that RPC replies are processed in a workqueue, there's no need
to disable IRQs when managing send and receive buffers. This saves
noticeable overhead per RPC.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Authored by Chuck Lever; committed by Anna Schumaker.
a5b027e1 2da9ab30

+10 -14
+10 -14
net/sunrpc/xprtrdma/verbs.c
··· 1063 1063 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) 1064 1064 { 1065 1065 struct rpcrdma_req *req; 1066 - unsigned long flags; 1067 1066 1068 - spin_lock_irqsave(&buffers->rb_lock, flags); 1067 + spin_lock(&buffers->rb_lock); 1069 1068 if (list_empty(&buffers->rb_send_bufs)) 1070 1069 goto out_reqbuf; 1071 1070 req = rpcrdma_buffer_get_req_locked(buffers); 1072 1071 if (list_empty(&buffers->rb_recv_bufs)) 1073 1072 goto out_repbuf; 1074 1073 req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers); 1075 - spin_unlock_irqrestore(&buffers->rb_lock, flags); 1074 + spin_unlock(&buffers->rb_lock); 1076 1075 return req; 1077 1076 1078 1077 out_reqbuf: 1079 - spin_unlock_irqrestore(&buffers->rb_lock, flags); 1078 + spin_unlock(&buffers->rb_lock); 1080 1079 pr_warn("RPC: %s: out of request buffers\n", __func__); 1081 1080 return NULL; 1082 1081 out_repbuf: 1083 - spin_unlock_irqrestore(&buffers->rb_lock, flags); 1082 + spin_unlock(&buffers->rb_lock); 1084 1083 pr_warn("RPC: %s: out of reply buffers\n", __func__); 1085 1084 req->rl_reply = NULL; 1086 1085 return req; ··· 1094 1095 { 1095 1096 struct rpcrdma_buffer *buffers = req->rl_buffer; 1096 1097 struct rpcrdma_rep *rep = req->rl_reply; 1097 - unsigned long flags; 1098 1098 1099 1099 req->rl_niovs = 0; 1100 1100 req->rl_reply = NULL; 1101 1101 1102 - spin_lock_irqsave(&buffers->rb_lock, flags); 1102 + spin_lock(&buffers->rb_lock); 1103 1103 list_add_tail(&req->rl_free, &buffers->rb_send_bufs); 1104 1104 if (rep) 1105 1105 list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs); 1106 - spin_unlock_irqrestore(&buffers->rb_lock, flags); 1106 + spin_unlock(&buffers->rb_lock); 1107 1107 } 1108 1108 1109 1109 /* ··· 1113 1115 rpcrdma_recv_buffer_get(struct rpcrdma_req *req) 1114 1116 { 1115 1117 struct rpcrdma_buffer *buffers = req->rl_buffer; 1116 - unsigned long flags; 1117 1118 1118 - spin_lock_irqsave(&buffers->rb_lock, flags); 1119 + spin_lock(&buffers->rb_lock); 1119 1120 if (!list_empty(&buffers->rb_recv_bufs)) 
1120 1121 req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers); 1121 - spin_unlock_irqrestore(&buffers->rb_lock, flags); 1122 + spin_unlock(&buffers->rb_lock); 1122 1123 } 1123 1124 1124 1125 /* ··· 1128 1131 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) 1129 1132 { 1130 1133 struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf; 1131 - unsigned long flags; 1132 1134 1133 - spin_lock_irqsave(&buffers->rb_lock, flags); 1135 + spin_lock(&buffers->rb_lock); 1134 1136 list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs); 1135 - spin_unlock_irqrestore(&buffers->rb_lock, flags); 1137 + spin_unlock(&buffers->rb_lock); 1136 1138 } 1137 1139 1138 1140 /*