Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xprtrdma: Remove atomic send completion counting

The sendctx circular queue now guarantees that xprtrdma cannot
overflow the Send Queue, so remove the remaining bits of the
original Send WQE counting mechanism.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Authored by Chuck Lever and committed by Anna Schumaker.
Commit: 6f0afc28 (parent: 01bb35c8)

Diffstat: 33 lines removed in total.

net/sunrpc/xprtrdma/frwr_ops.c (-8 lines):
···
 		IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 		IB_ACCESS_REMOTE_READ;

-	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
 	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
 	if (rc)
 		goto out_senderr;
···
 	f->fr_cqe.done = frwr_wc_localinv_wake;
 	reinit_completion(&f->fr_linv_done);

-	/* Initialize CQ count, since there is always a signaled
-	 * WR being posted here. The new cqcount depends on how
-	 * many SQEs are about to be consumed.
-	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
-
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
 	 * unless ri_id->qp is a valid pointer.
···
 	/* Find and reset the MRs in the LOCAL_INV WRs that did not
 	 * get posted.
 	 */
-	rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
 	while (bad_wr) {
 		f = container_of(bad_wr, struct rpcrdma_frmr,
 				 fr_invwr);
net/sunrpc/xprtrdma/verbs.c (-4 lines):
···
 	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
 				   cdata->max_requests >> 2);
 	ep->rep_send_count = ep->rep_send_batch;
-	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
-	if (ep->rep_cqinit <= 2)
-		ep->rep_cqinit = 0;	/* always signal? */
-	rpcrdma_init_cqcount(ep, 0);
 	init_waitqueue_head(&ep->rep_connect_wait);
 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
net/sunrpc/xprtrdma/xprt_rdma.h (-21 lines):
···
 struct rpcrdma_ep {
 	unsigned int		rep_send_count;
 	unsigned int		rep_send_batch;
-	atomic_t		rep_cqcount;
-	int			rep_cqinit;
 	int			rep_connected;
 	struct ib_qp_init_attr	rep_attr;
 	wait_queue_head_t	rep_connect_wait;
···
 	struct sockaddr_storage	rep_remote_addr;
 	struct delayed_work	rep_connect_worker;
 };
-
-static inline void
-rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
-{
-	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
-}
-
-/* To update send queue accounting, provider must take a
- * send completion every now and then.
- */
-static inline void
-rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
-{
-	send_wr->send_flags = 0;
-	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
-		rpcrdma_init_cqcount(ep, 0);
-		send_wr->send_flags = IB_SEND_SIGNALED;
-	}
-}

 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are