Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xprtrdma: Make rpc_rqst part of rpcrdma_req

This simplifies allocation of the generic RPC slot and xprtrdma
specific per-RPC resources.

It also makes xprtrdma more like the socket-based transports:
->buf_alloc and ->buf_free are now responsible only for send and
receive buffers.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Authored by Chuck Lever; committed by Anna Schumaker.
edb41e61 48be539d

+45 -75
+0 -1
include/linux/sunrpc/xprt.h
··· 84 84 void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ 85 85 struct list_head rq_list; 86 86 87 - void *rq_xprtdata; /* Per-xprt private data */ 88 87 void *rq_buffer; /* Call XDR encode buffer */ 89 88 size_t rq_callsize; 90 89 void *rq_rbuffer; /* Reply XDR decode buffer */
+34 -41
net/sunrpc/xprtrdma/backchannel.c
··· 29 29 spin_unlock(&buf->rb_reqslock); 30 30 31 31 rpcrdma_destroy_req(req); 32 - 33 - kfree(rqst); 34 32 } 35 33 36 - static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt, 37 - struct rpc_rqst *rqst) 34 + static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt, 35 + unsigned int count) 38 36 { 39 - struct rpcrdma_regbuf *rb; 40 - struct rpcrdma_req *req; 41 - size_t size; 37 + struct rpc_xprt *xprt = &r_xprt->rx_xprt; 38 + struct rpc_rqst *rqst; 39 + unsigned int i; 42 40 43 - req = rpcrdma_create_req(r_xprt); 44 - if (IS_ERR(req)) 45 - return PTR_ERR(req); 41 + for (i = 0; i < (count << 1); i++) { 42 + struct rpcrdma_regbuf *rb; 43 + struct rpcrdma_req *req; 44 + size_t size; 46 45 47 - size = r_xprt->rx_data.inline_rsize; 48 - rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL); 49 - if (IS_ERR(rb)) 50 - goto out_fail; 51 - req->rl_sendbuf = rb; 52 - xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, 53 - min_t(size_t, size, PAGE_SIZE)); 54 - rpcrdma_set_xprtdata(rqst, req); 46 + req = rpcrdma_create_req(r_xprt); 47 + if (IS_ERR(req)) 48 + return PTR_ERR(req); 49 + rqst = &req->rl_slot; 50 + 51 + rqst->rq_xprt = xprt; 52 + INIT_LIST_HEAD(&rqst->rq_list); 53 + INIT_LIST_HEAD(&rqst->rq_bc_list); 54 + __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state); 55 + spin_lock_bh(&xprt->bc_pa_lock); 56 + list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list); 57 + spin_unlock_bh(&xprt->bc_pa_lock); 58 + 59 + size = r_xprt->rx_data.inline_rsize; 60 + rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL); 61 + if (IS_ERR(rb)) 62 + goto out_fail; 63 + req->rl_sendbuf = rb; 64 + xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, 65 + min_t(size_t, size, PAGE_SIZE)); 66 + } 55 67 return 0; 56 68 57 69 out_fail: ··· 98 86 int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs) 99 87 { 100 88 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 101 - struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; 102 - struct rpc_rqst *rqst; 103 - unsigned int i; 104 89 int rc; 
105 90 106 91 /* The backchannel reply path returns each rpc_rqst to the ··· 112 103 if (reqs > RPCRDMA_BACKWARD_WRS >> 1) 113 104 goto out_err; 114 105 115 - for (i = 0; i < (reqs << 1); i++) { 116 - rqst = kzalloc(sizeof(*rqst), GFP_KERNEL); 117 - if (!rqst) 118 - goto out_free; 119 - 120 - dprintk("RPC: %s: new rqst %p\n", __func__, rqst); 121 - 122 - rqst->rq_xprt = &r_xprt->rx_xprt; 123 - INIT_LIST_HEAD(&rqst->rq_list); 124 - INIT_LIST_HEAD(&rqst->rq_bc_list); 125 - __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state); 126 - 127 - if (rpcrdma_bc_setup_rqst(r_xprt, rqst)) 128 - goto out_free; 129 - 130 - spin_lock_bh(&xprt->bc_pa_lock); 131 - list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list); 132 - spin_unlock_bh(&xprt->bc_pa_lock); 133 - } 106 + rc = rpcrdma_bc_setup_reqs(r_xprt, reqs); 107 + if (rc) 108 + goto out_free; 134 109 135 110 rc = rpcrdma_bc_setup_reps(r_xprt, reqs); 136 111 if (rc) ··· 124 131 if (rc) 125 132 goto out_free; 126 133 127 - buffer->rb_bc_srv_max_requests = reqs; 134 + r_xprt->rx_buf.rb_bc_srv_max_requests = reqs; 128 135 request_module("svcrdma"); 129 136 trace_xprtrdma_cb_setup(r_xprt, reqs); 130 137 return 0;
+9 -26
net/sunrpc/xprtrdma/transport.c
··· 331 331 return ERR_PTR(-EBADF); 332 332 } 333 333 334 - xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 335 - xprt_rdma_slot_table_entries, 336 - xprt_rdma_slot_table_entries); 334 + xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, 0); 337 335 if (xprt == NULL) { 338 336 dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", 339 337 __func__); ··· 363 365 xprt_set_bound(xprt); 364 366 xprt_rdma_format_addresses(xprt, sap); 365 367 366 - cdata.max_requests = xprt->max_reqs; 368 + cdata.max_requests = xprt_rdma_slot_table_entries; 367 369 368 370 cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */ 369 371 cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */ ··· 548 550 static void 549 551 xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) 550 552 { 551 - struct rpc_rqst *rqst; 553 + struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 554 + struct rpcrdma_req *req; 552 555 553 - spin_lock(&xprt->reserve_lock); 554 - if (list_empty(&xprt->free)) 556 + req = rpcrdma_buffer_get(&r_xprt->rx_buf); 557 + if (!req) 555 558 goto out_sleep; 556 - rqst = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); 557 - list_del(&rqst->rq_list); 558 - spin_unlock(&xprt->reserve_lock); 559 - 560 - task->tk_rqstp = rqst; 559 + task->tk_rqstp = &req->rl_slot; 561 560 task->tk_status = 0; 562 561 return; 563 562 564 563 out_sleep: 565 564 rpc_sleep_on(&xprt->backlog, task, NULL); 566 - spin_unlock(&xprt->reserve_lock); 567 565 task->tk_status = -EAGAIN; 568 566 } 569 567 ··· 573 579 xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst) 574 580 { 575 581 memset(rqst, 0, sizeof(*rqst)); 576 - 577 - spin_lock(&xprt->reserve_lock); 578 - list_add(&rqst->rq_list, &xprt->free); 582 + rpcrdma_buffer_put(rpcr_to_rdmar(rqst)); 579 583 rpc_wake_up_next(&xprt->backlog); 580 - spin_unlock(&xprt->reserve_lock); 581 584 } 582 585 583 586 static bool ··· 647 656 { 648 657 struct rpc_rqst *rqst = task->tk_rqstp; 649 658 struct 
rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); 650 - struct rpcrdma_req *req; 659 + struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 651 660 gfp_t flags; 652 - 653 - req = rpcrdma_buffer_get(&r_xprt->rx_buf); 654 - if (req == NULL) 655 - goto out_get; 656 661 657 662 flags = RPCRDMA_DEF_GFP; 658 663 if (RPC_IS_SWAPPER(task)) ··· 659 672 if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags)) 660 673 goto out_fail; 661 674 662 - rpcrdma_set_xprtdata(rqst, req); 663 675 rqst->rq_buffer = req->rl_sendbuf->rg_base; 664 676 rqst->rq_rbuffer = req->rl_recvbuf->rg_base; 665 677 trace_xprtrdma_allocate(task, req); 666 678 return 0; 667 679 668 680 out_fail: 669 - rpcrdma_buffer_put(req); 670 - out_get: 671 681 trace_xprtrdma_allocate(task, NULL); 672 682 return -ENOMEM; 673 683 } ··· 685 701 if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags)) 686 702 rpcrdma_release_rqst(r_xprt, req); 687 703 trace_xprtrdma_rpc_done(task, req); 688 - rpcrdma_buffer_put(req); 689 704 } 690 705 691 706 /**
+2 -7
net/sunrpc/xprtrdma/xprt_rdma.h
··· 335 335 struct rpcrdma_buffer; 336 336 struct rpcrdma_req { 337 337 struct list_head rl_list; 338 + struct rpc_rqst rl_slot; 338 339 struct rpcrdma_buffer *rl_buffer; 339 340 struct rpcrdma_rep *rl_reply; 340 341 struct xdr_stream rl_stream; ··· 358 357 RPCRDMA_REQ_F_TX_RESOURCES, 359 358 }; 360 359 361 - static inline void 362 - rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req) 363 - { 364 - rqst->rq_xprtdata = req; 365 - } 366 - 367 360 static inline struct rpcrdma_req * 368 361 rpcr_to_rdmar(const struct rpc_rqst *rqst) 369 362 { 370 - return rqst->rq_xprtdata; 363 + return container_of(rqst, struct rpcrdma_req, rl_slot); 371 364 } 372 365 373 366 static inline void