xprtrdma: Remove imul instructions from chunk list encoders

Re-arrange the pointer arithmetic in the chunk list encoders to
eliminate several more integer multiplication instructions during
Transport Header encoding.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
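
Why this helps: struct rpcrdma_mr_seg is a multi-word element, so the old "seg += n" in each encoder scales n by sizeof(*seg) at run time, which typically compiles to the imul named in the title. Having ->ro_map return the advanced segment pointer avoids that scaling: the mapper already steps through the segments one at a time to map them, so the advanced pointer falls out of work it does anyway. A minimal sketch of the two shapes (plain C with illustrative names, not the kernel code):

	struct seg {
		unsigned long long base;	/* stand-in multi-word element */
		unsigned int len;
		unsigned int off;
	};

	/* Before: every caller scales the pointer itself. */
	struct seg *advance_by_count(struct seg *seg, int n)
	{
		return seg + n;		/* n * sizeof(*seg) at run time */
	}

	/* After: the mapper visits each segment anyway, so the advanced
	 * pointer comes from fixed-stride increments and is handed back. */
	struct seg *map_and_advance(struct seg *seg, int n)
	{
		while (n--)
			seg++;		/* constant-size add, no multiply */
		return seg;
	}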

Authored by Chuck Lever, committed by Anna Schumaker (6748b0ca 28d9d56f)

4 files changed, 34 insertions(+), 36 deletions(-)
net/sunrpc/xprtrdma/fmr_ops.c | +5 -5
@@ -177,7 +177,7 @@
 /* Use the ib_map_phys_fmr() verb to register a memory region
  * for remote access via RDMA READ or RDMA WRITE.
  */
-static int
+static struct rpcrdma_mr_seg *
 fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	   int nsegs, bool writing, struct rpcrdma_mw **out)
 {
@@ -188,7 +188,7 @@
 
 	mw = rpcrdma_get_mw(r_xprt);
 	if (!mw)
-		return -ENOBUFS;
+		return ERR_PTR(-ENOBUFS);
 
 	pageoff = offset_in_page(seg1->mr_offset);
 	seg1->mr_offset -= pageoff;	/* start of page */
@@ -232,13 +232,13 @@
 	mw->mw_offset = dma_pages[0] + pageoff;
 
 	*out = mw;
-	return mw->mw_nents;
+	return seg;
 
 out_dmamap_err:
 	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
 	       mw->mw_sg, i);
 	rpcrdma_put_mw(r_xprt, mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 
 out_maperr:
 	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
@@ -247,7 +247,7 @@
 	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
 			mw->mw_sg, mw->mw_nents, mw->mw_dir);
 	rpcrdma_put_mw(r_xprt, mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 }
 
 /* Invalidate all memory regions that were registered for "req".
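
With ->ro_map returning a pointer, errors now travel inside the pointer using the kernel's standard ERR_PTR convention from <linux/err.h>. A minimal sketch of the round trip (the demo function is illustrative only; the macros are the real API):

	#include <linux/err.h>

	static struct rpcrdma_mr_seg *
	demo(struct rpcrdma_mr_seg *seg, int rc)
	{
		if (rc < 0)
			return ERR_PTR(rc);	/* fold errno into the pointer */
		return seg;			/* ordinary pointer on success */
	}

Callers test the result with IS_ERR() and recover the negative errno with PTR_ERR(), exactly as the rpc_rdma.c hunks below do.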
net/sunrpc/xprtrdma/frwr_ops.c | +6 -6
@@ -344,7 +344,7 @@
 /* Post a REG_MR Work Request to register a memory region
  * for remote access via RDMA READ or RDMA WRITE.
  */
-static int
+static struct rpcrdma_mr_seg *
 frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	    int nsegs, bool writing, struct rpcrdma_mw **out)
 {
@@ -364,7 +364,7 @@
 		rpcrdma_defer_mr_recovery(mw);
 		mw = rpcrdma_get_mw(r_xprt);
 		if (!mw)
-			return -ENOBUFS;
+			return ERR_PTR(-ENOBUFS);
 	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
 	frmr = &mw->frmr;
 	frmr->fr_state = FRMR_IS_VALID;
@@ -429,25 +429,25 @@
 	mw->mw_offset = mr->iova;
 
 	*out = mw;
-	return mw->mw_nents;
+	return seg;
 
 out_dmamap_err:
 	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
 	       mw->mw_sg, i);
 	frmr->fr_state = FRMR_IS_INVALID;
 	rpcrdma_put_mw(r_xprt, mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 
 out_mapmr_err:
 	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
 	       frmr->fr_mr, n, mw->mw_nents);
 	rpcrdma_defer_mr_recovery(mw);
-	return -EIO;
+	return ERR_PTR(-EIO);
 
 out_senderr:
 	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
 	rpcrdma_defer_mr_recovery(mw);
-	return -ENOTCONN;
+	return ERR_PTR(-ENOTCONN);
 }
 
 /* Invalidate all memory regions that were registered for "req".
net/sunrpc/xprtrdma/rpc_rdma.c | +21 -24
@@ -349,7 +349,7 @@
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
 	unsigned int pos;
-	int n, nsegs;
+	int nsegs;
 
 	pos = rqst->rq_snd_buf.head[0].iov_len;
 	if (rtype == rpcrdma_areadch)
@@ -361,10 +361,10 @@
 		return nsegs;
 
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 false, &mw);
-		if (n < 0)
-			return n;
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   false, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);
 
 		if (encode_read_segment(xdr, mw, pos) < 0)
@@ -373,11 +373,10 @@
 		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__, pos,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
 
 		r_xprt->rx_stats.read_chunk_count++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);
 
 	return 0;
@@ -404,7 +405,7 @@
 	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
-	int n, nsegs, nchunks;
+	int nsegs, nchunks;
 	__be32 *segcount;
 
 	seg = req->rl_segments;
@@ -423,10 +424,10 @@
 
 	nchunks = 0;
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 true, &mw);
-		if (n < 0)
-			return n;
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   true, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);
 
 		if (encode_rdma_segment(xdr, mw) < 0)
@@ -435,13 +436,12 @@
 		dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
 
 		r_xprt->rx_stats.write_chunk_count++;
 		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
 		nchunks++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);
 
 	/* Update count of segments in this Write chunk */
@@ -468,7 +470,7 @@
 	struct xdr_stream *xdr = &req->rl_stream;
 	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
-	int n, nsegs, nchunks;
+	int nsegs, nchunks;
 	__be32 *segcount;
 
 	seg = req->rl_segments;
@@ -485,10 +487,10 @@
 
 	nchunks = 0;
 	do {
-		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
-						 true, &mw);
-		if (n < 0)
-			return n;
+		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+						   true, &mw);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
 		rpcrdma_push_mw(mw, &req->rl_registered);
 
 		if (encode_rdma_segment(xdr, mw) < 0)
@@ -497,13 +499,12 @@
 		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
 			rqst->rq_task->tk_pid, __func__,
 			mw->mw_length, (unsigned long long)mw->mw_offset,
-			mw->mw_handle, n < nsegs ? "more" : "last");
+			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
 
 		r_xprt->rx_stats.reply_chunk_count++;
 		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
 		nchunks++;
-		seg += n;
-		nsegs -= n;
+		nsegs -= mw->mw_nents;
 	} while (nsegs);
 
 	/* Update count of segments in the Reply chunk */
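
All three encoder loops change the same way; condensed to a skeleton (trimmed from the hunks above, with "writing" standing in for the false/true each caller passes):

	/* before: the return value drives both the pointer and the count */
	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
	if (n < 0)
		return n;
	seg += n;			/* run-time scale by sizeof(*seg) */
	nsegs -= n;

	/* after: the mapper hands back the advanced pointer, and the number
	 * of segments it consumed is read from mw->mw_nents instead */
	seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
	if (IS_ERR(seg))
		return PTR_ERR(seg);
	nsegs -= mw->mw_nents;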
net/sunrpc/xprtrdma/xprt_rdma.h | +2 -1
@@ -466,7 +466,8 @@
  */
 struct rpcrdma_xprt;
 struct rpcrdma_memreg_ops {
-	int		(*ro_map)(struct rpcrdma_xprt *,
+	struct rpcrdma_mr_seg *
+			(*ro_map)(struct rpcrdma_xprt *,
 				  struct rpcrdma_mr_seg *, int, bool,
 				  struct rpcrdma_mw **);
 	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
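
For reference, each registration mode wires its map method into this table; assuming the surrounding code is otherwise unchanged, frwr_ops.c fills it in roughly like this (variable name illustrative, other members elided):

	static const struct rpcrdma_memreg_ops ops_sketch = {
		.ro_map		= frwr_op_map,
		/* .ro_unmap_sync and the remaining methods as before */
	};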