Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xprtrdma: Replace all usage of "frmr" with "frwr"

Clean up: Over time, the industry has adopted the term "frwr"
instead of "frmr". The term "frwr" is now more widely recognized.

For the past couple of years I've attempted to add new code using
"frwr", but there still remains plenty of older code that still
uses "frmr". Replace all usage of "frmr" with "frwr" to avoid confusion.

While we're churning code, rename variables unhelpfully called "f"
to "frwr", to improve code clarity.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

authored by

Chuck Lever and committed by
Anna Schumaker
ce5b3717 30b5416b

+98 -98
+1 -1
include/linux/sunrpc/xprtrdma.h
··· 64 64 RPCRDMA_MEMWINDOWS, 65 65 RPCRDMA_MEMWINDOWS_ASYNC, 66 66 RPCRDMA_MTHCAFMR, 67 - RPCRDMA_FRMR, 67 + RPCRDMA_FRWR, 68 68 RPCRDMA_ALLPHYSICAL, 69 69 RPCRDMA_LAST 70 70 };
+86 -86
net/sunrpc/xprtrdma/frwr_ops.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2015 Oracle. All rights reserved. 3 + * Copyright (c) 2015, 2017 Oracle. All rights reserved. 4 4 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 5 5 */ 6 6 7 7 /* Lightweight memory registration using Fast Registration Work 8 - * Requests (FRWR). Also referred to sometimes as FRMR mode. 8 + * Requests (FRWR). 9 9 * 10 10 * FRWR features ordered asynchronous registration and deregistration 11 11 * of arbitrarily sized memory regions. This is the fastest and safest ··· 15 15 /* Normal operation 16 16 * 17 17 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG 18 - * Work Request (frmr_op_map). When the RDMA operation is finished, this 18 + * Work Request (frwr_op_map). When the RDMA operation is finished, this 19 19 * Memory Region is invalidated using a LOCAL_INV Work Request 20 - * (frmr_op_unmap). 20 + * (frwr_op_unmap). 21 21 * 22 22 * Typically these Work Requests are not signaled, and neither are RDMA 23 23 * SEND Work Requests (with the exception of signaling occasionally to ··· 98 98 static int 99 99 frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r) 100 100 { 101 - unsigned int depth = ia->ri_max_frmr_depth; 102 - struct rpcrdma_frmr *f = &r->frmr; 101 + unsigned int depth = ia->ri_max_frwr_depth; 102 + struct rpcrdma_frwr *frwr = &r->frwr; 103 103 int rc; 104 104 105 - f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth); 106 - if (IS_ERR(f->fr_mr)) 105 + frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth); 106 + if (IS_ERR(frwr->fr_mr)) 107 107 goto out_mr_err; 108 108 109 109 r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL); ··· 111 111 goto out_list_err; 112 112 113 113 sg_init_table(r->mw_sg, depth); 114 - init_completion(&f->fr_linv_done); 114 + init_completion(&frwr->fr_linv_done); 115 115 return 0; 116 116 117 117 out_mr_err: 118 - rc = PTR_ERR(f->fr_mr); 118 + rc = PTR_ERR(frwr->fr_mr); 119 119 dprintk("RPC: 
%s: ib_alloc_mr status %i\n", 120 120 __func__, rc); 121 121 return rc; ··· 124 124 rc = -ENOMEM; 125 125 dprintk("RPC: %s: sg allocation failure\n", 126 126 __func__); 127 - ib_dereg_mr(f->fr_mr); 127 + ib_dereg_mr(frwr->fr_mr); 128 128 return rc; 129 129 } 130 130 ··· 137 137 if (!list_empty(&r->mw_list)) 138 138 list_del(&r->mw_list); 139 139 140 - rc = ib_dereg_mr(r->frmr.fr_mr); 140 + rc = ib_dereg_mr(r->frwr.fr_mr); 141 141 if (rc) 142 142 pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n", 143 143 r, rc); ··· 148 148 static int 149 149 __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r) 150 150 { 151 - struct rpcrdma_frmr *f = &r->frmr; 151 + struct rpcrdma_frwr *frwr = &r->frwr; 152 152 int rc; 153 153 154 - rc = ib_dereg_mr(f->fr_mr); 154 + rc = ib_dereg_mr(frwr->fr_mr); 155 155 if (rc) { 156 156 pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n", 157 157 rc, r); 158 158 return rc; 159 159 } 160 160 161 - f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, 162 - ia->ri_max_frmr_depth); 163 - if (IS_ERR(f->fr_mr)) { 161 + frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, 162 + ia->ri_max_frwr_depth); 163 + if (IS_ERR(frwr->fr_mr)) { 164 164 pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n", 165 - PTR_ERR(f->fr_mr), r); 166 - return PTR_ERR(f->fr_mr); 165 + PTR_ERR(frwr->fr_mr), r); 166 + return PTR_ERR(frwr->fr_mr); 167 167 } 168 168 169 - dprintk("RPC: %s: recovered FRMR %p\n", __func__, f); 170 - f->fr_state = FRMR_IS_INVALID; 169 + dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr); 170 + frwr->fr_state = FRWR_IS_INVALID; 171 171 return 0; 172 172 } 173 173 174 - /* Reset of a single FRMR. Generate a fresh rkey by replacing the MR. 174 + /* Reset of a single FRWR. Generate a fresh rkey by replacing the MR. 
175 175 */ 176 176 static void 177 177 frwr_op_recover_mr(struct rpcrdma_mw *mw) 178 178 { 179 - enum rpcrdma_frmr_state state = mw->frmr.fr_state; 179 + enum rpcrdma_frwr_state state = mw->frwr.fr_state; 180 180 struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 181 181 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 182 182 int rc; 183 183 184 184 rc = __frwr_reset_mr(ia, mw); 185 - if (state != FRMR_FLUSHED_LI) 185 + if (state != FRWR_FLUSHED_LI) 186 186 ib_dma_unmap_sg(ia->ri_device, 187 187 mw->mw_sg, mw->mw_nents, mw->mw_dir); 188 188 if (rc) ··· 193 193 return; 194 194 195 195 out_release: 196 - pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw); 196 + pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mw); 197 197 r_xprt->rx_stats.mrs_orphaned++; 198 198 199 199 spin_lock(&r_xprt->rx_buf.rb_mwlock); ··· 214 214 if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG) 215 215 ia->ri_mrtype = IB_MR_TYPE_SG_GAPS; 216 216 217 - ia->ri_max_frmr_depth = 217 + ia->ri_max_frwr_depth = 218 218 min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, 219 219 attrs->max_fast_reg_page_list_len); 220 220 dprintk("RPC: %s: device's max FR page list len = %u\n", 221 - __func__, ia->ri_max_frmr_depth); 221 + __func__, ia->ri_max_frwr_depth); 222 222 223 - /* Add room for frmr register and invalidate WRs. 224 - * 1. FRMR reg WR for head 225 - * 2. FRMR invalidate WR for head 226 - * 3. N FRMR reg WRs for pagelist 227 - * 4. N FRMR invalidate WRs for pagelist 228 - * 5. FRMR reg WR for tail 229 - * 6. FRMR invalidate WR for tail 223 + /* Add room for frwr register and invalidate WRs. 224 + * 1. FRWR reg WR for head 225 + * 2. FRWR invalidate WR for head 226 + * 3. N FRWR reg WRs for pagelist 227 + * 4. N FRWR invalidate WRs for pagelist 228 + * 5. FRWR reg WR for tail 229 + * 6. FRWR invalidate WR for tail 230 230 * 7. 
The RDMA_SEND WR 231 231 */ 232 232 depth = 7; 233 233 234 - /* Calculate N if the device max FRMR depth is smaller than 234 + /* Calculate N if the device max FRWR depth is smaller than 235 235 * RPCRDMA_MAX_DATA_SEGS. 236 236 */ 237 - if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) { 238 - delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth; 237 + if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) { 238 + delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth; 239 239 do { 240 - depth += 2; /* FRMR reg + invalidate */ 241 - delta -= ia->ri_max_frmr_depth; 240 + depth += 2; /* FRWR reg + invalidate */ 241 + delta -= ia->ri_max_frwr_depth; 242 242 } while (delta > 0); 243 243 } 244 244 ··· 252 252 } 253 253 254 254 ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS / 255 - ia->ri_max_frmr_depth); 255 + ia->ri_max_frwr_depth); 256 256 return 0; 257 257 } 258 258 ··· 265 265 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 266 266 267 267 return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, 268 - RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth); 268 + RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth); 269 269 } 270 270 271 271 static void ··· 286 286 static void 287 287 frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) 288 288 { 289 - struct rpcrdma_frmr *frmr; 289 + struct rpcrdma_frwr *frwr; 290 290 struct ib_cqe *cqe; 291 291 292 292 /* WARNING: Only wr_cqe and status are reliable at this point */ 293 293 if (wc->status != IB_WC_SUCCESS) { 294 294 cqe = wc->wr_cqe; 295 - frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 296 - frmr->fr_state = FRMR_FLUSHED_FR; 295 + frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe); 296 + frwr->fr_state = FRWR_FLUSHED_FR; 297 297 __frwr_sendcompletion_flush(wc, "fastreg"); 298 298 } 299 299 } ··· 307 307 static void 308 308 frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) 309 309 { 310 - struct rpcrdma_frmr *frmr; 310 + struct rpcrdma_frwr *frwr; 311 311 struct ib_cqe *cqe; 312 312 313 313 /* WARNING: Only wr_cqe and 
status are reliable at this point */ 314 314 if (wc->status != IB_WC_SUCCESS) { 315 315 cqe = wc->wr_cqe; 316 - frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 317 - frmr->fr_state = FRMR_FLUSHED_LI; 316 + frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe); 317 + frwr->fr_state = FRWR_FLUSHED_LI; 318 318 __frwr_sendcompletion_flush(wc, "localinv"); 319 319 } 320 320 } ··· 329 329 static void 330 330 frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) 331 331 { 332 - struct rpcrdma_frmr *frmr; 332 + struct rpcrdma_frwr *frwr; 333 333 struct ib_cqe *cqe; 334 334 335 335 /* WARNING: Only wr_cqe and status are reliable at this point */ 336 336 cqe = wc->wr_cqe; 337 - frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe); 337 + frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe); 338 338 if (wc->status != IB_WC_SUCCESS) { 339 - frmr->fr_state = FRMR_FLUSHED_LI; 339 + frwr->fr_state = FRWR_FLUSHED_LI; 340 340 __frwr_sendcompletion_flush(wc, "localinv"); 341 341 } 342 - complete(&frmr->fr_linv_done); 342 + complete(&frwr->fr_linv_done); 343 343 } 344 344 345 345 /* Post a REG_MR Work Request to register a memory region ··· 351 351 { 352 352 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 353 353 bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS; 354 + struct rpcrdma_frwr *frwr; 354 355 struct rpcrdma_mw *mw; 355 - struct rpcrdma_frmr *frmr; 356 356 struct ib_mr *mr; 357 357 struct ib_reg_wr *reg_wr; 358 358 struct ib_send_wr *bad_wr; ··· 366 366 mw = rpcrdma_get_mw(r_xprt); 367 367 if (!mw) 368 368 return ERR_PTR(-ENOBUFS); 369 - } while (mw->frmr.fr_state != FRMR_IS_INVALID); 370 - frmr = &mw->frmr; 371 - frmr->fr_state = FRMR_IS_VALID; 372 - mr = frmr->fr_mr; 373 - reg_wr = &frmr->fr_regwr; 369 + } while (mw->frwr.fr_state != FRWR_IS_INVALID); 370 + frwr = &mw->frwr; 371 + frwr->fr_state = FRWR_IS_VALID; 372 + mr = frwr->fr_mr; 374 373 375 - if (nsegs > ia->ri_max_frmr_depth) 376 - nsegs = ia->ri_max_frmr_depth; 374 + if (nsegs > ia->ri_max_frwr_depth) 375 + 
nsegs = ia->ri_max_frwr_depth; 377 376 for (i = 0; i < nsegs;) { 378 377 if (seg->mr_page) 379 378 sg_set_page(&mw->mw_sg[i], ··· 401 402 if (unlikely(n != mw->mw_nents)) 402 403 goto out_mapmr_err; 403 404 404 - dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n", 405 - __func__, frmr, mw->mw_nents, mr->length); 405 + dprintk("RPC: %s: Using frwr %p to map %u segments (%llu bytes)\n", 406 + __func__, frwr, mw->mw_nents, mr->length); 406 407 407 408 key = (u8)(mr->rkey & 0x000000FF); 408 409 ib_update_fast_reg_key(mr, ++key); 409 410 411 + reg_wr = &frwr->fr_regwr; 410 412 reg_wr->wr.next = NULL; 411 413 reg_wr->wr.opcode = IB_WR_REG_MR; 412 - frmr->fr_cqe.done = frwr_wc_fastreg; 413 - reg_wr->wr.wr_cqe = &frmr->fr_cqe; 414 + frwr->fr_cqe.done = frwr_wc_fastreg; 415 + reg_wr->wr.wr_cqe = &frwr->fr_cqe; 414 416 reg_wr->wr.num_sge = 0; 415 417 reg_wr->wr.send_flags = 0; 416 418 reg_wr->mr = mr; ··· 434 434 out_dmamap_err: 435 435 pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n", 436 436 mw->mw_sg, i); 437 - frmr->fr_state = FRMR_IS_INVALID; 437 + frwr->fr_state = FRWR_IS_INVALID; 438 438 rpcrdma_put_mw(r_xprt, mw); 439 439 return ERR_PTR(-EIO); 440 440 441 441 out_mapmr_err: 442 442 pr_err("rpcrdma: failed to map mr %p (%d/%d)\n", 443 - frmr->fr_mr, n, mw->mw_nents); 443 + frwr->fr_mr, n, mw->mw_nents); 444 444 rpcrdma_defer_mr_recovery(mw); 445 445 return ERR_PTR(-EIO); 446 446 447 447 out_senderr: 448 - pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc); 448 + pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc); 449 449 rpcrdma_defer_mr_recovery(mw); 450 450 return ERR_PTR(-ENOTCONN); 451 451 } ··· 462 462 struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 463 463 464 464 list_del(&mw->mw_list); 465 - mw->frmr.fr_state = FRMR_IS_INVALID; 465 + mw->frwr.fr_state = FRWR_IS_INVALID; 466 466 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, 467 467 mw->mw_sg, mw->mw_nents, mw->mw_dir); 468 468 rpcrdma_put_mw(r_xprt, mw); ··· 483 
483 { 484 484 struct ib_send_wr *first, **prev, *last, *bad_wr; 485 485 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 486 - struct rpcrdma_frmr *f; 486 + struct rpcrdma_frwr *frwr; 487 487 struct rpcrdma_mw *mw; 488 488 int count, rc; 489 489 ··· 492 492 * Chain the LOCAL_INV Work Requests and post them with 493 493 * a single ib_post_send() call. 494 494 */ 495 - f = NULL; 495 + frwr = NULL; 496 496 count = 0; 497 497 prev = &first; 498 498 list_for_each_entry(mw, mws, mw_list) { 499 - mw->frmr.fr_state = FRMR_IS_INVALID; 499 + mw->frwr.fr_state = FRWR_IS_INVALID; 500 500 501 - f = &mw->frmr; 502 - dprintk("RPC: %s: invalidating frmr %p\n", 503 - __func__, f); 501 + frwr = &mw->frwr; 502 + dprintk("RPC: %s: invalidating frwr %p\n", 503 + __func__, frwr); 504 504 505 - f->fr_cqe.done = frwr_wc_localinv; 506 - last = &f->fr_invwr; 505 + frwr->fr_cqe.done = frwr_wc_localinv; 506 + last = &frwr->fr_invwr; 507 507 memset(last, 0, sizeof(*last)); 508 - last->wr_cqe = &f->fr_cqe; 508 + last->wr_cqe = &frwr->fr_cqe; 509 509 last->opcode = IB_WR_LOCAL_INV; 510 510 last->ex.invalidate_rkey = mw->mw_handle; 511 511 count++; ··· 513 513 *prev = last; 514 514 prev = &last->next; 515 515 } 516 - if (!f) 516 + if (!frwr) 517 517 goto unmap; 518 518 519 519 /* Strong send queue ordering guarantees that when the ··· 521 521 * are complete. 522 522 */ 523 523 last->send_flags = IB_SEND_SIGNALED; 524 - f->fr_cqe.done = frwr_wc_localinv_wake; 525 - reinit_completion(&f->fr_linv_done); 524 + frwr->fr_cqe.done = frwr_wc_localinv_wake; 525 + reinit_completion(&frwr->fr_linv_done); 526 526 527 527 /* Transport disconnect drains the receive CQ before it 528 528 * replaces the QP. 
The RPC reply handler won't call us ··· 532 532 bad_wr = NULL; 533 533 rc = ib_post_send(ia->ri_id->qp, first, &bad_wr); 534 534 if (bad_wr != first) 535 - wait_for_completion(&f->fr_linv_done); 535 + wait_for_completion(&frwr->fr_linv_done); 536 536 if (rc) 537 537 goto reset_mrs; 538 538 ··· 542 542 unmap: 543 543 while (!list_empty(mws)) { 544 544 mw = rpcrdma_pop_mw(mws); 545 - dprintk("RPC: %s: DMA unmapping frmr %p\n", 546 - __func__, &mw->frmr); 545 + dprintk("RPC: %s: DMA unmapping frwr %p\n", 546 + __func__, &mw->frwr); 547 547 ib_dma_unmap_sg(ia->ri_device, 548 548 mw->mw_sg, mw->mw_nents, mw->mw_dir); 549 549 rpcrdma_put_mw(r_xprt, mw); ··· 551 551 return; 552 552 553 553 reset_mrs: 554 - pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc); 554 + pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc); 555 555 556 556 /* Find and reset the MRs in the LOCAL_INV WRs that did not 557 557 * get posted. 558 558 */ 559 559 while (bad_wr) { 560 - f = container_of(bad_wr, struct rpcrdma_frmr, 561 - fr_invwr); 562 - mw = container_of(f, struct rpcrdma_mw, frmr); 560 + frwr = container_of(bad_wr, struct rpcrdma_frwr, 561 + fr_invwr); 562 + mw = container_of(frwr, struct rpcrdma_mw, frwr); 563 563 564 564 __frwr_reset_mr(ia, mw); 565 565
+1 -1
net/sunrpc/xprtrdma/transport.c
··· 67 67 static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE; 68 68 unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; 69 69 static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; 70 - unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; 70 + unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR; 71 71 int xprt_rdma_pad_optimize; 72 72 73 73 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+1 -1
net/sunrpc/xprtrdma/verbs.c
··· 388 388 } 389 389 390 390 switch (xprt_rdma_memreg_strategy) { 391 - case RPCRDMA_FRMR: 391 + case RPCRDMA_FRWR: 392 392 if (frwr_is_supported(ia)) { 393 393 ia->ri_ops = &rpcrdma_frwr_memreg_ops; 394 394 break;
+9 -9
net/sunrpc/xprtrdma/xprt_rdma.h
··· 73 73 struct completion ri_remove_done; 74 74 int ri_async_rc; 75 75 unsigned int ri_max_segs; 76 - unsigned int ri_max_frmr_depth; 76 + unsigned int ri_max_frwr_depth; 77 77 unsigned int ri_max_inline_write; 78 78 unsigned int ri_max_inline_read; 79 79 unsigned int ri_max_send_sges; ··· 242 242 * rpcrdma_deregister_external() uses this metadata to unmap and 243 243 * release these resources when an RPC is complete. 244 244 */ 245 - enum rpcrdma_frmr_state { 246 - FRMR_IS_INVALID, /* ready to be used */ 247 - FRMR_IS_VALID, /* in use */ 248 - FRMR_FLUSHED_FR, /* flushed FASTREG WR */ 249 - FRMR_FLUSHED_LI, /* flushed LOCALINV WR */ 245 + enum rpcrdma_frwr_state { 246 + FRWR_IS_INVALID, /* ready to be used */ 247 + FRWR_IS_VALID, /* in use */ 248 + FRWR_FLUSHED_FR, /* flushed FASTREG WR */ 249 + FRWR_FLUSHED_LI, /* flushed LOCALINV WR */ 250 250 }; 251 251 252 - struct rpcrdma_frmr { 252 + struct rpcrdma_frwr { 253 253 struct ib_mr *fr_mr; 254 254 struct ib_cqe fr_cqe; 255 - enum rpcrdma_frmr_state fr_state; 255 + enum rpcrdma_frwr_state fr_state; 256 256 struct completion fr_linv_done; 257 257 union { 258 258 struct ib_reg_wr fr_regwr; ··· 272 272 enum dma_data_direction mw_dir; 273 273 union { 274 274 struct rpcrdma_fmr fmr; 275 - struct rpcrdma_frmr frmr; 275 + struct rpcrdma_frwr frwr; 276 276 }; 277 277 struct rpcrdma_xprt *mw_xprt; 278 278 u32 mw_handle;