Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xprtrdma: Remove usage of "mw"

Clean up: struct rpcrdma_mw was named after Memory Windows, but
xprtrdma no longer supports a Memory Window registration mode.
Rename rpcrdma_mw and its fields to reduce confusion and make
the code more sensible to read.

Renaming "mw" was suggested by Tom Talpey, the author of the
original xprtrdma implementation. It's a good idea, but I haven't
done this until now because it's a huge diffstat for no benefit
other than code readability.

However, I'm about to introduce static trace points that expose
a few of xprtrdma's internal data structures. They should make sense
in the trace report, and it's reasonable to treat trace points as a
kernel API contract which might be difficult to change later.

While I'm churning things up, two additional changes:
- rename variables unhelpfully called "r" to "mr", to improve code
clarity, and
- rename the MR-related helper functions using the form
"rpcrdma_mr_<verb>", to be consistent with other areas of the
code.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Authored by Chuck Lever; committed by Anna Schumaker.
96ceddea ce5b3717

+292 -278
+74 -74
net/sunrpc/xprtrdma/fmr_ops.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2015 Oracle. All rights reserved. 3 + * Copyright (c) 2015, 2017 Oracle. All rights reserved. 4 4 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 5 5 */ 6 6 ··· 47 47 } 48 48 49 49 static int 50 - fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw) 50 + fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) 51 51 { 52 52 static struct ib_fmr_attr fmr_attr = { 53 53 .max_pages = RPCRDMA_MAX_FMR_SGES, ··· 55 55 .page_shift = PAGE_SHIFT 56 56 }; 57 57 58 - mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES, 58 + mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES, 59 59 sizeof(u64), GFP_KERNEL); 60 - if (!mw->fmr.fm_physaddrs) 60 + if (!mr->fmr.fm_physaddrs) 61 61 goto out_free; 62 62 63 - mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES, 64 - sizeof(*mw->mw_sg), GFP_KERNEL); 65 - if (!mw->mw_sg) 63 + mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES, 64 + sizeof(*mr->mr_sg), GFP_KERNEL); 65 + if (!mr->mr_sg) 66 66 goto out_free; 67 67 68 - sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES); 68 + sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES); 69 69 70 - mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS, 70 + mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS, 71 71 &fmr_attr); 72 - if (IS_ERR(mw->fmr.fm_mr)) 72 + if (IS_ERR(mr->fmr.fm_mr)) 73 73 goto out_fmr_err; 74 74 75 75 return 0; 76 76 77 77 out_fmr_err: 78 78 dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__, 79 - PTR_ERR(mw->fmr.fm_mr)); 79 + PTR_ERR(mr->fmr.fm_mr)); 80 80 81 81 out_free: 82 - kfree(mw->mw_sg); 83 - kfree(mw->fmr.fm_physaddrs); 82 + kfree(mr->mr_sg); 83 + kfree(mr->fmr.fm_physaddrs); 84 84 return -ENOMEM; 85 85 } 86 86 87 87 static int 88 - __fmr_unmap(struct rpcrdma_mw *mw) 88 + __fmr_unmap(struct rpcrdma_mr *mr) 89 89 { 90 90 LIST_HEAD(l); 91 91 int rc; 92 92 93 - list_add(&mw->fmr.fm_mr->list, &l); 93 + list_add(&mr->fmr.fm_mr->list, &l); 94 94 rc = ib_unmap_fmr(&l); 
95 - list_del(&mw->fmr.fm_mr->list); 95 + list_del(&mr->fmr.fm_mr->list); 96 96 return rc; 97 97 } 98 98 99 99 static void 100 - fmr_op_release_mr(struct rpcrdma_mw *r) 100 + fmr_op_release_mr(struct rpcrdma_mr *mr) 101 101 { 102 102 LIST_HEAD(unmap_list); 103 103 int rc; 104 104 105 105 /* Ensure MW is not on any rl_registered list */ 106 - if (!list_empty(&r->mw_list)) 107 - list_del(&r->mw_list); 106 + if (!list_empty(&mr->mr_list)) 107 + list_del(&mr->mr_list); 108 108 109 - kfree(r->fmr.fm_physaddrs); 110 - kfree(r->mw_sg); 109 + kfree(mr->fmr.fm_physaddrs); 110 + kfree(mr->mr_sg); 111 111 112 112 /* In case this one was left mapped, try to unmap it 113 113 * to prevent dealloc_fmr from failing with EBUSY 114 114 */ 115 - rc = __fmr_unmap(r); 115 + rc = __fmr_unmap(mr); 116 116 if (rc) 117 117 pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n", 118 - r, rc); 118 + mr, rc); 119 119 120 - rc = ib_dealloc_fmr(r->fmr.fm_mr); 120 + rc = ib_dealloc_fmr(mr->fmr.fm_mr); 121 121 if (rc) 122 122 pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n", 123 - r, rc); 123 + mr, rc); 124 124 125 - kfree(r); 125 + kfree(mr); 126 126 } 127 127 128 128 /* Reset of a single FMR. 
129 129 */ 130 130 static void 131 - fmr_op_recover_mr(struct rpcrdma_mw *mw) 131 + fmr_op_recover_mr(struct rpcrdma_mr *mr) 132 132 { 133 - struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 133 + struct rpcrdma_xprt *r_xprt = mr->mr_xprt; 134 134 int rc; 135 135 136 136 /* ORDER: invalidate first */ 137 - rc = __fmr_unmap(mw); 137 + rc = __fmr_unmap(mr); 138 138 139 139 /* ORDER: then DMA unmap */ 140 140 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, 141 - mw->mw_sg, mw->mw_nents, mw->mw_dir); 141 + mr->mr_sg, mr->mr_nents, mr->mr_dir); 142 142 if (rc) 143 143 goto out_release; 144 144 145 - rpcrdma_put_mw(r_xprt, mw); 145 + rpcrdma_mr_put(mr); 146 146 r_xprt->rx_stats.mrs_recovered++; 147 147 return; 148 148 149 149 out_release: 150 - pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw); 150 + pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr); 151 151 r_xprt->rx_stats.mrs_orphaned++; 152 152 153 - spin_lock(&r_xprt->rx_buf.rb_mwlock); 154 - list_del(&mw->mw_all); 155 - spin_unlock(&r_xprt->rx_buf.rb_mwlock); 153 + spin_lock(&r_xprt->rx_buf.rb_mrlock); 154 + list_del(&mr->mr_all); 155 + spin_unlock(&r_xprt->rx_buf.rb_mrlock); 156 156 157 - fmr_op_release_mr(mw); 157 + fmr_op_release_mr(mr); 158 158 } 159 159 160 160 static int ··· 180 180 */ 181 181 static struct rpcrdma_mr_seg * 182 182 fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, 183 - int nsegs, bool writing, struct rpcrdma_mw **out) 183 + int nsegs, bool writing, struct rpcrdma_mr **out) 184 184 { 185 185 struct rpcrdma_mr_seg *seg1 = seg; 186 186 int len, pageoff, i, rc; 187 - struct rpcrdma_mw *mw; 187 + struct rpcrdma_mr *mr; 188 188 u64 *dma_pages; 189 189 190 - mw = rpcrdma_get_mw(r_xprt); 191 - if (!mw) 190 + mr = rpcrdma_mr_get(r_xprt); 191 + if (!mr) 192 192 return ERR_PTR(-ENOBUFS); 193 193 194 194 pageoff = offset_in_page(seg1->mr_offset); ··· 199 199 nsegs = RPCRDMA_MAX_FMR_SGES; 200 200 for (i = 0; i < nsegs;) { 201 201 if (seg->mr_page) 202 - 
sg_set_page(&mw->mw_sg[i], 202 + sg_set_page(&mr->mr_sg[i], 203 203 seg->mr_page, 204 204 seg->mr_len, 205 205 offset_in_page(seg->mr_offset)); 206 206 else 207 - sg_set_buf(&mw->mw_sg[i], seg->mr_offset, 207 + sg_set_buf(&mr->mr_sg[i], seg->mr_offset, 208 208 seg->mr_len); 209 209 len += seg->mr_len; 210 210 ++seg; ··· 214 214 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) 215 215 break; 216 216 } 217 - mw->mw_dir = rpcrdma_data_dir(writing); 217 + mr->mr_dir = rpcrdma_data_dir(writing); 218 218 219 - mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device, 220 - mw->mw_sg, i, mw->mw_dir); 221 - if (!mw->mw_nents) 219 + mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device, 220 + mr->mr_sg, i, mr->mr_dir); 221 + if (!mr->mr_nents) 222 222 goto out_dmamap_err; 223 223 224 - for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++) 225 - dma_pages[i] = sg_dma_address(&mw->mw_sg[i]); 226 - rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents, 224 + for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++) 225 + dma_pages[i] = sg_dma_address(&mr->mr_sg[i]); 226 + rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents, 227 227 dma_pages[0]); 228 228 if (rc) 229 229 goto out_maperr; 230 230 231 - mw->mw_handle = mw->fmr.fm_mr->rkey; 232 - mw->mw_length = len; 233 - mw->mw_offset = dma_pages[0] + pageoff; 231 + mr->mr_handle = mr->fmr.fm_mr->rkey; 232 + mr->mr_length = len; 233 + mr->mr_offset = dma_pages[0] + pageoff; 234 234 235 - *out = mw; 235 + *out = mr; 236 236 return seg; 237 237 238 238 out_dmamap_err: 239 239 pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n", 240 - mw->mw_sg, i); 241 - rpcrdma_put_mw(r_xprt, mw); 240 + mr->mr_sg, i); 241 + rpcrdma_mr_put(mr); 242 242 return ERR_PTR(-EIO); 243 243 244 244 out_maperr: 245 245 pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n", 246 246 len, (unsigned long long)dma_pages[0], 247 - pageoff, mw->mw_nents, rc); 247 + pageoff, mr->mr_nents, rc); 248 248 
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, 249 - mw->mw_sg, mw->mw_nents, mw->mw_dir); 250 - rpcrdma_put_mw(r_xprt, mw); 249 + mr->mr_sg, mr->mr_nents, mr->mr_dir); 250 + rpcrdma_mr_put(mr); 251 251 return ERR_PTR(-EIO); 252 252 } 253 253 ··· 256 256 * Sleeps until it is safe for the host CPU to access the 257 257 * previously mapped memory regions. 258 258 * 259 - * Caller ensures that @mws is not empty before the call. This 259 + * Caller ensures that @mrs is not empty before the call. This 260 260 * function empties the list. 261 261 */ 262 262 static void 263 - fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws) 263 + fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs) 264 264 { 265 - struct rpcrdma_mw *mw; 265 + struct rpcrdma_mr *mr; 266 266 LIST_HEAD(unmap_list); 267 267 int rc; 268 268 ··· 271 271 * ib_unmap_fmr() is slow, so use a single call instead 272 272 * of one call per mapped FMR. 273 273 */ 274 - list_for_each_entry(mw, mws, mw_list) { 274 + list_for_each_entry(mr, mrs, mr_list) { 275 275 dprintk("RPC: %s: unmapping fmr %p\n", 276 - __func__, &mw->fmr); 277 - list_add_tail(&mw->fmr.fm_mr->list, &unmap_list); 276 + __func__, &mr->fmr); 277 + list_add_tail(&mr->fmr.fm_mr->list, &unmap_list); 278 278 } 279 279 r_xprt->rx_stats.local_inv_needed++; 280 280 rc = ib_unmap_fmr(&unmap_list); ··· 284 284 /* ORDER: Now DMA unmap all of the req's MRs, and return 285 285 * them to the free MW list. 
286 286 */ 287 - while (!list_empty(mws)) { 288 - mw = rpcrdma_pop_mw(mws); 287 + while (!list_empty(mrs)) { 288 + mr = rpcrdma_mr_pop(mrs); 289 289 dprintk("RPC: %s: DMA unmapping fmr %p\n", 290 - __func__, &mw->fmr); 291 - list_del(&mw->fmr.fm_mr->list); 290 + __func__, &mr->fmr); 291 + list_del(&mr->fmr.fm_mr->list); 292 292 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, 293 - mw->mw_sg, mw->mw_nents, mw->mw_dir); 294 - rpcrdma_put_mw(r_xprt, mw); 293 + mr->mr_sg, mr->mr_nents, mr->mr_dir); 294 + rpcrdma_mr_put(mr); 295 295 } 296 296 297 297 return; ··· 299 299 out_reset: 300 300 pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc); 301 301 302 - while (!list_empty(mws)) { 303 - mw = rpcrdma_pop_mw(mws); 304 - list_del(&mw->fmr.fm_mr->list); 305 - fmr_op_recover_mr(mw); 302 + while (!list_empty(mrs)) { 303 + mr = rpcrdma_mr_pop(mrs); 304 + list_del(&mr->fmr.fm_mr->list); 305 + fmr_op_recover_mr(mr); 306 306 } 307 307 } 308 308
+89 -88
net/sunrpc/xprtrdma/frwr_ops.c
··· 17 17 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG 18 18 * Work Request (frwr_op_map). When the RDMA operation is finished, this 19 19 * Memory Region is invalidated using a LOCAL_INV Work Request 20 - * (frwr_op_unmap). 20 + * (frwr_op_unmap_sync). 21 21 * 22 22 * Typically these Work Requests are not signaled, and neither are RDMA 23 23 * SEND Work Requests (with the exception of signaling occasionally to ··· 26 26 * 27 27 * As an optimization, frwr_op_unmap marks MRs INVALID before the 28 28 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on 29 - * rb_mws immediately so that no work (like managing a linked list 29 + * rb_mrs immediately so that no work (like managing a linked list 30 30 * under a spinlock) is needed in the completion upcall. 31 31 * 32 32 * But this means that frwr_op_map() can occasionally encounter an MR ··· 60 60 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered 61 61 * with ib_dereg_mr and then are re-initialized. Because MR recovery 62 62 * allocates fresh resources, it is deferred to a workqueue, and the 63 - * recovered MRs are placed back on the rb_mws list when recovery is 63 + * recovered MRs are placed back on the rb_mrs list when recovery is 64 64 * complete. frwr_op_map allocates another MR for the current RPC while 65 65 * the broken MR is reset. 
66 66 * ··· 96 96 } 97 97 98 98 static int 99 - frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r) 99 + frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) 100 100 { 101 101 unsigned int depth = ia->ri_max_frwr_depth; 102 - struct rpcrdma_frwr *frwr = &r->frwr; 102 + struct rpcrdma_frwr *frwr = &mr->frwr; 103 103 int rc; 104 104 105 105 frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth); 106 106 if (IS_ERR(frwr->fr_mr)) 107 107 goto out_mr_err; 108 108 109 - r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL); 110 - if (!r->mw_sg) 109 + mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL); 110 + if (!mr->mr_sg) 111 111 goto out_list_err; 112 112 113 - sg_init_table(r->mw_sg, depth); 113 + sg_init_table(mr->mr_sg, depth); 114 114 init_completion(&frwr->fr_linv_done); 115 115 return 0; 116 116 ··· 129 129 } 130 130 131 131 static void 132 - frwr_op_release_mr(struct rpcrdma_mw *r) 132 + frwr_op_release_mr(struct rpcrdma_mr *mr) 133 133 { 134 134 int rc; 135 135 136 - /* Ensure MW is not on any rl_registered list */ 137 - if (!list_empty(&r->mw_list)) 138 - list_del(&r->mw_list); 136 + /* Ensure MR is not on any rl_registered list */ 137 + if (!list_empty(&mr->mr_list)) 138 + list_del(&mr->mr_list); 139 139 140 - rc = ib_dereg_mr(r->frwr.fr_mr); 140 + rc = ib_dereg_mr(mr->frwr.fr_mr); 141 141 if (rc) 142 142 pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n", 143 - r, rc); 144 - kfree(r->mw_sg); 145 - kfree(r); 143 + mr, rc); 144 + kfree(mr->mr_sg); 145 + kfree(mr); 146 146 } 147 147 148 148 static int 149 - __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r) 149 + __frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) 150 150 { 151 - struct rpcrdma_frwr *frwr = &r->frwr; 151 + struct rpcrdma_frwr *frwr = &mr->frwr; 152 152 int rc; 153 153 154 154 rc = ib_dereg_mr(frwr->fr_mr); 155 155 if (rc) { 156 156 pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n", 157 - rc, r); 157 + rc, mr); 158 158 return 
rc; 159 159 } 160 160 ··· 162 162 ia->ri_max_frwr_depth); 163 163 if (IS_ERR(frwr->fr_mr)) { 164 164 pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n", 165 - PTR_ERR(frwr->fr_mr), r); 165 + PTR_ERR(frwr->fr_mr), mr); 166 166 return PTR_ERR(frwr->fr_mr); 167 167 } 168 168 ··· 174 174 /* Reset of a single FRWR. Generate a fresh rkey by replacing the MR. 175 175 */ 176 176 static void 177 - frwr_op_recover_mr(struct rpcrdma_mw *mw) 177 + frwr_op_recover_mr(struct rpcrdma_mr *mr) 178 178 { 179 - enum rpcrdma_frwr_state state = mw->frwr.fr_state; 180 - struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 179 + enum rpcrdma_frwr_state state = mr->frwr.fr_state; 180 + struct rpcrdma_xprt *r_xprt = mr->mr_xprt; 181 181 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 182 182 int rc; 183 183 184 - rc = __frwr_reset_mr(ia, mw); 184 + rc = __frwr_mr_reset(ia, mr); 185 185 if (state != FRWR_FLUSHED_LI) 186 186 ib_dma_unmap_sg(ia->ri_device, 187 - mw->mw_sg, mw->mw_nents, mw->mw_dir); 187 + mr->mr_sg, mr->mr_nents, mr->mr_dir); 188 188 if (rc) 189 189 goto out_release; 190 190 191 - rpcrdma_put_mw(r_xprt, mw); 191 + rpcrdma_mr_put(mr); 192 192 r_xprt->rx_stats.mrs_recovered++; 193 193 return; 194 194 195 195 out_release: 196 - pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mw); 196 + pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr); 197 197 r_xprt->rx_stats.mrs_orphaned++; 198 198 199 - spin_lock(&r_xprt->rx_buf.rb_mwlock); 200 - list_del(&mw->mw_all); 201 - spin_unlock(&r_xprt->rx_buf.rb_mwlock); 199 + spin_lock(&r_xprt->rx_buf.rb_mrlock); 200 + list_del(&mr->mr_all); 201 + spin_unlock(&r_xprt->rx_buf.rb_mrlock); 202 202 203 - frwr_op_release_mr(mw); 203 + frwr_op_release_mr(mr); 204 204 } 205 205 206 206 static int ··· 347 347 */ 348 348 static struct rpcrdma_mr_seg * 349 349 frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, 350 - int nsegs, bool writing, struct rpcrdma_mw **out) 350 + int nsegs, bool writing, struct rpcrdma_mr **out) 351 
351 { 352 352 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 353 353 bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS; 354 354 struct rpcrdma_frwr *frwr; 355 - struct rpcrdma_mw *mw; 356 - struct ib_mr *mr; 355 + struct rpcrdma_mr *mr; 356 + struct ib_mr *ibmr; 357 357 struct ib_reg_wr *reg_wr; 358 358 struct ib_send_wr *bad_wr; 359 359 int rc, i, n; 360 360 u8 key; 361 361 362 - mw = NULL; 362 + mr = NULL; 363 363 do { 364 - if (mw) 365 - rpcrdma_defer_mr_recovery(mw); 366 - mw = rpcrdma_get_mw(r_xprt); 367 - if (!mw) 364 + if (mr) 365 + rpcrdma_mr_defer_recovery(mr); 366 + mr = rpcrdma_mr_get(r_xprt); 367 + if (!mr) 368 368 return ERR_PTR(-ENOBUFS); 369 - } while (mw->frwr.fr_state != FRWR_IS_INVALID); 370 - frwr = &mw->frwr; 369 + } while (mr->frwr.fr_state != FRWR_IS_INVALID); 370 + frwr = &mr->frwr; 371 371 frwr->fr_state = FRWR_IS_VALID; 372 - mr = frwr->fr_mr; 373 372 374 373 if (nsegs > ia->ri_max_frwr_depth) 375 374 nsegs = ia->ri_max_frwr_depth; 376 375 for (i = 0; i < nsegs;) { 377 376 if (seg->mr_page) 378 - sg_set_page(&mw->mw_sg[i], 377 + sg_set_page(&mr->mr_sg[i], 379 378 seg->mr_page, 380 379 seg->mr_len, 381 380 offset_in_page(seg->mr_offset)); 382 381 else 383 - sg_set_buf(&mw->mw_sg[i], seg->mr_offset, 382 + sg_set_buf(&mr->mr_sg[i], seg->mr_offset, 384 383 seg->mr_len); 385 384 386 385 ++seg; ··· 390 391 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) 391 392 break; 392 393 } 393 - mw->mw_dir = rpcrdma_data_dir(writing); 394 + mr->mr_dir = rpcrdma_data_dir(writing); 394 395 395 - mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir); 396 - if (!mw->mw_nents) 396 + mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir); 397 + if (!mr->mr_nents) 397 398 goto out_dmamap_err; 398 399 399 - n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE); 400 - if (unlikely(n != mw->mw_nents)) 400 + ibmr = frwr->fr_mr; 401 + n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE); 402 + if (unlikely(n != 
mr->mr_nents)) 401 403 goto out_mapmr_err; 402 404 403 405 dprintk("RPC: %s: Using frwr %p to map %u segments (%llu bytes)\n", 404 - __func__, frwr, mw->mw_nents, mr->length); 406 + __func__, frwr, mr->mr_nents, ibmr->length); 405 407 406 - key = (u8)(mr->rkey & 0x000000FF); 407 - ib_update_fast_reg_key(mr, ++key); 408 + key = (u8)(ibmr->rkey & 0x000000FF); 409 + ib_update_fast_reg_key(ibmr, ++key); 408 410 409 411 reg_wr = &frwr->fr_regwr; 410 412 reg_wr->wr.next = NULL; ··· 414 414 reg_wr->wr.wr_cqe = &frwr->fr_cqe; 415 415 reg_wr->wr.num_sge = 0; 416 416 reg_wr->wr.send_flags = 0; 417 - reg_wr->mr = mr; 418 - reg_wr->key = mr->rkey; 417 + reg_wr->mr = ibmr; 418 + reg_wr->key = ibmr->rkey; 419 419 reg_wr->access = writing ? 420 420 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : 421 421 IB_ACCESS_REMOTE_READ; ··· 424 424 if (rc) 425 425 goto out_senderr; 426 426 427 - mw->mw_handle = mr->rkey; 428 - mw->mw_length = mr->length; 429 - mw->mw_offset = mr->iova; 427 + mr->mr_handle = ibmr->rkey; 428 + mr->mr_length = ibmr->length; 429 + mr->mr_offset = ibmr->iova; 430 430 431 - *out = mw; 431 + *out = mr; 432 432 return seg; 433 433 434 434 out_dmamap_err: 435 435 pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n", 436 - mw->mw_sg, i); 436 + mr->mr_sg, i); 437 437 frwr->fr_state = FRWR_IS_INVALID; 438 - rpcrdma_put_mw(r_xprt, mw); 438 + rpcrdma_mr_put(mr); 439 439 return ERR_PTR(-EIO); 440 440 441 441 out_mapmr_err: 442 442 pr_err("rpcrdma: failed to map mr %p (%d/%d)\n", 443 - frwr->fr_mr, n, mw->mw_nents); 444 - rpcrdma_defer_mr_recovery(mw); 443 + frwr->fr_mr, n, mr->mr_nents); 444 + rpcrdma_mr_defer_recovery(mr); 445 445 return ERR_PTR(-EIO); 446 446 447 447 out_senderr: 448 448 pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc); 449 - rpcrdma_defer_mr_recovery(mw); 449 + rpcrdma_mr_defer_recovery(mr); 450 450 return ERR_PTR(-ENOTCONN); 451 451 } 452 452 453 - /* Handle a remotely invalidated mw on the @mws list 453 + /* Handle a remotely 
invalidated mr on the @mrs list 454 454 */ 455 455 static void 456 - frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mws) 456 + frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs) 457 457 { 458 - struct rpcrdma_mw *mw; 458 + struct rpcrdma_mr *mr; 459 459 460 - list_for_each_entry(mw, mws, mw_list) 461 - if (mw->mw_handle == rep->rr_inv_rkey) { 462 - struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 460 + list_for_each_entry(mr, mrs, mr_list) 461 + if (mr->mr_handle == rep->rr_inv_rkey) { 462 + struct rpcrdma_xprt *r_xprt = mr->mr_xprt; 463 463 464 - list_del(&mw->mw_list); 465 - mw->frwr.fr_state = FRWR_IS_INVALID; 464 + list_del(&mr->mr_list); 465 + mr->frwr.fr_state = FRWR_IS_INVALID; 466 466 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, 467 - mw->mw_sg, mw->mw_nents, mw->mw_dir); 468 - rpcrdma_put_mw(r_xprt, mw); 467 + mr->mr_sg, mr->mr_nents, mr->mr_dir); 468 + rpcrdma_mr_put(mr); 469 469 break; /* only one invalidated MR per RPC */ 470 470 } 471 471 } ··· 475 475 * Sleeps until it is safe for the host CPU to access the 476 476 * previously mapped memory regions. 477 477 * 478 - * Caller ensures that @mws is not empty before the call. This 478 + * Caller ensures that @mrs is not empty before the call. This 479 479 * function empties the list. 
480 480 */ 481 481 static void 482 - frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws) 482 + frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs) 483 483 { 484 484 struct ib_send_wr *first, **prev, *last, *bad_wr; 485 485 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 486 486 struct rpcrdma_frwr *frwr; 487 - struct rpcrdma_mw *mw; 487 + struct rpcrdma_mr *mr; 488 488 int count, rc; 489 489 490 490 /* ORDER: Invalidate all of the MRs first ··· 495 495 frwr = NULL; 496 496 count = 0; 497 497 prev = &first; 498 - list_for_each_entry(mw, mws, mw_list) { 499 - mw->frwr.fr_state = FRWR_IS_INVALID; 498 + list_for_each_entry(mr, mrs, mr_list) { 499 + mr->frwr.fr_state = FRWR_IS_INVALID; 500 500 501 - frwr = &mw->frwr; 501 + frwr = &mr->frwr; 502 + 502 503 dprintk("RPC: %s: invalidating frwr %p\n", 503 504 __func__, frwr); 504 505 ··· 508 507 memset(last, 0, sizeof(*last)); 509 508 last->wr_cqe = &frwr->fr_cqe; 510 509 last->opcode = IB_WR_LOCAL_INV; 511 - last->ex.invalidate_rkey = mw->mw_handle; 510 + last->ex.invalidate_rkey = mr->mr_handle; 512 511 count++; 513 512 514 513 *prev = last; ··· 538 537 goto reset_mrs; 539 538 540 539 /* ORDER: Now DMA unmap all of the MRs, and return 541 - * them to the free MW list. 540 + * them to the free MR list. 
542 541 */ 543 542 unmap: 544 - while (!list_empty(mws)) { 545 - mw = rpcrdma_pop_mw(mws); 543 + while (!list_empty(mrs)) { 544 + mr = rpcrdma_mr_pop(mrs); 546 545 dprintk("RPC: %s: DMA unmapping frwr %p\n", 547 - __func__, &mw->frwr); 546 + __func__, &mr->frwr); 548 547 ib_dma_unmap_sg(ia->ri_device, 549 - mw->mw_sg, mw->mw_nents, mw->mw_dir); 550 - rpcrdma_put_mw(r_xprt, mw); 548 + mr->mr_sg, mr->mr_nents, mr->mr_dir); 549 + rpcrdma_mr_put(mr); 551 550 } 552 551 return; 553 552 ··· 560 559 while (bad_wr) { 561 560 frwr = container_of(bad_wr, struct rpcrdma_frwr, 562 561 fr_invwr); 563 - mw = container_of(frwr, struct rpcrdma_mw, frwr); 562 + mr = container_of(frwr, struct rpcrdma_mr, frwr); 564 563 565 - __frwr_reset_mr(ia, mw); 564 + __frwr_mr_reset(ia, mr); 566 565 567 566 bad_wr = bad_wr->next; 568 567 }
+32 -32
net/sunrpc/xprtrdma/rpc_rdma.c
··· 292 292 } 293 293 294 294 static void 295 - xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw) 295 + xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr) 296 296 { 297 - *iptr++ = cpu_to_be32(mw->mw_handle); 298 - *iptr++ = cpu_to_be32(mw->mw_length); 299 - xdr_encode_hyper(iptr, mw->mw_offset); 297 + *iptr++ = cpu_to_be32(mr->mr_handle); 298 + *iptr++ = cpu_to_be32(mr->mr_length); 299 + xdr_encode_hyper(iptr, mr->mr_offset); 300 300 } 301 301 302 302 static int 303 - encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw) 303 + encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr) 304 304 { 305 305 __be32 *p; 306 306 ··· 308 308 if (unlikely(!p)) 309 309 return -EMSGSIZE; 310 310 311 - xdr_encode_rdma_segment(p, mw); 311 + xdr_encode_rdma_segment(p, mr); 312 312 return 0; 313 313 } 314 314 315 315 static int 316 - encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw, 316 + encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr, 317 317 u32 position) 318 318 { 319 319 __be32 *p; ··· 324 324 325 325 *p++ = xdr_one; /* Item present */ 326 326 *p++ = cpu_to_be32(position); 327 - xdr_encode_rdma_segment(p, mw); 327 + xdr_encode_rdma_segment(p, mr); 328 328 return 0; 329 329 } 330 330 ··· 348 348 { 349 349 struct xdr_stream *xdr = &req->rl_stream; 350 350 struct rpcrdma_mr_seg *seg; 351 - struct rpcrdma_mw *mw; 351 + struct rpcrdma_mr *mr; 352 352 unsigned int pos; 353 353 int nsegs; 354 354 ··· 363 363 364 364 do { 365 365 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, 366 - false, &mw); 366 + false, &mr); 367 367 if (IS_ERR(seg)) 368 368 return PTR_ERR(seg); 369 - rpcrdma_push_mw(mw, &req->rl_registered); 369 + rpcrdma_mr_push(mr, &req->rl_registered); 370 370 371 - if (encode_read_segment(xdr, mw, pos) < 0) 371 + if (encode_read_segment(xdr, mr, pos) < 0) 372 372 return -EMSGSIZE; 373 373 374 374 dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n", 375 375 rqst->rq_task->tk_pid, 
__func__, pos, 376 - mw->mw_length, (unsigned long long)mw->mw_offset, 377 - mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last"); 376 + mr->mr_length, (unsigned long long)mr->mr_offset, 377 + mr->mr_handle, mr->mr_nents < nsegs ? "more" : "last"); 378 378 379 379 r_xprt->rx_stats.read_chunk_count++; 380 - nsegs -= mw->mw_nents; 380 + nsegs -= mr->mr_nents; 381 381 } while (nsegs); 382 382 383 383 return 0; ··· 404 404 { 405 405 struct xdr_stream *xdr = &req->rl_stream; 406 406 struct rpcrdma_mr_seg *seg; 407 - struct rpcrdma_mw *mw; 407 + struct rpcrdma_mr *mr; 408 408 int nsegs, nchunks; 409 409 __be32 *segcount; 410 410 ··· 425 425 nchunks = 0; 426 426 do { 427 427 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, 428 - true, &mw); 428 + true, &mr); 429 429 if (IS_ERR(seg)) 430 430 return PTR_ERR(seg); 431 - rpcrdma_push_mw(mw, &req->rl_registered); 431 + rpcrdma_mr_push(mr, &req->rl_registered); 432 432 433 - if (encode_rdma_segment(xdr, mw) < 0) 433 + if (encode_rdma_segment(xdr, mr) < 0) 434 434 return -EMSGSIZE; 435 435 436 436 dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n", 437 437 rqst->rq_task->tk_pid, __func__, 438 - mw->mw_length, (unsigned long long)mw->mw_offset, 439 - mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last"); 438 + mr->mr_length, (unsigned long long)mr->mr_offset, 439 + mr->mr_handle, mr->mr_nents < nsegs ? 
"more" : "last"); 440 440 441 441 r_xprt->rx_stats.write_chunk_count++; 442 442 r_xprt->rx_stats.total_rdma_request += seg->mr_len; 443 443 nchunks++; 444 - nsegs -= mw->mw_nents; 444 + nsegs -= mr->mr_nents; 445 445 } while (nsegs); 446 446 447 447 /* Update count of segments in this Write chunk */ ··· 468 468 { 469 469 struct xdr_stream *xdr = &req->rl_stream; 470 470 struct rpcrdma_mr_seg *seg; 471 - struct rpcrdma_mw *mw; 471 + struct rpcrdma_mr *mr; 472 472 int nsegs, nchunks; 473 473 __be32 *segcount; 474 474 ··· 487 487 nchunks = 0; 488 488 do { 489 489 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, 490 - true, &mw); 490 + true, &mr); 491 491 if (IS_ERR(seg)) 492 492 return PTR_ERR(seg); 493 - rpcrdma_push_mw(mw, &req->rl_registered); 493 + rpcrdma_mr_push(mr, &req->rl_registered); 494 494 495 - if (encode_rdma_segment(xdr, mw) < 0) 495 + if (encode_rdma_segment(xdr, mr) < 0) 496 496 return -EMSGSIZE; 497 497 498 498 dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n", 499 499 rqst->rq_task->tk_pid, __func__, 500 - mw->mw_length, (unsigned long long)mw->mw_offset, 501 - mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last"); 500 + mr->mr_length, (unsigned long long)mr->mr_offset, 501 + mr->mr_handle, mr->mr_nents < nsegs ? "more" : "last"); 502 502 503 503 r_xprt->rx_stats.reply_chunk_count++; 504 504 r_xprt->rx_stats.total_rdma_request += seg->mr_len; 505 505 nchunks++; 506 - nsegs -= mw->mw_nents; 506 + nsegs -= mr->mr_nents; 507 507 } while (nsegs); 508 508 509 509 /* Update count of segments in the Reply chunk */ ··· 821 821 * so these registrations are invalid and unusable. 
822 822 */ 823 823 while (unlikely(!list_empty(&req->rl_registered))) { 824 - struct rpcrdma_mw *mw; 824 + struct rpcrdma_mr *mr; 825 825 826 - mw = rpcrdma_pop_mw(&req->rl_registered); 827 - rpcrdma_defer_mr_recovery(mw); 826 + mr = rpcrdma_mr_pop(&req->rl_registered); 827 + rpcrdma_mr_defer_recovery(mr); 828 828 } 829 829 830 830 /* This implementation supports the following combinations
+66 -53
net/sunrpc/xprtrdma/verbs.c
··· 71 71 /* 72 72 * internal functions 73 73 */ 74 - static void rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt); 75 - static void rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf); 74 + static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt); 75 + static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf); 76 76 static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb); 77 77 78 78 struct workqueue_struct *rpcrdma_receive_wq __read_mostly; ··· 458 458 rpcrdma_dma_unmap_regbuf(req->rl_sendbuf); 459 459 rpcrdma_dma_unmap_regbuf(req->rl_recvbuf); 460 460 } 461 - rpcrdma_destroy_mrs(buf); 461 + rpcrdma_mrs_destroy(buf); 462 462 463 463 /* Allow waiters to continue */ 464 464 complete(&ia->ri_remove_done); ··· 671 671 goto out3; 672 672 } 673 673 674 - rpcrdma_create_mrs(r_xprt); 674 + rpcrdma_mrs_create(r_xprt); 675 675 return 0; 676 676 677 677 out3: ··· 992 992 { 993 993 struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer, 994 994 rb_recovery_worker.work); 995 - struct rpcrdma_mw *mw; 995 + struct rpcrdma_mr *mr; 996 996 997 997 spin_lock(&buf->rb_recovery_lock); 998 998 while (!list_empty(&buf->rb_stale_mrs)) { 999 - mw = rpcrdma_pop_mw(&buf->rb_stale_mrs); 999 + mr = rpcrdma_mr_pop(&buf->rb_stale_mrs); 1000 1000 spin_unlock(&buf->rb_recovery_lock); 1001 1001 1002 - dprintk("RPC: %s: recovering MR %p\n", __func__, mw); 1003 - mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw); 1002 + dprintk("RPC: %s: recovering MR %p\n", __func__, mr); 1003 + mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr); 1004 1004 1005 1005 spin_lock(&buf->rb_recovery_lock); 1006 1006 } ··· 1008 1008 } 1009 1009 1010 1010 void 1011 - rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw) 1011 + rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr) 1012 1012 { 1013 - struct rpcrdma_xprt *r_xprt = mw->mw_xprt; 1013 + struct rpcrdma_xprt *r_xprt = mr->mr_xprt; 1014 1014 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; 1015 1015 1016 1016 spin_lock(&buf->rb_recovery_lock); 1017 - 
rpcrdma_push_mw(mw, &buf->rb_stale_mrs); 1017 + rpcrdma_mr_push(mr, &buf->rb_stale_mrs); 1018 1018 spin_unlock(&buf->rb_recovery_lock); 1019 1019 1020 1020 schedule_delayed_work(&buf->rb_recovery_worker, 0); 1021 1021 } 1022 1022 1023 1023 static void 1024 - rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt) 1024 + rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) 1025 1025 { 1026 1026 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; 1027 1027 struct rpcrdma_ia *ia = &r_xprt->rx_ia; ··· 1030 1030 LIST_HEAD(all); 1031 1031 1032 1032 for (count = 0; count < 32; count++) { 1033 - struct rpcrdma_mw *mw; 1033 + struct rpcrdma_mr *mr; 1034 1034 int rc; 1035 1035 1036 - mw = kzalloc(sizeof(*mw), GFP_KERNEL); 1037 - if (!mw) 1036 + mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1037 + if (!mr) 1038 1038 break; 1039 1039 1040 - rc = ia->ri_ops->ro_init_mr(ia, mw); 1040 + rc = ia->ri_ops->ro_init_mr(ia, mr); 1041 1041 if (rc) { 1042 - kfree(mw); 1042 + kfree(mr); 1043 1043 break; 1044 1044 } 1045 1045 1046 - mw->mw_xprt = r_xprt; 1046 + mr->mr_xprt = r_xprt; 1047 1047 1048 - list_add(&mw->mw_list, &free); 1049 - list_add(&mw->mw_all, &all); 1048 + list_add(&mr->mr_list, &free); 1049 + list_add(&mr->mr_all, &all); 1050 1050 } 1051 1051 1052 - spin_lock(&buf->rb_mwlock); 1053 - list_splice(&free, &buf->rb_mws); 1052 + spin_lock(&buf->rb_mrlock); 1053 + list_splice(&free, &buf->rb_mrs); 1054 1054 list_splice(&all, &buf->rb_all); 1055 1055 r_xprt->rx_stats.mrs_allocated += count; 1056 - spin_unlock(&buf->rb_mwlock); 1056 + spin_unlock(&buf->rb_mrlock); 1057 1057 1058 1058 dprintk("RPC: %s: created %u MRs\n", __func__, count); 1059 1059 } ··· 1066 1066 struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, 1067 1067 rx_buf); 1068 1068 1069 - rpcrdma_create_mrs(r_xprt); 1069 + rpcrdma_mrs_create(r_xprt); 1070 1070 } 1071 1071 1072 1072 struct rpcrdma_req * ··· 1144 1144 1145 1145 buf->rb_max_requests = r_xprt->rx_data.max_requests; 1146 1146 buf->rb_bc_srv_max_requests = 0; 1147 - 
spin_lock_init(&buf->rb_mwlock); 1147 + spin_lock_init(&buf->rb_mrlock); 1148 1148 spin_lock_init(&buf->rb_lock); 1149 1149 spin_lock_init(&buf->rb_recovery_lock); 1150 - INIT_LIST_HEAD(&buf->rb_mws); 1150 + INIT_LIST_HEAD(&buf->rb_mrs); 1151 1151 INIT_LIST_HEAD(&buf->rb_all); 1152 1152 INIT_LIST_HEAD(&buf->rb_stale_mrs); 1153 1153 INIT_DELAYED_WORK(&buf->rb_refresh_worker, ··· 1155 1155 INIT_DELAYED_WORK(&buf->rb_recovery_worker, 1156 1156 rpcrdma_mr_recovery_worker); 1157 1157 1158 - rpcrdma_create_mrs(r_xprt); 1158 + rpcrdma_mrs_create(r_xprt); 1159 1159 1160 1160 INIT_LIST_HEAD(&buf->rb_send_bufs); 1161 1161 INIT_LIST_HEAD(&buf->rb_allreqs); ··· 1229 1229 } 1230 1230 1231 1231 static void 1232 - rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf) 1232 + rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) 1233 1233 { 1234 1234 struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, 1235 1235 rx_buf); 1236 1236 struct rpcrdma_ia *ia = rdmab_to_ia(buf); 1237 - struct rpcrdma_mw *mw; 1237 + struct rpcrdma_mr *mr; 1238 1238 unsigned int count; 1239 1239 1240 1240 count = 0; 1241 - spin_lock(&buf->rb_mwlock); 1241 + spin_lock(&buf->rb_mrlock); 1242 1242 while (!list_empty(&buf->rb_all)) { 1243 - mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); 1244 - list_del(&mw->mw_all); 1243 + mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all); 1244 + list_del(&mr->mr_all); 1245 1245 1246 - spin_unlock(&buf->rb_mwlock); 1247 - ia->ri_ops->ro_release_mr(mw); 1246 + spin_unlock(&buf->rb_mrlock); 1247 + ia->ri_ops->ro_release_mr(mr); 1248 1248 count++; 1249 - spin_lock(&buf->rb_mwlock); 1249 + spin_lock(&buf->rb_mrlock); 1250 1250 } 1251 - spin_unlock(&buf->rb_mwlock); 1251 + spin_unlock(&buf->rb_mrlock); 1252 1252 r_xprt->rx_stats.mrs_allocated = 0; 1253 1253 1254 1254 dprintk("RPC: %s: released %u MRs\n", __func__, count); ··· 1285 1285 spin_unlock(&buf->rb_reqslock); 1286 1286 buf->rb_recv_count = 0; 1287 1287 1288 - rpcrdma_destroy_mrs(buf); 1288 + 
rpcrdma_mrs_destroy(buf); 1289 1289 } 1290 1290 1291 - struct rpcrdma_mw * 1292 - rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt) 1291 + /** 1292 + * rpcrdma_mr_get - Allocate an rpcrdma_mr object 1293 + * @r_xprt: controlling transport 1294 + * 1295 + * Returns an initialized rpcrdma_mr or NULL if no free 1296 + * rpcrdma_mr objects are available. 1297 + */ 1298 + struct rpcrdma_mr * 1299 + rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt) 1293 1300 { 1294 1301 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; 1295 - struct rpcrdma_mw *mw = NULL; 1302 + struct rpcrdma_mr *mr = NULL; 1296 1303 1297 - spin_lock(&buf->rb_mwlock); 1298 - if (!list_empty(&buf->rb_mws)) 1299 - mw = rpcrdma_pop_mw(&buf->rb_mws); 1300 - spin_unlock(&buf->rb_mwlock); 1304 + spin_lock(&buf->rb_mrlock); 1305 + if (!list_empty(&buf->rb_mrs)) 1306 + mr = rpcrdma_mr_pop(&buf->rb_mrs); 1307 + spin_unlock(&buf->rb_mrlock); 1301 1308 1302 - if (!mw) 1303 - goto out_nomws; 1304 - return mw; 1309 + if (!mr) 1310 + goto out_nomrs; 1311 + return mr; 1305 1312 1306 - out_nomws: 1307 - dprintk("RPC: %s: no MWs available\n", __func__); 1313 + out_nomrs: 1314 + dprintk("RPC: %s: no MRs available\n", __func__); 1308 1315 if (r_xprt->rx_ep.rep_connected != -ENODEV) 1309 1316 schedule_delayed_work(&buf->rb_refresh_worker, 0); 1310 1317 ··· 1321 1314 return NULL; 1322 1315 } 1323 1316 1317 + /** 1318 + * rpcrdma_mr_put - Release an rpcrdma_mr object 1319 + * @mr: object to release 1320 + * 1321 + */ 1324 1322 void 1325 - rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw) 1323 + rpcrdma_mr_put(struct rpcrdma_mr *mr) 1326 1324 { 1325 + struct rpcrdma_xprt *r_xprt = mr->mr_xprt; 1327 1326 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; 1328 1327 1329 - spin_lock(&buf->rb_mwlock); 1330 - rpcrdma_push_mw(mw, &buf->rb_mws); 1331 - spin_unlock(&buf->rb_mwlock); 1328 + spin_lock(&buf->rb_mrlock); 1329 + rpcrdma_mr_push(mr, &buf->rb_mrs); 1330 + spin_unlock(&buf->rb_mrlock); 1332 1331 } 1333 1332 1334 1333 static 
struct rpcrdma_rep *
+31 -31
net/sunrpc/xprtrdma/xprt_rdma.h
··· 230 230 }; 231 231 232 232 /* 233 - * struct rpcrdma_mw - external memory region metadata 233 + * struct rpcrdma_mr - external memory region metadata 234 234 * 235 235 * An external memory region is any buffer or page that is registered 236 236 * on the fly (ie, not pre-registered). 237 237 * 238 - * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During 238 + * Each rpcrdma_buffer has a list of free MWs anchored in rb_mrs. During 239 239 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in 240 240 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep 241 241 * track of registration metadata while each RPC is pending. ··· 265 265 u64 *fm_physaddrs; 266 266 }; 267 267 268 - struct rpcrdma_mw { 269 - struct list_head mw_list; 270 - struct scatterlist *mw_sg; 271 - int mw_nents; 272 - enum dma_data_direction mw_dir; 268 + struct rpcrdma_mr { 269 + struct list_head mr_list; 270 + struct scatterlist *mr_sg; 271 + int mr_nents; 272 + enum dma_data_direction mr_dir; 273 273 union { 274 274 struct rpcrdma_fmr fmr; 275 275 struct rpcrdma_frwr frwr; 276 276 }; 277 - struct rpcrdma_xprt *mw_xprt; 278 - u32 mw_handle; 279 - u32 mw_length; 280 - u64 mw_offset; 281 - struct list_head mw_all; 277 + struct rpcrdma_xprt *mr_xprt; 278 + u32 mr_handle; 279 + u32 mr_length; 280 + u64 mr_offset; 281 + struct list_head mr_all; 282 282 }; 283 283 284 284 /* ··· 371 371 } 372 372 373 373 static inline void 374 - rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list) 374 + rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list) 375 375 { 376 - list_add_tail(&mw->mw_list, list); 376 + list_add_tail(&mr->mr_list, list); 377 377 } 378 378 379 - static inline struct rpcrdma_mw * 380 - rpcrdma_pop_mw(struct list_head *list) 379 + static inline struct rpcrdma_mr * 380 + rpcrdma_mr_pop(struct list_head *list) 381 381 { 382 - struct rpcrdma_mw *mw; 382 + struct rpcrdma_mr *mr; 383 383 384 - mw = list_first_entry(list, struct 
rpcrdma_mw, mw_list); 385 - list_del(&mw->mw_list); 386 - return mw; 384 + mr = list_first_entry(list, struct rpcrdma_mr, mr_list); 385 + list_del(&mr->mr_list); 386 + return mr; 387 387 } 388 388 389 389 /* ··· 393 393 * One of these is associated with a transport instance 394 394 */ 395 395 struct rpcrdma_buffer { 396 - spinlock_t rb_mwlock; /* protect rb_mws list */ 397 - struct list_head rb_mws; 396 + spinlock_t rb_mrlock; /* protect rb_mrs list */ 397 + struct list_head rb_mrs; 398 398 struct list_head rb_all; 399 399 400 400 unsigned long rb_sc_head; ··· 473 473 struct rpcrdma_mr_seg * 474 474 (*ro_map)(struct rpcrdma_xprt *, 475 475 struct rpcrdma_mr_seg *, int, bool, 476 - struct rpcrdma_mw **); 476 + struct rpcrdma_mr **); 477 477 void (*ro_reminv)(struct rpcrdma_rep *rep, 478 - struct list_head *mws); 478 + struct list_head *mrs); 479 479 void (*ro_unmap_sync)(struct rpcrdma_xprt *, 480 480 struct list_head *); 481 - void (*ro_recover_mr)(struct rpcrdma_mw *); 481 + void (*ro_recover_mr)(struct rpcrdma_mr *mr); 482 482 int (*ro_open)(struct rpcrdma_ia *, 483 483 struct rpcrdma_ep *, 484 484 struct rpcrdma_create_data_internal *); 485 485 size_t (*ro_maxpages)(struct rpcrdma_xprt *); 486 486 int (*ro_init_mr)(struct rpcrdma_ia *, 487 - struct rpcrdma_mw *); 488 - void (*ro_release_mr)(struct rpcrdma_mw *); 487 + struct rpcrdma_mr *); 488 + void (*ro_release_mr)(struct rpcrdma_mr *mr); 489 489 const char *ro_displayname; 490 490 const int ro_send_w_inv_ok; 491 491 }; ··· 574 574 struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf); 575 575 void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc); 576 576 577 - struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *); 578 - void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *); 577 + struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt); 578 + void rpcrdma_mr_put(struct rpcrdma_mr *mr); 579 + void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr); 580 + 579 581 
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *); 580 582 void rpcrdma_buffer_put(struct rpcrdma_req *); 581 583 void rpcrdma_recv_buffer_get(struct rpcrdma_req *); 582 584 void rpcrdma_recv_buffer_put(struct rpcrdma_rep *); 583 - 584 - void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *); 585 585 586 586 struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction, 587 587 gfp_t);