Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/srp: Create an insecure all physical rkey only if needed

The SRP initiator only needs this if the insecure register_always=N
performance optimization is enabled, or if FRWR/FMR is not supported
in the driver.

Do not create an all-physical MR unless it is needed to support
either of those modes. Default register_always to true so the
out-of-the-box configuration does not create an insecure all-physical MR.

Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
[bvanassche: reworked and rebased this patch]
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

Authored by Bart Van Assche and committed by Doug Ledford.
Commit hashes: 03f6fb93 (this commit), parent 330179f2.

+25 -19
+23 -17
drivers/infiniband/ulp/srp/ib_srp.c
··· 68 68 static unsigned int cmd_sg_entries; 69 69 static unsigned int indirect_sg_entries; 70 70 static bool allow_ext_sg; 71 - static bool prefer_fr; 72 - static bool register_always; 71 + static bool prefer_fr = true; 72 + static bool register_always = true; 73 73 static int topspin_workarounds = 1; 74 74 75 75 module_param(srp_sg_tablesize, uint, 0444); ··· 1353 1353 if (state->npages == 0) 1354 1354 return 0; 1355 1355 1356 - if (state->npages == 1 && !register_always) 1356 + if (state->npages == 1 && target->global_mr) 1357 1357 srp_map_desc(state, state->base_dma_addr, state->dma_len, 1358 - target->rkey); 1358 + target->global_mr->rkey); 1359 1359 else 1360 1360 ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) : 1361 1361 srp_map_finish_fmr(state, ch); ··· 1442 1442 } else { 1443 1443 for_each_sg(scat, sg, count, i) { 1444 1444 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), 1445 - ib_sg_dma_len(dev->dev, sg), target->rkey); 1445 + ib_sg_dma_len(dev->dev, sg), 1446 + target->global_mr->rkey); 1446 1447 } 1447 1448 } 1448 1449 ··· 1532 1531 fmt = SRP_DATA_DESC_DIRECT; 1533 1532 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 1534 1533 1535 - if (count == 1 && !register_always) { 1534 + if (count == 1 && target->global_mr) { 1536 1535 /* 1537 1536 * The midlayer only generated a single gather/scatter 1538 1537 * entry, or DMA mapping coalesced everything to a ··· 1542 1541 struct srp_direct_buf *buf = (void *) cmd->add_data; 1543 1542 1544 1543 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 1545 - buf->key = cpu_to_be32(target->rkey); 1544 + buf->key = cpu_to_be32(target->global_mr->rkey); 1546 1545 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 1547 1546 1548 1547 req->nmdesc = 0; ··· 1596 1595 memcpy(indirect_hdr->desc_list, req->indirect_desc, 1597 1596 count * sizeof (struct srp_direct_buf)); 1598 1597 1599 - if (register_always && (dev->use_fast_reg || dev->use_fmr)) { 1598 + if (!target->global_mr) { 1600 1599 
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, 1601 1600 idb_len, &idb_rkey); 1602 1601 if (ret < 0) 1603 1602 return ret; 1604 1603 req->nmdesc++; 1605 1604 } else { 1606 - idb_rkey = target->rkey; 1605 + idb_rkey = target->global_mr->rkey; 1607 1606 } 1608 1607 1609 1608 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); ··· 3158 3157 target->scsi_host = target_host; 3159 3158 target->srp_host = host; 3160 3159 target->lkey = host->srp_dev->pd->local_dma_lkey; 3161 - target->rkey = host->srp_dev->mr->rkey; 3160 + target->global_mr = host->srp_dev->global_mr; 3162 3161 target->cmd_sg_cnt = cmd_sg_entries; 3163 3162 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; 3164 3163 target->allow_ext_sg = allow_ext_sg; ··· 3448 3447 if (IS_ERR(srp_dev->pd)) 3449 3448 goto free_dev; 3450 3449 3451 - srp_dev->mr = ib_get_dma_mr(srp_dev->pd, 3452 - IB_ACCESS_LOCAL_WRITE | 3453 - IB_ACCESS_REMOTE_READ | 3454 - IB_ACCESS_REMOTE_WRITE); 3455 - if (IS_ERR(srp_dev->mr)) 3456 - goto err_pd; 3450 + if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) { 3451 + srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd, 3452 + IB_ACCESS_LOCAL_WRITE | 3453 + IB_ACCESS_REMOTE_READ | 3454 + IB_ACCESS_REMOTE_WRITE); 3455 + if (IS_ERR(srp_dev->global_mr)) 3456 + goto err_pd; 3457 + } else { 3458 + srp_dev->global_mr = NULL; 3459 + } 3457 3460 3458 3461 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { 3459 3462 host = srp_add_port(srp_dev, p); ··· 3514 3509 kfree(host); 3515 3510 } 3516 3511 3517 - ib_dereg_mr(srp_dev->mr); 3512 + if (srp_dev->global_mr) 3513 + ib_dereg_mr(srp_dev->global_mr); 3518 3514 ib_dealloc_pd(srp_dev->pd); 3519 3515 3520 3516 kfree(srp_dev);
+2 -2
drivers/infiniband/ulp/srp/ib_srp.h
··· 95 95 struct list_head dev_list; 96 96 struct ib_device *dev; 97 97 struct ib_pd *pd; 98 - struct ib_mr *mr; 98 + struct ib_mr *global_mr; 99 99 u64 mr_page_mask; 100 100 int mr_page_size; 101 101 int mr_max_size; ··· 183 183 spinlock_t lock; 184 184 185 185 /* read only in the hot path */ 186 + struct ib_mr *global_mr; 186 187 struct srp_rdma_ch *ch; 187 188 u32 ch_count; 188 189 u32 lkey; 189 - u32 rkey; 190 190 enum srp_target_state state; 191 191 unsigned int max_iu_len; 192 192 unsigned int cmd_sg_cnt;