Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
"A few minor fixes:

- Fix buffer management in SRP to correct a regression with the login
authentication feature from v5.17

- Don't iterate over non-present ports in mlx5

- Fix an error introduced by the fortify work in cxgb4

- Two bug fixes for the recently merged ERDMA driver

- Unbreak RDMA dmabuf support, a regression from v5.19"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
RDMA: Handle the return code from dma_resv_wait_timeout() properly
RDMA/erdma: Correct the max_qp and max_cq capacities of the device
RDMA/erdma: Using the key in FMR WR instead of MR structure
RDMA/cxgb4: fix accept failure due to increased cpl_t5_pass_accept_rpl size
RDMA/mlx5: Use the proper number of ports
IB/iser: Fix login with authentication

+42 -40
+7 -1
drivers/infiniband/core/umem_dmabuf.c
··· 18 18 struct scatterlist *sg; 19 19 unsigned long start, end, cur = 0; 20 20 unsigned int nmap = 0; 21 + long ret; 21 22 int i; 22 23 23 24 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); ··· 68 67 * may be not up-to-date. Wait for the exporter to finish 69 68 * the migration. 70 69 */ 71 - return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, 70 + ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, 72 71 DMA_RESV_USAGE_KERNEL, 73 72 false, MAX_SCHEDULE_TIMEOUT); 73 + if (ret < 0) 74 + return ret; 75 + if (ret == 0) 76 + return -ETIMEDOUT; 77 + return 0; 74 78 } 75 79 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages); 76 80
+9 -16
drivers/infiniband/hw/cxgb4/cm.c
··· 2468 2468 opt2 |= CCTRL_ECN_V(1); 2469 2469 } 2470 2470 2471 - skb_get(skb); 2472 - rpl = cplhdr(skb); 2473 2471 if (!is_t4(adapter_type)) { 2474 - BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16)); 2475 - skb_trim(skb, sizeof(*rpl5)); 2476 - rpl5 = (void *)rpl; 2477 - INIT_TP_WR(rpl5, ep->hwtid); 2478 - } else { 2479 - skb_trim(skb, sizeof(*rpl)); 2480 - INIT_TP_WR(rpl, ep->hwtid); 2481 - } 2482 - OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 2483 - ep->hwtid)); 2484 - 2485 - if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { 2486 2472 u32 isn = (prandom_u32() & ~7UL) - 1; 2473 + 2474 + skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL); 2475 + rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16)); 2476 + rpl = (void *)rpl5; 2477 + INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid); 2487 2478 opt2 |= T5_OPT_2_VALID_F; 2488 2479 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 2489 2480 opt2 |= T5_ISS_F; 2490 - rpl5 = (void *)rpl; 2491 - memset_after(rpl5, 0, iss); 2492 2481 if (peer2peer) 2493 2482 isn += 4; 2494 2483 rpl5->iss = cpu_to_be32(isn); 2495 2484 pr_debug("iss %u\n", be32_to_cpu(rpl5->iss)); 2485 + } else { 2486 + skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 2487 + rpl = __skb_put_zero(skb, sizeof(*rpl)); 2488 + INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid); 2496 2489 } 2497 2490 2498 2491 rpl->opt0 = cpu_to_be64(opt0);
+1 -1
drivers/infiniband/hw/erdma/erdma_qp.c
··· 407 407 to_erdma_access_flags(reg_wr(send_wr)->access); 408 408 regmr_sge->addr = cpu_to_le64(mr->ibmr.iova); 409 409 regmr_sge->length = cpu_to_le32(mr->ibmr.length); 410 - regmr_sge->stag = cpu_to_le32(mr->ibmr.lkey); 410 + regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key); 411 411 attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) | 412 412 FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) | 413 413 FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+2 -2
drivers/infiniband/hw/erdma/erdma_verbs.c
··· 280 280 attr->vendor_id = PCI_VENDOR_ID_ALIBABA; 281 281 attr->vendor_part_id = dev->pdev->device; 282 282 attr->hw_ver = dev->pdev->revision; 283 - attr->max_qp = dev->attrs.max_qp; 283 + attr->max_qp = dev->attrs.max_qp - 1; 284 284 attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr); 285 285 attr->max_qp_rd_atom = dev->attrs.max_ord; 286 286 attr->max_qp_init_rd_atom = dev->attrs.max_ird; ··· 291 291 attr->max_send_sge = dev->attrs.max_send_sge; 292 292 attr->max_recv_sge = dev->attrs.max_recv_sge; 293 293 attr->max_sge_rd = dev->attrs.max_sge_rd; 294 - attr->max_cq = dev->attrs.max_cq; 294 + attr->max_cq = dev->attrs.max_cq - 1; 295 295 attr->max_cqe = dev->attrs.max_cqe; 296 296 attr->max_mr = dev->attrs.max_mr; 297 297 attr->max_pd = dev->attrs.max_pd;
+16 -18
drivers/infiniband/hw/mlx5/main.c
··· 2738 2738 int err; 2739 2739 int port; 2740 2740 2741 - for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) { 2742 - dev->port_caps[port - 1].has_smi = false; 2743 - if (MLX5_CAP_GEN(dev->mdev, port_type) == 2744 - MLX5_CAP_PORT_TYPE_IB) { 2745 - if (MLX5_CAP_GEN(dev->mdev, ib_virt)) { 2746 - err = mlx5_query_hca_vport_context(dev->mdev, 0, 2747 - port, 0, 2748 - &vport_ctx); 2749 - if (err) { 2750 - mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n", 2751 - port, err); 2752 - return err; 2753 - } 2754 - dev->port_caps[port - 1].has_smi = 2755 - vport_ctx.has_smi; 2756 - } else { 2757 - dev->port_caps[port - 1].has_smi = true; 2758 - } 2741 + if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB) 2742 + return 0; 2743 + 2744 + for (port = 1; port <= dev->num_ports; port++) { 2745 + if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) { 2746 + dev->port_caps[port - 1].has_smi = true; 2747 + continue; 2759 2748 } 2749 + err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0, 2750 + &vport_ctx); 2751 + if (err) { 2752 + mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n", 2753 + port, err); 2754 + return err; 2755 + } 2756 + dev->port_caps[port - 1].has_smi = vport_ctx.has_smi; 2760 2757 } 2758 + 2761 2759 return 0; 2762 2760 } 2763 2761
+6 -1
drivers/infiniband/ulp/iser/iser_initiator.c
··· 537 537 struct iscsi_hdr *hdr; 538 538 char *data; 539 539 int length; 540 + bool full_feature_phase; 540 541 541 542 if (unlikely(wc->status != IB_WC_SUCCESS)) { 542 543 iser_err_comp(wc, "login_rsp"); ··· 551 550 hdr = desc->rsp + sizeof(struct iser_ctrl); 552 551 data = desc->rsp + ISER_HEADERS_LEN; 553 552 length = wc->byte_len - ISER_HEADERS_LEN; 553 + full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) == 554 + ISCSI_FULL_FEATURE_PHASE) && 555 + (hdr->flags & ISCSI_FLAG_CMD_FINAL); 554 556 555 557 iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, 556 558 hdr->itt, length); ··· 564 560 desc->rsp_dma, ISER_RX_LOGIN_SIZE, 565 561 DMA_FROM_DEVICE); 566 562 567 - if (iser_conn->iscsi_conn->session->discovery_sess) 563 + if (!full_feature_phase || 564 + iser_conn->iscsi_conn->session->discovery_sess) 568 565 return; 569 566 570 567 /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
+1 -1
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
··· 497 497 __be32 opt2; 498 498 __be64 opt0; 499 499 __be32 iss; 500 - __be32 rsvd[3]; 500 + __be32 rsvd; 501 501 }; 502 502 503 503 struct cpl_act_open_req {