Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
"This is pretty much just the usual array of smallish driver bugs.

- remove bouncing addresses from the MAINTAINERS file

- kernel oops and bad error handling fixes for hfi, i40iw, cxgb4, and
hns drivers

- various small LOC behavioral/operational bugs in mlx5, hns, qedr
and i40iw drivers

- two fixes for patches already sent during the merge window

- a long-standing bug related to not decreasing the pinned pages
count in the right MM was found and fixed"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (28 commits)
RDMA/hns: Move the location for initializing tmp_len
RDMA/hns: Bugfix for cq record db for kernel
IB/uverbs: Fix uverbs_attr_get_obj
RDMA/qedr: Fix doorbell bar mapping for dpi > 1
IB/umem: Use the correct mm during ib_umem_release
iw_cxgb4: Fix an error handling path in 'c4iw_get_dma_mr()'
RDMA/i40iw: Avoid panic when reading back the IRQ affinity hint
RDMA/i40iw: Avoid reference leaks when processing the AEQ
RDMA/i40iw: Avoid panic when objects are being created and destroyed
RDMA/hns: Fix the bug with NULL pointer
RDMA/hns: Set NULL for __internal_mr
RDMA/hns: Enable inner_pa_vld field of mpt
RDMA/hns: Set desc_dma_addr to zero when freeing cmq desc
RDMA/hns: Fix the bug with rq sge
RDMA/hns: Do not support qp transition from reset to reset for hip06
RDMA/hns: Add return operation when configuring global param fails
RDMA/hns: Update convert function of endian format
RDMA/hns: Load the RoCE driver automatically
RDMA/hns: Bugfix for rq record db for kernel
RDMA/hns: Add rq inline flags judgement
...

+117 -99
-6
MAINTAINERS
···
 F:	drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
-M:	Elad Kanfi <eladkan@mellanox.com>
 M:	Vineet Gupta <vgupta@synopsys.com>
 S:	Supported
 F:	arch/arc/plat-eznps
···
 F:	drivers/net/ethernet/mellanox/mlx5/core/en_*
 
 MELLANOX ETHERNET INNOVA DRIVER
-M:	Ilan Tayari <ilant@mellanox.com>
 R:	Boris Pismenny <borisp@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
···
 F:	include/linux/mlx5/mlx5_ifc_fpga.h
 
 MELLANOX ETHERNET INNOVA IPSEC DRIVER
-M:	Ilan Tayari <ilant@mellanox.com>
 R:	Boris Pismenny <borisp@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
···
 
 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@mellanox.com>
-M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
···
 F:	include/linux/mlx5/
 
 MELLANOX MLX5 IB driver
-M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com
···
 F:	net/netfilter/xt_SECMARK.c
 
 NETWORKING [TLS]
-M:	Ilya Lesokhin <ilyal@mellanox.com>
 M:	Aviad Yehezkel <aviadye@mellanox.com>
 M:	Dave Watson <davejwatson@fb.com>
 L:	netdev@vger.kernel.org
+1 -6
drivers/infiniband/core/umem.c
···
 	umem->length     = size;
 	umem->address    = addr;
 	umem->page_shift = PAGE_SHIFT;
-	umem->pid        = get_task_pid(current, PIDTYPE_PID);
 	/*
 	 * We ask for writable memory if any of the following
 	 * access flags are set. "Local write" and "remote write"
···
 		    IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
 	if (access & IB_ACCESS_ON_DEMAND) {
-		put_pid(umem->pid);
 		ret = ib_umem_odp_get(context, umem, access);
 		if (ret) {
 			kfree(umem);
···
 
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
-		put_pid(umem->pid);
 		kfree(umem);
 		return ERR_PTR(-ENOMEM);
 	}
···
 	if (ret < 0) {
 		if (need_release)
 			__ib_umem_release(context->device, umem, 0);
-		put_pid(umem->pid);
 		kfree(umem);
 	} else
 		current->mm->pinned_vm = locked;
···
 
 	__ib_umem_release(umem->context->device, umem, 1);
 
-	task = get_pid_task(umem->pid, PIDTYPE_PID);
-	put_pid(umem->pid);
+	task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
 	if (!task)
 		goto out;
 	mm = get_task_mm(task);
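A note on the umem change above (the "pinned pages in the right MM" item from the merge message): release now resolves the owning task from umem->context->tgid instead of a pid captured per-umem, so the uncharge hits the same accounting that was charged at pin time. A minimal userspace C sketch of that invariant, with all names (owner, pinned_buf, buf_pin, buf_release) hypothetical:

/*
 * Illustrative userspace sketch, not kernel code: charge and uncharge
 * must hit the *same* counter. Recording the accounting target once
 * and using that record at release keeps the pair balanced even if
 * release runs in a different context than pinning did.
 */
#include <assert.h>
#include <stdlib.h>

struct owner {
	long pinned_pages;	/* per-owner pinned-page counter */
};

struct pinned_buf {
	struct owner *owner;	/* recorded once, at pin time */
	long npages;
};

static struct pinned_buf *buf_pin(struct owner *who, long npages)
{
	struct pinned_buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->owner = who;		/* remember whom to uncharge later */
	b->npages = npages;
	who->pinned_pages += npages;
	return b;
}

static void buf_release(struct pinned_buf *b)
{
	/* Uncharge the recorded owner, not whoever happens to call us. */
	b->owner->pinned_pages -= b->npages;
	free(b);
}

int main(void)
{
	struct owner alice = { 0 };
	struct pinned_buf *b = buf_pin(&alice, 4);

	/* release may run from another task; accounting stays balanced */
	buf_release(b);
	assert(alice.pinned_pages == 0);
	return 0;
}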
+2 -2
drivers/infiniband/hw/cxgb4/mem.c
···
 err_dereg_mem:
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
 		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
-err_free_wr_wait:
-	c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_skb:
 	kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+	c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_mhp:
 	kfree(mhp);
 	return ERR_PTR(ret);
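The cxgb4 hunk reorders two error labels. In a goto-unwind ladder each label falls through to the ones below it, so the labels must release resources in exactly the reverse order of allocation; here the skb must be freed before the wr_wait is put. A self-contained sketch of the idiom (make_thing, part_a, part_b are hypothetical names):

#include <stdlib.h>

struct thing {
	void *part_a;	/* allocated first  */
	void *part_b;	/* allocated second */
};

static struct thing *make_thing(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		goto err;
	t->part_a = malloc(16);
	if (!t->part_a)
		goto err_free_thing;
	t->part_b = malloc(32);
	if (!t->part_b)
		goto err_free_a;
	return t;

	/* unwind in reverse allocation order; each label falls through */
err_free_a:
	free(t->part_a);
err_free_thing:
	free(t);
err:
	return NULL;
}

int main(void)
{
	struct thing *t = make_thing();

	if (t) {
		free(t->part_b);
		free(t->part_a);
		free(t);
	}
	return 0;
}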
+4
drivers/infiniband/hw/hfi1/chip.c
···
 	u64 status;
 	u32 sw_index;
 	int i = 0;
+	unsigned long irq_flags;
 
 	sw_index = dd->hw_to_sw[hw_context];
 	if (sw_index >= dd->num_send_contexts) {
···
 		return;
 	}
 	sci = &dd->send_contexts[sw_index];
+	spin_lock_irqsave(&dd->sc_lock, irq_flags);
 	sc = sci->sc;
 	if (!sc) {
 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
 			   sw_index, hw_context);
+		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 		return;
 	}
···
 	 */
 	if (sc->type != SC_USER)
 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 
 	/*
 	 * Update the counters for the corresponding status bits.
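The hfi1 hunk takes dd->sc_lock around the sci->sc lookup and holds it across the use, dropping it on every exit path. The general rule: a pointer that a concurrent path can tear down must be read and used under the same lock. A hypothetical userspace analogue using a pthread mutex in place of spin_lock_irqsave():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_ctx;		/* may be cleared concurrently */

static void handle_ctx_event(void)
{
	pthread_mutex_lock(&ctx_lock);
	if (!shared_ctx) {
		/* every early exit must still drop the lock */
		pthread_mutex_unlock(&ctx_lock);
		return;
	}
	printf("ctx value: %d\n", *shared_ctx);	/* safe under the lock */
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	int v = 42;

	shared_ctx = &v;
	handle_ctx_event();
	shared_ctx = NULL;	/* concurrent teardown in the real driver */
	handle_ctx_event();
	return 0;
}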
+1
drivers/infiniband/hw/hns/hns_roce_cq.c
···
 
 		hr_cq->set_ci_db = hr_cq->db.db_record;
 		*hr_cq->set_ci_db = 0;
+		hr_cq->db_en = 1;
 	}
 
 	/* Init mmt table and write buff address to mtt table */
+2 -1
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
···
 	free_mr->mr_free_pd = to_hr_pd(pd);
 	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
 	free_mr->mr_free_pd->ibpd.uobject = NULL;
+	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
 	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
 
 	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
···
 
 	do {
 		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
-		if (ret < 0) {
+		if (ret < 0 && hr_qp) {
 			dev_err(dev,
 				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
 				hr_qp->qpn, ret, hr_mr->key, ne);
+23 -9
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
···
 	unsigned long flags;
 	unsigned int ind;
 	void *wqe = NULL;
-	u32 tmp_len = 0;
 	bool loopback;
+	u32 tmp_len;
 	int ret = 0;
 	u8 *smac;
 	int nreq;
···
 
 		owner_bit =
 		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+		tmp_len = 0;
 
 		/* Corresponding to the QP type, wqe process separately */
 		if (ibqp->qp_type == IB_QPT_GSI) {
···
 	}
 
 	if (i < hr_qp->rq.max_gs) {
-		dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
-		dseg[i].addr = 0;
+		dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+		dseg->addr = 0;
 	}
 
 	/* rq support inline data */
-	sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
-	hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
-	for (i = 0; i < wr->num_sge; i++) {
-		sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
-		sge_list[i].len = wr->sg_list[i].length;
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+		sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+		hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+			(u32)wr->num_sge;
+		for (i = 0; i < wr->num_sge; i++) {
+			sge_list[i].addr =
+				(void *)(u64)wr->sg_list[i].addr;
+			sge_list[i].len = wr->sg_list[i].length;
+		}
 	}
 
 	hr_qp->rq.wrid[ind] = wr->wr_id;
···
 	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
 			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
 			 DMA_BIDIRECTIONAL);
+
+	ring->desc_dma_addr = 0;
 	kfree(ring->desc);
 }
···
 	if (ret) {
 		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
 			ret);
+		return ret;
 	}
 
 	/* Get pf resource owned by every pf */
···
 
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
 		     mr->type == MR_TYPE_MR ? 0 : 1);
+	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
+		     1);
 	mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
 
 	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
···
 			    struct hns_roce_v2_qp_context *context,
 			    struct hns_roce_v2_qp_context *qpc_mask)
 {
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 
 	/*
···
 	context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
 	qpc_mask->rq_db_record_addr = 0;
 
-	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
+	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
+		    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
 
 	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
···
 	/* required last entry */
 	{0, }
 };
+
+MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
 
 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
 				  struct hnae3_handle *handle)
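Among the hns_roce_hw_v2.c hunks, the MODULE_DEVICE_TABLE(pci, ...) addition is what implements "Load the RoCE driver automatically": the macro exports the driver's PCI ID table into modules.alias so udev can modprobe the module when matching hardware appears. A minimal, buildable module skeleton showing the mechanism (the IDs and all demo_* names are illustrative, and unlike this sketch the hns driver actually binds through the hnae3 framework rather than a plain pci_driver):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_tbl[] = {
	{ PCI_DEVICE(0x19e5, 0xa222) },	/* illustrative vendor/device */
	{ 0, }				/* required last entry */
};
/* Exports the table above so userspace can autoload this module. */
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	dev_info(&pdev->dev, "demo device bound\n");
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
}

static struct pci_driver demo_driver = {
	.name		= "demo_pci",
	.id_table	= demo_pci_tbl,
	.probe		= demo_probe,
	.remove		= demo_remove,
};
module_pci_driver(demo_driver);

MODULE_LICENSE("GPL");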
+1 -1
drivers/infiniband/hw/hns/hns_roce_main.c
···
 
 	memset(props, 0, sizeof(*props));
 
-	props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
+	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
 	props->max_mr_size = (u64)(~(0ULL));
 	props->page_size_cap = hr_dev->caps.page_size_cap;
 	props->vendor_id = hr_dev->vendor_id;
+9 -1
drivers/infiniband/hw/hns/hns_roce_qp.c
···
 			goto err_rq_sge_list;
 		}
 		*hr_qp->rdb.db_record = 0;
+		hr_qp->rdb_en = 1;
 	}
 
 	/* Allocate QP buf */
···
 	}
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ret = 0;
+		if (hr_dev->caps.min_wqes) {
+			ret = -EPERM;
+			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+				new_state);
+		} else {
+			ret = 0;
+		}
+
 		goto out;
 	}
+1
drivers/infiniband/hw/i40iw/i40iw.h
···
 	u32 irq;
 	u32 cpu_affinity;
 	u32 ceq_id;
+	cpumask_t mask;
 };
 
 struct l2params_work {
+1 -1
drivers/infiniband/hw/i40iw/i40iw_cm.c
···
 		if (netif_is_bond_slave(netdev))
 			netdev = netdev_master_upper_dev_get(netdev);
 
-		neigh = dst_neigh_lookup(dst, &dst_addr);
+		neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
 		rcu_read_lock();
 		if (neigh) {
+2 -2
drivers/infiniband/hw/i40iw/i40iw_hw.c
···
 		switch (info->ae_id) {
 		case I40IW_AE_LLP_FIN_RECEIVED:
 			if (qp->term_flags)
-				continue;
+				break;
 			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
 				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
 				if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
···
 			break;
 		case I40IW_AE_LLP_CONNECTION_RESET:
 			if (atomic_read(&iwqp->close_timer_started))
-				continue;
+				break;
 			i40iw_cm_disconn(iwqp);
 			break;
 		case I40IW_AE_QP_SUSPEND_COMPLETE:
+3 -4
drivers/infiniband/hw/i40iw/i40iw_main.c
···
 			       struct i40iw_msix_vector *msix_vec)
 {
 	enum i40iw_status_code status;
-	cpumask_t mask;
 
 	if (iwdev->msix_shared && !ceq_id) {
 		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
···
 		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
 	}
 
-	cpumask_clear(&mask);
-	cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
-	irq_set_affinity_hint(msix_vec->irq, &mask);
+	cpumask_clear(&msix_vec->mask);
+	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
 
 	if (status) {
 		i40iw_pr_err("ceq irq config fail\n");
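The i40iw_main.c hunk moves the cpumask from the stack into struct i40iw_msix_vector because irq_set_affinity_hint() stores the pointer it is given; a stack mask dangles as soon as the function returns. The underlying rule, that storage handed to an API which retains a reference must live at least as long as the registration, in a hypothetical userspace sketch:

#include <stdio.h>

struct hint_registry {
	const int *hint;	/* registry keeps the pointer, not a copy */
};

static struct hint_registry reg;

static void set_hint(const int *hint)
{
	reg.hint = hint;	/* retained past the caller's return */
}

struct vector {
	int mask;		/* long-lived: lives with the vector */
};

static void setup_vector(struct vector *vec, int cpu)
{
	vec->mask = 1 << cpu;
	set_hint(&vec->mask);	/* safe: &vec->mask outlives this call */
}

int main(void)
{
	static struct vector v;	/* static storage outlives main's frame */

	setup_vector(&v, 2);
	printf("hint mask: %d\n", *reg.hint);
	return 0;
}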
+10 -3
drivers/infiniband/hw/i40iw/i40iw_verbs.c
···
 
 	list_for_each_entry(iwpbl, pbl_list, list) {
 		if (iwpbl->user_base == va) {
+			iwpbl->on_list = false;
 			list_del(&iwpbl->list);
 			return iwpbl;
 		}
···
 		return ERR_PTR(-ENOMEM);
 
 	iwqp = (struct i40iw_qp *)mem;
+	iwqp->allocated_buffer = mem;
 	qp = &iwqp->sc_qp;
 	qp->back_qp = (void *)iwqp;
 	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
···
 		goto error;
 	}
 
-	iwqp->allocated_buffer = mem;
 	iwqp->iwdev = iwdev;
 	iwqp->iwpd = iwpd;
 	iwqp->ibqp.qp_num = qp_num;
···
 			goto error;
 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
 		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+		iwpbl->on_list = true;
 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	case IW_MEMREG_TYPE_CQ:
···
 
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+		iwpbl->on_list = true;
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IW_MEMREG_TYPE_MEM:
···
 	switch (iwmr->type) {
 	case IW_MEMREG_TYPE_CQ:
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		if (!list_empty(&ucontext->cq_reg_mem_list))
+		if (iwpbl->on_list) {
+			iwpbl->on_list = false;
 			list_del(&iwpbl->list);
+		}
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IW_MEMREG_TYPE_QP:
 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-		if (!list_empty(&ucontext->qp_reg_mem_list))
+		if (iwpbl->on_list) {
+			iwpbl->on_list = false;
 			list_del(&iwpbl->list);
+		}
 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	default:
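The i40iw_verbs.c hunks replace list_empty() checks with a per-entry on_list flag: list_empty(head) only proves that some entry is queued, not that this entry still is, so an entry could be unlinked twice or the wrong entry unlinked. A self-contained sketch of the idempotent-removal pattern on a minimal doubly-linked list (all names hypothetical):

#include <assert.h>
#include <stdbool.h>

struct node {
	struct node *prev, *next;
	bool on_list;		/* per-entry membership, not per-list */
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
	n->on_list = true;
}

static void list_del_safe(struct node *n)
{
	if (!n->on_list)	/* already removed: do nothing */
		return;
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->on_list = false;
}

int main(void)
{
	struct node head, a;

	list_init(&head);
	a.on_list = false;
	list_add(&head, &a);
	list_del_safe(&a);
	list_del_safe(&a);	/* second removal is now harmless */
	assert(head.next == &head);
	return 0;
}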
+1
drivers/infiniband/hw/i40iw/i40iw_verbs.h
···
 	};
 
 	bool pbl_allocated;
+	bool on_list;
 	u64 user_base;
 	struct i40iw_pble_alloc pble_alloc;
 	struct i40iw_mr *iwmr;
+1 -1
drivers/infiniband/hw/mlx5/main.c
···
 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
 }
 
-static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
 			   bool inner)
 {
 	if (inner) {
+11 -7
drivers/infiniband/hw/mlx5/qp.c
···
 		return 1;
 }
 
-static int first_med_bfreg(void)
-{
-	return 1;
-}
-
 enum {
 	/* this is the first blue flame register in the array of bfregs assigned
 	 * to a processes. Since we do not use it for blue flame but rather
···
 			NUM_NON_BLUE_FLAME_BFREGS;
 
 	return n >= 0 ? n : 0;
+}
+
+static int first_med_bfreg(struct mlx5_ib_dev *dev,
+			   struct mlx5_bfreg_info *bfregi)
+{
+	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
 }
 
 static int first_hi_bfreg(struct mlx5_ib_dev *dev,
···
 static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
 				 struct mlx5_bfreg_info *bfregi)
 {
-	int minidx = first_med_bfreg();
+	int minidx = first_med_bfreg(dev, bfregi);
 	int i;
 
-	for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
+	if (minidx < 0)
+		return minidx;
+
+	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
 		if (bfregi->count[i] < bfregi->count[minidx])
 			minidx = i;
 		if (!bfregi->count[minidx])
+30 -32
drivers/infiniband/hw/qedr/verbs.c
···
 {
 	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
 	struct qedr_dev *dev = get_qedr_dev(context->device);
-	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
-	u64 unmapped_db = dev->db_phys_addr;
+	unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
 	unsigned long len = (vma->vm_end - vma->vm_start);
-	int rc = 0;
-	bool found;
+	unsigned long dpi_start;
+
+	dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
 
 	DP_DEBUG(dev, QEDR_MSG_INIT,
-		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
-		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
-	if (vma->vm_start & (PAGE_SIZE - 1)) {
-		DP_ERR(dev, "Vma_start not page aligned = %ld\n",
-		       vma->vm_start);
+		 "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
+		 (void *)vma->vm_start, (void *)vma->vm_end,
+		 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+
+	if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
+		DP_ERR(dev,
+		       "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
+		       (void *)vma->vm_start, (void *)vma->vm_end);
 		return -EINVAL;
 	}
 
-	found = qedr_search_mmap(ucontext, vm_page, len);
-	if (!found) {
-		DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+	if (!qedr_search_mmap(ucontext, phys_addr, len)) {
+		DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
 		       vma->vm_pgoff);
 		return -EINVAL;
 	}
 
-	DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
-
-	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
-						     dev->db_size))) {
-		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
-		if (vma->vm_flags & VM_READ) {
-			DP_ERR(dev, "Trying to map doorbell bar for read\n");
-			return -EPERM;
-		}
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-					PAGE_SIZE, vma->vm_page_prot);
-	} else {
-		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
-		rc = remap_pfn_range(vma, vma->vm_start,
-				     vma->vm_pgoff, len, vma->vm_page_prot);
+	if (phys_addr < dpi_start ||
+	    ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
+		DP_ERR(dev,
+		       "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
+		       (void *)phys_addr, (void *)dpi_start,
+		       ucontext->dpi_size);
+		return -EINVAL;
 	}
-	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
-	return rc;
+
+	if (vma->vm_flags & VM_READ) {
+		DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
+		return -EINVAL;
+	}
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
+				  vma->vm_page_prot);
 }
 
 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
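The rewritten qedr_mmap() collapses to a single policy: the requested window must be page aligned and must sit entirely inside the caller's doorbell (dpi) region; everything else is rejected before remapping. The containment check in isolation, as a standalone sketch with hypothetical names and constants:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096UL

static bool range_ok(uint64_t start, uint64_t len,
		     uint64_t region_start, uint64_t region_size)
{
	if ((start | len) & (DEMO_PAGE_SIZE - 1))
		return false;		/* must be page aligned */
	if (start < region_start)
		return false;		/* begins before the region */
	if (start + len > region_start + region_size)
		return false;		/* runs past the region */
	return true;
}

int main(void)
{
	uint64_t dpi_start = 0x100000, dpi_size = 16 * DEMO_PAGE_SIZE;

	assert(range_ok(dpi_start, DEMO_PAGE_SIZE, dpi_start, dpi_size));
	assert(!range_ok(dpi_start + dpi_size, DEMO_PAGE_SIZE,
			 dpi_start, dpi_size));	/* just past the end */
	assert(!range_ok(dpi_start + 1, DEMO_PAGE_SIZE,
			 dpi_start, dpi_size));	/* misaligned */
	return 0;
}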
+1 -9
drivers/infiniband/sw/rxe/rxe_verbs.c
···
 	unsigned int mask;
 	unsigned int length = 0;
 	int i;
-	int must_sched;
 
 	while (wr) {
 		mask = wr_opcode_mask(wr->opcode, qp);
···
 		wr = wr->next;
 	}
 
-	/*
-	 * Must sched in case of GSI QP because ib_send_mad() hold irq lock,
-	 * and the requester call ip_local_out_sk() that takes spin_lock_bh.
-	 */
-	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
-			(queue_count(qp->sq.queue) > 1);
-
-	rxe_run_task(&qp->req.task, must_sched);
+	rxe_run_task(&qp->req.task, 1);
 	if (unlikely(qp->req.state == QP_STATE_ERROR))
 		rxe_run_task(&qp->comp.task, 1);
+1 -1
drivers/infiniband/ulp/srpt/Kconfig
···
 config INFINIBAND_SRPT
 	tristate "InfiniBand SCSI RDMA Protocol target support"
-	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
+	depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
 	---help---
 
 	  Support for the SCSI RDMA Protocol (SRP) Target driver. The
+1 -1
drivers/nvme/host/Kconfig
···
 
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
-	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
+	depends on INFINIBAND_ADDR_TRANS && BLOCK
 	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
+1 -1
drivers/nvme/target/Kconfig
···
 
 config NVME_TARGET_RDMA
 	tristate "NVMe over Fabrics RDMA target support"
-	depends on INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on INFINIBAND_ADDR_TRANS
 	depends on NVME_TARGET
 	select SGL_ALLOC
 	help
+1 -1
drivers/staging/lustre/lnet/Kconfig
···
 
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an
+1 -1
fs/cifs/Kconfig
···
 
 config CIFS_SMB_DIRECT
 	bool "SMB Direct support (Experimental)"
-	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
+	depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y
 	help
 	  Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
-1
include/rdma/ib_umem.h
···
 	int writable;
 	int hugetlb;
 	struct work_struct work;
-	struct pid *pid;
 	struct mm_struct *mm;
 	unsigned long diff;
 	struct ib_umem_odp *odp_data;
+5 -5
include/rdma/uverbs_ioctl.h
···
 static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle,
 					u16 idx)
 {
-	struct ib_uobject *uobj =
-		uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject;
+	const struct uverbs_attr *attr;
 
-	if (IS_ERR(uobj))
-		return uobj;
+	attr = uverbs_attr_get(attrs_bundle, idx);
+	if (IS_ERR(attr))
+		return ERR_CAST(attr);
 
-	return uobj->object;
+	return attr->obj_attr.uobject->object;
 }
 
 static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
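The uverbs_attr_get_obj() fix is a check-before-dereference bug: uverbs_attr_get() returns an ERR_PTR-encoded error, and the old code reached through ->obj_attr.uobject before testing it. A standalone sketch of the kernel's ERR_PTR convention showing the corrected order (ERR_PTR/IS_ERR/PTR_ERR are re-implemented here for illustration; attr_get and the other names are hypothetical):

#include <assert.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct attr { void *object; };

static struct attr good_attr = { .object = &good_attr };

static struct attr *attr_get(int idx)
{
	return idx == 0 ? &good_attr : ERR_PTR(-ENOENT);
}

static void *attr_get_obj(int idx)
{
	struct attr *attr = attr_get(idx);

	if (IS_ERR(attr))	/* check *before* dereferencing */
		return attr;	/* propagate the encoded error */
	return attr->object;
}

int main(void)
{
	assert(attr_get_obj(0) == (void *)&good_attr);
	assert(PTR_ERR(attr_get_obj(1)) == -ENOENT);
	return 0;
}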
+1 -1
net/9p/Kconfig
···
 
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on INET && INFINIBAND_ADDR_TRANS
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.
+1 -1
net/rds/Kconfig
···
 
 config RDS_RDMA
 	tristate "RDS over Infiniband"
-	depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on RDS && INFINIBAND_ADDR_TRANS
 	---help---
 	  Allow RDS to use Infiniband as a transport.
 	  This transport supports RDMA operations.
+1 -1
net/sunrpc/Kconfig
···
 
 config SUNRPC_XPRT_RDMA
 	tristate "RPC-over-RDMA transport"
-	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on SUNRPC && INFINIBAND_ADDR_TRANS
 	default SUNRPC && INFINIBAND
 	select SG_POOL
 	help