Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA: Globally allocate and release QP memory

Convert the QP object to follow the IB/core general allocation scheme. This
change allows us to make sure that restrack properly krefs the memory.

Link: https://lore.kernel.org/r/48e767124758aeecc433360ddd85eaa6325b34d9.1627040189.git.leonro@nvidia.com
Reviewed-by: Gal Pressman <galpress@amazon.com> #efa
Tested-by: Gal Pressman <galpress@amazon.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> #rdma and core
Tested-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

authored by

Leon Romanovsky and committed by
Jason Gunthorpe
514aee66 44da3730

+405 -550
+21 -7
drivers/infiniband/core/core_priv.h
··· 322 322 struct ib_uqp_object *uobj, const char *caller) 323 323 { 324 324 struct ib_qp *qp; 325 + int ret; 325 326 326 327 if (!dev->ops.create_qp) 327 328 return ERR_PTR(-EOPNOTSUPP); 328 329 329 - qp = dev->ops.create_qp(pd, attr, udata); 330 - if (IS_ERR(qp)) 331 - return qp; 330 + qp = rdma_zalloc_drv_obj_numa(dev, ib_qp); 331 + if (!qp) 332 + return ERR_PTR(-ENOMEM); 332 333 333 334 qp->device = dev; 334 335 qp->pd = pd; ··· 338 337 339 338 qp->qp_type = attr->qp_type; 340 339 qp->rwq_ind_tbl = attr->rwq_ind_tbl; 341 - qp->send_cq = attr->send_cq; 342 - qp->recv_cq = attr->recv_cq; 343 340 qp->srq = attr->srq; 344 - qp->rwq_ind_tbl = attr->rwq_ind_tbl; 345 341 qp->event_handler = attr->event_handler; 346 342 qp->port = attr->port_num; 347 343 348 - atomic_set(&qp->usecnt, 0); 349 344 spin_lock_init(&qp->mr_lock); 350 345 INIT_LIST_HEAD(&qp->rdma_mrs); 351 346 INIT_LIST_HEAD(&qp->sig_mrs); ··· 349 352 rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); 350 353 WARN_ONCE(!udata && !caller, "Missing kernel QP owner"); 351 354 rdma_restrack_set_name(&qp->res, udata ? NULL : caller); 355 + ret = dev->ops.create_qp(qp, attr, udata); 356 + if (ret) 357 + goto err_create; 358 + 359 + /* 360 + * TODO: The mlx4 internally overwrites send_cq and recv_cq. 361 + * Unfortunately, it is not an easy task to fix that driver. 362 + */ 363 + qp->send_cq = attr->send_cq; 364 + qp->recv_cq = attr->recv_cq; 365 + 352 366 rdma_restrack_add(&qp->res); 353 367 return qp; 368 + 369 + err_create: 370 + rdma_restrack_put(&qp->res); 371 + kfree(qp); 372 + return ERR_PTR(ret); 373 + 354 374 } 355 375 356 376 struct rdma_dev_addr;
+2
drivers/infiniband/core/device.c
··· 2654 2654 SET_DEVICE_OP(dev_ops, get_hw_stats); 2655 2655 SET_DEVICE_OP(dev_ops, get_link_layer); 2656 2656 SET_DEVICE_OP(dev_ops, get_netdev); 2657 + SET_DEVICE_OP(dev_ops, get_numa_node); 2657 2658 SET_DEVICE_OP(dev_ops, get_port_immutable); 2658 2659 SET_DEVICE_OP(dev_ops, get_vector_affinity); 2659 2660 SET_DEVICE_OP(dev_ops, get_vf_config); ··· 2711 2710 SET_OBJ_SIZE(dev_ops, ib_cq); 2712 2711 SET_OBJ_SIZE(dev_ops, ib_mw); 2713 2712 SET_OBJ_SIZE(dev_ops, ib_pd); 2713 + SET_OBJ_SIZE(dev_ops, ib_qp); 2714 2714 SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table); 2715 2715 SET_OBJ_SIZE(dev_ops, ib_srq); 2716 2716 SET_OBJ_SIZE(dev_ops, ib_ucontext);
+1 -1
drivers/infiniband/core/restrack.c
··· 343 343 rt = &dev->res[res->type]; 344 344 345 345 old = xa_erase(&rt->xa, res->id); 346 - if (res->type == RDMA_RESTRACK_MR || res->type == RDMA_RESTRACK_QP) 346 + if (res->type == RDMA_RESTRACK_MR) 347 347 return; 348 348 WARN_ON(old != res); 349 349
+21 -19
drivers/infiniband/core/verbs.c
··· 1963 1963 rdma_rw_cleanup_mrs(qp); 1964 1964 1965 1965 rdma_counter_unbind_qp(qp, true); 1966 - rdma_restrack_del(&qp->res); 1967 1966 ret = qp->device->ops.destroy_qp(qp, udata); 1968 - if (!ret) { 1969 - if (alt_path_sgid_attr) 1970 - rdma_put_gid_attr(alt_path_sgid_attr); 1971 - if (av_sgid_attr) 1972 - rdma_put_gid_attr(av_sgid_attr); 1973 - if (pd) 1974 - atomic_dec(&pd->usecnt); 1975 - if (scq) 1976 - atomic_dec(&scq->usecnt); 1977 - if (rcq) 1978 - atomic_dec(&rcq->usecnt); 1979 - if (srq) 1980 - atomic_dec(&srq->usecnt); 1981 - if (ind_tbl) 1982 - atomic_dec(&ind_tbl->usecnt); 1983 - if (sec) 1984 - ib_destroy_qp_security_end(sec); 1985 - } else { 1967 + if (ret) { 1986 1968 if (sec) 1987 1969 ib_destroy_qp_security_abort(sec); 1970 + return ret; 1988 1971 } 1989 1972 1973 + if (alt_path_sgid_attr) 1974 + rdma_put_gid_attr(alt_path_sgid_attr); 1975 + if (av_sgid_attr) 1976 + rdma_put_gid_attr(av_sgid_attr); 1977 + if (pd) 1978 + atomic_dec(&pd->usecnt); 1979 + if (scq) 1980 + atomic_dec(&scq->usecnt); 1981 + if (rcq) 1982 + atomic_dec(&rcq->usecnt); 1983 + if (srq) 1984 + atomic_dec(&srq->usecnt); 1985 + if (ind_tbl) 1986 + atomic_dec(&ind_tbl->usecnt); 1987 + if (sec) 1988 + ib_destroy_qp_security_end(sec); 1989 + 1990 + rdma_restrack_del(&qp->res); 1991 + kfree(qp); 1990 1992 return ret; 1991 1993 } 1992 1994 EXPORT_SYMBOL(ib_destroy_qp_user);
+8 -18
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 815 815 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) { 816 816 rc = bnxt_re_destroy_gsi_sqp(qp); 817 817 if (rc) 818 - goto sh_fail; 818 + return rc; 819 819 } 820 820 821 821 mutex_lock(&rdev->qp_lock); ··· 826 826 ib_umem_release(qp->rumem); 827 827 ib_umem_release(qp->sumem); 828 828 829 - kfree(qp); 830 829 return 0; 831 - sh_fail: 832 - return rc; 833 830 } 834 831 835 832 static u8 __from_ib_qp_type(enum ib_qp_type type) ··· 1399 1402 return rc; 1400 1403 } 1401 1404 1402 - struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, 1403 - struct ib_qp_init_attr *qp_init_attr, 1404 - struct ib_udata *udata) 1405 + int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, 1406 + struct ib_udata *udata) 1405 1407 { 1408 + struct ib_pd *ib_pd = ib_qp->pd; 1406 1409 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 1407 1410 struct bnxt_re_dev *rdev = pd->rdev; 1408 1411 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; 1409 - struct bnxt_re_qp *qp; 1412 + struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1410 1413 int rc; 1411 1414 1412 1415 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); 1413 1416 if (!rc) { 1414 1417 rc = -EINVAL; 1415 - goto exit; 1418 + goto fail; 1416 1419 } 1417 1420 1418 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1419 - if (!qp) { 1420 - rc = -ENOMEM; 1421 - goto exit; 1422 - } 1423 1421 qp->rdev = rdev; 1424 1422 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); 1425 1423 if (rc) ··· 1457 1465 mutex_unlock(&rdev->qp_lock); 1458 1466 atomic_inc(&rdev->qp_count); 1459 1467 1460 - return &qp->ib_qp; 1468 + return 0; 1461 1469 qp_destroy: 1462 1470 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); 1463 1471 free_umem: 1464 1472 ib_umem_release(qp->rumem); 1465 1473 ib_umem_release(qp->sumem); 1466 1474 fail: 1467 - kfree(qp); 1468 - exit: 1469 - return ERR_PTR(rc); 1475 + return rc; 1470 1476 } 1471 1477 1472 1478 static u8 
__from_ib_qp_state(enum ib_qp_state state)
+3 -4
drivers/infiniband/hw/bnxt_re/ib_verbs.h
··· 78 78 }; 79 79 80 80 struct bnxt_re_qp { 81 + struct ib_qp ib_qp; 81 82 struct list_head list; 82 83 struct bnxt_re_dev *rdev; 83 - struct ib_qp ib_qp; 84 84 spinlock_t sq_lock; /* protect sq */ 85 85 spinlock_t rq_lock; /* protect rq */ 86 86 struct bnxt_qplib_qp qplib_qp; ··· 179 179 int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); 180 180 int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr, 181 181 const struct ib_recv_wr **bad_recv_wr); 182 - struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd, 183 - struct ib_qp_init_attr *qp_init_attr, 184 - struct ib_udata *udata); 182 + int bnxt_re_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr, 183 + struct ib_udata *udata); 185 184 int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 186 185 int qp_attr_mask, struct ib_udata *udata); 187 186 int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+1
drivers/infiniband/hw/bnxt_re/main.c
··· 709 709 INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah), 710 710 INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq), 711 711 INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd), 712 + INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp), 712 713 INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq), 713 714 INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx), 714 715 };
+2 -3
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 989 989 int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs, 990 990 struct ib_udata *udata); 991 991 int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata); 992 - struct ib_qp *c4iw_create_qp(struct ib_pd *pd, 993 - struct ib_qp_init_attr *attrs, 994 - struct ib_udata *udata); 992 + int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, 993 + struct ib_udata *udata); 995 994 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 996 995 int attr_mask, struct ib_udata *udata); 997 996 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+1
drivers/infiniband/hw/cxgb4/provider.c
··· 499 499 INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq), 500 500 INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw), 501 501 INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd), 502 + INIT_RDMA_OBJ_SIZE(ib_qp, c4iw_qp, ibqp), 502 503 INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq), 503 504 INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext), 504 505 };
+13 -24
drivers/infiniband/hw/cxgb4/qp.c
··· 2103 2103 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); 2104 2104 2105 2105 c4iw_put_wr_wait(qhp->wr_waitp); 2106 - 2107 - kfree(qhp); 2108 2106 return 0; 2109 2107 } 2110 2108 2111 - struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, 2112 - struct ib_udata *udata) 2109 + int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, 2110 + struct ib_udata *udata) 2113 2111 { 2112 + struct ib_pd *pd = qp->pd; 2114 2113 struct c4iw_dev *rhp; 2115 - struct c4iw_qp *qhp; 2114 + struct c4iw_qp *qhp = to_c4iw_qp(qp); 2116 2115 struct c4iw_pd *php; 2117 2116 struct c4iw_cq *schp; 2118 2117 struct c4iw_cq *rchp; ··· 2123 2124 struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm; 2124 2125 struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL; 2125 2126 2126 - pr_debug("ib_pd %p\n", pd); 2127 - 2128 2127 if (attrs->qp_type != IB_QPT_RC || attrs->create_flags) 2129 - return ERR_PTR(-EOPNOTSUPP); 2128 + return -EOPNOTSUPP; 2130 2129 2131 2130 php = to_c4iw_pd(pd); 2132 2131 rhp = php->rhp; 2133 2132 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); 2134 2133 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); 2135 2134 if (!schp || !rchp) 2136 - return ERR_PTR(-EINVAL); 2135 + return -EINVAL; 2137 2136 2138 2137 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) 2139 - return ERR_PTR(-EINVAL); 2138 + return -EINVAL; 2140 2139 2141 2140 if (!attrs->srq) { 2142 2141 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) 2143 - return ERR_PTR(-E2BIG); 2142 + return -E2BIG; 2144 2143 rqsize = attrs->cap.max_recv_wr + 1; 2145 2144 if (rqsize < 8) 2146 2145 rqsize = 8; 2147 2146 } 2148 2147 2149 2148 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) 2150 - return ERR_PTR(-E2BIG); 2149 + return -E2BIG; 2151 2150 sqsize = attrs->cap.max_send_wr + 1; 2152 2151 if (sqsize < 8) 2153 2152 sqsize = 8; 2154 2153 2155 - qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); 
2156 - if (!qhp) 2157 - return ERR_PTR(-ENOMEM); 2158 - 2159 2154 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); 2160 - if (!qhp->wr_waitp) { 2161 - ret = -ENOMEM; 2162 - goto err_free_qhp; 2163 - } 2155 + if (!qhp->wr_waitp) 2156 + return -ENOMEM; 2164 2157 2165 2158 qhp->wq.sq.size = sqsize; 2166 2159 qhp->wq.sq.memsize = ··· 2330 2339 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, 2331 2340 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, 2332 2341 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); 2333 - return &qhp->ibqp; 2342 + return 0; 2334 2343 err_free_ma_sync_key: 2335 2344 kfree(ma_sync_key_mm); 2336 2345 err_free_rq_db_key: ··· 2350 2359 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); 2351 2360 err_free_wr_wait: 2352 2361 c4iw_put_wr_wait(qhp->wr_waitp); 2353 - err_free_qhp: 2354 - kfree(qhp); 2355 - return ERR_PTR(ret); 2362 + return ret; 2356 2363 } 2357 2364 2358 2365 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+2 -3
drivers/infiniband/hw/efa/efa.h
··· 132 132 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); 133 133 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); 134 134 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); 135 - struct ib_qp *efa_create_qp(struct ib_pd *ibpd, 136 - struct ib_qp_init_attr *init_attr, 137 - struct ib_udata *udata); 135 + int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, 136 + struct ib_udata *udata); 138 137 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); 139 138 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, 140 139 struct ib_udata *udata);
+1
drivers/infiniband/hw/efa/efa_main.c
··· 271 271 INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah), 272 272 INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq), 273 273 INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd), 274 + INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp), 274 275 INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext), 275 276 }; 276 277
+9 -19
drivers/infiniband/hw/efa/efa_verbs.c
··· 450 450 qp->rq_size, DMA_TO_DEVICE); 451 451 } 452 452 453 - kfree(qp); 454 453 return 0; 455 454 } 456 455 ··· 608 609 return 0; 609 610 } 610 611 611 - struct ib_qp *efa_create_qp(struct ib_pd *ibpd, 612 - struct ib_qp_init_attr *init_attr, 613 - struct ib_udata *udata) 612 + int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, 613 + struct ib_udata *udata) 614 614 { 615 615 struct efa_com_create_qp_params create_qp_params = {}; 616 616 struct efa_com_create_qp_result create_qp_resp; 617 - struct efa_dev *dev = to_edev(ibpd->device); 617 + struct efa_dev *dev = to_edev(ibqp->device); 618 618 struct efa_ibv_create_qp_resp resp = {}; 619 619 struct efa_ibv_create_qp cmd = {}; 620 + struct efa_qp *qp = to_eqp(ibqp); 620 621 struct efa_ucontext *ucontext; 621 - struct efa_qp *qp; 622 622 int err; 623 623 624 624 ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext, ··· 662 664 goto err_out; 663 665 } 664 666 665 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 666 - if (!qp) { 667 - err = -ENOMEM; 668 - goto err_out; 669 - } 670 - 671 667 create_qp_params.uarn = ucontext->uarn; 672 - create_qp_params.pd = to_epd(ibpd)->pdn; 668 + create_qp_params.pd = to_epd(ibqp->pd)->pdn; 673 669 674 670 if (init_attr->qp_type == IB_QPT_UD) { 675 671 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD; ··· 674 682 "Unsupported qp type %d driver qp type %d\n", 675 683 init_attr->qp_type, cmd.driver_qp_type); 676 684 err = -EOPNOTSUPP; 677 - goto err_free_qp; 685 + goto err_out; 678 686 } 679 687 680 688 ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n", ··· 692 700 qp->rq_size, DMA_TO_DEVICE); 693 701 if (!qp->rq_cpu_addr) { 694 702 err = -ENOMEM; 695 - goto err_free_qp; 703 + goto err_out; 696 704 } 697 705 698 706 ibdev_dbg(&dev->ibdev, ··· 738 746 739 747 ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num); 740 748 741 - return &qp->ibqp; 749 + return 0; 742 750 743 751 err_remove_mmap_entries: 744 752 
efa_qp_user_mmap_entries_remove(qp); ··· 748 756 if (qp->rq_size) 749 757 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr, 750 758 qp->rq_size, DMA_TO_DEVICE); 751 - err_free_qp: 752 - kfree(qp); 753 759 err_out: 754 760 atomic64_inc(&dev->stats.create_qp_err); 755 - return ERR_PTR(err); 761 + return err; 756 762 } 757 763 758 764 static const struct {
+2 -3
drivers/infiniband/hw/hns/hns_roce_device.h
··· 1216 1216 int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata); 1217 1217 int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata); 1218 1218 1219 - struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd, 1220 - struct ib_qp_init_attr *init_attr, 1221 - struct ib_udata *udata); 1219 + int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr, 1220 + struct ib_udata *udata); 1222 1221 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1223 1222 int attr_mask, struct ib_udata *udata); 1224 1223 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+1
drivers/infiniband/hw/hns/hns_roce_main.c
··· 454 454 INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah), 455 455 INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq), 456 456 INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd), 457 + INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp), 457 458 INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext), 458 459 }; 459 460
+8 -20
drivers/infiniband/hw/hns/hns_roce_qp.c
··· 959 959 struct ib_device *ibdev = &hr_dev->ib_dev; 960 960 int ret; 961 961 962 - hr_qp->ibqp.qp_type = init_attr->qp_type; 963 - 964 962 if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline) 965 963 init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline; 966 964 ··· 1119 1121 free_qp_buf(hr_dev, hr_qp); 1120 1122 free_kernel_wrid(hr_qp); 1121 1123 free_qp_db(hr_dev, hr_qp, udata); 1122 - 1123 - kfree(hr_qp); 1124 1124 } 1125 1125 1126 1126 static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, ··· 1150 1154 return -EOPNOTSUPP; 1151 1155 } 1152 1156 1153 - struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, 1154 - struct ib_qp_init_attr *init_attr, 1155 - struct ib_udata *udata) 1157 + int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, 1158 + struct ib_udata *udata) 1156 1159 { 1157 - struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device; 1160 + struct ib_device *ibdev = qp->device; 1158 1161 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); 1159 - struct hns_roce_qp *hr_qp; 1162 + struct hns_roce_qp *hr_qp = to_hr_qp(qp); 1163 + struct ib_pd *pd = qp->pd; 1160 1164 int ret; 1161 1165 1162 1166 ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); 1163 1167 if (ret) 1164 - return ERR_PTR(ret); 1165 - 1166 - hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); 1167 - if (!hr_qp) 1168 - return ERR_PTR(-ENOMEM); 1168 + return ret; 1169 1169 1170 1170 if (init_attr->qp_type == IB_QPT_XRC_TGT) 1171 1171 hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn; ··· 1172 1180 } 1173 1181 1174 1182 ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); 1175 - if (ret) { 1183 + if (ret) 1176 1184 ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", 1177 1185 init_attr->qp_type, ret); 1178 1186 1179 - kfree(hr_qp); 1180 - return ERR_PTR(ret); 1181 - } 1182 - 1183 - return &hr_qp->ibqp; 1187 + return ret; 1184 1188 } 1185 1189 1186 1190 int to_hr_qp_type(int qp_type)
-3
drivers/infiniband/hw/irdma/utils.c
··· 1141 1141 iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa); 1142 1142 iwqp->kqp.dma_mem.va = NULL; 1143 1143 kfree(iwqp->kqp.sq_wrid_mem); 1144 - iwqp->kqp.sq_wrid_mem = NULL; 1145 1144 kfree(iwqp->kqp.rq_wrid_mem); 1146 - iwqp->kqp.rq_wrid_mem = NULL; 1147 - kfree(iwqp); 1148 1145 } 1149 1146 1150 1147 /**
+13 -18
drivers/infiniband/hw/irdma/verbs.c
··· 792 792 793 793 /** 794 794 * irdma_create_qp - create qp 795 - * @ibpd: ptr of pd 795 + * @ibqp: ptr of qp 796 796 * @init_attr: attributes for qp 797 797 * @udata: user data for create qp 798 798 */ 799 - static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd, 800 - struct ib_qp_init_attr *init_attr, 801 - struct ib_udata *udata) 799 + static int irdma_create_qp(struct ib_qp *ibqp, 800 + struct ib_qp_init_attr *init_attr, 801 + struct ib_udata *udata) 802 802 { 803 + struct ib_pd *ibpd = ibqp->pd; 803 804 struct irdma_pd *iwpd = to_iwpd(ibpd); 804 805 struct irdma_device *iwdev = to_iwdev(ibpd->device); 805 806 struct irdma_pci_f *rf = iwdev->rf; 806 - struct irdma_qp *iwqp; 807 + struct irdma_qp *iwqp = to_iwqp(ibqp); 807 808 struct irdma_create_qp_req req; 808 809 struct irdma_create_qp_resp uresp = {}; 809 810 u32 qp_num = 0; ··· 821 820 822 821 err_code = irdma_validate_qp_attrs(init_attr, iwdev); 823 822 if (err_code) 824 - return ERR_PTR(err_code); 823 + return err_code; 825 824 826 825 sq_size = init_attr->cap.max_send_wr; 827 826 rq_size = init_attr->cap.max_recv_wr; ··· 833 832 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; 834 833 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; 835 834 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; 836 - 837 - iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL); 838 - if (!iwqp) 839 - return ERR_PTR(-ENOMEM); 840 835 841 836 qp = &iwqp->sc_qp; 842 837 qp->qp_uk.back_qp = iwqp; ··· 846 849 iwqp->q2_ctx_mem.size, 847 850 &iwqp->q2_ctx_mem.pa, 848 851 GFP_KERNEL); 849 - if (!iwqp->q2_ctx_mem.va) { 850 - err_code = -ENOMEM; 851 - goto error; 852 - } 852 + if (!iwqp->q2_ctx_mem.va) 853 + return -ENOMEM; 853 854 854 855 init_info.q2 = iwqp->q2_ctx_mem.va; 855 856 init_info.q2_pa = iwqp->q2_ctx_mem.pa; ··· 996 1001 if (err_code) { 997 1002 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); 998 1003 irdma_destroy_qp(&iwqp->ibqp, udata); 999 - 
return ERR_PTR(err_code); 1004 + return err_code; 1000 1005 } 1001 1006 } 1002 1007 1003 1008 init_completion(&iwqp->free_qp); 1004 - return &iwqp->ibqp; 1009 + return 0; 1005 1010 1006 1011 error: 1007 1012 irdma_free_qp_rsrc(iwqp); 1008 - 1009 - return ERR_PTR(err_code); 1013 + return err_code; 1010 1014 } 1011 1015 1012 1016 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp) ··· 4400 4406 INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah), 4401 4407 INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq), 4402 4408 INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw), 4409 + INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp), 4403 4410 }; 4404 4411 4405 4412 /**
+1
drivers/infiniband/hw/mlx4/main.c
··· 2577 2577 INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah), 2578 2578 INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq), 2579 2579 INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd), 2580 + INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp), 2580 2581 INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq), 2581 2582 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext), 2582 2583 };
+2 -3
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 792 792 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, 793 793 const struct ib_recv_wr **bad_wr); 794 794 795 - struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, 796 - struct ib_qp_init_attr *init_attr, 797 - struct ib_udata *udata); 795 + int mlx4_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, 796 + struct ib_udata *udata); 798 797 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); 799 798 void mlx4_ib_drain_sq(struct ib_qp *qp); 800 799 void mlx4_ib_drain_rq(struct ib_qp *qp);
+9 -16
drivers/infiniband/hw/mlx4/qp.c
··· 1578 1578 return 0; 1579 1579 } 1580 1580 1581 - struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, 1582 - struct ib_qp_init_attr *init_attr, 1583 - struct ib_udata *udata) { 1584 - struct ib_device *device = pd ? pd->device : init_attr->xrcd->device; 1581 + int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, 1582 + struct ib_udata *udata) 1583 + { 1584 + struct ib_device *device = ibqp->device; 1585 1585 struct mlx4_ib_dev *dev = to_mdev(device); 1586 - struct mlx4_ib_qp *qp; 1586 + struct mlx4_ib_qp *qp = to_mqp(ibqp); 1587 + struct ib_pd *pd = ibqp->pd; 1587 1588 int ret; 1588 - 1589 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1590 - if (!qp) 1591 - return ERR_PTR(-ENOMEM); 1592 1589 1593 1590 mutex_init(&qp->mutex); 1594 1591 ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); 1595 - if (ret) { 1596 - kfree(qp); 1597 - return ERR_PTR(ret); 1598 - } 1592 + if (ret) 1593 + return ret; 1599 1594 1600 1595 if (init_attr->qp_type == IB_QPT_GSI && 1601 1596 !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) { ··· 1613 1618 init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI; 1614 1619 } 1615 1620 } 1616 - return &qp->ibqp; 1621 + return 0; 1617 1622 } 1618 1623 1619 1624 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) ··· 1641 1646 } 1642 1647 1643 1648 kfree(mqp->sqp); 1644 - kfree(mqp); 1645 - 1646 1649 return 0; 1647 1650 } 1648 1651
-2
drivers/infiniband/hw/mlx5/gsi.c
··· 193 193 194 194 kfree(gsi->outstanding_wrs); 195 195 kfree(gsi->tx_qps); 196 - kfree(mqp); 197 - 198 196 return 0; 199 197 } 200 198
+1
drivers/infiniband/hw/mlx5/main.c
··· 3805 3805 INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs), 3806 3806 INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq), 3807 3807 INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), 3808 + INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp), 3808 3809 INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), 3809 3810 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext), 3810 3811 };
+2 -3
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 1219 1219 const struct ib_recv_wr **bad_wr); 1220 1220 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); 1221 1221 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp); 1222 - struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, 1223 - struct ib_qp_init_attr *init_attr, 1224 - struct ib_udata *udata); 1222 + int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, 1223 + struct ib_udata *udata); 1225 1224 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1226 1225 int attr_mask, struct ib_udata *udata); 1227 1226 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+17 -39
drivers/infiniband/hw/mlx5/qp.c
··· 3114 3114 } 3115 3115 3116 3116 kfree(mqp->dct.in); 3117 - kfree(mqp); 3118 3117 return 0; 3119 3118 } 3120 3119 ··· 3151 3152 return ret ? 0 : -EINVAL; 3152 3153 } 3153 3154 3154 - struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, 3155 - struct ib_udata *udata) 3155 + int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, 3156 + struct ib_udata *udata) 3156 3157 { 3157 3158 struct mlx5_create_qp_params params = {}; 3158 - struct mlx5_ib_dev *dev; 3159 - struct mlx5_ib_qp *qp; 3159 + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3160 + struct mlx5_ib_qp *qp = to_mqp(ibqp); 3161 + struct ib_pd *pd = ibqp->pd; 3160 3162 enum ib_qp_type type; 3161 3163 int err; 3162 3164 3163 - dev = pd ? to_mdev(pd->device) : 3164 - to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device); 3165 - 3166 3165 err = check_qp_type(dev, attr, &type); 3167 3166 if (err) 3168 - return ERR_PTR(err); 3167 + return err; 3169 3168 3170 3169 err = check_valid_flow(dev, pd, attr, udata); 3171 3170 if (err) 3172 - return ERR_PTR(err); 3171 + return err; 3173 3172 3174 3173 params.udata = udata; 3175 3174 params.uidx = MLX5_IB_DEFAULT_UIDX; ··· 3177 3180 if (udata) { 3178 3181 err = process_udata_size(dev, &params); 3179 3182 if (err) 3180 - return ERR_PTR(err); 3183 + return err; 3181 3184 3182 3185 err = check_ucmd_data(dev, &params); 3183 3186 if (err) 3184 - return ERR_PTR(err); 3187 + return err; 3185 3188 3186 3189 params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL); 3187 3190 if (!params.ucmd) 3188 - return ERR_PTR(-ENOMEM); 3191 + return -ENOMEM; 3189 3192 3190 3193 err = ib_copy_from_udata(params.ucmd, udata, params.inlen); 3191 3194 if (err) 3192 3195 goto free_ucmd; 3193 - } 3194 - 3195 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 3196 - if (!qp) { 3197 - err = -ENOMEM; 3198 - goto free_ucmd; 3199 3196 } 3200 3197 3201 3198 mutex_init(&qp->mutex); ··· 3197 3206 if (udata) { 3198 3207 err = process_vendor_flags(dev, qp, params.ucmd, attr); 3199 3208 if 
(err) 3200 - goto free_qp; 3209 + goto free_ucmd; 3201 3210 3202 3211 err = get_qp_uidx(qp, &params); 3203 3212 if (err) 3204 - goto free_qp; 3213 + goto free_ucmd; 3205 3214 } 3206 3215 err = process_create_flags(dev, qp, attr); 3207 3216 if (err) 3208 - goto free_qp; 3217 + goto free_ucmd; 3209 3218 3210 3219 err = check_qp_attr(dev, qp, attr); 3211 3220 if (err) 3212 - goto free_qp; 3221 + goto free_ucmd; 3213 3222 3214 3223 err = create_qp(dev, pd, qp, &params); 3215 3224 if (err) 3216 - goto free_qp; 3225 + goto free_ucmd; 3217 3226 3218 3227 kfree(params.ucmd); 3219 3228 params.ucmd = NULL; ··· 3228 3237 if (err) 3229 3238 goto destroy_qp; 3230 3239 3231 - return &qp->ibqp; 3240 + return 0; 3232 3241 3233 3242 destroy_qp: 3234 3243 switch (qp->type) { ··· 3239 3248 mlx5_ib_destroy_gsi(qp); 3240 3249 break; 3241 3250 default: 3242 - /* 3243 - * These lines below are temp solution till QP allocation 3244 - * will be moved to be under IB/core responsiblity. 3245 - */ 3246 - qp->ibqp.send_cq = attr->send_cq; 3247 - qp->ibqp.recv_cq = attr->recv_cq; 3248 - qp->ibqp.pd = pd; 3249 3251 destroy_qp_common(dev, qp, udata); 3250 3252 } 3251 3253 3252 - qp = NULL; 3253 - free_qp: 3254 - kfree(qp); 3255 3254 free_ucmd: 3256 3255 kfree(params.ucmd); 3257 - return ERR_PTR(err); 3256 + return err; 3258 3257 } 3259 3258 3260 3259 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) ··· 3259 3278 return mlx5_ib_destroy_dct(mqp); 3260 3279 3261 3280 destroy_qp_common(dev, mqp, udata); 3262 - 3263 - kfree(mqp); 3264 - 3265 3281 return 0; 3266 3282 } 3267 3283
+30 -47
drivers/infiniband/hw/mthca/mthca_provider.c
··· 459 459 return 0; 460 460 } 461 461 462 - static struct ib_qp *mthca_create_qp(struct ib_pd *pd, 463 - struct ib_qp_init_attr *init_attr, 464 - struct ib_udata *udata) 462 + static int mthca_create_qp(struct ib_qp *ibqp, 463 + struct ib_qp_init_attr *init_attr, 464 + struct ib_udata *udata) 465 465 { 466 466 struct mthca_ucontext *context = rdma_udata_to_drv_context( 467 467 udata, struct mthca_ucontext, ibucontext); 468 468 struct mthca_create_qp ucmd; 469 - struct mthca_qp *qp; 469 + struct mthca_qp *qp = to_mqp(ibqp); 470 + struct mthca_dev *dev = to_mdev(ibqp->device); 470 471 int err; 471 472 472 473 if (init_attr->create_flags) 473 - return ERR_PTR(-EOPNOTSUPP); 474 + return -EOPNOTSUPP; 474 475 475 476 switch (init_attr->qp_type) { 476 477 case IB_QPT_RC: 477 478 case IB_QPT_UC: 478 479 case IB_QPT_UD: 479 480 { 480 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 481 - if (!qp) 482 - return ERR_PTR(-ENOMEM); 483 - 484 481 if (udata) { 485 - if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { 486 - kfree(qp); 487 - return ERR_PTR(-EFAULT); 488 - } 482 + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) 483 + return -EFAULT; 489 484 490 - err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 485 + err = mthca_map_user_db(dev, &context->uar, 491 486 context->db_tab, 492 - ucmd.sq_db_index, ucmd.sq_db_page); 493 - if (err) { 494 - kfree(qp); 495 - return ERR_PTR(err); 496 - } 487 + ucmd.sq_db_index, 488 + ucmd.sq_db_page); 489 + if (err) 490 + return err; 497 491 498 - err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 492 + err = mthca_map_user_db(dev, &context->uar, 499 493 context->db_tab, 500 - ucmd.rq_db_index, ucmd.rq_db_page); 494 + ucmd.rq_db_index, 495 + ucmd.rq_db_page); 501 496 if (err) { 502 - mthca_unmap_user_db(to_mdev(pd->device), 503 - &context->uar, 497 + mthca_unmap_user_db(dev, &context->uar, 504 498 context->db_tab, 505 499 ucmd.sq_db_index); 506 - kfree(qp); 507 - return ERR_PTR(err); 500 + return err; 508 501 } 509 502 510 
503 qp->mr.ibmr.lkey = ucmd.lkey; ··· 505 512 qp->rq.db_index = ucmd.rq_db_index; 506 513 } 507 514 508 - err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), 515 + err = mthca_alloc_qp(dev, to_mpd(ibqp->pd), 509 516 to_mcq(init_attr->send_cq), 510 517 to_mcq(init_attr->recv_cq), 511 518 init_attr->qp_type, init_attr->sq_sig_type, 512 519 &init_attr->cap, qp, udata); 513 520 514 521 if (err && udata) { 515 - mthca_unmap_user_db(to_mdev(pd->device), 516 - &context->uar, 517 - context->db_tab, 522 + mthca_unmap_user_db(dev, &context->uar, context->db_tab, 518 523 ucmd.sq_db_index); 519 - mthca_unmap_user_db(to_mdev(pd->device), 520 - &context->uar, 521 - context->db_tab, 524 + mthca_unmap_user_db(dev, &context->uar, context->db_tab, 522 525 ucmd.rq_db_index); 523 526 } 524 527 ··· 524 535 case IB_QPT_SMI: 525 536 case IB_QPT_GSI: 526 537 { 527 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 528 - if (!qp) 529 - return ERR_PTR(-ENOMEM); 530 538 qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); 531 - if (!qp->sqp) { 532 - kfree(qp); 533 - return ERR_PTR(-ENOMEM); 534 - } 539 + if (!qp->sqp) 540 + return -ENOMEM; 535 541 536 542 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 
0 : 1; 537 543 538 - err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), 544 + err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd), 539 545 to_mcq(init_attr->send_cq), 540 546 to_mcq(init_attr->recv_cq), 541 547 init_attr->sq_sig_type, &init_attr->cap, 542 - qp->ibqp.qp_num, init_attr->port_num, 543 - qp, udata); 548 + qp->ibqp.qp_num, init_attr->port_num, qp, 549 + udata); 544 550 break; 545 551 } 546 552 default: 547 553 /* Don't support raw QPs */ 548 - return ERR_PTR(-EOPNOTSUPP); 554 + return -EOPNOTSUPP; 549 555 } 550 556 551 557 if (err) { 552 558 kfree(qp->sqp); 553 - kfree(qp); 554 - return ERR_PTR(err); 559 + return err; 555 560 } 556 561 557 562 init_attr->cap.max_send_wr = qp->sq.max; ··· 554 571 init_attr->cap.max_recv_sge = qp->rq.max_gs; 555 572 init_attr->cap.max_inline_data = qp->max_inline_data; 556 573 557 - return &qp->ibqp; 574 + return 0; 558 575 } 559 576 560 577 static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) ··· 577 594 } 578 595 mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); 579 596 kfree(to_mqp(qp)->sqp); 580 - kfree(to_mqp(qp)); 581 597 return 0; 582 598 } 583 599 ··· 1103 1121 INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah), 1104 1122 INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq), 1105 1123 INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd), 1124 + INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp), 1106 1125 INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext), 1107 1126 }; 1108 1127
+1
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 185 185 INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah), 186 186 INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq), 187 187 INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd), 188 + INIT_RDMA_OBJ_SIZE(ib_qp, ocrdma_qp, ibqp), 188 189 INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext), 189 190 }; 190 191
+9 -16
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 1288 1288 } 1289 1289 } 1290 1290 1291 - struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, 1292 - struct ib_qp_init_attr *attrs, 1293 - struct ib_udata *udata) 1291 + int ocrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, 1292 + struct ib_udata *udata) 1294 1293 { 1295 1294 int status; 1295 + struct ib_pd *ibpd = ibqp->pd; 1296 1296 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); 1297 - struct ocrdma_qp *qp; 1298 - struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); 1297 + struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1298 + struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device); 1299 1299 struct ocrdma_create_qp_ureq ureq; 1300 1300 u16 dpp_credit_lmt, dpp_offset; 1301 1301 1302 1302 if (attrs->create_flags) 1303 - return ERR_PTR(-EOPNOTSUPP); 1303 + return -EOPNOTSUPP; 1304 1304 1305 1305 status = ocrdma_check_qp_params(ibpd, dev, attrs, udata); 1306 1306 if (status) ··· 1309 1309 memset(&ureq, 0, sizeof(ureq)); 1310 1310 if (udata) { 1311 1311 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) 1312 - return ERR_PTR(-EFAULT); 1313 - } 1314 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1315 - if (!qp) { 1316 - status = -ENOMEM; 1317 - goto gen_err; 1312 + return -EFAULT; 1318 1313 } 1319 1314 ocrdma_set_qp_init_params(qp, pd, attrs); 1320 1315 if (udata == NULL) ··· 1344 1349 ocrdma_store_gsi_qp_cq(dev, attrs); 1345 1350 qp->ibqp.qp_num = qp->id; 1346 1351 mutex_unlock(&dev->dev_lock); 1347 - return &qp->ibqp; 1352 + return 0; 1348 1353 1349 1354 cpy_err: 1350 1355 ocrdma_del_qpn_map(dev, qp); ··· 1354 1359 mutex_unlock(&dev->dev_lock); 1355 1360 kfree(qp->wqe_wr_id_tbl); 1356 1361 kfree(qp->rqe_wr_id_tbl); 1357 - kfree(qp); 1358 1362 pr_err("%s(%d) error=%d\n", __func__, dev->id, status); 1359 1363 gen_err: 1360 - return ERR_PTR(status); 1364 + return status; 1361 1365 } 1362 1366 1363 1367 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ··· 1725 1731 1726 1732 kfree(qp->wqe_wr_id_tbl); 1727 1733 kfree(qp->rqe_wr_id_tbl); 1728 - 
kfree(qp); 1729 1734 return 0; 1730 1735 } 1731 1736
+2 -3
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
··· 75 75 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); 76 76 int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); 77 77 78 - struct ib_qp *ocrdma_create_qp(struct ib_pd *, 79 - struct ib_qp_init_attr *attrs, 80 - struct ib_udata *); 78 + int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, 79 + struct ib_udata *udata); 81 80 int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, 82 81 int attr_mask); 83 82 int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+1
drivers/infiniband/hw/qedr/main.c
··· 233 233 INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah), 234 234 INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq), 235 235 INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd), 236 + INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp), 236 237 INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq), 237 238 INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd), 238 239 INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
+6 -7
drivers/infiniband/hw/qedr/qedr_roce_cm.c
··· 319 319 return rc; 320 320 } 321 321 322 - struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, 323 - struct ib_qp_init_attr *attrs, 324 - struct qedr_qp *qp) 322 + int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, 323 + struct qedr_qp *qp) 325 324 { 326 325 int rc; 327 326 328 327 rc = qedr_check_gsi_qp_attrs(dev, attrs); 329 328 if (rc) 330 - return ERR_PTR(rc); 329 + return rc; 331 330 332 331 rc = qedr_ll2_start(dev, attrs, qp); 333 332 if (rc) { 334 333 DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc); 335 - return ERR_PTR(rc); 334 + return rc; 336 335 } 337 336 338 337 /* create QP */ ··· 358 359 359 360 DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp); 360 361 361 - return &qp->ibqp; 362 + return 0; 362 363 363 364 err: 364 365 kfree(qp->rqe_wr_id); ··· 367 368 if (rc) 368 369 DP_ERR(dev, "create gsi qp: failed destroy on create\n"); 369 370 370 - return ERR_PTR(-ENOMEM); 371 + return -ENOMEM; 371 372 } 372 373 373 374 int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+2 -3
drivers/infiniband/hw/qedr/qedr_roce_cm.h
··· 50 50 const struct ib_recv_wr **bad_wr); 51 51 int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 52 52 const struct ib_send_wr **bad_wr); 53 - struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, 54 - struct ib_qp_init_attr *attrs, 55 - struct qedr_qp *qp); 53 + int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, 54 + struct qedr_qp *qp); 56 55 void qedr_store_gsi_qp_cq(struct qedr_dev *dev, 57 56 struct qedr_qp *qp, struct ib_qp_init_attr *attrs); 58 57 int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+15 -34
drivers/infiniband/hw/qedr/verbs.c
··· 2239 2239 return 0; 2240 2240 } 2241 2241 2242 - struct ib_qp *qedr_create_qp(struct ib_pd *ibpd, 2243 - struct ib_qp_init_attr *attrs, 2244 - struct ib_udata *udata) 2242 + int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, 2243 + struct ib_udata *udata) 2245 2244 { 2246 2245 struct qedr_xrcd *xrcd = NULL; 2247 - struct qedr_pd *pd = NULL; 2248 - struct qedr_dev *dev; 2249 - struct qedr_qp *qp; 2250 - struct ib_qp *ibqp; 2246 + struct ib_pd *ibpd = ibqp->pd; 2247 + struct qedr_pd *pd = get_qedr_pd(ibpd); 2248 + struct qedr_dev *dev = get_qedr_dev(ibqp->device); 2249 + struct qedr_qp *qp = get_qedr_qp(ibqp); 2251 2250 int rc = 0; 2252 2251 2253 2252 if (attrs->create_flags) 2254 - return ERR_PTR(-EOPNOTSUPP); 2253 + return -EOPNOTSUPP; 2255 2254 2256 - if (attrs->qp_type == IB_QPT_XRC_TGT) { 2255 + if (attrs->qp_type == IB_QPT_XRC_TGT) 2257 2256 xrcd = get_qedr_xrcd(attrs->xrcd); 2258 - dev = get_qedr_dev(xrcd->ibxrcd.device); 2259 - } else { 2257 + else 2260 2258 pd = get_qedr_pd(ibpd); 2261 - dev = get_qedr_dev(ibpd->device); 2262 - } 2263 2259 2264 2260 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", 2265 2261 udata ? "user library" : "kernel", pd); 2266 2262 2267 2263 rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata); 2268 2264 if (rc) 2269 - return ERR_PTR(rc); 2265 + return rc; 2270 2266 2271 2267 DP_DEBUG(dev, QEDR_MSG_QP, 2272 2268 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", ··· 2272 2276 get_qedr_cq(attrs->recv_cq), 2273 2277 attrs->recv_cq ? 
get_qedr_cq(attrs->recv_cq)->icid : 0); 2274 2278 2275 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 2276 - if (!qp) { 2277 - DP_ERR(dev, "create qp: failed allocating memory\n"); 2278 - return ERR_PTR(-ENOMEM); 2279 - } 2280 - 2281 2279 qedr_set_common_qp_params(dev, qp, pd, attrs); 2282 2280 2283 - if (attrs->qp_type == IB_QPT_GSI) { 2284 - ibqp = qedr_create_gsi_qp(dev, attrs, qp); 2285 - if (IS_ERR(ibqp)) 2286 - kfree(qp); 2287 - return ibqp; 2288 - } 2281 + if (attrs->qp_type == IB_QPT_GSI) 2282 + return qedr_create_gsi_qp(dev, attrs, qp); 2289 2283 2290 2284 if (udata || xrcd) 2291 2285 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs); ··· 2283 2297 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs); 2284 2298 2285 2299 if (rc) 2286 - goto out_free_qp; 2300 + return rc; 2287 2301 2288 2302 qp->ibqp.qp_num = qp->qp_id; 2289 2303 ··· 2293 2307 goto out_free_qp_resources; 2294 2308 } 2295 2309 2296 - return &qp->ibqp; 2310 + return 0; 2297 2311 2298 2312 out_free_qp_resources: 2299 2313 qedr_free_qp_resources(dev, qp, udata); 2300 - out_free_qp: 2301 - kfree(qp); 2302 - 2303 - return ERR_PTR(-EFAULT); 2314 + return -EFAULT; 2304 2315 } 2305 2316 2306 2317 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) ··· 2857 2874 2858 2875 if (rdma_protocol_iwarp(&dev->ibdev, 1)) 2859 2876 qedr_iw_qp_rem_ref(&qp->ibqp); 2860 - else 2861 - kfree(qp); 2862 2877 2863 2878 return 0; 2864 2879 }
+2 -2
drivers/infiniband/hw/qedr/verbs.h
··· 56 56 int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); 57 57 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); 58 58 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); 59 - struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, 60 - struct ib_udata *); 59 + int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, 60 + struct ib_udata *udata); 61 61 int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, 62 62 int attr_mask, struct ib_udata *udata); 63 63 int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
+1
drivers/infiniband/hw/usnic/usnic_ib_main.c
··· 360 360 .reg_user_mr = usnic_ib_reg_mr, 361 361 INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd), 362 362 INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq), 363 + INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp), 363 364 INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext), 364 365 }; 365 366
+12 -22
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
··· 665 665 return 0; 666 666 } 667 667 668 - struct usnic_ib_qp_grp * 669 - usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, 670 - struct usnic_ib_pd *pd, 671 - struct usnic_vnic_res_spec *res_spec, 672 - struct usnic_transport_spec *transport_spec) 668 + int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp, 669 + struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, 670 + struct usnic_ib_pd *pd, 671 + struct usnic_vnic_res_spec *res_spec, 672 + struct usnic_transport_spec *transport_spec) 673 673 { 674 - struct usnic_ib_qp_grp *qp_grp; 675 674 int err; 676 675 enum usnic_transport_type transport = transport_spec->trans_type; 677 676 struct usnic_ib_qp_grp_flow *qp_flow; ··· 683 684 usnic_err("Spec does not meet minimum req for transport %d\n", 684 685 transport); 685 686 log_spec(res_spec); 686 - return ERR_PTR(err); 687 + return err; 687 688 } 688 - 689 - qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC); 690 - if (!qp_grp) 691 - return NULL; 692 689 693 690 qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec, 694 691 qp_grp); 695 - if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) { 696 - err = qp_grp->res_chunk_list ? 697 - PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM; 698 - goto out_free_qp_grp; 699 - } 692 + if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) 693 + return qp_grp->res_chunk_list ? 
694 + PTR_ERR(qp_grp->res_chunk_list) : 695 + -ENOMEM; 700 696 701 697 err = qp_grp_and_vf_bind(vf, pd, qp_grp); 702 698 if (err) ··· 718 724 719 725 usnic_ib_sysfs_qpn_add(qp_grp); 720 726 721 - return qp_grp; 727 + return 0; 722 728 723 729 out_release_flow: 724 730 release_and_remove_flow(qp_flow); ··· 726 732 qp_grp_and_vf_unbind(qp_grp); 727 733 out_free_res: 728 734 free_qp_grp_res(qp_grp->res_chunk_list); 729 - out_free_qp_grp: 730 - kfree(qp_grp); 731 - 732 - return ERR_PTR(err); 735 + return err; 733 736 } 734 737 735 738 void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) ··· 739 748 usnic_ib_sysfs_qpn_remove(qp_grp); 740 749 qp_grp_and_vf_unbind(qp_grp); 741 750 free_qp_grp_res(qp_grp->res_chunk_list); 742 - kfree(qp_grp); 743 751 } 744 752 745 753 struct usnic_vnic_res_chunk*
+5 -5
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
··· 89 89 const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state); 90 90 int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz); 91 91 int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz); 92 - struct usnic_ib_qp_grp * 93 - usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, 94 - struct usnic_ib_pd *pd, 95 - struct usnic_vnic_res_spec *res_spec, 96 - struct usnic_transport_spec *trans_spec); 92 + int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp, 93 + struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, 94 + struct usnic_ib_pd *pd, 95 + struct usnic_vnic_res_spec *res_spec, 96 + struct usnic_transport_spec *trans_spec); 97 97 void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp); 98 98 int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp, 99 99 enum ib_qp_state new_state,
+32 -37
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 168 168 return 0; 169 169 } 170 170 171 - static struct usnic_ib_qp_grp* 172 - find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, 173 - struct usnic_ib_pd *pd, 174 - struct usnic_transport_spec *trans_spec, 175 - struct usnic_vnic_res_spec *res_spec) 171 + static int 172 + find_free_vf_and_create_qp_grp(struct ib_qp *qp, 173 + struct usnic_transport_spec *trans_spec, 174 + struct usnic_vnic_res_spec *res_spec) 176 175 { 176 + struct usnic_ib_dev *us_ibdev = to_usdev(qp->device); 177 + struct usnic_ib_pd *pd = to_upd(qp->pd); 177 178 struct usnic_ib_vf *vf; 178 179 struct usnic_vnic *vnic; 179 - struct usnic_ib_qp_grp *qp_grp; 180 + struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(qp); 180 181 struct device *dev, **dev_list; 181 - int i; 182 + int i, ret; 182 183 183 184 BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); 184 185 185 186 if (list_empty(&us_ibdev->vf_dev_list)) { 186 187 usnic_info("No vfs to allocate\n"); 187 - return NULL; 188 + return -ENOMEM; 188 189 } 189 190 190 191 if (usnic_ib_share_vf) { 191 192 /* Try to find resouces on a used vf which is in pd */ 192 193 dev_list = usnic_uiom_get_dev_list(pd->umem_pd); 193 194 if (IS_ERR(dev_list)) 194 - return ERR_CAST(dev_list); 195 + return PTR_ERR(dev_list); 195 196 for (i = 0; dev_list[i]; i++) { 196 197 dev = dev_list[i]; 197 198 vf = dev_get_drvdata(dev); ··· 203 202 dev_name(&us_ibdev->ib_dev.dev), 204 203 pci_name(usnic_vnic_get_pdev( 205 204 vnic))); 206 - qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, 207 - vf, pd, 208 - res_spec, 209 - trans_spec); 205 + ret = usnic_ib_qp_grp_create(qp_grp, 206 + us_ibdev->ufdev, 207 + vf, pd, res_spec, 208 + trans_spec); 210 209 211 210 spin_unlock(&vf->lock); 212 211 goto qp_grp_check; ··· 224 223 vnic = vf->vnic; 225 224 if (vf->qp_grp_ref_cnt == 0 && 226 225 usnic_vnic_check_room(vnic, res_spec) == 0) { 227 - qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, 228 - pd, res_spec, 229 - trans_spec); 226 + ret = usnic_ib_qp_grp_create(qp_grp, 
us_ibdev->ufdev, 227 + vf, pd, res_spec, 228 + trans_spec); 230 229 231 230 spin_unlock(&vf->lock); 232 231 goto qp_grp_check; ··· 236 235 237 236 usnic_info("No free qp grp found on %s\n", 238 237 dev_name(&us_ibdev->ib_dev.dev)); 239 - return ERR_PTR(-ENOMEM); 238 + return -ENOMEM; 240 239 241 240 qp_grp_check: 242 - if (IS_ERR_OR_NULL(qp_grp)) { 241 + if (ret) { 243 242 usnic_err("Failed to allocate qp_grp\n"); 244 243 if (usnic_ib_share_vf) 245 244 usnic_uiom_free_dev_list(dev_list); 246 - return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM); 247 245 } 248 - return qp_grp; 246 + return ret; 249 247 } 250 248 251 249 static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) ··· 458 458 return 0; 459 459 } 460 460 461 - struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, 462 - struct ib_qp_init_attr *init_attr, 463 - struct ib_udata *udata) 461 + int usnic_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, 462 + struct ib_udata *udata) 464 463 { 465 464 int err; 466 465 struct usnic_ib_dev *us_ibdev; 467 - struct usnic_ib_qp_grp *qp_grp; 466 + struct usnic_ib_qp_grp *qp_grp = to_uqp_grp(ibqp); 468 467 struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context( 469 468 udata, struct usnic_ib_ucontext, ibucontext); 470 469 int cq_cnt; ··· 473 474 474 475 usnic_dbg("\n"); 475 476 476 - us_ibdev = to_usdev(pd->device); 477 + us_ibdev = to_usdev(ibqp->device); 477 478 478 479 if (init_attr->create_flags) 479 - return ERR_PTR(-EOPNOTSUPP); 480 + return -EOPNOTSUPP; 480 481 481 482 err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); 482 483 if (err) { 483 484 usnic_err("%s: cannot copy udata for create_qp\n", 484 485 dev_name(&us_ibdev->ib_dev.dev)); 485 - return ERR_PTR(-EINVAL); 486 + return -EINVAL; 486 487 } 487 488 488 489 err = create_qp_validate_user_data(cmd); 489 490 if (err) { 490 491 usnic_err("%s: Failed to validate user data\n", 491 492 dev_name(&us_ibdev->ib_dev.dev)); 492 - return ERR_PTR(-EINVAL); 493 + return -EINVAL; 493 494 } 494 
495 495 496 if (init_attr->qp_type != IB_QPT_UD) { 496 497 usnic_err("%s asked to make a non-UD QP: %d\n", 497 498 dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type); 498 - return ERR_PTR(-EOPNOTSUPP); 499 + return -EOPNOTSUPP; 499 500 } 500 501 501 502 trans_spec = cmd.spec; ··· 503 504 cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2; 504 505 res_spec = min_transport_spec[trans_spec.trans_type]; 505 506 usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt); 506 - qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd), 507 - &trans_spec, 508 - &res_spec); 509 - if (IS_ERR_OR_NULL(qp_grp)) { 510 - err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM; 507 + err = find_free_vf_and_create_qp_grp(ibqp, &trans_spec, &res_spec); 508 + if (err) 511 509 goto out_release_mutex; 512 - } 513 510 514 511 err = usnic_ib_fill_create_qp_resp(qp_grp, udata); 515 512 if (err) { ··· 517 522 list_add_tail(&qp_grp->link, &ucontext->qp_grp_list); 518 523 usnic_ib_log_vf(qp_grp->vf); 519 524 mutex_unlock(&us_ibdev->usdev_lock); 520 - return &qp_grp->ibqp; 525 + return 0; 521 526 522 527 out_release_qp_grp: 523 528 qp_grp_destroy(qp_grp); 524 529 out_release_mutex: 525 530 mutex_unlock(&us_ibdev->usdev_lock); 526 - return ERR_PTR(err); 531 + return err; 527 532 } 528 533 529 534 int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+2 -3
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
··· 50 50 union ib_gid *gid); 51 51 int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); 52 52 int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); 53 - struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, 54 - struct ib_qp_init_attr *init_attr, 55 - struct ib_udata *udata); 53 + int usnic_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, 54 + struct ib_udata *udata); 56 55 int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); 57 56 int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 58 57 int attr_mask, struct ib_udata *udata);
+1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 185 185 INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah), 186 186 INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq), 187 187 INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd), 188 + INIT_RDMA_OBJ_SIZE(ib_qp, pvrdma_qp, ibqp), 188 189 INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext), 189 190 }; 190 191
+21 -32
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
··· 182 182 183 183 /** 184 184 * pvrdma_create_qp - create queue pair 185 - * @pd: protection domain 185 + * @ibqp: queue pair 186 186 * @init_attr: queue pair attributes 187 187 * @udata: user data 188 188 * 189 - * @return: the ib_qp pointer on success, otherwise returns an errno. 189 + * @return: the 0 on success, otherwise returns an errno. 190 190 */ 191 - struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, 192 - struct ib_qp_init_attr *init_attr, 193 - struct ib_udata *udata) 191 + int pvrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, 192 + struct ib_udata *udata) 194 193 { 195 - struct pvrdma_qp *qp = NULL; 196 - struct pvrdma_dev *dev = to_vdev(pd->device); 194 + struct pvrdma_qp *qp = to_vqp(ibqp); 195 + struct pvrdma_dev *dev = to_vdev(ibqp->device); 197 196 union pvrdma_cmd_req req; 198 197 union pvrdma_cmd_resp rsp; 199 198 struct pvrdma_cmd_create_qp *cmd = &req.create_qp; ··· 208 209 dev_warn(&dev->pdev->dev, 209 210 "invalid create queuepair flags %#x\n", 210 211 init_attr->create_flags); 211 - return ERR_PTR(-EOPNOTSUPP); 212 + return -EOPNOTSUPP; 212 213 } 213 214 214 215 if (init_attr->qp_type != IB_QPT_RC && ··· 216 217 init_attr->qp_type != IB_QPT_GSI) { 217 218 dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n", 218 219 init_attr->qp_type); 219 - return ERR_PTR(-EOPNOTSUPP); 220 + return -EOPNOTSUPP; 220 221 } 221 222 222 223 if (is_srq && !dev->dsr->caps.max_srq) { 223 224 dev_warn(&dev->pdev->dev, 224 225 "SRQs not supported by device\n"); 225 - return ERR_PTR(-EINVAL); 226 + return -EINVAL; 226 227 } 227 228 228 229 if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp)) 229 - return ERR_PTR(-ENOMEM); 230 + return -ENOMEM; 230 231 231 232 switch (init_attr->qp_type) { 232 233 case IB_QPT_GSI: 233 234 if (init_attr->port_num == 0 || 234 - init_attr->port_num > pd->device->phys_port_cnt) { 235 + init_attr->port_num > ibqp->device->phys_port_cnt) { 235 236 dev_warn(&dev->pdev->dev, "invalid queuepair 
attrs\n"); 236 237 ret = -EINVAL; 237 238 goto err_qp; ··· 239 240 fallthrough; 240 241 case IB_QPT_RC: 241 242 case IB_QPT_UD: 242 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 243 - if (!qp) { 244 - ret = -ENOMEM; 245 - goto err_qp; 246 - } 247 - 248 243 spin_lock_init(&qp->sq.lock); 249 244 spin_lock_init(&qp->rq.lock); 250 245 mutex_init(&qp->mutex); ··· 268 275 269 276 if (!is_srq) { 270 277 /* set qp->sq.wqe_cnt, shift, buf_size.. */ 271 - qp->rumem = 272 - ib_umem_get(pd->device, ucmd.rbuf_addr, 273 - ucmd.rbuf_size, 0); 278 + qp->rumem = ib_umem_get(ibqp->device, 279 + ucmd.rbuf_addr, 280 + ucmd.rbuf_size, 0); 274 281 if (IS_ERR(qp->rumem)) { 275 282 ret = PTR_ERR(qp->rumem); 276 283 goto err_qp; ··· 281 288 qp->srq = to_vsrq(init_attr->srq); 282 289 } 283 290 284 - qp->sumem = ib_umem_get(pd->device, ucmd.sbuf_addr, 291 + qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr, 285 292 ucmd.sbuf_size, 0); 286 293 if (IS_ERR(qp->sumem)) { 287 294 if (!is_srq) ··· 299 306 qp->npages_recv = 0; 300 307 qp->npages = qp->npages_send + qp->npages_recv; 301 308 } else { 302 - ret = pvrdma_set_sq_size(to_vdev(pd->device), 309 + ret = pvrdma_set_sq_size(to_vdev(ibqp->device), 303 310 &init_attr->cap, qp); 304 311 if (ret) 305 312 goto err_qp; 306 313 307 - ret = pvrdma_set_rq_size(to_vdev(pd->device), 314 + ret = pvrdma_set_rq_size(to_vdev(ibqp->device), 308 315 &init_attr->cap, qp); 309 316 if (ret) 310 317 goto err_qp; ··· 355 362 356 363 memset(cmd, 0, sizeof(*cmd)); 357 364 cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP; 358 - cmd->pd_handle = to_vpd(pd)->pd_handle; 365 + cmd->pd_handle = to_vpd(ibqp->pd)->pd_handle; 359 366 cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; 360 367 cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle; 361 368 if (is_srq) ··· 411 418 dev_warn(&dev->pdev->dev, 412 419 "failed to copy back udata\n"); 413 420 __pvrdma_destroy_qp(dev, qp); 414 - return ERR_PTR(-EINVAL); 421 + return -EINVAL; 415 422 } 416 423 } 417 424 418 - return 
&qp->ibqp; 425 + return 0; 419 426 420 427 err_pdir: 421 428 pvrdma_page_dir_cleanup(dev, &qp->pdir); ··· 423 430 ib_umem_release(qp->rumem); 424 431 ib_umem_release(qp->sumem); 425 432 err_qp: 426 - kfree(qp); 427 433 atomic_dec(&dev->num_qps); 428 - 429 - return ERR_PTR(ret); 434 + return ret; 430 435 } 431 436 432 437 static void _pvrdma_free_qp(struct pvrdma_qp *qp) ··· 444 453 ib_umem_release(qp->sumem); 445 454 446 455 pvrdma_page_dir_cleanup(dev, &qp->pdir); 447 - 448 - kfree(qp); 449 456 450 457 atomic_dec(&dev->num_qps); 451 458 }
+2 -3
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
··· 390 390 int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); 391 391 int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata); 392 392 393 - struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, 394 - struct ib_qp_init_attr *init_attr, 395 - struct ib_udata *udata); 393 + int pvrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, 394 + struct ib_udata *udata); 396 395 int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 397 396 int attr_mask, struct ib_udata *udata); 398 397 int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+34 -57
drivers/infiniband/sw/rdmavt/qp.c
··· 1058 1058 1059 1059 /** 1060 1060 * rvt_create_qp - create a queue pair for a device 1061 - * @ibpd: the protection domain who's device we create the queue pair for 1061 + * @ibqp: the queue pair 1062 1062 * @init_attr: the attributes of the queue pair 1063 1063 * @udata: user data for libibverbs.so 1064 1064 * ··· 1066 1066 * unique idea of what queue pair numbers mean. For instance there is a reserved 1067 1067 * range for PSM. 1068 1068 * 1069 - * Return: the queue pair on success, otherwise returns an errno. 1069 + * Return: 0 on success, otherwise returns an errno. 1070 1070 * 1071 1071 * Called by the ib_create_qp() core verbs function. 1072 1072 */ 1073 - struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, 1074 - struct ib_qp_init_attr *init_attr, 1075 - struct ib_udata *udata) 1073 + int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, 1074 + struct ib_udata *udata) 1076 1075 { 1077 - struct rvt_qp *qp; 1078 - int err; 1076 + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); 1077 + int ret = -ENOMEM; 1079 1078 struct rvt_swqe *swq = NULL; 1080 1079 size_t sz; 1081 1080 size_t sg_list_sz = 0; 1082 - struct ib_qp *ret = ERR_PTR(-ENOMEM); 1083 - struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device); 1081 + struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); 1084 1082 void *priv = NULL; 1085 1083 size_t sqsize; 1086 1084 u8 exclude_prefix = 0; 1087 1085 1088 1086 if (!rdi) 1089 - return ERR_PTR(-EINVAL); 1087 + return -EINVAL; 1090 1088 1091 1089 if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE) 1092 - return ERR_PTR(-EOPNOTSUPP); 1090 + return -EOPNOTSUPP; 1093 1091 1094 1092 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge || 1095 1093 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr) 1096 - return ERR_PTR(-EINVAL); 1094 + return -EINVAL; 1097 1095 1098 1096 /* Check receive queue parameters if no SRQ is specified. 
*/ 1099 1097 if (!init_attr->srq) { 1100 1098 if (init_attr->cap.max_recv_sge > 1101 1099 rdi->dparms.props.max_recv_sge || 1102 1100 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr) 1103 - return ERR_PTR(-EINVAL); 1101 + return -EINVAL; 1104 1102 1105 1103 if (init_attr->cap.max_send_sge + 1106 1104 init_attr->cap.max_send_wr + 1107 1105 init_attr->cap.max_recv_sge + 1108 1106 init_attr->cap.max_recv_wr == 0) 1109 - return ERR_PTR(-EINVAL); 1107 + return -EINVAL; 1110 1108 } 1111 1109 sqsize = 1112 1110 init_attr->cap.max_send_wr + 1 + ··· 1113 1115 case IB_QPT_SMI: 1114 1116 case IB_QPT_GSI: 1115 1117 if (init_attr->port_num == 0 || 1116 - init_attr->port_num > ibpd->device->phys_port_cnt) 1117 - return ERR_PTR(-EINVAL); 1118 + init_attr->port_num > ibqp->device->phys_port_cnt) 1119 + return -EINVAL; 1118 1120 fallthrough; 1119 1121 case IB_QPT_UC: 1120 1122 case IB_QPT_RC: ··· 1122 1124 sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge); 1123 1125 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node); 1124 1126 if (!swq) 1125 - return ERR_PTR(-ENOMEM); 1127 + return -ENOMEM; 1126 1128 1127 1129 if (init_attr->srq) { 1128 1130 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq); ··· 1133 1135 } else if (init_attr->cap.max_recv_sge > 1) 1134 1136 sg_list_sz = sizeof(*qp->r_sg_list) * 1135 1137 (init_attr->cap.max_recv_sge - 1); 1136 - qp = kzalloc_node(sizeof(*qp), GFP_KERNEL, rdi->dparms.node); 1137 - if (!qp) 1138 - goto bail_swq; 1139 1138 qp->r_sg_list = 1140 1139 kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node); 1141 1140 if (!qp->r_sg_list) ··· 1161 1166 */ 1162 1167 priv = rdi->driver_f.qp_priv_alloc(rdi, qp); 1163 1168 if (IS_ERR(priv)) { 1164 - ret = priv; 1169 + ret = PTR_ERR(priv); 1165 1170 goto bail_qp; 1166 1171 } 1167 1172 qp->priv = priv; ··· 1175 1180 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; 1176 1181 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + 1177 1182 sizeof(struct rvt_rwqe); 1178 - err = 
rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz, 1183 + ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz, 1179 1184 rdi->dparms.node, udata); 1180 - if (err) { 1181 - ret = ERR_PTR(err); 1185 + if (ret) 1182 1186 goto bail_driver_priv; 1183 - } 1184 1187 } 1185 1188 1186 1189 /* ··· 1199 1206 qp->s_max_sge = init_attr->cap.max_send_sge; 1200 1207 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) 1201 1208 qp->s_flags = RVT_S_SIGNAL_REQ_WR; 1202 - err = alloc_ud_wq_attr(qp, rdi->dparms.node); 1203 - if (err) { 1204 - ret = (ERR_PTR(err)); 1209 + ret = alloc_ud_wq_attr(qp, rdi->dparms.node); 1210 + if (ret) 1205 1211 goto bail_rq_rvt; 1206 - } 1207 1212 1208 1213 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) 1209 1214 exclude_prefix = RVT_AIP_QP_PREFIX; 1210 1215 1211 - err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, 1216 + ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, 1212 1217 init_attr->qp_type, 1213 1218 init_attr->port_num, 1214 1219 exclude_prefix); 1215 - if (err < 0) { 1216 - ret = ERR_PTR(err); 1220 + if (ret < 0) 1217 1221 goto bail_rq_wq; 1218 - } 1219 - qp->ibqp.qp_num = err; 1222 + 1223 + qp->ibqp.qp_num = ret; 1220 1224 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) 1221 1225 qp->ibqp.qp_num |= RVT_AIP_QP_BASE; 1222 1226 qp->port_num = init_attr->port_num; 1223 1227 rvt_init_qp(rdi, qp, init_attr->qp_type); 1224 1228 if (rdi->driver_f.qp_priv_init) { 1225 - err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr); 1226 - if (err) { 1227 - ret = ERR_PTR(err); 1229 + ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr); 1230 + if (ret) 1228 1231 goto bail_rq_wq; 1229 - } 1230 1232 } 1231 1233 break; 1232 1234 1233 1235 default: 1234 1236 /* Don't support raw QPs */ 1235 - return ERR_PTR(-EOPNOTSUPP); 1237 + return -EOPNOTSUPP; 1236 1238 } 1237 1239 1238 1240 init_attr->cap.max_inline_data = 0; ··· 1240 1252 if (!qp->r_rq.wq) { 1241 1253 __u64 offset = 0; 1242 1254 1243 - err = ib_copy_to_udata(udata, &offset, 1255 + ret = ib_copy_to_udata(udata, 
&offset, 1244 1256 sizeof(offset)); 1245 - if (err) { 1246 - ret = ERR_PTR(err); 1257 + if (ret) 1247 1258 goto bail_qpn; 1248 - } 1249 1259 } else { 1250 1260 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; 1251 1261 1252 1262 qp->ip = rvt_create_mmap_info(rdi, s, udata, 1253 1263 qp->r_rq.wq); 1254 1264 if (IS_ERR(qp->ip)) { 1255 - ret = ERR_CAST(qp->ip); 1265 + ret = PTR_ERR(qp->ip); 1256 1266 goto bail_qpn; 1257 1267 } 1258 1268 1259 - err = ib_copy_to_udata(udata, &qp->ip->offset, 1269 + ret = ib_copy_to_udata(udata, &qp->ip->offset, 1260 1270 sizeof(qp->ip->offset)); 1261 - if (err) { 1262 - ret = ERR_PTR(err); 1271 + if (ret) 1263 1272 goto bail_ip; 1264 - } 1265 1273 } 1266 1274 qp->pid = current->pid; 1267 1275 } ··· 1265 1281 spin_lock(&rdi->n_qps_lock); 1266 1282 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) { 1267 1283 spin_unlock(&rdi->n_qps_lock); 1268 - ret = ERR_PTR(-ENOMEM); 1284 + ret = ENOMEM; 1269 1285 goto bail_ip; 1270 1286 } 1271 1287 ··· 1291 1307 spin_unlock_irq(&rdi->pending_lock); 1292 1308 } 1293 1309 1294 - ret = &qp->ibqp; 1295 - 1296 - return ret; 1310 + return 0; 1297 1311 1298 1312 bail_ip: 1299 1313 if (qp->ip) ··· 1312 1330 bail_qp: 1313 1331 kfree(qp->s_ack_queue); 1314 1332 kfree(qp->r_sg_list); 1315 - kfree(qp); 1316 - 1317 - bail_swq: 1318 1333 vfree(swq); 1319 - 1320 1334 return ret; 1321 1335 } 1322 1336 ··· 1747 1769 rdma_destroy_ah_attr(&qp->alt_ah_attr); 1748 1770 free_ud_wq_attr(qp); 1749 1771 vfree(qp->s_wq); 1750 - kfree(qp); 1751 1772 return 0; 1752 1773 } 1753 1774
+2 -3
drivers/infiniband/sw/rdmavt/qp.h
··· 52 52 53 53 int rvt_driver_qp_init(struct rvt_dev_info *rdi); 54 54 void rvt_qp_exit(struct rvt_dev_info *rdi); 55 - struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, 56 - struct ib_qp_init_attr *init_attr, 57 - struct ib_udata *udata); 55 + int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, 56 + struct ib_udata *udata); 58 57 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 59 58 int attr_mask, struct ib_udata *udata); 60 59 int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+9
drivers/infiniband/sw/rdmavt/vt.c
··· 131 131 return 0; 132 132 } 133 133 134 + static int rvt_get_numa_node(struct ib_device *ibdev) 135 + { 136 + struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 137 + 138 + return rdi->dparms.node; 139 + } 140 + 134 141 static int rvt_modify_device(struct ib_device *device, 135 142 int device_modify_mask, 136 143 struct ib_device_modify *device_modify) ··· 387 380 .destroy_srq = rvt_destroy_srq, 388 381 .detach_mcast = rvt_detach_mcast, 389 382 .get_dma_mr = rvt_get_dma_mr, 383 + .get_numa_node = rvt_get_numa_node, 390 384 .get_port_immutable = rvt_get_port_immutable, 391 385 .map_mr_sg = rvt_map_mr_sg, 392 386 .mmap = rvt_mmap, ··· 414 406 INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah), 415 407 INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq), 416 408 INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd), 409 + INIT_RDMA_OBJ_SIZE(ib_qp, rvt_qp, ibqp), 417 410 INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq), 418 411 INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext), 419 412 };
+1 -1
drivers/infiniband/sw/rxe/rxe_pool.c
··· 41 41 .size = sizeof(struct rxe_qp), 42 42 .elem_offset = offsetof(struct rxe_qp, pelem), 43 43 .cleanup = rxe_qp_cleanup, 44 - .flags = RXE_POOL_INDEX, 44 + .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, 45 45 .min_index = RXE_MIN_QP_INDEX, 46 46 .max_index = RXE_MAX_QP_INDEX, 47 47 },
+22 -28
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 391 391 return err; 392 392 } 393 393 394 - static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd, 395 - struct ib_qp_init_attr *init, 396 - struct ib_udata *udata) 394 + static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, 395 + struct ib_udata *udata) 397 396 { 398 397 int err; 399 - struct rxe_dev *rxe = to_rdev(ibpd->device); 400 - struct rxe_pd *pd = to_rpd(ibpd); 401 - struct rxe_qp *qp; 398 + struct rxe_dev *rxe = to_rdev(ibqp->device); 399 + struct rxe_pd *pd = to_rpd(ibqp->pd); 400 + struct rxe_qp *qp = to_rqp(ibqp); 402 401 struct rxe_create_qp_resp __user *uresp = NULL; 403 402 404 403 if (udata) { 405 404 if (udata->outlen < sizeof(*uresp)) 406 - return ERR_PTR(-EINVAL); 405 + return -EINVAL; 407 406 uresp = udata->outbuf; 408 407 } 409 408 410 409 if (init->create_flags) 411 - return ERR_PTR(-EOPNOTSUPP); 410 + return -EOPNOTSUPP; 412 411 413 412 err = rxe_qp_chk_init(rxe, init); 414 413 if (err) 415 - goto err1; 416 - 417 - qp = rxe_alloc(&rxe->qp_pool); 418 - if (!qp) { 419 - err = -ENOMEM; 420 - goto err1; 421 - } 414 + return err; 422 415 423 416 if (udata) { 424 - if (udata->inlen) { 425 - err = -EINVAL; 426 - goto err2; 427 - } 417 + if (udata->inlen) 418 + return -EINVAL; 419 + 428 420 qp->is_user = true; 429 421 } else { 430 422 qp->is_user = false; 431 423 } 432 424 433 - rxe_add_index(qp); 434 - 435 - err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata); 425 + err = rxe_add_to_pool(&rxe->qp_pool, qp); 436 426 if (err) 437 - goto err3; 427 + return err; 438 428 439 - return &qp->ibqp; 429 + rxe_add_index(qp); 430 + err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata); 431 + if (err) 432 + goto qp_init; 440 433 441 - err3: 434 + return 0; 435 + 436 + qp_init: 442 437 rxe_drop_index(qp); 443 - err2: 444 438 rxe_drop_ref(qp); 445 - err1: 446 - return ERR_PTR(err); 439 + return err; 447 440 } 448 441 449 442 static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ··· 1138 1145 
INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah), 1139 1146 INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq), 1140 1147 INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd), 1148 + INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp), 1141 1149 INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq), 1142 1150 INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc), 1143 1151 INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
+1 -1
drivers/infiniband/sw/rxe/rxe_verbs.h
··· 210 210 }; 211 211 212 212 struct rxe_qp { 213 - struct rxe_pool_entry pelem; 214 213 struct ib_qp ibqp; 214 + struct rxe_pool_entry pelem; 215 215 struct ib_qp_attr attr; 216 216 unsigned int valid; 217 217 unsigned int mtu;
+1
drivers/infiniband/sw/siw/siw_main.c
··· 297 297 298 298 INIT_RDMA_OBJ_SIZE(ib_cq, siw_cq, base_cq), 299 299 INIT_RDMA_OBJ_SIZE(ib_pd, siw_pd, base_pd), 300 + INIT_RDMA_OBJ_SIZE(ib_qp, siw_qp, base_qp), 300 301 INIT_RDMA_OBJ_SIZE(ib_srq, siw_srq, base_srq), 301 302 INIT_RDMA_OBJ_SIZE(ib_ucontext, siw_ucontext, base_ucontext), 302 303 };
-2
drivers/infiniband/sw/siw/siw_qp.c
··· 1344 1344 siw_put_tx_cpu(qp->tx_cpu); 1345 1345 1346 1346 atomic_dec(&sdev->num_qp); 1347 - siw_dbg_qp(qp, "free QP\n"); 1348 - kfree_rcu(qp, rcu); 1349 1347 }
+23 -31
drivers/infiniband/sw/siw/siw_verbs.c
··· 285 285 * 286 286 * Create QP of requested size on given device. 287 287 * 288 - * @pd: Protection Domain 288 + * @qp: Queue pait 289 289 * @attrs: Initial QP attributes. 290 290 * @udata: used to provide QP ID, SQ and RQ size back to user. 291 291 */ 292 292 293 - struct ib_qp *siw_create_qp(struct ib_pd *pd, 294 - struct ib_qp_init_attr *attrs, 295 - struct ib_udata *udata) 293 + int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, 294 + struct ib_udata *udata) 296 295 { 297 - struct siw_qp *qp = NULL; 296 + struct ib_pd *pd = ibqp->pd; 297 + struct siw_qp *qp = to_siw_qp(ibqp); 298 298 struct ib_device *base_dev = pd->device; 299 299 struct siw_device *sdev = to_siw_dev(base_dev); 300 300 struct siw_ucontext *uctx = ··· 307 307 siw_dbg(base_dev, "create new QP\n"); 308 308 309 309 if (attrs->create_flags) 310 - return ERR_PTR(-EOPNOTSUPP); 310 + return -EOPNOTSUPP; 311 311 312 312 if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) { 313 313 siw_dbg(base_dev, "too many QP's\n"); 314 - rv = -ENOMEM; 315 - goto err_out; 314 + return -ENOMEM; 316 315 } 317 316 if (attrs->qp_type != IB_QPT_RC) { 318 317 siw_dbg(base_dev, "only RC QP's supported\n"); 319 318 rv = -EOPNOTSUPP; 320 - goto err_out; 319 + goto err_atomic; 321 320 } 322 321 if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) || 323 322 (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) || ··· 324 325 (attrs->cap.max_recv_sge > SIW_MAX_SGE)) { 325 326 siw_dbg(base_dev, "QP size error\n"); 326 327 rv = -EINVAL; 327 - goto err_out; 328 + goto err_atomic; 328 329 } 329 330 if (attrs->cap.max_inline_data > SIW_MAX_INLINE) { 330 331 siw_dbg(base_dev, "max inline send: %d > %d\n", 331 332 attrs->cap.max_inline_data, (int)SIW_MAX_INLINE); 332 333 rv = -EINVAL; 333 - goto err_out; 334 + goto err_atomic; 334 335 } 335 336 /* 336 337 * NOTE: we allow for zero element SQ and RQ WQE's SGL's ··· 339 340 if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) { 340 341 siw_dbg(base_dev, "QP must have send or receive 
queue\n"); 341 342 rv = -EINVAL; 342 - goto err_out; 343 + goto err_atomic; 343 344 } 344 345 345 346 if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) { 346 347 siw_dbg(base_dev, "send CQ or receive CQ invalid\n"); 347 348 rv = -EINVAL; 348 - goto err_out; 349 + goto err_atomic; 349 350 } 350 - qp = kzalloc(sizeof(*qp), GFP_KERNEL); 351 - if (!qp) { 352 - rv = -ENOMEM; 353 - goto err_out; 354 - } 351 + 355 352 init_rwsem(&qp->state_lock); 356 353 spin_lock_init(&qp->sq_lock); 357 354 spin_lock_init(&qp->rq_lock); ··· 355 360 356 361 rv = siw_qp_add(sdev, qp); 357 362 if (rv) 358 - goto err_out; 363 + goto err_atomic; 359 364 360 365 num_sqe = attrs->cap.max_send_wr; 361 366 num_rqe = attrs->cap.max_recv_wr; ··· 477 482 list_add_tail(&qp->devq, &sdev->qp_list); 478 483 spin_unlock_irqrestore(&sdev->lock, flags); 479 484 480 - return &qp->base_qp; 485 + return 0; 481 486 482 487 err_out_xa: 483 488 xa_erase(&sdev->qp_xa, qp_id(qp)); 484 - err_out: 485 - if (qp) { 486 - if (uctx) { 487 - rdma_user_mmap_entry_remove(qp->sq_entry); 488 - rdma_user_mmap_entry_remove(qp->rq_entry); 489 - } 490 - vfree(qp->sendq); 491 - vfree(qp->recvq); 492 - kfree(qp); 489 + if (uctx) { 490 + rdma_user_mmap_entry_remove(qp->sq_entry); 491 + rdma_user_mmap_entry_remove(qp->rq_entry); 493 492 } 494 - atomic_dec(&sdev->num_qp); 493 + vfree(qp->sendq); 494 + vfree(qp->recvq); 495 495 496 - return ERR_PTR(rv); 496 + err_atomic: 497 + atomic_dec(&sdev->num_qp); 498 + return rv; 497 499 } 498 500 499 501 /*
+2 -3
drivers/infiniband/sw/siw/siw_verbs.h
··· 50 50 union ib_gid *gid); 51 51 int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); 52 52 int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); 53 - struct ib_qp *siw_create_qp(struct ib_pd *base_pd, 54 - struct ib_qp_init_attr *attr, 55 - struct ib_udata *udata); 53 + int siw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attr, 54 + struct ib_udata *udata); 56 55 int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr, 57 56 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); 58 57 int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
+25 -5
include/rdma/ib_verbs.h
··· 2268 2268 !__same_type(((struct drv_struct *)NULL)->member, \ 2269 2269 struct ib_struct))) 2270 2270 2271 - #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ 2272 - ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp)) 2271 + #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ 2272 + ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ 2273 + gfp, false)) 2274 + 2275 + #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \ 2276 + ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ 2277 + GFP_KERNEL, true)) 2273 2278 2274 2279 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ 2275 2280 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) ··· 2440 2435 struct ib_udata *udata); 2441 2436 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); 2442 2437 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); 2443 - struct ib_qp *(*create_qp)(struct ib_pd *pd, 2444 - struct ib_qp_init_attr *qp_init_attr, 2445 - struct ib_udata *udata); 2438 + int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr, 2439 + struct ib_udata *udata); 2446 2440 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2447 2441 int qp_attr_mask, struct ib_udata *udata); 2448 2442 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, ··· 2639 2635 int (*query_ucontext)(struct ib_ucontext *context, 2640 2636 struct uverbs_attr_bundle *attrs); 2641 2637 2638 + /* 2639 + * Provide NUMA node. This API exists for rdmavt/hfi1 only. 2640 + * Everyone else relies on Linux memory management model. 
2641 + */ 2642 + int (*get_numa_node)(struct ib_device *dev); 2643 + 2642 2644 DECLARE_RDMA_OBJ_SIZE(ib_ah); 2643 2645 DECLARE_RDMA_OBJ_SIZE(ib_counters); 2644 2646 DECLARE_RDMA_OBJ_SIZE(ib_cq); 2645 2647 DECLARE_RDMA_OBJ_SIZE(ib_mw); 2646 2648 DECLARE_RDMA_OBJ_SIZE(ib_pd); 2649 + DECLARE_RDMA_OBJ_SIZE(ib_qp); 2647 2650 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table); 2648 2651 DECLARE_RDMA_OBJ_SIZE(ib_srq); 2649 2652 DECLARE_RDMA_OBJ_SIZE(ib_ucontext); ··· 2756 2745 u32 iw_driver_flags; 2757 2746 u32 lag_flags; 2758 2747 }; 2748 + 2749 + static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size, 2750 + gfp_t gfp, bool is_numa_aware) 2751 + { 2752 + if (is_numa_aware && dev->ops.get_numa_node) 2753 + return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev)); 2754 + 2755 + return kzalloc(size, gfp); 2756 + } 2759 2757 2760 2758 struct ib_client_nl_info; 2761 2759 struct ib_client {