Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/core: Change provider's API of create_cq to be extendible

Add a new ib_cq_init_attr structure which contains the
previous cqe (minimum number of CQ entries) and comp_vector
(completion vector) in addition to a new flags field.
All vendors' create_cq callbacks are changed in order
to work with the new API.

This commit does not change any functionality.

Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-By: Devesh Sharma <devesh.sharma@avagotech.com> (applies to patch #2)
Signed-off-by: Doug Ledford <dledford@redhat.com>

Authored by Matan Barak; committed by Doug Ledford.
bcf4c1ea 74217d4c

+124 -46
+4 -2
drivers/infiniband/core/uverbs_cmd.c
··· 1341 1341 struct ib_uverbs_event_file *ev_file = NULL; 1342 1342 struct ib_cq *cq; 1343 1343 int ret; 1344 + struct ib_cq_init_attr attr = {}; 1344 1345 1345 1346 if (out_len < sizeof resp) 1346 1347 return -ENOSPC; ··· 1377 1376 INIT_LIST_HEAD(&obj->comp_list); 1378 1377 INIT_LIST_HEAD(&obj->async_list); 1379 1378 1380 - cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, 1381 - cmd.comp_vector, 1379 + attr.cqe = cmd.cqe; 1380 + attr.comp_vector = cmd.comp_vector; 1381 + cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr, 1382 1382 file->ucontext, &udata); 1383 1383 if (IS_ERR(cq)) { 1384 1384 ret = PTR_ERR(cq);
+2 -1
drivers/infiniband/core/verbs.c
··· 1079 1079 void *cq_context, int cqe, int comp_vector) 1080 1080 { 1081 1081 struct ib_cq *cq; 1082 + struct ib_cq_init_attr attr = {.cqe = cqe, .comp_vector = comp_vector}; 1082 1083 1083 - cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); 1084 + cq = device->create_cq(device, &attr, NULL, NULL); 1084 1085 1085 1086 if (!IS_ERR(cq)) { 1086 1087 cq->device = device;
+6 -1
drivers/infiniband/hw/amso1100/c2_provider.c
··· 286 286 return 0; 287 287 } 288 288 289 - static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector, 289 + static struct ib_cq *c2_create_cq(struct ib_device *ibdev, 290 + const struct ib_cq_init_attr *attr, 290 291 struct ib_ucontext *context, 291 292 struct ib_udata *udata) 292 293 { 294 + int entries = attr->cqe; 293 295 struct c2_cq *cq; 294 296 int err; 297 + 298 + if (attr->flags) 299 + return ERR_PTR(-EINVAL); 295 300 296 301 cq = kmalloc(sizeof(*cq), GFP_KERNEL); 297 302 if (!cq) {
+8 -3
drivers/infiniband/hw/cxgb3/iwch_provider.c
··· 138 138 return 0; 139 139 } 140 140 141 - static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector, 142 - struct ib_ucontext *ib_context, 143 - struct ib_udata *udata) 141 + static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, 142 + const struct ib_cq_init_attr *attr, 143 + struct ib_ucontext *ib_context, 144 + struct ib_udata *udata) 144 145 { 146 + int entries = attr->cqe; 145 147 struct iwch_dev *rhp; 146 148 struct iwch_cq *chp; 147 149 struct iwch_create_cq_resp uresp; ··· 153 151 size_t resplen; 154 152 155 153 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); 154 + if (attr->flags) 155 + return ERR_PTR(-EINVAL); 156 + 156 157 rhp = to_iwch_dev(ibdev); 157 158 chp = kzalloc(sizeof(*chp), GFP_KERNEL); 158 159 if (!chp)
+7 -2
drivers/infiniband/hw/cxgb4/cq.c
··· 864 864 return 0; 865 865 } 866 866 867 - struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, 868 - int vector, struct ib_ucontext *ib_context, 867 + struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, 868 + const struct ib_cq_init_attr *attr, 869 + struct ib_ucontext *ib_context, 869 870 struct ib_udata *udata) 870 871 { 872 + int entries = attr->cqe; 873 + int vector = attr->comp_vector; 871 874 struct c4iw_dev *rhp; 872 875 struct c4iw_cq *chp; 873 876 struct c4iw_create_cq_resp uresp; ··· 880 877 struct c4iw_mm_entry *mm, *mm2; 881 878 882 879 PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); 880 + if (attr->flags) 881 + return ERR_PTR(-EINVAL); 883 882 884 883 rhp = to_c4iw_dev(ibdev); 885 884
+4 -4
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 990 990 int acc, u64 *iova_start); 991 991 int c4iw_dereg_mr(struct ib_mr *ib_mr); 992 992 int c4iw_destroy_cq(struct ib_cq *ib_cq); 993 - struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, 994 - int vector, 995 - struct ib_ucontext *ib_context, 996 - struct ib_udata *udata); 993 + struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, 994 + const struct ib_cq_init_attr *attr, 995 + struct ib_ucontext *ib_context, 996 + struct ib_udata *udata); 997 997 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); 998 998 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); 999 999 int c4iw_destroy_qp(struct ib_qp *ib_qp);
+6 -1
drivers/infiniband/hw/ehca/ehca_cq.c
··· 113 113 return ret; 114 114 } 115 115 116 - struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, 116 + struct ib_cq *ehca_create_cq(struct ib_device *device, 117 + const struct ib_cq_init_attr *attr, 117 118 struct ib_ucontext *context, 118 119 struct ib_udata *udata) 119 120 { 121 + int cqe = attr->cqe; 120 122 static const u32 additional_cqe = 20; 121 123 struct ib_cq *cq; 122 124 struct ehca_cq *my_cq; ··· 132 130 u64 rpage, cqx_fec, h_ret; 133 131 int ipz_rc, i; 134 132 unsigned long flags; 133 + 134 + if (attr->flags) 135 + return ERR_PTR(-EINVAL); 135 136 136 137 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) 137 138 return ERR_PTR(-EINVAL);
+2 -1
drivers/infiniband/hw/ehca/ehca_iverbs.h
··· 129 129 void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq); 130 130 131 131 132 - struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, 132 + struct ib_cq *ehca_create_cq(struct ib_device *device, 133 + const struct ib_cq_init_attr *attr, 133 134 struct ib_ucontext *context, 134 135 struct ib_udata *udata); 135 136
+7 -2
drivers/infiniband/hw/ipath/ipath_cq.c
··· 188 188 /** 189 189 * ipath_create_cq - create a completion queue 190 190 * @ibdev: the device this completion queue is attached to 191 - * @entries: the minimum size of the completion queue 191 + * @attr: creation attributes 192 192 * @context: unused by the InfiniPath driver 193 193 * @udata: unused by the InfiniPath driver 194 194 * ··· 197 197 * 198 198 * Called by ib_create_cq() in the generic verbs code. 199 199 */ 200 - struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, 200 + struct ib_cq *ipath_create_cq(struct ib_device *ibdev, 201 + const struct ib_cq_init_attr *attr, 201 202 struct ib_ucontext *context, 202 203 struct ib_udata *udata) 203 204 { 205 + int entries = attr->cqe; 204 206 struct ipath_ibdev *dev = to_idev(ibdev); 205 207 struct ipath_cq *cq; 206 208 struct ipath_cq_wc *wc; 207 209 struct ib_cq *ret; 208 210 u32 sz; 211 + 212 + if (attr->flags) 213 + return ERR_PTR(-EINVAL); 209 214 210 215 if (entries < 1 || entries > ib_ipath_max_cqes) { 211 216 ret = ERR_PTR(-EINVAL);
+2 -1
drivers/infiniband/hw/ipath/ipath_verbs.h
··· 807 807 808 808 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); 809 809 810 - struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, 810 + struct ib_cq *ipath_create_cq(struct ib_device *ibdev, 811 + const struct ib_cq_init_attr *attr, 811 812 struct ib_ucontext *context, 812 813 struct ib_udata *udata); 813 814
+7 -1
drivers/infiniband/hw/mlx4/cq.c
··· 166 166 return err; 167 167 } 168 168 169 - struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, 169 + struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, 170 + const struct ib_cq_init_attr *attr, 170 171 struct ib_ucontext *context, 171 172 struct ib_udata *udata) 172 173 { 174 + int entries = attr->cqe; 175 + int vector = attr->comp_vector; 173 176 struct mlx4_ib_dev *dev = to_mdev(ibdev); 174 177 struct mlx4_ib_cq *cq; 175 178 struct mlx4_uar *uar; 176 179 int err; 180 + 181 + if (attr->flags) 182 + return ERR_PTR(-EINVAL); 177 183 178 184 if (entries < 1 || entries > dev->dev->caps.max_cqes) 179 185 return ERR_PTR(-EINVAL);
+2 -1
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 668 668 669 669 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 670 670 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); 671 - struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, 671 + struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, 672 + const struct ib_cq_init_attr *attr, 672 673 struct ib_ucontext *context, 673 674 struct ib_udata *udata); 674 675 int mlx4_ib_destroy_cq(struct ib_cq *cq);
+8 -2
drivers/infiniband/hw/mlx5/cq.c
··· 736 736 mlx5_db_free(dev->mdev, &cq->db); 737 737 } 738 738 739 - struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, 740 - int vector, struct ib_ucontext *context, 739 + struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, 740 + const struct ib_cq_init_attr *attr, 741 + struct ib_ucontext *context, 741 742 struct ib_udata *udata) 742 743 { 744 + int entries = attr->cqe; 745 + int vector = attr->comp_vector; 743 746 struct mlx5_create_cq_mbox_in *cqb = NULL; 744 747 struct mlx5_ib_dev *dev = to_mdev(ibdev); 745 748 struct mlx5_ib_cq *cq; ··· 752 749 int irqn; 753 750 int eqn; 754 751 int err; 752 + 753 + if (attr->flags) 754 + return ERR_PTR(-EINVAL); 755 755 756 756 if (entries < 0) 757 757 return ERR_PTR(-EINVAL);
+2 -1
drivers/infiniband/hw/mlx5/main.c
··· 1087 1087 { 1088 1088 struct ib_srq_init_attr attr; 1089 1089 struct mlx5_ib_dev *dev; 1090 + struct ib_cq_init_attr cq_attr = {.cqe = 1}; 1090 1091 int ret = 0; 1091 1092 1092 1093 dev = container_of(devr, struct mlx5_ib_dev, devr); ··· 1101 1100 devr->p0->uobject = NULL; 1102 1101 atomic_set(&devr->p0->usecnt, 0); 1103 1102 1104 - devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL); 1103 + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); 1105 1104 if (IS_ERR(devr->c0)) { 1106 1105 ret = PTR_ERR(devr->c0); 1107 1106 goto error1;
+3 -2
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 556 556 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n); 557 557 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, 558 558 void *buffer, u32 length); 559 - struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, 560 - int vector, struct ib_ucontext *context, 559 + struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, 560 + const struct ib_cq_init_attr *attr, 561 + struct ib_ucontext *context, 561 562 struct ib_udata *udata); 562 563 int mlx5_ib_destroy_cq(struct ib_cq *cq); 563 564 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+6 -2
drivers/infiniband/hw/mthca/mthca_provider.c
··· 641 641 return 0; 642 642 } 643 643 644 - static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, 645 - int comp_vector, 644 + static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, 645 + const struct ib_cq_init_attr *attr, 646 646 struct ib_ucontext *context, 647 647 struct ib_udata *udata) 648 648 { 649 + int entries = attr->cqe; 649 650 struct mthca_create_cq ucmd; 650 651 struct mthca_cq *cq; 651 652 int nent; 652 653 int err; 654 + 655 + if (attr->flags) 656 + return ERR_PTR(-EINVAL); 653 657 654 658 if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) 655 659 return ERR_PTR(-EINVAL);
+8 -3
drivers/infiniband/hw/nes/nes_verbs.c
··· 1526 1526 /** 1527 1527 * nes_create_cq 1528 1528 */ 1529 - static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, 1530 - int comp_vector, 1531 - struct ib_ucontext *context, struct ib_udata *udata) 1529 + static struct ib_cq *nes_create_cq(struct ib_device *ibdev, 1530 + const struct ib_cq_init_attr *attr, 1531 + struct ib_ucontext *context, 1532 + struct ib_udata *udata) 1532 1533 { 1534 + int entries = attr->cqe; 1533 1535 u64 u64temp; 1534 1536 struct nes_vnic *nesvnic = to_nesvnic(ibdev); 1535 1537 struct nes_device *nesdev = nesvnic->nesdev; ··· 1550 1548 int err; 1551 1549 unsigned long flags; 1552 1550 int ret; 1551 + 1552 + if (attr->flags) 1553 + return ERR_PTR(-EINVAL); 1553 1554 1554 1555 if (entries > nesadapter->max_cqe) 1555 1556 return ERR_PTR(-EINVAL);
+6 -1
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 1004 1004 return status; 1005 1005 } 1006 1006 1007 - struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, 1007 + struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, 1008 + const struct ib_cq_init_attr *attr, 1008 1009 struct ib_ucontext *ib_ctx, 1009 1010 struct ib_udata *udata) 1010 1011 { 1012 + int entries = attr->cqe; 1011 1013 struct ocrdma_cq *cq; 1012 1014 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); 1013 1015 struct ocrdma_ucontext *uctx = NULL; 1014 1016 u16 pd_id = 0; 1015 1017 int status; 1016 1018 struct ocrdma_create_cq_ureq ureq; 1019 + 1020 + if (attr->flags) 1021 + return ERR_PTR(-EINVAL); 1017 1022 1018 1023 if (udata) { 1019 1024 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+4 -2
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
··· 59 59 struct ib_ucontext *, struct ib_udata *); 60 60 int ocrdma_dealloc_pd(struct ib_pd *pd); 61 61 62 - struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector, 63 - struct ib_ucontext *, struct ib_udata *); 62 + struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, 63 + const struct ib_cq_init_attr *attr, 64 + struct ib_ucontext *ib_ctx, 65 + struct ib_udata *udata); 64 66 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); 65 67 int ocrdma_destroy_cq(struct ib_cq *); 66 68
+8 -3
drivers/infiniband/hw/qib/qib_cq.c
··· 203 203 /** 204 204 * qib_create_cq - create a completion queue 205 205 * @ibdev: the device this completion queue is attached to 206 - * @entries: the minimum size of the completion queue 206 + * @attr: creation attributes 207 207 * @context: unused by the QLogic_IB driver 208 208 * @udata: user data for libibverbs.so 209 209 * ··· 212 212 * 213 213 * Called by ib_create_cq() in the generic verbs code. 214 214 */ 215 - struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, 216 - int comp_vector, struct ib_ucontext *context, 215 + struct ib_cq *qib_create_cq(struct ib_device *ibdev, 216 + const struct ib_cq_init_attr *attr, 217 + struct ib_ucontext *context, 217 218 struct ib_udata *udata) 218 219 { 220 + int entries = attr->cqe; 219 221 struct qib_ibdev *dev = to_idev(ibdev); 220 222 struct qib_cq *cq; 221 223 struct qib_cq_wc *wc; 222 224 struct ib_cq *ret; 223 225 u32 sz; 226 + 227 + if (attr->flags) 228 + return ERR_PTR(-EINVAL); 224 229 225 230 if (entries < 1 || entries > ib_qib_max_cqes) { 226 231 ret = ERR_PTR(-EINVAL);
+3 -2
drivers/infiniband/hw/qib/qib_verbs.h
··· 1007 1007 1008 1008 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); 1009 1009 1010 - struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, 1011 - int comp_vector, struct ib_ucontext *context, 1010 + struct ib_cq *qib_create_cq(struct ib_device *ibdev, 1011 + const struct ib_cq_init_attr *attr, 1012 + struct ib_ucontext *context, 1012 1013 struct ib_udata *udata); 1013 1014 1014 1015 int qib_destroy_cq(struct ib_cq *ibcq);
+7 -3
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 570 570 return status; 571 571 } 572 572 573 - struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries, 574 - int vector, struct ib_ucontext *context, 575 - struct ib_udata *udata) 573 + struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, 574 + const struct ib_cq_init_attr *attr, 575 + struct ib_ucontext *context, 576 + struct ib_udata *udata) 576 577 { 577 578 struct ib_cq *cq; 578 579 579 580 usnic_dbg("\n"); 581 + if (attr->flags) 582 + return ERR_PTR(-EINVAL); 583 + 580 584 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 581 585 if (!cq) 582 586 return ERR_PTR(-EBUSY);
+4 -3
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
··· 46 46 int usnic_ib_destroy_qp(struct ib_qp *qp); 47 47 int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 48 48 int attr_mask, struct ib_udata *udata); 49 - struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries, 50 - int vector, struct ib_ucontext *context, 51 - struct ib_udata *udata); 49 + struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, 50 + const struct ib_cq_init_attr *attr, 51 + struct ib_ucontext *context, 52 + struct ib_udata *udata); 52 53 int usnic_ib_destroy_cq(struct ib_cq *cq); 53 54 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, 54 55 u64 virt_addr, int access_flags,
+8 -2
include/rdma/ib_verbs.h
··· 173 173 } per_transport_caps; 174 174 }; 175 175 176 + struct ib_cq_init_attr { 177 + unsigned int cqe; 178 + int comp_vector; 179 + u32 flags; 180 + }; 181 + 176 182 struct ib_device_attr { 177 183 u64 fw_ver; 178 184 __be64 sys_image_guid; ··· 1619 1613 int (*post_recv)(struct ib_qp *qp, 1620 1614 struct ib_recv_wr *recv_wr, 1621 1615 struct ib_recv_wr **bad_recv_wr); 1622 - struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, 1623 - int comp_vector, 1616 + struct ib_cq * (*create_cq)(struct ib_device *device, 1617 + const struct ib_cq_init_attr *attr, 1624 1618 struct ib_ucontext *context, 1625 1619 struct ib_udata *udata); 1626 1620 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,