Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

{net,IB}/mlx5: QP/XRCD commands via mlx5 ifc

Remove old representation of manually created QP/XRCD commands layout
and use mlx5_ifc canonical structures and defines.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>

authored by

Saeed Mahameed and committed by
Leon Romanovsky
09a7d9ec ec22eb53

+166 -284
+85 -71
drivers/infiniband/hw/mlx5/qp.c
··· 726 726 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 727 727 struct mlx5_ib_qp *qp, struct ib_udata *udata, 728 728 struct ib_qp_init_attr *attr, 729 - struct mlx5_create_qp_mbox_in **in, 729 + u32 **in, 730 730 struct mlx5_ib_create_qp_resp *resp, int *inlen, 731 731 struct mlx5_ib_qp_base *base) 732 732 { ··· 739 739 u32 offset = 0; 740 740 int uuarn; 741 741 int ncont = 0; 742 + __be64 *pas; 743 + void *qpc; 742 744 int err; 743 745 744 746 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); ··· 797 795 ubuffer->umem = NULL; 798 796 } 799 797 800 - *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; 798 + *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 799 + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; 801 800 *in = mlx5_vzalloc(*inlen); 802 801 if (!*in) { 803 802 err = -ENOMEM; 804 803 goto err_umem; 805 804 } 806 - if (ubuffer->umem) 807 - mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, 808 - (*in)->pas, 0); 809 - (*in)->ctx.log_pg_sz_remote_qpn = 810 - cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); 811 - (*in)->ctx.params2 = cpu_to_be32(offset << 6); 812 805 813 - (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); 806 + pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas); 807 + if (ubuffer->umem) 808 + mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0); 809 + 810 + qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 811 + 812 + MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); 813 + MLX5_SET(qpc, qpc, page_offset, offset); 814 + 815 + MLX5_SET(qpc, qpc, uar_page, uar_index); 814 816 resp->uuar_index = uuarn; 815 817 qp->uuarn = uuarn; 816 818 ··· 863 857 static int create_kernel_qp(struct mlx5_ib_dev *dev, 864 858 struct ib_qp_init_attr *init_attr, 865 859 struct mlx5_ib_qp *qp, 866 - struct mlx5_create_qp_mbox_in **in, int *inlen, 860 + u32 **in, int *inlen, 867 861 struct mlx5_ib_qp_base *base) 868 862 { 869 863 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; 870 
864 struct mlx5_uuar_info *uuari; 871 865 int uar_index; 866 + void *qpc; 872 867 int uuarn; 873 868 int err; 874 869 ··· 909 902 } 910 903 911 904 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); 912 - *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; 905 + *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 906 + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; 913 907 *in = mlx5_vzalloc(*inlen); 914 908 if (!*in) { 915 909 err = -ENOMEM; 916 910 goto err_buf; 917 911 } 918 - (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); 919 - (*in)->ctx.log_pg_sz_remote_qpn = 920 - cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); 912 + 913 + qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 914 + MLX5_SET(qpc, qpc, uar_page, uar_index); 915 + MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); 916 + 921 917 /* Set "fast registration enabled" for all kernel QPs */ 922 - (*in)->ctx.params1 |= cpu_to_be32(1 << 11); 923 - (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); 918 + MLX5_SET(qpc, qpc, fre, 1); 919 + MLX5_SET(qpc, qpc, rlky, 1); 924 920 925 921 if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) { 926 - (*in)->ctx.deth_sqpn = cpu_to_be32(1); 922 + MLX5_SET(qpc, qpc, deth_sqpn, 1); 927 923 qp->flags |= MLX5_IB_QP_SQPN_QP1; 928 924 } 929 925 930 - mlx5_fill_page_array(&qp->buf, (*in)->pas); 926 + mlx5_fill_page_array(&qp->buf, 927 + (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas)); 931 928 932 929 err = mlx5_db_alloc(dev->mdev, &qp->db); 933 930 if (err) { ··· 985 974 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); 986 975 } 987 976 988 - static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) 977 + static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) 989 978 { 990 979 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || 991 980 (attr->qp_type == IB_QPT_XRC_INI)) 992 - return cpu_to_be32(MLX5_SRQ_RQ); 981 + return MLX5_SRQ_RQ; 993 982 else if 
(!qp->has_rq) 994 - return cpu_to_be32(MLX5_ZERO_LEN_RQ); 983 + return MLX5_ZERO_LEN_RQ; 995 984 else 996 - return cpu_to_be32(MLX5_NON_ZERO_RQ); 985 + return MLX5_NON_ZERO_RQ; 997 986 } 998 987 999 988 static int is_connected(enum ib_qp_type qp_type) ··· 1202 1191 } 1203 1192 1204 1193 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1205 - struct mlx5_create_qp_mbox_in *in, 1194 + u32 *in, 1206 1195 struct ib_pd *pd) 1207 1196 { 1208 1197 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; ··· 1472 1461 struct ib_udata *udata, struct mlx5_ib_qp *qp) 1473 1462 { 1474 1463 struct mlx5_ib_resources *devr = &dev->devr; 1464 + int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 1475 1465 struct mlx5_core_dev *mdev = dev->mdev; 1476 - struct mlx5_ib_qp_base *base; 1477 1466 struct mlx5_ib_create_qp_resp resp; 1478 - struct mlx5_create_qp_mbox_in *in; 1479 - struct mlx5_ib_create_qp ucmd; 1480 1467 struct mlx5_ib_cq *send_cq; 1481 1468 struct mlx5_ib_cq *recv_cq; 1482 1469 unsigned long flags; 1483 - int inlen = sizeof(*in); 1484 - int err; 1485 1470 u32 uidx = MLX5_IB_DEFAULT_UIDX; 1471 + struct mlx5_ib_create_qp ucmd; 1472 + struct mlx5_ib_qp_base *base; 1486 1473 void *qpc; 1474 + u32 *in; 1475 + int err; 1487 1476 1488 1477 base = init_attr->qp_type == IB_QPT_RAW_PACKET ? 
1489 1478 &qp->raw_packet_qp.rq.base : ··· 1611 1600 if (err) 1612 1601 return err; 1613 1602 } else { 1614 - in = mlx5_vzalloc(sizeof(*in)); 1603 + in = mlx5_vzalloc(inlen); 1615 1604 if (!in) 1616 1605 return -ENOMEM; 1617 1606 ··· 1621 1610 if (is_sqp(init_attr->qp_type)) 1622 1611 qp->port = init_attr->port_num; 1623 1612 1624 - in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 | 1625 - MLX5_QP_PM_MIGRATED << 11); 1613 + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 1614 + 1615 + MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); 1616 + MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 1626 1617 1627 1618 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) 1628 - in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn); 1619 + MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn); 1629 1620 else 1630 - in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE); 1621 + MLX5_SET(qpc, qpc, latency_sensitive, 1); 1622 + 1631 1623 1632 1624 if (qp->wq_sig) 1633 - in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); 1625 + MLX5_SET(qpc, qpc, wq_signature, 1); 1634 1626 1635 1627 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) 1636 - in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST); 1628 + MLX5_SET(qpc, qpc, block_lb_mc, 1); 1637 1629 1638 1630 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) 1639 - in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER); 1631 + MLX5_SET(qpc, qpc, cd_master, 1); 1640 1632 if (qp->flags & MLX5_IB_QP_MANAGED_SEND) 1641 - in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND); 1633 + MLX5_SET(qpc, qpc, cd_slave_send, 1); 1642 1634 if (qp->flags & MLX5_IB_QP_MANAGED_RECV) 1643 - in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV); 1635 + MLX5_SET(qpc, qpc, cd_slave_receive, 1); 1644 1636 1645 1637 if (qp->scat_cqe && is_connected(init_attr->qp_type)) { 1646 1638 int rcqe_sz; ··· 1653 1639 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); 1654 1640 1655 1641 if (rcqe_sz == 128) 1656 - in->ctx.cs_res = 
MLX5_RES_SCAT_DATA64_CQE; 1642 + MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE); 1657 1643 else 1658 - in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE; 1644 + MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE); 1659 1645 1660 1646 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { 1661 1647 if (scqe_sz == 128) 1662 - in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE; 1648 + MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE); 1663 1649 else 1664 - in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE; 1650 + MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE); 1665 1651 } 1666 1652 } 1667 1653 1668 1654 if (qp->rq.wqe_cnt) { 1669 - in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); 1670 - in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; 1655 + MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 1656 + MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 1671 1657 } 1672 1658 1673 - in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); 1659 + MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); 1674 1660 1675 1661 if (qp->sq.wqe_cnt) 1676 - in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); 1662 + MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 1677 1663 else 1678 - in->ctx.sq_crq_size |= cpu_to_be16(0x8000); 1664 + MLX5_SET(qpc, qpc, no_sq, 1); 1679 1665 1680 1666 /* Set default resources */ 1681 1667 switch (init_attr->qp_type) { 1682 1668 case IB_QPT_XRC_TGT: 1683 - in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); 1684 - in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); 1685 - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); 1686 - in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn); 1669 + MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 1670 + MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); 1671 + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 1672 + MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn); 1687 1673 break; 1688 1674 case IB_QPT_XRC_INI: 1689 - 
in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); 1690 - in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); 1691 - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); 1675 + MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 1676 + MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); 1677 + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 1692 1678 break; 1693 1679 default: 1694 1680 if (init_attr->srq) { 1695 - in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn); 1696 - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); 1681 + MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn); 1682 + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); 1697 1683 } else { 1698 - in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); 1699 - in->ctx.rq_type_srqn |= 1700 - cpu_to_be32(to_msrq(devr->s1)->msrq.srqn); 1684 + MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); 1685 + MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); 1701 1686 } 1702 1687 } 1703 1688 1704 1689 if (init_attr->send_cq) 1705 - in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); 1690 + MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); 1706 1691 1707 1692 if (init_attr->recv_cq) 1708 - in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn); 1693 + MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); 1709 1694 1710 - in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); 1695 + MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 1711 1696 1712 - if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) { 1713 - qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 1714 - /* 0xffffff means we ask to work with cqe version 0 */ 1697 + /* 0xffffff means we ask to work with cqe version 0 */ 1698 + if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 1715 1699 MLX5_SET(qpc, qpc, user_index, uidx); 1716 - } 1700 + 1717 1701 /* we use IB_QP_CREATE_IPOIB_UD_LSO to 
indicates ipoib qp */ 1718 1702 if (init_attr->qp_type == IB_QPT_UD && 1719 1703 (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) { 1720 - qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 1721 1704 MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); 1722 1705 qp->flags |= MLX5_IB_QP_LSO; 1723 1706 } ··· 4331 4320 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 4332 4321 struct ib_qp_attr *qp_attr) 4333 4322 { 4334 - struct mlx5_query_qp_mbox_out *outb; 4323 + int outlen = MLX5_ST_SZ_BYTES(query_qp_out); 4335 4324 struct mlx5_qp_context *context; 4336 4325 int mlx5_state; 4326 + u32 *outb; 4337 4327 int err = 0; 4338 4328 4339 - outb = kzalloc(sizeof(*outb), GFP_KERNEL); 4329 + outb = kzalloc(outlen, GFP_KERNEL); 4340 4330 if (!outb) 4341 4331 return -ENOMEM; 4342 4332 4343 - context = &outb->ctx; 4344 4333 err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, 4345 - sizeof(*outb)); 4334 + outlen); 4346 4335 if (err) 4347 4336 goto out; 4337 + 4338 + /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ 4339 + context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc); 4348 4340 4349 4341 mlx5_state = be32_to_cpu(context->flags) >> 28; 4350 4342
+9 -5
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
··· 277 277 static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 278 278 int index, int *is_str) 279 279 { 280 - struct mlx5_query_qp_mbox_out *out; 280 + int outlen = MLX5_ST_SZ_BYTES(query_qp_out); 281 281 struct mlx5_qp_context *ctx; 282 282 u64 param = 0; 283 + u32 *out; 283 284 int err; 284 285 int no_sq; 285 286 286 - out = kzalloc(sizeof(*out), GFP_KERNEL); 287 + out = kzalloc(outlen, GFP_KERNEL); 287 288 if (!out) 288 289 return param; 289 290 290 - err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); 291 + err = mlx5_core_qp_query(dev, qp, out, outlen); 291 292 if (err) { 292 - mlx5_core_warn(dev, "failed to query qp\n"); 293 + mlx5_core_warn(dev, "failed to query qp err=%d\n", err); 293 294 goto out; 294 295 } 295 296 296 297 *is_str = 0; 297 - ctx = &out->ctx; 298 + 299 + /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ 300 + ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc); 301 + 298 302 switch (index) { 299 303 case QP_PID: 300 304 param = qp->pid;
+61 -106
drivers/net/ethernet/mellanox/mlx5/core/qp.c
··· 271 271 272 272 int mlx5_core_create_qp(struct mlx5_core_dev *dev, 273 273 struct mlx5_core_qp *qp, 274 - struct mlx5_create_qp_mbox_in *in, 275 - int inlen) 274 + u32 *in, int inlen) 276 275 { 277 - struct mlx5_create_qp_mbox_out out; 278 - struct mlx5_destroy_qp_mbox_in din; 279 - struct mlx5_destroy_qp_mbox_out dout; 276 + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0}; 277 + u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)]; 278 + u32 din[MLX5_ST_SZ_DW(destroy_qp_in)]; 280 279 int err; 281 280 282 - memset(&out, 0, sizeof(out)); 283 - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); 281 + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); 284 282 285 - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 286 - if (err) { 287 - mlx5_core_warn(dev, "ret %d\n", err); 283 + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 284 + err = err ? : mlx5_cmd_status_to_err_v2(out); 285 + if (err) 288 286 return err; 289 - } 290 287 291 - if (out.hdr.status) { 292 - mlx5_core_warn(dev, "current num of QPs 0x%x\n", 293 - atomic_read(&dev->num_qps)); 294 - return mlx5_cmd_status_to_err(&out.hdr); 295 - } 296 - 297 - qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; 288 + qp->qpn = MLX5_GET(create_qp_out, out, qpn); 298 289 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); 299 290 300 291 err = create_qprqsq_common(dev, qp, MLX5_RES_QP); ··· 302 311 return 0; 303 312 304 313 err_cmd: 305 - memset(&din, 0, sizeof(din)); 306 - memset(&dout, 0, sizeof(dout)); 307 - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); 308 - din.qpn = cpu_to_be32(qp->qpn); 309 - mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); 310 - 314 + memset(din, 0, sizeof(din)); 315 + memset(dout, 0, sizeof(dout)); 316 + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); 317 + MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); 318 + mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); 319 + mlx5_cmd_status_to_err_v2(dout); 311 320 return err; 312 321 } 313 322 
EXPORT_SYMBOL_GPL(mlx5_core_create_qp); ··· 315 324 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, 316 325 struct mlx5_core_qp *qp) 317 326 { 318 - struct mlx5_destroy_qp_mbox_in in; 319 - struct mlx5_destroy_qp_mbox_out out; 327 + u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0}; 328 + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0}; 320 329 int err; 321 330 322 331 mlx5_debug_qp_remove(dev, qp); 323 332 324 333 destroy_qprqsq_common(dev, qp); 325 334 326 - memset(&in, 0, sizeof(in)); 327 - memset(&out, 0, sizeof(out)); 328 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); 329 - in.qpn = cpu_to_be32(qp->qpn); 330 - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 335 + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); 336 + MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); 337 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 338 + err = err ? : mlx5_cmd_status_to_err_v2(out); 331 339 if (err) 332 340 return err; 333 - 334 - if (out.hdr.status) 335 - return mlx5_cmd_status_to_err(&out.hdr); 336 341 337 342 atomic_dec(&dev->num_qps); 338 343 return 0; ··· 369 382 } 370 383 371 384 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 372 - struct mlx5_query_qp_mbox_out *out, int outlen) 385 + u32 *out, int outlen) 373 386 { 374 - struct mlx5_query_qp_mbox_in in; 387 + u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0}; 375 388 int err; 376 389 377 - memset(&in, 0, sizeof(in)); 378 - memset(out, 0, outlen); 379 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); 380 - in.qpn = cpu_to_be32(qp->qpn); 381 - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 382 - if (err) 383 - return err; 390 + MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP); 391 + MLX5_SET(query_qp_in, in, qpn, qp->qpn); 384 392 385 - if (out->hdr.status) 386 - return mlx5_cmd_status_to_err(&out->hdr); 387 - 388 - return err; 393 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 394 + return err ? 
: mlx5_cmd_status_to_err_v2(out); 389 395 } 390 396 EXPORT_SYMBOL_GPL(mlx5_core_qp_query); 391 397 392 398 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) 393 399 { 394 - struct mlx5_alloc_xrcd_mbox_in in; 395 - struct mlx5_alloc_xrcd_mbox_out out; 400 + u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0}; 401 + u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0}; 396 402 int err; 397 403 398 - memset(&in, 0, sizeof(in)); 399 - memset(&out, 0, sizeof(out)); 400 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); 401 - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 402 - if (err) 403 - return err; 404 - 405 - if (out.hdr.status) 406 - err = mlx5_cmd_status_to_err(&out.hdr); 407 - else 408 - *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; 409 - 404 + MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD); 405 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 406 + err = err ? : mlx5_cmd_status_to_err_v2(out); 407 + if (!err) 408 + *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd); 410 409 return err; 411 410 } 412 411 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); 413 412 414 413 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) 415 414 { 416 - struct mlx5_dealloc_xrcd_mbox_in in; 417 - struct mlx5_dealloc_xrcd_mbox_out out; 415 + u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0}; 416 + u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0}; 418 417 int err; 419 418 420 - memset(&in, 0, sizeof(in)); 421 - memset(&out, 0, sizeof(out)); 422 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); 423 - in.xrcdn = cpu_to_be32(xrcdn); 424 - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 425 - if (err) 426 - return err; 427 - 428 - if (out.hdr.status) 429 - err = mlx5_cmd_status_to_err(&out.hdr); 430 - 431 - return err; 419 + MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD); 420 + MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn); 421 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 422 + return err ? 
: mlx5_cmd_status_to_err_v2(out); 432 423 } 433 424 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); 434 425 ··· 414 449 int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, 415 450 u8 flags, int error) 416 451 { 417 - struct mlx5_page_fault_resume_mbox_in in; 418 - struct mlx5_page_fault_resume_mbox_out out; 452 + u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; 453 + u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; 419 454 int err; 420 455 421 - memset(&in, 0, sizeof(in)); 422 - memset(&out, 0, sizeof(out)); 423 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME); 424 - in.hdr.opmod = 0; 425 - flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR | 426 - MLX5_PAGE_FAULT_RESUME_WRITE | 427 - MLX5_PAGE_FAULT_RESUME_RDMA); 428 - flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0); 429 - in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) | 430 - (flags << MLX5_QPN_BITS)); 431 - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 432 - if (err) 433 - return err; 456 + MLX5_SET(page_fault_resume_in, in, opcode, 457 + MLX5_CMD_OP_PAGE_FAULT_RESUME); 434 458 435 - if (out.hdr.status) 436 - err = mlx5_cmd_status_to_err(&out.hdr); 459 + MLX5_SET(page_fault_resume_in, in, qpn, qpn); 437 460 438 - return err; 461 + if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR) 462 + MLX5_SET(page_fault_resume_in, in, req_res, 1); 463 + if (flags & MLX5_PAGE_FAULT_RESUME_WRITE) 464 + MLX5_SET(page_fault_resume_in, in, read_write, 1); 465 + if (flags & MLX5_PAGE_FAULT_RESUME_RDMA) 466 + MLX5_SET(page_fault_resume_in, in, rdma, 1); 467 + if (error) 468 + MLX5_SET(page_fault_resume_in, in, error, 1); 469 + 470 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 471 + return err ? 
: mlx5_cmd_status_to_err_v2(out); 439 472 } 440 473 EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); 441 474 #endif ··· 504 541 505 542 int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) 506 543 { 507 - u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]; 508 - u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)]; 544 + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0}; 545 + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0}; 509 546 int err; 510 - 511 - memset(in, 0, sizeof(in)); 512 - memset(out, 0, sizeof(out)); 513 547 514 548 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); 515 549 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); ··· 519 559 520 560 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) 521 561 { 522 - u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]; 523 - u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)]; 524 - 525 - memset(in, 0, sizeof(in)); 526 - memset(out, 0, sizeof(out)); 562 + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0}; 563 + u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0}; 527 564 528 565 MLX5_SET(dealloc_q_counter_in, in, opcode, 529 566 MLX5_CMD_OP_DEALLOC_Q_COUNTER); ··· 533 576 int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, 534 577 int reset, void *out, int out_size) 535 578 { 536 - u32 in[MLX5_ST_SZ_DW(query_q_counter_in)]; 537 - 538 - memset(in, 0, sizeof(in)); 579 + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0}; 539 580 540 581 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); 541 582 MLX5_SET(query_q_counter_in, in, clear, reset);
+4 -1
include/linux/mlx5/mlx5_ifc.h
··· 1966 1966 u8 reserved_at_3e0[0x8]; 1967 1967 u8 cqn_snd[0x18]; 1968 1968 1969 - u8 reserved_at_400[0x40]; 1969 + u8 reserved_at_400[0x8]; 1970 + u8 deth_sqpn[0x18]; 1971 + 1972 + u8 reserved_at_420[0x20]; 1970 1973 1971 1974 u8 reserved_at_440[0x8]; 1972 1975 u8 last_acked_psn[0x18];
+7 -101
include/linux/mlx5/qp.h
··· 123 123 }; 124 124 125 125 enum { 126 - MLX5_NON_ZERO_RQ = 0 << 24, 127 - MLX5_SRQ_RQ = 1 << 24, 128 - MLX5_CRQ_RQ = 2 << 24, 129 - MLX5_ZERO_LEN_RQ = 3 << 24 126 + MLX5_NON_ZERO_RQ = 0x0, 127 + MLX5_SRQ_RQ = 0x1, 128 + MLX5_CRQ_RQ = 0x2, 129 + MLX5_ZERO_LEN_RQ = 0x3 130 130 }; 131 131 132 + /* TODO REM */ 132 133 enum { 133 134 /* params1 */ 134 135 MLX5_QP_BIT_SRE = 1 << 15, ··· 176 175 MLX5_FENCE_MODE_FENCE = 2 << 5, 177 176 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, 178 177 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, 179 - }; 180 - 181 - enum { 182 - MLX5_QP_LAT_SENSITIVE = 1 << 28, 183 - MLX5_QP_BLOCK_MCAST = 1 << 30, 184 - MLX5_QP_ENABLE_SIG = 1 << 31, 185 178 }; 186 179 187 180 enum { ··· 520 525 u8 rsvd1[24]; 521 526 }; 522 527 523 - struct mlx5_create_qp_mbox_in { 524 - struct mlx5_inbox_hdr hdr; 525 - __be32 input_qpn; 526 - u8 rsvd0[4]; 527 - __be32 opt_param_mask; 528 - u8 rsvd1[4]; 529 - struct mlx5_qp_context ctx; 530 - u8 rsvd3[16]; 531 - __be64 pas[0]; 532 - }; 533 - 534 - struct mlx5_create_qp_mbox_out { 535 - struct mlx5_outbox_hdr hdr; 536 - __be32 qpn; 537 - u8 rsvd0[4]; 538 - }; 539 - 540 - struct mlx5_destroy_qp_mbox_in { 541 - struct mlx5_inbox_hdr hdr; 542 - __be32 qpn; 543 - u8 rsvd0[4]; 544 - }; 545 - 546 - struct mlx5_destroy_qp_mbox_out { 547 - struct mlx5_outbox_hdr hdr; 548 - u8 rsvd0[8]; 549 - }; 550 - 551 528 struct mlx5_modify_qp_mbox_in { 552 529 struct mlx5_inbox_hdr hdr; 553 530 __be32 qpn; ··· 535 568 u8 rsvd0[8]; 536 569 }; 537 570 538 - struct mlx5_query_qp_mbox_in { 539 - struct mlx5_inbox_hdr hdr; 540 - __be32 qpn; 541 - u8 rsvd[4]; 542 - }; 543 - 544 - struct mlx5_query_qp_mbox_out { 545 - struct mlx5_outbox_hdr hdr; 546 - u8 rsvd1[8]; 547 - __be32 optparam; 548 - u8 rsvd0[4]; 549 - struct mlx5_qp_context ctx; 550 - u8 rsvd2[16]; 551 - __be64 pas[0]; 552 - }; 553 - 554 - struct mlx5_conf_sqp_mbox_in { 555 - struct mlx5_inbox_hdr hdr; 556 - __be32 qpn; 557 - u8 rsvd[3]; 558 - u8 type; 559 - }; 560 - 561 - struct 
mlx5_conf_sqp_mbox_out { 562 - struct mlx5_outbox_hdr hdr; 563 - u8 rsvd[8]; 564 - }; 565 - 566 - struct mlx5_alloc_xrcd_mbox_in { 567 - struct mlx5_inbox_hdr hdr; 568 - u8 rsvd[8]; 569 - }; 570 - 571 - struct mlx5_alloc_xrcd_mbox_out { 572 - struct mlx5_outbox_hdr hdr; 573 - __be32 xrcdn; 574 - u8 rsvd[4]; 575 - }; 576 - 577 - struct mlx5_dealloc_xrcd_mbox_in { 578 - struct mlx5_inbox_hdr hdr; 579 - __be32 xrcdn; 580 - u8 rsvd[4]; 581 - }; 582 - 583 - struct mlx5_dealloc_xrcd_mbox_out { 584 - struct mlx5_outbox_hdr hdr; 585 - u8 rsvd[8]; 586 - }; 587 - 588 571 static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) 589 572 { 590 573 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); ··· 545 628 return radix_tree_lookup(&dev->priv.mkey_table.tree, key); 546 629 } 547 630 548 - struct mlx5_page_fault_resume_mbox_in { 549 - struct mlx5_inbox_hdr hdr; 550 - __be32 flags_qpn; 551 - u8 reserved[4]; 552 - }; 553 - 554 - struct mlx5_page_fault_resume_mbox_out { 555 - struct mlx5_outbox_hdr hdr; 556 - u8 rsvd[8]; 557 - }; 558 - 559 631 int mlx5_core_create_qp(struct mlx5_core_dev *dev, 560 632 struct mlx5_core_qp *qp, 561 - struct mlx5_create_qp_mbox_in *in, 633 + u32 *in, 562 634 int inlen); 563 635 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, 564 636 struct mlx5_modify_qp_mbox_in *in, int sqd_event, ··· 555 649 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, 556 650 struct mlx5_core_qp *qp); 557 651 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 558 - struct mlx5_query_qp_mbox_out *out, int outlen); 652 + u32 *out, int outlen); 559 653 560 654 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); 561 655 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);