Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5_tir_icm' into rdma.git for-next

Ariel Levkovich says:

====================
The series exposes the ICM address of the receive transport
interface (TIR) of Raw Packet and RSS QPs to the user, since these
addresses are required to properly create and insert steering rules that
direct flows to these QPs.
====================

For dependencies, this branch is based on the mlx5-next branch from
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

* branch 'mlx5_tir_icm':
IB/mlx5: Expose TIR ICM address to user space
net/mlx5: Introduce new TIR creation core API
net/mlx5: Expose TIR ICM address in command outbox

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

+105 -15
+41 -5
drivers/infiniband/hw/mlx5/qp.c
··· 1403 1403 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, 1404 1404 struct mlx5_ib_rq *rq, u32 tdn, 1405 1405 u32 *qp_flags_en, 1406 - struct ib_pd *pd) 1406 + struct ib_pd *pd, 1407 + u32 *out, int outlen) 1407 1408 { 1408 1409 u8 lb_flag = 0; 1409 1410 u32 *in; ··· 1438 1437 1439 1438 MLX5_SET(tirc, tirc, self_lb_block, lb_flag); 1440 1439 1441 - err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn); 1440 + err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen); 1442 1441 1442 + rq->tirn = MLX5_GET(create_tir_out, out, tirn); 1443 1443 if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { 1444 1444 err = mlx5_ib_enable_lb(dev, false, true); 1445 1445 ··· 1466 1464 int err; 1467 1465 u32 tdn = mucontext->tdn; 1468 1466 u16 uid = to_mpd(pd)->uid; 1467 + u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; 1469 1468 1470 1469 if (qp->sq.wqe_cnt) { 1471 1470 err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd); ··· 1499 1496 if (err) 1500 1497 goto err_destroy_sq; 1501 1498 1502 - err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd); 1499 + err = create_raw_packet_qp_tir( 1500 + dev, rq, tdn, &qp->flags_en, pd, out, 1501 + MLX5_ST_SZ_BYTES(create_tir_out)); 1503 1502 if (err) 1504 1503 goto err_destroy_rq; 1505 1504 ··· 1510 1505 resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN; 1511 1506 resp->tirn = rq->tirn; 1512 1507 resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN; 1508 + if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) { 1509 + resp->tir_icm_addr = MLX5_GET( 1510 + create_tir_out, out, icm_address_31_0); 1511 + resp->tir_icm_addr |= 1512 + (u64)MLX5_GET(create_tir_out, out, 1513 + icm_address_39_32) 1514 + << 32; 1515 + resp->tir_icm_addr |= 1516 + (u64)MLX5_GET(create_tir_out, out, 1517 + icm_address_63_40) 1518 + << 40; 1519 + resp->comp_mask |= 1520 + MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR; 1521 + } 1513 1522 } 1514 1523 } 1515 1524 ··· 1597 1578 udata, struct mlx5_ib_ucontext, ibucontext); 1598 1579 struct 
mlx5_ib_create_qp_resp resp = {}; 1599 1580 int inlen; 1581 + int outlen; 1600 1582 int err; 1601 1583 u32 *in; 1584 + u32 *out; 1602 1585 void *tirc; 1603 1586 void *hfso; 1604 1587 u32 selected_fields = 0; ··· 1680 1659 } 1681 1660 1682 1661 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 1683 - in = kvzalloc(inlen, GFP_KERNEL); 1662 + outlen = MLX5_ST_SZ_BYTES(create_tir_out); 1663 + in = kvzalloc(inlen + outlen, GFP_KERNEL); 1684 1664 if (!in) 1685 1665 return -ENOMEM; 1686 1666 1667 + out = in + MLX5_ST_SZ_DW(create_tir_in); 1687 1668 MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid); 1688 1669 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 1689 1670 MLX5_SET(tirc, tirc, disp_type, ··· 1797 1774 MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields); 1798 1775 1799 1776 create_tir: 1800 - err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn); 1777 + err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen); 1801 1778 1779 + qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn); 1802 1780 if (!err && MLX5_GET(tirc, tirc, self_lb_block)) { 1803 1781 err = mlx5_ib_enable_lb(dev, false, true); 1804 1782 ··· 1814 1790 if (mucontext->devx_uid) { 1815 1791 resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN; 1816 1792 resp.tirn = qp->rss_qp.tirn; 1793 + if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) { 1794 + resp.tir_icm_addr = 1795 + MLX5_GET(create_tir_out, out, icm_address_31_0); 1796 + resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out, 1797 + icm_address_39_32) 1798 + << 32; 1799 + resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out, 1800 + icm_address_63_40) 1801 + << 40; 1802 + resp.comp_mask |= 1803 + MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR; 1804 + } 1817 1805 } 1818 1806 1819 1807 err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
+13 -5
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
··· 182 182 } 183 183 EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state); 184 184 185 + int mlx5_core_create_tir_out(struct mlx5_core_dev *dev, 186 + u32 *in, int inlen, 187 + u32 *out, int outlen) 188 + { 189 + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 190 + 191 + return mlx5_cmd_exec(dev, in, inlen, out, outlen); 192 + } 193 + EXPORT_SYMBOL(mlx5_core_create_tir_out); 194 + 185 195 int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, 186 196 u32 *tirn) 187 197 { 188 - u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0}; 198 + u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; 189 199 int err; 190 200 191 - MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); 192 - 193 - memset(out, 0, sizeof(out)); 194 - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 201 + err = mlx5_core_create_tir_out(dev, in, inlen, 202 + out, sizeof(out)); 195 203 if (!err) 196 204 *tirn = MLX5_GET(create_tir_out, out, tirn); 197 205
+46 -5
include/linux/mlx5/mlx5_ifc.h
··· 81 81 }; 82 82 83 83 enum { 84 + MLX5_OBJ_TYPE_SW_ICM = 0x0008, 85 + }; 86 + 87 + enum { 88 + MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), 89 + }; 90 + 91 + enum { 84 92 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, 85 93 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 86 94 MLX5_CMD_OP_INIT_HCA = 0x102, ··· 365 357 u8 pop_vlan_2[0x1]; 366 358 u8 push_vlan_2[0x1]; 367 359 u8 reformat_and_vlan_action[0x1]; 368 - u8 reserved_at_10[0x2]; 360 + u8 reserved_at_10[0x1]; 361 + u8 sw_owner[0x1]; 369 362 u8 reformat_l3_tunnel_to_l2[0x1]; 370 363 u8 reformat_l2_to_l3_tunnel[0x1]; 371 364 u8 reformat_and_modify_action[0x1]; ··· 779 770 780 771 u8 max_memic_size[0x20]; 781 772 782 - u8 reserved_at_c0[0x740]; 773 + u8 steering_sw_icm_start_address[0x40]; 774 + 775 + u8 reserved_at_100[0x8]; 776 + u8 log_header_modify_sw_icm_size[0x8]; 777 + u8 reserved_at_110[0x2]; 778 + u8 log_sw_icm_alloc_granularity[0x6]; 779 + u8 log_steering_sw_icm_size[0x8]; 780 + 781 + u8 reserved_at_120[0x20]; 782 + 783 + u8 header_modify_sw_icm_start_address[0x40]; 784 + 785 + u8 reserved_at_180[0x680]; 783 786 }; 784 787 785 788 enum { ··· 940 919 941 920 enum { 942 921 MLX5_UCTX_CAP_RAW_TX = 1UL << 0, 922 + MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, 943 923 }; 944 924 945 925 struct mlx5_ifc_cmd_hca_cap_bits { ··· 2942 2920 MLX5_MKC_ACCESS_MODE_MTT = 0x1, 2943 2921 MLX5_MKC_ACCESS_MODE_KLMS = 0x2, 2944 2922 MLX5_MKC_ACCESS_MODE_KSM = 0x3, 2923 + MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4, 2945 2924 MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, 2946 2925 }; 2947 2926 ··· 6897 6874 6898 6875 struct mlx5_ifc_create_tir_out_bits { 6899 6876 u8 status[0x8]; 6900 - u8 reserved_at_8[0x18]; 6877 + u8 icm_address_63_40[0x18]; 6901 6878 6902 6879 u8 syndrome[0x20]; 6903 6880 6904 - u8 reserved_at_40[0x8]; 6881 + u8 icm_address_39_32[0x8]; 6905 6882 u8 tirn[0x18]; 6906 6883 6907 - u8 reserved_at_60[0x20]; 6884 + u8 icm_address_31_0[0x20]; 6908 6885 }; 6909 6886 6910 6887 struct mlx5_ifc_create_tir_in_bits { ··· 9514 9491 u8 
reserved_at_20[0x160]; 9515 9492 }; 9516 9493 9494 + struct mlx5_ifc_sw_icm_bits { 9495 + u8 modify_field_select[0x40]; 9496 + 9497 + u8 reserved_at_40[0x18]; 9498 + u8 log_sw_icm_size[0x8]; 9499 + 9500 + u8 reserved_at_60[0x20]; 9501 + 9502 + u8 sw_icm_start_addr[0x40]; 9503 + 9504 + u8 reserved_at_c0[0x140]; 9505 + }; 9506 + 9517 9507 struct mlx5_ifc_create_umem_in_bits { 9518 9508 u8 opcode[0x10]; 9519 9509 u8 uid[0x10]; ··· 9562 9526 u8 uid[0x10]; 9563 9527 9564 9528 u8 reserved_at_60[0x20]; 9529 + }; 9530 + 9531 + struct mlx5_ifc_create_sw_icm_in_bits { 9532 + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; 9533 + struct mlx5_ifc_sw_icm_bits sw_icm; 9565 9534 }; 9566 9535 9567 9536 struct mlx5_ifc_mtrc_string_db_param_bits {
+3
include/linux/mlx5/transobj.h
··· 50 50 int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); 51 51 int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, 52 52 u32 *tirn); 53 + int mlx5_core_create_tir_out(struct mlx5_core_dev *dev, 54 + u32 *in, int inlen, 55 + u32 *out, int outlen); 53 56 int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, 54 57 int inlen); 55 58 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+2
include/uapi/rdma/mlx5-abi.h
··· 360 360 MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1, 361 361 MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2, 362 362 MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3, 363 + MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR = 1UL << 4, 363 364 }; 364 365 365 366 struct mlx5_ib_create_qp_resp { ··· 372 371 __u32 rqn; 373 372 __u32 sqn; 374 373 __u32 reserved1; 374 + __u64 tir_icm_addr; 375 375 }; 376 376 377 377 struct mlx5_ib_alloc_mw {