Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-updates-2019-07-04-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-update-2019-07-04

This series adds mlx5 support for querying firmware versions via devlink.

1) Implement the required low level firmware commands
2) Implement the devlink knobs and callbacks for the firmware version query.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1067 -381
+19
Documentation/networking/device_drivers/mellanox/mlx5.rst
··· 10 10 ======== 11 11 12 12 - `Enabling the driver and kconfig options`_ 13 + - `Devlink info`_ 13 14 - `Devlink health reporters`_ 14 15 15 16 Enabling the driver and kconfig options ··· 102 101 - CONFIG_VXLAN: When chosen, mlx5 vxaln support will be enabled. 103 102 - CONFIG_MLXFW: When chosen, mlx5 firmware flashing support will be enabled (via devlink and ethtool). 104 103 104 + Devlink info 105 + ============ 106 + 107 + The devlink info reports the running and stored firmware versions on device. 108 + It also prints the device PSID which represents the HCA board type ID. 109 + 110 + User command example:: 111 + 112 + $ devlink dev info pci/0000:00:06.0 113 + pci/0000:00:06.0: 114 + driver mlx5_core 115 + versions: 116 + fixed: 117 + fw.psid MT_0000000009 118 + running: 119 + fw.version 16.26.0100 120 + stored: 121 + fw.version 16.26.0100 105 122 106 123 Devlink health reporters 107 124 ========================
+3 -2
drivers/infiniband/hw/mlx5/cq.c
··· 37 37 #include "mlx5_ib.h" 38 38 #include "srq.h" 39 39 40 - static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) 40 + static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe) 41 41 { 42 42 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; 43 43 ··· 891 891 int entries = attr->cqe; 892 892 int vector = attr->comp_vector; 893 893 struct mlx5_ib_dev *dev = to_mdev(ibdev); 894 + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; 894 895 struct mlx5_ib_cq *cq; 895 896 int uninitialized_var(index); 896 897 int uninitialized_var(inlen); ··· 959 958 if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN) 960 959 MLX5_SET(cqc, cqc, oi, 1); 961 960 962 - err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); 961 + err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out)); 963 962 if (err) 964 963 goto err_cqb; 965 964
+14 -10
drivers/infiniband/hw/mlx5/ib_rep.c
··· 14 14 int vport_index; 15 15 16 16 ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch); 17 - vport_index = ibdev->free_port++; 17 + vport_index = rep->vport_index; 18 18 19 19 ibdev->port[vport_index].rep = rep; 20 + rep->rep_data[REP_IB].priv = ibdev; 20 21 write_lock(&ibdev->port[vport_index].roce.netdev_lock); 21 22 ibdev->port[vport_index].roce.netdev = 22 23 mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport); ··· 29 28 static int 30 29 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) 31 30 { 32 - int num_ports = MLX5_TOTAL_VPORTS(dev); 31 + int num_ports = mlx5_eswitch_get_total_vports(dev); 33 32 const struct mlx5_ib_profile *profile; 34 33 struct mlx5_ib_dev *ibdev; 35 34 int vport_index; ··· 51 50 } 52 51 53 52 ibdev->is_rep = true; 54 - vport_index = ibdev->free_port++; 53 + vport_index = rep->vport_index; 55 54 ibdev->port[vport_index].rep = rep; 56 55 ibdev->port[vport_index].roce.netdev = 57 56 mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport); ··· 69 68 static void 70 69 mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep) 71 70 { 72 - struct mlx5_ib_dev *dev; 71 + struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep); 72 + struct mlx5_ib_port *port; 73 73 74 - if (!rep->rep_data[REP_IB].priv || 75 - rep->vport != MLX5_VPORT_UPLINK) 76 - return; 77 - 78 - dev = mlx5_ib_rep_to_dev(rep); 79 - __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); 74 + port = &dev->port[rep->vport_index]; 75 + write_lock(&port->roce.netdev_lock); 76 + port->roce.netdev = NULL; 77 + write_unlock(&port->roce.netdev_lock); 80 78 rep->rep_data[REP_IB].priv = NULL; 79 + port->rep = NULL; 80 + 81 + if (rep->vport == MLX5_VPORT_UPLINK) 82 + __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); 81 83 } 82 84 83 85 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
+1 -1
drivers/infiniband/hw/mlx5/ib_rep.h
··· 28 28 #else /* CONFIG_MLX5_ESWITCH */ 29 29 static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw) 30 30 { 31 - return SRIOV_NONE; 31 + return MLX5_ESWITCH_NONE; 32 32 } 33 33 34 34 static inline
+2 -2
drivers/infiniband/hw/mlx5/main.c
··· 4492 4492 * lock/unlock above locks Now need to arm all involved CQs. 4493 4493 */ 4494 4494 list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 4495 - mcq->comp(mcq); 4495 + mcq->comp(mcq, NULL); 4496 4496 } 4497 4497 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 4498 4498 } ··· 6814 6814 printk_once(KERN_INFO "%s", mlx5_version); 6815 6815 6816 6816 if (MLX5_ESWITCH_MANAGER(mdev) && 6817 - mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { 6817 + mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) { 6818 6818 if (!mlx5_core_mp_enabled(mdev)) 6819 6819 mlx5_ib_register_vport_reps(mdev); 6820 6820 return mdev;
-1
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 978 978 u16 devx_whitelist_uid; 979 979 struct mlx5_srq_table srq_table; 980 980 struct mlx5_async_ctx async_ctx; 981 - int free_port; 982 981 }; 983 982 984 983 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
+1 -1
drivers/infiniband/hw/mlx5/odp.c
··· 1558 1558 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; 1559 1559 param = (struct mlx5_eq_param) { 1560 1560 .irq_index = 0, 1561 - .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT, 1562 1561 .nent = MLX5_IB_NUM_PF_EQE, 1563 1562 }; 1563 + param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; 1564 1564 eq->core = mlx5_eq_create_generic(dev->mdev, &param); 1565 1565 if (IS_ERR(eq->core)) { 1566 1566 err = PTR_ERR(eq->core);
+1 -1
drivers/infiniband/hw/mlx5/qp.c
··· 6297 6297 /* Run the CQ handler - this makes sure that the drain WR will 6298 6298 * be processed if wasn't processed yet. 6299 6299 */ 6300 - mcq->mcq.comp(&mcq->mcq); 6300 + mcq->mcq.comp(&mcq->mcq, NULL); 6301 6301 } 6302 6302 6303 6303 wait_for_completion(&sdrain->done);
+8 -13
drivers/net/ethernet/mellanox/mlx5/core/cq.c
··· 58 58 list_for_each_entry_safe(mcq, temp, &ctx->process_list, 59 59 tasklet_ctx.list) { 60 60 list_del_init(&mcq->tasklet_ctx.list); 61 - mcq->tasklet_ctx.comp(mcq); 61 + mcq->tasklet_ctx.comp(mcq, NULL); 62 62 mlx5_cq_put(mcq); 63 63 if (time_after(jiffies, end)) 64 64 break; ··· 68 68 tasklet_schedule(&ctx->task); 69 69 } 70 70 71 - static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) 71 + static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, 72 + struct mlx5_eqe *eqe) 72 73 { 73 74 unsigned long flags; 74 75 struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; ··· 88 87 } 89 88 90 89 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 91 - u32 *in, int inlen) 90 + u32 *in, int inlen, u32 *out, int outlen) 92 91 { 93 92 int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); 94 93 u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; 95 - u32 out[MLX5_ST_SZ_DW(create_cq_out)]; 96 94 u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; 97 95 struct mlx5_eq_comp *eq; 98 96 int err; ··· 100 100 if (IS_ERR(eq)) 101 101 return PTR_ERR(eq); 102 102 103 - memset(out, 0, sizeof(out)); 103 + memset(out, 0, outlen); 104 104 MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); 105 - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 105 + err = mlx5_cmd_exec(dev, in, inlen, out, outlen); 106 106 if (err) 107 107 return err; 108 108 ··· 158 158 u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; 159 159 int err; 160 160 161 - err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); 162 - if (err) 163 - return err; 164 - 165 - err = mlx5_eq_del_cq(&cq->eq->core, cq); 166 - if (err) 167 - return err; 161 + mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); 162 + mlx5_eq_del_cq(&cq->eq->core, cq); 168 163 169 164 MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); 170 165 MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+8 -1
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 311 311 /* Must be called with intf_mutex held */ 312 312 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) 313 313 { 314 - u32 pci_id = mlx5_gen_pci_id(dev); 315 314 struct mlx5_core_dev *res = NULL; 316 315 struct mlx5_core_dev *tmp_dev; 317 316 struct mlx5_priv *priv; 317 + u32 pci_id; 318 318 319 + if (!mlx5_core_is_pf(dev)) 320 + return NULL; 321 + 322 + pci_id = mlx5_gen_pci_id(dev); 319 323 list_for_each_entry(priv, &mlx5_dev_list, dev_list) { 320 324 tmp_dev = container_of(priv, struct mlx5_core_dev, priv); 325 + if (!mlx5_core_is_pf(tmp_dev)) 326 + continue; 327 + 321 328 if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) { 322 329 res = tmp_dev; 323 330 break;
+60
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
··· 25 25 return mlx5_firmware_flash(dev, fw, extack); 26 26 } 27 27 28 + static u8 mlx5_fw_ver_major(u32 version) 29 + { 30 + return (version >> 24) & 0xff; 31 + } 32 + 33 + static u8 mlx5_fw_ver_minor(u32 version) 34 + { 35 + return (version >> 16) & 0xff; 36 + } 37 + 38 + static u16 mlx5_fw_ver_subminor(u32 version) 39 + { 40 + return version & 0xffff; 41 + } 42 + 43 + #define DEVLINK_FW_STRING_LEN 32 44 + 45 + static int 46 + mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, 47 + struct netlink_ext_ack *extack) 48 + { 49 + struct mlx5_core_dev *dev = devlink_priv(devlink); 50 + char version_str[DEVLINK_FW_STRING_LEN]; 51 + u32 running_fw, stored_fw; 52 + int err; 53 + 54 + err = devlink_info_driver_name_put(req, DRIVER_NAME); 55 + if (err) 56 + return err; 57 + 58 + err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id); 59 + if (err) 60 + return err; 61 + 62 + err = mlx5_fw_version_query(dev, &running_fw, &stored_fw); 63 + if (err) 64 + return err; 65 + 66 + snprintf(version_str, sizeof(version_str), "%d.%d.%04d", 67 + mlx5_fw_ver_major(running_fw), mlx5_fw_ver_minor(running_fw), 68 + mlx5_fw_ver_subminor(running_fw)); 69 + err = devlink_info_version_running_put(req, "fw.version", version_str); 70 + if (err) 71 + return err; 72 + 73 + /* no pending version, return running (stored) version */ 74 + if (stored_fw == 0) 75 + stored_fw = running_fw; 76 + 77 + snprintf(version_str, sizeof(version_str), "%d.%d.%04d", 78 + mlx5_fw_ver_major(stored_fw), mlx5_fw_ver_minor(stored_fw), 79 + mlx5_fw_ver_subminor(stored_fw)); 80 + err = devlink_info_version_stored_put(req, "fw.version", version_str); 81 + if (err) 82 + return err; 83 + 84 + return 0; 85 + } 86 + 28 87 static const struct devlink_ops mlx5_devlink_ops = { 29 88 #ifdef CONFIG_MLX5_ESWITCH 30 89 .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, ··· 94 35 .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get, 95 36 #endif 96 37 .flash_update = 
mlx5_devlink_flash_update, 38 + .info_get = mlx5_devlink_info_get, 97 39 }; 98 40 99 41 struct devlink *mlx5_devlink_alloc(void)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 880 880 struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more); 881 881 882 882 void mlx5e_trigger_irq(struct mlx5e_icosq *sq); 883 - void mlx5e_completion_event(struct mlx5_core_cq *mcq); 883 + void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe); 884 884 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); 885 885 int mlx5e_napi_poll(struct napi_struct *napi, int budget); 886 886 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 680 680 681 681 memset(perm_addr, 0xff, MAX_ADDR_LEN); 682 682 683 - mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr); 683 + mlx5_query_mac_address(priv->mdev, perm_addr); 684 684 } 685 685 686 686 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1551 1551 1552 1552 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) 1553 1553 { 1554 + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; 1554 1555 struct mlx5_core_dev *mdev = cq->mdev; 1555 1556 struct mlx5_core_cq *mcq = &cq->mcq; 1556 1557 ··· 1586 1585 MLX5_ADAPTER_PAGE_SHIFT); 1587 1586 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); 1588 1587 1589 - err = mlx5_core_create_cq(mdev, mcq, in, inlen); 1588 + err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); 1590 1589 1591 1590 kvfree(in); 1592 1591 ··· 4791 4790 { 4792 4791 struct mlx5e_priv *priv = netdev_priv(netdev); 4793 4792 4794 - mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr); 4793 + mlx5_query_mac_address(priv->mdev, netdev->dev_addr); 4795 4794 if (is_zero_ether_addr(netdev->dev_addr) && 4796 4795 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { 4797 4796 eth_hw_addr_random(netdev); ··· 5357 5356 5358 5357 #ifdef CONFIG_MLX5_ESWITCH 5359 5358 if (MLX5_ESWITCH_MANAGER(mdev) && 5360 - mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { 5359 + mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) { 5361 5360 mlx5e_rep_register_vport_reps(mdev); 5362 5361 return mdev; 5363 5362 }
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 398 398 priv = netdev_priv(dev); 399 399 esw = priv->mdev->priv.eswitch; 400 400 401 - if (esw->mode == SRIOV_NONE) 401 + if (esw->mode == MLX5_ESWITCH_NONE) 402 402 return -EOPNOTSUPP; 403 403 404 404 parent_id = mlx5_query_nic_system_image_guid(priv->mdev); ··· 414 414 struct mlx5e_rep_sq *rep_sq, *tmp; 415 415 struct mlx5e_rep_priv *rpriv; 416 416 417 - if (esw->mode != SRIOV_OFFLOADS) 417 + if (esw->mode != MLX5_ESWITCH_OFFLOADS) 418 418 return; 419 419 420 420 rpriv = mlx5e_rep_to_rep_priv(rep); ··· 435 435 int err; 436 436 int i; 437 437 438 - if (esw->mode != SRIOV_OFFLOADS) 438 + if (esw->mode != MLX5_ESWITCH_OFFLOADS) 439 439 return 0; 440 440 441 441 rpriv = mlx5e_rep_to_rep_priv(rep); ··· 1392 1392 SET_NETDEV_DEV(netdev, mdev->device); 1393 1393 netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; 1394 1394 /* we want a persistent mac for the uplink rep */ 1395 - mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr); 1395 + mlx5_query_mac_address(mdev, netdev->dev_addr); 1396 1396 netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops; 1397 1397 #ifdef CONFIG_MLX5_CORE_EN_DCB 1398 1398 if (MLX5_CAP_GEN(mdev, qos))
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 3372 3372 if (!tc_can_offload_extack(priv->netdev, f->common.extack)) 3373 3373 return -EOPNOTSUPP; 3374 3374 3375 - if (esw && esw->mode == SRIOV_OFFLOADS) 3375 + if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) 3376 3376 err = mlx5e_add_fdb_flow(priv, f, flow_flags, 3377 3377 filter_dev, flow); 3378 3378 else
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
··· 168 168 return work_done; 169 169 } 170 170 171 - void mlx5e_completion_event(struct mlx5_core_cq *mcq) 171 + void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) 172 172 { 173 173 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); 174 174
+44 -19
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 153 153 cq = mlx5_eq_cq_get(eq, cqn); 154 154 if (likely(cq)) { 155 155 ++cq->arm_sn; 156 - cq->comp(cq); 156 + cq->comp(cq, eqe); 157 157 mlx5_cq_put(cq); 158 158 } else { 159 159 mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); ··· 256 256 int inlen; 257 257 u32 *in; 258 258 int err; 259 + int i; 259 260 260 261 /* Init CQ table */ 261 262 memset(cq_table, 0, sizeof(*cq_table)); ··· 284 283 mlx5_fill_page_array(&eq->buf, pas); 285 284 286 285 MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); 287 - if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx)) 286 + if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx)) 288 287 MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID); 289 288 290 - MLX5_SET64(create_eq_in, in, event_bitmask, param->mask); 289 + for (i = 0; i < 4; i++) 290 + MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i, 291 + param->mask[i]); 291 292 292 293 eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); 293 294 MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); ··· 392 389 return err; 393 390 } 394 391 395 - int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) 392 + void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) 396 393 { 397 394 struct mlx5_cq_table *table = &eq->cq_table; 398 395 struct mlx5_core_cq *tmp; ··· 402 399 spin_unlock(&table->lock); 403 400 404 401 if (!tmp) { 405 - mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn); 406 - return -ENOENT; 402 + mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", 403 + eq->eqn, cq->cqn); 404 + return; 407 405 } 408 406 409 - if (tmp != cq) { 410 - mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn); 411 - return -EINVAL; 412 - } 413 - 414 - return 0; 407 + if (tmp != cq) 408 + mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", 409 + eq->eqn, cq->cqn); 415 410 } 416 411 417 412 int mlx5_eq_table_init(struct mlx5_core_dev *dev) ··· 503 502 return NOTIFY_OK; 
504 503 } 505 504 506 - cq->event(cq, type); 505 + if (cq->event) 506 + cq->event(cq, type); 507 507 508 508 mlx5_cq_put(cq); 509 509 510 510 return NOTIFY_OK; 511 511 } 512 512 513 - static u64 gather_async_events_mask(struct mlx5_core_dev *dev) 513 + static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4]) 514 + { 515 + __be64 *user_unaffiliated_events; 516 + __be64 *user_affiliated_events; 517 + int i; 518 + 519 + user_affiliated_events = 520 + MLX5_CAP_DEV_EVENT(dev, user_affiliated_events); 521 + user_unaffiliated_events = 522 + MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events); 523 + 524 + for (i = 0; i < 4; i++) 525 + mask[i] |= be64_to_cpu(user_affiliated_events[i] | 526 + user_unaffiliated_events[i]); 527 + } 528 + 529 + static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) 514 530 { 515 531 u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; 516 532 ··· 564 546 async_event_mask |= 565 547 (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED); 566 548 567 - return async_event_mask; 549 + mask[0] = async_event_mask; 550 + 551 + if (MLX5_CAP_GEN(dev, event_cap)) 552 + gather_user_async_events(dev, mask); 568 553 } 569 554 570 555 static int create_async_eqs(struct mlx5_core_dev *dev) ··· 582 561 table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int; 583 562 param = (struct mlx5_eq_param) { 584 563 .irq_index = 0, 585 - .mask = 1ull << MLX5_EVENT_TYPE_CMD, 586 564 .nent = MLX5_NUM_CMD_EQE, 587 565 }; 566 + 567 + param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD; 588 568 err = create_async_eq(dev, &table->cmd_eq.core, &param); 589 569 if (err) { 590 570 mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); ··· 601 579 table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int; 602 580 param = (struct mlx5_eq_param) { 603 581 .irq_index = 0, 604 - .mask = gather_async_events_mask(dev), 605 582 .nent = MLX5_NUM_ASYNC_EQE, 606 583 }; 584 + 585 + gather_async_events_mask(dev, param.mask); 607 586 err = create_async_eq(dev, 
&table->async_eq.core, &param); 608 587 if (err) { 609 588 mlx5_core_warn(dev, "failed to create async EQ %d\n", err); ··· 620 597 table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int; 621 598 param = (struct mlx5_eq_param) { 622 599 .irq_index = 0, 623 - .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, 624 600 .nent = /* TODO: sriov max_vf + */ 1, 625 601 }; 602 + 603 + param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST; 626 604 err = create_async_eq(dev, &table->pages_eq.core, &param); 627 605 if (err) { 628 606 mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); ··· 815 791 eq->irq_nb.notifier_call = mlx5_eq_comp_int; 816 792 param = (struct mlx5_eq_param) { 817 793 .irq_index = vecidx, 818 - .mask = 0, 819 794 .nent = nent, 820 795 }; 821 796 err = create_map_eq(dev, &eq->core, &param); ··· 950 927 951 928 return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb); 952 929 } 930 + EXPORT_SYMBOL(mlx5_eq_notifier_register); 953 931 954 932 int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb) 955 933 { ··· 961 937 962 938 return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb); 963 939 } 940 + EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
+117 -64
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 497 497 498 498 fdb_add: 499 499 /* SRIOV is enabled: Forward UC MAC to vport */ 500 - if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY) 500 + if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) 501 501 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 502 502 503 503 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", ··· 897 897 struct mlx5_eswitch *esw = dev->priv.eswitch; 898 898 u8 mac[ETH_ALEN]; 899 899 900 - mlx5_query_nic_vport_mac_address(dev, vport->vport, mac); 900 + mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac); 901 901 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", 902 902 vport->vport, mac); 903 903 ··· 1553 1553 struct mlx5_vport *vport) 1554 1554 { 1555 1555 u16 vport_num = vport->vport; 1556 + int flags; 1556 1557 1557 1558 if (esw->manager_vport == vport_num) 1558 1559 return; ··· 1571 1570 vport->info.node_guid); 1572 1571 } 1573 1572 1573 + flags = (vport->info.vlan || vport->info.qos) ? 1574 + SET_VLAN_STRIP | SET_VLAN_INSERT : 0; 1574 1575 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos, 1575 - (vport->info.vlan || vport->info.qos)); 1576 + flags); 1576 1577 1577 1578 /* Only legacy mode needs ACLs */ 1578 - if (esw->mode == SRIOV_LEGACY) { 1579 + if (esw->mode == MLX5_ESWITCH_LEGACY) { 1579 1580 esw_vport_ingress_config(esw, vport); 1580 1581 esw_vport_egress_config(esw, vport); 1581 1582 } ··· 1629 1626 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 1630 1627 1631 1628 /* Create steering drop counters for ingress and egress ACLs */ 1632 - if (vport_num && esw->mode == SRIOV_LEGACY) 1629 + if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY) 1633 1630 esw_vport_create_drop_counters(vport); 1634 1631 1635 1632 /* Restore old vport configuration */ ··· 1683 1680 vport->enabled_events = 0; 1684 1681 esw_vport_disable_qos(esw, vport); 1685 1682 if (esw->manager_vport != vport_num && 1686 - esw->mode == SRIOV_LEGACY) { 1683 + 
esw->mode == MLX5_ESWITCH_LEGACY) { 1687 1684 mlx5_modify_vport_admin_state(esw->dev, 1688 1685 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, 1689 1686 vport_num, 1, ··· 1715 1712 return NOTIFY_OK; 1716 1713 } 1717 1714 1718 - int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen) 1715 + /** 1716 + * mlx5_esw_query_functions - Returns raw output about functions state 1717 + * @dev: Pointer to device to query 1718 + * 1719 + * mlx5_esw_query_functions() allocates and returns functions changed 1720 + * raw output memory pointer from device on success. Otherwise returns ERR_PTR. 1721 + * Caller must free the memory using kvfree() when valid pointer is returned. 1722 + */ 1723 + const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) 1719 1724 { 1725 + int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out); 1720 1726 u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {}; 1727 + u32 *out; 1728 + int err; 1729 + 1730 + out = kvzalloc(outlen, GFP_KERNEL); 1731 + if (!out) 1732 + return ERR_PTR(-ENOMEM); 1721 1733 1722 1734 MLX5_SET(query_esw_functions_in, in, opcode, 1723 1735 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS); 1724 1736 1725 - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 1737 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 1738 + if (!err) 1739 + return out; 1740 + 1741 + kvfree(out); 1742 + return ERR_PTR(err); 1743 + } 1744 + 1745 + static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) 1746 + { 1747 + MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); 1748 + mlx5_eq_notifier_register(esw->dev, &esw->nb); 1749 + 1750 + if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) { 1751 + MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler, 1752 + ESW_FUNCTIONS_CHANGED); 1753 + mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb); 1754 + } 1755 + } 1756 + 1757 + static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) 1758 + { 1759 + if (esw->mode 
== MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) 1760 + mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); 1761 + 1762 + mlx5_eq_notifier_unregister(esw->dev, &esw->nb); 1763 + 1764 + flush_workqueue(esw->work_queue); 1726 1765 } 1727 1766 1728 1767 /* Public E-Switch API */ 1729 1768 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) 1730 1769 1731 - int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) 1770 + int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) 1732 1771 { 1733 1772 struct mlx5_vport *vport; 1734 - int total_nvports = 0; 1735 1773 int err; 1736 1774 int i, enabled_events; 1737 1775 1738 1776 if (!ESW_ALLOWED(esw) || 1739 1777 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1740 - esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); 1778 + esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); 1741 1779 return -EOPNOTSUPP; 1742 1780 } 1743 1781 1744 1782 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) 1745 - esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n"); 1783 + esw_warn(esw->dev, "ingress ACL is not supported by FW\n"); 1746 1784 1747 1785 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) 1748 - esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n"); 1749 - 1750 - esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); 1751 - 1752 - if (mode == SRIOV_OFFLOADS) { 1753 - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) 1754 - total_nvports = esw->total_vports; 1755 - else 1756 - total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev); 1757 - } 1786 + esw_warn(esw->dev, "engress ACL is not supported by FW\n"); 1758 1787 1759 1788 esw->mode = mode; 1760 1789 1761 1790 mlx5_lag_update(esw->dev); 1762 1791 1763 - if (mode == SRIOV_LEGACY) { 1792 + if (mode == MLX5_ESWITCH_LEGACY) { 1764 1793 err = esw_create_legacy_table(esw); 1765 1794 if (err) 1766 1795 goto abort; 1767 1796 } else { 1768 1797 
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); 1769 1798 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); 1770 - err = esw_offloads_init(esw, nvfs, total_nvports); 1799 + err = esw_offloads_init(esw); 1771 1800 } 1772 1801 1773 1802 if (err) ··· 1809 1774 if (err) 1810 1775 esw_warn(esw->dev, "Failed to create eswitch TSAR"); 1811 1776 1812 - /* Don't enable vport events when in SRIOV_OFFLOADS mode, since: 1813 - * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode 1814 - * 2. FDB/Eswitch is programmed by user space tools 1815 - */ 1816 - enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0; 1777 + enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS : 1778 + UC_ADDR_CHANGE; 1817 1779 1818 1780 /* Enable PF vport */ 1819 1781 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); ··· 1823 1791 } 1824 1792 1825 1793 /* Enable VF vports */ 1826 - mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) 1794 + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 1827 1795 esw_enable_vport(esw, vport, enabled_events); 1828 1796 1829 - if (mode == SRIOV_LEGACY) { 1830 - MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); 1831 - mlx5_eq_notifier_register(esw->dev, &esw->nb); 1832 - } 1797 + mlx5_eswitch_event_handlers_register(esw); 1833 1798 1834 - esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", 1835 - esw->enabled_vports); 1799 + esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", 1800 + mode == MLX5_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", 1801 + esw->esw_funcs.num_vfs, esw->enabled_vports); 1802 + 1836 1803 return 0; 1837 1804 1838 1805 abort: 1839 - esw->mode = SRIOV_NONE; 1806 + esw->mode = MLX5_ESWITCH_NONE; 1840 1807 1841 - if (mode == SRIOV_OFFLOADS) { 1808 + if (mode == MLX5_ESWITCH_OFFLOADS) { 1842 1809 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); 1843 1810 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); 1844 1811 } ··· 1845 1814 return err; 1846 1815 } 1847 1816 1848 - void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) 1817 + void mlx5_eswitch_disable(struct mlx5_eswitch *esw) 1849 1818 { 1850 1819 struct esw_mc_addr *mc_promisc; 1851 1820 struct mlx5_vport *vport; 1852 1821 int old_mode; 1853 1822 int i; 1854 1823 1855 - if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE) 1824 + if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE) 1856 1825 return; 1857 1826 1858 - esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", 1859 - esw->enabled_vports, esw->mode); 1827 + esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", 1828 + esw->mode == MLX5_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", 1829 + esw->esw_funcs.num_vfs, esw->enabled_vports); 1860 1830 1861 1831 mc_promisc = &esw->mc_promisc; 1862 - 1863 - if (esw->mode == SRIOV_LEGACY) 1864 - mlx5_eq_notifier_unregister(esw->dev, &esw->nb); 1832 + mlx5_eswitch_event_handlers_unregister(esw); 1865 1833 1866 1834 mlx5_esw_for_all_vports(esw, i, vport) 1867 1835 esw_disable_vport(esw, vport); ··· 1870 1840 1871 1841 esw_destroy_tsar(esw); 1872 1842 1873 - if (esw->mode == SRIOV_LEGACY) 1843 + if (esw->mode == MLX5_ESWITCH_LEGACY) 1874 1844 esw_destroy_legacy_table(esw); 1875 - else if (esw->mode == SRIOV_OFFLOADS) 1845 + else if (esw->mode == MLX5_ESWITCH_OFFLOADS) 1876 1846 esw_offloads_cleanup(esw); 1877 1847 1878 1848 old_mode = esw->mode; 1879 - esw->mode = SRIOV_NONE; 1849 + esw->mode = MLX5_ESWITCH_NONE; 1880 1850 1881 1851 mlx5_lag_update(esw->dev); 1882 1852 1883 - if (old_mode == SRIOV_OFFLOADS) { 1853 + if (old_mode == MLX5_ESWITCH_OFFLOADS) { 1884 1854 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); 1885 1855 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); 1886 1856 } ··· 1888 1858 1889 1859 int mlx5_eswitch_init(struct mlx5_core_dev *dev) 1890 1860 { 1891 - int total_vports = MLX5_TOTAL_VPORTS(dev); 1892 1861 struct mlx5_eswitch *esw; 1893 1862 struct mlx5_vport *vport; 1863 + int total_vports; 1894 1864 int err, i; 1895 1865 1896 1866 if (!MLX5_VPORT_MANAGER(dev)) 1897 1867 return 0; 1868 + 1869 + total_vports = mlx5_eswitch_get_total_vports(dev); 1898 1870 1899 1871 esw_info(dev, 1900 1872 "Total vports %d, per vport: max uc(%d) max mc(%d)\n", ··· 1910 1878 1911 1879 esw->dev = dev; 1912 1880 esw->manager_vport = mlx5_eswitch_manager_vport(dev); 1881 + esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev); 1913 1882 1914 1883 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); 1915 1884 if (!esw->work_queue) { ··· 1944 1911 } 1945 1912 1946 1913 esw->enabled_vports = 0; 1947 - esw->mode = SRIOV_NONE; 1914 + esw->mode = 
MLX5_ESWITCH_NONE; 1948 1915 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; 1949 1916 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && 1950 1917 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) ··· 2014 1981 2015 1982 ether_addr_copy(evport->info.mac, mac); 2016 1983 evport->info.node_guid = node_guid; 2017 - if (evport->enabled && esw->mode == SRIOV_LEGACY) 1984 + if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) 2018 1985 err = esw_vport_ingress_config(esw, evport); 2019 1986 2020 1987 unlock: ··· 2098 2065 2099 2066 evport->info.vlan = vlan; 2100 2067 evport->info.qos = qos; 2101 - if (evport->enabled && esw->mode == SRIOV_LEGACY) { 2068 + if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) { 2102 2069 err = esw_vport_ingress_config(esw, evport); 2103 2070 if (err) 2104 2071 goto unlock; ··· 2140 2107 mlx5_core_warn(esw->dev, 2141 2108 "Spoofchk in set while MAC is invalid, vport(%d)\n", 2142 2109 evport->vport); 2143 - if (evport->enabled && esw->mode == SRIOV_LEGACY) 2110 + if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) 2144 2111 err = esw_vport_ingress_config(esw, evport); 2145 2112 if (err) 2146 2113 evport->info.spoofchk = pschk; ··· 2236 2203 return -EPERM; 2237 2204 2238 2205 mutex_lock(&esw->state_lock); 2239 - if (esw->mode != SRIOV_LEGACY) { 2206 + if (esw->mode != MLX5_ESWITCH_LEGACY) { 2240 2207 err = -EOPNOTSUPP; 2241 2208 goto out; 2242 2209 } ··· 2259 2226 return -EPERM; 2260 2227 2261 2228 mutex_lock(&esw->state_lock); 2262 - if (esw->mode != SRIOV_LEGACY) { 2229 + if (esw->mode != MLX5_ESWITCH_LEGACY) { 2263 2230 err = -EOPNOTSUPP; 2264 2231 goto out; 2265 2232 } ··· 2402 2369 u64 bytes = 0; 2403 2370 int err = 0; 2404 2371 2405 - if (!vport->enabled || esw->mode != SRIOV_LEGACY) 2372 + if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY) 2406 2373 return 0; 2407 2374 2408 2375 if (vport->egress.drop_counter) ··· 2512 2479 2513 2480 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) 2514 2481 { 2515 - return ESW_ALLOWED(esw) ? 
esw->mode : SRIOV_NONE; 2482 + return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE; 2516 2483 } 2517 2484 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); 2518 2485 ··· 2529 2496 2530 2497 bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) 2531 2498 { 2532 - if ((dev0->priv.eswitch->mode == SRIOV_NONE && 2533 - dev1->priv.eswitch->mode == SRIOV_NONE) || 2534 - (dev0->priv.eswitch->mode == SRIOV_OFFLOADS && 2535 - dev1->priv.eswitch->mode == SRIOV_OFFLOADS)) 2499 + if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE && 2500 + dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) || 2501 + (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && 2502 + dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS)) 2536 2503 return true; 2537 2504 2538 2505 return false; ··· 2541 2508 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, 2542 2509 struct mlx5_core_dev *dev1) 2543 2510 { 2544 - return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS && 2545 - dev1->priv.eswitch->mode == SRIOV_OFFLOADS); 2511 + return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && 2512 + dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); 2513 + } 2514 + 2515 + void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) 2516 + { 2517 + const u32 *out; 2518 + 2519 + WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE); 2520 + 2521 + if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { 2522 + esw->esw_funcs.num_vfs = num_vfs; 2523 + return; 2524 + } 2525 + 2526 + out = mlx5_esw_query_functions(esw->dev); 2527 + if (IS_ERR(out)) 2528 + return; 2529 + 2530 + esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, 2531 + host_params_context.host_num_of_vfs); 2532 + kvfree(out); 2546 2533 }
+40 -11
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 229 229 int mode; 230 230 int nvports; 231 231 u16 manager_vport; 232 + u16 first_host_vport; 232 233 struct mlx5_esw_functions esw_funcs; 233 234 }; 234 235 235 236 void esw_offloads_cleanup(struct mlx5_eswitch *esw); 236 - int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, 237 - int total_nvports); 237 + int esw_offloads_init(struct mlx5_eswitch *esw); 238 238 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); 239 239 int esw_offloads_init_reps(struct mlx5_eswitch *esw); 240 240 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, ··· 255 255 /* E-Switch API */ 256 256 int mlx5_eswitch_init(struct mlx5_core_dev *dev); 257 257 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); 258 - int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode); 259 - void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); 258 + int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode); 259 + void mlx5_eswitch_disable(struct mlx5_eswitch *esw); 260 260 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 261 261 u16 vport, u8 mac[ETH_ALEN]); 262 262 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, ··· 392 392 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, 393 393 struct netlink_ext_ack *extack); 394 394 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); 395 - int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); 395 + int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode); 396 396 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, 397 397 enum devlink_eswitch_encap_mode encap, 398 398 struct netlink_ext_ack *extack); ··· 425 425 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, 426 426 struct mlx5_core_dev *dev1); 427 427 428 - int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen); 428 + const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev); 429 429 430 430 #define 
MLX5_DEBUG_ESWITCH_MASK BIT(3) 431 431 ··· 443 443 { 444 444 return mlx5_core_is_ecpf_esw_manager(dev) ? 445 445 MLX5_VPORT_ECPF : MLX5_VPORT_PF; 446 + } 447 + 448 + static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) 449 + { 450 + return mlx5_core_is_ecpf_esw_manager(dev) ? 451 + MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF; 446 452 } 447 453 448 454 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) ··· 547 541 #define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \ 548 542 for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) 549 543 544 + /* Includes host PF (vport 0) if it's not esw manager. */ 545 + #define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \ 546 + for ((i) = (esw)->first_host_vport; \ 547 + (rep) = &(esw)->offloads.vport_reps[i], \ 548 + (i) <= (nvfs); (i)++) 549 + 550 + #define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \ 551 + for ((i) = (nvfs); \ 552 + (rep) = &(esw)->offloads.vport_reps[i], \ 553 + (i) >= (esw)->first_host_vport; (i)--) 554 + 555 + #define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \ 556 + for ((vport) = (esw)->first_host_vport; \ 557 + (vport) <= (nvfs); (vport)++) 558 + 559 + #define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \ 560 + for ((vport) = (nvfs); \ 561 + (vport) >= (esw)->first_host_vport; (vport)--) 562 + 550 563 struct mlx5_vport *__must_check 551 564 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); 552 565 553 566 bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); 554 567 568 + void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); 569 + int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); 570 + 555 571 #else /* CONFIG_MLX5_ESWITCH */ 556 572 /* eswitch API stubs */ 557 573 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } 558 574 static inline void 
mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} 559 - static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; } 560 - static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {} 575 + static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; } 576 + static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} 561 577 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } 562 578 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } 563 - static inline int 564 - mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen) 579 + static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) 565 580 { 566 - return -EOPNOTSUPP; 581 + return ERR_PTR(-EOPNOTSUPP); 567 582 } 583 + 584 + static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} 568 585 569 586 #define FDB_MAX_CHAIN 1 570 587 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
+109 -130
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 147 147 struct mlx5_flow_table *fdb; 148 148 int j, i = 0; 149 149 150 - if (esw->mode != SRIOV_OFFLOADS) 150 + if (esw->mode != MLX5_ESWITCH_OFFLOADS) 151 151 return ERR_PTR(-EOPNOTSUPP); 152 152 153 153 flow_act.action = attr->action; ··· 357 357 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) 358 358 { 359 359 struct mlx5_eswitch_rep *rep; 360 - int vf_vport, err = 0; 360 + int i, err = 0; 361 361 362 362 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); 363 - for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { 364 - rep = &esw->offloads.vport_reps[vf_vport]; 363 + mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) { 365 364 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) 366 365 continue; 367 366 ··· 1369 1370 static int esw_offloads_start(struct mlx5_eswitch *esw, 1370 1371 struct netlink_ext_ack *extack) 1371 1372 { 1372 - int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; 1373 + int err, err1; 1373 1374 1374 - if (esw->mode != SRIOV_LEGACY && 1375 + if (esw->mode != MLX5_ESWITCH_LEGACY && 1375 1376 !mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1376 1377 NL_SET_ERR_MSG_MOD(extack, 1377 1378 "Can't set offloads mode, SRIOV legacy not enabled"); 1378 1379 return -EINVAL; 1379 1380 } 1380 1381 1381 - mlx5_eswitch_disable_sriov(esw); 1382 - err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); 1382 + mlx5_eswitch_disable(esw); 1383 + mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); 1384 + err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); 1383 1385 if (err) { 1384 1386 NL_SET_ERR_MSG_MOD(extack, 1385 1387 "Failed setting eswitch to offloads"); 1386 - err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 1388 + err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); 1387 1389 if (err1) { 1388 1390 NL_SET_ERR_MSG_MOD(extack, 1389 1391 "Failed setting eswitch back to legacy"); ··· 1392 1392 } 1393 1393 if 
(esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 1394 1394 if (mlx5_eswitch_inline_mode_get(esw, 1395 - num_vfs, 1396 1395 &esw->offloads.inline_mode)) { 1397 1396 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; 1398 1397 NL_SET_ERR_MSG_MOD(extack, ··· 1408 1409 1409 1410 int esw_offloads_init_reps(struct mlx5_eswitch *esw) 1410 1411 { 1411 - int total_vports = MLX5_TOTAL_VPORTS(esw->dev); 1412 + int total_vports = esw->total_vports; 1412 1413 struct mlx5_core_dev *dev = esw->dev; 1413 1414 struct mlx5_eswitch_rep *rep; 1414 1415 u8 hw_id[ETH_ALEN], rep_type; 1415 - int vport; 1416 + int vport_index; 1416 1417 1417 1418 esw->offloads.vport_reps = kcalloc(total_vports, 1418 1419 sizeof(struct mlx5_eswitch_rep), ··· 1420 1421 if (!esw->offloads.vport_reps) 1421 1422 return -ENOMEM; 1422 1423 1423 - mlx5_query_nic_vport_mac_address(dev, 0, hw_id); 1424 + mlx5_query_mac_address(dev, hw_id); 1424 1425 1425 - mlx5_esw_for_all_reps(esw, vport, rep) { 1426 - rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport); 1426 + mlx5_esw_for_all_reps(esw, vport_index, rep) { 1427 + rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index); 1428 + rep->vport_index = vport_index; 1427 1429 ether_addr_copy(rep->hw_id, hw_id); 1428 1430 1429 1431 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) ··· 1479 1479 __unload_reps_vf_vport(esw, nvports, rep_type); 1480 1480 } 1481 1481 1482 - static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports, 1483 - u8 rep_type) 1482 + static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) 1484 1483 { 1485 - __unload_reps_vf_vport(esw, nvports, rep_type); 1484 + __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type); 1486 1485 1487 1486 /* Special vports must be the last to unload. 
*/ 1488 1487 __unload_reps_special_vport(esw, rep_type); 1489 1488 } 1490 1489 1491 - static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports) 1490 + static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw) 1492 1491 { 1493 1492 u8 rep_type = NUM_REP_TYPES; 1494 1493 1495 1494 while (rep_type-- > 0) 1496 - __unload_reps_all_vport(esw, nvports, rep_type); 1495 + __unload_reps_all_vport(esw, rep_type); 1497 1496 } 1498 1497 1499 1498 static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, ··· 1568 1569 return err; 1569 1570 } 1570 1571 1572 + static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) 1573 + { 1574 + int err; 1575 + 1576 + /* Special vports must be loaded first, uplink rep creates mdev resource. */ 1577 + err = __load_reps_special_vport(esw, rep_type); 1578 + if (err) 1579 + return err; 1580 + 1581 + err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type); 1582 + if (err) 1583 + goto err_vfs; 1584 + 1585 + return 0; 1586 + 1587 + err_vfs: 1588 + __unload_reps_special_vport(esw, rep_type); 1589 + return err; 1590 + } 1591 + 1571 1592 static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports) 1572 1593 { 1573 1594 u8 rep_type = 0; ··· 1607 1588 return err; 1608 1589 } 1609 1590 1610 - static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw) 1591 + static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw) 1611 1592 { 1612 1593 u8 rep_type = 0; 1613 1594 int err; 1614 1595 1615 1596 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 1616 - err = __load_reps_special_vport(esw, rep_type); 1597 + err = __load_reps_all_vport(esw, rep_type); 1617 1598 if (err) 1618 1599 goto err_reps; 1619 1600 } ··· 1622 1603 1623 1604 err_reps: 1624 1605 while (rep_type-- > 0) 1625 - __unload_reps_special_vport(esw, rep_type); 1606 + __unload_reps_all_vport(esw, rep_type); 1626 1607 return err; 1627 1608 } 1628 1609 ··· 2008 1989 esw->flags &= 
~MLX5_ESWITCH_VPORT_MATCH_METADATA; 2009 1990 } 2010 1991 2011 - static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports, 2012 - int nvports) 1992 + static int esw_offloads_steering_init(struct mlx5_eswitch *esw) 2013 1993 { 1994 + int num_vfs = esw->esw_funcs.num_vfs; 1995 + int total_vports; 2014 1996 int err; 1997 + 1998 + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) 1999 + total_vports = esw->total_vports; 2000 + else 2001 + total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev); 2015 2002 2016 2003 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); 2017 2004 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); ··· 2026 2001 if (err) 2027 2002 return err; 2028 2003 2029 - err = esw_create_offloads_fdb_tables(esw, nvports); 2004 + err = esw_create_offloads_fdb_tables(esw, total_vports); 2030 2005 if (err) 2031 2006 goto create_fdb_err; 2032 2007 2033 - err = esw_create_offloads_table(esw, nvports); 2008 + err = esw_create_offloads_table(esw, total_vports); 2034 2009 if (err) 2035 2010 goto create_ft_err; 2036 2011 2037 - err = esw_create_vport_rx_group(esw, nvports); 2012 + err = esw_create_vport_rx_group(esw, total_vports); 2038 2013 if (err) 2039 2014 goto create_fg_err; 2040 2015 ··· 2060 2035 esw_destroy_offloads_acl_tables(esw); 2061 2036 } 2062 2037 2063 - static void esw_functions_changed_event_handler(struct work_struct *work) 2038 + static void 2039 + esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) 2064 2040 { 2065 - u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {}; 2066 - struct mlx5_host_work *host_work; 2067 - struct mlx5_eswitch *esw; 2068 - u16 num_vfs = 0; 2069 - int err; 2041 + bool host_pf_disabled; 2042 + u16 new_num_vfs; 2070 2043 2071 - host_work = container_of(work, struct mlx5_host_work, work); 2072 - esw = host_work->esw; 2044 + new_num_vfs = MLX5_GET(query_esw_functions_out, out, 2045 + host_params_context.host_num_of_vfs); 2046 + host_pf_disabled = 
MLX5_GET(query_esw_functions_out, out, 2047 + host_params_context.host_pf_disabled); 2073 2048 2074 - err = mlx5_esw_query_functions(esw->dev, out, sizeof(out)); 2075 - num_vfs = MLX5_GET(query_esw_functions_out, out, 2076 - host_params_context.host_num_of_vfs); 2077 - if (err || num_vfs == esw->esw_funcs.num_vfs) 2078 - goto out; 2049 + if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) 2050 + return; 2079 2051 2080 2052 /* Number of VFs can only change from "0 to x" or "x to 0". */ 2081 2053 if (esw->esw_funcs.num_vfs > 0) { 2082 2054 esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs); 2083 2055 } else { 2084 - err = esw_offloads_load_vf_reps(esw, num_vfs); 2056 + int err; 2085 2057 2058 + err = esw_offloads_load_vf_reps(esw, new_num_vfs); 2086 2059 if (err) 2087 - goto out; 2060 + return; 2088 2061 } 2062 + esw->esw_funcs.num_vfs = new_num_vfs; 2063 + } 2089 2064 2090 - esw->esw_funcs.num_vfs = num_vfs; 2065 + static void esw_functions_changed_event_handler(struct work_struct *work) 2066 + { 2067 + struct mlx5_host_work *host_work; 2068 + struct mlx5_eswitch *esw; 2069 + const u32 *out; 2091 2070 2071 + host_work = container_of(work, struct mlx5_host_work, work); 2072 + esw = host_work->esw; 2073 + 2074 + out = mlx5_esw_query_functions(esw->dev); 2075 + if (IS_ERR(out)) 2076 + goto out; 2077 + 2078 + esw_vfs_changed_event_handler(esw, out); 2079 + kvfree(out); 2092 2080 out: 2093 2081 kfree(host_work); 2094 2082 } 2095 2083 2096 - static void esw_emulate_event_handler(struct work_struct *work) 2097 - { 2098 - struct mlx5_host_work *host_work = 2099 - container_of(work, struct mlx5_host_work, work); 2100 - struct mlx5_eswitch *esw = host_work->esw; 2101 - int err; 2102 - 2103 - if (esw->esw_funcs.num_vfs) { 2104 - err = esw_offloads_load_vf_reps(esw, esw->esw_funcs.num_vfs); 2105 - if (err) 2106 - esw_warn(esw->dev, "Load vf reps err=%d\n", err); 2107 - } 2108 - kfree(host_work); 2109 - } 2110 - 2111 - static int 
esw_functions_changed_event(struct notifier_block *nb, 2112 - unsigned long type, void *data) 2084 + int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data) 2113 2085 { 2114 2086 struct mlx5_esw_functions *esw_funcs; 2115 2087 struct mlx5_host_work *host_work; ··· 2121 2099 2122 2100 host_work->esw = esw; 2123 2101 2124 - if (mlx5_eswitch_is_funcs_handler(esw->dev)) 2125 - INIT_WORK(&host_work->work, 2126 - esw_functions_changed_event_handler); 2127 - else 2128 - INIT_WORK(&host_work->work, esw_emulate_event_handler); 2102 + INIT_WORK(&host_work->work, esw_functions_changed_event_handler); 2129 2103 queue_work(esw->work_queue, &host_work->work); 2130 2104 2131 2105 return NOTIFY_OK; 2132 2106 } 2133 2107 2134 - static void esw_functions_changed_event_init(struct mlx5_eswitch *esw, 2135 - u16 vf_nvports) 2136 - { 2137 - if (mlx5_eswitch_is_funcs_handler(esw->dev)) { 2138 - esw->esw_funcs.num_vfs = 0; 2139 - MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event, 2140 - ESW_FUNCTIONS_CHANGED); 2141 - mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb); 2142 - } else { 2143 - esw->esw_funcs.num_vfs = vf_nvports; 2144 - } 2145 - } 2146 - 2147 - static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw) 2148 - { 2149 - if (!mlx5_eswitch_is_funcs_handler(esw->dev)) 2150 - return; 2151 - 2152 - mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); 2153 - flush_workqueue(esw->work_queue); 2154 - } 2155 - 2156 - int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, 2157 - int total_nvports) 2108 + int esw_offloads_init(struct mlx5_eswitch *esw) 2158 2109 { 2159 2110 int err; 2160 2111 2161 - err = esw_offloads_steering_init(esw, vf_nvports, total_nvports); 2112 + err = esw_offloads_steering_init(esw); 2162 2113 if (err) 2163 2114 return err; 2164 2115 ··· 2141 2146 goto err_vport_metadata; 2142 2147 } 2143 2148 2144 - /* Only load special vports reps. 
VF reps will be loaded in 2145 - * context of functions_changed event handler through real 2146 - * or emulated event. 2147 - */ 2148 - err = esw_offloads_load_special_vport(esw); 2149 + err = esw_offloads_load_all_reps(esw); 2149 2150 if (err) 2150 2151 goto err_reps; 2151 2152 2152 2153 esw_offloads_devcom_init(esw); 2153 2154 mutex_init(&esw->offloads.termtbl_mutex); 2154 2155 2155 - esw_functions_changed_event_init(esw, vf_nvports); 2156 - 2157 2156 mlx5_rdma_enable_roce(esw->dev); 2158 - 2159 - /* Call esw_functions_changed event to load VF reps: 2160 - * 1. HW does not support the event then emulate it 2161 - * Or 2162 - * 2. The event was already notified when num_vfs changed 2163 - * and eswitch was in legacy mode 2164 - */ 2165 - esw_functions_changed_event(&esw->esw_funcs.nb.nb, 2166 - MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED, 2167 - NULL); 2168 2157 2169 2158 return 0; 2170 2159 ··· 2163 2184 static int esw_offloads_stop(struct mlx5_eswitch *esw, 2164 2185 struct netlink_ext_ack *extack) 2165 2186 { 2166 - int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; 2187 + int err, err1; 2167 2188 2168 - mlx5_eswitch_disable_sriov(esw); 2169 - err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 2189 + mlx5_eswitch_disable(esw); 2190 + err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); 2170 2191 if (err) { 2171 2192 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); 2172 - err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); 2193 + err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); 2173 2194 if (err1) { 2174 2195 NL_SET_ERR_MSG_MOD(extack, 2175 2196 "Failed setting eswitch back to offloads"); ··· 2181 2202 2182 2203 void esw_offloads_cleanup(struct mlx5_eswitch *esw) 2183 2204 { 2184 - esw_functions_changed_event_cleanup(esw); 2185 2205 mlx5_rdma_disable_roce(esw->dev); 2186 2206 esw_offloads_devcom_cleanup(esw); 2187 - esw_offloads_unload_all_reps(esw, esw->esw_funcs.num_vfs); 2207 + esw_offloads_unload_all_reps(esw); 2188 
2208 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) 2189 2209 mlx5_eswitch_disable_passing_vport_metadata(esw); 2190 2210 esw_offloads_steering_cleanup(esw); ··· 2193 2215 { 2194 2216 switch (mode) { 2195 2217 case DEVLINK_ESWITCH_MODE_LEGACY: 2196 - *mlx5_mode = SRIOV_LEGACY; 2218 + *mlx5_mode = MLX5_ESWITCH_LEGACY; 2197 2219 break; 2198 2220 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 2199 - *mlx5_mode = SRIOV_OFFLOADS; 2221 + *mlx5_mode = MLX5_ESWITCH_OFFLOADS; 2200 2222 break; 2201 2223 default: 2202 2224 return -EINVAL; ··· 2208 2230 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) 2209 2231 { 2210 2232 switch (mlx5_mode) { 2211 - case SRIOV_LEGACY: 2233 + case MLX5_ESWITCH_LEGACY: 2212 2234 *mode = DEVLINK_ESWITCH_MODE_LEGACY; 2213 2235 break; 2214 - case SRIOV_OFFLOADS: 2236 + case MLX5_ESWITCH_OFFLOADS: 2215 2237 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; 2216 2238 break; 2217 2239 default: ··· 2275 2297 if(!MLX5_ESWITCH_MANAGER(dev)) 2276 2298 return -EPERM; 2277 2299 2278 - if (dev->priv.eswitch->mode == SRIOV_NONE && 2300 + if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE && 2279 2301 !mlx5_core_is_ecpf_esw_manager(dev)) 2280 2302 return -EOPNOTSUPP; 2281 2303 ··· 2326 2348 { 2327 2349 struct mlx5_core_dev *dev = devlink_priv(devlink); 2328 2350 struct mlx5_eswitch *esw = dev->priv.eswitch; 2329 - int err, vport; 2351 + int err, vport, num_vport; 2330 2352 u8 mlx5_mode; 2331 2353 2332 2354 err = mlx5_devlink_eswitch_check(devlink); ··· 2355 2377 if (err) 2356 2378 goto out; 2357 2379 2358 - for (vport = 1; vport < esw->enabled_vports; vport++) { 2380 + mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { 2359 2381 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); 2360 2382 if (err) { 2361 2383 NL_SET_ERR_MSG_MOD(extack, ··· 2368 2390 return 0; 2369 2391 2370 2392 revert_inline_mode: 2371 - while (--vport > 0) 2393 + num_vport = --vport; 2394 + mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport) 2372 2395 
mlx5_modify_nic_vport_min_inline(dev, 2373 2396 vport, 2374 2397 esw->offloads.inline_mode); ··· 2390 2411 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 2391 2412 } 2392 2413 2393 - int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) 2414 + int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) 2394 2415 { 2395 2416 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 2396 2417 struct mlx5_core_dev *dev = esw->dev; ··· 2399 2420 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 2400 2421 return -EOPNOTSUPP; 2401 2422 2402 - if (esw->mode == SRIOV_NONE) 2423 + if (esw->mode == MLX5_ESWITCH_NONE) 2403 2424 return -EOPNOTSUPP; 2404 2425 2405 2426 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { ··· 2414 2435 } 2415 2436 2416 2437 query_vports: 2417 - for (vport = 1; vport <= nvfs; vport++) { 2438 + mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); 2439 + mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { 2418 2440 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); 2419 - if (vport > 1 && prev_mlx5_mode != mlx5_mode) 2441 + if (prev_mlx5_mode != mlx5_mode) 2420 2442 return -EINVAL; 2421 2443 prev_mlx5_mode = mlx5_mode; 2422 2444 } ··· 2447 2467 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) 2448 2468 return -EOPNOTSUPP; 2449 2469 2450 - if (esw->mode == SRIOV_LEGACY) { 2470 + if (esw->mode == MLX5_ESWITCH_LEGACY) { 2451 2471 esw->offloads.encap = encap; 2452 2472 return 0; 2453 2473 } ··· 2510 2530 2511 2531 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) 2512 2532 { 2513 - u16 max_vf = mlx5_core_max_vfs(esw->dev); 2514 2533 struct mlx5_eswitch_rep *rep; 2515 2534 int i; 2516 2535 2517 - if (esw->mode == SRIOV_OFFLOADS) 2518 - __unload_reps_all_vport(esw, max_vf, rep_type); 2536 + if (esw->mode == MLX5_ESWITCH_OFFLOADS) 2537 + __unload_reps_all_vport(esw, rep_type); 2519 2538 2520 2539 mlx5_esw_for_all_reps(esw, i, rep) 
2521 2540 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
+5 -3
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
··· 414 414 mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET); 415 415 } 416 416 417 - static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq) 417 + static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq, 418 + struct mlx5_eqe *eqe) 418 419 { 419 420 struct mlx5_fpga_conn *conn; 420 421 ··· 430 429 struct mlx5_fpga_device *fdev = conn->fdev; 431 430 struct mlx5_core_dev *mdev = fdev->mdev; 432 431 u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0}; 432 + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; 433 433 struct mlx5_wq_param wqp; 434 434 struct mlx5_cqe64 *cqe; 435 435 int inlen, err, eqn; ··· 478 476 pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); 479 477 mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas); 480 478 481 - err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen); 479 + err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out)); 482 480 kvfree(in); 483 481 484 482 if (err) ··· 869 867 conn->cb_arg = attr->cb_arg; 870 868 871 869 remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32); 872 - err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac); 870 + err = mlx5_query_mac_address(fdev->mdev, remote_mac); 873 871 if (err) { 874 872 mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err); 875 873 ret = ERR_PTR(err);
+15 -11
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 2092 2092 { 2093 2093 struct mlx5_flow_steering *steering = dev->priv.steering; 2094 2094 2095 - if (!steering || vport >= MLX5_TOTAL_VPORTS(dev)) 2095 + if (!steering || vport >= mlx5_eswitch_get_total_vports(dev)) 2096 2096 return NULL; 2097 2097 2098 2098 switch (type) { ··· 2423 2423 if (!steering->esw_egress_root_ns) 2424 2424 return; 2425 2425 2426 - for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) 2426 + for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++) 2427 2427 cleanup_root_ns(steering->esw_egress_root_ns[i]); 2428 2428 2429 2429 kfree(steering->esw_egress_root_ns); ··· 2438 2438 if (!steering->esw_ingress_root_ns) 2439 2439 return; 2440 2440 2441 - for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) 2441 + for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++) 2442 2442 cleanup_root_ns(steering->esw_ingress_root_ns[i]); 2443 2443 2444 2444 kfree(steering->esw_ingress_root_ns); ··· 2606 2606 static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) 2607 2607 { 2608 2608 struct mlx5_flow_steering *steering = dev->priv.steering; 2609 + int total_vports = mlx5_eswitch_get_total_vports(dev); 2609 2610 int err; 2610 2611 int i; 2611 2612 2612 - steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev), 2613 - sizeof(*steering->esw_egress_root_ns), 2614 - GFP_KERNEL); 2613 + steering->esw_egress_root_ns = 2614 + kcalloc(total_vports, 2615 + sizeof(*steering->esw_egress_root_ns), 2616 + GFP_KERNEL); 2615 2617 if (!steering->esw_egress_root_ns) 2616 2618 return -ENOMEM; 2617 2619 2618 - for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) { 2620 + for (i = 0; i < total_vports; i++) { 2619 2621 err = init_egress_acl_root_ns(steering, i); 2620 2622 if (err) 2621 2623 goto cleanup_root_ns; ··· 2636 2634 static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) 2637 2635 { 2638 2636 struct mlx5_flow_steering *steering = dev->priv.steering; 2637 + int total_vports = mlx5_eswitch_get_total_vports(dev); 2639 2638 int err; 2640 2639 int i; 2641 2640 2642 - 
steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev), 2643 - sizeof(*steering->esw_ingress_root_ns), 2644 - GFP_KERNEL); 2641 + steering->esw_ingress_root_ns = 2642 + kcalloc(total_vports, 2643 + sizeof(*steering->esw_ingress_root_ns), 2644 + GFP_KERNEL); 2645 2645 if (!steering->esw_ingress_root_ns) 2646 2646 return -ENOMEM; 2647 2647 2648 - for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) { 2648 + for (i = 0; i < total_vports; i++) { 2649 2649 err = init_ingress_acl_root_ns(steering, i); 2650 2650 if (err) 2651 2651 goto cleanup_root_ns;
+205 -20
drivers/net/ethernet/mellanox/mlx5/core/fw.c
··· 37 37 #include "mlx5_core.h" 38 38 #include "../../mlxfw/mlxfw.h" 39 39 40 + enum { 41 + MCQS_IDENTIFIER_BOOT_IMG = 0x1, 42 + MCQS_IDENTIFIER_OEM_NVCONFIG = 0x4, 43 + MCQS_IDENTIFIER_MLNX_NVCONFIG = 0x5, 44 + MCQS_IDENTIFIER_CS_TOKEN = 0x6, 45 + MCQS_IDENTIFIER_DBG_TOKEN = 0x7, 46 + MCQS_IDENTIFIER_GEARBOX = 0xA, 47 + }; 48 + 49 + enum { 50 + MCQS_UPDATE_STATE_IDLE, 51 + MCQS_UPDATE_STATE_IN_PROGRESS, 52 + MCQS_UPDATE_STATE_APPLIED, 53 + MCQS_UPDATE_STATE_ACTIVE, 54 + MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET, 55 + MCQS_UPDATE_STATE_FAILED, 56 + MCQS_UPDATE_STATE_CANCELED, 57 + MCQS_UPDATE_STATE_BUSY, 58 + }; 59 + 60 + enum { 61 + MCQI_INFO_TYPE_CAPABILITIES = 0x0, 62 + MCQI_INFO_TYPE_VERSION = 0x1, 63 + MCQI_INFO_TYPE_ACTIVATION_METHOD = 0x5, 64 + }; 65 + 66 + enum { 67 + MCQI_FW_RUNNING_VERSION = 0, 68 + MCQI_FW_STORED_VERSION = 1, 69 + }; 70 + 40 71 static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, 41 72 int outlen) 42 73 { ··· 229 198 230 199 if (MLX5_CAP_GEN(dev, device_memory)) { 231 200 err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM); 201 + if (err) 202 + return err; 203 + } 204 + 205 + if (MLX5_CAP_GEN(dev, event_cap)) { 206 + err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT); 232 207 if (err) 233 208 return err; 234 209 } ··· 429 392 } 430 393 431 394 static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev, 432 - u16 component_index, 433 - u32 *max_component_size, 434 - u8 *log_mcda_word_size, 435 - u16 *mcda_max_write_size) 395 + u16 component_index, bool read_pending, 396 + u8 info_type, u16 data_size, void *mcqi_data) 436 397 { 437 - u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)]; 438 - int offset = MLX5_ST_SZ_DW(mcqi_reg); 439 - u32 in[MLX5_ST_SZ_DW(mcqi_reg)]; 398 + u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_UN_SZ_DW(mcqi_reg_data)] = {}; 399 + u32 in[MLX5_ST_SZ_DW(mcqi_reg)] = {}; 400 + void *data; 440 401 int err; 441 402 442 - memset(in, 0, sizeof(in)); 443 - memset(out, 0, sizeof(out)); 444 - 445 403 
MLX5_SET(mcqi_reg, in, component_index, component_index); 446 - MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap)); 404 + MLX5_SET(mcqi_reg, in, read_pending_component, read_pending); 405 + MLX5_SET(mcqi_reg, in, info_type, info_type); 406 + MLX5_SET(mcqi_reg, in, data_size, data_size); 447 407 448 408 err = mlx5_core_access_reg(dev, in, sizeof(in), out, 449 - sizeof(out), MLX5_REG_MCQI, 0, 0); 409 + MLX5_ST_SZ_BYTES(mcqi_reg) + data_size, 410 + MLX5_REG_MCQI, 0, 0); 450 411 if (err) 451 - goto out; 412 + return err; 452 413 453 - *max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size); 454 - *log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size); 455 - *mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size); 414 + data = MLX5_ADDR_OF(mcqi_reg, out, data); 415 + memcpy(mcqi_data, data, data_size); 456 416 457 - out: 458 - return err; 417 + return 0; 418 + } 419 + 420 + static int mlx5_reg_mcqi_caps_query(struct mlx5_core_dev *dev, u16 component_index, 421 + u32 *max_component_size, u8 *log_mcda_word_size, 422 + u16 *mcda_max_write_size) 423 + { 424 + u32 mcqi_reg[MLX5_ST_SZ_DW(mcqi_cap)] = {}; 425 + int err; 426 + 427 + err = mlx5_reg_mcqi_query(dev, component_index, 0, 428 + MCQI_INFO_TYPE_CAPABILITIES, 429 + MLX5_ST_SZ_BYTES(mcqi_cap), mcqi_reg); 430 + if (err) 431 + return err; 432 + 433 + *max_component_size = MLX5_GET(mcqi_cap, mcqi_reg, max_component_size); 434 + *log_mcda_word_size = MLX5_GET(mcqi_cap, mcqi_reg, log_mcda_word_size); 435 + *mcda_max_write_size = MLX5_GET(mcqi_cap, mcqi_reg, mcda_max_write_size); 436 + 437 + return 0; 459 438 } 460 439 461 440 struct mlx5_mlxfw_dev { ··· 487 434 container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); 488 435 struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; 489 436 490 - return mlx5_reg_mcqi_query(dev, component_index, p_max_size, 491 - p_align_bits, p_max_write_size); 437 + if (!MLX5_CAP_GEN(dev, mcam_reg) || 
!MLX5_CAP_MCAM_REG(dev, mcqi)) { 438 + mlx5_core_warn(dev, "caps query isn't supported by running FW\n"); 439 + return -EOPNOTSUPP; 440 + } 441 + 442 + return mlx5_reg_mcqi_caps_query(dev, component_index, p_max_size, 443 + p_align_bits, p_max_write_size); 492 444 } 493 445 494 446 static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) ··· 632 574 633 575 return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, 634 576 firmware, extack); 577 + } 578 + 579 + static int mlx5_reg_mcqi_version_query(struct mlx5_core_dev *dev, 580 + u16 component_index, bool read_pending, 581 + u32 *mcqi_version_out) 582 + { 583 + return mlx5_reg_mcqi_query(dev, component_index, read_pending, 584 + MCQI_INFO_TYPE_VERSION, 585 + MLX5_ST_SZ_BYTES(mcqi_version), 586 + mcqi_version_out); 587 + } 588 + 589 + static int mlx5_reg_mcqs_query(struct mlx5_core_dev *dev, u32 *out, 590 + u16 component_index) 591 + { 592 + u8 out_sz = MLX5_ST_SZ_BYTES(mcqs_reg); 593 + u32 in[MLX5_ST_SZ_DW(mcqs_reg)] = {}; 594 + int err; 595 + 596 + memset(out, 0, out_sz); 597 + 598 + MLX5_SET(mcqs_reg, in, component_index, component_index); 599 + 600 + err = mlx5_core_access_reg(dev, in, sizeof(in), out, 601 + out_sz, MLX5_REG_MCQS, 0, 0); 602 + return err; 603 + } 604 + 605 + /* scans component index sequentially, to find the boot img index */ 606 + static int mlx5_get_boot_img_component_index(struct mlx5_core_dev *dev) 607 + { 608 + u32 out[MLX5_ST_SZ_DW(mcqs_reg)] = {}; 609 + u16 identifier, component_idx = 0; 610 + bool quit; 611 + int err; 612 + 613 + do { 614 + err = mlx5_reg_mcqs_query(dev, out, component_idx); 615 + if (err) 616 + return err; 617 + 618 + identifier = MLX5_GET(mcqs_reg, out, identifier); 619 + quit = !!MLX5_GET(mcqs_reg, out, last_index_flag); 620 + quit |= identifier == MCQS_IDENTIFIER_BOOT_IMG; 621 + } while (!quit && ++component_idx); 622 + 623 + if (identifier != MCQS_IDENTIFIER_BOOT_IMG) { 624 + mlx5_core_warn(dev, "mcqs: can't find boot_img component ix, last scanned idx 
%d\n", 625 + component_idx); 626 + return -EOPNOTSUPP; 627 + } 628 + 629 + return component_idx; 630 + } 631 + 632 + static int 633 + mlx5_fw_image_pending(struct mlx5_core_dev *dev, 634 + int component_index, 635 + bool *pending_version_exists) 636 + { 637 + u32 out[MLX5_ST_SZ_DW(mcqs_reg)]; 638 + u8 component_update_state; 639 + int err; 640 + 641 + err = mlx5_reg_mcqs_query(dev, out, component_index); 642 + if (err) 643 + return err; 644 + 645 + component_update_state = MLX5_GET(mcqs_reg, out, component_update_state); 646 + 647 + if (component_update_state == MCQS_UPDATE_STATE_IDLE) { 648 + *pending_version_exists = false; 649 + } else if (component_update_state == MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET) { 650 + *pending_version_exists = true; 651 + } else { 652 + mlx5_core_warn(dev, 653 + "mcqs: can't read pending fw version while fw state is %d\n", 654 + component_update_state); 655 + return -ENODATA; 656 + } 657 + return 0; 658 + } 659 + 660 + int mlx5_fw_version_query(struct mlx5_core_dev *dev, 661 + u32 *running_ver, u32 *pending_ver) 662 + { 663 + u32 reg_mcqi_version[MLX5_ST_SZ_DW(mcqi_version)] = {}; 664 + bool pending_version_exists; 665 + int component_index; 666 + int err; 667 + 668 + if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi) || 669 + !MLX5_CAP_MCAM_REG(dev, mcqs)) { 670 + mlx5_core_warn(dev, "fw query isn't supported by the FW\n"); 671 + return -EOPNOTSUPP; 672 + } 673 + 674 + component_index = mlx5_get_boot_img_component_index(dev); 675 + if (component_index < 0) 676 + return component_index; 677 + 678 + err = mlx5_reg_mcqi_version_query(dev, component_index, 679 + MCQI_FW_RUNNING_VERSION, 680 + reg_mcqi_version); 681 + if (err) 682 + return err; 683 + 684 + *running_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version); 685 + 686 + err = mlx5_fw_image_pending(dev, component_index, &pending_version_exists); 687 + if (err) 688 + return err; 689 + 690 + if (!pending_version_exists) { 691 + *pending_ver = 0; 692 + return 0; 
693 + } 694 + 695 + err = mlx5_reg_mcqi_version_query(dev, component_index, 696 + MCQI_FW_STORED_VERSION, 697 + reg_mcqi_version); 698 + if (err) 699 + return err; 700 + 701 + *pending_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version); 702 + 703 + return 0; 635 704 }
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/lag.c
··· 305 305 !mlx5_sriov_is_enabled(dev1); 306 306 307 307 #ifdef CONFIG_MLX5_ESWITCH 308 - roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE && 309 - dev1->priv.eswitch->mode == SRIOV_NONE; 308 + roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE && 309 + dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE; 310 310 #endif 311 311 312 312 if (roce_lag)
+1 -4
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
··· 75 75 void mlx5_eq_table_destroy(struct mlx5_core_dev *dev); 76 76 77 77 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); 78 - int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); 78 + void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); 79 79 struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn); 80 80 struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev); 81 81 void mlx5_cq_tasklet_cb(unsigned long data); ··· 96 96 #ifdef CONFIG_RFS_ACCEL 97 97 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev); 98 98 #endif 99 - 100 - int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); 101 - int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); 102 99 103 100 #endif
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 734 734 struct mlx5_priv *priv = &dev->priv; 735 735 int err = 0; 736 736 737 - priv->pci_dev_data = id->driver_data; 738 - 737 + mutex_init(&dev->pci_status_mutex); 739 738 pci_set_drvdata(dev->pdev, dev); 740 739 741 740 dev->bar_addr = pci_resource_start(pdev, 0); ··· 1254 1255 1255 1256 INIT_LIST_HEAD(&priv->ctx_list); 1256 1257 spin_lock_init(&priv->ctx_lock); 1257 - mutex_init(&dev->pci_status_mutex); 1258 1258 mutex_init(&dev->intf_state_mutex); 1259 1259 1260 1260 mutex_init(&priv->bfregs.reg_head.lock); ··· 1314 1316 dev = devlink_priv(devlink); 1315 1317 dev->device = &pdev->dev; 1316 1318 dev->pdev = pdev; 1319 + 1320 + dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ? 1321 + MLX5_COREDEV_VF : MLX5_COREDEV_PF; 1317 1322 1318 1323 err = mlx5_mdev_init(dev, prof_sel); 1319 1324 if (err)
+2
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 205 205 206 206 int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw, 207 207 struct netlink_ext_ack *extack); 208 + int mlx5_fw_version_query(struct mlx5_core_dev *dev, 209 + u32 *running_ver, u32 *stored_ver); 208 210 209 211 void mlx5e_init(void); 210 212 void mlx5e_cleanup(void);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
··· 126 126 { 127 127 u8 hw_id[ETH_ALEN]; 128 128 129 - mlx5_query_nic_vport_mac_address(dev, 0, hw_id); 129 + mlx5_query_mac_address(dev, hw_id); 130 130 gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 131 131 addrconf_addr_eui48(&gid->raw[8], hw_id); 132 132 }
+16 -26
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
··· 74 74 int err; 75 75 int vf; 76 76 77 - if (sriov->enabled_vfs) { 78 - mlx5_core_warn(dev, 79 - "failed to enable SRIOV on device, already enabled with %d vfs\n", 80 - sriov->enabled_vfs); 81 - return -EBUSY; 82 - } 83 - 84 77 if (!MLX5_ESWITCH_MANAGER(dev)) 85 78 goto enable_vfs_hca; 86 79 87 - err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); 80 + mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs); 81 + err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY); 88 82 if (err) { 89 83 mlx5_core_warn(dev, 90 84 "failed to enable eswitch SRIOV (%d)\n", err); ··· 93 99 continue; 94 100 } 95 101 sriov->vfs_ctx[vf].enabled = 1; 96 - sriov->enabled_vfs++; 97 102 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) { 98 103 err = sriov_restore_guids(dev, vf); 99 104 if (err) { ··· 111 118 static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) 112 119 { 113 120 struct mlx5_core_sriov *sriov = &dev->priv.sriov; 121 + int num_vfs = pci_num_vf(dev->pdev); 114 122 int err; 115 123 int vf; 116 124 117 - if (!sriov->enabled_vfs) 118 - goto out; 119 - 120 - for (vf = 0; vf < sriov->num_vfs; vf++) { 125 + for (vf = num_vfs - 1; vf >= 0; vf--) { 121 126 if (!sriov->vfs_ctx[vf].enabled) 122 127 continue; 123 128 err = mlx5_core_disable_hca(dev, vf + 1); ··· 124 133 continue; 125 134 } 126 135 sriov->vfs_ctx[vf].enabled = 0; 127 - sriov->enabled_vfs--; 128 136 } 129 137 130 - out: 131 138 if (MLX5_ESWITCH_MANAGER(dev)) 132 - mlx5_eswitch_disable_sriov(dev->priv.eswitch); 139 + mlx5_eswitch_disable(dev->priv.eswitch); 133 140 134 141 if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages)) 135 142 mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); ··· 180 191 181 192 int mlx5_sriov_attach(struct mlx5_core_dev *dev) 182 193 { 183 - struct mlx5_core_sriov *sriov = &dev->priv.sriov; 184 - 185 - if (!mlx5_core_is_pf(dev) || !sriov->num_vfs) 194 + if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev)) 186 195 return 0; 187 196 188 
197 /* If sriov VFs exist in PCI level, enable them in device level */ 189 - return mlx5_device_enable_sriov(dev, sriov->num_vfs); 198 + return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev)); 190 199 } 191 200 192 201 void mlx5_sriov_detach(struct mlx5_core_dev *dev) ··· 197 210 198 211 static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev) 199 212 { 200 - u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {}; 201 213 u16 host_total_vfs; 202 - int err; 214 + const u32 *out; 203 215 204 216 if (mlx5_core_is_ecpf_esw_manager(dev)) { 205 - err = mlx5_esw_query_functions(dev, out, sizeof(out)); 206 - host_total_vfs = MLX5_GET(query_esw_functions_out, out, 207 - host_params_context.host_total_vfs); 217 + out = mlx5_esw_query_functions(dev); 208 218 209 219 /* Old FW doesn't support getting total_vfs from esw func 210 220 * but supports getting it from pci_sriov. 211 221 */ 212 - if (!err && host_total_vfs) 222 + if (IS_ERR(out)) 223 + goto done; 224 + host_total_vfs = MLX5_GET(query_esw_functions_out, out, 225 + host_params_context.host_total_vfs); 226 + kvfree(out); 227 + if (host_total_vfs) 213 228 return host_total_vfs; 214 229 } 215 230 231 + done: 216 232 return pci_sriov_get_totalvfs(dev->pdev); 217 233 } 218 234
+33 -10
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 34 34 #include <linux/etherdevice.h> 35 35 #include <linux/mlx5/driver.h> 36 36 #include <linux/mlx5/vport.h> 37 + #include <linux/mlx5/eswitch.h> 37 38 #include "mlx5_core.h" 38 39 39 40 /* Mutex to hold while enabling or disabling RoCE */ ··· 156 155 } 157 156 158 157 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, 159 - u16 vport, u8 *addr) 158 + u16 vport, bool other, u8 *addr) 160 159 { 161 - u32 *out; 162 160 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); 161 + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {}; 163 162 u8 *out_addr; 163 + u32 *out; 164 164 int err; 165 165 166 166 out = kvzalloc(outlen, GFP_KERNEL); ··· 171 169 out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out, 172 170 nic_vport_context.permanent_address); 173 171 174 - err = mlx5_query_nic_vport_context(mdev, vport, out, outlen); 172 + MLX5_SET(query_nic_vport_context_in, in, opcode, 173 + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); 174 + MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); 175 + MLX5_SET(query_nic_vport_context_in, in, other_vport, other); 176 + 177 + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); 175 178 if (!err) 176 179 ether_addr_copy(addr, &out_addr[2]); 177 180 ··· 184 177 return err; 185 178 } 186 179 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address); 180 + 181 + int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr) 182 + { 183 + return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr); 184 + } 185 + EXPORT_SYMBOL_GPL(mlx5_query_mac_address); 187 186 188 187 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, 189 188 u16 vport, u8 *addr) ··· 207 194 MLX5_SET(modify_nic_vport_context_in, in, 208 195 field_select.permanent_address, 1); 209 196 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 210 - 211 - if (vport) 212 - MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); 197 + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); 213 198 214 199 
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, 215 200 in, nic_vport_context); ··· 302 291 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); 303 292 MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type); 304 293 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); 305 - 306 - if (vport) 307 - MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); 294 + MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); 308 295 309 296 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); 310 297 if (err) ··· 492 483 MLX5_SET(modify_nic_vport_context_in, in, 493 484 field_select.node_guid, 1); 494 485 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 495 - MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport); 486 + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); 496 487 497 488 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, 498 489 in, nic_vport_context); ··· 1166 1157 return tmp; 1167 1158 } 1168 1159 EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); 1160 + 1161 + /** 1162 + * mlx5_eswitch_get_total_vports - Get total vports of the eswitch 1163 + * 1164 + * @dev: Pointer to core device 1165 + * 1166 + * mlx5_eswitch_get_total_vports returns total number of vports for 1167 + * the eswitch. 1168 + */ 1169 + u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) 1170 + { 1171 + return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev); 1172 + } 1173 + EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);
+3 -3
include/linux/mlx5/cq.h
··· 47 47 struct completion free; 48 48 unsigned vector; 49 49 unsigned int irqn; 50 - void (*comp) (struct mlx5_core_cq *); 50 + void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); 51 51 void (*event) (struct mlx5_core_cq *, enum mlx5_event); 52 52 u32 cons_index; 53 53 unsigned arm_sn; ··· 55 55 int pid; 56 56 struct { 57 57 struct list_head list; 58 - void (*comp)(struct mlx5_core_cq *); 58 + void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); 59 59 void *priv; 60 60 } tasklet_ctx; 61 61 int reset_notify_added; ··· 185 185 } 186 186 187 187 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 188 - u32 *in, int inlen); 188 + u32 *in, int inlen, u32 *out, int outlen); 189 189 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); 190 190 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 191 191 u32 *out, int outlen);
+19 -1
include/linux/mlx5/device.h
··· 351 351 352 352 MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, 353 353 354 - MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1, 354 + MLX5_EVENT_TYPE_MAX = 0x100, 355 355 }; 356 356 357 357 enum { ··· 437 437 MLX5_OPCODE_SET_PSV = 0x20, 438 438 MLX5_OPCODE_GET_PSV = 0x21, 439 439 MLX5_OPCODE_CHECK_PSV = 0x22, 440 + MLX5_OPCODE_DUMP = 0x23, 440 441 MLX5_OPCODE_RGET_PSV = 0x26, 441 442 MLX5_OPCODE_RCHECK_PSV = 0x27, 442 443 443 444 MLX5_OPCODE_UMR = 0x25, 444 445 446 + }; 447 + 448 + enum { 449 + MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20, 450 + }; 451 + 452 + enum { 453 + MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20, 445 454 }; 446 455 447 456 enum { ··· 1094 1085 MLX5_CAP_DEBUG, 1095 1086 MLX5_CAP_RESERVED_14, 1096 1087 MLX5_CAP_DEV_MEM, 1088 + MLX5_CAP_RESERVED_16, 1089 + MLX5_CAP_TLS, 1090 + MLX5_CAP_DEV_EVENT = 0x14, 1097 1091 /* NUM OF CAP Types */ 1098 1092 MLX5_CAP_NUM 1099 1093 }; ··· 1274 1262 1275 1263 #define MLX5_CAP64_DEV_MEM(mdev, cap)\ 1276 1264 MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) 1265 + 1266 + #define MLX5_CAP_TLS(mdev, cap) \ 1267 + MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) 1268 + 1269 + #define MLX5_CAP_DEV_EVENT(mdev, cap)\ 1270 + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) 1277 1271 1278 1272 enum { 1279 1273 MLX5_CMD_STAT_OK = 0x0,
+15 -7
include/linux/mlx5/driver.h
··· 139 139 MLX5_REG_MTPPS = 0x9053, 140 140 MLX5_REG_MTPPSE = 0x9054, 141 141 MLX5_REG_MPEGC = 0x9056, 142 + MLX5_REG_MCQS = 0x9060, 142 143 MLX5_REG_MCQI = 0x9061, 143 144 MLX5_REG_MCC = 0x9062, 144 145 MLX5_REG_MCDA = 0x9063, ··· 181 180 MLX5_POLICY_UP = 1, 182 181 MLX5_POLICY_FOLLOW = 2, 183 182 MLX5_POLICY_INVALID = 0xffffffff 183 + }; 184 + 185 + enum mlx5_coredev_type { 186 + MLX5_COREDEV_PF, 187 + MLX5_COREDEV_VF 184 188 }; 185 189 186 190 struct mlx5_field_desc { ··· 474 468 struct mlx5_core_sriov { 475 469 struct mlx5_vf_context *vfs_ctx; 476 470 int num_vfs; 477 - int enabled_vfs; 478 471 u16 max_vfs; 479 472 }; 480 473 ··· 577 572 struct mlx5_core_sriov sriov; 578 573 struct mlx5_lag *lag; 579 574 struct mlx5_devcom *devcom; 580 - unsigned long pci_dev_data; 581 575 struct mlx5_core_roce roce; 582 576 struct mlx5_fc_stats fc_stats; 583 577 struct mlx5_rl_table rl_table; ··· 657 653 658 654 struct mlx5_core_dev { 659 655 struct device *device; 656 + enum mlx5_coredev_type coredev_type; 660 657 struct pci_dev *pdev; 661 658 /* sync pci state */ 662 659 struct mutex pci_status_mutex; ··· 1052 1047 void mlx5_unregister_interface(struct mlx5_interface *intf); 1053 1048 int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); 1054 1049 int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); 1050 + int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); 1051 + int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); 1055 1052 1056 1053 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 1057 1054 ··· 1094 1087 MLX5_PCI_DEV_IS_VF = 1 << 0, 1095 1088 }; 1096 1089 1097 - static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) 1090 + static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) 1098 1091 { 1099 - return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); 1092 + return dev->coredev_type == MLX5_COREDEV_PF; 1100 1093 } 1101 
1094 1102 1095 static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) ··· 1104 1097 return dev->caps.embedded_cpu; 1105 1098 } 1106 1099 1107 - static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev) 1100 + static inline bool 1101 + mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) 1108 1102 { 1109 1103 return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); 1110 1104 } 1111 1105 1112 - static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev) 1106 + static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) 1113 1107 { 1114 1108 return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); 1115 1109 } 1116 1110 1117 - static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev) 1111 + static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) 1118 1112 { 1119 1113 return dev->priv.sriov.max_vfs; 1120 1114 }
+1 -1
include/linux/mlx5/eq.h
··· 15 15 struct mlx5_eq_param { 16 16 u8 irq_index; 17 17 int nent; 18 - u64 mask; 18 + u64 mask[4]; 19 19 }; 20 20 21 21 struct mlx5_eq *
+8 -3
include/linux/mlx5/eswitch.h
··· 12 12 #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) 13 13 14 14 enum { 15 - SRIOV_NONE, 16 - SRIOV_LEGACY, 17 - SRIOV_OFFLOADS 15 + MLX5_ESWITCH_NONE, 16 + MLX5_ESWITCH_LEGACY, 17 + MLX5_ESWITCH_OFFLOADS 18 18 }; 19 19 20 20 enum { ··· 46 46 u16 vport; 47 47 u8 hw_id[ETH_ALEN]; 48 48 u16 vlan; 49 + /* Only IB rep is using vport_index */ 50 + u16 vport_index; 49 51 u32 vlan_refcount; 50 52 }; 51 53 ··· 65 63 struct mlx5_flow_handle * 66 64 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, 67 65 u16 vport_num, u32 sqn); 66 + 67 + u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); 68 68 69 69 #ifdef CONFIG_MLX5_ESWITCH 70 70 enum devlink_eswitch_encap_mode ··· 95 91 return 0; 96 92 }; 97 93 #endif /* CONFIG_MLX5_ESWITCH */ 94 + 98 95 #endif
+289 -14
include/linux/mlx5/mlx5_ifc.h
··· 91 91 92 92 enum { 93 93 MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, 94 + MLX5_OBJ_TYPE_MKEY = 0xff01, 95 + MLX5_OBJ_TYPE_QP = 0xff02, 96 + MLX5_OBJ_TYPE_PSV = 0xff03, 97 + MLX5_OBJ_TYPE_RMP = 0xff04, 98 + MLX5_OBJ_TYPE_XRC_SRQ = 0xff05, 99 + MLX5_OBJ_TYPE_RQ = 0xff06, 100 + MLX5_OBJ_TYPE_SQ = 0xff07, 101 + MLX5_OBJ_TYPE_TIR = 0xff08, 102 + MLX5_OBJ_TYPE_TIS = 0xff09, 103 + MLX5_OBJ_TYPE_DCT = 0xff0a, 104 + MLX5_OBJ_TYPE_XRQ = 0xff0b, 105 + MLX5_OBJ_TYPE_RQT = 0xff0e, 106 + MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f, 107 + MLX5_OBJ_TYPE_CQ = 0xff10, 94 108 }; 95 109 96 110 enum { ··· 120 106 MLX5_CMD_OP_QUERY_ISSI = 0x10a, 121 107 MLX5_CMD_OP_SET_ISSI = 0x10b, 122 108 MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, 109 + MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111, 110 + MLX5_CMD_OP_ALLOC_SF = 0x113, 111 + MLX5_CMD_OP_DEALLOC_SF = 0x114, 123 112 MLX5_CMD_OP_CREATE_MKEY = 0x200, 124 113 MLX5_CMD_OP_QUERY_MKEY = 0x201, 125 114 MLX5_CMD_OP_DESTROY_MKEY = 0x202, ··· 730 713 u8 reserved_2b[0x6]; 731 714 u8 max_encap_header_size[0xa]; 732 715 733 - u8 reserved_40[0x7c0]; 716 + u8 reserved_at_40[0xb]; 717 + u8 log_max_esw_sf[0x5]; 718 + u8 esw_sf_base_id[0x10]; 719 + 720 + u8 reserved_at_60[0x7a0]; 734 721 735 722 }; 736 723 ··· 874 853 u8 reserved_at_180[0x680]; 875 854 }; 876 855 856 + struct mlx5_ifc_device_event_cap_bits { 857 + u8 user_affiliated_events[4][0x40]; 858 + 859 + u8 user_unaffiliated_events[4][0x40]; 860 + }; 861 + 877 862 enum { 878 863 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, 879 864 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, ··· 973 946 u8 reserved_at_c0[0x720]; 974 947 }; 975 948 949 + struct mlx5_ifc_tls_cap_bits { 950 + u8 tls_1_2_aes_gcm_128[0x1]; 951 + u8 tls_1_3_aes_gcm_128[0x1]; 952 + u8 tls_1_2_aes_gcm_256[0x1]; 953 + u8 tls_1_3_aes_gcm_256[0x1]; 954 + u8 reserved_at_4[0x1c]; 955 + 956 + u8 reserved_at_20[0x7e0]; 957 + }; 958 + 976 959 enum { 977 960 MLX5_WQ_TYPE_LINKED_LIST = 0x0, 978 961 MLX5_WQ_TYPE_CYCLIC = 0x1, ··· 1047 1010 1048 1011 u8 
log_max_srq_sz[0x8]; 1049 1012 u8 log_max_qp_sz[0x8]; 1050 - u8 reserved_at_90[0x8]; 1013 + u8 event_cap[0x1]; 1014 + u8 reserved_at_91[0x7]; 1051 1015 u8 prio_tag_required[0x1]; 1052 1016 u8 reserved_at_99[0x2]; 1053 1017 u8 log_max_qp[0x5]; ··· 1096 1058 u8 cc_modify_allowed[0x1]; 1097 1059 u8 start_pad[0x1]; 1098 1060 u8 cache_line_128byte[0x1]; 1099 - u8 reserved_at_165[0xa]; 1061 + u8 reserved_at_165[0x4]; 1062 + u8 rts2rts_qp_counters_set_id[0x1]; 1063 + u8 reserved_at_16a[0x5]; 1100 1064 u8 qcam_reg[0x1]; 1101 1065 u8 gid_table_size[0x10]; 1102 1066 ··· 1315 1275 1316 1276 u8 reserved_at_440[0x20]; 1317 1277 1318 - u8 reserved_at_460[0x3]; 1278 + u8 tls[0x1]; 1279 + u8 reserved_at_461[0x2]; 1319 1280 u8 log_max_uctx[0x5]; 1320 1281 u8 reserved_at_468[0x3]; 1321 1282 u8 log_max_umem[0x5]; ··· 1341 1300 u8 max_geneve_tlv_option_data_len[0x5]; 1342 1301 u8 reserved_at_570[0x10]; 1343 1302 1344 - u8 reserved_at_580[0x3c]; 1303 + u8 reserved_at_580[0x33]; 1304 + u8 log_max_dek[0x5]; 1305 + u8 reserved_at_5b8[0x4]; 1345 1306 u8 mini_cqe_resp_stride_index[0x1]; 1346 1307 u8 cqe_128_always[0x1]; 1347 1308 u8 cqe_compression_128[0x1]; ··· 1373 1330 u8 reserved_at_640[0x10]; 1374 1331 u8 num_q_monitor_counters[0x10]; 1375 1332 1376 - u8 reserved_at_660[0x40]; 1333 + u8 reserved_at_660[0x20]; 1334 + 1335 + u8 sf[0x1]; 1336 + u8 sf_set_partition[0x1]; 1337 + u8 reserved_at_682[0x1]; 1338 + u8 log_max_sf[0x5]; 1339 + u8 reserved_at_688[0x8]; 1340 + u8 log_min_sf_size[0x8]; 1341 + u8 max_num_sf_partitions[0x8]; 1377 1342 1378 1343 u8 uctx_cap[0x20]; 1379 1344 1380 1345 u8 reserved_at_6c0[0x4]; 1381 1346 u8 flex_parser_id_geneve_tlv_option_0[0x4]; 1382 - u8 reserved_at_6c8[0x138]; 1347 + u8 reserved_at_6c8[0x28]; 1348 + u8 sf_base_id[0x10]; 1349 + 1350 + u8 reserved_at_700[0x100]; 1383 1351 }; 1384 1352 1385 1353 enum mlx5_flow_destination_type { ··· 2622 2568 struct mlx5_ifc_qos_cap_bits qos_cap; 2623 2569 struct mlx5_ifc_debug_cap_bits debug_cap; 2624 2570 struct 
mlx5_ifc_fpga_cap_bits fpga_cap; 2571 + struct mlx5_ifc_tls_cap_bits tls_cap; 2625 2572 u8 reserved_at_0[0x8000]; 2626 2573 }; 2627 2574 ··· 2762 2707 2763 2708 struct mlx5_ifc_tisc_bits { 2764 2709 u8 strict_lag_tx_port_affinity[0x1]; 2765 - u8 reserved_at_1[0x3]; 2710 + u8 tls_en[0x1]; 2711 + u8 reserved_at_1[0x2]; 2766 2712 u8 lag_tx_port_affinity[0x04]; 2767 2713 2768 2714 u8 reserved_at_8[0x4]; ··· 2777 2721 2778 2722 u8 reserved_at_140[0x8]; 2779 2723 u8 underlay_qpn[0x18]; 2780 - u8 reserved_at_160[0x3a0]; 2724 + 2725 + u8 reserved_at_160[0x8]; 2726 + u8 pd[0x18]; 2727 + 2728 + u8 reserved_at_180[0x380]; 2781 2729 }; 2782 2730 2783 2731 enum { ··· 7464 7404 7465 7405 u8 reserved_at_280[0x40]; 7466 7406 7467 - u8 event_bitmask[0x40]; 7407 + u8 event_bitmask[4][0x40]; 7468 7408 7469 - u8 reserved_at_300[0x580]; 7409 + u8 reserved_at_3c0[0x4c0]; 7470 7410 7471 7411 u8 pas[0][0x40]; 7472 7412 }; ··· 8584 8524 u8 mcda[0x1]; 8585 8525 u8 mcc[0x1]; 8586 8526 u8 mcqi[0x1]; 8587 - u8 reserved_at_1f[0x1]; 8527 + u8 mcqs[0x1]; 8588 8528 8589 8529 u8 regs_95_to_87[0x9]; 8590 8530 u8 mpegc[0x1]; ··· 9076 9016 u8 reserved_at_40[0x40]; 9077 9017 }; 9078 9018 9019 + struct mlx5_ifc_mcqs_reg_bits { 9020 + u8 last_index_flag[0x1]; 9021 + u8 reserved_at_1[0x7]; 9022 + u8 fw_device[0x8]; 9023 + u8 component_index[0x10]; 9024 + 9025 + u8 reserved_at_20[0x10]; 9026 + u8 identifier[0x10]; 9027 + 9028 + u8 reserved_at_40[0x17]; 9029 + u8 component_status[0x5]; 9030 + u8 component_update_state[0x4]; 9031 + 9032 + u8 last_update_state_changer_type[0x4]; 9033 + u8 last_update_state_changer_host_id[0x4]; 9034 + u8 reserved_at_68[0x18]; 9035 + }; 9036 + 9079 9037 struct mlx5_ifc_mcqi_cap_bits { 9080 9038 u8 supported_info_bitmask[0x20]; 9081 9039 ··· 9114 9036 u8 reserved_at_86[0x1a]; 9115 9037 }; 9116 9038 9039 + struct mlx5_ifc_mcqi_version_bits { 9040 + u8 reserved_at_0[0x2]; 9041 + u8 build_time_valid[0x1]; 9042 + u8 user_defined_time_valid[0x1]; 9043 + u8 reserved_at_4[0x14]; 9044 
+ u8 version_string_length[0x8]; 9045 + 9046 + u8 version[0x20]; 9047 + 9048 + u8 build_time[0x40]; 9049 + 9050 + u8 user_defined_time[0x40]; 9051 + 9052 + u8 build_tool_version[0x20]; 9053 + 9054 + u8 reserved_at_e0[0x20]; 9055 + 9056 + u8 version_string[92][0x8]; 9057 + }; 9058 + 9059 + struct mlx5_ifc_mcqi_activation_method_bits { 9060 + u8 pending_server_ac_power_cycle[0x1]; 9061 + u8 pending_server_dc_power_cycle[0x1]; 9062 + u8 pending_server_reboot[0x1]; 9063 + u8 pending_fw_reset[0x1]; 9064 + u8 auto_activate[0x1]; 9065 + u8 all_hosts_sync[0x1]; 9066 + u8 device_hw_reset[0x1]; 9067 + u8 reserved_at_7[0x19]; 9068 + }; 9069 + 9070 + union mlx5_ifc_mcqi_reg_data_bits { 9071 + struct mlx5_ifc_mcqi_cap_bits mcqi_caps; 9072 + struct mlx5_ifc_mcqi_version_bits mcqi_version; 9073 + struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod; 9074 + }; 9075 + 9117 9076 struct mlx5_ifc_mcqi_reg_bits { 9118 9077 u8 read_pending_component[0x1]; 9119 9078 u8 reserved_at_1[0xf]; ··· 9168 9053 u8 reserved_at_a0[0x10]; 9169 9054 u8 data_size[0x10]; 9170 9055 9171 - u8 data[0][0x20]; 9056 + union mlx5_ifc_mcqi_reg_data_bits data[0]; 9172 9057 }; 9173 9058 9174 9059 struct mlx5_ifc_mcc_reg_bits { ··· 9865 9750 9866 9751 struct mlx5_ifc_host_params_context_bits { 9867 9752 u8 host_number[0x8]; 9868 - u8 reserved_at_8[0x8]; 9753 + u8 reserved_at_8[0x7]; 9754 + u8 host_pf_disabled[0x1]; 9869 9755 u8 host_num_of_vfs[0x10]; 9870 9756 9871 9757 u8 host_total_vfs[0x10]; ··· 9902 9786 struct mlx5_ifc_host_params_context_bits host_params_context; 9903 9787 9904 9788 u8 reserved_at_280[0x180]; 9789 + u8 host_sf_enable[0][0x40]; 9790 + }; 9791 + 9792 + struct mlx5_ifc_sf_partition_bits { 9793 + u8 reserved_at_0[0x10]; 9794 + u8 log_num_sf[0x8]; 9795 + u8 log_sf_bar_size[0x8]; 9796 + }; 9797 + 9798 + struct mlx5_ifc_query_sf_partitions_out_bits { 9799 + u8 status[0x8]; 9800 + u8 reserved_at_8[0x18]; 9801 + 9802 + u8 syndrome[0x20]; 9803 + 9804 + u8 reserved_at_40[0x18]; 9805 + u8 
num_sf_partitions[0x8]; 9806 + 9807 + u8 reserved_at_60[0x20]; 9808 + 9809 + struct mlx5_ifc_sf_partition_bits sf_partition[0]; 9810 + }; 9811 + 9812 + struct mlx5_ifc_query_sf_partitions_in_bits { 9813 + u8 opcode[0x10]; 9814 + u8 reserved_at_10[0x10]; 9815 + 9816 + u8 reserved_at_20[0x10]; 9817 + u8 op_mod[0x10]; 9818 + 9819 + u8 reserved_at_40[0x40]; 9820 + }; 9821 + 9822 + struct mlx5_ifc_dealloc_sf_out_bits { 9823 + u8 status[0x8]; 9824 + u8 reserved_at_8[0x18]; 9825 + 9826 + u8 syndrome[0x20]; 9827 + 9828 + u8 reserved_at_40[0x40]; 9829 + }; 9830 + 9831 + struct mlx5_ifc_dealloc_sf_in_bits { 9832 + u8 opcode[0x10]; 9833 + u8 reserved_at_10[0x10]; 9834 + 9835 + u8 reserved_at_20[0x10]; 9836 + u8 op_mod[0x10]; 9837 + 9838 + u8 reserved_at_40[0x10]; 9839 + u8 function_id[0x10]; 9840 + 9841 + u8 reserved_at_60[0x20]; 9842 + }; 9843 + 9844 + struct mlx5_ifc_alloc_sf_out_bits { 9845 + u8 status[0x8]; 9846 + u8 reserved_at_8[0x18]; 9847 + 9848 + u8 syndrome[0x20]; 9849 + 9850 + u8 reserved_at_40[0x40]; 9851 + }; 9852 + 9853 + struct mlx5_ifc_alloc_sf_in_bits { 9854 + u8 opcode[0x10]; 9855 + u8 reserved_at_10[0x10]; 9856 + 9857 + u8 reserved_at_20[0x10]; 9858 + u8 op_mod[0x10]; 9859 + 9860 + u8 reserved_at_40[0x10]; 9861 + u8 function_id[0x10]; 9862 + 9863 + u8 reserved_at_60[0x20]; 9864 + }; 9865 + 9866 + struct mlx5_ifc_affiliated_event_header_bits { 9867 + u8 reserved_at_0[0x10]; 9868 + u8 obj_type[0x10]; 9869 + 9870 + u8 obj_id[0x20]; 9871 + }; 9872 + 9873 + enum { 9874 + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), 9875 + }; 9876 + 9877 + enum { 9878 + MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, 9879 + }; 9880 + 9881 + struct mlx5_ifc_encryption_key_obj_bits { 9882 + u8 modify_field_select[0x40]; 9883 + 9884 + u8 reserved_at_40[0x14]; 9885 + u8 key_size[0x4]; 9886 + u8 reserved_at_58[0x4]; 9887 + u8 key_type[0x4]; 9888 + 9889 + u8 reserved_at_60[0x8]; 9890 + u8 pd[0x18]; 9891 + 9892 + u8 reserved_at_80[0x180]; 9893 + u8 key[8][0x20]; 9894 + 
9895 + u8 reserved_at_300[0x500]; 9896 + }; 9897 + 9898 + struct mlx5_ifc_create_encryption_key_in_bits { 9899 + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; 9900 + struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; 9901 + }; 9902 + 9903 + enum { 9904 + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, 9905 + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, 9906 + }; 9907 + 9908 + enum { 9909 + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1, 9910 + }; 9911 + 9912 + struct mlx5_ifc_tls_static_params_bits { 9913 + u8 const_2[0x2]; 9914 + u8 tls_version[0x4]; 9915 + u8 const_1[0x2]; 9916 + u8 reserved_at_8[0x14]; 9917 + u8 encryption_standard[0x4]; 9918 + 9919 + u8 reserved_at_20[0x20]; 9920 + 9921 + u8 initial_record_number[0x40]; 9922 + 9923 + u8 resync_tcp_sn[0x20]; 9924 + 9925 + u8 gcm_iv[0x20]; 9926 + 9927 + u8 implicit_iv[0x40]; 9928 + 9929 + u8 reserved_at_100[0x8]; 9930 + u8 dek_index[0x18]; 9931 + 9932 + u8 reserved_at_120[0xe0]; 9933 + }; 9934 + 9935 + struct mlx5_ifc_tls_progress_params_bits { 9936 + u8 valid[0x1]; 9937 + u8 reserved_at_1[0x7]; 9938 + u8 pd[0x18]; 9939 + 9940 + u8 next_record_tcp_sn[0x20]; 9941 + 9942 + u8 hw_resync_tcp_sn[0x20]; 9943 + 9944 + u8 record_tracker_state[0x2]; 9945 + u8 auth_state[0x2]; 9946 + u8 reserved_at_64[0x4]; 9947 + u8 hw_offset_record_number[0x18]; 9905 9948 }; 9906 9949 9907 9950 #endif /* MLX5_IFC_H */
+6 -1
include/linux/mlx5/qp.h
··· 202 202 u8 signature; 203 203 u8 rsvd[2]; 204 204 u8 fm_ce_se; 205 - __be32 imm; 205 + union { 206 + __be32 general_id; 207 + __be32 imm; 208 + __be32 umr_mkey; 209 + __be32 tisn; 210 + }; 206 211 }; 207 212 208 213 #define MLX5_WQE_CTRL_DS_MASK 0x3f
+3 -4
include/linux/mlx5/vport.h
··· 44 44 MLX5_VPORT_UPLINK_PLACEHOLDER + \ 45 45 MLX5_VPORT_ECPF_PLACEHOLDER(mdev)) 46 46 47 - #define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS(mdev) + \ 48 - mlx5_core_max_vfs(mdev)) 49 - 50 47 #define MLX5_VPORT_MANAGER(mdev) \ 51 48 (MLX5_CAP_GEN(mdev, vport_group_manager) && \ 52 49 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ ··· 55 58 MLX5_CAP_INLINE_MODE_NOT_REQUIRED, 56 59 }; 57 60 61 + /* Vport number for each function must keep unchanged */ 58 62 enum { 59 63 MLX5_VPORT_PF = 0x0, 60 64 MLX5_VPORT_FIRST_VF = 0x1, ··· 67 69 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, 68 70 u16 vport, u8 other_vport, u8 state); 69 71 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, 70 - u16 vport, u8 *addr); 72 + u16 vport, bool other, u8 *addr); 73 + int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr); 71 74 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, 72 75 u16 vport, u8 *min_inline); 73 76 void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);