Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-updates-2023-12-13' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-12-13

Preparation for mlx5e socket direct feature.

Socket Direct will allow multiple PF devices attached to different
NUMA nodes to share the same physical port.

This is a small refactoring series in preparation for supporting
Socket Direct in a subsequent submission.

Highlights:
- Define required device registers and bits related to socket direct
- Flow steering re-arrangements
- Generalize TX objects (TISs) and store them in a common object; this will
be useful in the next series for per-function object management.
- Decouple raw CQ objects from their parent netdev priv
- Prepare devcom for Socket Direct device group discovery.

Please see the individual patches for more information.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+290 -161
+15 -10
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 72 72 #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) 73 73 #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) 74 74 75 - #define MLX5E_MAX_NUM_TC 8 76 75 #define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE 77 76 78 77 #define MLX5_RX_HEADROOM NET_SKB_PAD ··· 363 364 /* control */ 364 365 struct net_device *netdev; 365 366 struct mlx5_core_dev *mdev; 366 - struct mlx5e_priv *priv; 367 + struct workqueue_struct *workqueue; 367 368 struct mlx5_wq_ctrl wq_ctrl; 368 369 } ____cacheline_aligned_in_smp; 369 370 ··· 757 758 /* data path */ 758 759 struct mlx5e_rq rq; 759 760 struct mlx5e_xdpsq rq_xdpsq; 760 - struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC]; 761 + struct mlx5e_txqsq sq[MLX5_MAX_NUM_TC]; 761 762 struct mlx5e_icosq icosq; /* internal control operations */ 762 763 struct mlx5e_txqsq __rcu * __rcu *qos_sqs; 763 764 bool xdp; ··· 807 808 808 809 struct mlx5e_channel_stats { 809 810 struct mlx5e_ch_stats ch; 810 - struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; 811 + struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC]; 811 812 struct mlx5e_rq_stats rq; 812 813 struct mlx5e_rq_stats xskrq; 813 814 struct mlx5e_xdpsq_stats rq_xdpsq; ··· 817 818 818 819 struct mlx5e_ptp_stats { 819 820 struct mlx5e_ch_stats ch; 820 - struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; 821 - struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC]; 821 + struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC]; 822 + struct mlx5e_ptp_cq_stats cq[MLX5_MAX_NUM_TC]; 822 823 struct mlx5e_rq_stats rq; 823 824 } ____cacheline_aligned_in_smp; 824 825 ··· 886 887 struct mlx5e_rq drop_rq; 887 888 888 889 struct mlx5e_channels channels; 889 - u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC]; 890 890 struct mlx5e_rx_res *rx_res; 891 891 u32 *tx_rates; 892 892 ··· 983 985 void (*update_stats)(struct mlx5e_priv *priv); 984 986 void (*update_carrier)(struct mlx5e_priv *priv); 985 987 int (*max_nch_limit)(struct mlx5_core_dev *mdev); 988 + u32 (*get_tisn)(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, 
989 + u8 lag_port, u8 tc); 986 990 unsigned int (*stats_grps_num)(struct mlx5e_priv *priv); 987 991 mlx5e_stats_grp_t *stats_grps; 988 992 const struct mlx5e_rx_handlers *rx_handlers; 989 993 int max_tc; 990 994 u32 features; 991 995 }; 996 + 997 + u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev, 998 + struct mlx5e_priv *priv, 999 + const struct mlx5e_profile *profile, 1000 + u8 lag_port, u8 tc); 992 1001 993 1002 #define mlx5e_profile_feature_cap(profile, feature) \ 994 1003 ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature)) ··· 1044 1039 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq); 1045 1040 1046 1041 struct mlx5e_create_cq_param { 1042 + struct net_device *netdev; 1043 + struct workqueue_struct *wq; 1047 1044 struct napi_struct *napi; 1048 1045 struct mlx5e_ch_stats *ch_stats; 1049 1046 int node; ··· 1053 1046 }; 1054 1047 1055 1048 struct mlx5e_cq_param; 1056 - int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, 1049 + int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, 1057 1050 struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, 1058 1051 struct mlx5e_cq *cq); 1059 1052 void mlx5e_close_cq(struct mlx5e_cq *cq); ··· 1140 1133 int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); 1141 1134 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); 1142 1135 1143 - int mlx5e_create_tises(struct mlx5e_priv *priv); 1144 - void mlx5e_destroy_tises(struct mlx5e_priv *priv); 1145 1136 int mlx5e_update_nic_rx(struct mlx5e_priv *priv); 1146 1137 void mlx5e_update_carrier(struct mlx5e_priv *priv); 1147 1138 int mlx5e_close(struct net_device *netdev);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
··· 36 36 return true; 37 37 } 38 38 39 - void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv) 39 + static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv) 40 40 { 41 41 u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {}; 42 42
-1
drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
··· 7 7 int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv); 8 8 void mlx5e_monitor_counter_init(struct mlx5e_priv *priv); 9 9 void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv); 10 - void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv); 11 10 12 11 #endif /* __MLX5_MONITOR_H__ */
+2
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 669 669 void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c) 670 670 { 671 671 *ccp = (struct mlx5e_create_cq_param) { 672 + .netdev = c->netdev, 673 + .wq = c->priv->wq, 672 674 .napi = &c->napi, 673 675 .ch_stats = c->stats, 674 676 .node = cpu_to_node(c->cpu),
+11 -5
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
··· 518 518 519 519 for (tc = 0; tc < num_tc; tc++) { 520 520 int txq_ix = ix_base + tc; 521 + u32 tisn; 521 522 522 - err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, 523 - cparams, tc, &c->ptpsq[tc]); 523 + tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, 524 + c->lag_port, tc); 525 + err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, &c->ptpsq[tc]); 524 526 if (err) 525 527 goto close_txqsq; 526 528 } ··· 557 555 558 556 num_tc = mlx5e_get_dcb_num_tc(params); 559 557 558 + ccp.netdev = c->netdev; 559 + ccp.wq = c->priv->wq; 560 560 ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); 561 561 ccp.ch_stats = c->stats; 562 562 ccp.napi = &c->napi; ··· 569 565 for (tc = 0; tc < num_tc; tc++) { 570 566 struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq; 571 567 572 - err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); 568 + err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq); 573 569 if (err) 574 570 goto out_err_txqsq_cq; 575 571 } ··· 578 574 struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq; 579 575 struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc]; 580 576 581 - err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); 577 + err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq); 582 578 if (err) 583 579 goto out_err_ts_cq; 584 580 ··· 606 602 struct mlx5e_cq_param *cq_param; 607 603 struct mlx5e_cq *cq = &c->rq.cq; 608 604 605 + ccp.netdev = c->netdev; 606 + ccp.wq = c->priv->wq; 609 607 ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); 610 608 ccp.ch_stats = c->stats; 611 609 ccp.napi = &c->napi; ··· 615 609 616 610 cq_param = &cparams->rq_param.cqp; 617 611 618 - return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); 612 + return mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq); 619 613 } 620 614 621 615 static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
··· 49 49 50 50 struct mlx5e_ptp { 51 51 /* data path */ 52 - struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC]; 52 + struct mlx5e_ptpsq ptpsq[MLX5_MAX_NUM_TC]; 53 53 struct mlx5e_rq rq; 54 54 struct napi_struct napi; 55 55 struct device *pdev;
+6 -3
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
··· 77 77 struct mlx5e_params *params; 78 78 struct mlx5e_channel *c; 79 79 struct mlx5e_txqsq *sq; 80 + u32 tisn; 80 81 81 82 params = &chs->params; 82 83 ··· 124 123 memset(&param_cq, 0, sizeof(param_cq)); 125 124 mlx5e_build_sq_param(priv->mdev, params, &param_sq); 126 125 mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq); 127 - err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq); 126 + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq); 128 127 if (err) 129 128 goto err_free_sq; 130 - err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params, 131 - &param_sq, sq, 0, hw_id, 129 + 130 + tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, 131 + c->lag_port, 0); 132 + err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id, 132 133 priv->htb_qos_sq_stats[node_qid]); 133 134 if (err) 134 135 goto err_close_cq;
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
··· 68 68 69 69 node = dev_to_node(mdev->device); 70 70 71 + ccp.netdev = priv->netdev; 72 + ccp.wq = priv->wq; 71 73 ccp.node = node; 72 74 ccp.ch_stats = t->stats; 73 75 ccp.napi = &t->napi; 74 76 ccp.ix = 0; 75 - err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq); 77 + err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq); 76 78 if (err) 77 79 return err; 78 80
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
··· 127 127 128 128 mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam); 129 129 130 - err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, 130 + err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, 131 131 &c->xskrq.cq); 132 132 if (unlikely(err)) 133 133 goto err_free_cparam; ··· 136 136 if (unlikely(err)) 137 137 goto err_close_rx_cq; 138 138 139 - err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, 139 + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, 140 140 &c->xsksq.cq); 141 141 if (unlikely(err)) 142 142 goto err_close_rq;
+74
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
··· 74 74 return err; 75 75 } 76 76 77 + int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) 78 + { 79 + void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 80 + 81 + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); 82 + 83 + if (mlx5_lag_is_lacp_owner(mdev)) 84 + MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); 85 + 86 + return mlx5_core_create_tis(mdev, in, tisn); 87 + } 88 + 89 + void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) 90 + { 91 + mlx5_core_destroy_tis(mdev, tisn); 92 + } 93 + 94 + static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]) 95 + { 96 + int tc, i; 97 + 98 + for (i = 0; i < MLX5_MAX_PORTS; i++) 99 + for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) 100 + mlx5e_destroy_tis(mdev, tisn[i][tc]); 101 + } 102 + 103 + static bool mlx5_lag_should_assign_affinity(struct mlx5_core_dev *mdev) 104 + { 105 + return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; 106 + } 107 + 108 + static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]) 109 + { 110 + int tc, i; 111 + int err; 112 + 113 + for (i = 0; i < MLX5_MAX_PORTS; i++) { 114 + for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) { 115 + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 116 + void *tisc; 117 + 118 + tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 119 + 120 + MLX5_SET(tisc, tisc, prio, tc << 1); 121 + 122 + if (mlx5_lag_should_assign_affinity(mdev)) 123 + MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); 124 + 125 + err = mlx5e_create_tis(mdev, in, &tisn[i][tc]); 126 + if (err) 127 + goto err_close_tises; 128 + } 129 + } 130 + 131 + return 0; 132 + 133 + err_close_tises: 134 + for (; i >= 0; i--) { 135 + for (tc--; tc >= 0; tc--) 136 + mlx5e_destroy_tis(mdev, tisn[i][tc]); 137 + tc = MLX5_MAX_NUM_TC; 138 + } 139 + 140 + return err; 141 + } 142 + 77 143 int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) 78 144 { 79 145 struct 
mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; ··· 169 103 goto err_destroy_mkey; 170 104 } 171 105 106 + err = mlx5e_create_tises(mdev, res->tisn); 107 + if (err) { 108 + mlx5_core_err(mdev, "alloc tises failed, %d\n", err); 109 + goto err_destroy_bfreg; 110 + } 172 111 INIT_LIST_HEAD(&res->td.tirs_list); 173 112 mutex_init(&res->td.list_lock); 174 113 ··· 186 115 187 116 return 0; 188 117 118 + err_destroy_bfreg: 119 + mlx5_free_bfreg(mdev, &res->bfreg); 189 120 err_destroy_mkey: 190 121 mlx5_core_destroy_mkey(mdev, res->mkey); 191 122 err_dealloc_transport_domain: ··· 203 130 204 131 mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv); 205 132 mdev->mlx5e_res.dek_priv = NULL; 133 + mlx5e_destroy_tises(mdev, res->tisn); 206 134 mlx5_free_bfreg(mdev, &res->bfreg); 207 135 mlx5_core_destroy_mkey(mdev, res->mkey); 208 136 mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+51 -108
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1352 1352 mlx5e_free_rq(rq); 1353 1353 } 1354 1354 1355 + u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev, 1356 + struct mlx5e_priv *priv, 1357 + const struct mlx5e_profile *profile, 1358 + u8 lag_port, u8 tc) 1359 + { 1360 + if (profile->get_tisn) 1361 + return profile->get_tisn(mdev, priv, lag_port, tc); 1362 + 1363 + return mdev->mlx5e_res.hw_objs.tisn[lag_port][tc]; 1364 + } 1365 + 1355 1366 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) 1356 1367 { 1357 1368 kvfree(sq->db.xdpi_fifo.xi); ··· 1931 1920 return err; 1932 1921 1933 1922 csp.tis_lst_sz = 1; 1934 - csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */ 1923 + csp.tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, 1924 + c->lag_port, 0); /* tc = 0 */ 1935 1925 csp.cqn = sq->cq.mcq.cqn; 1936 1926 csp.wq_ctrl = &sq->wq_ctrl; 1937 1927 csp.min_inline_mode = sq->min_inline_mode; ··· 1994 1982 mlx5e_free_xdpsq(sq); 1995 1983 } 1996 1984 1997 - static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, 1985 + static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, 1986 + struct net_device *netdev, 1987 + struct workqueue_struct *workqueue, 1998 1988 struct mlx5e_cq_param *param, 1999 1989 struct mlx5e_cq *cq) 2000 1990 { 2001 - struct mlx5_core_dev *mdev = priv->mdev; 2002 1991 struct mlx5_core_cq *mcq = &cq->mcq; 2003 1992 int err; 2004 1993 u32 i; ··· 2026 2013 } 2027 2014 2028 2015 cq->mdev = mdev; 2029 - cq->netdev = priv->netdev; 2030 - cq->priv = priv; 2016 + cq->netdev = netdev; 2017 + cq->workqueue = workqueue; 2031 2018 2032 2019 return 0; 2033 2020 } 2034 2021 2035 - static int mlx5e_alloc_cq(struct mlx5e_priv *priv, 2022 + static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev, 2036 2023 struct mlx5e_cq_param *param, 2037 2024 struct mlx5e_create_cq_param *ccp, 2038 2025 struct mlx5e_cq *cq) ··· 2043 2030 param->wq.db_numa_node = ccp->node; 2044 2031 param->eq_ix = ccp->ix; 2045 2032 2046 - err = mlx5e_alloc_cq_common(priv, param, cq); 2033 + err = 
mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq); 2047 2034 2048 2035 cq->napi = ccp->napi; 2049 2036 cq->ch_stats = ccp->ch_stats; ··· 2109 2096 mlx5_core_destroy_cq(cq->mdev, &cq->mcq); 2110 2097 } 2111 2098 2112 - int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, 2099 + int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, 2113 2100 struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, 2114 2101 struct mlx5e_cq *cq) 2115 2102 { 2116 - struct mlx5_core_dev *mdev = priv->mdev; 2117 2103 int err; 2118 2104 2119 - err = mlx5e_alloc_cq(priv, param, ccp, cq); 2105 + err = mlx5e_alloc_cq(mdev, param, ccp, cq); 2120 2106 if (err) 2121 2107 return err; 2122 2108 ··· 2148 2136 int tc; 2149 2137 2150 2138 for (tc = 0; tc < c->num_tc; tc++) { 2151 - err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp, 2139 + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp, 2152 2140 ccp, &c->sq[tc].cq); 2153 2141 if (err) 2154 2142 goto err_close_tx_cqs; ··· 2216 2204 for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { 2217 2205 int txq_ix = c->ix + tc * params->num_channels; 2218 2206 u32 qos_queue_group_id; 2207 + u32 tisn; 2219 2208 2209 + tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile, 2210 + c->lag_port, tc); 2220 2211 err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id); 2221 2212 if (err) 2222 2213 goto err_close_sqs; 2223 2214 2224 - err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, 2215 + err = mlx5e_open_txqsq(c, tisn, txq_ix, 2225 2216 params, &cparam->txq_sq, &c->sq[tc], tc, 2226 2217 qos_queue_group_id, 2227 2218 &c->priv->channel_stats[c->ix]->sq[tc]); ··· 2352 2337 2353 2338 mlx5e_build_create_cq_param(&ccp, c); 2354 2339 2355 - err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp, 2340 + err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp, 2356 2341 
&c->async_icosq.cq); 2357 2342 if (err) 2358 2343 return err; 2359 2344 2360 - err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp, 2345 + err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp, 2361 2346 &c->icosq.cq); 2362 2347 if (err) 2363 2348 goto err_close_async_icosq_cq; ··· 2366 2351 if (err) 2367 2352 goto err_close_icosq_cq; 2368 2353 2369 - err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, 2354 + err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, 2370 2355 &c->xdpsq.cq); 2371 2356 if (err) 2372 2357 goto err_close_tx_cqs; 2373 2358 2374 - err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, 2359 + err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, 2375 2360 &c->rq.cq); 2376 2361 if (err) 2377 2362 goto err_close_xdp_tx_cqs; 2378 2363 2379 - err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, 2364 + err = c->xdp ? 
mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, 2380 2365 &ccp, &c->rq_xdpsq.cq) : 0; 2381 2366 if (err) 2382 2367 goto err_close_rx_cq; ··· 3323 3308 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); 3324 3309 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); 3325 3310 3326 - return mlx5e_alloc_cq_common(priv, param, cq); 3311 + return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq); 3327 3312 } 3328 3313 3329 3314 int mlx5e_open_drop_rq(struct mlx5e_priv *priv, ··· 3379 3364 mlx5e_free_cq(&drop_rq->cq); 3380 3365 } 3381 3366 3382 - int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn) 3383 - { 3384 - void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 3385 - 3386 - MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); 3387 - 3388 - if (MLX5_GET(tisc, tisc, tls_en)) 3389 - MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn); 3390 - 3391 - if (mlx5_lag_is_lacp_owner(mdev)) 3392 - MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); 3393 - 3394 - return mlx5_core_create_tis(mdev, in, tisn); 3395 - } 3396 - 3397 - void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) 3398 - { 3399 - mlx5_core_destroy_tis(mdev, tisn); 3400 - } 3401 - 3402 - void mlx5e_destroy_tises(struct mlx5e_priv *priv) 3403 - { 3404 - int tc, i; 3405 - 3406 - for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) 3407 - for (tc = 0; tc < priv->profile->max_tc; tc++) 3408 - mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); 3409 - } 3410 - 3411 - static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev) 3412 - { 3413 - return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; 3414 - } 3415 - 3416 - int mlx5e_create_tises(struct mlx5e_priv *priv) 3417 - { 3418 - int tc, i; 3419 - int err; 3420 - 3421 - for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) { 3422 - for (tc = 0; tc < priv->profile->max_tc; tc++) { 3423 - u32 
in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 3424 - void *tisc; 3425 - 3426 - tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 3427 - 3428 - MLX5_SET(tisc, tisc, prio, tc << 1); 3429 - 3430 - if (mlx5e_lag_should_assign_affinity(priv->mdev)) 3431 - MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1); 3432 - 3433 - err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]); 3434 - if (err) 3435 - goto err_close_tises; 3436 - } 3437 - } 3438 - 3439 - return 0; 3440 - 3441 - err_close_tises: 3442 - for (; i >= 0; i--) { 3443 - for (tc--; tc >= 0; tc--) 3444 - mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); 3445 - tc = priv->profile->max_tc; 3446 - } 3447 - 3448 - return err; 3449 - } 3450 - 3451 3367 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) 3452 3368 { 3453 3369 if (priv->mqprio_rl) { ··· 3387 3441 priv->mqprio_rl = NULL; 3388 3442 } 3389 3443 mlx5e_accel_cleanup_tx(priv); 3390 - mlx5e_destroy_tises(priv); 3391 3444 } 3392 3445 3393 3446 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) ··· 3488 3543 3489 3544 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 3490 3545 3491 - if (tc && tc != MLX5E_MAX_NUM_TC) 3546 + if (tc && tc != MLX5_MAX_NUM_TC) 3492 3547 return -EINVAL; 3493 3548 3494 3549 new_params = priv->channels.params; ··· 5451 5506 { 5452 5507 int err; 5453 5508 5454 - err = mlx5e_create_tises(priv); 5455 - if (err) { 5456 - mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); 5457 - return err; 5458 - } 5459 - 5460 5509 err = mlx5e_accel_init_tx(priv); 5461 5510 if (err) 5462 - goto err_destroy_tises; 5511 + return err; 5463 5512 5464 5513 mlx5e_set_mqprio_rl(priv); 5465 5514 mlx5e_dcbnl_initialize(priv); 5466 5515 return 0; 5467 - 5468 - err_destroy_tises: 5469 - mlx5e_destroy_tises(priv); 5470 - return err; 5471 5516 } 5472 5517 5473 5518 static void mlx5e_nic_enable(struct mlx5e_priv *priv) ··· 5552 5617 .update_stats = mlx5e_stats_update_ndo_stats, 5553 5618 .update_carrier = mlx5e_update_carrier, 5554 5619 .rx_handlers = 
&mlx5e_rx_handlers_nic, 5555 - .max_tc = MLX5E_MAX_NUM_TC, 5620 + .max_tc = MLX5_MAX_NUM_TC, 5556 5621 .stats_grps = mlx5e_nic_stats_grps, 5557 5622 .stats_grps_num = mlx5e_nic_stats_grps_num, 5558 5623 .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) | ··· 6005 6070 return 0; 6006 6071 } 6007 6072 6008 - static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) 6073 + static int _mlx5e_suspend(struct auxiliary_device *adev) 6009 6074 { 6010 6075 struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); 6011 6076 struct mlx5e_priv *priv = mlx5e_dev->priv; ··· 6023 6088 return 0; 6024 6089 } 6025 6090 6026 - static int mlx5e_probe(struct auxiliary_device *adev, 6027 - const struct auxiliary_device_id *id) 6091 + static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) 6092 + { 6093 + return _mlx5e_suspend(adev); 6094 + } 6095 + 6096 + static int _mlx5e_probe(struct auxiliary_device *adev) 6028 6097 { 6029 6098 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); 6030 6099 const struct mlx5e_profile *profile = &mlx5e_nic_profile; 6031 6100 struct mlx5_core_dev *mdev = edev->mdev; 6032 6101 struct mlx5e_dev *mlx5e_dev; 6033 6102 struct net_device *netdev; 6034 - pm_message_t state = {}; 6035 6103 struct mlx5e_priv *priv; 6036 6104 int err; 6037 6105 ··· 6089 6151 return 0; 6090 6152 6091 6153 err_resume: 6092 - mlx5e_suspend(adev, state); 6154 + _mlx5e_suspend(adev); 6093 6155 err_profile_cleanup: 6094 6156 profile->cleanup(priv); 6095 6157 err_destroy_netdev: ··· 6101 6163 return err; 6102 6164 } 6103 6165 6166 + static int mlx5e_probe(struct auxiliary_device *adev, 6167 + const struct auxiliary_device_id *id) 6168 + { 6169 + return _mlx5e_probe(adev); 6170 + } 6171 + 6104 6172 static void mlx5e_remove(struct auxiliary_device *adev) 6105 6173 { 6106 6174 struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); 6107 6175 struct mlx5e_priv *priv = mlx5e_dev->priv; 6108 - pm_message_t state = {}; 6109 6176 6110 6177 
mlx5_core_uplink_netdev_set(priv->mdev, NULL); 6111 6178 mlx5e_dcbnl_delete_app(priv); 6112 6179 unregister_netdev(priv->netdev); 6113 - mlx5e_suspend(adev, state); 6180 + _mlx5e_suspend(adev); 6114 6181 priv->profile->cleanup(priv); 6115 6182 mlx5e_destroy_netdev(priv); 6116 6183 mlx5e_devlink_port_unregister(mlx5e_dev);
+1 -9
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 1180 1180 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1181 1181 int err; 1182 1182 1183 - err = mlx5e_create_tises(priv); 1184 - if (err) { 1185 - mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); 1186 - return err; 1187 - } 1188 - 1189 1183 err = mlx5e_rep_neigh_init(rpriv); 1190 1184 if (err) 1191 1185 goto err_neigh_init; ··· 1202 1208 err_init_tx: 1203 1209 mlx5e_rep_neigh_cleanup(rpriv); 1204 1210 err_neigh_init: 1205 - mlx5e_destroy_tises(priv); 1206 1211 return err; 1207 1212 } 1208 1213 ··· 1215 1222 mlx5e_cleanup_uplink_rep_tx(rpriv); 1216 1223 1217 1224 mlx5e_rep_neigh_cleanup(rpriv); 1218 - mlx5e_destroy_tises(priv); 1219 1225 } 1220 1226 1221 1227 static void mlx5e_rep_enable(struct mlx5e_priv *priv) ··· 1444 1452 .update_stats = mlx5e_stats_update_ndo_stats, 1445 1453 .update_carrier = mlx5e_update_carrier, 1446 1454 .rx_handlers = &mlx5e_rx_handlers_rep, 1447 - .max_tc = MLX5E_MAX_NUM_TC, 1455 + .max_tc = MLX5_MAX_NUM_TC, 1448 1456 .stats_grps = mlx5e_ul_rep_stats_grps, 1449 1457 .stats_grps_num = mlx5e_ul_rep_stats_grps_num, 1450 1458 };
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1039 1039 (struct mlx5_err_cqe *)cqe); 1040 1040 mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); 1041 1041 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 1042 - queue_work(cq->priv->wq, &sq->recover_work); 1042 + queue_work(cq->workqueue, &sq->recover_work); 1043 1043 break; 1044 1044 } 1045 1045
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 861 861 mlx5e_dump_error_cqe(&sq->cq, sq->sqn, 862 862 (struct mlx5_err_cqe *)cqe); 863 863 mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); 864 - queue_work(cq->priv->wq, &sq->recover_work); 864 + queue_work(cq->workqueue, &sq->recover_work); 865 865 } 866 866 stats->cqe_err++; 867 867 }
+34
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 1144 1144 return mlx5_fs_cmd_get_stub_cmds(); 1145 1145 } 1146 1146 } 1147 + 1148 + int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode) 1149 + { 1150 + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {}; 1151 + 1152 + if (silent_mode && !MLX5_CAP_GEN(dev, silent_mode)) 1153 + return -EOPNOTSUPP; 1154 + 1155 + MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); 1156 + MLX5_SET(set_l2_table_entry_in, in, silent_mode_valid, 1); 1157 + MLX5_SET(set_l2_table_entry_in, in, silent_mode, silent_mode); 1158 + 1159 + return mlx5_cmd_exec_in(dev, set_l2_table_entry, in); 1160 + } 1161 + 1162 + int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect) 1163 + { 1164 + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; 1165 + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; 1166 + 1167 + if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) 1168 + return -EOPNOTSUPP; 1169 + 1170 + MLX5_SET(set_flow_table_root_in, in, opcode, 1171 + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 1172 + MLX5_SET(set_flow_table_root_in, in, table_type, 1173 + FS_FT_NIC_TX); 1174 + if (disconnect) 1175 + MLX5_SET(set_flow_table_root_in, in, op_mod, 1); 1176 + else 1177 + MLX5_SET(set_flow_table_root_in, in, table_id, ft_id); 1178 + 1179 + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 1180 + }
+2
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
··· 122 122 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type); 123 123 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); 124 124 125 + int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode); 126 + int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect); 125 127 #endif
+16 -3
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 339 339 return err; 340 340 } 341 341 342 - err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]); 342 + err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &ipriv->tisn); 343 343 if (err) { 344 344 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); 345 345 goto err_destroy_underlay_qp; ··· 356 356 { 357 357 struct mlx5i_priv *ipriv = priv->ppriv; 358 358 359 - mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]); 359 + mlx5e_destroy_tis(priv->mdev, ipriv->tisn); 360 360 mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn); 361 361 } 362 362 ··· 483 483 return ARRAY_SIZE(mlx5i_stats_grps); 484 484 } 485 485 486 + u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc) 487 + { 488 + struct mlx5i_priv *ipriv = priv->ppriv; 489 + 490 + if (WARN(lag_port || tc, 491 + "IPoIB unexpected non-zero value: lag_port (%u), tc (%u)\n", 492 + lag_port, tc)) 493 + return 0; 494 + 495 + return ipriv->tisn; 496 + } 497 + 486 498 static const struct mlx5e_profile mlx5i_nic_profile = { 487 499 .init = mlx5i_init, 488 500 .cleanup = mlx5i_cleanup, ··· 511 499 .max_tc = MLX5I_MAX_NUM_TC, 512 500 .stats_grps = mlx5i_stats_grps, 513 501 .stats_grps_num = mlx5i_stats_grps_num, 502 + .get_tisn = mlx5i_get_tisn, 514 503 }; 515 504 516 505 /* mlx5i netdev NDos */ ··· 842 829 *params = (struct rdma_netdev_alloc_params){ 843 830 .sizeof_priv = sizeof(struct mlx5i_priv) + 844 831 sizeof(struct mlx5e_priv), 845 - .txqs = nch * MLX5E_MAX_NUM_TC, 832 + .txqs = nch * MLX5_MAX_NUM_TC, 846 833 .rxqs = nch, 847 834 .param = mdev, 848 835 .initialize_rdma_netdev = mlx5_rdma_setup_rn,
+2
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
··· 53 53 struct mlx5i_priv { 54 54 struct rdma_netdev rn; /* keep this first */ 55 55 u32 qpn; 56 + u32 tisn; 56 57 bool sub_interface; 57 58 u32 num_sub_interfaces; 58 59 u32 qkey; ··· 64 63 }; 65 64 66 65 int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn); 66 + u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc); 67 67 68 68 /* Underlay QP create/destroy functions */ 69 69 int mlx5i_create_underlay_qp(struct mlx5e_priv *priv);
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
··· 218 218 goto err_unint_underlay_qp; 219 219 } 220 220 221 - err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]); 221 + err = mlx5i_create_tis(mdev, ipriv->qpn, &ipriv->tisn); 222 222 if (err) { 223 223 mlx5_core_warn(mdev, "create child tis failed, %d\n", err); 224 224 goto err_remove_rx_uderlay_qp; ··· 240 240 err_close_channels: 241 241 mlx5e_close_channels(&epriv->channels); 242 242 err_clear_state_opened_flag: 243 - mlx5e_destroy_tis(mdev, epriv->tisn[0][0]); 243 + mlx5e_destroy_tis(mdev, ipriv->tisn); 244 244 err_remove_rx_uderlay_qp: 245 245 mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn); 246 246 err_unint_underlay_qp: ··· 269 269 mlx5i_uninit_underlay_qp(priv); 270 270 mlx5e_deactivate_priv_channels(priv); 271 271 mlx5e_close_channels(&priv->channels); 272 - mlx5e_destroy_tis(mdev, priv->tisn[0][0]); 272 + mlx5e_destroy_tis(mdev, ipriv->tisn); 273 273 unlock: 274 274 mutex_unlock(&priv->state_lock); 275 275 return 0; ··· 361 361 .update_stats = NULL, 362 362 .rx_handlers = &mlx5i_rx_handlers, 363 363 .max_tc = MLX5I_MAX_NUM_TC, 364 + .get_tisn = mlx5i_get_tisn, 364 365 }; 365 366 366 367 const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
+7
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
··· 256 256 devcom_free_comp_dev(devcom); 257 257 } 258 258 259 + int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom) 260 + { 261 + struct mlx5_devcom_comp *comp = devcom->comp; 262 + 263 + return kref_read(&comp->ref); 264 + } 265 + 259 266 int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom, 260 267 int event, int rollback_event, 261 268 void *event_data)
+1
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
··· 31 31 int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom, 32 32 int event, int rollback_event, 33 33 void *event_data); 34 + int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom); 34 35 35 36 void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready); 36 37 bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
+1
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 243 243 u8 access_reg_group); 244 244 int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, 245 245 u8 feature_group, u8 access_reg_group); 246 + int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir); 246 247 247 248 void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); 248 249 void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
+10
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 1206 1206 *speed = max_speed; 1207 1207 return 0; 1208 1208 } 1209 + 1210 + int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir) 1211 + { 1212 + u32 in[MLX5_ST_SZ_DW(mpir_reg)] = {}; 1213 + int sz = MLX5_ST_SZ_BYTES(mpir_reg); 1214 + 1215 + MLX5_SET(mpir_reg, in, local_port, 1); 1216 + 1217 + return mlx5_core_access_reg(dev, in, sz, mpir, sz, MLX5_REG_MPIR, 0, 0); 1218 + }
+2 -6
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
··· 1170 1170 bool ignore_flow_level, 1171 1171 u32 flow_source) 1172 1172 { 1173 - struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest; 1174 1173 struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; 1175 1174 struct mlx5dr_action **ref_actions; 1176 1175 struct mlx5dr_action *action; ··· 1248 1249 * one that done in the TX. 1249 1250 * So, if one of the ft target is wire, put it at the end of the dest list. 1250 1251 */ 1251 - if (is_ft_wire && num_dst_ft > 1) { 1252 - tmp_hw_dest = hw_dests[last_dest]; 1253 - hw_dests[last_dest] = hw_dests[num_of_dests - 1]; 1254 - hw_dests[num_of_dests - 1] = tmp_hw_dest; 1255 - } 1252 + if (is_ft_wire && num_dst_ft > 1) 1253 + swap(hw_dests[last_dest], hw_dests[num_of_dests - 1]); 1256 1254 1257 1255 action = dr_action_create_generic(DR_ACTION_TYP_FT); 1258 1256 if (!action)
+3
include/linux/mlx5/driver.h
··· 150 150 MLX5_REG_MTPPSE = 0x9054, 151 151 MLX5_REG_MTUTC = 0x9055, 152 152 MLX5_REG_MPEGC = 0x9056, 153 + MLX5_REG_MPIR = 0x9059, 153 154 MLX5_REG_MCQS = 0x9060, 154 155 MLX5_REG_MCQI = 0x9061, 155 156 MLX5_REG_MCC = 0x9062, ··· 679 678 struct mlx5_td td; 680 679 u32 mkey; 681 680 struct mlx5_sq_bfreg bfreg; 681 + #define MLX5_MAX_NUM_TC 8 682 + u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; 682 683 } hw_objs; 683 684 struct net_device *uplink_netdev; 684 685 struct mutex uplink_netdev_lock;
+39 -6
include/linux/mlx5/mlx5_ifc.h
··· 435 435 u8 flow_table_modify[0x1]; 436 436 u8 reformat[0x1]; 437 437 u8 decap[0x1]; 438 - u8 reserved_at_9[0x1]; 438 + u8 reset_root_to_default[0x1]; 439 439 u8 pop_vlan[0x1]; 440 440 u8 push_vlan[0x1]; 441 441 u8 reserved_at_c[0x1]; ··· 1801 1801 u8 disable_local_lb_uc[0x1]; 1802 1802 u8 disable_local_lb_mc[0x1]; 1803 1803 u8 log_min_hairpin_wq_data_sz[0x5]; 1804 - u8 reserved_at_3e8[0x2]; 1804 + u8 reserved_at_3e8[0x1]; 1805 + u8 silent_mode[0x1]; 1805 1806 u8 vhca_state[0x1]; 1806 1807 u8 log_max_vlan_list[0x5]; 1807 1808 u8 reserved_at_3f0[0x3]; ··· 1819 1818 1820 1819 u8 reserved_at_460[0x1]; 1821 1820 u8 ats[0x1]; 1822 - u8 reserved_at_462[0x1]; 1821 + u8 cross_vhca_rqt[0x1]; 1823 1822 u8 log_max_uctx[0x5]; 1824 1823 u8 reserved_at_468[0x1]; 1825 1824 u8 crypto[0x1]; ··· 1944 1943 1945 1944 enum { 1946 1945 MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000, 1946 + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE = (1ULL << 20), 1947 1947 }; 1948 1948 1949 1949 enum { ··· 1994 1992 u8 reserved_at_260[0x120]; 1995 1993 u8 reserved_at_380[0x10]; 1996 1994 u8 ec_vf_vport_base[0x10]; 1997 - u8 reserved_at_3a0[0x460]; 1995 + 1996 + u8 reserved_at_3a0[0x10]; 1997 + u8 max_rqt_vhca_id[0x10]; 1998 + 1999 + u8 reserved_at_3c0[0x440]; 1998 2000 }; 1999 2001 2000 2002 enum mlx5_ifc_flow_destination_type { ··· 2155 2149 struct mlx5_ifc_rq_num_bits { 2156 2150 u8 reserved_at_0[0x8]; 2157 2151 u8 rq_num[0x18]; 2152 + }; 2153 + 2154 + struct mlx5_ifc_rq_vhca_bits { 2155 + u8 reserved_at_0[0x8]; 2156 + u8 rq_num[0x18]; 2157 + u8 reserved_at_20[0x10]; 2158 + u8 rq_vhca_id[0x10]; 2158 2159 }; 2159 2160 2160 2161 struct mlx5_ifc_mac_address_layout_bits { ··· 3914 3901 3915 3902 u8 reserved_at_e0[0x6a0]; 3916 3903 3917 - struct mlx5_ifc_rq_num_bits rq_num[]; 3904 + union { 3905 + DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_num_bits, rq_num); 3906 + DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_vhca_bits, rq_vhca); 3907 
+ }; 3918 3908 }; 3919 3909 3920 3910 enum { ··· 4760 4744 4761 4745 u8 reserved_at_c0[0x20]; 4762 4746 4763 - u8 reserved_at_e0[0x13]; 4747 + u8 reserved_at_e0[0x10]; 4748 + u8 silent_mode_valid[0x1]; 4749 + u8 silent_mode[0x1]; 4750 + u8 reserved_at_f2[0x1]; 4764 4751 u8 vlan_valid[0x1]; 4765 4752 u8 vlan[0xc]; 4766 4753 ··· 10106 10087 u8 tx_lossy_overflow_oper[0x2]; 10107 10088 10108 10089 u8 reserved_at_60[0x100]; 10090 + }; 10091 + 10092 + struct mlx5_ifc_mpir_reg_bits { 10093 + u8 sdm[0x1]; 10094 + u8 reserved_at_1[0x1b]; 10095 + u8 host_buses[0x4]; 10096 + 10097 + u8 reserved_at_20[0x20]; 10098 + 10099 + u8 local_port[0x8]; 10100 + u8 reserved_at_28[0x15]; 10101 + u8 sd_group[0x3]; 10102 + 10103 + u8 reserved_at_60[0x20]; 10109 10104 }; 10110 10105 10111 10106 enum {