Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5e-per-queue-coalescing'

Tariq Toukan says:

====================
mlx5e per-queue coalescing

This patchset adds ethtool per-queue coalescing support for the mlx5e
driver.

The series introduces some changes needed as preparation for the final
patch, which adds the support and implements the callbacks. Main
changes:
- DIM code movements into its own header file.
- Switch to dynamic allocation of the DIM struct in the RQs/SQs.
- Allow coalescing config change without channels reset when possible.
====================

Link: https://lore.kernel.org/r/20240419080445.417574-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+673 -197
+26 -4
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 320 320 bool scatter_fcs_en; 321 321 bool rx_dim_enabled; 322 322 bool tx_dim_enabled; 323 + bool rx_moder_use_cqe_mode; 324 + bool tx_moder_use_cqe_mode; 323 325 u32 pflags; 324 326 struct bpf_prog *xdp_prog; 325 327 struct mlx5e_xsk *xsk; ··· 432 430 u16 cc; 433 431 u16 skb_fifo_cc; 434 432 u32 dma_fifo_cc; 435 - struct dim dim; /* Adaptive Moderation */ 433 + struct dim *dim; /* Adaptive Moderation */ 436 434 437 435 /* dirtied @xmit */ 438 436 u16 pc ____cacheline_aligned_in_smp; ··· 724 722 int ix; 725 723 unsigned int hw_mtu; 726 724 727 - struct dim dim; /* Dynamic Interrupt Moderation */ 725 + struct dim *dim; /* Dynamic Interrupt Moderation */ 728 726 729 727 /* XDP */ 730 728 struct bpf_prog __rcu *xdp_prog; ··· 799 797 int cpu; 800 798 /* Sync between icosq recovery and XSK enable/disable. */ 801 799 struct mutex icosq_recovery_lock; 800 + 801 + /* coalescing configuration */ 802 + struct dim_cq_moder rx_cq_moder; 803 + struct dim_cq_moder tx_cq_moder; 802 804 }; 803 805 804 806 struct mlx5e_ptp; ··· 1046 1040 int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter); 1047 1041 void mlx5e_destroy_rq(struct mlx5e_rq *rq); 1048 1042 1043 + bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, 1044 + bool dim_enabled); 1045 + bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, 1046 + bool dim_enabled, bool keep_dim_state); 1047 + 1049 1048 struct mlx5e_sq_param; 1050 1049 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, 1051 1050 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, ··· 1071 1060 struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, 1072 1061 struct mlx5e_cq *cq); 1073 1062 void mlx5e_close_cq(struct mlx5e_cq *cq); 1063 + int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 1064 + u8 cq_period_mode); 1065 + int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct 
mlx5_core_cq *cq, 1066 + u16 cq_period, u16 cq_max_count, u8 cq_period_mode); 1074 1067 1075 1068 int mlx5e_open_locked(struct net_device *netdev); 1076 1069 int mlx5e_close_locked(struct net_device *netdev); ··· 1132 1117 u32 *sqn); 1133 1118 void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); 1134 1119 void mlx5e_close_txqsq(struct mlx5e_txqsq *sq); 1120 + 1121 + bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, 1122 + bool dim_enabled); 1123 + bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, 1124 + bool dim_enabled, bool keep_dim_state); 1135 1125 1136 1126 static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) 1137 1127 { ··· 1199 1179 struct ethtool_coalesce *coal, 1200 1180 struct kernel_ethtool_coalesce *kernel_coal, 1201 1181 struct netlink_ext_ack *extack); 1182 + int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue, 1183 + struct ethtool_coalesce *coal); 1184 + int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue, 1185 + struct ethtool_coalesce *coal); 1202 1186 u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); 1203 1187 u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); 1204 1188 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, ··· 1234 1210 void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); 1235 1211 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); 1236 1212 void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); 1237 - void mlx5e_rx_dim_work(struct work_struct *work); 1238 - void mlx5e_tx_dim_work(struct work_struct *work); 1239 1213 1240 1214 void mlx5e_set_xdp_feature(struct net_device *netdev); 1241 1215 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+83
drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
··· 3 3 4 4 #include "channels.h" 5 5 #include "en.h" 6 + #include "en/dim.h" 6 7 #include "en/ptp.h" 7 8 8 9 unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs) ··· 55 54 56 55 *rqn = c->rq.rqn; 57 56 return true; 57 + } 58 + 59 + int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enable) 60 + { 61 + int i; 62 + 63 + for (i = 0; i < chs->num; i++) { 64 + int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable); 65 + 66 + if (err) 67 + return err; 68 + } 69 + 70 + return 0; 71 + } 72 + 73 + int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enable) 74 + { 75 + int i, tc; 76 + 77 + for (i = 0; i < chs->num; i++) { 78 + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { 79 + int err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], enable); 80 + 81 + if (err) 82 + return err; 83 + } 84 + } 85 + 86 + return 0; 87 + } 88 + 89 + int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs) 90 + { 91 + int i; 92 + 93 + for (i = 0; i < chs->num; i++) { 94 + /* If dim is enabled for the channel, reset the dim state so the 95 + * collected statistics will be reset. This is useful for 96 + * supporting legacy interfaces that allow things like changing 97 + * the CQ period mode for all channels without disturbing 98 + * individual channel configurations. 99 + */ 100 + if (chs->c[i]->rq.dim) { 101 + int err; 102 + 103 + mlx5e_dim_rx_change(&chs->c[i]->rq, false); 104 + err = mlx5e_dim_rx_change(&chs->c[i]->rq, true); 105 + if (err) 106 + return err; 107 + } 108 + } 109 + 110 + return 0; 111 + } 112 + 113 + int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs) 114 + { 115 + int i, tc; 116 + 117 + for (i = 0; i < chs->num; i++) { 118 + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { 119 + int err; 120 + 121 + /* If dim is enabled for the channel, reset the dim 122 + * state so the collected statistics will be reset. 
This 123 + * is useful for supporting legacy interfaces that allow 124 + * things like changing the CQ period mode for all 125 + * channels without disturbing individual channel 126 + * configurations. 127 + */ 128 + if (!chs->c[i]->sq[tc].dim) 129 + continue; 130 + 131 + mlx5e_dim_tx_change(&chs->c[i]->sq[tc], false); 132 + err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], true); 133 + if (err) 134 + return err; 135 + } 136 + } 137 + 138 + return 0; 58 139 }
+4
drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
··· 15 15 void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn, 16 16 u32 *vhca_id); 17 17 bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn); 18 + int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enabled); 19 + int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enabled); 20 + int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs); 21 + int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs); 18 22 19 23 #endif /* __MLX5_EN_CHANNELS_H__ */
+45
drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 + /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved */ 3 + 4 + #ifndef __MLX5_EN_DIM_H__ 5 + #define __MLX5_EN_DIM_H__ 6 + 7 + #include <linux/dim.h> 8 + #include <linux/types.h> 9 + #include <linux/mlx5/mlx5_ifc.h> 10 + 11 + /* Forward declarations */ 12 + struct mlx5e_rq; 13 + struct mlx5e_txqsq; 14 + struct work_struct; 15 + 16 + /* convert a boolean value for cqe mode to appropriate dim constant 17 + * true : DIM_CQ_PERIOD_MODE_START_FROM_CQE 18 + * false : DIM_CQ_PERIOD_MODE_START_FROM_EQE 19 + */ 20 + static inline int mlx5e_dim_cq_period_mode(bool start_from_cqe) 21 + { 22 + return start_from_cqe ? DIM_CQ_PERIOD_MODE_START_FROM_CQE : 23 + DIM_CQ_PERIOD_MODE_START_FROM_EQE; 24 + } 25 + 26 + static inline enum mlx5_cq_period_mode 27 + mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode) 28 + { 29 + switch (cq_period_mode) { 30 + case DIM_CQ_PERIOD_MODE_START_FROM_EQE: 31 + return MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 32 + case DIM_CQ_PERIOD_MODE_START_FROM_CQE: 33 + return MLX5_CQ_PERIOD_MODE_START_FROM_CQE; 34 + default: 35 + WARN_ON_ONCE(true); 36 + return MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 37 + } 38 + } 39 + 40 + void mlx5e_rx_dim_work(struct work_struct *work); 41 + void mlx5e_tx_dim_work(struct work_struct *work); 42 + int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled); 43 + int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enabled); 44 + 45 + #endif /* __MLX5_EN_DIM_H__ */
+1 -71
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 6 6 #include "en/port.h" 7 7 #include "en_accel/en_accel.h" 8 8 #include "en_accel/ipsec.h" 9 + #include <linux/dim.h> 9 10 #include <net/page_pool/types.h> 10 11 #include <net/xdp_sock_drv.h> 11 12 ··· 512 511 } 513 512 514 513 return 0; 515 - } 516 - 517 - static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) 518 - { 519 - struct dim_cq_moder moder = {}; 520 - 521 - moder.cq_period_mode = cq_period_mode; 522 - moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; 523 - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; 524 - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) 525 - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; 526 - 527 - return moder; 528 - } 529 - 530 - static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) 531 - { 532 - struct dim_cq_moder moder = {}; 533 - 534 - moder.cq_period_mode = cq_period_mode; 535 - moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; 536 - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; 537 - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) 538 - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; 539 - 540 - return moder; 541 - } 542 - 543 - static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) 544 - { 545 - return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
546 - DIM_CQ_PERIOD_MODE_START_FROM_CQE : 547 - DIM_CQ_PERIOD_MODE_START_FROM_EQE; 548 - } 549 - 550 - void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 551 - { 552 - if (params->tx_dim_enabled) { 553 - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); 554 - 555 - params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); 556 - } else { 557 - params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); 558 - } 559 - } 560 - 561 - void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 562 - { 563 - if (params->rx_dim_enabled) { 564 - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); 565 - 566 - params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); 567 - } else { 568 - params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); 569 - } 570 - } 571 - 572 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 573 - { 574 - mlx5e_reset_tx_moderation(params, cq_period_mode); 575 - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 576 - params->tx_cq_moderation.cq_period_mode == 577 - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 578 - } 579 - 580 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 581 - { 582 - mlx5e_reset_rx_moderation(params, cq_period_mode); 583 - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 584 - params->rx_cq_moderation.cq_period_mode == 585 - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 586 514 } 587 515 588 516 bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
-5
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
··· 77 77 78 78 /* Parameter calculations */ 79 79 80 - void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 81 - void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 82 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 83 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 84 - 85 80 bool slow_pci_heuristic(struct mlx5_core_dev *mdev); 86 81 int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 87 82 int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+91 -4
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 - #include <linux/dim.h> 34 33 #include "en.h" 34 + #include "en/dim.h" 35 35 36 36 static void 37 37 mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder, 38 38 struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq) 39 39 { 40 - mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts); 40 + mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts, 41 + mlx5e_cq_period_mode(moder.cq_period_mode)); 41 42 dim->state = DIM_START_MEASURE; 42 43 } 43 44 44 45 void mlx5e_rx_dim_work(struct work_struct *work) 45 46 { 46 47 struct dim *dim = container_of(work, struct dim, work); 47 - struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); 48 + struct mlx5e_rq *rq = dim->priv; 48 49 struct dim_cq_moder cur_moder = 49 50 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 50 51 ··· 55 54 void mlx5e_tx_dim_work(struct work_struct *work) 56 55 { 57 56 struct dim *dim = container_of(work, struct dim, work); 58 - struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim); 57 + struct mlx5e_txqsq *sq = dim->priv; 59 58 struct dim_cq_moder cur_moder = 60 59 net_dim_get_tx_moderation(dim->mode, dim->profile_ix); 61 60 62 61 mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); 62 + } 63 + 64 + static struct dim *mlx5e_dim_enable(struct mlx5_core_dev *mdev, 65 + void (*work_fun)(struct work_struct *), int cpu, 66 + u8 cq_period_mode, struct mlx5_core_cq *mcq, 67 + void *queue) 68 + { 69 + struct dim *dim; 70 + int err; 71 + 72 + dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu)); 73 + if (!dim) 74 + return ERR_PTR(-ENOMEM); 75 + 76 + INIT_WORK(&dim->work, work_fun); 77 + 78 + dim->mode = cq_period_mode; 79 + dim->priv = queue; 80 + 81 + err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode); 82 + if (err) { 83 + kvfree(dim); 84 + return ERR_PTR(err); 85 + } 86 + 87 + return dim; 88 + } 89 + 90 + static void mlx5e_dim_disable(struct dim *dim) 91 + { 92 + 
cancel_work_sync(&dim->work); 93 + kvfree(dim); 94 + } 95 + 96 + int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable) 97 + { 98 + if (enable == !!rq->dim) 99 + return 0; 100 + 101 + if (enable) { 102 + struct mlx5e_channel *c = rq->channel; 103 + struct dim *dim; 104 + 105 + dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu, 106 + c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq); 107 + if (IS_ERR(dim)) 108 + return PTR_ERR(dim); 109 + 110 + rq->dim = dim; 111 + 112 + __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); 113 + } else { 114 + __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state); 115 + 116 + mlx5e_dim_disable(rq->dim); 117 + rq->dim = NULL; 118 + } 119 + 120 + return 0; 121 + } 122 + 123 + int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable) 124 + { 125 + if (enable == !!sq->dim) 126 + return 0; 127 + 128 + if (enable) { 129 + struct mlx5e_channel *c = sq->channel; 130 + struct dim *dim; 131 + 132 + dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu, 133 + c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq); 134 + if (IS_ERR(dim)) 135 + return PTR_ERR(dim); 136 + 137 + sq->dim = dim; 138 + 139 + __set_bit(MLX5E_SQ_STATE_DIM, &sq->state); 140 + } else { 141 + __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state); 142 + 143 + mlx5e_dim_disable(sq->dim); 144 + sq->dim = NULL; 145 + } 146 + 147 + return 0; 63 148 }
+233 -69
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 + #include <linux/dim.h> 33 34 #include <linux/ethtool_netlink.h> 34 35 35 36 #include "en.h" 37 + #include "en/channels.h" 38 + #include "en/dim.h" 36 39 #include "en/port.h" 37 40 #include "en/params.h" 38 41 #include "en/ptp.h" ··· 568 565 coal->rx_coalesce_usecs = rx_moder->usec; 569 566 coal->rx_max_coalesced_frames = rx_moder->pkts; 570 567 coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; 568 + kernel_coal->use_cqe_mode_rx = priv->channels.params.rx_moder_use_cqe_mode; 571 569 572 570 tx_moder = &priv->channels.params.tx_cq_moderation; 573 571 coal->tx_coalesce_usecs = tx_moder->usec; 574 572 coal->tx_max_coalesced_frames = tx_moder->pkts; 575 573 coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled; 576 - 577 - kernel_coal->use_cqe_mode_rx = 578 - MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER); 579 - kernel_coal->use_cqe_mode_tx = 580 - MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER); 574 + kernel_coal->use_cqe_mode_tx = priv->channels.params.tx_moder_use_cqe_mode; 581 575 582 576 return 0; 583 577 } ··· 589 589 return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); 590 590 } 591 591 592 + static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue, 593 + struct ethtool_coalesce *coal) 594 + { 595 + struct dim_cq_moder cur_moder; 596 + struct mlx5e_channels *chs; 597 + struct mlx5e_channel *c; 598 + 599 + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) 600 + return -EOPNOTSUPP; 601 + 602 + mutex_lock(&priv->state_lock); 603 + 604 + chs = &priv->channels; 605 + if (chs->num <= queue) { 606 + mutex_unlock(&priv->state_lock); 607 + return -EINVAL; 608 + } 609 + 610 + c = chs->c[queue]; 611 + 612 + coal->use_adaptive_rx_coalesce = !!c->rq.dim; 613 + if (coal->use_adaptive_rx_coalesce) { 614 + cur_moder = net_dim_get_rx_moderation(c->rq.dim->mode, 615 + c->rq.dim->profile_ix); 616 + 617 + coal->rx_coalesce_usecs 
= cur_moder.usec; 618 + coal->rx_max_coalesced_frames = cur_moder.pkts; 619 + } else { 620 + coal->rx_coalesce_usecs = c->rx_cq_moder.usec; 621 + coal->rx_max_coalesced_frames = c->rx_cq_moder.pkts; 622 + } 623 + 624 + coal->use_adaptive_tx_coalesce = !!c->sq[0].dim; 625 + if (coal->use_adaptive_tx_coalesce) { 626 + /* NOTE: Will only display DIM coalesce profile information of 627 + * first channel. The current interface cannot display this 628 + * information for all tc. 629 + */ 630 + cur_moder = net_dim_get_tx_moderation(c->sq[0].dim->mode, 631 + c->sq[0].dim->profile_ix); 632 + 633 + coal->tx_coalesce_usecs = cur_moder.usec; 634 + coal->tx_max_coalesced_frames = cur_moder.pkts; 635 + 636 + } else { 637 + coal->tx_coalesce_usecs = c->tx_cq_moder.usec; 638 + coal->tx_max_coalesced_frames = c->tx_cq_moder.pkts; 639 + } 640 + 641 + mutex_unlock(&priv->state_lock); 642 + 643 + return 0; 644 + } 645 + 646 + int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue, 647 + struct ethtool_coalesce *coal) 648 + { 649 + struct mlx5e_priv *priv = netdev_priv(dev); 650 + 651 + return mlx5e_ethtool_get_per_queue_coalesce(priv, queue, coal); 652 + } 653 + 592 654 #define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD 593 655 #define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT 594 656 595 657 static void 596 - mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) 658 + mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder) 597 659 { 598 660 int tc; 599 661 int i; ··· 663 601 for (i = 0; i < priv->channels.num; ++i) { 664 602 struct mlx5e_channel *c = priv->channels.c[i]; 665 603 struct mlx5_core_dev *mdev = c->mdev; 604 + enum mlx5_cq_period_mode mode; 605 + 606 + mode = mlx5e_cq_period_mode(moder->cq_period_mode); 607 + c->tx_cq_moder = *moder; 666 608 667 609 for (tc = 0; tc < c->num_tc; tc++) { 668 - mlx5_core_modify_cq_moderation(mdev, 669 - &c->sq[tc].cq.mcq, 670 - coal->tx_coalesce_usecs, 671 - 
coal->tx_max_coalesced_frames); 610 + mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq, 611 + moder->usec, moder->pkts, 612 + mode); 672 613 } 673 614 } 674 615 } 675 616 676 617 static void 677 - mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) 618 + mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder) 678 619 { 679 620 int i; 680 621 681 622 for (i = 0; i < priv->channels.num; ++i) { 682 623 struct mlx5e_channel *c = priv->channels.c[i]; 683 624 struct mlx5_core_dev *mdev = c->mdev; 625 + enum mlx5_cq_period_mode mode; 684 626 685 - mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, 686 - coal->rx_coalesce_usecs, 687 - coal->rx_max_coalesced_frames); 627 + mode = mlx5e_cq_period_mode(moder->cq_period_mode); 628 + c->rx_cq_moder = *moder; 629 + 630 + mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts, 631 + mode); 688 632 } 689 - } 690 - 691 - /* convert a boolean value of cq_mode to mlx5 period mode 692 - * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE 693 - * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE 694 - */ 695 - static int cqe_mode_to_period_mode(bool val) 696 - { 697 - return val ? 
MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 698 633 } 699 634 700 635 int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, ··· 701 642 { 702 643 struct dim_cq_moder *rx_moder, *tx_moder; 703 644 struct mlx5_core_dev *mdev = priv->mdev; 645 + bool rx_dim_enabled, tx_dim_enabled; 704 646 struct mlx5e_params new_params; 705 647 bool reset_rx, reset_tx; 706 - bool reset = true; 707 648 u8 cq_period_mode; 708 649 int err = 0; 709 650 710 - if (!MLX5_CAP_GEN(mdev, cq_moderation)) 651 + if (!MLX5_CAP_GEN(mdev, cq_moderation) || 652 + !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) 711 653 return -EOPNOTSUPP; 712 654 713 655 if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || ··· 731 671 return -EOPNOTSUPP; 732 672 } 733 673 674 + rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; 675 + tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; 676 + 734 677 mutex_lock(&priv->state_lock); 735 678 new_params = priv->channels.params; 736 679 737 - rx_moder = &new_params.rx_cq_moderation; 738 - rx_moder->usec = coal->rx_coalesce_usecs; 739 - rx_moder->pkts = coal->rx_max_coalesced_frames; 740 - new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; 680 + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx); 681 + reset_rx = mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode, 682 + rx_dim_enabled, false); 683 + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, cq_period_mode); 741 684 742 - tx_moder = &new_params.tx_cq_moderation; 743 - tx_moder->usec = coal->tx_coalesce_usecs; 744 - tx_moder->pkts = coal->tx_max_coalesced_frames; 745 - new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; 685 + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx); 686 + reset_tx = mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode, 687 + tx_dim_enabled, false); 688 + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, cq_period_mode); 746 689 747 - reset_rx = 
!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; 748 - reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; 690 + reset_rx |= rx_dim_enabled != new_params.rx_dim_enabled; 691 + reset_tx |= tx_dim_enabled != new_params.tx_dim_enabled; 749 692 750 - cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx); 751 - if (cq_period_mode != rx_moder->cq_period_mode) { 752 - mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); 753 - reset_rx = true; 754 - } 693 + /* Solely used for global ethtool get coalesce */ 694 + rx_moder = &new_params.rx_cq_moderation; 695 + new_params.rx_dim_enabled = rx_dim_enabled; 696 + new_params.rx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_rx; 755 697 756 - cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx); 757 - if (cq_period_mode != tx_moder->cq_period_mode) { 758 - mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); 759 - reset_tx = true; 760 - } 698 + tx_moder = &new_params.tx_cq_moderation; 699 + new_params.tx_dim_enabled = tx_dim_enabled; 700 + new_params.tx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_tx; 761 701 762 702 if (reset_rx) { 763 - u8 mode = MLX5E_GET_PFLAG(&new_params, 764 - MLX5E_PFLAG_RX_CQE_BASED_MODER); 703 + mlx5e_channels_rx_change_dim(&priv->channels, false); 704 + mlx5e_reset_rx_moderation(rx_moder, new_params.rx_moder_use_cqe_mode, 705 + rx_dim_enabled); 765 706 766 - mlx5e_reset_rx_moderation(&new_params, mode); 707 + mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder); 708 + } else if (!rx_dim_enabled) { 709 + rx_moder->usec = coal->rx_coalesce_usecs; 710 + rx_moder->pkts = coal->rx_max_coalesced_frames; 711 + 712 + mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder); 767 713 } 714 + 768 715 if (reset_tx) { 769 - u8 mode = MLX5E_GET_PFLAG(&new_params, 770 - MLX5E_PFLAG_TX_CQE_BASED_MODER); 716 + mlx5e_channels_tx_change_dim(&priv->channels, false); 717 + mlx5e_reset_tx_moderation(tx_moder, 
new_params.tx_moder_use_cqe_mode, 718 + tx_dim_enabled); 771 719 772 - mlx5e_reset_tx_moderation(&new_params, mode); 720 + mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder); 721 + } else if (!tx_dim_enabled) { 722 + tx_moder->usec = coal->tx_coalesce_usecs; 723 + tx_moder->pkts = coal->tx_max_coalesced_frames; 724 + 725 + mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder); 773 726 } 774 727 775 - /* If DIM state hasn't changed, it's possible to modify interrupt 776 - * moderation parameters on the fly, even if the channels are open. 777 - */ 778 - if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) { 779 - if (!coal->use_adaptive_rx_coalesce) 780 - mlx5e_set_priv_channels_rx_coalesce(priv, coal); 781 - if (!coal->use_adaptive_tx_coalesce) 782 - mlx5e_set_priv_channels_tx_coalesce(priv, coal); 783 - reset = false; 784 - } 728 + /* DIM enable/disable Rx and Tx channels */ 729 + err = mlx5e_channels_rx_change_dim(&priv->channels, rx_dim_enabled); 730 + if (err) 731 + goto state_unlock; 732 + err = mlx5e_channels_tx_change_dim(&priv->channels, tx_dim_enabled); 733 + if (err) 734 + goto state_unlock; 785 735 786 - err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); 787 - 736 + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false); 737 + state_unlock: 788 738 mutex_unlock(&priv->state_lock); 789 739 return err; 790 740 } ··· 807 737 struct mlx5e_priv *priv = netdev_priv(netdev); 808 738 809 739 return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack); 740 + } 741 + 742 + static int mlx5e_ethtool_set_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue, 743 + struct ethtool_coalesce *coal) 744 + { 745 + struct mlx5_core_dev *mdev = priv->mdev; 746 + bool rx_dim_enabled, tx_dim_enabled; 747 + struct mlx5e_channels *chs; 748 + struct mlx5e_channel *c; 749 + int err = 0; 750 + int tc; 751 + 752 + if (!MLX5_CAP_GEN(mdev, cq_moderation)) 753 + return -EOPNOTSUPP; 754 + 755 + if (coal->tx_coalesce_usecs > 
MLX5E_MAX_COAL_TIME || 756 + coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) { 757 + netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n", 758 + __func__, MLX5E_MAX_COAL_TIME); 759 + return -ERANGE; 760 + } 761 + 762 + if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES || 763 + coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) { 764 + netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n", 765 + __func__, MLX5E_MAX_COAL_FRAMES); 766 + return -ERANGE; 767 + } 768 + 769 + rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; 770 + tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; 771 + 772 + mutex_lock(&priv->state_lock); 773 + 774 + chs = &priv->channels; 775 + if (chs->num <= queue) { 776 + mutex_unlock(&priv->state_lock); 777 + return -EINVAL; 778 + } 779 + 780 + c = chs->c[queue]; 781 + 782 + err = mlx5e_dim_rx_change(&c->rq, rx_dim_enabled); 783 + if (err) 784 + goto state_unlock; 785 + 786 + for (tc = 0; tc < c->num_tc; tc++) { 787 + err = mlx5e_dim_tx_change(&c->sq[tc], tx_dim_enabled); 788 + if (err) 789 + goto state_unlock; 790 + } 791 + 792 + if (!rx_dim_enabled) { 793 + c->rx_cq_moder.usec = coal->rx_coalesce_usecs; 794 + c->rx_cq_moder.pkts = coal->rx_max_coalesced_frames; 795 + 796 + mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, 797 + coal->rx_coalesce_usecs, 798 + coal->rx_max_coalesced_frames); 799 + } 800 + 801 + if (!tx_dim_enabled) { 802 + c->tx_cq_moder.usec = coal->tx_coalesce_usecs; 803 + c->tx_cq_moder.pkts = coal->tx_max_coalesced_frames; 804 + 805 + for (tc = 0; tc < c->num_tc; tc++) 806 + mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq, 807 + coal->tx_coalesce_usecs, 808 + coal->tx_max_coalesced_frames); 809 + } 810 + 811 + state_unlock: 812 + mutex_unlock(&priv->state_lock); 813 + return err; 814 + } 815 + 816 + int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue, 817 + struct ethtool_coalesce *coal) 818 + { 819 + struct mlx5e_priv *priv = netdev_priv(dev); 
820 + 821 + return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal); 810 822 } 811 823 812 824 static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, ··· 2066 1914 if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) 2067 1915 return -EOPNOTSUPP; 2068 1916 2069 - cq_period_mode = cqe_mode_to_period_mode(enable); 1917 + cq_period_mode = mlx5e_dim_cq_period_mode(enable); 2070 1918 2071 1919 current_cq_period_mode = is_rx_cq ? 2072 1920 priv->channels.params.rx_cq_moderation.cq_period_mode : ··· 2076 1924 return 0; 2077 1925 2078 1926 new_params = priv->channels.params; 2079 - if (is_rx_cq) 2080 - mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); 2081 - else 2082 - mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); 1927 + if (is_rx_cq) { 1928 + mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode, 1929 + false, true); 1930 + mlx5e_channels_rx_toggle_dim(&priv->channels); 1931 + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 1932 + cq_period_mode); 1933 + } else { 1934 + mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode, 1935 + false, true); 1936 + mlx5e_channels_tx_toggle_dim(&priv->channels); 1937 + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 1938 + cq_period_mode); 1939 + } 2083 1940 2084 - return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); 1941 + /* Update pflags of existing channels without resetting them */ 1942 + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false); 2085 1943 } 2086 1944 2087 1945 static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) ··· 2616 2454 .set_channels = mlx5e_set_channels, 2617 2455 .get_coalesce = mlx5e_get_coalesce, 2618 2456 .set_coalesce = mlx5e_set_coalesce, 2457 + .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce, 2458 + .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce, 2619 2459 .get_link_ksettings = mlx5e_get_link_ksettings, 2620 2460 
.set_link_ksettings = mlx5e_set_link_ksettings, 2621 2461 .get_rxfh_key_size = mlx5e_get_rxfh_key_size,
+175 -31
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 + #include <linux/dim.h> 33 34 #include <net/tc_act/tc_gact.h> 34 35 #include <linux/mlx5/fs.h> 35 36 #include <net/vxlan.h> ··· 44 43 #include <net/xdp_sock_drv.h> 45 44 #include "eswitch.h" 46 45 #include "en.h" 46 + #include "en/dim.h" 47 47 #include "en/txrx.h" 48 48 #include "en_tc.h" 49 49 #include "en_rep.h" ··· 962 960 } 963 961 } 964 962 965 - INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); 966 - 967 - switch (params->rx_cq_moderation.cq_period_mode) { 968 - case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: 969 - rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; 970 - break; 971 - case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: 972 - default: 973 - rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 974 - } 975 - 976 963 return 0; 977 964 978 965 err_destroy_page_pool: ··· 1011 1020 mlx5e_free_wqe_alloc_info(rq); 1012 1021 } 1013 1022 1023 + kvfree(rq->dim); 1014 1024 xdp_rxq_info_unreg(&rq->xdp_rxq); 1015 1025 page_pool_destroy(rq->page_pool); 1016 1026 mlx5_wq_destroy(&rq->wq_ctrl); ··· 1292 1300 if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) 1293 1301 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); 1294 1302 1295 - if (params->rx_dim_enabled) 1296 - __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); 1303 + if (rq->channel && !params->rx_dim_enabled) { 1304 + rq->channel->rx_cq_moder = params->rx_cq_moderation; 1305 + } else if (rq->channel) { 1306 + u8 cq_period_mode; 1307 + 1308 + cq_period_mode = params->rx_moder_use_cqe_mode ? 
1309 + DIM_CQ_PERIOD_MODE_START_FROM_CQE : 1310 + DIM_CQ_PERIOD_MODE_START_FROM_EQE; 1311 + mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode, 1312 + params->rx_dim_enabled); 1313 + 1314 + err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled); 1315 + if (err) 1316 + goto err_destroy_rq; 1317 + } 1297 1318 1298 1319 /* We disable csum_complete when XDP is enabled since 1299 1320 * XDP programs might manipulate packets which will render ··· 1352 1347 1353 1348 void mlx5e_close_rq(struct mlx5e_rq *rq) 1354 1349 { 1355 - cancel_work_sync(&rq->dim.work); 1350 + if (rq->dim) 1351 + cancel_work_sync(&rq->dim->work); 1356 1352 cancel_work_sync(&rq->recover_work); 1357 1353 mlx5e_destroy_rq(rq); 1358 1354 mlx5e_free_rx_descs(rq); ··· 1629 1623 if (err) 1630 1624 goto err_sq_wq_destroy; 1631 1625 1632 - INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); 1633 - sq->dim.mode = params->tx_cq_moderation.cq_period_mode; 1634 - 1635 1626 return 0; 1636 1627 1637 1628 err_sq_wq_destroy: ··· 1639 1636 1640 1637 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) 1641 1638 { 1639 + kvfree(sq->dim); 1642 1640 mlx5e_free_txqsq_db(sq); 1643 1641 mlx5_wq_destroy(&sq->wq_ctrl); 1644 1642 } ··· 1795 1791 if (tx_rate) 1796 1792 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); 1797 1793 1798 - if (params->tx_dim_enabled) 1799 - sq->state |= BIT(MLX5E_SQ_STATE_DIM); 1794 + if (sq->channel && !params->tx_dim_enabled) { 1795 + sq->channel->tx_cq_moder = params->tx_cq_moderation; 1796 + } else if (sq->channel) { 1797 + u8 cq_period_mode; 1798 + 1799 + cq_period_mode = params->tx_moder_use_cqe_mode ? 
1800 + DIM_CQ_PERIOD_MODE_START_FROM_CQE : 1801 + DIM_CQ_PERIOD_MODE_START_FROM_EQE; 1802 + mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder, 1803 + cq_period_mode, 1804 + params->tx_dim_enabled); 1805 + 1806 + err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled); 1807 + if (err) 1808 + goto err_destroy_sq; 1809 + } 1800 1810 1801 1811 return 0; 1802 1812 1813 + err_destroy_sq: 1814 + mlx5e_destroy_sq(c->mdev, sq->sqn); 1803 1815 err_free_txqsq: 1804 1816 mlx5e_free_txqsq(sq); 1805 1817 ··· 1867 1847 struct mlx5_core_dev *mdev = sq->mdev; 1868 1848 struct mlx5_rate_limit rl = {0}; 1869 1849 1870 - cancel_work_sync(&sq->dim.work); 1850 + if (sq->dim) 1851 + cancel_work_sync(&sq->dim->work); 1871 1852 cancel_work_sync(&sq->recover_work); 1872 1853 mlx5e_destroy_sq(mdev, sq->sqn); 1873 1854 if (sq->rate_limit) { ··· 1885 1864 recover_work); 1886 1865 1887 1866 mlx5e_reporter_tx_err_cqe(sq); 1867 + } 1868 + 1869 + static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) 1870 + { 1871 + return (struct dim_cq_moder) { 1872 + .cq_period_mode = cq_period_mode, 1873 + .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS, 1874 + .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? 
1875 + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE : 1876 + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC, 1877 + }; 1878 + } 1879 + 1880 + bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, 1881 + bool dim_enabled) 1882 + { 1883 + bool reset_needed = cq_moder->cq_period_mode != cq_period_mode; 1884 + 1885 + if (dim_enabled) 1886 + *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode); 1887 + else 1888 + *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode); 1889 + 1890 + return reset_needed; 1891 + } 1892 + 1893 + bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, 1894 + bool dim_enabled, bool keep_dim_state) 1895 + { 1896 + bool reset = false; 1897 + int i, tc; 1898 + 1899 + for (i = 0; i < chs->num; i++) { 1900 + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { 1901 + if (keep_dim_state) 1902 + dim_enabled = !!chs->c[i]->sq[tc].dim; 1903 + 1904 + reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder, 1905 + cq_period_mode, dim_enabled); 1906 + } 1907 + } 1908 + 1909 + return reset; 1888 1910 } 1889 1911 1890 1912 static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, ··· 2153 2089 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, 2154 2090 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); 2155 2091 2156 - MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); 2092 + MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode)); 2093 + 2157 2094 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); 2158 2095 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); 2159 2096 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - ··· 2192 2127 if (err) 2193 2128 goto err_free_cq; 2194 2129 2195 - if (MLX5_CAP_GEN(mdev, cq_moderation)) 2196 - mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts); 2130 + if (MLX5_CAP_GEN(mdev, cq_moderation) && 2131 + MLX5_CAP_GEN(mdev, cq_period_mode_modify)) 2132 + 
mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts, 2133 + mlx5e_cq_period_mode(moder.cq_period_mode)); 2197 2134 return 0; 2198 2135 2199 2136 err_free_cq: ··· 2208 2141 { 2209 2142 mlx5e_destroy_cq(cq); 2210 2143 mlx5e_free_cq(cq); 2144 + } 2145 + 2146 + int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 2147 + u8 cq_period_mode) 2148 + { 2149 + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {}; 2150 + void *cqc; 2151 + 2152 + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); 2153 + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); 2154 + MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode)); 2155 + MLX5_SET(modify_cq_in, in, 2156 + modify_field_select_resize_field_select.modify_field_select.modify_field_select, 2157 + MLX5_CQ_MODIFY_PERIOD_MODE); 2158 + 2159 + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); 2160 + } 2161 + 2162 + int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 2163 + u16 cq_period, u16 cq_max_count, u8 cq_period_mode) 2164 + { 2165 + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {}; 2166 + void *cqc; 2167 + 2168 + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); 2169 + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); 2170 + MLX5_SET(cqc, cqc, cq_period, cq_period); 2171 + MLX5_SET(cqc, cqc, cq_max_count, cq_max_count); 2172 + MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode); 2173 + MLX5_SET(modify_cq_in, in, 2174 + modify_field_select_resize_field_select.modify_field_select.modify_field_select, 2175 + MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE); 2176 + 2177 + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); 2211 2178 } 2212 2179 2213 2180 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, ··· 4061 3960 return mlx5_set_port_fcs(mdev, !enable); 4062 3961 } 4063 3962 3963 + static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) 3964 + { 3965 + return (struct dim_cq_moder) { 3966 + .cq_period_mode = cq_period_mode, 
3967 + .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS, 3968 + .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? 3969 + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : 3970 + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC, 3971 + }; 3972 + } 3973 + 3974 + bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, 3975 + bool dim_enabled) 3976 + { 3977 + bool reset_needed = cq_moder->cq_period_mode != cq_period_mode; 3978 + 3979 + if (dim_enabled) 3980 + *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode); 3981 + else 3982 + *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode); 3983 + 3984 + return reset_needed; 3985 + } 3986 + 3987 + bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, 3988 + bool dim_enabled, bool keep_dim_state) 3989 + { 3990 + bool reset = false; 3991 + int i; 3992 + 3993 + for (i = 0; i < chs->num; i++) { 3994 + if (keep_dim_state) 3995 + dim_enabled = !!chs->c[i]->rq.dim; 3996 + 3997 + reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder, 3998 + cq_period_mode, dim_enabled); 3999 + } 4000 + 4001 + return reset; 4002 + } 4003 + 4064 4004 static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable) 4065 4005 { 4066 4006 u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {}; ··· 5166 5024 { 5167 5025 struct mlx5e_params *params = &priv->channels.params; 5168 5026 struct mlx5_core_dev *mdev = priv->mdev; 5169 - u8 rx_cq_period_mode; 5170 5027 5171 5028 params->sw_mtu = mtu; 5172 5029 params->hard_mtu = MLX5E_ETH_HARD_MTU; ··· 5199 5058 params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); 5200 5059 5201 5060 /* CQ moderation params */ 5202 - rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
5203 - MLX5_CQ_PERIOD_MODE_START_FROM_CQE : 5204 - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 5205 - params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); 5206 - params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); 5207 - mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); 5208 - mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 5061 + params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) && 5062 + MLX5_CAP_GEN(mdev, cq_period_mode_modify); 5063 + params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) && 5064 + MLX5_CAP_GEN(mdev, cq_period_mode_modify); 5065 + params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe); 5066 + params->tx_moder_use_cqe_mode = false; 5067 + mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode, 5068 + params->rx_dim_enabled); 5069 + mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode, 5070 + params->tx_dim_enabled); 5209 5071 5210 5072 /* TX inline */ 5211 5073 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 + #include <linux/dim.h> 33 34 #include <linux/debugfs.h> 34 35 #include <linux/mlx5/fs.h> 35 36 #include <net/switchdev.h> ··· 41 40 42 41 #include "eswitch.h" 43 42 #include "en.h" 43 + #include "en/dim.h" 44 44 #include "en_rep.h" 45 45 #include "en/params.h" 46 46 #include "en/txrx.h" ··· 428 426 .set_channels = mlx5e_rep_set_channels, 429 427 .get_coalesce = mlx5e_rep_get_coalesce, 430 428 .set_coalesce = mlx5e_rep_set_coalesce, 429 + .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce, 430 + .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce, 431 431 .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, 432 432 .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, 433 433 }; ··· 840 836 struct mlx5_core_dev *mdev = priv->mdev; 841 837 struct mlx5e_params *params; 842 838 843 - u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 844 - MLX5_CQ_PERIOD_MODE_START_FROM_CQE : 845 - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 846 - 847 839 params = &priv->channels.params; 848 840 849 841 params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS; ··· 867 867 868 868 /* CQ moderation params */ 869 869 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); 870 - mlx5e_set_rx_cq_mode_params(params, cq_period_mode); 870 + params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe); 871 871 872 872 params->mqprio.num_tc = 1; 873 873 if (rep->vport != MLX5_VPORT_UPLINK)
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
··· 55 55 return; 56 56 57 57 dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); 58 - net_dim(&sq->dim, dim_sample); 58 + net_dim(sq->dim, dim_sample); 59 59 } 60 60 61 61 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) ··· 67 67 return; 68 68 69 69 dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); 70 - net_dim(&rq->dim, dim_sample); 70 + net_dim(rq->dim, dim_sample); 71 71 } 72 72 73 73 void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
+4 -3
include/linux/mlx5/cq.h
··· 95 95 }; 96 96 97 97 enum { 98 - MLX5_CQ_MODIFY_PERIOD = 1 << 0, 99 - MLX5_CQ_MODIFY_COUNT = 1 << 1, 100 - MLX5_CQ_MODIFY_OVERRUN = 1 << 2, 98 + MLX5_CQ_MODIFY_PERIOD = BIT(0), 99 + MLX5_CQ_MODIFY_COUNT = BIT(1), 100 + MLX5_CQ_MODIFY_OVERRUN = BIT(2), 101 + MLX5_CQ_MODIFY_PERIOD_MODE = BIT(4), 101 102 }; 102 103 103 104 enum {
+4 -3
include/linux/mlx5/mlx5_ifc.h
··· 1686 1686 u8 cq_oi[0x1]; 1687 1687 u8 cq_resize[0x1]; 1688 1688 u8 cq_moderation[0x1]; 1689 - u8 reserved_at_223[0x3]; 1689 + u8 cq_period_mode_modify[0x1]; 1690 + u8 reserved_at_224[0x2]; 1690 1691 u8 cq_eq_remap[0x1]; 1691 1692 u8 pg[0x1]; 1692 1693 u8 block_lb_mc[0x1]; ··· 4386 4385 MLX5_CQC_ST_FIRED = 0xa, 4387 4386 }; 4388 4387 4389 - enum { 4388 + enum mlx5_cq_period_mode { 4390 4389 MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, 4391 4390 MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, 4392 - MLX5_CQ_PERIOD_NUM_MODES 4391 + MLX5_CQ_PERIOD_NUM_MODES, 4393 4392 }; 4394 4393 4395 4394 struct mlx5_ifc_cqc_bits {