Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2021-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-03-22

This series introduces some fixes to mlx5 driver.
Please pull and let me know if there is any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+79 -37
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 1181 1181 1182 1182 mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG, 1183 1183 &ctstate, &ctstate_mask); 1184 - if (ctstate_mask) 1184 + 1185 + if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT) 1185 1186 return -EOPNOTSUPP; 1186 1187 1187 1188 ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1887 1887 { 1888 1888 struct mlx5e_priv *priv = netdev_priv(netdev); 1889 1889 struct mlx5_core_dev *mdev = priv->mdev; 1890 + int err; 1890 1891 1891 1892 if (!MLX5_CAP_GEN(mdev, cqe_compression)) 1892 1893 return -EOPNOTSUPP; ··· 1897 1896 return -EINVAL; 1898 1897 } 1899 1898 1900 - mlx5e_modify_rx_cqe_compression_locked(priv, enable); 1899 + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); 1900 + if (err) 1901 + return err; 1902 + 1901 1903 priv->channels.params.rx_cqe_compress_def = enable; 1902 1904 1903 1905 return 0;
+12
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3846 3846 } 3847 3847 3848 3848 if (mlx5e_is_uplink_rep(priv)) { 3849 + struct mlx5e_vport_stats *vstats = &priv->stats.vport; 3850 + 3849 3851 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); 3850 3852 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); 3851 3853 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); 3852 3854 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); 3855 + 3856 + /* vport multicast also counts packets that are dropped due to steering 3857 + * or rx out of buffer 3858 + */ 3859 + stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); 3853 3860 } else { 3854 3861 mlx5e_fold_sw_stats64(priv, stats); 3855 3862 } ··· 4978 4971 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, 4979 4972 priv->max_nch); 4980 4973 params->num_tc = 1; 4974 + 4975 + /* Set an initial non-zero value, so that mlx5e_select_queue won't 4976 + * divide by zero if called before first activating channels. 4977 + */ 4978 + priv->num_tc_x_num_ch = params->num_channels * params->num_tc; 4981 4979 4982 4980 /* SQ */ 4983 4981 params->log_sq_size = is_kdump_kernel() ?
+43 -11
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 2296 2296 *match_level = MLX5_MATCH_L4; 2297 2297 } 2298 2298 2299 + /* Currently supported only for MPLS over UDP */ 2300 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && 2301 + !netif_is_bareudp(filter_dev)) { 2302 + NL_SET_ERR_MSG_MOD(extack, 2303 + "Matching on MPLS is supported only for MPLS over UDP"); 2304 + netdev_err(priv->netdev, 2305 + "Matching on MPLS is supported only for MPLS over UDP\n"); 2306 + return -EOPNOTSUPP; 2307 + } 2308 + 2299 2309 return 0; 2300 2310 } 2301 2311 ··· 2909 2899 return 0; 2910 2900 } 2911 2901 2902 + static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, 2903 + bool ct_flow, struct netlink_ext_ack *extack, 2904 + struct mlx5e_priv *priv, 2905 + struct mlx5_flow_spec *spec) 2906 + { 2907 + if (!modify_tuple || ct_clear) 2908 + return true; 2909 + 2910 + if (ct_flow) { 2911 + NL_SET_ERR_MSG_MOD(extack, 2912 + "can't offload tuple modification with non-clear ct()"); 2913 + netdev_info(priv->netdev, 2914 + "can't offload tuple modification with non-clear ct()"); 2915 + return false; 2916 + } 2917 + 2918 + /* Add ct_state=-trk match so it will be offloaded for non ct flows 2919 + * (or after clear action), as otherwise, since the tuple is changed, 2920 + * we can't restore ct state 2921 + */ 2922 + if (mlx5_tc_ct_add_no_trk_match(spec)) { 2923 + NL_SET_ERR_MSG_MOD(extack, 2924 + "can't offload tuple modification with ct matches and no ct(clear) action"); 2925 + netdev_info(priv->netdev, 2926 + "can't offload tuple modification with ct matches and no ct(clear) action"); 2927 + return false; 2928 + } 2929 + 2930 + return true; 2931 + } 2932 + 2912 2933 static bool modify_header_match_supported(struct mlx5e_priv *priv, 2913 2934 struct mlx5_flow_spec *spec, 2914 2935 struct flow_action *flow_action, ··· 2978 2937 return err; 2979 2938 } 2980 2939 2981 - /* Add ct_state=-trk match so it will be offloaded for non ct flows 2982 - * (or after clear action), as otherwise, since the tuple is changed, 2983 - * 
we can't restore ct state 2984 - */ 2985 - if (!ct_clear && modify_tuple && 2986 - mlx5_tc_ct_add_no_trk_match(spec)) { 2987 - NL_SET_ERR_MSG_MOD(extack, 2988 - "can't offload tuple modify header with ct matches"); 2989 - netdev_info(priv->netdev, 2990 - "can't offload tuple modify header with ct matches"); 2940 + if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, 2941 + priv, spec)) 2991 2942 return false; 2992 - } 2993 2943 2994 2944 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 2995 2945 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+1 -3
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
··· 181 181 u16 max_functions; 182 182 u16 function_id; 183 183 int err = 0; 184 - bool ecpu; 185 184 int i; 186 185 187 186 max_functions = mlx5_sf_max_functions(dev); 188 187 function_id = MLX5_CAP_GEN(dev, sf_base_id); 189 - ecpu = mlx5_read_embedded_cpu(dev); 190 188 /* Arm the vhca context as the vhca event notifier */ 191 189 for (i = 0; i < max_functions; i++) { 192 - err = mlx5_vhca_event_arm(dev, function_id, ecpu); 190 + err = mlx5_vhca_event_arm(dev, function_id); 193 191 if (err) 194 192 return err; 195 193
+3 -5
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
··· 6 6 #include "sf.h" 7 7 #include "mlx5_ifc_vhca_event.h" 8 8 #include "vhca_event.h" 9 - #include "ecpf.h" 9 + #include "mlx5_core.h" 10 10 11 11 struct mlx5_sf_hw { 12 12 u32 usr_sfnum; ··· 18 18 struct mlx5_core_dev *dev; 19 19 struct mlx5_sf_hw *sfs; 20 20 int max_local_functions; 21 - u8 ecpu: 1; 22 21 struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */ 23 22 struct notifier_block vhca_nb; 24 23 }; ··· 71 72 if (err) 72 73 goto err; 73 74 74 - err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum); 75 + err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum); 75 76 if (err) 76 77 goto vhca_err; 77 78 ··· 117 118 118 119 hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); 119 120 mutex_lock(&table->table_lock); 120 - err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out)); 121 + err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)); 121 122 if (err) 122 123 goto err; 123 124 state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); ··· 163 164 table->dev = dev; 164 165 table->sfs = sfs; 165 166 table->max_local_functions = max_functions; 166 - table->ecpu = mlx5_read_embedded_cpu(dev); 167 167 dev->priv.sf_hw_table = table; 168 168 mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions); 169 169 return 0;
+10 -12
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
··· 19 19 struct mlx5_vhca_state_event event; 20 20 }; 21 21 22 - int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, 23 - bool ecpu, u32 *out, u32 outlen) 22 + int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen) 24 23 { 25 24 u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; 26 25 27 26 MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE); 28 27 MLX5_SET(query_vhca_state_in, in, function_id, function_id); 29 - MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu); 28 + MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0); 30 29 31 30 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 32 31 } 33 32 34 33 static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id, 35 - bool ecpu, u32 *in, u32 inlen) 34 + u32 *in, u32 inlen) 36 35 { 37 36 u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; 38 37 39 38 MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); 40 39 MLX5_SET(modify_vhca_state_in, in, function_id, function_id); 41 - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); 40 + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); 42 41 43 42 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 44 43 } 45 44 46 - int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id) 45 + int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id) 47 46 { 48 47 u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; 49 48 u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; 50 49 51 50 MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); 52 51 MLX5_SET(modify_vhca_state_in, in, function_id, function_id); 53 - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); 52 + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); 54 53 MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1); 55 54 
MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id); 56 55 57 56 return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out); 58 57 } 59 58 60 - int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu) 59 + int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id) 61 60 { 62 61 u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; 63 62 64 63 MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1); 65 64 MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1); 66 65 67 - return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in)); 66 + return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in)); 68 67 } 69 68 70 69 static void ··· 72 73 u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; 73 74 int err; 74 75 75 - err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out)); 76 + err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out)); 76 77 if (err) 77 78 return; 78 79 ··· 81 82 event->new_vhca_state = MLX5_GET(query_vhca_state_out, out, 82 83 vhca_state_context.vhca_state); 83 84 84 - mlx5_vhca_event_arm(dev, event->function_id, event->ecpu); 85 + mlx5_vhca_event_arm(dev, event->function_id); 85 86 86 87 blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); 87 88 } ··· 110 111 INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); 111 112 work->notifier = notifier; 112 113 work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); 113 - work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function); 114 114 mlx5_events_work_enqueue(notifier->dev, &work->work); 115 115 return NOTIFY_OK; 116 116 }
+3 -4
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
··· 10 10 u16 function_id; 11 11 u16 sw_function_id; 12 12 u8 new_vhca_state; 13 - bool ecpu; 14 13 }; 15 14 16 15 static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev) ··· 24 25 void mlx5_vhca_event_stop(struct mlx5_core_dev *dev); 25 26 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); 26 27 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); 27 - int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id); 28 - int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu); 28 + int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id); 29 + int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id); 29 30 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, 30 - bool ecpu, u32 *out, u32 outlen); 31 + u32 *out, u32 outlen); 31 32 #else 32 33 33 34 static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)