Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2022-12-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-fixes-2022-12-28

+104 -28
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
··· 468 468 bool new_state = val.vbool; 469 469 470 470 if (new_state && !MLX5_CAP_GEN(dev, roce) && 471 - !MLX5_CAP_GEN(dev, roce_rw_supported)) { 471 + !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) { 472 472 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE"); 473 473 return -EOPNOTSUPP; 474 474 } ··· 563 563 union devlink_param_value val, 564 564 struct netlink_ext_ack *extack) 565 565 { 566 - return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL; 566 + return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL; 567 567 } 568 568 569 569 static const struct devlink_param mlx5_devlink_params[] = {
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
··· 459 459 goto unlock; 460 460 461 461 for (i = 0; i < priv->channels.num; i++) { 462 - struct mlx5e_rq *rq = &priv->channels.c[i]->rq; 462 + struct mlx5e_channel *c = priv->channels.c[i]; 463 + struct mlx5e_rq *rq; 464 + 465 + rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ? 466 + &c->xskrq : &c->rq; 463 467 464 468 err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); 465 469 if (err)
+1 -6
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 2103 2103 static void 2104 2104 mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv) 2105 2105 { 2106 - bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB; 2107 2106 struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs; 2108 - char dirname[16] = {}; 2109 2107 2110 - if (sscanf(dirname, "ct_%s", is_fdb ? "fdb" : "nic") < 0) 2111 - return; 2112 - 2113 - ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev)); 2108 + ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev)); 2114 2109 debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root, 2115 2110 &ct_dbgfs->stats.offloaded); 2116 2111 debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
+8 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
··· 222 222 int err; 223 223 224 224 list_for_each_entry(flow, flow_list, tmp_list) { 225 - if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW)) 225 + if (!mlx5e_is_offloaded_flow(flow)) 226 226 continue; 227 227 228 228 attr = mlx5e_tc_get_encap_attr(flow); ··· 230 230 /* mark the flow's encap dest as non-valid */ 231 231 esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; 232 232 esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL; 233 + 234 + /* Clear pkt_reformat before checking slow path flag. Because 235 + * in next iteration, the same flow is already set slow path 236 + * flag, but still need to clear the pkt_reformat. 237 + */ 238 + if (flow_flag_test(flow, SLOW)) 239 + continue; 233 240 234 241 /* update from encap rule to slow path rule */ 235 242 spec = &flow->attr->parse_attr->spec;
+5
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
··· 273 273 geneve_tlv_option_0_data, be32_to_cpu(opt_data_key)); 274 274 MLX5_SET(fte_match_set_misc3, misc_3_c, 275 275 geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask)); 276 + if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, 277 + ft_field_support.geneve_tlv_option_0_exist)) { 278 + MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_tlv_option_0_exist); 279 + MLX5_SET_TO_ONES(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist); 280 + } 276 281 277 282 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; 278 283
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1305 1305 sq->channel = c; 1306 1306 sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; 1307 1307 sq->min_inline_mode = params->tx_min_inline_mode; 1308 - sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 1308 + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN; 1309 1309 sq->xsk_pool = xsk_pool; 1310 1310 1311 1311 sq->stats = sq->xsk_pool ?
+6 -1
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
··· 67 67 int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, 68 68 struct mlx5_vport *vport) 69 69 { 70 + bool vst_mode_steering = esw_vst_mode_is_steering(esw); 70 71 struct mlx5_flow_destination drop_ctr_dst = {}; 71 72 struct mlx5_flow_destination *dst = NULL; 72 73 struct mlx5_fc *drop_counter = NULL; ··· 78 77 */ 79 78 int table_size = 2; 80 79 int dest_num = 0; 80 + int actions_flag; 81 81 int err = 0; 82 82 83 83 if (vport->egress.legacy.drop_counter) { ··· 121 119 vport->vport, vport->info.vlan, vport->info.qos); 122 120 123 121 /* Allowed vlan rule */ 122 + actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW; 123 + if (vst_mode_steering) 124 + actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 124 125 err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan, 125 - MLX5_FLOW_CONTEXT_ACTION_ALLOW); 126 + actions_flag); 126 127 if (err) 127 128 goto out; 128 129
+28 -5
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
··· 139 139 int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, 140 140 struct mlx5_vport *vport) 141 141 { 142 + bool vst_mode_steering = esw_vst_mode_is_steering(esw); 142 143 struct mlx5_flow_destination drop_ctr_dst = {}; 143 144 struct mlx5_flow_destination *dst = NULL; 144 145 struct mlx5_flow_act flow_act = {}; 145 146 struct mlx5_flow_spec *spec = NULL; 146 147 struct mlx5_fc *counter = NULL; 148 + bool vst_check_cvlan = false; 149 + bool vst_push_cvlan = false; 147 150 /* The ingress acl table contains 4 groups 148 151 * (2 active rules at the same time - 149 152 * 1 allow rule from one of the first 3 groups. ··· 206 203 goto out; 207 204 } 208 205 209 - if (vport->info.vlan || vport->info.qos) 206 + if ((vport->info.vlan || vport->info.qos)) { 207 + if (vst_mode_steering) 208 + vst_push_cvlan = true; 209 + else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always)) 210 + vst_check_cvlan = true; 211 + } 212 + 213 + if (vst_check_cvlan || vport->info.spoofchk) 214 + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 215 + 216 + /* Create ingress allow rule */ 217 + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; 218 + if (vst_push_cvlan) { 219 + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 220 + flow_act.vlan[0].prio = vport->info.qos; 221 + flow_act.vlan[0].vid = vport->info.vlan; 222 + flow_act.vlan[0].ethtype = ETH_P_8021Q; 223 + } 224 + 225 + if (vst_check_cvlan) 210 226 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 211 227 outer_headers.cvlan_tag); 212 228 ··· 240 218 ether_addr_copy(smac_v, vport->info.mac); 241 219 } 242 220 243 - /* Create ingress allow rule */ 244 - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 245 - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; 246 221 vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, &flow_act, NULL, 0); 247 222 248 223 if (IS_ERR(vport->ingress.allow_rule)) { ··· 250 231 vport->ingress.allow_rule = NULL; 251 232 goto out; 252 233 } 234 235 + if (!vst_check_cvlan && !vport->info.spoofchk) 236 + goto out; 253 237 254 238 memset(&flow_act, 0, sizeof(flow_act)); 255 239 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; ··· 279 257 return 0; 280 258 281 259 out: 282 - esw_acl_ingress_lgcy_cleanup(esw, vport); 260 + if (err) 261 + esw_acl_ingress_lgcy_cleanup(esw, vport); 283 262 kvfree(spec); 284 263 return err; 285 264 }
+21 -9
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 161 161 esw_vport_context.vport_cvlan_strip, 1); 162 162 163 163 if (set_flags & SET_VLAN_INSERT) { 164 - /* insert only if no vlan in packet */ 165 - MLX5_SET(modify_esw_vport_context_in, in, 166 - esw_vport_context.vport_cvlan_insert, 1); 167 - 164 + if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) { 165 + /* insert either if vlan exist in packet or not */ 166 + MLX5_SET(modify_esw_vport_context_in, in, 167 + esw_vport_context.vport_cvlan_insert, 168 + MLX5_VPORT_CVLAN_INSERT_ALWAYS); 169 + } else { 170 + /* insert only if no vlan in packet */ 171 + MLX5_SET(modify_esw_vport_context_in, in, 172 + esw_vport_context.vport_cvlan_insert, 173 + MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN); 174 + } 168 175 MLX5_SET(modify_esw_vport_context_in, in, 169 176 esw_vport_context.cvlan_pcp, qos); 170 177 MLX5_SET(modify_esw_vport_context_in, in, ··· 816 809 817 810 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 818 811 { 812 + bool vst_mode_steering = esw_vst_mode_is_steering(esw); 819 813 u16 vport_num = vport->vport; 820 814 int flags; 821 815 int err; ··· 847 839 848 840 flags = (vport->info.vlan || vport->info.qos) ? 849 841 SET_VLAN_STRIP | SET_VLAN_INSERT : 0; 850 - modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, 851 - vport->info.qos, flags); 842 + if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) 843 + modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, 844 + vport->info.qos, flags); 852 845 853 846 return 0; 854 847 ··· 1857 1848 u16 vport, u16 vlan, u8 qos, u8 set_flags) 1858 1849 { 1859 1850 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1851 + bool vst_mode_steering = esw_vst_mode_is_steering(esw); 1860 1852 int err = 0; 1861 1853 1862 1854 if (IS_ERR(evport)) ··· 1865 1855 if (vlan > 4095 || qos > 7) 1866 1856 return -EINVAL; 1867 1857 1868 - err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); 1869 - if (err) 1870 - return err; 1858 + if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) { 1859 + err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags); 1860 + if (err) 1861 + return err; 1862 + } 1871 1863 1872 1864 evport->info.vlan = vlan; 1873 1865 evport->info.qos = qos;
+6
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 527 527 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 528 528 u16 vport, u16 vlan, u8 qos, u8 set_flags); 529 529 530 + static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw) 531 + { 532 + return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) && 533 + MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan)); 534 + } 535 + 530 536 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, 531 537 u8 vlan_depth) 532 538 {
+6
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 674 674 dev = container_of(priv, struct mlx5_core_dev, priv); 675 675 devlink = priv_to_devlink(dev); 676 676 677 + mutex_lock(&dev->intf_state_mutex); 678 + if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) { 679 + mlx5_core_err(dev, "health works are not permitted at this stage\n"); 680 + return; 681 + } 682 + mutex_unlock(&dev->intf_state_mutex); 677 683 enter_error_state(dev, false); 678 684 if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { 679 685 devl_lock(devlink);
+4
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 71 71 params->packet_merge.type = MLX5E_PACKET_MERGE_NONE; 72 72 params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; 73 73 params->tunneled_offload_en = false; 74 + 75 + /* CQE compression is not supported for IPoIB */ 76 + params->rx_cqe_compress_def = false; 77 + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); 74 78 } 75 79 76 80 /* Called directly after IPoIB netdevice was created to initialize SW structs */
+1
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
··· 228 228 if (ldev->nb.notifier_call) 229 229 unregister_netdevice_notifier_net(&init_net, &ldev->nb); 230 230 mlx5_lag_mp_cleanup(ldev); 231 + cancel_delayed_work_sync(&ldev->bond_work); 231 232 destroy_workqueue(ldev->wq); 232 233 mlx5_lag_mpesw_cleanup(ldev); 233 234 mutex_destroy(&ldev->lock);
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 613 613 MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix, 614 614 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix)); 615 615 616 - if (MLX5_CAP_GEN(dev, roce_rw_supported)) 616 + if (MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce)) 617 617 MLX5_SET(cmd_hca_cap, set_hca_cap, roce, 618 618 mlx5_is_roce_on(dev)); 619 619 ··· 1050 1050 err_tables_cleanup: 1051 1051 mlx5_geneve_destroy(dev->geneve); 1052 1052 mlx5_vxlan_destroy(dev->vxlan); 1053 + mlx5_cleanup_clock(dev); 1054 + mlx5_cleanup_reserved_gids(dev); 1053 1055 mlx5_cq_debugfs_cleanup(dev); 1054 1056 mlx5_fw_reset_cleanup(dev); 1055 1057 err_events_cleanup:
+5
include/linux/mlx5/device.h
··· 1091 1091 }; 1092 1092 1093 1093 enum { 1094 + MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1, 1095 + MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3, 1096 + }; 1097 + 1098 + enum { 1094 1099 MLX5_L3_PROT_TYPE_IPV4 = 0, 1095 1100 MLX5_L3_PROT_TYPE_IPV6 = 1, 1096 1101 };
+2 -1
include/linux/mlx5/mlx5_ifc.h
··· 913 913 u8 vport_svlan_insert[0x1]; 914 914 u8 vport_cvlan_insert_if_not_exist[0x1]; 915 915 u8 vport_cvlan_insert_overwrite[0x1]; 916 - u8 reserved_at_5[0x2]; 916 + u8 reserved_at_5[0x1]; 917 + u8 vport_cvlan_insert_always[0x1]; 917 918 u8 esw_shared_ingress_acl[0x1]; 918 919 u8 esw_uplink_ingress_acl[0x1]; 919 920 u8 root_ft_on_other_esw[0x1];