Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2022-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-06-08

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2022-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
net/mlx5: fs, fail conflicting actions
net/mlx5: Rearm the FW tracer after each tracer event
net/mlx5: E-Switch, pair only capable devices
net/mlx5e: CT: Fix cleanup of CT before cleanup of TC ct rules
Revert "net/mlx5e: Allow relaxed ordering over VFs"
MAINTAINERS: adjust MELLANOX ETHERNET INNOVA DRIVERS to TLS support removal
====================

Link: https://lore.kernel.org/r/20220608185855.19818-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+74 -46
-1
MAINTAINERS
··· 12703 12703 S: Supported 12704 12704 W: http://www.mellanox.com 12705 12705 Q: https://patchwork.kernel.org/project/netdevbpf/list/ 12706 - F: drivers/net/ethernet/mellanox/mlx5/core/accel/* 12707 12706 F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/* 12708 12707 F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* 12709 12708 F: include/linux/mlx5/mlx5_ifc_fpga.h
-18
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 579 579 return pci_get_drvdata(to_pci_dev(other)); 580 580 } 581 581 582 - static int next_phys_dev(struct device *dev, const void *data) 583 - { 584 - struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; 585 - 586 - mdev = pci_get_other_drvdata(this->device, dev); 587 - if (!mdev) 588 - return 0; 589 - 590 - return _next_phys_dev(mdev, data); 591 - } 592 - 593 582 static int next_phys_dev_lag(struct device *dev, const void *data) 594 583 { 595 584 struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; ··· 610 621 611 622 put_device(next); 612 623 return pci_get_drvdata(to_pci_dev(next)); 613 - } 614 - 615 - /* Must be called with intf_mutex held */ 616 - struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) 617 - { 618 - lockdep_assert_held(&mlx5_intf_mutex); 619 - return mlx5_get_next_dev(dev, &next_phys_dev); 620 624 } 621 625 622 626 /* Must be called with intf_mutex held */
+5 -2
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
··· 675 675 if (!tracer->owner) 676 676 return; 677 677 678 + if (unlikely(!tracer->str_db.loaded)) 679 + goto arm; 680 + 678 681 block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; 679 682 start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; 680 683 ··· 735 732 &tmp_trace_block[TRACES_PER_BLOCK - 1]); 736 733 } 737 734 735 + arm: 738 736 mlx5_fw_tracer_arm(dev); 739 737 } 740 738 ··· 1140 1136 queue_work(tracer->work_queue, &tracer->ownership_change_work); 1141 1137 break; 1142 1138 case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: 1143 - if (likely(tracer->str_db.loaded)) 1144 - queue_work(tracer->work_queue, &tracer->handle_traces_work); 1139 + queue_work(tracer->work_queue, &tracer->handle_traces_work); 1145 1140 break; 1146 1141 default: 1147 1142 mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 565 565 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) 566 566 { 567 567 bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; 568 - bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write); 568 + bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && 569 + MLX5_CAP_GEN(mdev, relaxed_ordering_write); 569 570 570 571 return ro && lro_en ? 571 572 MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
··· 38 38 39 39 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) 40 40 { 41 + bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); 41 42 bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); 42 43 bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read); 43 44 44 - MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read); 45 - MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write); 45 + MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read); 46 + MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write); 46 47 } 47 48 48 49 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+16 -15
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 950 950 return err; 951 951 } 952 952 953 + static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) 954 + { 955 + mlx5e_rep_tc_netdevice_event_unregister(rpriv); 956 + mlx5e_rep_bond_cleanup(rpriv); 957 + mlx5e_rep_tc_cleanup(rpriv); 958 + } 959 + 953 960 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) 954 961 { 955 962 struct mlx5e_rep_priv *rpriv = priv->ppriv; ··· 968 961 return err; 969 962 } 970 963 971 - err = mlx5e_tc_ht_init(&rpriv->tc_ht); 972 - if (err) 973 - goto err_ht_init; 974 - 975 964 if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { 976 965 err = mlx5e_init_uplink_rep_tx(rpriv); 977 966 if (err) 978 967 goto err_init_tx; 979 968 } 980 969 970 + err = mlx5e_tc_ht_init(&rpriv->tc_ht); 971 + if (err) 972 + goto err_ht_init; 973 + 981 974 return 0; 982 975 983 - err_init_tx: 984 - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); 985 976 err_ht_init: 977 + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) 978 + mlx5e_cleanup_uplink_rep_tx(rpriv); 979 + err_init_tx: 986 980 mlx5e_destroy_tises(priv); 987 981 return err; 988 - } 989 - 990 - static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) 991 - { 992 - mlx5e_rep_tc_netdevice_event_unregister(rpriv); 993 - mlx5e_rep_bond_cleanup(rpriv); 994 - mlx5e_rep_tc_cleanup(rpriv); 995 982 } 996 983 997 984 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) 998 985 { 999 986 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1000 987 1001 - mlx5e_destroy_tises(priv); 988 + mlx5e_tc_ht_cleanup(&rpriv->tc_ht); 1002 989 1003 990 if (rpriv->rep->vport == MLX5_VPORT_UPLINK) 1004 991 mlx5e_cleanup_uplink_rep_tx(rpriv); 1005 992 1006 - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); 993 + mlx5e_destroy_tises(priv); 1007 994 } 1008 995 1009 996 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
+6 -3
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 2690 2690 2691 2691 switch (event) { 2692 2692 case ESW_OFFLOADS_DEVCOM_PAIR: 2693 - if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev) 2694 - break; 2695 - 2696 2693 if (mlx5_eswitch_vport_match_metadata_enabled(esw) != 2697 2694 mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) 2698 2695 break; ··· 2741 2744 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2742 2745 return; 2743 2746 2747 + if (!mlx5_is_lag_supported(esw->dev)) 2748 + return; 2749 + 2744 2750 mlx5_devcom_register_component(devcom, 2745 2751 MLX5_DEVCOM_ESW_OFFLOADS, 2746 2752 mlx5_esw_offloads_devcom_event, ··· 2759 2759 struct mlx5_devcom *devcom = esw->dev->priv.devcom; 2760 2760 2761 2761 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 2762 + return; 2763 + 2764 + if (!mlx5_is_lag_supported(esw->dev)) 2762 2765 return; 2763 2766 2764 2767 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+32 -3
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1574 1574 return NULL; 1575 1575 } 1576 1576 1577 - static bool check_conflicting_actions(u32 action1, u32 action2) 1577 + static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0, 1578 + const struct mlx5_fs_vlan *vlan1) 1578 1579 { 1579 - u32 xored_actions = action1 ^ action2; 1580 + return vlan0->ethtype != vlan1->ethtype || 1581 + vlan0->vid != vlan1->vid || 1582 + vlan0->prio != vlan1->prio; 1583 + } 1584 + 1585 + static bool check_conflicting_actions(const struct mlx5_flow_act *act1, 1586 + const struct mlx5_flow_act *act2) 1587 + { 1588 + u32 action1 = act1->action; 1589 + u32 action2 = act2->action; 1590 + u32 xored_actions; 1591 + 1592 + xored_actions = action1 ^ action2; 1580 1593 1581 1594 /* if one rule only wants to count, it's ok */ 1582 1595 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || ··· 1606 1593 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) 1607 1594 return true; 1608 1595 1596 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT && 1597 + act1->pkt_reformat != act2->pkt_reformat) 1598 + return true; 1599 + 1600 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 1601 + act1->modify_hdr != act2->modify_hdr) 1602 + return true; 1603 + 1604 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH && 1605 + check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0])) 1606 + return true; 1607 + 1608 + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 && 1609 + check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1])) 1610 + return true; 1611 + 1609 1612 return false; 1610 1613 } 1611 1614 ··· 1629 1600 const struct mlx5_flow_context *flow_context, 1630 1601 const struct mlx5_flow_act *flow_act) 1631 1602 { 1632 - if (check_conflicting_actions(flow_act->action, fte->action.action)) { 1603 + if (check_conflicting_actions(flow_act, &fte->action)) { 1633 1604 mlx5_core_warn(get_dev(&fte->node), 1634 1605 "Found two FTEs with conflicting actions\n"); 1635 1606 return -EEXIST;
+10
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
··· 74 74 struct lag_mpesw lag_mpesw; 75 75 }; 76 76 77 + static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev) 78 + { 79 + if (!MLX5_CAP_GEN(dev, vport_group_manager) || 80 + !MLX5_CAP_GEN(dev, lag_master) || 81 + MLX5_CAP_GEN(dev, num_lag_ports) < 2 || 82 + MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) 83 + return false; 84 + return true; 85 + } 86 + 77 87 static inline struct mlx5_lag * 78 88 mlx5_lag_dev(struct mlx5_core_dev *dev) 79 89 {
-1
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 209 209 void mlx5_detach_device(struct mlx5_core_dev *dev); 210 210 int mlx5_register_device(struct mlx5_core_dev *dev); 211 211 void mlx5_unregister_device(struct mlx5_core_dev *dev); 212 - struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); 213 212 struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); 214 213 void mlx5_dev_list_lock(void); 215 214 void mlx5_dev_list_unlock(void);