Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mlxsw: spectrum_nve: Un/offload FDB on nve_fid_disable/enable

Any existing NVE FDB entries need to be offloaded when NVE is enabled
for a given FID. Recent patches have added fdb_replay op for this, so
just invoke it from mlxsw_sp_nve_fid_enable().

When NVE is disabled on a FID, any existing FDB offloaded marks need to
be cleared on the NVE device as well as on its bridge master. An op to
handle this, fdb_clear_offload, has been added to FID ops and NVE ops in
previous patches. Add code to resolve the NVE device, NVE type, and
dispatch to both fdb_clear_offload ops.

Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Petr Machata and committed by
David S. Miller
8a5969d8 83de7883

+41
+41
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -789,6 +789,21 @@
 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
 }
 
+static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
+					   const struct mlxsw_sp_fid *fid,
+					   const struct net_device *nve_dev,
+					   __be32 vni)
+{
+	const struct mlxsw_sp_nve_ops *ops;
+	enum mlxsw_sp_nve_type type;
+
+	if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type)))
+		return;
+
+	ops = mlxsw_sp->nve->nve_ops_arr[type];
+	ops->fdb_clear_offload(nve_dev, vni);
+}
+
 int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
 			    struct mlxsw_sp_nve_params *params,
 			    struct netlink_ext_ack *extack)
@@ -826,9 +841,17 @@
 
 	nve->config = config;
 
+	err = ops->fdb_replay(params->dev, params->vni);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB");
+		goto err_fdb_replay;
+	}
+
 	return 0;
 
+err_fdb_replay:
+	mlxsw_sp_fid_vni_clear(fid);
 err_fid_vni_set:
 	mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
 	return err;
@@ -837,9 +860,27 @@
 			     struct mlxsw_sp_fid *fid)
 {
 	u16 fid_index = mlxsw_sp_fid_index(fid);
+	struct net_device *nve_dev;
+	int nve_ifindex;
+	__be32 vni;
 
 	mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
 	mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+
+	if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
+		    mlxsw_sp_fid_vni(fid, &vni)))
+		goto out;
+
+	nve_dev = dev_get_by_index(&init_net, nve_ifindex);
+	if (!nve_dev)
+		goto out;
+
+	mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
+	mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);
+
+	dev_put(nve_dev);
+
+out:
 	mlxsw_sp_fid_vni_clear(fid);
 	mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
 }