Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5: Add RoCE MACsec steering infrastructure in core

Adds all the core steering helper functions that are needed in order
to set up RoCE steering rules, which includes both RX and TX rule
addition and deletion.
Also exports these functions so they are ready for use from the IB
driver: functions are exposed to allow deletion of all rules, which is
needed when a GID is deleted, or deletion of a specific rule when an SA
is deleted, and rule addition works in a similar manner.

These functions are used in a later patch by the IB driver to trigger
the rules addition/deletion when needed.

Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>

authored by

Patrisious Haddad and committed by
Leon Romanovsky
ac7ea1c7 8c14a2c7

+427 -9
+1
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 967 967 max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions); 968 968 table_type = FS_FT_ESW_INGRESS_ACL; 969 969 break; 970 + case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC: 970 971 case MLX5_FLOW_NAMESPACE_RDMA_TX: 971 972 max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions); 972 973 table_type = FS_FT_RDMA_TX;
+390 -9
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
··· 4 4 #include <net/macsec.h> 5 5 #include <linux/mlx5/qp.h> 6 6 #include <linux/if_vlan.h> 7 + #include <linux/mlx5/fs_helpers.h> 8 + #include <linux/mlx5/macsec.h> 7 9 #include "fs_core.h" 8 10 #include "lib/macsec_fs.h" 9 11 #include "mlx5_core.h" ··· 48 46 /* MACsec RX flow steering */ 49 47 #define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E 50 48 49 + /* MACsec fs_id handling for steering */ 50 + #define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2) 51 + #define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30)) 52 + 51 53 struct mlx5_sectag_header { 52 54 __be16 ethertype; 53 55 u8 tci_an; ··· 59 53 u32 pn; 60 54 u8 sci[MACSEC_SCI_LEN]; /* optional */ 61 55 } __packed; 56 + 57 + struct mlx5_roce_macsec_tx_rule { 58 + u32 fs_id; 59 + u16 gid_idx; 60 + struct list_head entry; 61 + struct mlx5_flow_handle *rule; 62 + struct mlx5_modify_hdr *meta_modhdr; 63 + }; 62 64 63 65 struct mlx5_macsec_tx_rule { 64 66 struct mlx5_flow_handle *rule; ··· 116 102 struct mlx5_macsec_tables tables; 117 103 118 104 struct mlx5_flow_table *ft_rdma_tx; 105 + }; 106 + 107 + struct mlx5_roce_macsec_rx_rule { 108 + u32 fs_id; 109 + u16 gid_idx; 110 + struct mlx5_flow_handle *op; 111 + struct mlx5_flow_handle *ip; 112 + struct list_head entry; 119 113 }; 120 114 121 115 struct mlx5_macsec_rx_rule { ··· 596 574 MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a, 597 575 MLX5_ETH_WQE_FT_META_MACSEC_MASK); 598 576 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a, 599 - MLX5_ETH_WQE_FT_META_MACSEC | id << 2); 577 + macsec_fs_set_tx_fs_id(id)); 600 578 601 579 *fs_id = id; 602 580 flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; ··· 798 776 static union mlx5_macsec_rule * 799 777 macsec_fs_tx_add_rule(struct mlx5_macsec_fs *macsec_fs, 800 778 const struct macsec_context *macsec_ctx, 801 - struct mlx5_macsec_rule_attrs *attrs) 779 + struct mlx5_macsec_rule_attrs *attrs, u32 *fs_id) 802 
780 { 803 781 char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN]; 804 782 struct mlx5_pkt_reformat_params reformat_params = {}; ··· 813 791 struct mlx5_flow_spec *spec; 814 792 size_t reformat_size; 815 793 int err = 0; 816 - u32 fs_id; 817 794 818 795 tx_tables = &tx_fs->tables; 819 796 ··· 852 831 } 853 832 tx_rule->pkt_reformat = flow_act.pkt_reformat; 854 833 855 - err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id); 834 + err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, fs_id); 856 835 if (err) { 857 836 mlx5_core_err(mdev, 858 837 "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n", ··· 860 839 goto err; 861 840 } 862 841 863 - tx_rule->fs_id = fs_id; 842 + tx_rule->fs_id = *fs_id; 864 843 865 844 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 866 845 MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | ··· 875 854 } 876 855 tx_rule->rule = rule; 877 856 878 - err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macsec_ctx->secy->netdev, 857 + err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, *fs_id, macsec_ctx->secy->netdev, 879 858 &macsec_fs->sci_hash, attrs->sci, true); 880 859 if (err) { 881 860 mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err); ··· 1764 1743 /* Set bit[15-0] fs id */ 1765 1744 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); 1766 1745 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); 1767 - MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30)); 1746 + MLX5_SET(set_action_in, action, data, macsec_fs_set_rx_fs_id(fs_id)); 1768 1747 MLX5_SET(set_action_in, action, offset, 0); 1769 1748 MLX5_SET(set_action_in, action, length, 32); 1770 1749 ··· 1923 1902 macsec_fs->rx_fs = NULL; 1924 1903 } 1925 1904 1905 + static void set_ipaddr_spec_v4(struct sockaddr_in *in, struct mlx5_flow_spec *spec, bool is_dst_ip) 1906 + { 1907 + MLX5_SET(fte_match_param, 
spec->match_value, 1908 + outer_headers.ip_version, MLX5_FS_IPV4_VERSION); 1909 + 1910 + if (is_dst_ip) { 1911 + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 1912 + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 1913 + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, 1914 + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 1915 + &in->sin_addr.s_addr, 4); 1916 + } else { 1917 + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 1918 + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); 1919 + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, 1920 + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), 1921 + &in->sin_addr.s_addr, 4); 1922 + } 1923 + } 1924 + 1925 + static void set_ipaddr_spec_v6(struct sockaddr_in6 *in6, struct mlx5_flow_spec *spec, 1926 + bool is_dst_ip) 1927 + { 1928 + MLX5_SET(fte_match_param, spec->match_value, 1929 + outer_headers.ip_version, MLX5_FS_IPV6_VERSION); 1930 + 1931 + if (is_dst_ip) { 1932 + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1933 + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1934 + 0xff, 16); 1935 + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, 1936 + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1937 + &in6->sin6_addr, 16); 1938 + } else { 1939 + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1940 + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 1941 + 0xff, 16); 1942 + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, 1943 + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 1944 + &in6->sin6_addr, 16); 1945 + } 1946 + } 1947 + 1948 + static void set_ipaddr_spec(const struct sockaddr *addr, 1949 + struct mlx5_flow_spec *spec, bool is_dst_ip) 1950 + { 1951 + struct sockaddr_in6 *in6; 1952 + 1953 + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 1954 + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 1955 + outer_headers.ip_version); 1956 + 1957 + if (addr->sa_family == AF_INET) { 1958 + struct sockaddr_in *in = (struct 
sockaddr_in *)addr; 1959 + 1960 + set_ipaddr_spec_v4(in, spec, is_dst_ip); 1961 + return; 1962 + } 1963 + 1964 + in6 = (struct sockaddr_in6 *)addr; 1965 + set_ipaddr_spec_v6(in6, spec, is_dst_ip); 1966 + } 1967 + 1968 + static void macsec_fs_del_roce_rule_rx(struct mlx5_roce_macsec_rx_rule *rx_rule) 1969 + { 1970 + mlx5_del_flow_rules(rx_rule->op); 1971 + mlx5_del_flow_rules(rx_rule->ip); 1972 + list_del(&rx_rule->entry); 1973 + kfree(rx_rule); 1974 + } 1975 + 1976 + static void macsec_fs_del_roce_rules_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, 1977 + struct list_head *rx_rules_list) 1978 + { 1979 + struct mlx5_roce_macsec_rx_rule *rx_rule, *next; 1980 + 1981 + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) 1982 + return; 1983 + 1984 + list_for_each_entry_safe(rx_rule, next, rx_rules_list, entry) { 1985 + if (rx_rule->fs_id == fs_id) 1986 + macsec_fs_del_roce_rule_rx(rx_rule); 1987 + } 1988 + } 1989 + 1990 + static void macsec_fs_del_roce_rule_tx(struct mlx5_core_dev *mdev, 1991 + struct mlx5_roce_macsec_tx_rule *tx_rule) 1992 + { 1993 + mlx5_del_flow_rules(tx_rule->rule); 1994 + mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr); 1995 + list_del(&tx_rule->entry); 1996 + kfree(tx_rule); 1997 + } 1998 + 1999 + static void macsec_fs_del_roce_rules_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, 2000 + struct list_head *tx_rules_list) 2001 + { 2002 + struct mlx5_roce_macsec_tx_rule *tx_rule, *next; 2003 + 2004 + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) 2005 + return; 2006 + 2007 + list_for_each_entry_safe(tx_rule, next, tx_rules_list, entry) { 2008 + if (tx_rule->fs_id == fs_id) 2009 + macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule); 2010 + } 2011 + } 2012 + 1926 2013 void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats) 1927 2014 { 1928 2015 struct mlx5_macsec_stats *stats = (struct mlx5_macsec_stats *)macsec_stats; ··· 2083 1954 struct mlx5_macsec_rule_attrs *attrs, 2084 1955 u32 *sa_fs_id) 2085 
1956 { 2086 - return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? 2087 - macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs) : 1957 + struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs, 1958 + .macdev = macsec_ctx->secy->netdev, 1959 + .is_tx = 1960 + (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) 1961 + }; 1962 + union mlx5_macsec_rule *macsec_rule; 1963 + u32 tx_new_fs_id; 1964 + 1965 + macsec_rule = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? 1966 + macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, &tx_new_fs_id) : 2088 1967 macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id); 1968 + 1969 + data.fs_id = (data.is_tx) ? tx_new_fs_id : *sa_fs_id; 1970 + if (macsec_rule) 1971 + blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh, 1972 + MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, 1973 + &data); 1974 + 1975 + return macsec_rule; 2089 1976 } 2090 1977 2091 1978 void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs, 2092 1979 union mlx5_macsec_rule *macsec_rule, 2093 1980 int action, void *macdev, u32 sa_fs_id) 2094 1981 { 1982 + struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs, 1983 + .macdev = macdev, 1984 + .is_tx = (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) 1985 + }; 1986 + 1987 + data.fs_id = (data.is_tx) ? macsec_rule->tx_rule.fs_id : sa_fs_id; 1988 + blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh, 1989 + MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, 1990 + &data); 1991 + 2095 1992 (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? 
2096 1993 macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule, macdev) : 2097 1994 macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule, macdev, sa_fs_id); 2098 1995 } 1996 + 1997 + static int mlx5_macsec_fs_add_roce_rule_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx, 1998 + const struct sockaddr *addr, 1999 + struct list_head *rx_rules_list) 2000 + { 2001 + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; 2002 + struct mlx5_roce_macsec_rx_rule *rx_rule; 2003 + struct mlx5_flow_destination dest = {}; 2004 + struct mlx5_flow_act flow_act = {}; 2005 + struct mlx5_flow_handle *new_rule; 2006 + struct mlx5_flow_spec *spec; 2007 + int err = 0; 2008 + 2009 + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 2010 + if (!spec) 2011 + return -ENOMEM; 2012 + 2013 + rx_rule = kzalloc(sizeof(*rx_rule), GFP_KERNEL); 2014 + if (!rx_rule) { 2015 + err = -ENOMEM; 2016 + goto out; 2017 + } 2018 + 2019 + set_ipaddr_spec(addr, spec, true); 2020 + 2021 + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2022 + dest.ft = rx_fs->roce.ft_macsec_op_check; 2023 + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 2024 + new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_ip_check, spec, &flow_act, 2025 + &dest, 1); 2026 + if (IS_ERR(new_rule)) { 2027 + err = PTR_ERR(new_rule); 2028 + goto ip_rule_err; 2029 + } 2030 + rx_rule->ip = new_rule; 2031 + 2032 + memset(&flow_act, 0, sizeof(flow_act)); 2033 + memset(spec, 0, sizeof(*spec)); 2034 + 2035 + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; 2036 + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_5); 2037 + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_5, 2038 + macsec_fs_set_rx_fs_id(fs_id)); 2039 + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; 2040 + new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_macsec_op_check, spec, &flow_act, 2041 + NULL, 0); 2042 + if (IS_ERR(new_rule)) { 2043 + err = PTR_ERR(new_rule); 2044 + goto op_rule_err; 2045 + 
} 2046 + rx_rule->op = new_rule; 2047 + rx_rule->gid_idx = gid_idx; 2048 + rx_rule->fs_id = fs_id; 2049 + list_add_tail(&rx_rule->entry, rx_rules_list); 2050 + 2051 + goto out; 2052 + 2053 + op_rule_err: 2054 + mlx5_del_flow_rules(rx_rule->ip); 2055 + rx_rule->ip = NULL; 2056 + ip_rule_err: 2057 + kfree(rx_rule); 2058 + out: 2059 + kvfree(spec); 2060 + return err; 2061 + } 2062 + 2063 + static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx, 2064 + const struct sockaddr *addr, 2065 + struct list_head *tx_rules_list) 2066 + { 2067 + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 2068 + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; 2069 + struct mlx5_core_dev *mdev = macsec_fs->mdev; 2070 + struct mlx5_modify_hdr *modify_hdr = NULL; 2071 + struct mlx5_roce_macsec_tx_rule *tx_rule; 2072 + struct mlx5_flow_destination dest = {}; 2073 + struct mlx5_flow_act flow_act = {}; 2074 + struct mlx5_flow_handle *new_rule; 2075 + struct mlx5_flow_spec *spec; 2076 + int err = 0; 2077 + 2078 + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 2079 + if (!spec) 2080 + return -ENOMEM; 2081 + 2082 + tx_rule = kzalloc(sizeof(*tx_rule), GFP_KERNEL); 2083 + if (!tx_rule) { 2084 + err = -ENOMEM; 2085 + goto out; 2086 + } 2087 + 2088 + set_ipaddr_spec(addr, spec, false); 2089 + 2090 + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); 2091 + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A); 2092 + MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id)); 2093 + MLX5_SET(set_action_in, action, offset, 0); 2094 + MLX5_SET(set_action_in, action, length, 32); 2095 + 2096 + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC, 2097 + 1, action); 2098 + if (IS_ERR(modify_hdr)) { 2099 + err = PTR_ERR(modify_hdr); 2100 + mlx5_core_err(mdev, "Fail to alloc ROCE MACsec set modify_header_id err=%d\n", 2101 + err); 2102 + modify_hdr = NULL; 2103 + goto 
modify_hdr_err; 2104 + } 2105 + tx_rule->meta_modhdr = modify_hdr; 2106 + 2107 + flow_act.modify_hdr = modify_hdr; 2108 + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 2109 + 2110 + dest.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; 2111 + dest.ft = tx_fs->tables.ft_crypto.t; 2112 + new_rule = mlx5_add_flow_rules(tx_fs->ft_rdma_tx, spec, &flow_act, &dest, 1); 2113 + if (IS_ERR(new_rule)) { 2114 + err = PTR_ERR(new_rule); 2115 + mlx5_core_err(mdev, "Failed to add ROCE TX rule, err=%d\n", err); 2116 + goto rule_err; 2117 + } 2118 + tx_rule->rule = new_rule; 2119 + tx_rule->gid_idx = gid_idx; 2120 + tx_rule->fs_id = fs_id; 2121 + list_add_tail(&tx_rule->entry, tx_rules_list); 2122 + 2123 + goto out; 2124 + 2125 + rule_err: 2126 + mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr); 2127 + modify_hdr_err: 2128 + kfree(tx_rule); 2129 + out: 2130 + kvfree(spec); 2131 + return err; 2132 + } 2133 + 2134 + void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs, 2135 + struct list_head *tx_rules_list, struct list_head *rx_rules_list) 2136 + { 2137 + struct mlx5_roce_macsec_rx_rule *rx_rule, *next_rx; 2138 + struct mlx5_roce_macsec_tx_rule *tx_rule, *next_tx; 2139 + 2140 + list_for_each_entry_safe(tx_rule, next_tx, tx_rules_list, entry) { 2141 + if (tx_rule->gid_idx == gid_idx) 2142 + macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule); 2143 + } 2144 + 2145 + list_for_each_entry_safe(rx_rule, next_rx, rx_rules_list, entry) { 2146 + if (rx_rule->gid_idx == gid_idx) 2147 + macsec_fs_del_roce_rule_rx(rx_rule); 2148 + } 2149 + } 2150 + EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_rule); 2151 + 2152 + int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx, 2153 + struct list_head *tx_rules_list, struct list_head *rx_rules_list, 2154 + struct mlx5_macsec_fs *macsec_fs) 2155 + { 2156 + struct mlx5_macsec_device *iter, *macsec_device = NULL; 2157 + struct mlx5_core_dev *mdev = macsec_fs->mdev; 
2158 + struct mlx5_fs_id *fs_id_iter; 2159 + unsigned long index = 0; 2160 + int err; 2161 + 2162 + list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) { 2163 + if (iter->macdev == macdev) { 2164 + macsec_device = iter; 2165 + break; 2166 + } 2167 + } 2168 + 2169 + if (!macsec_device) 2170 + return 0; 2171 + 2172 + xa_for_each(&macsec_device->tx_id_xa, index, fs_id_iter) { 2173 + err = mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id_iter->id, gid_idx, addr, 2174 + tx_rules_list); 2175 + if (err) { 2176 + mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n"); 2177 + goto out; 2178 + } 2179 + } 2180 + 2181 + index = 0; 2182 + xa_for_each(&macsec_device->rx_id_xa, index, fs_id_iter) { 2183 + err = mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id_iter->id, gid_idx, addr, 2184 + rx_rules_list); 2185 + if (err) { 2186 + mlx5_core_err(mdev, "MACsec offload: Failed to add roce RX rule\n"); 2187 + goto out; 2188 + } 2189 + } 2190 + 2191 + return 0; 2192 + out: 2193 + mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list, rx_rules_list); 2194 + return err; 2195 + } 2196 + EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_rule); 2197 + 2198 + void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx, 2199 + struct list_head *tx_rules_list, 2200 + struct list_head *rx_rules_list, 2201 + struct mlx5_macsec_fs *macsec_fs, bool is_tx) 2202 + { 2203 + (is_tx) ? 2204 + mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id, gid_idx, addr, 2205 + tx_rules_list) : 2206 + mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id, gid_idx, addr, 2207 + rx_rules_list); 2208 + } 2209 + EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_sa_rules); 2210 + 2211 + void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs, 2212 + struct list_head *tx_rules_list, 2213 + struct list_head *rx_rules_list, bool is_tx) 2214 + { 2215 + (is_tx) ? 
2216 + macsec_fs_del_roce_rules_tx(macsec_fs, fs_id, tx_rules_list) : 2217 + macsec_fs_del_roce_rules_rx(macsec_fs, fs_id, rx_rules_list); 2218 + } 2219 + EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_sa_rules); 2099 2220 2100 2221 void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs) 2101 2222 { ··· 2393 2014 mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err); 2394 2015 goto tx_cleanup; 2395 2016 } 2017 + 2018 + BLOCKING_INIT_NOTIFIER_HEAD(&mdev->macsec_nh); 2396 2019 2397 2020 return macsec_fs; 2398 2021
+2
include/linux/mlx5/device.h
··· 364 364 enum mlx5_driver_event { 365 365 MLX5_DRIVER_EVENT_TYPE_TRAP = 0, 366 366 MLX5_DRIVER_EVENT_UPLINK_NETDEV, 367 + MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, 368 + MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, 367 369 }; 368 370 369 371 enum {
+2
include/linux/mlx5/driver.h
··· 807 807 struct mlx5_thermal *thermal; 808 808 #ifdef CONFIG_MLX5_MACSEC 809 809 struct mlx5_macsec_fs *macsec_fs; 810 + /* MACsec notifier chain to sync MACsec core and IB database */ 811 + struct blocking_notifier_head macsec_nh; 810 812 #endif 811 813 }; 812 814
+32
include/linux/mlx5/macsec.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ 2 + /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */ 3 + 4 + #ifndef MLX5_MACSEC_H 5 + #define MLX5_MACSEC_H 6 + 7 + #ifdef CONFIG_MLX5_MACSEC 8 + struct mlx5_macsec_event_data { 9 + struct mlx5_macsec_fs *macsec_fs; 10 + void *macdev; 11 + u32 fs_id; 12 + bool is_tx; 13 + }; 14 + 15 + int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx, 16 + struct list_head *tx_rules_list, struct list_head *rx_rules_list, 17 + struct mlx5_macsec_fs *macsec_fs); 18 + 19 + void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs, 20 + struct list_head *tx_rules_list, struct list_head *rx_rules_list); 21 + 22 + void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx, 23 + struct list_head *tx_rules_list, 24 + struct list_head *rx_rules_list, 25 + struct mlx5_macsec_fs *macsec_fs, bool is_tx); 26 + 27 + void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs, 28 + struct list_head *tx_rules_list, 29 + struct list_head *rx_rules_list, bool is_tx); 30 + 31 + #endif 32 + #endif /* MLX5_MACSEC_H */