Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: switchdev bridge offload

Wojciech Drewek says:

Linux bridge provides ability to learn MAC addresses and vlans
detected on the bridge's ports. As a result of this, FDB (forwarding database)
entries are created and they can be offloaded to the HW. By adding
VF's port representors to the bridge together with the uplink netdev,
we can learn VF's and link partner's MAC addresses. This is achieved
by slow/exception-path, where packets that do not match any filters
(FDB entries in this case) are sent to the bridge ports.

Driver keeps track of the netdevs added to the bridge
by listening for NETDEV_CHANGEUPPER event. We distinguish two types
of bridge ports: uplink port and VF's representor port. Linux
bridge always learns src MAC of the packet on rx path. With the
current slow-path implementation, it means that we will learn
VF's MAC on port repr (when the VF transmits the packet) and
link partner's MAC on uplink (when we receive it on uplink from LAN).

The driver is notified about learning of the MAC/VLAN by
SWITCHDEV_FDB_{ADD|DEL}_TO_DEVICE events. This is followed by creation
of the HW filter. The direction of the filter is based on port
type (uplink or VF repr). In case of the uplink, rule forwards
the packets to the LAN (matching on link partner's MAC). When the
notification is received on VF repr then the rule forwards the
packets to the associated VF (matching on VF's MAC).

This approach would not work on its own however. This is because if
one of the directions is offloaded, then the bridge would not be able
to learn the other one. If the egress rule is added (learned on uplink)
then the response from the VF will be sent directly to the LAN.
The packet would not go through the slow-path, so it would not be seen on
VF's port repr. Because of that, the bridge would not learn VF's MAC.

This is solved by introducing a guard rule. It prevents the forward rule from
working until the opposite direction is offloaded.

Aging is not fully supported yet; the aging time is static for now. The
follow up submissions will introduce counters that will allow us to
keep track of whether the rule is actually being used or not.

A few fixes/changes are needed for this feature to work with ice driver.
These are introduced in first 5 patches.

Reviewed-by: Vlad Buslov <vladbu@nvidia.com>

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
ice: add tracepoints for the switchdev bridge
ice: implement static version of ageing
ice: implement bridge port vlan
ice: Add VLAN FDB support in switchdev mode
ice: Add guard rule when creating FDB in switchdev
ice: Switchdev FDB events support
ice: Implement basic eswitch bridge setup
ice: Unset src prune on uplink VSI
ice: Disable vlan pruning for uplink VSI
ice: Don't tx before switchdev is fully configured
ice: Prohibit rx mode change in switchdev mode
ice: Skip adv rules removal upon switchdev release
====================

Link: https://lore.kernel.org/r/20230724161152.2177196-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1861 -186
+1 -1
drivers/net/ethernet/intel/ice/Makefile
··· 47 47 ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o 48 48 ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o 49 49 ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o 50 - ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o 50 + ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o 51 51 ice-$(CONFIG_GNSS) += ice_gnss.o
+4 -1
drivers/net/ethernet/intel/ice/ice.h
··· 370 370 u16 rx_buf_len; 371 371 372 372 struct ice_aqc_vsi_props info; /* VSI properties */ 373 + struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ 373 374 374 375 /* VSI stats */ 375 376 struct rtnl_link_stats64 net_stats; ··· 518 517 struct ice_switchdev_info { 519 518 struct ice_vsi *control_vsi; 520 519 struct ice_vsi *uplink_vsi; 520 + struct ice_esw_br_offloads *br_offloads; 521 521 bool is_running; 522 522 }; 523 523 ··· 628 626 struct ice_lag *lag; /* Link Aggregation information */ 629 627 630 628 struct ice_switchdev_info switchdev; 629 + struct ice_esw_br_port *br_port; 631 630 632 631 #define ICE_INVALID_AGG_NODE_ID 0 633 632 #define ICE_PF_AGG_NODE_ID_START 1 ··· 856 853 return false; 857 854 } 858 855 859 - bool netif_is_ice(struct net_device *dev); 856 + bool netif_is_ice(const struct net_device *dev); 860 857 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); 861 858 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); 862 859 int ice_vsi_open_ctrl(struct ice_vsi *vsi);
+41 -5
drivers/net/ethernet/intel/ice/ice_eswitch.c
··· 4 4 #include "ice.h" 5 5 #include "ice_lib.h" 6 6 #include "ice_eswitch.h" 7 + #include "ice_eswitch_br.h" 7 8 #include "ice_fltr.h" 8 9 #include "ice_repr.h" 9 10 #include "ice_devlink.h" ··· 104 103 rule_added = true; 105 104 } 106 105 106 + vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); 107 + if (vlan_ops->dis_rx_filtering(uplink_vsi)) 108 + goto err_dis_rx; 109 + 107 110 if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) 108 111 goto err_override_uplink; 109 112 110 113 if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) 111 114 goto err_override_control; 112 115 116 + if (ice_vsi_update_local_lb(uplink_vsi, true)) 117 + goto err_override_local_lb; 118 + 113 119 return 0; 114 120 121 + err_override_local_lb: 122 + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); 115 123 err_override_control: 116 124 ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); 117 125 err_override_uplink: 126 + vlan_ops->ena_rx_filtering(uplink_vsi); 127 + err_dis_rx: 118 128 if (rule_added) 119 129 ice_clear_dflt_vsi(uplink_vsi); 120 130 err_def_rx: ··· 318 306 repr->src_vsi = vsi; 319 307 repr->dst->u.port_info.port_id = vsi->vsi_num; 320 308 309 + if (repr->br_port) 310 + repr->br_port->vsi = vsi; 311 + 321 312 ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); 322 313 if (ret) { 323 314 ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); ··· 345 330 346 331 np = netdev_priv(netdev); 347 332 vsi = np->vsi; 333 + 334 + if (!vsi || !ice_is_switchdev_running(vsi->back)) 335 + return NETDEV_TX_BUSY; 348 336 349 337 if (ice_is_reset_in_progress(vsi->back->state) || 350 338 test_bit(ICE_VF_DIS, vsi->back->state)) ··· 396 378 { 397 379 struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; 398 380 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; 381 + struct ice_vsi_vlan_ops *vlan_ops; 399 382 383 + vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); 384 + 385 + 
ice_vsi_update_local_lb(uplink_vsi, false); 400 386 ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); 401 387 ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); 388 + vlan_ops->ena_rx_filtering(uplink_vsi); 402 389 ice_clear_dflt_vsi(uplink_vsi); 403 390 ice_fltr_add_mac_and_broadcast(uplink_vsi, 404 391 uplink_vsi->port_info->mac.perm_addr, ··· 478 455 */ 479 456 static int ice_eswitch_enable_switchdev(struct ice_pf *pf) 480 457 { 481 - struct ice_vsi *ctrl_vsi; 458 + struct ice_vsi *ctrl_vsi, *uplink_vsi; 459 + 460 + uplink_vsi = ice_get_main_vsi(pf); 461 + if (!uplink_vsi) 462 + return -ENODEV; 463 + 464 + if (netif_is_any_bridge_port(uplink_vsi->netdev)) { 465 + dev_err(ice_pf_to_dev(pf), 466 + "Uplink port cannot be a bridge port\n"); 467 + return -EINVAL; 468 + } 482 469 483 470 pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); 484 471 if (!pf->switchdev.control_vsi) 485 472 return -ENODEV; 486 473 487 474 ctrl_vsi = pf->switchdev.control_vsi; 488 - pf->switchdev.uplink_vsi = ice_get_main_vsi(pf); 489 - if (!pf->switchdev.uplink_vsi) 490 - goto err_vsi; 475 + pf->switchdev.uplink_vsi = uplink_vsi; 491 476 492 477 if (ice_eswitch_setup_env(pf)) 493 478 goto err_vsi; ··· 511 480 if (ice_vsi_open(ctrl_vsi)) 512 481 goto err_setup_reprs; 513 482 483 + if (ice_eswitch_br_offloads_init(pf)) 484 + goto err_br_offloads; 485 + 514 486 ice_eswitch_napi_enable(pf); 515 487 516 488 return 0; 517 489 490 + err_br_offloads: 491 + ice_vsi_close(ctrl_vsi); 518 492 err_setup_reprs: 519 493 ice_repr_rem_from_all_vfs(pf); 520 494 err_repr_add: ··· 538 502 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; 539 503 540 504 ice_eswitch_napi_disable(pf); 505 + ice_eswitch_br_offloads_deinit(pf); 541 506 ice_eswitch_release_env(pf); 542 - ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx); 543 507 ice_eswitch_release_reprs(pf, ctrl_vsi); 544 508 ice_vsi_release(ctrl_vsi); 545 509 ice_repr_rem_from_all_vfs(pf);
+1309
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2023, Intel Corporation. */ 3 + 4 + #include "ice.h" 5 + #include "ice_eswitch_br.h" 6 + #include "ice_repr.h" 7 + #include "ice_switch.h" 8 + #include "ice_vlan.h" 9 + #include "ice_vf_vsi_vlan_ops.h" 10 + #include "ice_trace.h" 11 + 12 + #define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000) 13 + 14 + static const struct rhashtable_params ice_fdb_ht_params = { 15 + .key_offset = offsetof(struct ice_esw_br_fdb_entry, data), 16 + .key_len = sizeof(struct ice_esw_br_fdb_data), 17 + .head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node), 18 + .automatic_shrinking = true, 19 + }; 20 + 21 + static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev) 22 + { 23 + /* Accept only PF netdev and PRs */ 24 + return ice_is_port_repr_netdev(dev) || netif_is_ice(dev); 25 + } 26 + 27 + static struct ice_esw_br_port * 28 + ice_eswitch_br_netdev_to_port(struct net_device *dev) 29 + { 30 + if (ice_is_port_repr_netdev(dev)) { 31 + struct ice_repr *repr = ice_netdev_to_repr(dev); 32 + 33 + return repr->br_port; 34 + } else if (netif_is_ice(dev)) { 35 + struct ice_pf *pf = ice_netdev_to_pf(dev); 36 + 37 + return pf->br_port; 38 + } 39 + 40 + return NULL; 41 + } 42 + 43 + static void 44 + ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info, 45 + u8 pf_id, u16 vf_vsi_idx) 46 + { 47 + rule_info->sw_act.vsi_handle = vf_vsi_idx; 48 + rule_info->sw_act.flag |= ICE_FLTR_RX; 49 + rule_info->sw_act.src = pf_id; 50 + rule_info->priority = 5; 51 + } 52 + 53 + static void 54 + ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info, 55 + u16 pf_vsi_idx) 56 + { 57 + rule_info->sw_act.vsi_handle = pf_vsi_idx; 58 + rule_info->sw_act.flag |= ICE_FLTR_TX; 59 + rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; 60 + rule_info->flags_info.act_valid = true; 61 + rule_info->priority = 5; 62 + } 63 + 64 + static int 65 + ice_eswitch_br_rule_delete(struct ice_hw *hw, struct 
ice_rule_query_data *rule) 66 + { 67 + int err; 68 + 69 + if (!rule) 70 + return -EINVAL; 71 + 72 + err = ice_rem_adv_rule_by_id(hw, rule); 73 + kfree(rule); 74 + 75 + return err; 76 + } 77 + 78 + static u16 79 + ice_eswitch_br_get_lkups_cnt(u16 vid) 80 + { 81 + return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1; 82 + } 83 + 84 + static void 85 + ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid) 86 + { 87 + if (ice_eswitch_br_is_vid_valid(vid)) { 88 + list[1].type = ICE_VLAN_OFOS; 89 + list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK); 90 + list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF); 91 + } 92 + } 93 + 94 + static struct ice_rule_query_data * 95 + ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type, 96 + const unsigned char *mac, u16 vid) 97 + { 98 + struct ice_adv_rule_info rule_info = { 0 }; 99 + struct ice_rule_query_data *rule; 100 + struct ice_adv_lkup_elem *list; 101 + u16 lkups_cnt; 102 + int err; 103 + 104 + lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid); 105 + 106 + rule = kzalloc(sizeof(*rule), GFP_KERNEL); 107 + if (!rule) 108 + return ERR_PTR(-ENOMEM); 109 + 110 + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); 111 + if (!list) { 112 + err = -ENOMEM; 113 + goto err_list_alloc; 114 + } 115 + 116 + switch (port_type) { 117 + case ICE_ESWITCH_BR_UPLINK_PORT: 118 + ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx); 119 + break; 120 + case ICE_ESWITCH_BR_VF_REPR_PORT: 121 + ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id, 122 + vsi_idx); 123 + break; 124 + default: 125 + err = -EINVAL; 126 + goto err_add_rule; 127 + } 128 + 129 + list[0].type = ICE_MAC_OFOS; 130 + ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac); 131 + eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr); 132 + 133 + ice_eswitch_br_add_vlan_lkup(list, vid); 134 + 135 + rule_info.need_pass_l2 = true; 136 + 137 + rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; 138 + 139 + err = ice_add_adv_rule(hw, list, lkups_cnt, 
&rule_info, rule); 140 + if (err) 141 + goto err_add_rule; 142 + 143 + kfree(list); 144 + 145 + return rule; 146 + 147 + err_add_rule: 148 + kfree(list); 149 + err_list_alloc: 150 + kfree(rule); 151 + 152 + return ERR_PTR(err); 153 + } 154 + 155 + static struct ice_rule_query_data * 156 + ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx, 157 + const unsigned char *mac, u16 vid) 158 + { 159 + struct ice_adv_rule_info rule_info = { 0 }; 160 + struct ice_rule_query_data *rule; 161 + struct ice_adv_lkup_elem *list; 162 + int err = -ENOMEM; 163 + u16 lkups_cnt; 164 + 165 + lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid); 166 + 167 + rule = kzalloc(sizeof(*rule), GFP_KERNEL); 168 + if (!rule) 169 + goto err_exit; 170 + 171 + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); 172 + if (!list) 173 + goto err_list_alloc; 174 + 175 + list[0].type = ICE_MAC_OFOS; 176 + ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac); 177 + eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr); 178 + 179 + ice_eswitch_br_add_vlan_lkup(list, vid); 180 + 181 + rule_info.allow_pass_l2 = true; 182 + rule_info.sw_act.vsi_handle = vsi_idx; 183 + rule_info.sw_act.fltr_act = ICE_NOP; 184 + rule_info.priority = 5; 185 + 186 + err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule); 187 + if (err) 188 + goto err_add_rule; 189 + 190 + kfree(list); 191 + 192 + return rule; 193 + 194 + err_add_rule: 195 + kfree(list); 196 + err_list_alloc: 197 + kfree(rule); 198 + err_exit: 199 + return ERR_PTR(err); 200 + } 201 + 202 + static struct ice_esw_br_flow * 203 + ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx, 204 + int port_type, const unsigned char *mac, u16 vid) 205 + { 206 + struct ice_rule_query_data *fwd_rule, *guard_rule; 207 + struct ice_esw_br_flow *flow; 208 + int err; 209 + 210 + flow = kzalloc(sizeof(*flow), GFP_KERNEL); 211 + if (!flow) 212 + return ERR_PTR(-ENOMEM); 213 + 214 + fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac, 
215 + vid); 216 + err = PTR_ERR_OR_ZERO(fwd_rule); 217 + if (err) { 218 + dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n", 219 + port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in", 220 + err); 221 + goto err_fwd_rule; 222 + } 223 + 224 + guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid); 225 + err = PTR_ERR_OR_ZERO(guard_rule); 226 + if (err) { 227 + dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n", 228 + port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in", 229 + err); 230 + goto err_guard_rule; 231 + } 232 + 233 + flow->fwd_rule = fwd_rule; 234 + flow->guard_rule = guard_rule; 235 + 236 + return flow; 237 + 238 + err_guard_rule: 239 + ice_eswitch_br_rule_delete(hw, fwd_rule); 240 + err_fwd_rule: 241 + kfree(flow); 242 + 243 + return ERR_PTR(err); 244 + } 245 + 246 + static struct ice_esw_br_fdb_entry * 247 + ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac, 248 + u16 vid) 249 + { 250 + struct ice_esw_br_fdb_data data = { 251 + .vid = vid, 252 + }; 253 + 254 + ether_addr_copy(data.addr, mac); 255 + return rhashtable_lookup_fast(&bridge->fdb_ht, &data, 256 + ice_fdb_ht_params); 257 + } 258 + 259 + static void 260 + ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow) 261 + { 262 + struct device *dev = ice_pf_to_dev(pf); 263 + int err; 264 + 265 + err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule); 266 + if (err) 267 + dev_err(dev, "Failed to delete FDB forward rule, err: %d\n", 268 + err); 269 + 270 + err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule); 271 + if (err) 272 + dev_err(dev, "Failed to delete FDB guard rule, err: %d\n", 273 + err); 274 + 275 + kfree(flow); 276 + } 277 + 278 + static struct ice_esw_br_vlan * 279 + ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid) 280 + { 281 + struct ice_pf *pf = bridge->br_offloads->pf; 282 + struct device *dev = ice_pf_to_dev(pf); 283 + struct 
ice_esw_br_port *port; 284 + struct ice_esw_br_vlan *vlan; 285 + 286 + port = xa_load(&bridge->ports, vsi_idx); 287 + if (!port) { 288 + dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx); 289 + return ERR_PTR(-EINVAL); 290 + } 291 + 292 + vlan = xa_load(&port->vlans, vid); 293 + if (!vlan) { 294 + dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n", 295 + vsi_idx); 296 + return ERR_PTR(-EINVAL); 297 + } 298 + 299 + return vlan; 300 + } 301 + 302 + static void 303 + ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge, 304 + struct ice_esw_br_fdb_entry *fdb_entry) 305 + { 306 + struct ice_pf *pf = bridge->br_offloads->pf; 307 + 308 + rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node, 309 + ice_fdb_ht_params); 310 + list_del(&fdb_entry->list); 311 + 312 + ice_eswitch_br_flow_delete(pf, fdb_entry->flow); 313 + 314 + kfree(fdb_entry); 315 + } 316 + 317 + static void 318 + ice_eswitch_br_fdb_offload_notify(struct net_device *dev, 319 + const unsigned char *mac, u16 vid, 320 + unsigned long val) 321 + { 322 + struct switchdev_notifier_fdb_info fdb_info = { 323 + .addr = mac, 324 + .vid = vid, 325 + .offloaded = true, 326 + }; 327 + 328 + call_switchdev_notifiers(val, dev, &fdb_info.info, NULL); 329 + } 330 + 331 + static void 332 + ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge, 333 + struct ice_esw_br_fdb_entry *entry) 334 + { 335 + if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)) 336 + ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr, 337 + entry->data.vid, 338 + SWITCHDEV_FDB_DEL_TO_BRIDGE); 339 + ice_eswitch_br_fdb_entry_delete(bridge, entry); 340 + } 341 + 342 + static void 343 + ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge, 344 + const unsigned char *mac, u16 vid) 345 + { 346 + struct ice_pf *pf = bridge->br_offloads->pf; 347 + struct ice_esw_br_fdb_entry *fdb_entry; 348 + struct device *dev = ice_pf_to_dev(pf); 349 + 350 + fdb_entry = 
ice_eswitch_br_fdb_find(bridge, mac, vid); 351 + if (!fdb_entry) { 352 + dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n", 353 + mac, vid); 354 + return; 355 + } 356 + 357 + trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry); 358 + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry); 359 + } 360 + 361 + static void 362 + ice_eswitch_br_fdb_entry_create(struct net_device *netdev, 363 + struct ice_esw_br_port *br_port, 364 + bool added_by_user, 365 + const unsigned char *mac, u16 vid) 366 + { 367 + struct ice_esw_br *bridge = br_port->bridge; 368 + struct ice_pf *pf = bridge->br_offloads->pf; 369 + struct device *dev = ice_pf_to_dev(pf); 370 + struct ice_esw_br_fdb_entry *fdb_entry; 371 + struct ice_esw_br_flow *flow; 372 + struct ice_esw_br_vlan *vlan; 373 + struct ice_hw *hw = &pf->hw; 374 + unsigned long event; 375 + int err; 376 + 377 + /* untagged filtering is not yet supported */ 378 + if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid) 379 + return; 380 + 381 + if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) { 382 + vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx, 383 + vid); 384 + if (IS_ERR(vlan)) { 385 + dev_err(dev, "Failed to find vlan lookup, err: %ld\n", 386 + PTR_ERR(vlan)); 387 + return; 388 + } 389 + } 390 + 391 + fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid); 392 + if (fdb_entry) 393 + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry); 394 + 395 + fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL); 396 + if (!fdb_entry) { 397 + err = -ENOMEM; 398 + goto err_exit; 399 + } 400 + 401 + flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx, 402 + br_port->type, mac, vid); 403 + if (IS_ERR(flow)) { 404 + err = PTR_ERR(flow); 405 + goto err_add_flow; 406 + } 407 + 408 + ether_addr_copy(fdb_entry->data.addr, mac); 409 + fdb_entry->data.vid = vid; 410 + fdb_entry->br_port = br_port; 411 + fdb_entry->flow = flow; 412 + fdb_entry->dev = netdev; 413 + fdb_entry->last_use = 
jiffies; 414 + event = SWITCHDEV_FDB_ADD_TO_BRIDGE; 415 + 416 + if (added_by_user) { 417 + fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER; 418 + event = SWITCHDEV_FDB_OFFLOADED; 419 + } 420 + 421 + err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node, 422 + ice_fdb_ht_params); 423 + if (err) 424 + goto err_fdb_insert; 425 + 426 + list_add(&fdb_entry->list, &bridge->fdb_list); 427 + trace_ice_eswitch_br_fdb_entry_create(fdb_entry); 428 + 429 + ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event); 430 + 431 + return; 432 + 433 + err_fdb_insert: 434 + ice_eswitch_br_flow_delete(pf, flow); 435 + err_add_flow: 436 + kfree(fdb_entry); 437 + err_exit: 438 + dev_err(dev, "Failed to create fdb entry, err: %d\n", err); 439 + } 440 + 441 + static void 442 + ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work) 443 + { 444 + kfree(fdb_work->fdb_info.addr); 445 + kfree(fdb_work); 446 + } 447 + 448 + static void 449 + ice_eswitch_br_fdb_event_work(struct work_struct *work) 450 + { 451 + struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work); 452 + bool added_by_user = fdb_work->fdb_info.added_by_user; 453 + const unsigned char *mac = fdb_work->fdb_info.addr; 454 + u16 vid = fdb_work->fdb_info.vid; 455 + struct ice_esw_br_port *br_port; 456 + 457 + rtnl_lock(); 458 + 459 + br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev); 460 + if (!br_port) 461 + goto err_exit; 462 + 463 + switch (fdb_work->event) { 464 + case SWITCHDEV_FDB_ADD_TO_DEVICE: 465 + ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port, 466 + added_by_user, mac, vid); 467 + break; 468 + case SWITCHDEV_FDB_DEL_TO_DEVICE: 469 + ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge, 470 + mac, vid); 471 + break; 472 + default: 473 + goto err_exit; 474 + } 475 + 476 + err_exit: 477 + rtnl_unlock(); 478 + dev_put(fdb_work->dev); 479 + ice_eswitch_br_fdb_work_dealloc(fdb_work); 480 + } 481 + 482 + static struct ice_esw_br_fdb_work * 483 + 
ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info, 484 + struct net_device *dev, 485 + unsigned long event) 486 + { 487 + struct ice_esw_br_fdb_work *work; 488 + unsigned char *mac; 489 + 490 + work = kzalloc(sizeof(*work), GFP_ATOMIC); 491 + if (!work) 492 + return ERR_PTR(-ENOMEM); 493 + 494 + INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work); 495 + memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info)); 496 + 497 + mac = kzalloc(ETH_ALEN, GFP_ATOMIC); 498 + if (!mac) { 499 + kfree(work); 500 + return ERR_PTR(-ENOMEM); 501 + } 502 + 503 + ether_addr_copy(mac, fdb_info->addr); 504 + work->fdb_info.addr = mac; 505 + work->event = event; 506 + work->dev = dev; 507 + 508 + return work; 509 + } 510 + 511 + static int 512 + ice_eswitch_br_switchdev_event(struct notifier_block *nb, 513 + unsigned long event, void *ptr) 514 + { 515 + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 516 + struct switchdev_notifier_fdb_info *fdb_info; 517 + struct switchdev_notifier_info *info = ptr; 518 + struct ice_esw_br_offloads *br_offloads; 519 + struct ice_esw_br_fdb_work *work; 520 + struct netlink_ext_ack *extack; 521 + struct net_device *upper; 522 + 523 + br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb); 524 + extack = switchdev_notifier_info_to_extack(ptr); 525 + 526 + upper = netdev_master_upper_dev_get_rcu(dev); 527 + if (!upper) 528 + return NOTIFY_DONE; 529 + 530 + if (!netif_is_bridge_master(upper)) 531 + return NOTIFY_DONE; 532 + 533 + if (!ice_eswitch_br_is_dev_valid(dev)) 534 + return NOTIFY_DONE; 535 + 536 + if (!ice_eswitch_br_netdev_to_port(dev)) 537 + return NOTIFY_DONE; 538 + 539 + switch (event) { 540 + case SWITCHDEV_FDB_ADD_TO_DEVICE: 541 + case SWITCHDEV_FDB_DEL_TO_DEVICE: 542 + fdb_info = container_of(info, typeof(*fdb_info), info); 543 + 544 + work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event); 545 + if (IS_ERR(work)) { 546 + NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work"); 547 + return 
notifier_from_errno(PTR_ERR(work)); 548 + } 549 + dev_hold(dev); 550 + 551 + queue_work(br_offloads->wq, &work->work); 552 + break; 553 + default: 554 + break; 555 + } 556 + return NOTIFY_DONE; 557 + } 558 + 559 + static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge) 560 + { 561 + struct ice_esw_br_fdb_entry *entry, *tmp; 562 + 563 + list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) 564 + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry); 565 + } 566 + 567 + static void 568 + ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable) 569 + { 570 + if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) 571 + return; 572 + 573 + ice_eswitch_br_fdb_flush(bridge); 574 + if (enable) 575 + bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING; 576 + else 577 + bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING; 578 + } 579 + 580 + static void 581 + ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port) 582 + { 583 + struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0); 584 + struct ice_vsi_vlan_ops *vlan_ops; 585 + 586 + vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi); 587 + 588 + vlan_ops->del_vlan(port->vsi, &port_vlan); 589 + vlan_ops->clear_port_vlan(port->vsi); 590 + 591 + ice_vf_vsi_disable_port_vlan(port->vsi); 592 + 593 + port->pvid = 0; 594 + } 595 + 596 + static void 597 + ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port, 598 + struct ice_esw_br_vlan *vlan) 599 + { 600 + struct ice_esw_br_fdb_entry *fdb_entry, *tmp; 601 + struct ice_esw_br *bridge = port->bridge; 602 + 603 + trace_ice_eswitch_br_vlan_cleanup(vlan); 604 + 605 + list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) { 606 + if (vlan->vid == fdb_entry->data.vid) 607 + ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry); 608 + } 609 + 610 + xa_erase(&port->vlans, vlan->vid); 611 + if (port->pvid == vlan->vid) 612 + ice_eswitch_br_clear_pvid(port); 613 + kfree(vlan); 614 + } 615 + 616 + static void 
ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port) 617 + { 618 + struct ice_esw_br_vlan *vlan; 619 + unsigned long index; 620 + 621 + xa_for_each(&port->vlans, index, vlan) 622 + ice_eswitch_br_vlan_cleanup(port, vlan); 623 + } 624 + 625 + static int 626 + ice_eswitch_br_set_pvid(struct ice_esw_br_port *port, 627 + struct ice_esw_br_vlan *vlan) 628 + { 629 + struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0); 630 + struct device *dev = ice_pf_to_dev(port->vsi->back); 631 + struct ice_vsi_vlan_ops *vlan_ops; 632 + int err; 633 + 634 + if (port->pvid == vlan->vid || vlan->vid == 1) 635 + return 0; 636 + 637 + /* Setting port vlan on uplink isn't supported by hw */ 638 + if (port->type == ICE_ESWITCH_BR_UPLINK_PORT) 639 + return -EOPNOTSUPP; 640 + 641 + if (port->pvid) { 642 + dev_info(dev, 643 + "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n", 644 + port->vsi_idx, port->pvid); 645 + return -EEXIST; 646 + } 647 + 648 + ice_vf_vsi_enable_port_vlan(port->vsi); 649 + 650 + vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi); 651 + err = vlan_ops->set_port_vlan(port->vsi, &port_vlan); 652 + if (err) 653 + return err; 654 + 655 + err = vlan_ops->add_vlan(port->vsi, &port_vlan); 656 + if (err) 657 + return err; 658 + 659 + ice_eswitch_br_port_vlans_flush(port); 660 + port->pvid = vlan->vid; 661 + 662 + return 0; 663 + } 664 + 665 + static struct ice_esw_br_vlan * 666 + ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port) 667 + { 668 + struct device *dev = ice_pf_to_dev(port->vsi->back); 669 + struct ice_esw_br_vlan *vlan; 670 + int err; 671 + 672 + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 673 + if (!vlan) 674 + return ERR_PTR(-ENOMEM); 675 + 676 + vlan->vid = vid; 677 + vlan->flags = flags; 678 + if ((flags & BRIDGE_VLAN_INFO_PVID) && 679 + (flags & BRIDGE_VLAN_INFO_UNTAGGED)) { 680 + err = ice_eswitch_br_set_pvid(port, vlan); 681 + if (err) 682 + goto err_set_pvid; 683 + } else if 
((flags & BRIDGE_VLAN_INFO_PVID) || 684 + (flags & BRIDGE_VLAN_INFO_UNTAGGED)) { 685 + dev_info(dev, "VLAN push and pop are supported only simultaneously\n"); 686 + err = -EOPNOTSUPP; 687 + goto err_set_pvid; 688 + } 689 + 690 + err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL); 691 + if (err) 692 + goto err_insert; 693 + 694 + trace_ice_eswitch_br_vlan_create(vlan); 695 + 696 + return vlan; 697 + 698 + err_insert: 699 + if (port->pvid) 700 + ice_eswitch_br_clear_pvid(port); 701 + err_set_pvid: 702 + kfree(vlan); 703 + return ERR_PTR(err); 704 + } 705 + 706 + static int 707 + ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid, 708 + u16 flags, struct netlink_ext_ack *extack) 709 + { 710 + struct ice_esw_br_port *port; 711 + struct ice_esw_br_vlan *vlan; 712 + 713 + port = xa_load(&bridge->ports, vsi_idx); 714 + if (!port) 715 + return -EINVAL; 716 + 717 + if (port->pvid) { 718 + dev_info(ice_pf_to_dev(port->vsi->back), 719 + "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n", 720 + port->vsi_idx, port->pvid); 721 + return -EEXIST; 722 + } 723 + 724 + vlan = xa_load(&port->vlans, vid); 725 + if (vlan) { 726 + if (vlan->flags == flags) 727 + return 0; 728 + 729 + ice_eswitch_br_vlan_cleanup(port, vlan); 730 + } 731 + 732 + vlan = ice_eswitch_br_vlan_create(vid, flags, port); 733 + if (IS_ERR(vlan)) { 734 + NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u", 735 + vid, vsi_idx); 736 + return PTR_ERR(vlan); 737 + } 738 + 739 + return 0; 740 + } 741 + 742 + static void 743 + ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid) 744 + { 745 + struct ice_esw_br_port *port; 746 + struct ice_esw_br_vlan *vlan; 747 + 748 + port = xa_load(&bridge->ports, vsi_idx); 749 + if (!port) 750 + return; 751 + 752 + vlan = xa_load(&port->vlans, vid); 753 + if (!vlan) 754 + return; 755 + 756 + ice_eswitch_br_vlan_cleanup(port, vlan); 757 + } 758 + 759 + static int 760 + 
ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx, 761 + const struct switchdev_obj *obj, 762 + struct netlink_ext_ack *extack) 763 + { 764 + struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev); 765 + struct switchdev_obj_port_vlan *vlan; 766 + int err; 767 + 768 + if (!br_port) 769 + return -EINVAL; 770 + 771 + switch (obj->id) { 772 + case SWITCHDEV_OBJ_ID_PORT_VLAN: 773 + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); 774 + err = ice_eswitch_br_port_vlan_add(br_port->bridge, 775 + br_port->vsi_idx, vlan->vid, 776 + vlan->flags, extack); 777 + return err; 778 + default: 779 + return -EOPNOTSUPP; 780 + } 781 + } 782 + 783 + static int 784 + ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx, 785 + const struct switchdev_obj *obj) 786 + { 787 + struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev); 788 + struct switchdev_obj_port_vlan *vlan; 789 + 790 + if (!br_port) 791 + return -EINVAL; 792 + 793 + switch (obj->id) { 794 + case SWITCHDEV_OBJ_ID_PORT_VLAN: 795 + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); 796 + ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx, 797 + vlan->vid); 798 + return 0; 799 + default: 800 + return -EOPNOTSUPP; 801 + } 802 + } 803 + 804 + static int 805 + ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx, 806 + const struct switchdev_attr *attr, 807 + struct netlink_ext_ack *extack) 808 + { 809 + struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev); 810 + 811 + if (!br_port) 812 + return -EINVAL; 813 + 814 + switch (attr->id) { 815 + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: 816 + ice_eswitch_br_vlan_filtering_set(br_port->bridge, 817 + attr->u.vlan_filtering); 818 + return 0; 819 + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: 820 + br_port->bridge->ageing_time = 821 + clock_t_to_jiffies(attr->u.ageing_time); 822 + return 0; 823 + default: 824 + return -EOPNOTSUPP; 825 + } 826 + } 827 + 828 + static int 829 + 
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event, 830 + void *ptr) 831 + { 832 + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 833 + int err; 834 + 835 + switch (event) { 836 + case SWITCHDEV_PORT_OBJ_ADD: 837 + err = switchdev_handle_port_obj_add(dev, ptr, 838 + ice_eswitch_br_is_dev_valid, 839 + ice_eswitch_br_port_obj_add); 840 + break; 841 + case SWITCHDEV_PORT_OBJ_DEL: 842 + err = switchdev_handle_port_obj_del(dev, ptr, 843 + ice_eswitch_br_is_dev_valid, 844 + ice_eswitch_br_port_obj_del); 845 + break; 846 + case SWITCHDEV_PORT_ATTR_SET: 847 + err = switchdev_handle_port_attr_set(dev, ptr, 848 + ice_eswitch_br_is_dev_valid, 849 + ice_eswitch_br_port_obj_attr_set); 850 + break; 851 + default: 852 + err = 0; 853 + } 854 + 855 + return notifier_from_errno(err); 856 + } 857 + 858 + static void 859 + ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, 860 + struct ice_esw_br_port *br_port) 861 + { 862 + struct ice_esw_br_fdb_entry *fdb_entry, *tmp; 863 + struct ice_vsi *vsi = br_port->vsi; 864 + 865 + list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) { 866 + if (br_port == fdb_entry->br_port) 867 + ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry); 868 + } 869 + 870 + if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) 871 + vsi->back->br_port = NULL; 872 + else if (vsi->vf && vsi->vf->repr) 873 + vsi->vf->repr->br_port = NULL; 874 + 875 + xa_erase(&bridge->ports, br_port->vsi_idx); 876 + ice_eswitch_br_port_vlans_flush(br_port); 877 + kfree(br_port); 878 + } 879 + 880 + static struct ice_esw_br_port * 881 + ice_eswitch_br_port_init(struct ice_esw_br *bridge) 882 + { 883 + struct ice_esw_br_port *br_port; 884 + 885 + br_port = kzalloc(sizeof(*br_port), GFP_KERNEL); 886 + if (!br_port) 887 + return ERR_PTR(-ENOMEM); 888 + 889 + xa_init(&br_port->vlans); 890 + 891 + br_port->bridge = bridge; 892 + 893 + return br_port; 894 + } 895 + 896 + static int 897 + ice_eswitch_br_vf_repr_port_init(struct 
ice_esw_br *bridge, 898 + struct ice_repr *repr) 899 + { 900 + struct ice_esw_br_port *br_port; 901 + int err; 902 + 903 + br_port = ice_eswitch_br_port_init(bridge); 904 + if (IS_ERR(br_port)) 905 + return PTR_ERR(br_port); 906 + 907 + br_port->vsi = repr->src_vsi; 908 + br_port->vsi_idx = br_port->vsi->idx; 909 + br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT; 910 + repr->br_port = br_port; 911 + 912 + err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL); 913 + if (err) { 914 + ice_eswitch_br_port_deinit(bridge, br_port); 915 + return err; 916 + } 917 + 918 + return 0; 919 + } 920 + 921 + static int 922 + ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf) 923 + { 924 + struct ice_vsi *vsi = pf->switchdev.uplink_vsi; 925 + struct ice_esw_br_port *br_port; 926 + int err; 927 + 928 + br_port = ice_eswitch_br_port_init(bridge); 929 + if (IS_ERR(br_port)) 930 + return PTR_ERR(br_port); 931 + 932 + br_port->vsi = vsi; 933 + br_port->vsi_idx = br_port->vsi->idx; 934 + br_port->type = ICE_ESWITCH_BR_UPLINK_PORT; 935 + pf->br_port = br_port; 936 + 937 + err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL); 938 + if (err) { 939 + ice_eswitch_br_port_deinit(bridge, br_port); 940 + return err; 941 + } 942 + 943 + return 0; 944 + } 945 + 946 + static void 947 + ice_eswitch_br_ports_flush(struct ice_esw_br *bridge) 948 + { 949 + struct ice_esw_br_port *port; 950 + unsigned long i; 951 + 952 + xa_for_each(&bridge->ports, i, port) 953 + ice_eswitch_br_port_deinit(bridge, port); 954 + } 955 + 956 + static void 957 + ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads, 958 + struct ice_esw_br *bridge) 959 + { 960 + if (!bridge) 961 + return; 962 + 963 + /* Cleanup all the ports that were added asynchronously 964 + * through NETDEV_CHANGEUPPER event. 
965 + */ 966 + ice_eswitch_br_ports_flush(bridge); 967 + WARN_ON(!xa_empty(&bridge->ports)); 968 + xa_destroy(&bridge->ports); 969 + rhashtable_destroy(&bridge->fdb_ht); 970 + 971 + br_offloads->bridge = NULL; 972 + kfree(bridge); 973 + } 974 + 975 + static struct ice_esw_br * 976 + ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex) 977 + { 978 + struct ice_esw_br *bridge; 979 + int err; 980 + 981 + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); 982 + if (!bridge) 983 + return ERR_PTR(-ENOMEM); 984 + 985 + err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params); 986 + if (err) { 987 + kfree(bridge); 988 + return ERR_PTR(err); 989 + } 990 + 991 + INIT_LIST_HEAD(&bridge->fdb_list); 992 + bridge->br_offloads = br_offloads; 993 + bridge->ifindex = ifindex; 994 + bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME); 995 + xa_init(&bridge->ports); 996 + br_offloads->bridge = bridge; 997 + 998 + return bridge; 999 + } 1000 + 1001 + static struct ice_esw_br * 1002 + ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex, 1003 + struct netlink_ext_ack *extack) 1004 + { 1005 + struct ice_esw_br *bridge = br_offloads->bridge; 1006 + 1007 + if (bridge) { 1008 + if (bridge->ifindex != ifindex) { 1009 + NL_SET_ERR_MSG_MOD(extack, 1010 + "Only one bridge is supported per eswitch"); 1011 + return ERR_PTR(-EOPNOTSUPP); 1012 + } 1013 + return bridge; 1014 + } 1015 + 1016 + /* Create the bridge if it doesn't exist yet */ 1017 + bridge = ice_eswitch_br_init(br_offloads, ifindex); 1018 + if (IS_ERR(bridge)) 1019 + NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge"); 1020 + 1021 + return bridge; 1022 + } 1023 + 1024 + static void 1025 + ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads, 1026 + struct ice_esw_br *bridge) 1027 + { 1028 + /* Remove the bridge if it exists and there are no ports left */ 1029 + if (!bridge || !xa_empty(&bridge->ports)) 1030 + return; 1031 + 1032 + ice_eswitch_br_deinit(br_offloads, 
bridge); 1033 + } 1034 + 1035 + static int 1036 + ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads, 1037 + struct net_device *dev, int ifindex, 1038 + struct netlink_ext_ack *extack) 1039 + { 1040 + struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev); 1041 + struct ice_esw_br *bridge; 1042 + 1043 + if (!br_port) { 1044 + NL_SET_ERR_MSG_MOD(extack, 1045 + "Port representor is not attached to any bridge"); 1046 + return -EINVAL; 1047 + } 1048 + 1049 + if (br_port->bridge->ifindex != ifindex) { 1050 + NL_SET_ERR_MSG_MOD(extack, 1051 + "Port representor is attached to another bridge"); 1052 + return -EINVAL; 1053 + } 1054 + 1055 + bridge = br_port->bridge; 1056 + 1057 + trace_ice_eswitch_br_port_unlink(br_port); 1058 + ice_eswitch_br_port_deinit(br_port->bridge, br_port); 1059 + ice_eswitch_br_verify_deinit(br_offloads, bridge); 1060 + 1061 + return 0; 1062 + } 1063 + 1064 + static int 1065 + ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads, 1066 + struct net_device *dev, int ifindex, 1067 + struct netlink_ext_ack *extack) 1068 + { 1069 + struct ice_esw_br *bridge; 1070 + int err; 1071 + 1072 + if (ice_eswitch_br_netdev_to_port(dev)) { 1073 + NL_SET_ERR_MSG_MOD(extack, 1074 + "Port is already attached to the bridge"); 1075 + return -EINVAL; 1076 + } 1077 + 1078 + bridge = ice_eswitch_br_get(br_offloads, ifindex, extack); 1079 + if (IS_ERR(bridge)) 1080 + return PTR_ERR(bridge); 1081 + 1082 + if (ice_is_port_repr_netdev(dev)) { 1083 + struct ice_repr *repr = ice_netdev_to_repr(dev); 1084 + 1085 + err = ice_eswitch_br_vf_repr_port_init(bridge, repr); 1086 + trace_ice_eswitch_br_port_link(repr->br_port); 1087 + } else { 1088 + struct ice_pf *pf = ice_netdev_to_pf(dev); 1089 + 1090 + err = ice_eswitch_br_uplink_port_init(bridge, pf); 1091 + trace_ice_eswitch_br_port_link(pf->br_port); 1092 + } 1093 + if (err) { 1094 + NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port"); 1095 + goto err_port_init; 1096 + } 1097 + 1098 
+ return 0; 1099 + 1100 + err_port_init: 1101 + ice_eswitch_br_verify_deinit(br_offloads, bridge); 1102 + return err; 1103 + } 1104 + 1105 + static int 1106 + ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr) 1107 + { 1108 + struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1109 + struct netdev_notifier_changeupper_info *info = ptr; 1110 + struct ice_esw_br_offloads *br_offloads; 1111 + struct netlink_ext_ack *extack; 1112 + struct net_device *upper; 1113 + 1114 + br_offloads = ice_nb_to_br_offloads(nb, netdev_nb); 1115 + 1116 + if (!ice_eswitch_br_is_dev_valid(dev)) 1117 + return 0; 1118 + 1119 + upper = info->upper_dev; 1120 + if (!netif_is_bridge_master(upper)) 1121 + return 0; 1122 + 1123 + extack = netdev_notifier_info_to_extack(&info->info); 1124 + 1125 + if (info->linking) 1126 + return ice_eswitch_br_port_link(br_offloads, dev, 1127 + upper->ifindex, extack); 1128 + else 1129 + return ice_eswitch_br_port_unlink(br_offloads, dev, 1130 + upper->ifindex, extack); 1131 + } 1132 + 1133 + static int 1134 + ice_eswitch_br_port_event(struct notifier_block *nb, 1135 + unsigned long event, void *ptr) 1136 + { 1137 + int err = 0; 1138 + 1139 + switch (event) { 1140 + case NETDEV_CHANGEUPPER: 1141 + err = ice_eswitch_br_port_changeupper(nb, ptr); 1142 + break; 1143 + } 1144 + 1145 + return notifier_from_errno(err); 1146 + } 1147 + 1148 + static void 1149 + ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) 1150 + { 1151 + struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads; 1152 + 1153 + ASSERT_RTNL(); 1154 + 1155 + if (!br_offloads) 1156 + return; 1157 + 1158 + ice_eswitch_br_deinit(br_offloads, br_offloads->bridge); 1159 + 1160 + pf->switchdev.br_offloads = NULL; 1161 + kfree(br_offloads); 1162 + } 1163 + 1164 + static struct ice_esw_br_offloads * 1165 + ice_eswitch_br_offloads_alloc(struct ice_pf *pf) 1166 + { 1167 + struct ice_esw_br_offloads *br_offloads; 1168 + 1169 + ASSERT_RTNL(); 1170 + 1171 + if 
(pf->switchdev.br_offloads) 1172 + return ERR_PTR(-EEXIST); 1173 + 1174 + br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL); 1175 + if (!br_offloads) 1176 + return ERR_PTR(-ENOMEM); 1177 + 1178 + pf->switchdev.br_offloads = br_offloads; 1179 + br_offloads->pf = pf; 1180 + 1181 + return br_offloads; 1182 + } 1183 + 1184 + void 1185 + ice_eswitch_br_offloads_deinit(struct ice_pf *pf) 1186 + { 1187 + struct ice_esw_br_offloads *br_offloads; 1188 + 1189 + br_offloads = pf->switchdev.br_offloads; 1190 + if (!br_offloads) 1191 + return; 1192 + 1193 + cancel_delayed_work_sync(&br_offloads->update_work); 1194 + unregister_netdevice_notifier(&br_offloads->netdev_nb); 1195 + unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk); 1196 + unregister_switchdev_notifier(&br_offloads->switchdev_nb); 1197 + destroy_workqueue(br_offloads->wq); 1198 + /* Although notifier block is unregistered just before, 1199 + * so we don't get any new events, some events might be 1200 + * already in progress. Hold the rtnl lock and wait for 1201 + * them to finished. 
1202 + */ 1203 + rtnl_lock(); 1204 + ice_eswitch_br_offloads_dealloc(pf); 1205 + rtnl_unlock(); 1206 + } 1207 + 1208 + static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads) 1209 + { 1210 + struct ice_esw_br *bridge = br_offloads->bridge; 1211 + struct ice_esw_br_fdb_entry *entry, *tmp; 1212 + 1213 + if (!bridge) 1214 + return; 1215 + 1216 + rtnl_lock(); 1217 + list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) { 1218 + if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER) 1219 + continue; 1220 + 1221 + if (time_is_after_eq_jiffies(entry->last_use + 1222 + bridge->ageing_time)) 1223 + continue; 1224 + 1225 + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry); 1226 + } 1227 + rtnl_unlock(); 1228 + } 1229 + 1230 + static void ice_eswitch_br_update_work(struct work_struct *work) 1231 + { 1232 + struct ice_esw_br_offloads *br_offloads; 1233 + 1234 + br_offloads = ice_work_to_br_offloads(work); 1235 + 1236 + ice_eswitch_br_update(br_offloads); 1237 + 1238 + queue_delayed_work(br_offloads->wq, &br_offloads->update_work, 1239 + ICE_ESW_BRIDGE_UPDATE_INTERVAL); 1240 + } 1241 + 1242 + int 1243 + ice_eswitch_br_offloads_init(struct ice_pf *pf) 1244 + { 1245 + struct ice_esw_br_offloads *br_offloads; 1246 + struct device *dev = ice_pf_to_dev(pf); 1247 + int err; 1248 + 1249 + rtnl_lock(); 1250 + br_offloads = ice_eswitch_br_offloads_alloc(pf); 1251 + rtnl_unlock(); 1252 + if (IS_ERR(br_offloads)) { 1253 + dev_err(dev, "Failed to init eswitch bridge\n"); 1254 + return PTR_ERR(br_offloads); 1255 + } 1256 + 1257 + br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0); 1258 + if (!br_offloads->wq) { 1259 + err = -ENOMEM; 1260 + dev_err(dev, "Failed to allocate bridge workqueue\n"); 1261 + goto err_alloc_wq; 1262 + } 1263 + 1264 + br_offloads->switchdev_nb.notifier_call = 1265 + ice_eswitch_br_switchdev_event; 1266 + err = register_switchdev_notifier(&br_offloads->switchdev_nb); 1267 + if (err) { 1268 + dev_err(dev, 1269 + "Failed to 
register switchdev notifier\n"); 1270 + goto err_reg_switchdev_nb; 1271 + } 1272 + 1273 + br_offloads->switchdev_blk.notifier_call = 1274 + ice_eswitch_br_event_blocking; 1275 + err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk); 1276 + if (err) { 1277 + dev_err(dev, 1278 + "Failed to register bridge blocking switchdev notifier\n"); 1279 + goto err_reg_switchdev_blk; 1280 + } 1281 + 1282 + br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event; 1283 + err = register_netdevice_notifier(&br_offloads->netdev_nb); 1284 + if (err) { 1285 + dev_err(dev, 1286 + "Failed to register bridge port event notifier\n"); 1287 + goto err_reg_netdev_nb; 1288 + } 1289 + 1290 + INIT_DELAYED_WORK(&br_offloads->update_work, 1291 + ice_eswitch_br_update_work); 1292 + queue_delayed_work(br_offloads->wq, &br_offloads->update_work, 1293 + ICE_ESW_BRIDGE_UPDATE_INTERVAL); 1294 + 1295 + return 0; 1296 + 1297 + err_reg_netdev_nb: 1298 + unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk); 1299 + err_reg_switchdev_blk: 1300 + unregister_switchdev_notifier(&br_offloads->switchdev_nb); 1301 + err_reg_switchdev_nb: 1302 + destroy_workqueue(br_offloads->wq); 1303 + err_alloc_wq: 1304 + rtnl_lock(); 1305 + ice_eswitch_br_offloads_dealloc(pf); 1306 + rtnl_unlock(); 1307 + 1308 + return err; 1309 + }
+120
drivers/net/ethernet/intel/ice/ice_eswitch_br.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2023, Intel Corporation. */ 3 + 4 + #ifndef _ICE_ESWITCH_BR_H_ 5 + #define _ICE_ESWITCH_BR_H_ 6 + 7 + #include <linux/rhashtable.h> 8 + #include <linux/workqueue.h> 9 + 10 + struct ice_esw_br_fdb_data { 11 + unsigned char addr[ETH_ALEN]; 12 + u16 vid; 13 + }; 14 + 15 + struct ice_esw_br_flow { 16 + struct ice_rule_query_data *fwd_rule; 17 + struct ice_rule_query_data *guard_rule; 18 + }; 19 + 20 + enum { 21 + ICE_ESWITCH_BR_FDB_ADDED_BY_USER = BIT(0), 22 + }; 23 + 24 + struct ice_esw_br_fdb_entry { 25 + struct ice_esw_br_fdb_data data; 26 + struct rhash_head ht_node; 27 + struct list_head list; 28 + 29 + int flags; 30 + 31 + struct net_device *dev; 32 + struct ice_esw_br_port *br_port; 33 + struct ice_esw_br_flow *flow; 34 + 35 + unsigned long last_use; 36 + }; 37 + 38 + enum ice_esw_br_port_type { 39 + ICE_ESWITCH_BR_UPLINK_PORT = 0, 40 + ICE_ESWITCH_BR_VF_REPR_PORT = 1, 41 + }; 42 + 43 + struct ice_esw_br_port { 44 + struct ice_esw_br *bridge; 45 + struct ice_vsi *vsi; 46 + enum ice_esw_br_port_type type; 47 + u16 vsi_idx; 48 + u16 pvid; 49 + struct xarray vlans; 50 + }; 51 + 52 + enum { 53 + ICE_ESWITCH_BR_VLAN_FILTERING = BIT(0), 54 + }; 55 + 56 + struct ice_esw_br { 57 + struct ice_esw_br_offloads *br_offloads; 58 + struct xarray ports; 59 + 60 + struct rhashtable fdb_ht; 61 + struct list_head fdb_list; 62 + 63 + int ifindex; 64 + u32 flags; 65 + unsigned long ageing_time; 66 + }; 67 + 68 + struct ice_esw_br_offloads { 69 + struct ice_pf *pf; 70 + struct ice_esw_br *bridge; 71 + struct notifier_block netdev_nb; 72 + struct notifier_block switchdev_blk; 73 + struct notifier_block switchdev_nb; 74 + 75 + struct workqueue_struct *wq; 76 + struct delayed_work update_work; 77 + }; 78 + 79 + struct ice_esw_br_fdb_work { 80 + struct work_struct work; 81 + struct switchdev_notifier_fdb_info fdb_info; 82 + struct net_device *dev; 83 + unsigned long event; 84 + }; 85 + 86 + struct ice_esw_br_vlan { 87 + 
u16 vid; 88 + u16 flags; 89 + }; 90 + 91 + #define ice_nb_to_br_offloads(nb, nb_name) \ 92 + container_of(nb, \ 93 + struct ice_esw_br_offloads, \ 94 + nb_name) 95 + 96 + #define ice_work_to_br_offloads(w) \ 97 + container_of(w, \ 98 + struct ice_esw_br_offloads, \ 99 + update_work.work) 100 + 101 + #define ice_work_to_fdb_work(w) \ 102 + container_of(w, \ 103 + struct ice_esw_br_fdb_work, \ 104 + work) 105 + 106 + static inline bool ice_eswitch_br_is_vid_valid(u16 vid) 107 + { 108 + /* In trunk VLAN mode, for untagged traffic the bridge sends requests 109 + * to offload VLAN 1 with pvid and untagged flags set. Since these 110 + * flags are not supported, add a MAC filter instead. 111 + */ 112 + return vid > 1; 113 + } 114 + 115 + void 116 + ice_eswitch_br_offloads_deinit(struct ice_pf *pf); 117 + int 118 + ice_eswitch_br_offloads_init(struct ice_pf *pf); 119 + 120 + #endif /* _ICE_ESWITCH_BR_H_ */
+25
drivers/net/ethernet/intel/ice/ice_lib.c
··· 4076 4076 { 4077 4077 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; 4078 4078 } 4079 + 4080 + /** 4081 + * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit 4082 + * @vsi: pointer to VSI structure 4083 + * @set: set or unset the bit 4084 + */ 4085 + int 4086 + ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set) 4087 + { 4088 + struct ice_vsi_ctx ctx = { 4089 + .info = vsi->info, 4090 + }; 4091 + 4092 + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 4093 + if (set) 4094 + ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB; 4095 + else 4096 + ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB; 4097 + 4098 + if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) 4099 + return -ENODEV; 4100 + 4101 + vsi->info = ctx.info; 4102 + return 0; 4103 + }
+1
drivers/net/ethernet/intel/ice/ice_lib.h
··· 157 157 void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); 158 158 159 159 void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); 160 + int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set); 160 161 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); 161 162 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); 162 163 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
+2 -2
drivers/net/ethernet/intel/ice/ice_main.c
··· 80 80 void *data, 81 81 void (*cleanup)(struct flow_block_cb *block_cb)); 82 82 83 - bool netif_is_ice(struct net_device *dev) 83 + bool netif_is_ice(const struct net_device *dev) 84 84 { 85 85 return dev && (dev->netdev_ops == &ice_netdev_ops); 86 86 } ··· 5704 5704 struct ice_netdev_priv *np = netdev_priv(netdev); 5705 5705 struct ice_vsi *vsi = np->vsi; 5706 5706 5707 - if (!vsi) 5707 + if (!vsi || ice_is_switchdev_running(vsi->back)) 5708 5708 return; 5709 5709 5710 5710 /* Set the flags to synchronize filters
+1 -1
drivers/net/ethernet/intel/ice/ice_repr.c
··· 254 254 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev 255 255 * @netdev: pointer to netdev 256 256 */ 257 - bool ice_is_port_repr_netdev(struct net_device *netdev) 257 + bool ice_is_port_repr_netdev(const struct net_device *netdev) 258 258 { 259 259 return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); 260 260 }
+2 -1
drivers/net/ethernet/intel/ice/ice_repr.h
··· 12 12 struct ice_q_vector *q_vector; 13 13 struct net_device *netdev; 14 14 struct metadata_dst *dst; 15 + struct ice_esw_br_port *br_port; 15 16 #ifdef CONFIG_ICE_SWITCHDEV 16 17 /* info about slow path rule */ 17 18 struct ice_rule_query_data sp_rule; ··· 28 27 void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); 29 28 30 29 struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); 31 - bool ice_is_port_repr_netdev(struct net_device *netdev); 30 + bool ice_is_port_repr_netdev(const struct net_device *netdev); 32 31 #endif
+62 -88
drivers/net/ethernet/intel/ice/ice_switch.c
··· 2272 2272 /* Propagate some data to the recipe database */ 2273 2273 recps[idx].is_root = !!is_root; 2274 2274 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; 2275 + recps[idx].need_pass_l2 = root_bufs.content.act_ctrl & 2276 + ICE_AQ_RECIPE_ACT_NEED_PASS_L2; 2277 + recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl & 2278 + ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; 2275 2279 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); 2276 2280 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { 2277 2281 recps[idx].chain_idx = root_bufs.content.result_indx & ··· 4617 4613 * ice_find_recp - find a recipe 4618 4614 * @hw: pointer to the hardware structure 4619 4615 * @lkup_exts: extension sequence to match 4620 - * @tun_type: type of recipe tunnel 4616 + * @rinfo: information regarding the rule e.g. priority and action info 4621 4617 * 4622 4618 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. 4623 4619 */ 4624 4620 static u16 4625 4621 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, 4626 - enum ice_sw_tunnel_type tun_type) 4622 + const struct ice_adv_rule_info *rinfo) 4627 4623 { 4628 4624 bool refresh_required = true; 4629 4625 struct ice_sw_recipe *recp; ··· 4684 4680 } 4685 4681 /* If for "i"th recipe the found was never set to false 4686 4682 * then it means we found our match 4687 - * Also tun type of recipe needs to be checked 4683 + * Also tun type and *_pass_l2 of recipe needs to be 4684 + * checked 4688 4685 */ 4689 - if (found && recp[i].tun_type == tun_type) 4686 + if (found && recp[i].tun_type == rinfo->tun_type && 4687 + recp[i].need_pass_l2 == rinfo->need_pass_l2 && 4688 + recp[i].allow_pass_l2 == rinfo->allow_pass_l2) 4690 4689 return i; /* Return the recipe ID */ 4691 4690 } 4692 4691 } ··· 4959 4952 unsigned long *profiles) 4960 4953 { 4961 4954 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); 4955 + struct ice_aqc_recipe_content *content; 4962 4956 struct ice_aqc_recipe_data_elem *tmp; 
4963 4957 struct ice_aqc_recipe_data_elem *buf; 4964 4958 struct ice_recp_grp_entry *entry; ··· 5020 5012 if (status) 5021 5013 goto err_unroll; 5022 5014 5015 + content = &buf[recps].content; 5016 + 5023 5017 /* Clear the result index of the located recipe, as this will be 5024 5018 * updated, if needed, later in the recipe creation process. 5025 5019 */ ··· 5032 5022 /* if the recipe is a non-root recipe RID should be programmed 5033 5023 * as 0 for the rules to be applied correctly. 5034 5024 */ 5035 - buf[recps].content.rid = 0; 5036 - memset(&buf[recps].content.lkup_indx, 0, 5037 - sizeof(buf[recps].content.lkup_indx)); 5025 + content->rid = 0; 5026 + memset(&content->lkup_indx, 0, 5027 + sizeof(content->lkup_indx)); 5038 5028 5039 5029 /* All recipes use look-up index 0 to match switch ID. */ 5040 - buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 5041 - buf[recps].content.mask[0] = 5042 - cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 5030 + content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 5031 + content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 5043 5032 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask 5044 5033 * to be 0 5045 5034 */ 5046 5035 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 5047 - buf[recps].content.lkup_indx[i] = 0x80; 5048 - buf[recps].content.mask[i] = 0; 5036 + content->lkup_indx[i] = 0x80; 5037 + content->mask[i] = 0; 5049 5038 } 5050 5039 5051 5040 for (i = 0; i < entry->r_group.n_val_pairs; i++) { 5052 - buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; 5053 - buf[recps].content.mask[i + 1] = 5054 - cpu_to_le16(entry->fv_mask[i]); 5041 + content->lkup_indx[i + 1] = entry->fv_idx[i]; 5042 + content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]); 5055 5043 } 5056 5044 5057 5045 if (rm->n_grp_count > 1) { ··· 5063 5055 } 5064 5056 5065 5057 entry->chain_idx = chain_idx; 5066 - buf[recps].content.result_indx = 5058 + content->result_indx = 5067 5059 ICE_AQ_RECIPE_RESULT_EN | 5068 5060 ((chain_idx << 
ICE_AQ_RECIPE_RESULT_DATA_S) & 5069 5061 ICE_AQ_RECIPE_RESULT_DATA_M); ··· 5077 5069 ICE_MAX_NUM_RECIPES); 5078 5070 set_bit(buf[recps].recipe_indx, 5079 5071 (unsigned long *)buf[recps].recipe_bitmap); 5080 - buf[recps].content.act_ctrl_fwd_priority = rm->priority; 5072 + content->act_ctrl_fwd_priority = rm->priority; 5073 + 5074 + if (rm->need_pass_l2) 5075 + content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; 5076 + 5077 + if (rm->allow_pass_l2) 5078 + content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; 5081 5079 recps++; 5082 5080 } 5083 5081 ··· 5121 5107 if (status) 5122 5108 goto err_unroll; 5123 5109 5110 + content = &buf[recps].content; 5111 + 5124 5112 buf[recps].recipe_indx = (u8)rid; 5125 - buf[recps].content.rid = (u8)rid; 5126 - buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; 5113 + content->rid = (u8)rid; 5114 + content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT; 5127 5115 /* the new entry created should also be part of rg_list to 5128 5116 * make sure we have complete recipe 5129 5117 */ ··· 5137 5121 goto err_unroll; 5138 5122 } 5139 5123 last_chain_entry->rid = rid; 5140 - memset(&buf[recps].content.lkup_indx, 0, 5141 - sizeof(buf[recps].content.lkup_indx)); 5124 + memset(&content->lkup_indx, 0, sizeof(content->lkup_indx)); 5142 5125 /* All recipes use look-up index 0 to match switch ID. 
*/ 5143 - buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 5144 - buf[recps].content.mask[0] = 5145 - cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 5126 + content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 5127 + content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 5146 5128 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 5147 - buf[recps].content.lkup_indx[i] = 5148 - ICE_AQ_RECIPE_LKUP_IGNORE; 5149 - buf[recps].content.mask[i] = 0; 5129 + content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; 5130 + content->mask[i] = 0; 5150 5131 } 5151 5132 5152 5133 i = 1; ··· 5155 5142 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; 5156 5143 list_for_each_entry(entry, &rm->rg_list, l_entry) { 5157 5144 last_chain_entry->fv_idx[i] = entry->chain_idx; 5158 - buf[recps].content.lkup_indx[i] = entry->chain_idx; 5159 - buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); 5145 + content->lkup_indx[i] = entry->chain_idx; 5146 + content->mask[i++] = cpu_to_le16(0xFFFF); 5160 5147 set_bit(entry->rid, rm->r_bitmap); 5161 5148 } 5162 5149 list_add(&last_chain_entry->l_entry, &rm->rg_list); ··· 5168 5155 status = -EINVAL; 5169 5156 goto err_unroll; 5170 5157 } 5171 - buf[recps].content.act_ctrl_fwd_priority = rm->priority; 5158 + content->act_ctrl_fwd_priority = rm->priority; 5172 5159 5173 5160 recps++; 5174 5161 rm->root_rid = (u8)rid; ··· 5233 5220 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; 5234 5221 recp->n_grp_count = rm->n_grp_count; 5235 5222 recp->tun_type = rm->tun_type; 5223 + recp->need_pass_l2 = rm->need_pass_l2; 5224 + recp->allow_pass_l2 = rm->allow_pass_l2; 5236 5225 recp->recp_created = true; 5237 5226 } 5238 5227 rm->root_buf = buf; ··· 5403 5388 /* set the recipe priority if specified */ 5404 5389 rm->priority = (u8)rinfo->priority; 5405 5390 5391 + rm->need_pass_l2 = rinfo->need_pass_l2; 5392 + rm->allow_pass_l2 = rinfo->allow_pass_l2; 5393 + 5406 5394 /* Find offsets from the field vector. Pick the first one for all the 5407 5395 * recipes. 
5408 5396 */ ··· 5421 5403 } 5422 5404 5423 5405 /* Look for a recipe which matches our requested fv / mask list */ 5424 - *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); 5406 + *rid = ice_find_recp(hw, lkup_exts, rinfo); 5425 5407 if (*rid < ICE_MAX_NUM_RECIPES) 5426 5408 /* Success if found a recipe that match the existing criteria */ 5427 5409 goto err_unroll; ··· 5857 5839 return first->sw_act.flag == second->sw_act.flag && 5858 5840 first->tun_type == second->tun_type && 5859 5841 first->vlan_type == second->vlan_type && 5860 - first->src_vsi == second->src_vsi; 5842 + first->src_vsi == second->src_vsi && 5843 + first->need_pass_l2 == second->need_pass_l2 && 5844 + first->allow_pass_l2 == second->allow_pass_l2; 5861 5845 } 5862 5846 5863 5847 /** ··· 6098 6078 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || 6099 6079 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || 6100 6080 rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || 6101 - rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) { 6081 + rinfo->sw_act.fltr_act == ICE_DROP_PACKET || 6082 + rinfo->sw_act.fltr_act == ICE_NOP)) { 6102 6083 status = -EIO; 6103 6084 goto free_pkt_profile; 6104 6085 } ··· 6110 6089 goto free_pkt_profile; 6111 6090 } 6112 6091 6113 - if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) 6092 + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || 6093 + rinfo->sw_act.fltr_act == ICE_NOP) 6114 6094 rinfo->sw_act.fwd_id.hw_vsi_id = 6115 6095 ice_get_hw_vsi_num(hw, vsi_handle); 6116 6096 ··· 6180 6158 case ICE_DROP_PACKET: 6181 6159 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | 6182 6160 ICE_SINGLE_ACT_VALID_BIT; 6161 + break; 6162 + case ICE_NOP: 6163 + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, 6164 + rinfo->sw_act.fwd_id.hw_vsi_id); 6165 + act &= ~ICE_SINGLE_ACT_VALID_BIT; 6183 6166 break; 6184 6167 default: 6185 6168 status = -EIO; ··· 6466 6439 return -EIO; 6467 6440 } 6468 6441 6469 - rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); 6442 + rid = ice_find_recp(hw, &lkup_exts, rinfo); 
6470 6443 /* If did not find a recipe that match the existing criteria */ 6471 6444 if (rid == ICE_MAX_NUM_RECIPES) 6472 6445 return -EINVAL; ··· 6557 6530 } 6558 6531 /* either list is empty or unable to find rule */ 6559 6532 return -ENOENT; 6560 - } 6561 - 6562 - /** 6563 - * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a 6564 - * given VSI handle 6565 - * @hw: pointer to the hardware structure 6566 - * @vsi_handle: VSI handle for which we are supposed to remove all the rules. 6567 - * 6568 - * This function is used to remove all the rules for a given VSI and as soon 6569 - * as removing a rule fails, it will return immediately with the error code, 6570 - * else it will return success. 6571 - */ 6572 - int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) 6573 - { 6574 - struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; 6575 - struct ice_vsi_list_map_info *map_info; 6576 - struct ice_adv_rule_info rinfo; 6577 - struct list_head *list_head; 6578 - struct ice_switch_info *sw; 6579 - int status; 6580 - u8 rid; 6581 - 6582 - sw = hw->switch_info; 6583 - for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { 6584 - if (!sw->recp_list[rid].recp_created) 6585 - continue; 6586 - if (!sw->recp_list[rid].adv_rule) 6587 - continue; 6588 - 6589 - list_head = &sw->recp_list[rid].filt_rules; 6590 - list_for_each_entry_safe(list_itr, tmp_entry, list_head, 6591 - list_entry) { 6592 - rinfo = list_itr->rule_info; 6593 - 6594 - if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { 6595 - map_info = list_itr->vsi_list_info; 6596 - if (!map_info) 6597 - continue; 6598 - 6599 - if (!test_bit(vsi_handle, map_info->vsi_map)) 6600 - continue; 6601 - } else if (rinfo.sw_act.vsi_handle != vsi_handle) { 6602 - continue; 6603 - } 6604 - 6605 - rinfo.sw_act.vsi_handle = vsi_handle; 6606 - status = ice_rem_adv_rule(hw, list_itr->lkups, 6607 - list_itr->lkups_cnt, &rinfo); 6608 - if (status) 6609 - return status; 6610 - } 6611 - } 6612 - return 0; 6613 
6533 } 6614 6534 6615 6535 /**
+5 -1
drivers/net/ethernet/intel/ice/ice_switch.h
··· 191 191 u16 vlan_type; 192 192 u16 fltr_rule_id; 193 193 u32 priority; 194 + u16 need_pass_l2:1; 195 + u16 allow_pass_l2:1; 194 196 u16 src_vsi; 195 197 struct ice_sw_act_ctrl sw_act; 196 198 struct ice_adv_rule_flags_info flags_info; ··· 255 253 * priority. 256 254 */ 257 255 u8 priority; 256 + 257 + u8 need_pass_l2:1; 258 + u8 allow_pass_l2:1; 258 259 259 260 struct list_head rg_list; 260 261 ··· 384 379 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, 385 380 bool rm_vlan_promisc); 386 381 387 - int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); 388 382 int 389 383 ice_rem_adv_rule_by_id(struct ice_hw *hw, 390 384 struct ice_rule_query_data *remove_entry);
+90
drivers/net/ethernet/intel/ice/ice_trace.h
··· 21 21 #define _ICE_TRACE_H_ 22 22 23 23 #include <linux/tracepoint.h> 24 + #include "ice_eswitch_br.h" 24 25 25 26 /* ice_trace() macro enables shared code to refer to trace points 26 27 * like: ··· 240 239 DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req); 241 240 DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done); 242 241 DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete); 242 + 243 + DECLARE_EVENT_CLASS(ice_esw_br_fdb_template, 244 + TP_PROTO(struct ice_esw_br_fdb_entry *fdb), 245 + TP_ARGS(fdb), 246 + TP_STRUCT__entry(__array(char, dev_name, IFNAMSIZ) 247 + __array(unsigned char, addr, ETH_ALEN) 248 + __field(u16, vid) 249 + __field(int, flags)), 250 + TP_fast_assign(strscpy(__entry->dev_name, 251 + netdev_name(fdb->dev), 252 + IFNAMSIZ); 253 + memcpy(__entry->addr, fdb->data.addr, ETH_ALEN); 254 + __entry->vid = fdb->data.vid; 255 + __entry->flags = fdb->flags;), 256 + TP_printk("net_device=%s addr=%pM vid=%u flags=%x", 257 + __entry->dev_name, 258 + __entry->addr, 259 + __entry->vid, 260 + __entry->flags) 261 + ); 262 + 263 + DEFINE_EVENT(ice_esw_br_fdb_template, 264 + ice_eswitch_br_fdb_entry_create, 265 + TP_PROTO(struct ice_esw_br_fdb_entry *fdb), 266 + TP_ARGS(fdb) 267 + ); 268 + 269 + DEFINE_EVENT(ice_esw_br_fdb_template, 270 + ice_eswitch_br_fdb_entry_find_and_delete, 271 + TP_PROTO(struct ice_esw_br_fdb_entry *fdb), 272 + TP_ARGS(fdb) 273 + ); 274 + 275 + DECLARE_EVENT_CLASS(ice_esw_br_vlan_template, 276 + TP_PROTO(struct ice_esw_br_vlan *vlan), 277 + TP_ARGS(vlan), 278 + TP_STRUCT__entry(__field(u16, vid) 279 + __field(u16, flags)), 280 + TP_fast_assign(__entry->vid = vlan->vid; 281 + __entry->flags = vlan->flags;), 282 + TP_printk("vid=%u flags=%x", 283 + __entry->vid, 284 + __entry->flags) 285 + ); 286 + 287 + DEFINE_EVENT(ice_esw_br_vlan_template, 288 + ice_eswitch_br_vlan_create, 289 + TP_PROTO(struct ice_esw_br_vlan *vlan), 290 + TP_ARGS(vlan) 291 + ); 292 + 293 + DEFINE_EVENT(ice_esw_br_vlan_template, 294 + ice_eswitch_br_vlan_cleanup, 295 + 
TP_PROTO(struct ice_esw_br_vlan *vlan), 296 + TP_ARGS(vlan) 297 + ); 298 + 299 + #define ICE_ESW_BR_PORT_NAME_L 16 300 + 301 + DECLARE_EVENT_CLASS(ice_esw_br_port_template, 302 + TP_PROTO(struct ice_esw_br_port *port), 303 + TP_ARGS(port), 304 + TP_STRUCT__entry(__field(u16, vport_num) 305 + __array(char, port_type, ICE_ESW_BR_PORT_NAME_L)), 306 + TP_fast_assign(__entry->vport_num = port->vsi_idx; 307 + if (port->type == ICE_ESWITCH_BR_UPLINK_PORT) 308 + strscpy(__entry->port_type, 309 + "Uplink", 310 + ICE_ESW_BR_PORT_NAME_L); 311 + else 312 + strscpy(__entry->port_type, 313 + "VF Representor", 314 + ICE_ESW_BR_PORT_NAME_L);), 315 + TP_printk("vport_num=%u port type=%s", 316 + __entry->vport_num, 317 + __entry->port_type) 318 + ); 319 + 320 + DEFINE_EVENT(ice_esw_br_port_template, 321 + ice_eswitch_br_port_link, 322 + TP_PROTO(struct ice_esw_br_port *port), 323 + TP_ARGS(port) 324 + ); 325 + 326 + DEFINE_EVENT(ice_esw_br_port_template, 327 + ice_eswitch_br_port_unlink, 328 + TP_PROTO(struct ice_esw_br_port *port), 329 + TP_ARGS(port) 330 + ); 243 331 244 332 /* End tracepoints */ 245 333
+1
drivers/net/ethernet/intel/ice/ice_type.h
··· 1033 1033 ICE_FWD_TO_Q, 1034 1034 ICE_FWD_TO_QGRP, 1035 1035 ICE_DROP_PACKET, 1036 + ICE_NOP, 1036 1037 ICE_INVAL_ACT 1037 1038 }; 1038 1039
+103 -83
drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
··· 21 21 return 0; 22 22 } 23 23 24 + static void ice_port_vlan_on(struct ice_vsi *vsi) 25 + { 26 + struct ice_vsi_vlan_ops *vlan_ops; 27 + struct ice_pf *pf = vsi->back; 28 + 29 + if (ice_is_dvm_ena(&pf->hw)) { 30 + vlan_ops = &vsi->outer_vlan_ops; 31 + 32 + /* setup outer VLAN ops */ 33 + vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; 34 + vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; 35 + vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; 36 + 37 + /* setup inner VLAN ops */ 38 + vlan_ops = &vsi->inner_vlan_ops; 39 + vlan_ops->add_vlan = noop_vlan_arg; 40 + vlan_ops->del_vlan = noop_vlan_arg; 41 + vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; 42 + vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; 43 + vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; 44 + vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; 45 + } else { 46 + vlan_ops = &vsi->inner_vlan_ops; 47 + 48 + vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; 49 + vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan; 50 + vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan; 51 + } 52 + vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; 53 + } 54 + 55 + static void ice_port_vlan_off(struct ice_vsi *vsi) 56 + { 57 + struct ice_vsi_vlan_ops *vlan_ops; 58 + struct ice_pf *pf = vsi->back; 59 + 60 + /* setup inner VLAN ops */ 61 + vlan_ops = &vsi->inner_vlan_ops; 62 + 63 + vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; 64 + vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; 65 + vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; 66 + vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; 67 + 68 + if (ice_is_dvm_ena(&pf->hw)) { 69 + vlan_ops = &vsi->outer_vlan_ops; 70 + 71 + vlan_ops->del_vlan = ice_vsi_del_vlan; 72 + vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping; 73 + vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping; 74 + vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion; 75 + vlan_ops->dis_insertion = 
ice_vsi_dis_outer_insertion; 76 + } else { 77 + vlan_ops->del_vlan = ice_vsi_del_vlan; 78 + } 79 + 80 + if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) 81 + vlan_ops->ena_rx_filtering = noop_vlan; 82 + else 83 + vlan_ops->ena_rx_filtering = 84 + ice_vsi_ena_rx_vlan_filtering; 85 + } 86 + 87 + /** 88 + * ice_vf_vsi_enable_port_vlan - Set VSI VLAN ops to support port VLAN 89 + * @vsi: VF's VSI being configured 90 + * 91 + * The function won't create port VLAN, it only allows to create port VLAN 92 + * using VLAN ops on the VF VSI. 93 + */ 94 + void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) 95 + { 96 + if (WARN_ON_ONCE(!vsi->vf)) 97 + return; 98 + 99 + ice_port_vlan_on(vsi); 100 + } 101 + 102 + /** 103 + * ice_vf_vsi_disable_port_vlan - Clear VSI support for creating port VLAN 104 + * @vsi: VF's VSI being configured 105 + * 106 + * The function should be called after removing port VLAN on VSI 107 + * (using VLAN ops) 108 + */ 109 + void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) 110 + { 111 + if (WARN_ON_ONCE(!vsi->vf)) 112 + return; 113 + 114 + ice_port_vlan_off(vsi); 115 + } 116 + 24 117 /** 25 118 * ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI 26 119 * @vsi: VF's VSI being configured ··· 132 39 if (WARN_ON(!vf)) 133 40 return; 134 41 135 - if (ice_is_dvm_ena(&pf->hw)) { 136 - vlan_ops = &vsi->outer_vlan_ops; 42 + if (ice_vf_is_port_vlan_ena(vf)) 43 + ice_port_vlan_on(vsi); 44 + else 45 + ice_port_vlan_off(vsi); 137 46 138 - /* outer VLAN ops regardless of port VLAN config */ 139 - vlan_ops->add_vlan = ice_vsi_add_vlan; 140 - vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; 141 - vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; 47 + vlan_ops = ice_is_dvm_ena(&pf->hw) ? 
48 + &vsi->outer_vlan_ops : &vsi->inner_vlan_ops; 142 49 143 - if (ice_vf_is_port_vlan_ena(vf)) { 144 - /* setup outer VLAN ops */ 145 - vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; 146 - /* all Rx traffic should be in the domain of the 147 - * assigned port VLAN, so prevent disabling Rx VLAN 148 - * filtering 149 - */ 150 - vlan_ops->dis_rx_filtering = noop_vlan; 151 - vlan_ops->ena_rx_filtering = 152 - ice_vsi_ena_rx_vlan_filtering; 153 - 154 - /* setup inner VLAN ops */ 155 - vlan_ops = &vsi->inner_vlan_ops; 156 - vlan_ops->add_vlan = noop_vlan_arg; 157 - vlan_ops->del_vlan = noop_vlan_arg; 158 - vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; 159 - vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; 160 - vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; 161 - vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; 162 - } else { 163 - vlan_ops->dis_rx_filtering = 164 - ice_vsi_dis_rx_vlan_filtering; 165 - 166 - if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) 167 - vlan_ops->ena_rx_filtering = noop_vlan; 168 - else 169 - vlan_ops->ena_rx_filtering = 170 - ice_vsi_ena_rx_vlan_filtering; 171 - 172 - vlan_ops->del_vlan = ice_vsi_del_vlan; 173 - vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping; 174 - vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping; 175 - vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion; 176 - vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion; 177 - 178 - /* setup inner VLAN ops */ 179 - vlan_ops = &vsi->inner_vlan_ops; 180 - 181 - vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; 182 - vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; 183 - vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; 184 - vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; 185 - } 186 - } else { 187 - vlan_ops = &vsi->inner_vlan_ops; 188 - 189 - /* inner VLAN ops regardless of port VLAN config */ 190 - vlan_ops->add_vlan = ice_vsi_add_vlan; 191 - vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; 192 
- vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; 193 - vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; 194 - 195 - if (ice_vf_is_port_vlan_ena(vf)) { 196 - vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; 197 - vlan_ops->ena_rx_filtering = 198 - ice_vsi_ena_rx_vlan_filtering; 199 - /* all Rx traffic should be in the domain of the 200 - * assigned port VLAN, so prevent disabling Rx VLAN 201 - * filtering 202 - */ 203 - vlan_ops->dis_rx_filtering = noop_vlan; 204 - } else { 205 - vlan_ops->dis_rx_filtering = 206 - ice_vsi_dis_rx_vlan_filtering; 207 - if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) 208 - vlan_ops->ena_rx_filtering = noop_vlan; 209 - else 210 - vlan_ops->ena_rx_filtering = 211 - ice_vsi_ena_rx_vlan_filtering; 212 - 213 - vlan_ops->del_vlan = ice_vsi_del_vlan; 214 - vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; 215 - vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; 216 - vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; 217 - vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; 218 - } 219 - } 50 + vlan_ops->add_vlan = ice_vsi_add_vlan; 51 + vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; 52 + vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; 53 + vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; 220 54 } 221 55 222 56 /**
+4
drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
··· 13 13 14 14 #ifdef CONFIG_PCI_IOV 15 15 void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi); 16 + void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi); 17 + void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi); 16 18 #else 17 19 static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { } 20 + static inline void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) { } 21 + static inline void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) { } 18 22 #endif /* CONFIG_PCI_IOV */ 19 23 #endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
+81 -3
drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
··· 202 202 return ice_vsi_manage_vlan_insertion(vsi); 203 203 } 204 204 205 + static void 206 + ice_save_vlan_info(struct ice_aqc_vsi_props *info, 207 + struct ice_vsi_vlan_info *vlan) 208 + { 209 + vlan->sw_flags2 = info->sw_flags2; 210 + vlan->inner_vlan_flags = info->inner_vlan_flags; 211 + vlan->outer_vlan_flags = info->outer_vlan_flags; 212 + } 213 + 214 + static void 215 + ice_restore_vlan_info(struct ice_aqc_vsi_props *info, 216 + struct ice_vsi_vlan_info *vlan) 217 + { 218 + info->sw_flags2 = vlan->sw_flags2; 219 + info->inner_vlan_flags = vlan->inner_vlan_flags; 220 + info->outer_vlan_flags = vlan->outer_vlan_flags; 221 + } 222 + 205 223 /** 206 224 * __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN 207 225 * @vsi: the VSI to update ··· 236 218 if (!ctxt) 237 219 return -ENOMEM; 238 220 221 + ice_save_vlan_info(&vsi->info, &vsi->vlan_info); 239 222 ctxt->info = vsi->info; 240 223 info = &ctxt->info; 241 224 info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED | ··· 276 257 port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT); 277 258 278 259 return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info); 260 + } 261 + 262 + int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi) 263 + { 264 + struct ice_hw *hw = &vsi->back->hw; 265 + struct ice_aqc_vsi_props *info; 266 + struct ice_vsi_ctx *ctxt; 267 + int ret; 268 + 269 + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 270 + if (!ctxt) 271 + return -ENOMEM; 272 + 273 + ice_restore_vlan_info(&vsi->info, &vsi->vlan_info); 274 + vsi->info.port_based_inner_vlan = 0; 275 + ctxt->info = vsi->info; 276 + info = &ctxt->info; 277 + info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 278 + ICE_AQ_VSI_PROP_SW_VALID); 279 + 280 + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 281 + if (ret) 282 + dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n", 283 + ret, ice_aq_str(hw->adminq.sq_last_status)); 284 + 285 + 
kfree(ctxt); 286 + return ret; 279 287 } 280 288 281 289 /** ··· 693 647 if (!ctxt) 694 648 return -ENOMEM; 695 649 650 + ice_save_vlan_info(&vsi->info, &vsi->vlan_info); 696 651 ctxt->info = vsi->info; 697 652 698 653 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; ··· 736 689 * used if DVM is supported. Also, this function should never be called directly 737 690 * as it should be part of ice_vsi_vlan_ops if it's needed. 738 691 * 739 - * This function does not support clearing the port VLAN as there is currently 740 - * no use case for this. 741 - * 742 692 * Use the ice_vlan structure passed in to set this VSI in a port VLAN. 743 693 */ 744 694 int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) ··· 748 704 port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT); 749 705 750 706 return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid); 707 + } 708 + 709 + /** 710 + * ice_vsi_clear_outer_port_vlan - clear outer port vlan 711 + * @vsi: VSI to configure 712 + * 713 + * The function is restoring previously set vlan config (saved in 714 + * vsi->vlan_info). Setting happens in port vlan configuration. 715 + */ 716 + int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi) 717 + { 718 + struct ice_hw *hw = &vsi->back->hw; 719 + struct ice_vsi_ctx *ctxt; 720 + int err; 721 + 722 + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 723 + if (!ctxt) 724 + return -ENOMEM; 725 + 726 + ice_restore_vlan_info(&vsi->info, &vsi->vlan_info); 727 + vsi->info.port_based_outer_vlan = 0; 728 + ctxt->info = vsi->info; 729 + 730 + ctxt->info.valid_sections = 731 + cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID | 732 + ICE_AQ_VSI_PROP_SW_VALID); 733 + 734 + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 735 + if (err) 736 + dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n", 737 + err, ice_aq_str(hw->adminq.sq_last_status)); 738 + 739 + kfree(ctxt); 740 + return err; 751 741 }
+8
drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
··· 7 7 #include <linux/types.h> 8 8 #include "ice_vlan.h" 9 9 10 + struct ice_vsi_vlan_info { 11 + u8 sw_flags2; 12 + u8 inner_vlan_flags; 13 + u8 outer_vlan_flags; 14 + }; 15 + 10 16 struct ice_vsi; 11 17 12 18 int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); ··· 23 17 int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid); 24 18 int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi); 25 19 int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 20 + int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi); 26 21 27 22 int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi); 28 23 int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi); ··· 35 28 int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid); 36 29 int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi); 37 30 int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); 31 + int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi); 38 32 39 33 #endif /* _ICE_VSI_VLAN_LIB_H_ */
+1
drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
··· 21 21 int (*ena_tx_filtering)(struct ice_vsi *vsi); 22 22 int (*dis_tx_filtering)(struct ice_vsi *vsi); 23 23 int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan); 24 + int (*clear_port_vlan)(struct ice_vsi *vsi); 24 25 }; 25 26 26 27 void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);