Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: dsa: felix: manage host flooding using a specific driver callback

At the time - commit 7569459a52c9 ("net: dsa: manage flooding on the CPU
ports") - not introducing a dedicated switch callback for host flooding
made sense, because for the only user, the felix driver, there was
nothing different to do for the CPU port than set the flood flags on the
CPU port just like on any other bridge port.

There are 2 reasons why this approach is not good enough, however.

(1) Other drivers, like sja1105, support configuring flooding as a
function of {ingress port, egress port}, whereas the DSA
->port_bridge_flags() function only operates on an egress port.
So with that driver we'd have useless host flooding from user ports
which don't need it.

(2) Even with the felix driver, support for multiple CPU ports makes it
difficult to piggyback on ->port_bridge_flags(). The way in which
the felix driver is going to support host-filtered addresses with
multiple CPU ports is that it will direct these addresses towards
both CPU ports (in a sort of multicast fashion), then restrict the
forwarding to only one of the two using the forwarding masks.
Consequently, flooding will also be enabled towards both CPU ports.
However, ->port_bridge_flags() gets passed the index of a single CPU
port, and that leaves the flood settings out of sync between the 2
CPU ports.

This is to say, it's better to have a specific driver method for host
flooding, which takes the user port as argument. This solves problem (1)
by allowing the driver to do different things for different user ports,
and problem (2) by abstracting the operation and letting the driver do
whatever, rather than explicitly making the DSA core point to the CPU
port it thinks needs to be touched.

This new method also creates a problem, which is that cross-chip setups
are not handled. However, I don't have hardware right now on which I can
test what the proper thing to do is, and there isn't hardware compatible
with multi-switch trees that supports host flooding. So it remains a
problem to be tackled in the future.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Vladimir Oltean and committed by
Jakub Kicinski
72c3b0c7 465c3de4

+51 -30
+32
drivers/net/dsa/ocelot/felix.c
··· 634 634 return felix->tag_proto; 635 635 } 636 636 637 + static void felix_port_set_host_flood(struct dsa_switch *ds, int port, 638 + bool uc, bool mc) 639 + { 640 + struct ocelot *ocelot = ds->priv; 641 + struct felix *felix = ocelot_to_felix(ocelot); 642 + unsigned long mask, val; 643 + 644 + if (uc) 645 + felix->host_flood_uc_mask |= BIT(port); 646 + else 647 + felix->host_flood_uc_mask &= ~BIT(port); 648 + 649 + if (mc) 650 + felix->host_flood_mc_mask |= BIT(port); 651 + else 652 + felix->host_flood_mc_mask &= ~BIT(port); 653 + 654 + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) 655 + mask = dsa_cpu_ports(ds); 656 + else 657 + mask = BIT(ocelot->num_phys_ports); 658 + 659 + val = (felix->host_flood_uc_mask) ? mask : 0; 660 + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); 661 + 662 + val = (felix->host_flood_mc_mask) ? mask : 0; 663 + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); 664 + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); 665 + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); 666 + } 667 + 637 668 static int felix_set_ageing_time(struct dsa_switch *ds, 638 669 unsigned int ageing_time) 639 670 { ··· 1907 1876 .port_get_dscp_prio = felix_port_get_dscp_prio, 1908 1877 .port_add_dscp_prio = felix_port_add_dscp_prio, 1909 1878 .port_del_dscp_prio = felix_port_del_dscp_prio, 1879 + .port_set_host_flood = felix_port_set_host_flood, 1910 1880 }; 1911 1881 1912 1882 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
+2
drivers/net/dsa/ocelot/felix.h
··· 72 72 resource_size_t imdio_base; 73 73 enum dsa_tag_protocol tag_proto; 74 74 struct kthread_worker *xmit_worker; 75 + unsigned long host_flood_uc_mask; 76 + unsigned long host_flood_mc_mask; 75 77 }; 76 78 77 79 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
+2
include/net/dsa.h
··· 978 978 int (*port_bridge_flags)(struct dsa_switch *ds, int port, 979 979 struct switchdev_brport_flags flags, 980 980 struct netlink_ext_ack *extack); 981 + void (*port_set_host_flood)(struct dsa_switch *ds, int port, 982 + bool uc, bool mc); 981 983 982 984 /* 983 985 * VLAN support
+1
net/dsa/dsa_priv.h
··· 291 291 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); 292 292 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); 293 293 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast); 294 + void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc); 294 295 295 296 /* slave.c */ 296 297 extern const struct dsa_device_ops notag_netdev_ops;
+8
net/dsa/port.c
··· 920 920 return 0; 921 921 } 922 922 923 + void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc) 924 + { 925 + struct dsa_switch *ds = dp->ds; 926 + 927 + if (ds->ops->port_set_host_flood) 928 + ds->ops->port_set_host_flood(ds, dp->index, uc, mc); 929 + } 930 + 923 931 int dsa_port_vlan_msti(struct dsa_port *dp, 924 932 const struct switchdev_vlan_msti *msti) 925 933 {
+6 -30
net/dsa/slave.c
··· 262 262 return 0; 263 263 } 264 264 265 - /* Keep flooding enabled towards this port's CPU port as long as it serves at 266 - * least one port in the tree that requires it. 267 - */ 268 - static void dsa_port_manage_cpu_flood(struct dsa_port *dp) 265 + static void dsa_slave_manage_host_flood(struct net_device *dev) 269 266 { 270 - struct switchdev_brport_flags flags = { 271 - .mask = BR_FLOOD | BR_MCAST_FLOOD, 272 - }; 273 - struct dsa_switch_tree *dst = dp->ds->dst; 274 - struct dsa_port *cpu_dp = dp->cpu_dp; 275 - struct dsa_port *other_dp; 276 - int err; 267 + bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI); 268 + struct dsa_port *dp = dsa_slave_to_port(dev); 269 + bool uc = dev->flags & IFF_PROMISC; 277 270 278 - list_for_each_entry(other_dp, &dst->ports, list) { 279 - if (!dsa_port_is_user(other_dp)) 280 - continue; 281 - 282 - if (other_dp->cpu_dp != cpu_dp) 283 - continue; 284 - 285 - if (other_dp->slave->flags & IFF_ALLMULTI) 286 - flags.val |= BR_MCAST_FLOOD; 287 - if (other_dp->slave->flags & IFF_PROMISC) 288 - flags.val |= BR_FLOOD | BR_MCAST_FLOOD; 289 - } 290 - 291 - err = dsa_port_pre_bridge_flags(dp, flags, NULL); 292 - if (err) 293 - return; 294 - 295 - dsa_port_bridge_flags(cpu_dp, flags, NULL); 271 + dsa_port_set_host_flood(dp, uc, mc); 296 272 } 297 273 298 274 static void dsa_slave_change_rx_flags(struct net_device *dev, int change) ··· 286 310 287 311 if (dsa_switch_supports_uc_filtering(ds) && 288 312 dsa_switch_supports_mc_filtering(ds)) 289 - dsa_port_manage_cpu_flood(dp); 313 + dsa_slave_manage_host_flood(dev); 290 314 } 291 315 292 316 static void dsa_slave_set_rx_mode(struct net_device *dev)