Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * net/dsa/slave.c - Slave device handling
3 * Copyright (c) 2008-2009 Marvell Semiconductor
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/list.h>
12#include <linux/etherdevice.h>
13#include <linux/netdevice.h>
14#include <linux/phy.h>
15#include <linux/phy_fixed.h>
16#include <linux/phylink.h>
17#include <linux/of_net.h>
18#include <linux/of_mdio.h>
19#include <linux/mdio.h>
20#include <net/rtnetlink.h>
21#include <net/pkt_cls.h>
22#include <net/tc_act/tc_mirred.h>
23#include <linux/if_bridge.h>
24#include <linux/netpoll.h>
25#include <linux/ptp_classify.h>
26
27#include "dsa_priv.h"
28
29static bool dsa_slave_dev_check(struct net_device *dev);
30
31/* slave mii_bus handling ***************************************************/
32static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
33{
34 struct dsa_switch *ds = bus->priv;
35
36 if (ds->phys_mii_mask & (1 << addr))
37 return ds->ops->phy_read(ds, addr, reg);
38
39 return 0xffff;
40}
41
42static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
43{
44 struct dsa_switch *ds = bus->priv;
45
46 if (ds->phys_mii_mask & (1 << addr))
47 return ds->ops->phy_write(ds, addr, reg, val);
48
49 return 0;
50}
51
/* Initialize the pre-allocated slave MII bus for this switch: hook up the
 * DSA read/write accessors and mask out every PHY address the switch does
 * not serve, so the MDIO core never probes them.
 */
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	/* Bus id is unique per tree/switch pair */
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	/* phy_mask bits set = addresses the MDIO core must NOT probe */
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
63
64
65/* slave device handling ****************************************************/
66static int dsa_slave_get_iflink(const struct net_device *dev)
67{
68 return dsa_slave_to_master(dev)->ifindex;
69}
70
/* ndo_open: bring the slave port up.
 *
 * The slave has no MAC of its own; its traffic flows through the master,
 * so the master must already be up and must be taught this slave's unicast
 * address (unless they share one).  Each step that can fail is unwound in
 * reverse order via the goto chain below.
 */
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	/* Only install a secondary UC address when it differs from the
	 * master's own address.
	 */
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	/* Mirror the slave's allmulti/promisc refcounts onto the master */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	phylink_start(dp->pl);

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}
117
/* ndo_stop: mirror image of dsa_slave_open() — stop the PHY/port and undo
 * every address/flag change that open and set_rx_mode made on the master.
 */
static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_stop(dp->pl);

	dsa_port_disable(dp);

	/* Drop the MC/UC addresses this slave synced onto the master */
	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}
139
140static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
141{
142 struct net_device *master = dsa_slave_to_master(dev);
143 if (dev->flags & IFF_UP) {
144 if (change & IFF_ALLMULTI)
145 dev_set_allmulti(master,
146 dev->flags & IFF_ALLMULTI ? 1 : -1);
147 if (change & IFF_PROMISC)
148 dev_set_promiscuity(master,
149 dev->flags & IFF_PROMISC ? 1 : -1);
150 }
151}
152
/* ndo_set_rx_mode: keep the master's MC/UC filter lists in sync with the
 * addresses programmed on this slave.
 */
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}
160
/* ndo_set_mac_address: change the slave's MAC, updating the master's
 * unicast filter accordingly.  The new address is added before the old one
 * is removed so reception is never interrupted.
 */
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* While down, the master filter carries nothing for us; just
	 * record the new address.
	 */
	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
187
/* Context threaded through dsa_port_fdb_dump() callbacks while filling an
 * RTM_NEWNEIGH dump message.
 */
struct dsa_slave_dump_ctx {
	struct net_device *dev;		/* slave being dumped */
	struct sk_buff *skb;		/* netlink dump skb */
	struct netlink_callback *cb;	/* dump state (portid/seq/args) */
	int idx;			/* entries visited so far */
};
194
/* Per-FDB-entry callback for dsa_slave_fdb_dump(): emit one RTM_NEWNEIGH
 * record into the dump skb.  Entries before cb->args[2] were already sent
 * in a previous dump round and are skipped (but still counted in idx).
 */
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	/* Static entries never age out, hence NUD_NOARP */
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	/* VID 0 means "no VLAN"; omit the attribute in that case */
	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
238
/* ndo_fdb_dump: walk the hardware FDB of this port and report entries via
 * netlink, resuming from *idx on multi-part dumps.
 */
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	/* Propagate the resume point even on error */
	*idx = dump.idx;

	return err;
}
258
/* ndo_do_ioctl: hardware timestamping ioctls go to the switch driver when
 * supported; everything else falls through to the PHY via phylink.
 */
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
279
/* switchdev attribute setter: dispatch bridge port attributes (STP state,
 * VLAN filtering, ageing time, bridge flags) to the DSA port layer.
 */
static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
312
/* switchdev object add: program MDB/VLAN entries on the port (or on the
 * CPU port for host-directed MDB entries).
 */
static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
				       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
					trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
347
/* switchdev object delete: remove MDB/VLAN entries from the port (or the
 * CPU port for host-directed MDB entries).
 */
static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB delete,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
374
/* ndo_get_port_parent_id: all slaves in the same switch tree report the
 * tree index as their parent ID, so user space can group them.
 */
static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}
387
/* Hand an already-tagged skb to netpoll.  Only reachable when netpoll is
 * compiled in; otherwise getting here is a logic error, hence BUG().
 */
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->netpoll)
		netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}
401
/* Offer a clone of an outgoing PTP packet to the switch driver for TX
 * timestamping.  If the driver takes the clone (returns true) it owns it
 * and will attach the timestamp later; otherwise the clone is freed here.
 */
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
		return;

	kfree_skb(clone);
}
425
/* ndo_start_xmit: account the packet, tag it with the switch's protocol
 * header via p->xmit(), and queue it on the master interface.
 */
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	/* Per-cpu 64-bit TX counters, protected by the u64_stats seqcount */
	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* SKB for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, nskb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	nskb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(nskb);

	return NETDEV_TX_OK;
}
466
467/* ethtool operations *******************************************************/
468
/* ethtool -i: generic identification, the same for every DSA slave */
static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
476
477static int dsa_slave_get_regs_len(struct net_device *dev)
478{
479 struct dsa_port *dp = dsa_slave_to_port(dev);
480 struct dsa_switch *ds = dp->ds;
481
482 if (ds->ops->get_regs_len)
483 return ds->ops->get_regs_len(ds, dp->index);
484
485 return -EOPNOTSUPP;
486}
487
/* ethtool register dump, delegated to the switch driver when supported */
static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}
497
498static int dsa_slave_nway_reset(struct net_device *dev)
499{
500 struct dsa_port *dp = dsa_slave_to_port(dev);
501
502 return phylink_ethtool_nway_reset(dp->pl);
503}
504
/* EEPROM size: platform data (ds->cd) takes precedence over the driver
 * callback; 0 means no EEPROM.
 */
static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}
518
519static int dsa_slave_get_eeprom(struct net_device *dev,
520 struct ethtool_eeprom *eeprom, u8 *data)
521{
522 struct dsa_port *dp = dsa_slave_to_port(dev);
523 struct dsa_switch *ds = dp->ds;
524
525 if (ds->ops->get_eeprom)
526 return ds->ops->get_eeprom(ds, eeprom, data);
527
528 return -EOPNOTSUPP;
529}
530
531static int dsa_slave_set_eeprom(struct net_device *dev,
532 struct ethtool_eeprom *eeprom, u8 *data)
533{
534 struct dsa_port *dp = dsa_slave_to_port(dev);
535 struct dsa_switch *ds = dp->ds;
536
537 if (ds->ops->set_eeprom)
538 return ds->ops->set_eeprom(ds, eeprom, data);
539
540 return -EOPNOTSUPP;
541}
542
/* ethtool statistics names: four software counters first, then whatever
 * the switch driver exposes.  Must stay in sync with
 * dsa_slave_get_ethtool_stats() and dsa_slave_get_sset_count().
 */
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}
561
/* ethtool statistics values: sum the per-cpu software counters into the
 * first four slots, then append the driver's hardware counters.
 */
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		/* Retry loop gives a consistent snapshot against concurrent
		 * writers on that cpu.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
592
593static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
594{
595 struct dsa_port *dp = dsa_slave_to_port(dev);
596 struct dsa_switch *ds = dp->ds;
597
598 if (sset == ETH_SS_STATS) {
599 int count;
600
601 count = 4;
602 if (ds->ops->get_sset_count)
603 count += ds->ops->get_sset_count(ds, dp->index, sset);
604
605 return count;
606 }
607
608 return -EOPNOTSUPP;
609}
610
/* ethtool Wake-on-LAN status: query the PHY first, then let the switch
 * driver amend/override the result.
 */
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}
621
/* ethtool Wake-on-LAN configuration: offered to both the PHY (best effort,
 * return value ignored) and the switch driver, whose result is returned.
 */
static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}
635
/* ethtool EEE configuration: program the MAC side first, then the PHY
 * through phylink.
 */
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}
655
/* ethtool EEE status: read the MAC side first, then the PHY through
 * phylink (mirror of dsa_slave_set_eee()).
 */
static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}
675
676static int dsa_slave_get_link_ksettings(struct net_device *dev,
677 struct ethtool_link_ksettings *cmd)
678{
679 struct dsa_port *dp = dsa_slave_to_port(dev);
680
681 return phylink_ethtool_ksettings_get(dp->pl, cmd);
682}
683
684static int dsa_slave_set_link_ksettings(struct net_device *dev,
685 const struct ethtool_link_ksettings *cmd)
686{
687 struct dsa_port *dp = dsa_slave_to_port(dev);
688
689 return phylink_ethtool_ksettings_set(dp->pl, cmd);
690}
691
692#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_netpoll_setup: netpoll on a slave is implemented by attaching a
 * netpoll instance to the master device.
 */
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}
715
/* ndo_netpoll_cleanup: detach and free the netpoll instance created by
 * dsa_slave_netpoll_setup(), if any.
 */
static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	/* Clear the pointer before freeing so the TX path stops using it */
	p->netpoll = NULL;

	__netpoll_free(netpoll);
}
728
/* Intentionally empty: RX polling happens on the master device.  The
 * callback only needs to exist for netpoll to accept this device.
 */
static void dsa_slave_poll_controller(struct net_device *dev)
{
}
732#endif
733
/* ndo_get_phys_port_name: report "p<port index>"; fail if it would be
 * truncated.
 */
static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}
744
745static struct dsa_mall_tc_entry *
746dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
747{
748 struct dsa_slave_priv *p = netdev_priv(dev);
749 struct dsa_mall_tc_entry *mall_tc_entry;
750
751 list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
752 if (mall_tc_entry->cookie == cookie)
753 return mall_tc_entry;
754
755 return NULL;
756}
757
/* Offload a tc matchall rule.  Only a single mirred egress-mirror action
 * targeting another slave of the same switch is supported; everything else
 * returns -EOPNOTSUPP so tc can fall back to software.
 */
static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	__be16 protocol = cls->common.protocol;
	struct dsa_switch *ds = dp->ds;
	struct net_device *to_dev;
	const struct tc_action *a;
	struct dsa_port *to_dp;
	int err = -EOPNOTSUPP;

	if (!ds->ops->port_mirror_add)
		return err;

	if (!tcf_exts_has_one_action(cls->exts))
		return err;

	a = tcf_exts_first_action(cls->exts);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct dsa_mall_mirror_tc_entry *mirror;

		to_dev = tcf_mirred_dev(a);
		if (!to_dev)
			return -EINVAL;

		/* The mirror target must itself be a DSA slave */
		if (!dsa_slave_dev_check(to_dev))
			return -EOPNOTSUPP;

		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
		if (!mall_tc_entry)
			return -ENOMEM;

		mall_tc_entry->cookie = cls->cookie;
		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;

		to_dp = dsa_slave_to_port(to_dev);

		mirror->to_local_port = to_dp->index;
		mirror->ingress = ingress;

		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
		if (err) {
			kfree(mall_tc_entry);
			return err;
		}

		/* Track the rule so del_cls_matchall can find it by cookie */
		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
	}

	return 0;
}
814
/* Remove a previously offloaded matchall rule, looked up by its tc cookie,
 * and undo the hardware mirror it installed.
 */
static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mirror_del)
		return;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
841
/* Dispatch a matchall offload command (replace/destroy).  Rules on
 * non-zero chains are not offloadable.
 */
static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
859
/* Common tc block callback; cb_priv carries the slave net_device and
 * ingress selects the traffic direction.
 */
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}
875
/* Ingress-direction wrapper for the common tc block callback */
static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}
881
/* Egress-direction wrapper for the common tc block callback */
static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}
887
/* Bind/unbind the direction-specific callback to a clsact tc block */
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cb, dev, dev, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cb, dev);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
910
911static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
912 void *type_data)
913{
914 switch (type) {
915 case TC_SETUP_BLOCK:
916 return dsa_slave_setup_tc_block(dev, type_data);
917 default:
918 return -EOPNOTSUPP;
919 }
920}
921
/* ndo_get_stats64: base stats from dev->stats, plus the per-cpu 64-bit
 * packet/byte counters maintained in the RX/TX fast paths.
 */
static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	netdev_stats_to_stats64(stats, &dev->stats);
	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		/* Retry loop gives a consistent snapshot per cpu */
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}
949
/* ethtool RX flow classification read, delegated to the switch driver */
static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}
961
/* ethtool RX flow classification write, delegated to the switch driver */
static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}
973
/* ethtool timestamping capabilities, delegated to the switch driver */
static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
985
/* ndo_vlan_rx_add_vid: program a VID into the port's hardware VLAN table.
 * Refuses VIDs the bridge already manages to avoid clobbering bridge VLAN
 * state, and emulates switchdev's prepare+commit transaction itself.
 */
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid_begin = vid,
		.vid_end = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct switchdev_trans trans;
	struct bridge_vlan_info info;
	int ret;

	/* Check for a possible bridge VLAN entry now since there is no
	 * need to emulate the switchdev prepare + commit phase.
	 */
	if (dp->bridge_dev) {
		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
		 * device, respectively the VID is not found, returning
		 * 0 means success, which is a failure for us here.
		 */
		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
		if (ret == 0)
			return -EBUSY;
	}

	/* Prepare phase: let the driver veto the operation */
	trans.ph_prepare = true;
	ret = dsa_port_vlan_add(dp, &vlan, &trans);
	if (ret == -EOPNOTSUPP)
		return 0;

	/* Commit phase: actually program the hardware */
	trans.ph_prepare = false;
	return dsa_port_vlan_add(dp, &vlan, &trans);
}
1021
/* ndo_vlan_rx_kill_vid: remove a VID from the port's hardware VLAN table,
 * refusing VIDs the bridge manages (mirror of dsa_slave_vlan_rx_add_vid).
 */
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid_begin = vid,
		.vid_end = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct bridge_vlan_info info;
	int ret;

	/* Check for a possible bridge VLAN entry now since there is no
	 * need to emulate the switchdev prepare + commit phase.
	 */
	if (dp->bridge_dev) {
		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
		 * device, respectively the VID is not found, returning
		 * 0 means success, which is a failure for us here.
		 */
		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
		if (ret == 0)
			return -EBUSY;
	}

	/* Drivers without VLAN support simply never had the VID installed */
	ret = dsa_port_vlan_del(dp, &vlan);
	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}
1054
/* ethtool operations shared by every DSA slave netdevice */
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
};
1077
1078/* legacy way, bypassing the bridge *****************************************/
/* Legacy "bridge fdb add ... self" path: program the FDB entry directly,
 * bypassing the bridge.  ndm, tb[], flags and extack are unused here.
 */
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid,
		       u16 flags,
		       struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}
1089
/* Legacy "bridge fdb del ... self" path: remove the FDB entry directly,
 * bypassing the bridge.  ndm and tb[] are unused here.
 */
int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}
1098
/* net_device operations shared by every DSA slave netdevice */
static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_add		= dsa_legacy_fdb_add,
	.ndo_fdb_del		= dsa_legacy_fdb_del,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_do_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
};
1123
/* Device type tag so user space can recognize DSA slaves in sysfs */
static struct device_type dsa_type = {
	.name	= "dsa",
};
1127
/* phylink .validate: let the switch driver restrict the supported link
 * modes for this port.  Without a callback the mask is left untouched.
 */
static void dsa_slave_phylink_validate(struct net_device *dev,
				       unsigned long *supported,
				       struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}
1140
/* phylink .mac_link_state: read the in-band link state from the MAC */
static int dsa_slave_phylink_mac_link_state(struct net_device *dev,
					    struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	/* Only called for SGMII and 802.3z */
	if (!ds->ops->phylink_mac_link_state)
		return -EOPNOTSUPP;

	return ds->ops->phylink_mac_link_state(ds, dp->index, state);
}
1153
/* phylink .mac_config: apply a new link configuration to the port MAC */
static void dsa_slave_phylink_mac_config(struct net_device *dev,
					 unsigned int mode,
					 const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}
1166
/* phylink .mac_an_restart: restart in-band autonegotiation on the MAC */
static void dsa_slave_phylink_mac_an_restart(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}
1177
1178static void dsa_slave_phylink_mac_link_down(struct net_device *dev,
1179 unsigned int mode,
1180 phy_interface_t interface)
1181{
1182 struct dsa_port *dp = dsa_slave_to_port(dev);
1183 struct dsa_switch *ds = dp->ds;
1184
1185 if (!ds->ops->phylink_mac_link_down) {
1186 if (ds->ops->adjust_link && dev->phydev)
1187 ds->ops->adjust_link(ds, dp->index, dev->phydev);
1188 return;
1189 }
1190
1191 ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
1192}
1193
1194static void dsa_slave_phylink_mac_link_up(struct net_device *dev,
1195 unsigned int mode,
1196 phy_interface_t interface,
1197 struct phy_device *phydev)
1198{
1199 struct dsa_port *dp = dsa_slave_to_port(dev);
1200 struct dsa_switch *ds = dp->ds;
1201
1202 if (!ds->ops->phylink_mac_link_up) {
1203 if (ds->ops->adjust_link && dev->phydev)
1204 ds->ops->adjust_link(ds, dp->index, dev->phydev);
1205 return;
1206 }
1207
1208 ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev);
1209}
1210
/* MAC operations handed to phylink_create() for every slave port; each entry
 * dispatches into the corresponding dsa_switch_ops callback above.
 */
static const struct phylink_mac_ops dsa_slave_phylink_mac_ops = {
	.validate = dsa_slave_phylink_validate,
	.mac_link_state = dsa_slave_phylink_mac_link_state,
	.mac_config = dsa_slave_phylink_mac_config,
	.mac_an_restart = dsa_slave_phylink_mac_an_restart,
	.mac_link_down = dsa_slave_phylink_mac_link_down,
	.mac_link_up = dsa_slave_phylink_mac_link_up,
};
1219
1220void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1221{
1222 const struct dsa_port *dp = dsa_to_port(ds, port);
1223
1224 phylink_mac_change(dp->pl, up);
1225}
1226EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1227
/* PHYLINK fixed-state callback: let the switch driver fill in the link state
 * of a fixed-link port (registered only when the driver provides the op).
 */
static void dsa_slave_phylink_fixed_state(struct net_device *dev,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}
1239
1240/* slave device setup *******************************************************/
1241static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1242{
1243 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1244 struct dsa_switch *ds = dp->ds;
1245
1246 slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1247 if (!slave_dev->phydev) {
1248 netdev_err(slave_dev, "no phy at %d\n", addr);
1249 return -ENODEV;
1250 }
1251
1252 return phylink_connect_phy(dp->pl, slave_dev->phydev);
1253}
1254
1255static int dsa_slave_phy_setup(struct net_device *slave_dev)
1256{
1257 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1258 struct device_node *port_dn = dp->dn;
1259 struct dsa_switch *ds = dp->ds;
1260 u32 phy_flags = 0;
1261 int mode, ret;
1262
1263 mode = of_get_phy_mode(port_dn);
1264 if (mode < 0)
1265 mode = PHY_INTERFACE_MODE_NA;
1266
1267 dp->pl = phylink_create(slave_dev, of_fwnode_handle(port_dn), mode,
1268 &dsa_slave_phylink_mac_ops);
1269 if (IS_ERR(dp->pl)) {
1270 netdev_err(slave_dev,
1271 "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1272 return PTR_ERR(dp->pl);
1273 }
1274
1275 /* Register only if the switch provides such a callback, since this
1276 * callback takes precedence over polling the link GPIO in PHYLINK
1277 * (see phylink_get_fixed_state).
1278 */
1279 if (ds->ops->phylink_fixed_state)
1280 phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);
1281
1282 if (ds->ops->get_phy_flags)
1283 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1284
1285 ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1286 if (ret == -ENODEV) {
1287 /* We could not connect to a designated PHY or SFP, so use the
1288 * switch internal MDIO bus instead
1289 */
1290 ret = dsa_slave_phy_connect(slave_dev, dp->index);
1291 if (ret) {
1292 netdev_err(slave_dev,
1293 "failed to connect to port %d: %d\n",
1294 dp->index, ret);
1295 phylink_destroy(dp->pl);
1296 return ret;
1297 }
1298 }
1299
1300 return 0;
1301}
1302
/* Single lockdep class shared by the TX queue locks of every DSA slave
 * netdev — presumably to keep lockdep from conflating them with their
 * master device's queue locks when frames are re-queued through the
 * master; TODO confirm against the call site history.
 */
static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
/* netdev_for_each_tx_queue() callback: assign the shared class to one queue. */
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}
1311
1312int dsa_slave_suspend(struct net_device *slave_dev)
1313{
1314 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1315
1316 if (!netif_running(slave_dev))
1317 return 0;
1318
1319 netif_device_detach(slave_dev);
1320
1321 rtnl_lock();
1322 phylink_stop(dp->pl);
1323 rtnl_unlock();
1324
1325 return 0;
1326}
1327
1328int dsa_slave_resume(struct net_device *slave_dev)
1329{
1330 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1331
1332 if (!netif_running(slave_dev))
1333 return 0;
1334
1335 netif_device_attach(slave_dev);
1336
1337 rtnl_lock();
1338 phylink_start(dp->pl);
1339 rtnl_unlock();
1340
1341 return 0;
1342}
1343
1344static void dsa_slave_notify(struct net_device *dev, unsigned long val)
1345{
1346 struct net_device *master = dsa_slave_to_master(dev);
1347 struct dsa_port *dp = dsa_slave_to_port(dev);
1348 struct dsa_notifier_register_info rinfo = {
1349 .switch_number = dp->ds->index,
1350 .port_number = dp->index,
1351 .master = master,
1352 .info.dev = dev,
1353 };
1354
1355 call_dsa_notifiers(val, dev, &rinfo.info);
1356}
1357
/* Allocate, configure and register the slave net_device for @port.
 *
 * The netdev inherits its MAC address and vlan_features from the CPU port's
 * master device. On any failure everything set up so far is unwound and a
 * negative errno is returned; on success port->slave points at the new
 * registered netdev and 0 is returned.
 */
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	/* Drivers that don't declare a TX queue count get a single queue */
	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC |
				NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	eth_hw_addr_inherit(slave_dev, master);
	/* Qdisc-less: frames are forwarded straight to the master device */
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	slave_dev->min_mtu = 0;
	slave_dev->max_mtu = ETH_MAX_MTU;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	/* Give all TX queue locks the shared DSA lockdep class */
	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!p->stats64) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}
	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	/* Cache the tagger's xmit op for the hot TX path */
	p->xmit = cpu_dp->tag_ops->xmit;
	port->slave = slave_dev;

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(master, "error %d setting up slave phy\n", ret);
		goto out_free;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		goto out_phy;
	}

	return 0;

out_phy:
	/* phylink disconnect must run under RTNL; destroy must not */
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_free:
	free_percpu(p->stats64);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}
1436
/* Tear down a slave netdev created by dsa_slave_create(): disconnect the
 * PHY, notify listeners, unregister the netdev and release its resources.
 * The teardown order mirrors the setup order in reverse.
 */
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	/* phylink_disconnect_phy() must run under RTNL */
	rtnl_lock();
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	unregister_netdev(slave_dev);
	phylink_destroy(dp->pl);
	free_percpu(p->stats64);
	free_netdev(slave_dev);
}
1453
/* Return true if @dev is a DSA slave netdev (identified by its ndo table). */
static bool dsa_slave_dev_check(struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}
1458
1459static int dsa_slave_changeupper(struct net_device *dev,
1460 struct netdev_notifier_changeupper_info *info)
1461{
1462 struct dsa_port *dp = dsa_slave_to_port(dev);
1463 int err = NOTIFY_DONE;
1464
1465 if (netif_is_bridge_master(info->upper_dev)) {
1466 if (info->linking) {
1467 err = dsa_port_bridge_join(dp, info->upper_dev);
1468 err = notifier_from_errno(err);
1469 } else {
1470 dsa_port_bridge_leave(dp, info->upper_dev);
1471 err = NOTIFY_OK;
1472 }
1473 }
1474
1475 return err;
1476}
1477
1478static int dsa_slave_upper_vlan_check(struct net_device *dev,
1479 struct netdev_notifier_changeupper_info *
1480 info)
1481{
1482 struct netlink_ext_ack *ext_ack;
1483 struct net_device *slave;
1484 struct dsa_port *dp;
1485
1486 ext_ack = netdev_notifier_info_to_extack(&info->info);
1487
1488 if (!is_vlan_dev(dev))
1489 return NOTIFY_DONE;
1490
1491 slave = vlan_dev_real_dev(dev);
1492 if (!dsa_slave_dev_check(slave))
1493 return NOTIFY_DONE;
1494
1495 dp = dsa_slave_to_port(slave);
1496 if (!dp->bridge_dev)
1497 return NOTIFY_DONE;
1498
1499 /* Deny enslaving a VLAN device into a VLAN-aware bridge */
1500 if (br_vlan_enabled(dp->bridge_dev) &&
1501 netif_is_bridge_master(info->upper_dev) && info->linking) {
1502 NL_SET_ERR_MSG_MOD(ext_ack,
1503 "Cannot enslave VLAN device into VLAN aware bridge");
1504 return notifier_from_errno(-EINVAL);
1505 }
1506
1507 return NOTIFY_DONE;
1508}
1509
1510static int dsa_slave_netdevice_event(struct notifier_block *nb,
1511 unsigned long event, void *ptr)
1512{
1513 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1514
1515 if (event == NETDEV_CHANGEUPPER) {
1516 if (!dsa_slave_dev_check(dev))
1517 return dsa_slave_upper_vlan_check(dev, ptr);
1518
1519 return dsa_slave_changeupper(dev, ptr);
1520 }
1521
1522 return NOTIFY_DONE;
1523}
1524
1525static int
1526dsa_slave_switchdev_port_attr_set_event(struct net_device *netdev,
1527 struct switchdev_notifier_port_attr_info *port_attr_info)
1528{
1529 int err;
1530
1531 err = dsa_slave_port_attr_set(netdev, port_attr_info->attr,
1532 port_attr_info->trans);
1533
1534 port_attr_info->handled = true;
1535 return notifier_from_errno(err);
1536}
1537
/* Deferred-work context for an FDB add/del switchdev event: the event is
 * received in atomic context and processed later under RTNL.
 */
struct dsa_switchdev_event_work {
	struct work_struct work;
	/* Copy of the notifier's FDB info; .addr points at our own allocation */
	struct switchdev_notifier_fdb_info fdb_info;
	/* Slave netdev the event targets; a reference is held while queued */
	struct net_device *dev;
	/* SWITCHDEV_FDB_ADD_TO_DEVICE or SWITCHDEV_FDB_DEL_TO_DEVICE */
	unsigned long event;
};
1544
/* Workqueue handler: program (or remove) a user-added FDB entry into the
 * switch hardware under RTNL, then release the resources taken when the
 * event was queued (address copy, work struct, netdev reference).
 */
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only offload entries added by the user, not learned ones */
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		/* Tell the bridge layer the entry now lives in hardware */
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			/* Hardware is now out of sync; take the port down */
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);	/* drop the reference taken at queue time */
}
1589
1590static int
1591dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
1592 switchdev_work,
1593 const struct switchdev_notifier_fdb_info *
1594 fdb_info)
1595{
1596 memcpy(&switchdev_work->fdb_info, fdb_info,
1597 sizeof(switchdev_work->fdb_info));
1598 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
1599 if (!switchdev_work->fdb_info.addr)
1600 return -ENOMEM;
1601 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
1602 fdb_info->addr);
1603 return 0;
1604}
1605
1606/* Called under rcu_read_lock() */
1607static int dsa_slave_switchdev_event(struct notifier_block *unused,
1608 unsigned long event, void *ptr)
1609{
1610 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1611 struct dsa_switchdev_event_work *switchdev_work;
1612
1613 if (!dsa_slave_dev_check(dev))
1614 return NOTIFY_DONE;
1615
1616 if (event == SWITCHDEV_PORT_ATTR_SET)
1617 return dsa_slave_switchdev_port_attr_set_event(dev, ptr);
1618
1619 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
1620 if (!switchdev_work)
1621 return NOTIFY_BAD;
1622
1623 INIT_WORK(&switchdev_work->work,
1624 dsa_slave_switchdev_event_work);
1625 switchdev_work->dev = dev;
1626 switchdev_work->event = event;
1627
1628 switch (event) {
1629 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
1630 case SWITCHDEV_FDB_DEL_TO_DEVICE:
1631 if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
1632 goto err_fdb_work_init;
1633 dev_hold(dev);
1634 break;
1635 default:
1636 kfree(switchdev_work);
1637 return NOTIFY_DONE;
1638 }
1639
1640 dsa_schedule_work(&switchdev_work->work);
1641 return NOTIFY_OK;
1642
1643err_fdb_work_init:
1644 kfree(switchdev_work);
1645 return NOTIFY_BAD;
1646}
1647
1648static int
1649dsa_slave_switchdev_port_obj_event(unsigned long event,
1650 struct net_device *netdev,
1651 struct switchdev_notifier_port_obj_info *port_obj_info)
1652{
1653 int err = -EOPNOTSUPP;
1654
1655 switch (event) {
1656 case SWITCHDEV_PORT_OBJ_ADD:
1657 err = dsa_slave_port_obj_add(netdev, port_obj_info->obj,
1658 port_obj_info->trans);
1659 break;
1660 case SWITCHDEV_PORT_OBJ_DEL:
1661 err = dsa_slave_port_obj_del(netdev, port_obj_info->obj);
1662 break;
1663 }
1664
1665 port_obj_info->handled = true;
1666 return notifier_from_errno(err);
1667}
1668
1669static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
1670 unsigned long event, void *ptr)
1671{
1672 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1673
1674 if (!dsa_slave_dev_check(dev))
1675 return NOTIFY_DONE;
1676
1677 switch (event) {
1678 case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
1679 case SWITCHDEV_PORT_OBJ_DEL:
1680 return dsa_slave_switchdev_port_obj_event(event, dev, ptr);
1681 case SWITCHDEV_PORT_ATTR_SET:
1682 return dsa_slave_switchdev_port_attr_set_event(dev, ptr);
1683 }
1684
1685 return NOTIFY_DONE;
1686}
1687
/* Netdevice notifier (CHANGEUPPER handling for bridge join/leave). */
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

/* Atomic switchdev notifier (FDB add/del, deferred to a workqueue). */
static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

/* Blocking switchdev notifier (port object and attribute operations). */
static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};
1699
1700int dsa_slave_register_notifier(void)
1701{
1702 struct notifier_block *nb;
1703 int err;
1704
1705 err = register_netdevice_notifier(&dsa_slave_nb);
1706 if (err)
1707 return err;
1708
1709 err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
1710 if (err)
1711 goto err_switchdev_nb;
1712
1713 nb = &dsa_slave_switchdev_blocking_notifier;
1714 err = register_switchdev_blocking_notifier(nb);
1715 if (err)
1716 goto err_switchdev_blocking_nb;
1717
1718 return 0;
1719
1720err_switchdev_blocking_nb:
1721 unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1722err_switchdev_nb:
1723 unregister_netdevice_notifier(&dsa_slave_nb);
1724 return err;
1725}
1726
1727void dsa_slave_unregister_notifier(void)
1728{
1729 struct notifier_block *nb;
1730 int err;
1731
1732 nb = &dsa_slave_switchdev_blocking_notifier;
1733 err = unregister_switchdev_blocking_notifier(nb);
1734 if (err)
1735 pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
1736
1737 err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1738 if (err)
1739 pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
1740
1741 err = unregister_netdevice_notifier(&dsa_slave_nb);
1742 if (err)
1743 pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
1744}