Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>
#include <net/sch_generic.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;
/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The only other way of traversing the tree is through
 * its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

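/* Find the struct dsa_lag which offloads @lag_dev, by walking the ports of
 * the given tree. Returns NULL if no port in @dst offloads this LAG device.
 */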
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

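/* Return a system-unique bridge number for @bridge_dev, allocating one on
 * first use. Returns 0 when the driver requested no numbering (@max is 0) or
 * when no free number below @max remains in the global pool.
 */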
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

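/* Look up a switch by its (tree index, switch index) pair anywhere in the
 * system. Exported for switch drivers that need to resolve cross-chip
 * notifications back to a struct dsa_switch.
 */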
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

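/* Return the tree with the given index, creating it if it does not exist yet.
 * Either way, the caller ends up holding a reference on the returned tree.
 */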
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

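/* Return the dsa_link connecting @dp to @link_dp, allocating it and adding it
 * to the tree's routing table if it does not exist yet.
 */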
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

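/* Type-specific setup of a single port: shared (CPU and DSA) ports have their
 * link registered from OF and are enabled, while user ports get a slave
 * net_device. Everything done so far for the port is unwound on failure.
 */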
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}

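/* Register a devlink port for @dp, with a flavour matching the port type and
 * a switch ID derived from the tree index, so that all ports of the same
 * fabric are reported as belonging to the same physical switch.
 */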
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (dp->devlink_port_setup)
		devlink_port_unregister(dlp);
	dp->devlink_port_setup = false;
}

/* Destroy the current devlink port, and create a new one which has the UNUSED
 * flavour. At this point, any call to ds->ops->port_setup has been already
 * balanced out by a call to ds->ops->port_teardown, so we know that any
 * devlink port regions the driver had are now unregistered. We then call its
 * ds->ops->port_setup again, in order for the driver to re-create them on the
 * new devlink port.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On error, leave the devlink port registered,
		 * dsa_switch_teardown will clean it up later.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};

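/* Make the hardware match the tagging protocol chosen for the tree (when it
 * differs from the driver's default) and connect the tagger to the switch,
 * disconnecting it again if the driver rejects the protocol.
 */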
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *cpu_dp;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		rtnl_lock();
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		rtnl_unlock();
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}

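/* One-time setup of a switch: register it with devlink, create the devlink
 * port instances, register the DSA notifier, run the driver's ->setup(), sync
 * the tagging protocol and, if the driver provides ->phy_read, allocate and
 * register the slave MII bus.
 */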
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values to decide whether or
	 * not to probe PHY devices.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->devlink)
		devlink_unregister(ds->devlink);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		dsa_switch_for_each_port(dp, ds)
			dsa_port_devlink_teardown(dp);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_reinit_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	rtnl_lock();

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			struct net_device *master = dp->master;
			bool admin_up = (master->flags & IFF_UP) &&
					!qdisc_tx_is_noop(master);

			err = dsa_master_setup(master, dp);
			if (err)
				break;

			/* Replay master state event */
			dsa_tree_master_admin_state_change(dst, master, admin_up);
			dsa_tree_master_oper_state_change(dst, master,
							  netif_oper_up(master));
		}
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	rtnl_lock();

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			struct net_device *master = dp->master;

			/* Synthesizing an "admin down" state is sufficient for
			 * the switches to get a notification if the master is
			 * currently up and running.
			 */
			dsa_tree_master_admin_state_change(dst, master, false);

			dsa_master_teardown(master);
		}
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

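/* Top-level setup of a switch tree, run once the routing table is complete
 * (i.e. the last member switch has probed): assign CPU ports, then set up
 * switches, ports, masters and the LAG array, unwinding in reverse on error.
 */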
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	if (master->flags & IFF_UP)
		goto out_unlock;

	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_port_is_user(dp))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
					 struct net_device *master)
{
	struct dsa_notifier_master_state_info info;
	struct dsa_port *cpu_dp = master->dsa_ptr;

	info.master = master;
	info.operational = dsa_port_master_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}

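/* Record the new administrative state of the master and, if the combined
 * "admin up && oper up" state changes as a result, notify the tree.
 */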
void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (up && cpu_dp->master_oper_up))
		notify = true;

	cpu_dp->master_admin_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (cpu_dp->master_admin_up && up))
		notify = true;

	cpu_dp->master_oper_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

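/* Return the dsa_port with the given index on @ds, allocating it and linking
 * it into the tree's ports list if it does not exist yet.
 */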
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another; when that
	 * happens, the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

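/* Parse every child of the "ports" (or "ethernet-ports") container node. As
 * an illustration (not taken from any real board file), a device tree
 * fragment consumed by this parser could look like:
 *
 *	ports {
 *		port@0 {
 *			reg = <0>;
 *			label = "lan1";
 *		};
 *		port@5 {
 *			reg = <5>;
 *			ethernet = <&eth0>;
 *		};
 *	};
 *
 * where "reg" selects the dsa_port, "label" names a user port and "ethernet"
 * points at the master interface of a CPU port.
 */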
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

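/* Main entry point for switch drivers. A driver fills in a struct dsa_switch
 * from its probe function and hands it over to DSA. A minimal sketch of a
 * caller (field values are illustrative, not from any real driver):
 *
 *	ds->dev = &pdev->dev;
 *	ds->num_ports = 8;
 *	ds->ops = &my_switch_ops;
 *	err = dsa_register_switch(ds);
 *
 * May return -EPROBE_DEFER, e.g. when the DSA master has not registered yet.
 */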
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);