Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Handling of a single switch chip, part of a switch fabric
4 *
5 * Copyright (c) 2017 Savoir-faire Linux Inc.
6 * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7 */
8
9#include <linux/if_bridge.h>
10#include <linux/netdevice.h>
11#include <linux/notifier.h>
12#include <linux/if_vlan.h>
13#include <net/switchdev.h>
14
15#include "dsa_priv.h"
16
17static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
18 unsigned int ageing_time)
19{
20 struct dsa_port *dp;
21
22 dsa_switch_for_each_port(dp, ds)
23 if (dp->ageing_time && dp->ageing_time < ageing_time)
24 ageing_time = dp->ageing_time;
25
26 return ageing_time;
27}
28
29static int dsa_switch_ageing_time(struct dsa_switch *ds,
30 struct dsa_notifier_ageing_time_info *info)
31{
32 unsigned int ageing_time = info->ageing_time;
33
34 if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
35 return -ERANGE;
36
37 if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
38 return -ERANGE;
39
40 /* Program the fastest ageing time in case of multiple bridges */
41 ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
42
43 if (ds->ops->set_ageing_time)
44 return ds->ops->set_ageing_time(ds, ageing_time);
45
46 return 0;
47}
48
49static bool dsa_port_mtu_match(struct dsa_port *dp,
50 struct dsa_notifier_mtu_info *info)
51{
52 return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
53}
54
55static int dsa_switch_mtu(struct dsa_switch *ds,
56 struct dsa_notifier_mtu_info *info)
57{
58 struct dsa_port *dp;
59 int ret;
60
61 if (!ds->ops->port_change_mtu)
62 return -EOPNOTSUPP;
63
64 dsa_switch_for_each_port(dp, ds) {
65 if (dsa_port_mtu_match(dp, info)) {
66 ret = ds->ops->port_change_mtu(ds, dp->index,
67 info->mtu);
68 if (ret)
69 return ret;
70 }
71 }
72
73 return 0;
74}
75
/* Offload a bridge join: natively when the targeted port belongs to this
 * switch, or via the cross-chip operation when a port of another switch in
 * the same tree joined a bridge and this switch must be told about it.
 */
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		/* Joining locally without driver support is a hard error */
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	/* For foreign switches, a missing crosschip op is not an error:
	 * this switch simply has nothing to do for the event.
	 */
	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}
106
107static int dsa_switch_bridge_leave(struct dsa_switch *ds,
108 struct dsa_notifier_bridge_info *info)
109{
110 if (info->dp->ds == ds && ds->ops->port_bridge_leave)
111 ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
112
113 if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
114 ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
115 info->dp->ds->index,
116 info->dp->index,
117 info->bridge);
118
119 return 0;
120}
121
122/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
123 * DSA links) that sit between the targeted port on which the notifier was
124 * emitted and its dedicated CPU port.
125 */
126static bool dsa_port_host_address_match(struct dsa_port *dp,
127 const struct dsa_port *targeted_dp)
128{
129 struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
130
131 if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
132 return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
133 cpu_dp->index);
134
135 return false;
136}
137
138static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
139 const unsigned char *addr, u16 vid,
140 struct dsa_db db)
141{
142 struct dsa_mac_addr *a;
143
144 list_for_each_entry(a, addr_list, list)
145 if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
146 dsa_db_equal(&a->db, &db))
147 return a;
148
149 return NULL;
150}
151
/* Install a multicast address on @dp. Shared (CPU/DSA) ports reference-count
 * each (addr, vid, db) tuple since several bridges and user ports can request
 * the same entry; the hardware is only programmed on the 0 -> 1 transition.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		/* Already offloaded: just take another reference */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* First user: actually program the hardware */
	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
196
/* Remove a reference to a multicast address on @dp; the hardware entry is
 * only deleted once the last reference is dropped.
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users remain; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		/* Deletion failed: restore the reference so a later
		 * attempt can retry.
		 */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
235
/* Install a unicast address on @dp. Shared (CPU/DSA) ports reference-count
 * each (addr, vid, db) tuple; the hardware is only programmed on the
 * 0 -> 1 transition.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		/* Already offloaded: just take another reference */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* First user: actually program the hardware */
	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
279
/* Remove a reference to a unicast address on @dp; the hardware entry is
 * only deleted once the last reference is dropped.
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users remain; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		/* Deletion failed: restore the reference for a later retry */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
317
318static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
319 const unsigned char *addr, u16 vid,
320 struct dsa_db db)
321{
322 struct dsa_mac_addr *a;
323 int err = 0;
324
325 mutex_lock(&lag->fdb_lock);
326
327 a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
328 if (a) {
329 refcount_inc(&a->refcount);
330 goto out;
331 }
332
333 a = kzalloc(sizeof(*a), GFP_KERNEL);
334 if (!a) {
335 err = -ENOMEM;
336 goto out;
337 }
338
339 err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
340 if (err) {
341 kfree(a);
342 goto out;
343 }
344
345 ether_addr_copy(a->addr, addr);
346 a->vid = vid;
347 refcount_set(&a->refcount, 1);
348 list_add_tail(&a->list, &lag->fdbs);
349
350out:
351 mutex_unlock(&lag->fdb_lock);
352
353 return err;
354}
355
/* Remove a reference to a unicast address on a LAG; the hardware entry is
 * only deleted once the last reference is dropped.
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users remain; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		/* Deletion failed: restore the reference for a later retry */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
388
389static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
390 struct dsa_notifier_fdb_info *info)
391{
392 struct dsa_port *dp;
393 int err = 0;
394
395 if (!ds->ops->port_fdb_add)
396 return -EOPNOTSUPP;
397
398 dsa_switch_for_each_port(dp, ds) {
399 if (dsa_port_host_address_match(dp, info->dp)) {
400 err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
401 info->db);
402 if (err)
403 break;
404 }
405 }
406
407 return err;
408}
409
410static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
411 struct dsa_notifier_fdb_info *info)
412{
413 struct dsa_port *dp;
414 int err = 0;
415
416 if (!ds->ops->port_fdb_del)
417 return -EOPNOTSUPP;
418
419 dsa_switch_for_each_port(dp, ds) {
420 if (dsa_port_host_address_match(dp, info->dp)) {
421 err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
422 info->db);
423 if (err)
424 break;
425 }
426 }
427
428 return err;
429}
430
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	/* Resolve the local port that faces the targeted port: the port
	 * itself if it belongs to this switch, otherwise the DSA link
	 * leading towards it.
	 */
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}
442
static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	/* Resolve the local port that faces the targeted port (the port
	 * itself, or the DSA link leading towards it).
	 */
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}
454
/* Offload a LAG FDB entry, but only on switches that have at least one port
 * in the given LAG. The per-LAG bookkeeping is done at most once per switch.
 */
static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}
472
/* Remove a LAG FDB entry, but only on switches that have at least one port
 * in the given LAG.
 */
static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid,
							 info->db);

	return 0;
}
490
491static int dsa_switch_lag_change(struct dsa_switch *ds,
492 struct dsa_notifier_lag_info *info)
493{
494 if (info->dp->ds == ds && ds->ops->port_lag_change)
495 return ds->ops->port_lag_change(ds, info->dp->index);
496
497 if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
498 return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
499 info->dp->index);
500
501 return 0;
502}
503
504static int dsa_switch_lag_join(struct dsa_switch *ds,
505 struct dsa_notifier_lag_info *info)
506{
507 if (info->dp->ds == ds && ds->ops->port_lag_join)
508 return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
509 info->info);
510
511 if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
512 return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
513 info->dp->index, info->lag,
514 info->info);
515
516 return -EOPNOTSUPP;
517}
518
519static int dsa_switch_lag_leave(struct dsa_switch *ds,
520 struct dsa_notifier_lag_info *info)
521{
522 if (info->dp->ds == ds && ds->ops->port_lag_leave)
523 return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
524
525 if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
526 return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
527 info->dp->index, info->lag);
528
529 return -EOPNOTSUPP;
530}
531
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	/* Resolve the local port that faces the targeted port (the port
	 * itself, or the DSA link leading towards it).
	 */
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}
543
static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	/* Resolve the local port that faces the targeted port (the port
	 * itself, or the DSA link leading towards it).
	 */
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}
555
556static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
557 struct dsa_notifier_mdb_info *info)
558{
559 struct dsa_port *dp;
560 int err = 0;
561
562 if (!ds->ops->port_mdb_add)
563 return -EOPNOTSUPP;
564
565 dsa_switch_for_each_port(dp, ds) {
566 if (dsa_port_host_address_match(dp, info->dp)) {
567 err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
568 if (err)
569 break;
570 }
571 }
572
573 return err;
574}
575
576static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
577 struct dsa_notifier_mdb_info *info)
578{
579 struct dsa_port *dp;
580 int err = 0;
581
582 if (!ds->ops->port_mdb_del)
583 return -EOPNOTSUPP;
584
585 dsa_switch_for_each_port(dp, ds) {
586 if (dsa_port_host_address_match(dp, info->dp)) {
587 err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
588 if (err)
589 break;
590 }
591 }
592
593 return err;
594}
595
596/* Port VLANs match on the targeted port and on all DSA ports */
597static bool dsa_port_vlan_match(struct dsa_port *dp,
598 struct dsa_notifier_vlan_info *info)
599{
600 return dsa_port_is_dsa(dp) || dp == info->dp;
601}
602
603/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
604 * (upstream and downstream) of that switch and its upstream switches.
605 */
606static bool dsa_port_host_vlan_match(struct dsa_port *dp,
607 const struct dsa_port *targeted_dp)
608{
609 struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
610
611 if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
612 return dsa_port_is_dsa(dp) || dp == cpu_dp;
613
614 return false;
615}
616
617static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
618 const struct switchdev_obj_port_vlan *vlan)
619{
620 struct dsa_vlan *v;
621
622 list_for_each_entry(v, vlan_list, list)
623 if (v->vid == vlan->vid)
624 return v;
625
626 return NULL;
627}
628
/* Install a VLAN on @dp. Shared (CPU/DSA) ports reference-count VIDs since
 * several user ports can bring the same VLAN; the hardware is only
 * programmed on the 0 -> 1 transition.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		/* Already offloaded: just take another reference */
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	/* First user: actually program the hardware */
	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
679
/* Drop a reference to a VLAN on @dp; the hardware entry is only deleted
 * once the last user port gives it up.
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	/* Other users remain; keep the hardware entry */
	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		/* Deletion failed: restore the reference for a later retry */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
717
718static int dsa_switch_vlan_add(struct dsa_switch *ds,
719 struct dsa_notifier_vlan_info *info)
720{
721 struct dsa_port *dp;
722 int err;
723
724 if (!ds->ops->port_vlan_add)
725 return -EOPNOTSUPP;
726
727 dsa_switch_for_each_port(dp, ds) {
728 if (dsa_port_vlan_match(dp, info)) {
729 err = dsa_port_do_vlan_add(dp, info->vlan,
730 info->extack);
731 if (err)
732 return err;
733 }
734 }
735
736 return 0;
737}
738
739static int dsa_switch_vlan_del(struct dsa_switch *ds,
740 struct dsa_notifier_vlan_info *info)
741{
742 struct dsa_port *dp;
743 int err;
744
745 if (!ds->ops->port_vlan_del)
746 return -EOPNOTSUPP;
747
748 dsa_switch_for_each_port(dp, ds) {
749 if (dsa_port_vlan_match(dp, info)) {
750 err = dsa_port_do_vlan_del(dp, info->vlan);
751 if (err)
752 return err;
753 }
754 }
755
756 return 0;
757}
758
759static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
760 struct dsa_notifier_vlan_info *info)
761{
762 struct dsa_port *dp;
763 int err;
764
765 if (!ds->ops->port_vlan_add)
766 return -EOPNOTSUPP;
767
768 dsa_switch_for_each_port(dp, ds) {
769 if (dsa_port_host_vlan_match(dp, info->dp)) {
770 err = dsa_port_do_vlan_add(dp, info->vlan,
771 info->extack);
772 if (err)
773 return err;
774 }
775 }
776
777 return 0;
778}
779
780static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
781 struct dsa_notifier_vlan_info *info)
782{
783 struct dsa_port *dp;
784 int err;
785
786 if (!ds->ops->port_vlan_del)
787 return -EOPNOTSUPP;
788
789 dsa_switch_for_each_port(dp, ds) {
790 if (dsa_port_host_vlan_match(dp, info->dp)) {
791 err = dsa_port_do_vlan_del(dp, info->vlan);
792 if (err)
793 return err;
794 }
795 }
796
797 return 0;
798}
799
/* Switch @ds over to a new tagging protocol. The hardware change happens
 * first (it can fail); only then is the per-port software state updated,
 * so no rollback is ever needed past that point.
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	/* Tagger/MTU updates below rely on the caller holding rtnl_lock */
	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
834
835/* We use the same cross-chip notifiers to inform both the tagger side, as well
836 * as the switch side, of connection and disconnection events.
837 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
838 * switch side doesn't support connecting to this tagger, and therefore, the
839 * fact that we don't disconnect the tagger side doesn't constitute a memory
840 * leak: the tagger will still operate with persistent per-switch memory, just
841 * with the switch side unconnected to it. What does constitute a hard error is
842 * when the switch side supports connecting but fails.
843 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Per the comment above this function, a switch that cannot connect
	 * is not rolled back on the tagger side: not a hard error.
	 */
	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
872
873static int
874dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
875 struct dsa_notifier_tag_proto_info *info)
876{
877 const struct dsa_device_ops *tag_ops = info->tag_ops;
878
879 /* Notify the tagger about the disconnection from this switch */
880 if (tag_ops->disconnect && ds->tagger_data)
881 tag_ops->disconnect(ds);
882
883 /* No need to notify the switch, since it shouldn't have any
884 * resources to tear down
885 */
886 return 0;
887}
888
889static int
890dsa_switch_master_state_change(struct dsa_switch *ds,
891 struct dsa_notifier_master_state_info *info)
892{
893 if (!ds->ops->master_state_change)
894 return 0;
895
896 ds->ops->master_state_change(ds, info->master, info->operational);
897
898 return 0;
899}
900
/* Notifier callback: dispatch a cross-chip DSA event to its handler. A
 * non-zero handler return stops the notifier chain and is propagated to the
 * event's emitter via notifier_from_errno().
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
1000
/* Subscribe @ds to the cross-chip notifier chain of its switch tree. */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
1007
/* Unsubscribe @ds from its tree's notifier chain; failure is only logged
 * since callers cannot act on it during teardown.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}