Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *         Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

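/* Return the fastest (smallest non-zero) ageing time configured across the
 * ports of this switch. Several bridges may span the same chip while the
 * hardware typically has a single ageing timer, so the most demanding value
 * wins.
 */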
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
                                                   unsigned int ageing_time)
{
        int i;

        for (i = 0; i < ds->num_ports; ++i) {
                struct dsa_port *dp = dsa_to_port(ds, i);

                if (dp->ageing_time && dp->ageing_time < ageing_time)
                        ageing_time = dp->ageing_time;
        }

        return ageing_time;
}

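/* Handle a DSA_NOTIFIER_AGEING_TIME event. In the switchdev prepare phase,
 * only validate the requested value against the driver's advertised
 * ageing_time_min/ageing_time_max; in the commit phase, program the fastest
 * ageing time seen on this switch through ->set_ageing_time().
 */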
static int dsa_switch_ageing_time(struct dsa_switch *ds,
                                  struct dsa_notifier_ageing_time_info *info)
{
        unsigned int ageing_time = info->ageing_time;
        struct switchdev_trans *trans = info->trans;

        if (switchdev_trans_ph_prepare(trans)) {
                if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
                        return -ERANGE;
                if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
                        return -ERANGE;
                return 0;
        }

        /* Program the fastest ageing time in case of multiple bridges */
        ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

        if (ds->ops->set_ageing_time)
                return ds->ops->set_ageing_time(ds, ageing_time);

        return 0;
}

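/* Decide whether @port of @ds must be reconfigured for an MTU change: on the
 * target switch, the target port itself and any DSA link; on other switches,
 * the CPU and DSA ports, but only when the change is meant to propagate
 * upstream (so the new MTU also fits through the conduit path).
 */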
static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
                                 struct dsa_notifier_mtu_info *info)
{
        if (ds->index == info->sw_index)
                return (port == info->port) || dsa_is_dsa_port(ds, port);

        if (!info->propagate_upstream)
                return false;

        if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
                return true;

        return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
                          struct dsa_notifier_mtu_info *info)
{
        int port, ret;

        if (!ds->ops->port_change_mtu)
                return -EOPNOTSUPP;

        for (port = 0; port < ds->num_ports; port++) {
                if (dsa_switch_mtu_match(ds, port, info)) {
                        ret = ds->ops->port_change_mtu(ds, port, info->mtu);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

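/* Handle a port joining a bridge: on the switch that owns the port, call the
 * driver's ->port_bridge_join(); on every other switch of the fabric, let
 * the driver set up cross-chip bridging through ->crosschip_bridge_join().
 */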
static int dsa_switch_bridge_join(struct dsa_switch *ds,
                                  struct dsa_notifier_bridge_info *info)
{
        if (ds->index == info->sw_index && ds->ops->port_bridge_join)
                return ds->ops->port_bridge_join(ds, info->port, info->br);

        if (ds->index != info->sw_index && ds->ops->crosschip_bridge_join)
                return ds->ops->crosschip_bridge_join(ds, info->sw_index,
                                                      info->port, info->br);

        return 0;
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
                                   struct dsa_notifier_bridge_info *info)
{
        bool unset_vlan_filtering = br_vlan_enabled(info->br);
        int err, i;

        if (ds->index == info->sw_index && ds->ops->port_bridge_leave)
                ds->ops->port_bridge_leave(ds, info->port, info->br);

        if (ds->index != info->sw_index && ds->ops->crosschip_bridge_leave)
                ds->ops->crosschip_bridge_leave(ds, info->sw_index, info->port,
                                                info->br);

        /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
         * event for changing vlan_filtering setting upon slave ports leaving
         * it. That is a good thing, because that lets us handle it and also
         * handle the case where the switch's vlan_filtering setting is global
         * (not per port). When that happens, the correct moment to trigger the
         * vlan_filtering callback is only when the last port leaves this
         * bridge.
         */
        if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
                for (i = 0; i < ds->num_ports; i++) {
                        if (i == info->port)
                                continue;
                        if (dsa_to_port(ds, i)->bridge_dev == info->br) {
                                unset_vlan_filtering = false;
                                break;
                        }
                }
        }
        if (unset_vlan_filtering) {
                struct switchdev_trans trans = {0};

                err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
                                              false, &trans);
                /* dsa_port_vlan_filtering() returns negative error codes */
                if (err && err != -EOPNOTSUPP)
                        return err;
        }
        return 0;
}

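/* FDB entries are installed on the port through which the address is
 * reachable from this switch: dsa_towards_port() resolves to the port itself
 * when the target port is local, or to the DSA link leading towards the
 * switch identified by info->sw_index otherwise.
 */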
static int dsa_switch_fdb_add(struct dsa_switch *ds,
                              struct dsa_notifier_fdb_info *info)
{
        int port = dsa_towards_port(ds, info->sw_index, info->port);

        if (!ds->ops->port_fdb_add)
                return -EOPNOTSUPP;

        return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
                              struct dsa_notifier_fdb_info *info)
{
        int port = dsa_towards_port(ds, info->sw_index, info->port);

        if (!ds->ops->port_fdb_del)
                return -EOPNOTSUPP;

        return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
}

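/* An MDB entry matches the target port on the target switch, plus every DSA
 * link of this switch, so that multicast can be forwarded towards remote
 * chips in the fabric.
 */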
static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
                                 struct dsa_notifier_mdb_info *info)
{
        if (ds->index == info->sw_index && port == info->port)
                return true;

        if (dsa_is_dsa_port(ds, port))
                return true;

        return false;
}

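/* MDB additions follow the switchdev two-phase transaction model: the
 * prepare phase below may fail (for example when the hardware runs out of
 * resources), while the commit phase in dsa_switch_mdb_add() is not expected
 * to fail and does not report an error.
 */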
static int dsa_switch_mdb_prepare(struct dsa_switch *ds,
                                  struct dsa_notifier_mdb_info *info)
{
        int port, err;

        if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
                return -EOPNOTSUPP;

        for (port = 0; port < ds->num_ports; port++) {
                if (dsa_switch_mdb_match(ds, port, info)) {
                        err = ds->ops->port_mdb_prepare(ds, port, info->mdb);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
                              struct dsa_notifier_mdb_info *info)
{
        int port;

        if (switchdev_trans_ph_prepare(info->trans))
                return dsa_switch_mdb_prepare(ds, info);

        if (!ds->ops->port_mdb_add)
                return 0;

        for (port = 0; port < ds->num_ports; port++)
                if (dsa_switch_mdb_match(ds, port, info))
                        ds->ops->port_mdb_add(ds, port, info->mdb);

        return 0;
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
                              struct dsa_notifier_mdb_info *info)
{
        if (!ds->ops->port_mdb_del)
                return -EOPNOTSUPP;

        if (ds->index == info->sw_index)
                return ds->ops->port_mdb_del(ds, info->port, info->mdb);

        return 0;
}

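/* vlan_for_each() callback: report -EBUSY if the VLAN device on top of the
 * slave claims a VID inside the range being added to the bridge.
 */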
static int dsa_port_vlan_device_check(struct net_device *vlan_dev,
                                      int vlan_dev_vid,
                                      void *arg)
{
        struct switchdev_obj_port_vlan *vlan = arg;
        u16 vid;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
                if (vid == vlan_dev_vid)
                        return -EBUSY;
        }

        return 0;
}

static int dsa_port_vlan_check(struct dsa_switch *ds, int port,
                               const struct switchdev_obj_port_vlan *vlan)
{
        const struct dsa_port *dp = dsa_to_port(ds, port);
        int err = 0;

        /* Device is not bridged, let it proceed with the VLAN device
         * creation.
         */
        if (!dp->bridge_dev)
                return err;

        /* dsa_slave_vlan_rx_{add,kill}_vid() cannot use the prepare phase and
         * already checks whether there is an overlapping bridge VLAN entry
         * with the same VID, so here we only need to check that if we are
         * adding a bridge VLAN entry there is not an overlapping VLAN device
         * claiming that VID.
         */
        return vlan_for_each(dp->slave, dsa_port_vlan_device_check,
                             (void *)vlan);
}

static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
                                  struct dsa_notifier_vlan_info *info)
{
        if (ds->index == info->sw_index && port == info->port)
                return true;

        if (dsa_is_dsa_port(ds, port))
                return true;

        return false;
}

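/* Prepare a bridge VLAN entry on every matching port: first make sure no
 * VLAN device on top of the slave already claims one of the VIDs, then let
 * the driver validate and reserve resources via ->port_vlan_prepare().
 */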
static int dsa_switch_vlan_prepare(struct dsa_switch *ds,
                                   struct dsa_notifier_vlan_info *info)
{
        int port, err;

        if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
                return -EOPNOTSUPP;

        for (port = 0; port < ds->num_ports; port++) {
                if (dsa_switch_vlan_match(ds, port, info)) {
                        err = dsa_port_vlan_check(ds, port, info->vlan);
                        if (err)
                                return err;

                        err = ds->ops->port_vlan_prepare(ds, port, info->vlan);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
                               struct dsa_notifier_vlan_info *info)
{
        int port;

        if (switchdev_trans_ph_prepare(info->trans))
                return dsa_switch_vlan_prepare(ds, info);

        if (!ds->ops->port_vlan_add)
                return 0;

        for (port = 0; port < ds->num_ports; port++)
                if (dsa_switch_vlan_match(ds, port, info))
                        ds->ops->port_vlan_add(ds, port, info->vlan);

        return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
                               struct dsa_notifier_vlan_info *info)
{
        if (!ds->ops->port_vlan_del)
                return -EOPNOTSUPP;

        if (ds->index == info->sw_index)
                return ds->ops->port_vlan_del(ds, info->port, info->vlan);

        /* Do not deprogram the DSA links as they may be used as conduit
         * for other VLAN members in the fabric.
         */
        return 0;
}

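/* Notifier callback for the fabric-wide chain: dispatch each DSA event to the
 * matching handler above. Events are raised by the per-port code, which fills
 * the corresponding info structure and fires the tree's notifier chain,
 * roughly like this (illustrative sketch, not part of this file):
 *
 *      struct dsa_notifier_fdb_info info = {
 *              .sw_index = dp->ds->index,
 *              .port = dp->index,
 *              .addr = addr,
 *              .vid = vid,
 *      };
 *
 *      err = raw_notifier_call_chain(&dp->ds->dst->nh,
 *                                    DSA_NOTIFIER_FDB_ADD, &info);
 *      err = notifier_to_errno(err);
 *
 * Every switch registered on the tree then sees the event here.
 */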
static int dsa_switch_event(struct notifier_block *nb,
                            unsigned long event, void *info)
{
        struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
        int err;

        switch (event) {
        case DSA_NOTIFIER_AGEING_TIME:
                err = dsa_switch_ageing_time(ds, info);
                break;
        case DSA_NOTIFIER_BRIDGE_JOIN:
                err = dsa_switch_bridge_join(ds, info);
                break;
        case DSA_NOTIFIER_BRIDGE_LEAVE:
                err = dsa_switch_bridge_leave(ds, info);
                break;
        case DSA_NOTIFIER_FDB_ADD:
                err = dsa_switch_fdb_add(ds, info);
                break;
        case DSA_NOTIFIER_FDB_DEL:
                err = dsa_switch_fdb_del(ds, info);
                break;
        case DSA_NOTIFIER_MDB_ADD:
                err = dsa_switch_mdb_add(ds, info);
                break;
        case DSA_NOTIFIER_MDB_DEL:
                err = dsa_switch_mdb_del(ds, info);
                break;
        case DSA_NOTIFIER_VLAN_ADD:
                err = dsa_switch_vlan_add(ds, info);
                break;
        case DSA_NOTIFIER_VLAN_DEL:
                err = dsa_switch_vlan_del(ds, info);
                break;
        case DSA_NOTIFIER_MTU:
                err = dsa_switch_mtu(ds, info);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        /* Non-switchdev operations cannot be rolled back. If a DSA driver
         * returns an error during the chained call, switch chips may be in an
         * inconsistent state.
         */
        if (err)
                dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
                        event, err);

        return notifier_from_errno(err);
}

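/* Every switch of a tree registers its notifier block on the tree's shared
 * raw notifier chain (dst->nh), so that an event raised for any port fans
 * out to all chips in the fabric through dsa_switch_event() above.
 */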
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
        ds->nb.notifier_call = dsa_switch_event;

        return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
        int err;

        err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
        if (err)
                dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}