// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a master device, switching frames via its switch fabric CPU port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>

#include "dsa_priv.h"

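/* The master's ethtool_ops are wrapped (see dsa_master_ethtool_setup()) so
 * that register dumps, statistics and string sets also cover the CPU port
 * of the attached switch. The combined regs blob is laid out as: the
 * master's own registers, a struct ethtool_drvinfo marker, a struct
 * ethtool_regs header, then the CPU port registers.
 */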
static int dsa_master_get_regs_len(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int ret = 0;
	int len;

	if (ops->get_regs_len) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return len;
		ret += len;
	}

	ret += sizeof(struct ethtool_drvinfo);
	ret += sizeof(struct ethtool_regs);

	if (ds->ops->get_regs_len) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return len;
		ret += len;
	}

	return ret;
}

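/* Dump the master's own registers first (when supported), then append the
 * drvinfo/regs headers and the CPU port registers, matching the length
 * computed above.
 */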
static void dsa_master_get_regs(struct net_device *dev,
				struct ethtool_regs *regs, void *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_drvinfo *cpu_info;
	struct ethtool_regs *cpu_regs;
	int port = cpu_dp->index;
	int len;

	if (ops->get_regs_len && ops->get_regs) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return;
		regs->len = len;
		ops->get_regs(dev, regs, data);
		data += regs->len;
	}

	cpu_info = (struct ethtool_drvinfo *)data;
	strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
	data += sizeof(*cpu_info);
	cpu_regs = (struct ethtool_regs *)data;
	data += sizeof(*cpu_regs);

	if (ds->ops->get_regs_len && ds->ops->get_regs) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return;
		cpu_regs->len = len;
		ds->ops->get_regs(ds, port, cpu_regs, data);
	}
}

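/* The master's MAC counters come first in the data array, followed by the
 * CPU port counters reported by the switch driver.
 */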
static void dsa_master_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *stats,
					 uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (ops->get_sset_count && ops->get_ethtool_stats) {
		count = ops->get_sset_count(dev, ETH_SS_STATS);
		ops->get_ethtool_stats(dev, stats, data);
	}

	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, port, data + count);
}

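/* Same layout as dsa_master_get_ethtool_stats(), but for PHY statistics.
 * When the master has an attached phydev and no get_ethtool_phy_stats op
 * of its own, the generic PHY helpers provide the master's share.
 */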
static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
					     struct ethtool_stats *stats,
					     uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (dev->phydev && !ops->get_ethtool_phy_stats) {
		count = phy_ethtool_get_sset_count(dev->phydev);
		if (count >= 0)
			phy_ethtool_get_stats(dev->phydev, stats, data);
	} else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
		count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
		ops->get_ethtool_phy_stats(dev, stats, data);
	}

	if (count < 0)
		count = 0;

	if (ds->ops->get_ethtool_phy_stats)
		ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}

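/* A string set count is the sum of what the master (or its PHY) reports
 * and what the switch driver reports for the CPU port.
 */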
static int dsa_master_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int count = 0;

	if (sset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats)
		count = phy_ethtool_get_sset_count(dev->phydev);
	else if (ops->get_sset_count)
		count = ops->get_sset_count(dev, sset);

	if (count < 0)
		count = 0;

	if (ds->ops->get_sset_count)
		count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);

	return count;
}

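/* Master strings are emitted first, then the CPU port strings, each of the
 * latter prefixed with "pNN_" (e.g. "p00_") so they can be told apart in
 * ethtool -S output.
 */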
static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
				   uint8_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int len = ETH_GSTRING_LEN;
	int mcount = 0, count, i;
	uint8_t pfx[4];
	uint8_t *ndata;

	snprintf(pfx, sizeof(pfx), "p%.2d", port);
	/* We do not want to be NUL-terminated, since this is a prefix */
	pfx[sizeof(pfx) - 1] = '_';

	if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats) {
		mcount = phy_ethtool_get_sset_count(dev->phydev);
		if (mcount < 0)
			mcount = 0;
		else
			phy_ethtool_get_strings(dev->phydev, data);
	} else if (ops->get_sset_count && ops->get_strings) {
		mcount = ops->get_sset_count(dev, stringset);
		if (mcount < 0)
			mcount = 0;
		ops->get_strings(dev, stringset, data);
	}

	if (ds->ops->get_strings) {
		ndata = data + mcount * len;
		/* The switch driver copies ETH_GSTRING_LEN bytes per string;
		 * mangle the output afterwards to prepend the CPU port
		 * prefix constructed above.
		 */
		ds->ops->get_strings(ds, port, stringset, ndata);
		count = ds->ops->get_sset_count(ds, port, stringset);
		if (count < 0)
			return;
		for (i = 0; i < count; i++) {
			memmove(ndata + (i * len + sizeof(pfx)),
				ndata + i * len, len - sizeof(pfx));
			memcpy(ndata + i * len, pfx, sizeof(pfx));
		}
	}
}

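/* Pass ioctls through to the master's own ndo_eth_ioctl(), except that
 * hardware timestamping requests are rejected with -EBUSY whenever a port
 * in the tree can handle them, so that PTP ends up configured on the
 * switch rather than on the master MAC.
 */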
static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct dsa_switch_tree *dst;
	int err = -EOPNOTSUPP;
	struct dsa_port *dp;

	dst = ds->dst;

	switch (cmd) {
	case SIOCGHWTSTAMP:
	case SIOCSHWTSTAMP:
		/* Deny PTP operations on master if there is at least one
		 * switch in the tree that is PTP capable.
		 */
		list_for_each_entry(dp, &dst->ports, list)
			if (dsa_port_supports_hwtstamp(dp, ifr))
				return -EBUSY;
		break;
	}

	if (dev->netdev_ops->ndo_eth_ioctl)
		err = dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd);

	return err;
}

static const struct dsa_netdevice_ops dsa_netdev_ops = {
	.ndo_eth_ioctl = dsa_master_ioctl,
};

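/* Replace the master's ethtool_ops with the wrappers above. The original
 * ops are saved in cpu_dp->orig_ethtool_ops so the wrappers can chain to
 * them and so teardown can restore them. LAG masters are left untouched.
 */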
static int dsa_master_ethtool_setup(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_ops *ops;

	if (netif_is_lag_master(dev))
		return 0;

	ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
	if (cpu_dp->orig_ethtool_ops)
		memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));

	ops->get_regs_len = dsa_master_get_regs_len;
	ops->get_regs = dsa_master_get_regs;
	ops->get_sset_count = dsa_master_get_sset_count;
	ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
	ops->get_strings = dsa_master_get_strings;
	ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;

	dev->ethtool_ops = ops;

	return 0;
}

static void dsa_master_ethtool_teardown(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	if (netif_is_lag_master(dev))
		return;

	dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
	cpu_dp->orig_ethtool_ops = NULL;
}

static void dsa_netdev_ops_set(struct net_device *dev,
			       const struct dsa_netdevice_ops *ops)
{
	if (netif_is_lag_master(dev))
		return;

	dev->dsa_ptr->netdev_ops = ops;
}

/* Keep the master always promiscuous if the tagging protocol requires that
 * (garbles MAC DA) or if it doesn't support unicast filtering, in which case
 * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
 * anyway.
 */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
	const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;

	if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
		return;

	ASSERT_RTNL();

	dev_set_promiscuity(dev, inc);
}

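/* /sys/class/net/<master>/dsa/tagging shows the tagging protocol in use
 * and, on write, attempts to switch the whole tree to a different tagger.
 */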
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	return sprintf(buf, "%s\n",
		       dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}

static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	const struct dsa_device_ops *new_tag_ops, *old_tag_ops;
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	int err;

	old_tag_ops = cpu_dp->tag_ops;
	new_tag_ops = dsa_find_tagger_by_name(buf);
	/* Bad tagger name, or module is not loaded? */
	if (IS_ERR(new_tag_ops))
		return PTR_ERR(new_tag_ops);

	if (new_tag_ops == old_tag_ops)
		/* Drop the temporarily held duplicate reference, since
		 * the DSA switch tree uses this tagger.
		 */
		goto out;

	err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
					old_tag_ops);
	if (err) {
		/* On failure the old tagger is restored, so we don't need the
		 * driver for the new one.
		 */
		dsa_tag_driver_put(new_tag_ops);
		return err;
	}

	/* On success we no longer need the module for the old tagging protocol
	 */
out:
	dsa_tag_driver_put(old_tag_ops);
	return count;
}
static DEVICE_ATTR_RW(tagging);

static struct attribute *dsa_slave_attrs[] = {
	&dev_attr_tagging.attr,
	NULL
};

static const struct attribute_group dsa_group = {
	.name = "dsa",
	.attrs = dsa_slave_attrs,
};

static void dsa_master_reset_mtu(struct net_device *dev)
{
	int err;

	err = dev_set_mtu(dev, ETH_DATA_LEN);
	if (err)
		netdev_dbg(dev,
			   "Unable to reset MTU to exclude DSA overheads\n");
}

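/* Bind a master netdev to its CPU port: link the devices, grow the MTU to
 * fit the tagging overhead, publish cpu_dp through dev->dsa_ptr, force
 * promiscuity where the tagger needs it, wrap the ethtool ops and register
 * the "dsa" sysfs group.
 */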
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
	const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct device_link *consumer_link;
	int mtu, ret;

	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);

	/* The DSA master must use SET_NETDEV_DEV for this to work. */
	if (!netif_is_lag_master(dev)) {
		consumer_link = device_link_add(ds->dev, dev->dev.parent,
						DL_FLAG_AUTOREMOVE_CONSUMER);
		if (!consumer_link)
			netdev_err(dev,
				   "Failed to create a device link to DSA switch %s\n",
				   dev_name(ds->dev));
	}

	/* The switch driver may not implement ->port_change_mtu(), in which
	 * case dsa_slave_change_mtu() will not update the master MTU either,
	 * so we need to do that here.
	 */
	ret = dev_set_mtu(dev, mtu);
	if (ret)
		netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
			    ret, mtu);

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();

	dev->dsa_ptr = cpu_dp;

	dsa_master_set_promiscuity(dev, 1);

	ret = dsa_master_ethtool_setup(dev);
	if (ret)
		goto out_err_reset_promisc;

	dsa_netdev_ops_set(dev, &dsa_netdev_ops);

	ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
	if (ret)
		goto out_err_ndo_teardown;

	return ret;

out_err_ndo_teardown:
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
out_err_reset_promisc:
	dsa_master_set_promiscuity(dev, -1);
	return ret;
}

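/* Undo everything dsa_master_setup() did and restore the standard Ethernet
 * MTU before the device goes back to being a regular netdev.
 */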
void dsa_master_teardown(struct net_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
	dsa_master_reset_mtu(dev);
	dsa_master_set_promiscuity(dev, -1);

	dev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();
}

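/* When the CPU port is part of a LAG, the LAG netdev becomes a DSA master
 * itself the first time a CPU port is attached to it, and the CPU port is
 * then joined to the LAG at the switch level.
 */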
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
			 struct netdev_lag_upper_info *uinfo,
			 struct netlink_ext_ack *extack)
{
	bool master_setup = false;
	int err;

	if (!netdev_uses_dsa(lag_dev)) {
		err = dsa_master_setup(lag_dev, cpu_dp);
		if (err)
			return err;

		master_setup = true;
	}

	err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "CPU port failed to join LAG");
		goto out_master_teardown;
	}

	return 0;

out_master_teardown:
	if (master_setup)
		dsa_master_teardown(lag_dev);
	return err;
}

/* Undo dsa_master_lag_setup(): take the CPU port out of the LAG at the
 * switch level, and tear down the LAG master itself if no DSA user port
 * is left on top of it.
 */
void dsa_master_lag_teardown(struct net_device *lag_dev,
			     struct dsa_port *cpu_dp)
{
	struct net_device *upper;
	struct list_head *iter;

	dsa_port_lag_leave(cpu_dp, lag_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
		if (dsa_slave_dev_check(upper))
			return;

	dsa_master_teardown(lag_dev);
}