Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2022 Schneider-Electric
4 *
5 * Clément Léger <clement.leger@bootlin.com>
6 */
7
8#include <linux/clk.h>
9#include <linux/etherdevice.h>
10#include <linux/if_bridge.h>
11#include <linux/if_ether.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/of_mdio.h>
16#include <net/dsa.h>
17
18#include "rzn1_a5psw.h"
19
/* Binds an ethtool statistic string to the offset of its hardware counter
 * register inside the switch statistics block.
 */
struct a5psw_stats {
	u16 offset;			/* counter register offset */
	const char name[ETH_GSTRING_LEN];	/* ethtool string shown to userspace */
};

/* Build a descriptor from the A5PSW_<name> register macro so that the
 * ethtool string is always the exact register identifier.
 */
#define STAT_DESC(_offset) { \
	.offset = A5PSW_##_offset, \
	.name = __stringify(_offset), \
}
29
30static const struct a5psw_stats a5psw_stats[] = {
31 STAT_DESC(aFramesTransmittedOK),
32 STAT_DESC(aFramesReceivedOK),
33 STAT_DESC(aFrameCheckSequenceErrors),
34 STAT_DESC(aAlignmentErrors),
35 STAT_DESC(aOctetsTransmittedOK),
36 STAT_DESC(aOctetsReceivedOK),
37 STAT_DESC(aTxPAUSEMACCtrlFrames),
38 STAT_DESC(aRxPAUSEMACCtrlFrames),
39 STAT_DESC(ifInErrors),
40 STAT_DESC(ifOutErrors),
41 STAT_DESC(ifInUcastPkts),
42 STAT_DESC(ifInMulticastPkts),
43 STAT_DESC(ifInBroadcastPkts),
44 STAT_DESC(ifOutDiscards),
45 STAT_DESC(ifOutUcastPkts),
46 STAT_DESC(ifOutMulticastPkts),
47 STAT_DESC(ifOutBroadcastPkts),
48 STAT_DESC(etherStatsDropEvents),
49 STAT_DESC(etherStatsOctets),
50 STAT_DESC(etherStatsPkts),
51 STAT_DESC(etherStatsUndersizePkts),
52 STAT_DESC(etherStatsOversizePkts),
53 STAT_DESC(etherStatsPkts64Octets),
54 STAT_DESC(etherStatsPkts65to127Octets),
55 STAT_DESC(etherStatsPkts128to255Octets),
56 STAT_DESC(etherStatsPkts256to511Octets),
57 STAT_DESC(etherStatsPkts1024to1518Octets),
58 STAT_DESC(etherStatsPkts1519toXOctets),
59 STAT_DESC(etherStatsJabbers),
60 STAT_DESC(etherStatsFragments),
61 STAT_DESC(VLANReceived),
62 STAT_DESC(VLANTransmitted),
63 STAT_DESC(aDeferred),
64 STAT_DESC(aMultipleCollisions),
65 STAT_DESC(aSingleCollisions),
66 STAT_DESC(aLateCollisions),
67 STAT_DESC(aExcessiveCollisions),
68 STAT_DESC(aCarrierSenseErrors),
69};
70
/* Write a 32-bit value to a switch register at @offset from the MMIO base. */
static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
{
	writel(value, a5psw->base + offset);
}
75
/* Read a 32-bit switch register at @offset from the MMIO base. */
static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
{
	return readl(a5psw->base + offset);
}
80
81static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
82{
83 u32 reg;
84
85 spin_lock(&a5psw->reg_lock);
86
87 reg = a5psw_reg_readl(a5psw, offset);
88 reg &= ~mask;
89 reg |= val;
90 a5psw_reg_writel(a5psw, offset, reg);
91
92 spin_unlock(&a5psw->reg_lock);
93}
94
/* Report the tagging protocol used on the CPU port; this switch always
 * uses the A5PSW-specific management tag.
 */
static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_RZN1_A5PSW;
}
101
102static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
103 bool enable)
104{
105 u32 rx_match = 0;
106
107 if (enable)
108 rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
109
110 a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
111 A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
112}
113
/* Toggle management-forward isolation for @port. */
static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
{
	/* Enable "management forward" pattern matching, this will forward
	 * packets from this port only towards the management port and thus
	 * isolate the port.
	 */
	a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
}
122
123static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
124{
125 u32 mask = A5PSW_PORT_ENA_TX(port);
126 u32 reg = enable ? mask : 0;
127
128 /* Even though the port TX is disabled through TXENA bit in the
129 * PORT_ENA register, it can still send BPDUs. This depends on the tag
130 * configuration added when sending packets from the CPU port to the
131 * switch port. Indeed, when using forced forwarding without filtering,
132 * even disabled ports will be able to send packets that are tagged.
133 * This allows to implement STP support when ports are in a state where
134 * forwarding traffic should be stopped but BPDUs should still be sent.
135 */
136 a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
137}
138
139static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
140{
141 u32 port_ena = 0;
142
143 if (enable)
144 port_ena |= A5PSW_PORT_ENA_TX_RX(port);
145
146 a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
147 port_ena);
148}
149
150static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
151{
152 int ret;
153
154 a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
155
156 ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
157 !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
158 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
159 if (ret)
160 dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
161
162 return ret;
163}
164
/* Flush all FDB entries learned on @port using the hardware delete-by-port
 * command. Takes lk_lock as required by a5psw_lk_execute_ctrl().
 */
static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
{
	u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);

	mutex_lock(&a5psw->lk_lock);
	a5psw_lk_execute_ctrl(a5psw, &ctrl);
	mutex_unlock(&a5psw->lk_lock);
}
173
174static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
175 bool authorize)
176{
177 u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
178
179 if (authorize)
180 reg |= A5PSW_AUTH_PORT_AUTHORIZED;
181 else
182 reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
183
184 a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
185}
186
/* DSA .port_disable: de-authorize the port, then shut down its datapath. */
static void a5psw_port_disable(struct dsa_switch *ds, int port)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_authorize_set(a5psw, port, false);
	a5psw_port_enable_set(a5psw, port, false);
}
194
/* DSA .port_enable: authorize the port, then enable RX/TX. Always succeeds. */
static int a5psw_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phy)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_authorize_set(a5psw, port, true);
	a5psw_port_enable_set(a5psw, port, true);

	return 0;
}
205
206static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
207{
208 struct a5psw *a5psw = ds->priv;
209
210 new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
211 a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
212
213 return 0;
214}
215
/* DSA .port_max_mtu: largest MTU supported by the switch, same for all ports. */
static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
{
	return A5PSW_MAX_MTU;
}
220
221static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
222 struct phylink_config *config)
223{
224 unsigned long *intf = config->supported_interfaces;
225
226 config->mac_capabilities = MAC_1000FD;
227
228 if (dsa_is_cpu_port(ds, port)) {
229 /* GMII is used internally and GMAC2 is connected to the switch
230 * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
231 */
232 __set_bit(PHY_INTERFACE_MODE_GMII, intf);
233 } else {
234 config->mac_capabilities |= MAC_100 | MAC_10;
235 phy_interface_set_rgmii(intf);
236 __set_bit(PHY_INTERFACE_MODE_RMII, intf);
237 __set_bit(PHY_INTERFACE_MODE_MII, intf);
238 }
239}
240
241static struct phylink_pcs *
242a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
243 phy_interface_t interface)
244{
245 struct dsa_port *dp = dsa_to_port(ds, port);
246 struct a5psw *a5psw = ds->priv;
247
248 if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
249 return a5psw->pcs[port];
250
251 return NULL;
252}
253
254static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
255 unsigned int mode,
256 phy_interface_t interface)
257{
258 struct a5psw *a5psw = ds->priv;
259 u32 cmd_cfg;
260
261 cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
262 cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
263 a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
264}
265
266static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
267 unsigned int mode,
268 phy_interface_t interface,
269 struct phy_device *phydev, int speed,
270 int duplex, bool tx_pause, bool rx_pause)
271{
272 u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
273 A5PSW_CMD_CFG_TX_CRC_APPEND;
274 struct a5psw *a5psw = ds->priv;
275
276 if (speed == SPEED_1000)
277 cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
278
279 if (duplex == DUPLEX_HALF)
280 cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
281
282 cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
283
284 if (!rx_pause)
285 cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
286
287 a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
288}
289
/* DSA .set_ageing_time: convert the requested FDB ageing time (ms) into
 * the hardware AGETIME register value, which counts in units of
 * 1024 * A5PSW_TABLE_ENTRIES switch-clock cycles.
 * Returns -EINVAL if @msecs exceeds what the register can encode.
 */
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct a5psw *a5psw = ds->priv;
	unsigned long rate;
	u64 max, tmp;
	u32 agetime;

	rate = clk_get_rate(a5psw->clk);
	/* Largest representable ageing time in milliseconds */
	max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
		       rate) * 1000;
	if (msecs > max)
		return -EINVAL;

	/* tmp = clock ticks per millisecond; then scale by the hw unit */
	tmp = div_u64(rate, MSEC_PER_SEC);
	agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);

	a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);

	return 0;
}
310
311static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
312{
313 u32 mask = A5PSW_INPUT_LEARN_DIS(port);
314 u32 reg = !learn ? mask : 0;
315
316 a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
317}
318
319static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
320{
321 u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
322 u32 reg = block ? mask : 0;
323
324 a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
325}
326
327static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
328 bool set)
329{
330 u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
331 A5PSW_MCAST_DEF_MASK};
332 int i;
333
334 if (set)
335 a5psw->bridged_ports |= BIT(port);
336 else
337 a5psw->bridged_ports &= ~BIT(port);
338
339 for (i = 0; i < ARRAY_SIZE(offsets); i++)
340 a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
341}
342
/* Switch @port between standalone mode (no learning, no flooding, all
 * traffic forced to the management port) and bridged mode.
 */
static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
				      bool standalone)
{
	a5psw_port_learning_set(a5psw, port, !standalone);
	a5psw_flooding_set_resolution(a5psw, port, !standalone);
	a5psw_port_mgmtfwd_set(a5psw, port, standalone);
}
350
351static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
352 struct dsa_bridge bridge,
353 bool *tx_fwd_offload,
354 struct netlink_ext_ack *extack)
355{
356 struct a5psw *a5psw = ds->priv;
357
358 /* We only support 1 bridge device */
359 if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
360 NL_SET_ERR_MSG_MOD(extack,
361 "Forwarding offload supported for a single bridge");
362 return -EOPNOTSUPP;
363 }
364
365 a5psw->br_dev = bridge.dev;
366 a5psw_port_set_standalone(a5psw, port, false);
367
368 return 0;
369}
370
/* DSA .port_bridge_leave: put @port back in standalone mode and drop the
 * bridge reference once only the CPU port remains in the bridged bitmap.
 */
static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_set_standalone(a5psw, port, true);

	/* No more ports bridged */
	if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
		a5psw->br_dev = NULL;
}
382
383static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
384{
385 bool learning_enabled, rx_enabled, tx_enabled;
386 struct a5psw *a5psw = ds->priv;
387
388 switch (state) {
389 case BR_STATE_DISABLED:
390 case BR_STATE_BLOCKING:
391 case BR_STATE_LISTENING:
392 rx_enabled = false;
393 tx_enabled = false;
394 learning_enabled = false;
395 break;
396 case BR_STATE_LEARNING:
397 rx_enabled = false;
398 tx_enabled = false;
399 learning_enabled = true;
400 break;
401 case BR_STATE_FORWARDING:
402 rx_enabled = true;
403 tx_enabled = true;
404 learning_enabled = true;
405 break;
406 default:
407 dev_err(ds->dev, "invalid STP state: %d\n", state);
408 return;
409 }
410
411 a5psw_port_learning_set(a5psw, port, learning_enabled);
412 a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
413 a5psw_port_tx_enable(a5psw, port, tx_enabled);
414}
415
/* DSA .port_fast_age: flush all addresses learned on @port. */
static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_fdb_flush(a5psw, port);
}
422
423static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
424 u16 *entry)
425{
426 u32 ctrl;
427 int ret;
428
429 a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
430 a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
431
432 ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
433 ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
434 if (ret)
435 return ret;
436
437 *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
438
439 return 0;
440}
441
442static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
443 const unsigned char *addr, u16 vid,
444 struct dsa_db db)
445{
446 struct a5psw *a5psw = ds->priv;
447 union lk_data lk_data = {0};
448 bool inc_learncount = false;
449 int ret = 0;
450 u16 entry;
451 u32 reg;
452
453 ether_addr_copy(lk_data.entry.mac, addr);
454 lk_data.entry.port_mask = BIT(port);
455
456 mutex_lock(&a5psw->lk_lock);
457
458 /* Set the value to be written in the lookup table */
459 ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
460 if (ret)
461 goto lk_unlock;
462
463 lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
464 if (!lk_data.entry.valid) {
465 inc_learncount = true;
466 /* port_mask set to 0x1f when entry is not valid, clear it */
467 lk_data.entry.port_mask = 0;
468 lk_data.entry.prio = 0;
469 }
470
471 lk_data.entry.port_mask |= BIT(port);
472 lk_data.entry.is_static = 1;
473 lk_data.entry.valid = 1;
474
475 a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
476
477 reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
478 ret = a5psw_lk_execute_ctrl(a5psw, ®);
479 if (ret)
480 goto lk_unlock;
481
482 if (inc_learncount) {
483 reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
484 a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
485 }
486
487lk_unlock:
488 mutex_unlock(&a5psw->lk_lock);
489
490 return ret;
491}
492
493static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
494 const unsigned char *addr, u16 vid,
495 struct dsa_db db)
496{
497 struct a5psw *a5psw = ds->priv;
498 union lk_data lk_data = {0};
499 bool clear = false;
500 u16 entry;
501 u32 reg;
502 int ret;
503
504 ether_addr_copy(lk_data.entry.mac, addr);
505
506 mutex_lock(&a5psw->lk_lock);
507
508 ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
509 if (ret)
510 goto lk_unlock;
511
512 lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
513
514 /* Our hardware does not associate any VID to the FDB entries so this
515 * means that if two entries were added for the same mac but for
516 * different VID, then, on the deletion of the first one, we would also
517 * delete the second one. Since there is unfortunately nothing we can do
518 * about that, do not return an error...
519 */
520 if (!lk_data.entry.valid)
521 goto lk_unlock;
522
523 lk_data.entry.port_mask &= ~BIT(port);
524 /* If there is no more port in the mask, clear the entry */
525 if (lk_data.entry.port_mask == 0)
526 clear = true;
527
528 a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
529
530 reg = entry;
531 if (clear)
532 reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
533 else
534 reg |= A5PSW_LK_ADDR_CTRL_WRITE;
535
536 ret = a5psw_lk_execute_ctrl(a5psw, ®);
537 if (ret)
538 goto lk_unlock;
539
540 /* Decrement LEARNCOUNT */
541 if (clear) {
542 reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
543 a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
544 }
545
546lk_unlock:
547 mutex_unlock(&a5psw->lk_lock);
548
549 return ret;
550}
551
552static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
553 dsa_fdb_dump_cb_t *cb, void *data)
554{
555 struct a5psw *a5psw = ds->priv;
556 union lk_data lk_data;
557 int i = 0, ret = 0;
558 u32 reg;
559
560 mutex_lock(&a5psw->lk_lock);
561
562 for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
563 reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
564
565 ret = a5psw_lk_execute_ctrl(a5psw, ®);
566 if (ret)
567 goto out_unlock;
568
569 lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
570 /* If entry is not valid or does not contain the port, skip */
571 if (!lk_data.entry.valid ||
572 !(lk_data.entry.port_mask & BIT(port)))
573 continue;
574
575 lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
576
577 ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
578 if (ret)
579 goto out_unlock;
580 }
581
582out_unlock:
583 mutex_unlock(&a5psw->lk_lock);
584
585 return ret;
586}
587
588static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
589{
590 u32 reg_lo, reg_hi;
591
592 reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
593 /* A5PSW_STATS_HIWORD is latched on stat read */
594 reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
595
596 return ((u64)reg_hi << 32) | reg_lo;
597}
598
599static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
600 uint8_t *data)
601{
602 unsigned int u;
603
604 if (stringset != ETH_SS_STATS)
605 return;
606
607 for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
608 memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
609 ETH_GSTRING_LEN);
610 }
611}
612
613static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
614 uint64_t *data)
615{
616 struct a5psw *a5psw = ds->priv;
617 unsigned int u;
618
619 for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
620 data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
621}
622
623static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
624{
625 if (sset != ETH_SS_STATS)
626 return 0;
627
628 return ARRAY_SIZE(a5psw_stats);
629}
630
/* DSA .get_eth_mac_stats: map the hardware MIB counters onto the standard
 * IEEE 802.3 MAC statistics structure.
 */
static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
				    struct ethtool_eth_mac_stats *mac_stats)
{
	struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
	mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
	mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
	mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
	mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
	mac_stats->AlignmentErrors = RD(aAlignmentErrors);
	mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
	mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
	mac_stats->LateCollisions = RD(aLateCollisions);
	mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
	mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
	mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
	mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
	mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
	mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
	mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
	/* NOTE(review): aDeferred also feeds FramesWithDeferredXmissions above;
	 * the hardware appears to expose a single deferral counter — confirm
	 * against the RZ/N1 manual.
	 */
	mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
	mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
	mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
#undef RD
}
658
/* RMON packet-size histogram buckets; entry N must match the counter
 * written to rmon_stats->hist[N] in a5psw_get_rmon_stats().
 */
static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, A5PSW_MAX_MTU },
	{}
};
669
/* DSA .get_rmon_stats: fill the RMON counters and size histogram.
 * hist[] indices correspond 1:1 with a5psw_rmon_ranges[].
 */
static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
				 struct ethtool_rmon_stats *rmon_stats,
				 const struct ethtool_rmon_hist_range **ranges)
{
	struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
	rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
	rmon_stats->fragments = RD(etherStatsFragments);
	rmon_stats->jabbers = RD(etherStatsJabbers);
	rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
	rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
	rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
	rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
	rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
	rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
	rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
#undef RD

	*ranges = a5psw_rmon_ranges;
}
692
693static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
694 struct ethtool_eth_ctrl_stats *ctrl_stats)
695{
696 struct a5psw *a5psw = ds->priv;
697 u64 stat;
698
699 stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
700 ctrl_stats->MACControlFramesTransmitted = stat;
701 stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
702 ctrl_stats->MACControlFramesReceived = stat;
703}
704
/* DSA .setup: one-time hardware initialization — management port, DSA tag,
 * lookup engine, VLAN table clearing and per-port reset/mode setup.
 * The register programming order below is deliberate; do not reorder.
 */
static int a5psw_setup(struct dsa_switch *ds)
{
	struct a5psw *a5psw = ds->priv;
	int port, vlan, ret;
	struct dsa_port *dp;
	u32 reg;

	/* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
	dsa_switch_for_each_cpu_port(dp, ds) {
		if (dp->index != A5PSW_CPU_PORT) {
			dev_err(a5psw->dev, "Invalid CPU port\n");
			return -EINVAL;
		}
	}

	/* Configure management port */
	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);

	/* Set pattern 0 to forward all frame to mgmt port */
	a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
			 A5PSW_PATTERN_CTRL_MGMTFWD);

	/* Enable port tagging */
	reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
	reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);

	/* Enable normal switch operation */
	reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
	      A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
	      A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
	a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);

	/* Wait for the hardware to finish wiping the lookup table */
	ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
				 !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
				 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
	if (ret) {
		dev_err(a5psw->dev, "Failed to clear lookup table\n");
		return ret;
	}

	/* Reset learn count to 0 */
	reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
	a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);

	/* Clear VLAN resource table */
	reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
	for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
		a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);

	/* Reset all ports */
	dsa_switch_for_each_port(dp, ds) {
		port = dp->index;

		/* Reset the port */
		a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
				 A5PSW_CMD_CFG_SW_RESET);

		/* Enable only CPU port */
		a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));

		if (dsa_port_is_unused(dp))
			continue;

		/* Enable egress flooding and learning for CPU port */
		if (dsa_port_is_cpu(dp)) {
			a5psw_flooding_set_resolution(a5psw, port, true);
			a5psw_port_learning_set(a5psw, port, true);
		}

		/* Enable standalone mode for user ports */
		if (dsa_port_is_user(dp))
			a5psw_port_set_standalone(a5psw, port, true);
	}

	return 0;
}
783
/* DSA operations implemented by this driver. */
static const struct dsa_switch_ops a5psw_switch_ops = {
	.get_tag_protocol = a5psw_get_tag_protocol,
	.setup = a5psw_setup,
	.port_disable = a5psw_port_disable,
	.port_enable = a5psw_port_enable,
	.phylink_get_caps = a5psw_phylink_get_caps,
	.phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
	.phylink_mac_link_down = a5psw_phylink_mac_link_down,
	.phylink_mac_link_up = a5psw_phylink_mac_link_up,
	.port_change_mtu = a5psw_port_change_mtu,
	.port_max_mtu = a5psw_port_max_mtu,
	.get_sset_count = a5psw_get_sset_count,
	.get_strings = a5psw_get_strings,
	.get_ethtool_stats = a5psw_get_ethtool_stats,
	.get_eth_mac_stats = a5psw_get_eth_mac_stats,
	.get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
	.get_rmon_stats = a5psw_get_rmon_stats,
	.set_ageing_time = a5psw_set_ageing_time,
	.port_bridge_join = a5psw_port_bridge_join,
	.port_bridge_leave = a5psw_port_bridge_leave,
	.port_stp_state_set = a5psw_port_stp_state_set,
	.port_fast_age = a5psw_port_fast_age,
	.port_fdb_add = a5psw_port_fdb_add,
	.port_fdb_del = a5psw_port_fdb_del,
	.port_fdb_dump = a5psw_port_fdb_dump,
};
810
811static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
812{
813 u32 status;
814 int err;
815
816 err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
817 !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
818 1000 * USEC_PER_MSEC);
819 if (err)
820 dev_err(a5psw->dev, "MDIO command timeout\n");
821
822 return err;
823}
824
825static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
826{
827 struct a5psw *a5psw = bus->priv;
828 u32 cmd, status;
829 int ret;
830
831 cmd = A5PSW_MDIO_COMMAND_READ;
832 cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
833 cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
834
835 a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
836
837 ret = a5psw_mdio_wait_busy(a5psw);
838 if (ret)
839 return ret;
840
841 ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
842
843 status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
844 if (status & A5PSW_MDIO_CFG_STATUS_READERR)
845 return -EIO;
846
847 return ret;
848}
849
850static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
851 u16 phy_data)
852{
853 struct a5psw *a5psw = bus->priv;
854 u32 cmd;
855
856 cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
857 cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
858
859 a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
860 a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
861
862 return a5psw_mdio_wait_busy(a5psw);
863}
864
865static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
866{
867 unsigned long rate;
868 unsigned long div;
869 u32 cfgstatus;
870
871 rate = clk_get_rate(a5psw->hclk);
872 div = ((rate / mdio_freq) / 2);
873 if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
874 div < A5PSW_MDIO_CLK_DIV_MIN) {
875 dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
876 return -ERANGE;
877 }
878
879 cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
880
881 a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
882
883 return 0;
884}
885
886static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
887{
888 struct device *dev = a5psw->dev;
889 struct mii_bus *bus;
890 u32 mdio_freq;
891 int ret;
892
893 if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
894 mdio_freq = A5PSW_MDIO_DEF_FREQ;
895
896 ret = a5psw_mdio_config(a5psw, mdio_freq);
897 if (ret)
898 return ret;
899
900 bus = devm_mdiobus_alloc(dev);
901 if (!bus)
902 return -ENOMEM;
903
904 bus->name = "a5psw_mdio";
905 bus->read = a5psw_mdio_read;
906 bus->write = a5psw_mdio_write;
907 bus->priv = a5psw;
908 bus->parent = dev;
909 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
910
911 a5psw->mii_bus = bus;
912
913 return devm_of_mdiobus_register(dev, bus, node);
914}
915
916static void a5psw_pcs_free(struct a5psw *a5psw)
917{
918 int i;
919
920 for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
921 if (a5psw->pcs[i])
922 miic_destroy(a5psw->pcs[i]);
923 }
924}
925
926static int a5psw_pcs_get(struct a5psw *a5psw)
927{
928 struct device_node *ports, *port, *pcs_node;
929 struct phylink_pcs *pcs;
930 int ret;
931 u32 reg;
932
933 ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
934 if (!ports)
935 return -EINVAL;
936
937 for_each_available_child_of_node(ports, port) {
938 pcs_node = of_parse_phandle(port, "pcs-handle", 0);
939 if (!pcs_node)
940 continue;
941
942 if (of_property_read_u32(port, "reg", ®)) {
943 ret = -EINVAL;
944 goto free_pcs;
945 }
946
947 if (reg >= ARRAY_SIZE(a5psw->pcs)) {
948 ret = -ENODEV;
949 goto free_pcs;
950 }
951
952 pcs = miic_create(a5psw->dev, pcs_node);
953 if (IS_ERR(pcs)) {
954 dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
955 reg);
956 ret = PTR_ERR(pcs);
957 goto free_pcs;
958 }
959
960 a5psw->pcs[reg] = pcs;
961 of_node_put(pcs_node);
962 }
963 of_node_put(ports);
964
965 return 0;
966
967free_pcs:
968 of_node_put(pcs_node);
969 of_node_put(port);
970 of_node_put(ports);
971 a5psw_pcs_free(a5psw);
972
973 return ret;
974}
975
976static int a5psw_probe(struct platform_device *pdev)
977{
978 struct device *dev = &pdev->dev;
979 struct device_node *mdio;
980 struct dsa_switch *ds;
981 struct a5psw *a5psw;
982 int ret;
983
984 a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
985 if (!a5psw)
986 return -ENOMEM;
987
988 a5psw->dev = dev;
989 mutex_init(&a5psw->lk_lock);
990 spin_lock_init(&a5psw->reg_lock);
991 a5psw->base = devm_platform_ioremap_resource(pdev, 0);
992 if (IS_ERR(a5psw->base))
993 return PTR_ERR(a5psw->base);
994
995 ret = a5psw_pcs_get(a5psw);
996 if (ret)
997 return ret;
998
999 a5psw->hclk = devm_clk_get(dev, "hclk");
1000 if (IS_ERR(a5psw->hclk)) {
1001 dev_err(dev, "failed get hclk clock\n");
1002 ret = PTR_ERR(a5psw->hclk);
1003 goto free_pcs;
1004 }
1005
1006 a5psw->clk = devm_clk_get(dev, "clk");
1007 if (IS_ERR(a5psw->clk)) {
1008 dev_err(dev, "failed get clk_switch clock\n");
1009 ret = PTR_ERR(a5psw->clk);
1010 goto free_pcs;
1011 }
1012
1013 ret = clk_prepare_enable(a5psw->clk);
1014 if (ret)
1015 goto free_pcs;
1016
1017 ret = clk_prepare_enable(a5psw->hclk);
1018 if (ret)
1019 goto clk_disable;
1020
1021 mdio = of_get_child_by_name(dev->of_node, "mdio");
1022 if (of_device_is_available(mdio)) {
1023 ret = a5psw_probe_mdio(a5psw, mdio);
1024 if (ret) {
1025 of_node_put(mdio);
1026 dev_err(dev, "Failed to register MDIO: %d\n", ret);
1027 goto hclk_disable;
1028 }
1029 }
1030
1031 of_node_put(mdio);
1032
1033 ds = &a5psw->ds;
1034 ds->dev = dev;
1035 ds->num_ports = A5PSW_PORTS_NUM;
1036 ds->ops = &a5psw_switch_ops;
1037 ds->priv = a5psw;
1038
1039 ret = dsa_register_switch(ds);
1040 if (ret) {
1041 dev_err(dev, "Failed to register DSA switch: %d\n", ret);
1042 goto hclk_disable;
1043 }
1044
1045 return 0;
1046
1047hclk_disable:
1048 clk_disable_unprepare(a5psw->hclk);
1049clk_disable:
1050 clk_disable_unprepare(a5psw->clk);
1051free_pcs:
1052 a5psw_pcs_free(a5psw);
1053
1054 return ret;
1055}
1056
/* Platform .remove: tear down in reverse probe order — unregister the
 * switch, destroy the PCS instances, then gate the clocks.
 */
static int a5psw_remove(struct platform_device *pdev)
{
	struct a5psw *a5psw = platform_get_drvdata(pdev);

	/* Nothing to do if shutdown already ran (drvdata cleared) */
	if (!a5psw)
		return 0;

	dsa_unregister_switch(&a5psw->ds);
	a5psw_pcs_free(a5psw);
	clk_disable_unprepare(a5psw->hclk);
	clk_disable_unprepare(a5psw->clk);

	return 0;
}
1071
/* Platform .shutdown: quiesce the switch and clear drvdata so a later
 * remove() becomes a no-op.
 */
static void a5psw_shutdown(struct platform_device *pdev)
{
	struct a5psw *a5psw = platform_get_drvdata(pdev);

	if (!a5psw)
		return;

	dsa_switch_shutdown(&a5psw->ds);

	platform_set_drvdata(pdev, NULL);
}
1083
1084static const struct of_device_id a5psw_of_mtable[] = {
1085 { .compatible = "renesas,rzn1-a5psw", },
1086 { /* sentinel */ },
1087};
1088MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
1089
/* Platform driver glue and module metadata. */
static struct platform_driver a5psw_driver = {
	.driver = {
		.name = "rzn1_a5psw",
		.of_match_table = of_match_ptr(a5psw_of_mtable),
	},
	.probe = a5psw_probe,
	.remove = a5psw_remove,
	.shutdown = a5psw_shutdown,
};
module_platform_driver(a5psw_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");