// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"

#define AM65_CPSW_SS_BASE		0x0
#define AM65_CPSW_SGMII_BASE		0x100
#define AM65_CPSW_XGMII_BASE		0x2100
#define AM65_CPSW_CPSW_NU_BASE		0x20000
#define AM65_CPSW_NU_PORTS_BASE		0x1000
#define AM65_CPSW_NU_FRAM_BASE		0x12000
#define AM65_CPSW_NU_STATS_BASE		0x1a000
#define AM65_CPSW_NU_ALE_BASE		0x1e000
#define AM65_CPSW_NU_CPTS_BASE		0x1d000

#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200

#define AM65_CPSW_MAX_PORTS	8

#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
#define AM65_CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

#define AM65_CPSW_REG_CTL		0x004
#define AM65_CPSW_REG_STAT_PORT_EN	0x014
#define AM65_CPSW_REG_PTYPE		0x018

#define AM65_CPSW_P0_REG_CTL			0x004
#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008

#define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
#define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024

#define AM65_CPSW_PORTN_REG_SA_L		0x308
#define AM65_CPSW_PORTN_REG_SA_H		0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL		0x310
#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2	0x31C

#define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
#define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)

/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)

/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)

/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO	BIT(23)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))

#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)

#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)

#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
/* Number of TX/RX descriptors */
#define AM65_CPSW_MAX_TX_DESC	500
#define AM65_CPSW_MAX_RX_DESC	500

#define AM65_CPSW_NAV_PS_DATA_SIZE 16
#define AM65_CPSW_NAV_SW_DATA_SIZE 16

#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
			 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

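/* Program a slave port's unicast MAC address into its SA_H/SA_L
 * registers: the first four octets go to SA_H, the last two to SA_L.
 */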
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				      const u8 *dev_addr)
{
	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);

	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}

static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
	cpsw_sl_reset(port->slave.mac_sl, 100);
	/* Max length register has to be restored after MAC SL reset */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
}

static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
{
	common->nuss_ver = readl(common->ss_base);
	common->cpsw_ver = readl(common->cpsw_base);
	dev_info(common->dev,
		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u\n",
		 common->nuss_ver,
		 common->cpsw_ver,
		 common->port_num + 1);
}

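/* PHY adjust_link callback: mirror the PHY state (speed, duplex,
 * pause) into the MAC_SL control register and open or close the ALE
 * port for forwarding accordingly.
 */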
void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct phy_device *phy = port->slave.phy;
	u32 mac_control = 0;

	if (!phy)
		return;

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in-band mode only */
			mac_control |= CPSW_SL_CTL_EXT_EN;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* RGMII speed is 100M if !CPSW_SL_CTL_GIG */

		/* rx_pause/tx_pause */
		if (port->slave.rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (port->slave.tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		am65_cpsw_qos_link_up(ndev, phy->speed);
		netif_tx_wake_all_queues(ndev);
	} else {
		int tmo;

		/* disable forwarding */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);

		tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
		dev_dbg(common->dev, "done mac_sl %08x tmo %d\n",
			cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
			tmo);

		cpsw_sl_ctl_reset(port->slave.mac_sl);

		am65_cpsw_qos_link_down(ndev);
		netif_tx_stop_all_queues(ndev);
	}

	phy_print_status(phy);
}

static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
					    __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask, unreg_mcast = 0;
	int ret;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	if (!vid)
		unreg_mcast = port_mask;
	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
	ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
				unreg_mcast, port_mask, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
					     __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	int ret;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(common->ale, vid, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
					   bool promisc)
{
	struct am65_cpsw_common *common = port->common;

	if (promisc) {
		/* Enable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 1);
		dev_dbg(common->dev, "promisc enabled\n");
	} else {
		/* Disable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 0);
		dev_dbg(common->dev, "promisc disabled\n");
	}
}

static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	bool promisc;

	promisc = !!(ndev->flags & IFF_PROMISC);
	am65_cpsw_slave_set_promisc_2g(port, promisc);

	if (promisc)
		return;

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(common->ale,
			      ndev->flags & IFF_ALLMULTI, port->port_id);

	port_mask = ALE_PORT_HOST;
	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(common->ale, port_mask, -1);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_ale_add_mcast(common->ale, ha->addr,
					   port_mask, 0, 0, 0);
		}
	}
}

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned long trans_start;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];
	trans_start = netif_txq->trans_start;

	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
		   txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   jiffies_to_msecs(jiffies - trans_start),
		   dql_avail(&netif_txq->dql),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));

	if (netif_tx_queue_stopped(netif_txq)) {
		/* try recover if stopped by us */
		txq_trans_update(netif_txq);
		netif_tx_wake_queue(netif_txq);
	}
}

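/* Allocate a host descriptor, DMA-map the skb's data buffer for
 * receive and queue both on the RX free-descriptor queue. The skb
 * pointer is stashed in the descriptor's software data so it can be
 * recovered on completion.
 */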
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
				  struct sk_buff *skb)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	u32 pkt_len = skb_tailroom(skb);
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void *swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		dev_err(dev, "Failed to map rx skb buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*((void **)swdata) = skb;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
}

void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	u32 val, pri_map;

	/* P0 set Receive Priority Type */
	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);

	if (common->pf_p0_rx_ptype_rrobin) {
		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* Enet Port FIFOs work in fixed priority mode only, so
		 * reset P0_Rx_Pri_Map so that all packets go to Enet FIFO 0
		 */
		pri_map = 0x0;
	} else {
		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* restore P0_Rx_Pri_Map */
		pri_map = 0x76543210;
	}

	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}

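/* One-time bring-up of the resources shared by all ports: switch
 * control registers, ALE, RX descriptor pre-fill and the TX/RX DMA
 * channels. Subsequent openers just see a non-zero usage_count and
 * return early.
 */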
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
				      netdev_features_t features)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	int port_idx, i, ret;
	struct sk_buff *skb;
	u32 val, port_mask;

	if (common->usage_count)
		return 0;

	/* Control register */
	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_REG_CTL);
	/* Max length register */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
	/* set base flow_id */
	writel(common->rx_flow_id_base,
	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
	/* enable TX csum offload */
	if (features & NETIF_F_HW_CSUM)
		writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
		       host_p->port_base + AM65_CPSW_P0_REG_CTL);

	am65_cpsw_nuss_set_p0_ptype(common);

	/* enable statistics */
	val = BIT(HOST_PORT_NUM);
	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
		struct am65_cpsw_port *port = &common->ports[port_idx];

		if (!port->disabled)
			val |= BIT(port->port_id);
	}
	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);

	cpsw_ale_start(common->ale);

	/* limit to one RX flow only */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ID, 0);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ENABLE, 1);
	if (AM65_CPSW_IS_CPSW2G(common))
		cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
				     ALE_PORT_NOLEARN, 1);
	/* switch to vlan aware mode */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* default vlan cfg: create mask based on enabled ports */
	port_mask = GENMASK(common->port_num, 0) &
		    ~common->disabled_ports_mask;

	cpsw_ale_add_vlan(common->ale, 0, port_mask,
			  port_mask, port_mask,
			  port_mask & ~ALE_PORT_HOST);

	for (i = 0; i < common->rx_chns.descs_num; i++) {
		skb = __netdev_alloc_skb_ip_align(NULL,
						  AM65_CPSW_MAX_PACKET_SIZE,
						  GFP_KERNEL);
		if (!skb) {
			dev_err(common->dev, "cannot allocate skb\n");
			return -ENOMEM;
		}

		ret = am65_cpsw_nuss_rx_push(common, skb);
		if (ret < 0) {
			dev_err(common->dev,
				"cannot submit skb to channel rx, error %d\n",
				ret);
			kfree_skb(skb);
			return ret;
		}
		kmemleak_not_leak(skb);
	}
	k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);

	for (i = 0; i < common->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
		if (ret)
			return ret;
		napi_enable(&common->tx_chns[i].napi_tx);
	}

	napi_enable(&common->napi_rx);

	dev_dbg(common->dev, "cpsw_nuss started\n");
	return 0;
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);

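/* Tear down the shared switch state once the last user is closing:
 * drain the TX channels via the teardown completion, reset and
 * disable all DMA channels, then stop the ALE.
 */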
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
	int i;

	if (common->usage_count != 1)
		return 0;

	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	/* shutdown tx channels */
	atomic_set(&common->tdown_cnt, common->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	reinit_completion(&common->tdown_complete);

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);

	i = wait_for_completion_timeout(&common->tdown_complete,
					msecs_to_jiffies(1000));
	if (!i)
		dev_err(common->dev, "tx timeout\n");
	for (i = 0; i < common->tx_ch_num; i++)
		napi_disable(&common->tx_chns[i].napi_tx);

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
					  &common->tx_chns[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
	}

	k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
	napi_disable(&common->napi_rx);

	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
		k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
					  &common->rx_chns,
					  am65_cpsw_nuss_rx_cleanup, !!i);

	k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);

	cpsw_ale_stop(common->ale);

	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	dev_dbg(common->dev, "cpsw_nuss stopped\n");
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (port->slave.phy)
		phy_stop(port->slave.phy);

	netif_tx_stop_all_queues(ndev);

	if (port->slave.phy) {
		phy_disconnect(port->slave.phy);
		port->slave.phy = NULL;
	}

	ret = am65_cpsw_nuss_common_stop(common);
	if (ret)
		return ret;

	common->usage_count--;
	pm_runtime_put(common->dev);
	return 0;
}

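/* ndo_open for a slave port: set the real queue counts, bring up the
 * shared switch state, program the port's MAC and ALE entries, then
 * connect and start the PHY.
 */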
static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	int ret, i;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
	if (ret) {
		dev_err(common->dev, "cannot set real number of tx queues\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
	if (ret) {
		dev_err(common->dev, "cannot set real number of rx queues\n");
		return ret;
	}

	for (i = 0; i < common->tx_ch_num; i++)
		netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));

	ret = am65_cpsw_nuss_common_open(common, ndev->features);
	if (ret)
		return ret;

	common->usage_count++;

	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);

	if (port->slave.mac_only)
		/* enable mac-only mode on port */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY, 1);
	if (AM65_CPSW_IS_CPSW2G(common))
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_NOLEARN, 1);

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, ALE_SECURE, 0);
	cpsw_ale_add_mcast(common->ale, ndev->broadcast,
			   port_mask, 0, 0, ALE_MCAST_FWD_2);

	/* mac_sl should be configured via phy-link interface */
	am65_cpsw_sl_ctl_reset(port);

	ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
			       port->slave.phy_if);
	if (ret)
		goto error_cleanup;

	if (port->slave.phy_node) {
		port->slave.phy = of_phy_connect(ndev,
						 port->slave.phy_node,
						 &am65_cpsw_nuss_adjust_link,
						 0, port->slave.phy_if);
		if (!port->slave.phy) {
			dev_err(common->dev, "phy %pOF not found on slave %d\n",
				port->slave.phy_node,
				port->port_id);
			ret = -ENODEV;
			goto error_cleanup;
		}
	}

	phy_attached_info(port->slave.phy);
	phy_start(port->slave.phy);

	return 0;

error_cleanup:
	am65_cpsw_nuss_ndo_slave_stop(ndev);
	return ret;
}

static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);

	dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	dev_kfree_skb_any(skb);
}

static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	ns = ((u64)psdata[1] << 32) | psdata[0];

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}

/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)

static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
{
	/* HW can verify IPv4/IPv6 TCP/UDP packet checksums.
	 * The csum information is provided in the psdata[2] word:
	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
	 * bits - indicate IPv4/IPv6 packet
	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
	 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non-fragmented packets
	 * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return;

	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
	    !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
		/* csum for fragmented packets is unsupported */
		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

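/* Process one received packet on the given flow: pop a completed
 * descriptor, hand the skb to the stack with checksum/timestamp info
 * and push a freshly allocated skb back to the free-descriptor queue.
 */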
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
				     u32 flow_idx)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct am65_cpsw_ndev_stats *stats;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	struct am65_cpsw_port *port;
	struct net_device *ndev;
	void **swdata;
	u32 *psdata;
	int ret = 0;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			dev_err(dev, "RX: pop chn fail %d\n", ret);
		return ret;
	}

	if (desc_dma & 0x1) {
		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
		return 0;
	}

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
		__func__, flow_idx, &desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
	port = am65_common_get_port(common, port_id);
	ndev = port->ndev;
	skb->dev = ndev;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* add RX timestamp */
	if (port->rx_ts_enabled)
		am65_cpsw_nuss_rx_ts(skb, psdata);
	csum_info = psdata[2];
	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);

	dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
	if (new_skb) {
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		am65_cpsw_nuss_rx_csum(skb, csum_info);
		napi_gro_receive(&common->napi_rx, skb);

		ndev_priv = netdev_priv(ndev);
		stats = this_cpu_ptr(ndev_priv->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_dropped++;
		return 0;
	}

	ret = am65_cpsw_nuss_rx_push(common, new_skb);
	if (WARN_ON(ret < 0)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
	int flow = AM65_CPSW_MAX_RX_FLOWS;
	int cur_budget, ret;
	int num_rx = 0;

	/* process every flow */
	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = am65_cpsw_nuss_rx_packets(common, flow);
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
		enable_irq(common->rx_chns.irq);

	return num_rx;
}

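/* Unmap and free a (possibly chained) TX host descriptor: the first
 * descriptor owns the linear buffer mapping, linked descriptors own
 * page-fragment mappings.
 */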
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct device *dev,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);

	dma_unmap_single(dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);

		dma_unmap_page(dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);

	dev_kfree_skb_any(skb);
}

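/* Reap completed TX descriptors for one channel: free skbs, update
 * per-cpu stats and BQL, and wake the queue if it was stopped for
 * lack of descriptors.
 */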
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
					   int chn, unsigned int budget)
{
	struct cppi5_host_desc_t *desc_tx;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;
	void **swdata;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		struct am65_cpsw_ndev_priv *ndev_priv;
		struct am65_cpsw_ndev_stats *stats;

		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		if (desc_dma & 0x1) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);
		skb = *(swdata);
		am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);

		ndev = skb->dev;

		am65_cpts_tx_timestamp(common->cpts, skb);

		ndev_priv = netdev_priv(ndev);
		stats = this_cpu_ptr(ndev_priv->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		total_bytes += skb->len;
		napi_consume_skb(skb, budget);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);

	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the queue was stopped because the TX DMA ran out of
		 * descriptors, wake it now that free descriptors are
		 * available again.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);

		__netif_tx_unlock(netif_txq);
	}
	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
	int num_tx;

	num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
						 budget);
	num_tx = min(num_tx, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		enable_irq(tx_chn->irq);
	}

	return num_tx;
}

static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_common *common = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&common->napi_rx);

	return IRQ_HANDLED;
}

static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

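/* ndo_start_xmit: map the linear part into the first host descriptor,
 * chain one linked descriptor per page fragment, fill the checksum
 * offload info into the protocol-specific words, then push the chain
 * to the TX channel.
 */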
static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
						 struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	dma_addr_t desc_dma, buf_dma;
	int ret, q_idx, i;
	void **swdata;
	u32 *psdata;
	u32 pkt_len;

	/* padding enabled in hw */
	pkt_len = skb_headlen(skb);

	/* SKB TX timestamp */
	if (port->tx_ts_enabled)
		am65_cpts_prep_tx_timestamp(common->cpts, skb);

	q_idx = skb_get_queue_mapping(skb);
	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);

	tx_chn = &common->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(dev, skb->data, pkt_len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_dma))) {
		dev_err(dev, "Failed to map tx skb buffer\n");
		ndev->stats.tx_errors++;
		goto err_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		dev_dbg(dev, "Failed to allocate descriptor\n");
		dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto busy_stop_q;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
	cppi5_hdesc_set_pkttype(first_desc, 0x7);
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);

	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*(swdata) = skb;
	psdata = cppi5_hdesc_get_psdata(first_desc);

	/* HW csum offload if enabled */
	psdata[2] = 0;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int cs_start, cs_offset;

		cs_start = skb_transport_offset(skb);
		cs_offset = cs_start + skb->csum_offset;
		/* HW numbers bytes starting from 1 */
		psdata[2] = ((cs_offset + 1) << 24) |
			    ((cs_start + 1) << 16) | (skb->len - cs_start);
		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
	}

	if (!skb_is_nonlinear(skb))
		goto done_tx;

	dev_dbg(dev, "fragmented SKB\n");

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			dev_err(dev, "Failed to allocate descriptor\n");
			goto busy_free_descs;
		}

		buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, buf_dma))) {
			dev_err(dev, "Failed to map tx skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ndev->stats.tx_errors++;
			goto err_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON(pkt_len != skb->len);

done_tx:
	skb_tx_timestamp(skb);

	/* report BQL before sending the packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		dev_err(dev, "can't push desc %d\n", ret);
		/* inform BQL */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		goto err_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue is visible to other cpus */
		smp_mb__after_atomic();
		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);

		/* re-check for SMP */
		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS) {
			netif_tx_wake_queue(netif_txq);
			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
		}
	}

	return NETDEV_TX_OK;

err_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
err_free_skb:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

busy_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
busy_stop_q:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
						    void *addr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct sockaddr *sockaddr = (struct sockaddr *)addr;
	int ret;

	ret = eth_prepare_mac_addr_change(ndev, addr);
	if (ret < 0)
		return ret;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, 0, 0);
	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
			   HOST_PORT_NUM, ALE_SECURE, 0);

	am65_cpsw_port_set_sl_mac(port, sockaddr->sa_data);
	eth_commit_mac_addr_change(ndev, sockaddr);

	pm_runtime_put(common->dev);

	return 0;
}

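/* SIOCSHWTSTAMP handler: validate the requested tx_type/rx_filter,
 * program the per-port timestamping registers (sequence-id offset,
 * LTYPE matching, event-message-type mask) and enable or disable RX
 * timestamping in the CPTS.
 */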
static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->rx_ts_enabled = false;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		port->rx_ts_enabled = true;
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);

	/* cfg TX timestamp */
	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;

	ts_vlan_ltype = ETH_P_8021Q;

	ts_ctrl_ltype2 = ETH_P_1588 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;

	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;

	if (port->tx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;

	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
	writel(ts_vlan_ltype, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
	writel(ts_ctrl_ltype2, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);

	/* en/dis RX timestamp */
	am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = port->tx_ts_enabled ?
		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = port->rx_ts_enabled ?
			HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
					  struct ifreq *req, int cmd)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_set(ndev, req);
	case SIOCGHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_get(ndev, req);
	}

	if (!port->slave.phy)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(port->slave.phy, req, cmd);
}

static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
					 struct rtnl_link_stats64 *stats)
{
	struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct am65_cpsw_ndev_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
						 netdev_features_t features)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	netdev_features_t changes = features ^ ndev->features;
	struct am65_cpsw_host *host_p;

	host_p = am65_common_get_host(common);

	if (changes & NETIF_F_HW_CSUM) {
		bool enable = !!(features & NETIF_F_HW_CSUM);

		dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
			 enable ? "ON" : "OFF");
		if (enable)
			writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
			       host_p->port_base + AM65_CPSW_P0_REG_CTL);
		else
			writel(0,
			       host_p->port_base + AM65_CPSW_P0_REG_CTL);
	}

	return 0;
}

static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
	.ndo_get_stats64	= am65_cpsw_nuss_ndo_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
	.ndo_do_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
	.ndo_set_features	= am65_cpsw_nuss_ndo_slave_set_features,
	.ndo_setup_tc		= am65_cpsw_qos_ndo_setup_tc,
};

static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
{
	struct am65_cpsw_common *common = port->common;

	if (!port->disabled)
		return;

	common->disabled_ports_mask |= BIT(port->port_id);
	cpsw_ale_control_set(common->ale, port->port_id,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	cpsw_sl_reset(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);
}

static void am65_cpsw_nuss_free_tx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	int i;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	int i;

	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (tx_chn->irq)
			devm_free_irq(dev, tx_chn->irq, tx_chn);

		netif_napi_del(&tx_chn->napi_tx);

		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

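/* Allocate the TX descriptor pools and request the TX DMA channels
 * from the K3 UDMA glue layer; a devm action releases them on driver
 * detach or error.
 */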
static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
{
	u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
	struct device *dev = common->dev;
	struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0
	};
	u32 hdesc_size;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;
	tx_cfg.tx_cfg.size = max_desc_num;
	tx_cfg.txcq_cfg.size = max_desc_num;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "tx%d", i);

		tx_chn->common = common;
		tx_chn->id = i;
		tx_chn->descs_num = max_desc_num;
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->tx_chn_name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			dev_err(dev, "Failed to create pool %d\n", ret);
			goto err;
		}

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev,
						    tx_chn->tx_chn_name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			dev_err(dev, "Failed to request tx dma channel %d\n",
				ret);
			goto err;
		}

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq <= 0) {
			dev_err(dev, "Failed to get tx dma irq %d\n",
				tx_chn->irq);
			ret = -ENXIO;
			goto err;
		}

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
		return i;
	}

	return ret;
}

static void am65_cpsw_nuss_free_rx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	struct am65_cpsw_rx_chn *rx_chn;

	rx_chn = &common->rx_chns;

	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);

	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
}

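/* Set up the single RX channel and its flows: one descriptor pool is
 * shared by all flows, and flow 0 allocates the free-descriptor ring
 * that the remaining flows reuse via K3_RINGACC_RING_SHARED.
 */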
static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
	u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
	struct device *dev = common->dev;
	u32 hdesc_size;
	u32 fdqring_id;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
	rx_cfg.flow_id_base = common->rx_flow_id_base;

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
							  rx_chn->descs_num,
							  hdesc_size, "rx");
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		dev_err(dev, "Failed to create rx pool %d\n", ret);
		goto err;
	}

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		dev_err(dev, "Failed to request rx dma channel %d\n", ret);
		goto err;
	}

	common->rx_flow_id_base =
			k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_MESSAGE,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
			goto err;
		}
		if (!i)
			fdqring_id =
				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								i);

		rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);

		if (rx_chn->irq <= 0) {
			dev_err(dev, "Failed to get rx dma irq %d\n",
				rx_chn->irq);
			ret = -ENXIO;
			goto err;
		}
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
		return i;
	}

	return ret;
}

static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);

	host_p->common = common;
	host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
	host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;

	return 0;
}

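/* Read the factory MAC address for a port from the efuse area
 * referenced by the "ti,syscon-efuse" phandle; returns 0 and leaves
 * mac_addr untouched when the property is absent.
 */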
static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
					   int slave, u8 *mac_addr)
{
	u32 mac_lo, mac_hi, offset;
	struct regmap *syscon;
	int ret;

	syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
	if (IS_ERR(syscon)) {
		if (PTR_ERR(syscon) == -ENODEV)
			return 0;
		return PTR_ERR(syscon);
	}

	ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
					 &offset);
	if (ret)
		return ret;

	regmap_read(syscon, offset, &mac_lo);
	regmap_read(syscon, offset + 4, &mac_hi);

	mac_addr[0] = (mac_hi >> 8) & 0xff;
	mac_addr[1] = mac_hi & 0xff;
	mac_addr[2] = (mac_lo >> 24) & 0xff;
	mac_addr[3] = (mac_lo >> 16) & 0xff;
	mac_addr[4] = (mac_lo >> 8) & 0xff;
	mac_addr[5] = mac_lo & 0xff;

	return 0;
}

static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct device_node *node;
	struct am65_cpts *cpts;
	void __iomem *reg_base;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return 0;

	node = of_get_child_by_name(dev->of_node, "cpts");
	if (!node) {
		dev_err(dev, "%s cpts not found\n", __func__);
		return -ENOENT;
	}

	reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
	cpts = am65_cpts_create(dev, reg_base, node);
	if (IS_ERR(cpts)) {
		int ret = PTR_ERR(cpts);

		if (ret == -EOPNOTSUPP) {
			dev_info(dev, "cpts disabled\n");
			return 0;
		}

		dev_err(dev, "cpts create err %d\n", ret);
		return ret;
	}
	common->cpts = cpts;

	return 0;
}

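/* Walk the "ethernet-ports" DT node and initialize each slave port:
 * register bases, PHY handles, MAC_SL instance and MAC address (DT
 * property, efuse, or random as a last resort).
 */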
static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
{
	struct device_node *node, *port_np;
	struct device *dev = common->dev;
	int ret;

	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
	if (!node)
		return -ENOENT;

	for_each_child_of_node(node, port_np) {
		struct am65_cpsw_port *port;
		const void *mac_addr;
		u32 port_id;

		/* it is not a slave port node, continue */
		if (strcmp(port_np->name, "port"))
			continue;

		ret = of_property_read_u32(port_np, "reg", &port_id);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				port_np, ret);
			return ret;
		}

		if (!port_id || port_id > common->port_num) {
			dev_err(dev, "%pOF has invalid port_id %u %s\n",
				port_np, port_id, port_np->name);
			return -EINVAL;
		}

		port = am65_common_get_port(common, port_id);
		port->port_id = port_id;
		port->common = common;
		port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
				  AM65_CPSW_NU_PORTS_OFFSET * (port_id);
		port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
				  (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
		port->name = of_get_property(port_np, "label", NULL);
		port->fetch_ram_base =
				common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
				(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));

		port->disabled = !of_device_is_available(port_np);
		if (port->disabled)
			continue;

		port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
		if (IS_ERR(port->slave.ifphy)) {
			ret = PTR_ERR(port->slave.ifphy);
			dev_err(dev, "%pOF error retrieving port phy: %d\n",
				port_np, ret);
			return ret;
		}

		port->slave.mac_only =
				of_property_read_bool(port_np, "ti,mac-only");

		/* get phy/link info */
		if (of_phy_is_fixed_link(port_np)) {
			ret = of_phy_register_fixed_link(port_np);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
						port_np, ret);
				return ret;
			}
			port->slave.phy_node = of_node_get(port_np);
		} else {
			port->slave.phy_node =
				of_parse_phandle(port_np, "phy-handle", 0);
		}

		if (!port->slave.phy_node) {
			dev_err(dev,
				"slave[%d] no phy found\n", port_id);
			return -ENODEV;
		}

		ret = of_get_phy_mode(port_np, &port->slave.phy_if);
		if (ret) {
			dev_err(dev, "%pOF read phy-mode err %d\n",
				port_np, ret);
			return ret;
		}

		port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
		if (IS_ERR(port->slave.mac_sl))
			return PTR_ERR(port->slave.mac_sl);

		mac_addr = of_get_mac_address(port_np);
		if (!IS_ERR(mac_addr)) {
			ether_addr_copy(port->slave.mac_addr, mac_addr);
		} else if (am65_cpsw_am654_get_efuse_macid(port_np,
							   port->port_id,
							   port->slave.mac_addr) ||
			   !is_valid_ether_addr(port->slave.mac_addr)) {
			random_ether_addr(port->slave.mac_addr);
			dev_err(dev, "Using random MAC address\n");
		}
	}
	of_node_put(node);

	return 0;
}

1815static void am65_cpsw_pcpu_stats_free(void *data)
1816{
1817 struct am65_cpsw_ndev_stats __percpu *stats = data;
1818
1819 free_percpu(stats);
1820}
1821
static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret;

	port = am65_common_get_port(common, 1);

	/* alloc netdev */
	port->ndev = devm_alloc_etherdev_mqs(common->dev,
					     sizeof(struct am65_cpsw_ndev_priv),
					     AM65_CPSW_MAX_TX_QUEUES,
					     AM65_CPSW_MAX_RX_QUEUES);
	if (!port->ndev) {
		dev_err(dev, "error allocating slave net_device %u\n",
			port->port_id);
		return -ENOMEM;
	}

	ndev_priv = netdev_priv(port->ndev);
	ndev_priv->port = port;
	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
	SET_NETDEV_DEV(port->ndev, dev);

	ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);

	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
	port->ndev->hw_features = NETIF_F_SG |
				  NETIF_F_RXCSUM |
				  NETIF_F_HW_CSUM |
				  NETIF_F_HW_TC;
	port->ndev->features = port->ndev->hw_features |
			       NETIF_F_HW_VLAN_CTAG_FILTER;
	port->ndev->vlan_features |= NETIF_F_SG;
	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g;
	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;

	/* Disable TX checksum offload by default due to HW bug */
	if (common->pdata->quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
		port->ndev->features &= ~NETIF_F_HW_CSUM;

	ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
	if (!ndev_priv->stats)
		return -ENOMEM;

	ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
				       ndev_priv->stats);
	if (ret) {
		dev_err(dev, "Failed to add percpu stat free action %d\n", ret);
		return ret;
	}

	netif_napi_add(port->ndev, &common->napi_rx,
		       am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);

	common->pf_p0_rx_ptype_rrobin = false;

	return 0;
}

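/* Register one TX NAPI context and request the completion IRQ for each
 * configured TX DMA channel.
 */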
static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int i, ret = 0;

	port = am65_common_get_port(common, 1);

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
				  am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);

		ret = devm_request_irq(dev, tx_chn->irq,
				       am65_cpsw_nuss_tx_irq,
				       IRQF_TRIGGER_HIGH,
				       tx_chn->tx_chn_name, tx_chn);
		if (ret) {
			dev_err(dev, "failure requesting tx%u irq %u, %d\n",
				tx_chn->id, tx_chn->irq, ret);
			goto err;
		}
	}

err:
	return ret;
}

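/* Attach NAPI contexts, request the RX IRQ and register the port's
 * net_device.
 */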
static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret = 0;

	port = am65_common_get_port(common, 1);
	ret = am65_cpsw_nuss_ndev_add_napi_2g(common);
	if (ret)
		goto err;

	ret = devm_request_irq(dev, common->rx_chns.irq,
			       am65_cpsw_nuss_rx_irq,
			       IRQF_TRIGGER_HIGH, dev_name(dev), common);
	if (ret) {
		dev_err(dev, "failure requesting rx irq %u, %d\n",
			common->rx_chns.irq, ret);
		goto err;
	}

	ret = register_netdev(port->ndev);
	if (ret)
		dev_err(dev, "error registering slave net device %d\n", ret);

	/* can't auto unregister ndev using devm_add_action() due to
	 * devres release sequence in DD core for DMA
	 */
err:
	return ret;
}

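/* Re-init the TX DMA channels with a new channel count, then re-add their
 * NAPI contexts and IRQ handlers.
 */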
int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
{
	int ret;

	common->tx_ch_num = num_tx;
	ret = am65_cpsw_nuss_init_tx_chns(common);
	if (ret)
		return ret;

	return am65_cpsw_nuss_ndev_add_napi_2g(common);
}

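/* Unregister any net_devices that were registered during probe */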
static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		if (port->ndev)
			unregister_netdev(port->ndev);
	}
}

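/* SoC-specific data selected via the of_device_id .data pointer: AM65x
 * SR1.0 needs the I2027 workaround (TX checksum offload disabled due to
 * the HW bug noted above), J721E does not.
 */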
static const struct am65_cpsw_pdata am65x_sr1_0 = {
	.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
};

static const struct am65_cpsw_pdata j721e_sr1_0 = {
	.quirks = 0,
};

static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
	{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0 },
	{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_sr1_0 },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);

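/* Probe: map the subsystem registers, parse the "ethernet-ports" and
 * optional "mdio" DT sub-nodes, set up the TX/RX DMA channels, host port,
 * slave ports, ALE and CPTS, then create and register the net_device.
 */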
static int am65_cpsw_nuss_probe(struct platform_device *pdev)
{
	struct cpsw_ale_params ale_params = { 0 };
	const struct of_device_id *of_id;
	struct device *dev = &pdev->dev;
	struct am65_cpsw_common *common;
	struct device_node *node;
	struct resource *res;
	int ret, i;

	common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
	if (!common)
		return -ENOMEM;
	common->dev = dev;

	of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
	if (!of_id)
		return -EINVAL;
	common->pdata = of_id->data;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
	common->ss_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(common->ss_base))
		return PTR_ERR(common->ss_base);
	common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;

	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
	if (!node)
		return -ENOENT;
	common->port_num = of_get_child_count(node);
	/* put the node before the range check so it is not leaked on error */
	of_node_put(node);
	if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
		return -ENOENT;

	if (common->port_num != 1)
		return -EOPNOTSUPP;

	common->rx_flow_id_base = -1;
	init_completion(&common->tdown_complete);
	common->tx_ch_num = 1;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "error setting dma mask: %d\n", ret);
		return ret;
	}

	common->ports = devm_kcalloc(dev, common->port_num,
				     sizeof(*common->ports),
				     GFP_KERNEL);
	if (!common->ports)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

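	/* The "mdio" sub-node is optional: create a platform device for it
	 * so the MDIO bus driver can bind; a missing node only warns.
	 */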
	node = of_get_child_by_name(dev->of_node, "mdio");
	if (!node) {
		dev_warn(dev, "MDIO node not found\n");
	} else if (of_device_is_available(node)) {
		struct platform_device *mdio_pdev;

		mdio_pdev = of_platform_device_create(node, NULL, dev);
		if (!mdio_pdev) {
			ret = -ENODEV;
			goto err_pm_clear;
		}

		common->mdio_dev = &mdio_pdev->dev;
	}
	of_node_put(node);

	am65_cpsw_nuss_get_ver(common);

	/* init tx channels */
	ret = am65_cpsw_nuss_init_tx_chns(common);
	if (ret)
		goto err_of_clear;
	ret = am65_cpsw_nuss_init_rx_chns(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_init_host_p(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_init_slave_ports(common);
	if (ret)
		goto err_of_clear;

	/* init common data */
	ale_params.dev = dev;
	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
	ale_params.ale_entries = 0;
	ale_params.ale_ports = common->port_num + 1;
	ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
	ale_params.nu_switch_ale = true;

	common->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(common->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		ret = PTR_ERR(common->ale);
		goto err_of_clear;
	}

	ret = am65_cpsw_init_cpts(common);
	if (ret)
		goto err_of_clear;

	/* init ports */
	for (i = 0; i < common->port_num; i++)
		am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);

	dev_set_drvdata(dev, common);

	ret = am65_cpsw_nuss_init_ndev_2g(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_ndev_reg_2g(common);
	if (ret)
		goto err_of_clear;

	pm_runtime_put(dev);
	return 0;

err_of_clear:
	/* common->mdio_dev stays NULL when there is no usable "mdio" node */
	if (common->mdio_dev)
		of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}

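/* Tear down in reverse of probe: unregister the net_devices manually (see
 * the comment in am65_cpsw_nuss_ndev_reg_2g()), destroy the MDIO child
 * device and release the PM runtime state.
 */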
static int am65_cpsw_nuss_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct am65_cpsw_common *common;
	int ret;

	common = dev_get_drvdata(dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	/* must unregister ndevs here because DD release_driver routine calls
	 * dma_deconfigure(dev) before devres_release_all(dev)
	 */
	am65_cpsw_nuss_cleanup_ndev(common);

	if (common->mdio_dev)
		of_platform_device_destroy(common->mdio_dev, NULL);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver am65_cpsw_nuss_driver = {
	.driver = {
		.name	 = AM65_CPSW_DRV_NAME,
		.of_match_table = am65_cpsw_nuss_of_mtable,
	},
	.probe = am65_cpsw_nuss_probe,
	.remove = am65_cpsw_nuss_remove,
};

module_platform_driver(am65_cpsw_nuss_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");