Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0 2/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver 3 * 4 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ 5 * 6 */ 7 8#include <linux/bpf_trace.h> 9#include <linux/clk.h> 10#include <linux/etherdevice.h> 11#include <linux/if_vlan.h> 12#include <linux/interrupt.h> 13#include <linux/irqdomain.h> 14#include <linux/kernel.h> 15#include <linux/kmemleak.h> 16#include <linux/module.h> 17#include <linux/netdevice.h> 18#include <linux/net_tstamp.h> 19#include <linux/of.h> 20#include <linux/of_mdio.h> 21#include <linux/of_net.h> 22#include <linux/of_device.h> 23#include <linux/of_platform.h> 24#include <linux/phylink.h> 25#include <linux/phy/phy.h> 26#include <linux/platform_device.h> 27#include <linux/pm_runtime.h> 28#include <linux/regmap.h> 29#include <linux/rtnetlink.h> 30#include <linux/mfd/syscon.h> 31#include <linux/sys_soc.h> 32#include <linux/dma/ti-cppi5.h> 33#include <linux/dma/k3-udma-glue.h> 34#include <net/page_pool/helpers.h> 35#include <net/dsa.h> 36#include <net/switchdev.h> 37 38#include "cpsw_ale.h" 39#include "cpsw_sl.h" 40#include "am65-cpsw-nuss.h" 41#include "am65-cpsw-switchdev.h" 42#include "k3-cppi-desc-pool.h" 43#include "am65-cpts.h" 44 45#define AM65_CPSW_SS_BASE 0x0 46#define AM65_CPSW_SGMII_BASE 0x100 47#define AM65_CPSW_XGMII_BASE 0x2100 48#define AM65_CPSW_CPSW_NU_BASE 0x20000 49#define AM65_CPSW_NU_PORTS_BASE 0x1000 50#define AM65_CPSW_NU_FRAM_BASE 0x12000 51#define AM65_CPSW_NU_STATS_BASE 0x1a000 52#define AM65_CPSW_NU_ALE_BASE 0x1e000 53#define AM65_CPSW_NU_CPTS_BASE 0x1d000 54 55#define AM65_CPSW_NU_PORTS_OFFSET 0x1000 56#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200 57#define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200 58 59#define AM65_CPSW_MAX_PORTS 8 60 61#define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN 62#define AM65_CPSW_MAX_PACKET_SIZE 2024 63 64#define AM65_CPSW_REG_CTL 0x004 65#define AM65_CPSW_REG_STAT_PORT_EN 0x014 66#define AM65_CPSW_REG_PTYPE 0x018 67 68#define AM65_CPSW_P0_REG_CTL 0x004 69#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET 0x008 70 71#define AM65_CPSW_PORT_REG_PRI_CTL 0x01c 72#define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020 73#define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024 74 75#define AM65_CPSW_PORTN_REG_CTL 0x004 76#define AM65_CPSW_PORTN_REG_DSCP_MAP 0x120 77#define AM65_CPSW_PORTN_REG_SA_L 0x308 78#define AM65_CPSW_PORTN_REG_SA_H 0x30c 79#define AM65_CPSW_PORTN_REG_TS_CTL 0x310 80#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG 0x314 81#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318 82#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C 83 84#define AM65_CPSW_SGMII_CONTROL_REG 0x010 85#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018 86#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0) 87 88#define AM65_CPSW_CTL_VLAN_AWARE BIT(1) 89#define AM65_CPSW_CTL_P0_ENABLE BIT(2) 90#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13) 91#define AM65_CPSW_CTL_P0_RX_PAD BIT(14) 92 93/* AM65_CPSW_P0_REG_CTL */ 94#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN BIT(0) 95#define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN BIT(16) 96 97/* AM65_CPSW_PORT_REG_PRI_CTL */ 98#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8) 99 100/* AM65_CPSW_PN_REG_CTL */ 101#define AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN BIT(1) 102#define AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN BIT(2) 103 104/* AM65_CPSW_PN_TS_CTL register fields */ 105#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4) 106#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5) 107#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN BIT(6) 108#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN BIT(7) 109#define 
AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN BIT(10) 110#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11) 111#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16 112 113#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN BIT(0) 114#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN BIT(1) 115#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN BIT(2) 116#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN BIT(3) 117#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN BIT(9) 118 119/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */ 120#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16 121 122/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */ 123#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 BIT(16) 124#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 BIT(17) 125#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 BIT(18) 126#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 BIT(19) 127#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 BIT(20) 128#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 BIT(21) 129#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 BIT(22) 130#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23) 131 132/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */ 133#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3)) 134 135#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e) 136 137#define AM65_CPSW_TS_TX_ANX_ALL_EN \ 138 (AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN | \ 139 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \ 140 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN) 141 142#define AM65_CPSW_TS_RX_ANX_ALL_EN \ 143 (AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN | \ 144 AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN | \ 145 AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN) 146 147#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30 148/* Number of TX/RX descriptors per channel/flow */ 149#define AM65_CPSW_MAX_TX_DESC 500 150#define AM65_CPSW_MAX_RX_DESC 500 151 152#define AM65_CPSW_NAV_PS_DATA_SIZE 16 153#define AM65_CPSW_NAV_SW_DATA_SIZE 16 154 155#define AM65_CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \ 156 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \ 157 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 158 159#define AM65_CPSW_DEFAULT_TX_CHNS 8 160#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS 1 161 162/* CPPI streaming packet interface */ 163#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF 164#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7 165 166/* XDP */ 167#define AM65_CPSW_XDP_TX BIT(2) 168#define AM65_CPSW_XDP_CONSUMED BIT(1) 169#define AM65_CPSW_XDP_REDIRECT BIT(0) 170#define AM65_CPSW_XDP_PASS 0 171 172/* Include headroom compatible with both skb and xdpf */ 173#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) 174#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long)) 175 176static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave, 177 const u8 *dev_addr) 178{ 179 u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) | 180 (dev_addr[2] << 16) | (dev_addr[3] << 24); 181 u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8); 182 183 writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H); 184 writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L); 185} 186 187#define AM65_CPSW_DSCP_MAX GENMASK(5, 0) 188#define AM65_CPSW_PRI_MAX GENMASK(2, 0) 189#define AM65_CPSW_DSCP_PRI_PER_REG 8 190#define AM65_CPSW_DSCP_PRI_SIZE 4 /* in bits */ 191static int am65_cpsw_port_set_dscp_map(struct am65_cpsw_port *slave, u8 dscp, u8 pri) 192{ 193 int reg_ofs; 194 int bit_ofs; 195 u32 val; 196 197 if (dscp > AM65_CPSW_DSCP_MAX) 198 return -EINVAL; 199 200 if (pri > AM65_CPSW_PRI_MAX) 201 return -EINVAL; 202 203 /* 32-bit register offset to this dscp */ 204 reg_ofs = (dscp / AM65_CPSW_DSCP_PRI_PER_REG) * 4; 205 /* bit 
field offset to this dscp */ 206 bit_ofs = AM65_CPSW_DSCP_PRI_SIZE * (dscp % AM65_CPSW_DSCP_PRI_PER_REG); 207 208 val = readl(slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs); 209 val &= ~(AM65_CPSW_PRI_MAX << bit_ofs); /* clear */ 210 val |= pri << bit_ofs; /* set */ 211 writel(val, slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs); 212 213 return 0; 214} 215 216static void am65_cpsw_port_enable_dscp_map(struct am65_cpsw_port *slave) 217{ 218 int dscp, pri; 219 u32 val; 220 221 /* Default DSCP to User Priority mapping as per: 222 * https://datatracker.ietf.org/doc/html/rfc8325#section-4.3 223 * and 224 * https://datatracker.ietf.org/doc/html/rfc8622#section-11 225 */ 226 for (dscp = 0; dscp <= AM65_CPSW_DSCP_MAX; dscp++) { 227 switch (dscp) { 228 case 56: /* CS7 */ 229 case 48: /* CS6 */ 230 pri = 7; 231 break; 232 case 46: /* EF */ 233 case 44: /* VA */ 234 pri = 6; 235 break; 236 case 40: /* CS5 */ 237 pri = 5; 238 break; 239 case 34: /* AF41 */ 240 case 36: /* AF42 */ 241 case 38: /* AF43 */ 242 case 32: /* CS4 */ 243 case 26: /* AF31 */ 244 case 28: /* AF32 */ 245 case 30: /* AF33 */ 246 case 24: /* CS3 */ 247 pri = 4; 248 break; 249 case 18: /* AF21 */ 250 case 20: /* AF22 */ 251 case 22: /* AF23 */ 252 pri = 3; 253 break; 254 case 16: /* CS2 */ 255 case 10: /* AF11 */ 256 case 12: /* AF12 */ 257 case 14: /* AF13 */ 258 case 0: /* DF */ 259 pri = 0; 260 break; 261 case 8: /* CS1 */ 262 case 1: /* LE */ 263 pri = 1; 264 break; 265 default: 266 pri = 0; 267 break; 268 } 269 270 am65_cpsw_port_set_dscp_map(slave, dscp, pri); 271 } 272 273 /* enable port IPV4 and IPV6 DSCP for this port */ 274 val = readl(slave->port_base + AM65_CPSW_PORTN_REG_CTL); 275 val |= AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN | 276 AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN; 277 writel(val, slave->port_base + AM65_CPSW_PORTN_REG_CTL); 278} 279 280static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port) 281{ 282 cpsw_sl_reset(port->slave.mac_sl, 100); 283 /* Max length register has to be restored after MAC SL reset */ 284 writel(AM65_CPSW_MAX_PACKET_SIZE, 285 port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN); 286} 287 288static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common) 289{ 290 common->nuss_ver = readl(common->ss_base); 291 common->cpsw_ver = readl(common->cpsw_base); 292 dev_info(common->dev, 293 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n", 294 common->nuss_ver, 295 common->cpsw_ver, 296 common->port_num + 1, 297 common->pdata.quirks); 298} 299 300static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev, 301 __be16 proto, u16 vid) 302{ 303 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 304 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 305 u32 port_mask, unreg_mcast = 0; 306 int ret; 307 308 if (!common->is_emac_mode) 309 return 0; 310 311 if (!netif_running(ndev) || !vid) 312 return 0; 313 314 ret = pm_runtime_resume_and_get(common->dev); 315 if (ret < 0) 316 return ret; 317 318 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 319 if (!vid) 320 unreg_mcast = port_mask; 321 dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid); 322 ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask, 323 unreg_mcast, port_mask, 0); 324 325 pm_runtime_put(common->dev); 326 return ret; 327} 328 329static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev, 330 __be16 proto, u16 vid) 331{ 332 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 333 struct am65_cpsw_port *port = 
am65_ndev_to_port(ndev); 334 int ret; 335 336 if (!common->is_emac_mode) 337 return 0; 338 339 if (!netif_running(ndev) || !vid) 340 return 0; 341 342 ret = pm_runtime_resume_and_get(common->dev); 343 if (ret < 0) 344 return ret; 345 346 dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid); 347 ret = cpsw_ale_del_vlan(common->ale, vid, 348 BIT(port->port_id) | ALE_PORT_HOST); 349 350 pm_runtime_put(common->dev); 351 return ret; 352} 353 354static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port, 355 bool promisc) 356{ 357 struct am65_cpsw_common *common = port->common; 358 359 if (promisc && !common->is_emac_mode) { 360 dev_dbg(common->dev, "promisc mode requested in switch mode"); 361 return; 362 } 363 364 if (promisc) { 365 /* Enable promiscuous mode */ 366 cpsw_ale_control_set(common->ale, port->port_id, 367 ALE_PORT_MACONLY_CAF, 1); 368 dev_dbg(common->dev, "promisc enabled\n"); 369 } else { 370 /* Disable promiscuous mode */ 371 cpsw_ale_control_set(common->ale, port->port_id, 372 ALE_PORT_MACONLY_CAF, 0); 373 dev_dbg(common->dev, "promisc disabled\n"); 374 } 375} 376 377static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev) 378{ 379 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 380 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 381 u32 port_mask; 382 bool promisc; 383 384 promisc = !!(ndev->flags & IFF_PROMISC); 385 am65_cpsw_slave_set_promisc(port, promisc); 386 387 if (promisc) 388 return; 389 390 /* Restore allmulti on vlans if necessary */ 391 cpsw_ale_set_allmulti(common->ale, 392 ndev->flags & IFF_ALLMULTI, port->port_id); 393 394 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 395 /* Clear all mcast from ALE */ 396 cpsw_ale_flush_multicast(common->ale, port_mask, -1); 397 398 if (!netdev_mc_empty(ndev)) { 399 struct netdev_hw_addr *ha; 400 401 /* program multicast address list into ALE register */ 402 netdev_for_each_mc_addr(ha, ndev) { 403 cpsw_ale_add_mcast(common->ale, ha->addr, 404 port_mask, 0, 0, 0); 405 } 406 } 407} 408 409static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev, 410 unsigned int txqueue) 411{ 412 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 413 struct am65_cpsw_tx_chn *tx_chn; 414 struct netdev_queue *netif_txq; 415 unsigned long trans_start; 416 417 netif_txq = netdev_get_tx_queue(ndev, txqueue); 418 tx_chn = &common->tx_chns[txqueue]; 419 trans_start = READ_ONCE(netif_txq->trans_start); 420 421 netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n", 422 txqueue, 423 netif_tx_queue_stopped(netif_txq), 424 jiffies_to_msecs(jiffies - trans_start), 425 netdev_queue_dql_avail(netif_txq), 426 k3_cppi_desc_pool_avail(tx_chn->desc_pool)); 427 428 if (netif_tx_queue_stopped(netif_txq)) { 429 /* try recover if stopped by us */ 430 txq_trans_update(ndev, netif_txq); 431 netif_tx_wake_queue(netif_txq); 432 } 433} 434 435static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, 436 struct page *page, u32 flow_idx) 437{ 438 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; 439 struct cppi5_host_desc_t *desc_rx; 440 struct device *dev = common->dev; 441 struct am65_cpsw_swdata *swdata; 442 dma_addr_t desc_dma; 443 dma_addr_t buf_dma; 444 445 desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); 446 if (!desc_rx) { 447 dev_err(dev, "Failed to allocate RXFDQ descriptor\n"); 448 return -ENOMEM; 449 } 450 desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); 451 452 buf_dma = dma_map_single(rx_chn->dma_dev, 453 page_address(page) + 
AM65_CPSW_HEADROOM, 454 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE); 455 if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) { 456 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); 457 dev_err(dev, "Failed to map rx buffer\n"); 458 return -EINVAL; 459 } 460 461 cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, 462 AM65_CPSW_NAV_PS_DATA_SIZE); 463 k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); 464 cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE, 465 buf_dma, AM65_CPSW_MAX_PACKET_SIZE); 466 swdata = cppi5_hdesc_get_swdata(desc_rx); 467 swdata->page = page; 468 swdata->flow_id = flow_idx; 469 470 return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx, 471 desc_rx, desc_dma); 472} 473 474void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common) 475{ 476 struct am65_cpsw_host *host_p = am65_common_get_host(common); 477 u32 val, pri_map; 478 479 /* P0 set Receive Priority Type */ 480 val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL); 481 482 if (common->pf_p0_rx_ptype_rrobin) { 483 val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN; 484 /* Enet Ports fifos works in fixed priority mode only, so 485 * reset P0_Rx_Pri_Map so all packet will go in Enet fifo 0 486 */ 487 pri_map = 0x0; 488 } else { 489 val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN; 490 /* restore P0_Rx_Pri_Map */ 491 pri_map = 0x76543210; 492 } 493 494 writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP); 495 writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL); 496} 497 498static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common); 499static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common); 500static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port); 501static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port); 502static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow, 503 struct page *page, 504 bool allow_direct); 505static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma); 506static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma); 507 508static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id) 509{ 510 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; 511 struct am65_cpsw_rx_flow *flow; 512 struct xdp_rxq_info *rxq; 513 int port; 514 515 flow = &rx_chn->flows[id]; 516 napi_disable(&flow->napi_rx); 517 hrtimer_cancel(&flow->rx_hrtimer); 518 k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn, 519 am65_cpsw_nuss_rx_cleanup); 520 521 for (port = 0; port < common->port_num; port++) { 522 if (!common->ports[port].ndev) 523 continue; 524 525 rxq = &common->ports[port].xdp_rxq[id]; 526 527 if (xdp_rxq_info_is_reg(rxq)) 528 xdp_rxq_info_unreg(rxq); 529 } 530 531 if (flow->page_pool) { 532 page_pool_destroy(flow->page_pool); 533 flow->page_pool = NULL; 534 } 535} 536 537static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common) 538{ 539 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; 540 int id; 541 542 reinit_completion(&common->tdown_complete); 543 k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true); 544 545 if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) { 546 id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000)); 547 if (!id) 548 dev_err(common->dev, "rx teardown timeout\n"); 549 } 550 551 for (id = common->rx_ch_num_flows - 1; id >= 0; id--) 552 am65_cpsw_destroy_rxq(common, id); 553 554 k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn); 555} 556 557static int 
am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id) 558{ 559 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; 560 struct page_pool_params pp_params = { 561 .flags = PP_FLAG_DMA_MAP, 562 .order = 0, 563 .pool_size = AM65_CPSW_MAX_RX_DESC, 564 .nid = dev_to_node(common->dev), 565 .dev = common->dev, 566 .dma_dir = DMA_BIDIRECTIONAL, 567 /* .napi set dynamically */ 568 }; 569 struct am65_cpsw_rx_flow *flow; 570 struct xdp_rxq_info *rxq; 571 struct page_pool *pool; 572 struct page *page; 573 int port, ret, i; 574 575 flow = &rx_chn->flows[id]; 576 pp_params.napi = &flow->napi_rx; 577 pool = page_pool_create(&pp_params); 578 if (IS_ERR(pool)) { 579 ret = PTR_ERR(pool); 580 return ret; 581 } 582 583 flow->page_pool = pool; 584 585 /* using same page pool is allowed as no running rx handlers 586 * simultaneously for both ndevs 587 */ 588 for (port = 0; port < common->port_num; port++) { 589 if (!common->ports[port].ndev) 590 /* FIXME should we BUG here? */ 591 continue; 592 593 rxq = &common->ports[port].xdp_rxq[id]; 594 ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev, 595 id, flow->napi_rx.napi_id); 596 if (ret) 597 goto err; 598 599 ret = xdp_rxq_info_reg_mem_model(rxq, 600 MEM_TYPE_PAGE_POOL, 601 pool); 602 if (ret) 603 goto err; 604 } 605 606 for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) { 607 page = page_pool_dev_alloc_pages(flow->page_pool); 608 if (!page) { 609 dev_err(common->dev, "cannot allocate page in flow %d\n", 610 id); 611 ret = -ENOMEM; 612 goto err; 613 } 614 615 ret = am65_cpsw_nuss_rx_push(common, page, id); 616 if (ret < 0) { 617 dev_err(common->dev, 618 "cannot submit page to rx channel flow %d, error %d\n", 619 id, ret); 620 am65_cpsw_put_page(flow, page, false); 621 goto err; 622 } 623 } 624 625 napi_enable(&flow->napi_rx); 626 return 0; 627 628err: 629 am65_cpsw_destroy_rxq(common, id); 630 return ret; 631} 632 633static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common) 634{ 635 int id, ret; 636 637 for (id = 0; id < common->rx_ch_num_flows; id++) { 638 ret = am65_cpsw_create_rxq(common, id); 639 if (ret) { 640 dev_err(common->dev, "couldn't create rxq %d: %d\n", 641 id, ret); 642 goto err; 643 } 644 } 645 646 ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn); 647 if (ret) { 648 dev_err(common->dev, "couldn't enable rx chn: %d\n", ret); 649 goto err; 650 } 651 652 return 0; 653 654err: 655 for (--id; id >= 0; id--) 656 am65_cpsw_destroy_rxq(common, id); 657 658 return ret; 659} 660 661static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id) 662{ 663 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id]; 664 665 napi_disable(&tx_chn->napi_tx); 666 hrtimer_cancel(&tx_chn->tx_hrtimer); 667 k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn, 668 am65_cpsw_nuss_tx_cleanup); 669 k3_udma_glue_disable_tx_chn(tx_chn->tx_chn); 670} 671 672static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common) 673{ 674 struct am65_cpsw_tx_chn *tx_chn = common->tx_chns; 675 int id; 676 677 /* shutdown tx channels */ 678 atomic_set(&common->tdown_cnt, common->tx_ch_num); 679 /* ensure new tdown_cnt value is visible */ 680 smp_mb__after_atomic(); 681 reinit_completion(&common->tdown_complete); 682 683 for (id = 0; id < common->tx_ch_num; id++) 684 k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false); 685 686 id = wait_for_completion_timeout(&common->tdown_complete, 687 msecs_to_jiffies(1000)); 688 if (!id) 689 dev_err(common->dev, "tx teardown timeout\n"); 690 691 for (id = common->tx_ch_num - 1; id >= 0; id--) 692 
am65_cpsw_destroy_txq(common, id); 693} 694 695static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id) 696{ 697 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id]; 698 int ret; 699 700 ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn); 701 if (ret) 702 return ret; 703 704 napi_enable(&tx_chn->napi_tx); 705 706 return 0; 707} 708 709static int am65_cpsw_create_txqs(struct am65_cpsw_common *common) 710{ 711 int id, ret; 712 713 for (id = 0; id < common->tx_ch_num; id++) { 714 ret = am65_cpsw_create_txq(common, id); 715 if (ret) { 716 dev_err(common->dev, "couldn't create txq %d: %d\n", 717 id, ret); 718 goto err; 719 } 720 } 721 722 return 0; 723 724err: 725 for (--id; id >= 0; id--) 726 am65_cpsw_destroy_txq(common, id); 727 728 return ret; 729} 730 731static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool, 732 void *desc, 733 unsigned char dsize_log2) 734{ 735 void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool); 736 737 return (desc - pool_addr) >> dsize_log2; 738} 739 740static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn, 741 struct cppi5_host_desc_t *desc, 742 enum am65_cpsw_tx_buf_type buf_type) 743{ 744 int desc_idx; 745 746 desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc, 747 tx_chn->dsize_log2); 748 k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx, 749 (void *)buf_type); 750} 751 752static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn, 753 dma_addr_t desc_dma) 754{ 755 struct cppi5_host_desc_t *desc_tx; 756 int desc_idx; 757 758 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); 759 desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx, 760 tx_chn->dsize_log2); 761 762 return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool, 763 desc_idx); 764} 765 766static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow, 767 struct page *page, 768 bool allow_direct) 769{ 770 page_pool_put_full_page(flow->page_pool, page, allow_direct); 771} 772 773static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma) 774{ 775 struct am65_cpsw_rx_chn *rx_chn = data; 776 struct cppi5_host_desc_t *desc_rx; 777 struct am65_cpsw_swdata *swdata; 778 dma_addr_t buf_dma; 779 struct page *page; 780 u32 buf_dma_len; 781 u32 flow_id; 782 783 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); 784 swdata = cppi5_hdesc_get_swdata(desc_rx); 785 page = swdata->page; 786 flow_id = swdata->flow_id; 787 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); 788 k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); 789 dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); 790 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); 791 am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false); 792} 793 794static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn, 795 struct cppi5_host_desc_t *desc) 796{ 797 struct cppi5_host_desc_t *first_desc, *next_desc; 798 dma_addr_t buf_dma, next_desc_dma; 799 u32 buf_dma_len; 800 801 first_desc = desc; 802 next_desc = first_desc; 803 804 cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); 805 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); 806 807 dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE); 808 809 next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc); 810 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); 811 while (next_desc_dma) { 812 next_desc = 
k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, 813 next_desc_dma); 814 cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len); 815 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); 816 817 dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len, 818 DMA_TO_DEVICE); 819 820 next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc); 821 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); 822 823 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); 824 } 825 826 k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); 827} 828 829static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma) 830{ 831 struct am65_cpsw_tx_chn *tx_chn = data; 832 enum am65_cpsw_tx_buf_type buf_type; 833 struct am65_cpsw_tx_swdata *swdata; 834 struct cppi5_host_desc_t *desc_tx; 835 struct xdp_frame *xdpf; 836 struct sk_buff *skb; 837 838 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); 839 swdata = cppi5_hdesc_get_swdata(desc_tx); 840 buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); 841 if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { 842 skb = swdata->skb; 843 dev_kfree_skb_any(skb); 844 } else { 845 xdpf = swdata->xdpf; 846 xdp_return_frame(xdpf); 847 } 848 849 am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); 850} 851 852static struct sk_buff *am65_cpsw_build_skb(void *page_addr, 853 struct net_device *ndev, 854 unsigned int len, 855 unsigned int headroom) 856{ 857 struct sk_buff *skb; 858 859 skb = build_skb(page_addr, len); 860 if (unlikely(!skb)) 861 return NULL; 862 863 skb_reserve(skb, headroom); 864 skb->dev = ndev; 865 866 return skb; 867} 868 869static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) 870{ 871 struct am65_cpsw_host *host_p = am65_common_get_host(common); 872 u32 val, port_mask; 873 int port_idx, ret; 874 875 if (common->usage_count) 876 return 0; 877 878 /* Control register */ 879 writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE | 880 AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD, 881 common->cpsw_base + AM65_CPSW_REG_CTL); 882 /* Max length register */ 883 writel(AM65_CPSW_MAX_PACKET_SIZE, 884 host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN); 885 /* set base flow_id */ 886 writel(common->rx_flow_id_base, 887 host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET); 888 writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN, 889 host_p->port_base + AM65_CPSW_P0_REG_CTL); 890 891 am65_cpsw_nuss_set_p0_ptype(common); 892 893 /* enable statistic */ 894 val = BIT(HOST_PORT_NUM); 895 for (port_idx = 0; port_idx < common->port_num; port_idx++) { 896 struct am65_cpsw_port *port = &common->ports[port_idx]; 897 898 if (!port->disabled) 899 val |= BIT(port->port_id); 900 } 901 writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN); 902 903 /* disable priority elevation */ 904 writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE); 905 906 cpsw_ale_start(common->ale); 907 908 /* limit to one RX flow only */ 909 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, 910 ALE_DEFAULT_THREAD_ID, 0); 911 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, 912 ALE_DEFAULT_THREAD_ENABLE, 1); 913 /* switch to vlan aware mode */ 914 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1); 915 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, 916 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 917 918 /* default vlan cfg: create mask based on enabled ports */ 919 port_mask = GENMASK(common->port_num, 0) & 920 ~common->disabled_ports_mask; 921 922 cpsw_ale_add_vlan(common->ale, 0, port_mask, 923 port_mask, 
port_mask, 924 port_mask & ~ALE_PORT_HOST); 925 926 if (common->is_emac_mode) 927 am65_cpsw_init_host_port_emac(common); 928 else 929 am65_cpsw_init_host_port_switch(common); 930 931 am65_cpsw_qos_tx_p0_rate_init(common); 932 933 ret = am65_cpsw_create_rxqs(common); 934 if (ret) 935 return ret; 936 937 ret = am65_cpsw_create_txqs(common); 938 if (ret) 939 goto cleanup_rx; 940 941 dev_dbg(common->dev, "cpsw_nuss started\n"); 942 return 0; 943 944cleanup_rx: 945 am65_cpsw_destroy_rxqs(common); 946 947 return ret; 948} 949 950static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) 951{ 952 if (common->usage_count != 1) 953 return 0; 954 955 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, 956 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 957 958 am65_cpsw_destroy_txqs(common); 959 am65_cpsw_destroy_rxqs(common); 960 cpsw_ale_stop(common->ale); 961 962 writel(0, common->cpsw_base + AM65_CPSW_REG_CTL); 963 writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN); 964 965 dev_dbg(common->dev, "cpsw_nuss stopped\n"); 966 return 0; 967} 968 969static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev) 970{ 971 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 972 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 973 int ret; 974 975 phylink_stop(port->slave.phylink); 976 977 netif_tx_stop_all_queues(ndev); 978 979 phylink_disconnect_phy(port->slave.phylink); 980 981 ret = am65_cpsw_nuss_common_stop(common); 982 if (ret) 983 return ret; 984 985 common->usage_count--; 986 pm_runtime_put(common->dev); 987 return 0; 988} 989 990static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) 991{ 992 struct am65_cpsw_port *port = arg; 993 994 if (!vdev) 995 return 0; 996 997 return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid); 998} 999 1000static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) 1001{ 1002 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 1003 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1004 int ret, i; 1005 u32 reg; 1006 1007 ret = pm_runtime_resume_and_get(common->dev); 1008 if (ret < 0) 1009 return ret; 1010 1011 /* Idle MAC port */ 1012 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE); 1013 cpsw_sl_wait_for_idle(port->slave.mac_sl, 100); 1014 cpsw_sl_ctl_reset(port->slave.mac_sl); 1015 1016 /* soft reset MAC */ 1017 cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1); 1018 mdelay(1); 1019 reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET); 1020 if (reg) { 1021 dev_err(common->dev, "soft RESET didn't complete\n"); 1022 ret = -ETIMEDOUT; 1023 goto runtime_put; 1024 } 1025 1026 /* Notify the stack of the actual queue counts. */ 1027 ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num); 1028 if (ret) { 1029 dev_err(common->dev, "cannot set real number of tx queues\n"); 1030 goto runtime_put; 1031 } 1032 1033 ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows); 1034 if (ret) { 1035 dev_err(common->dev, "cannot set real number of rx queues\n"); 1036 goto runtime_put; 1037 } 1038 1039 for (i = 0; i < common->tx_ch_num; i++) { 1040 struct netdev_queue *txq = netdev_get_tx_queue(ndev, i); 1041 1042 netdev_tx_reset_queue(txq); 1043 txq->tx_maxrate = common->tx_chns[i].rate_mbps; 1044 } 1045 1046 ret = am65_cpsw_nuss_common_open(common); 1047 if (ret) 1048 goto runtime_put; 1049 1050 common->usage_count++; 1051 1052 /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes. 1053 * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port. 
1054 */ 1055 if (netdev_uses_dsa(ndev)) { 1056 reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL); 1057 reg &= ~AM65_CPSW_CTL_VLAN_AWARE; 1058 writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL); 1059 } 1060 1061 am65_cpsw_port_set_sl_mac(port, ndev->dev_addr); 1062 am65_cpsw_port_enable_dscp_map(port); 1063 1064 if (common->is_emac_mode) 1065 am65_cpsw_init_port_emac_ale(port); 1066 else 1067 am65_cpsw_init_port_switch_ale(port); 1068 1069 /* mac_sl should be configured via phy-link interface */ 1070 am65_cpsw_sl_ctl_reset(port); 1071 1072 ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0); 1073 if (ret) 1074 goto error_cleanup; 1075 1076 /* restore vlan configurations */ 1077 vlan_for_each(ndev, cpsw_restore_vlans, port); 1078 1079 phylink_start(port->slave.phylink); 1080 1081 return 0; 1082 1083error_cleanup: 1084 am65_cpsw_nuss_ndo_slave_stop(ndev); 1085 return ret; 1086 1087runtime_put: 1088 pm_runtime_put(common->dev); 1089 return ret; 1090} 1091 1092static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, 1093 struct am65_cpsw_tx_chn *tx_chn, 1094 struct xdp_frame *xdpf, 1095 enum am65_cpsw_tx_buf_type buf_type) 1096{ 1097 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 1098 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1099 struct cppi5_host_desc_t *host_desc; 1100 struct am65_cpsw_tx_swdata *swdata; 1101 struct netdev_queue *netif_txq; 1102 dma_addr_t dma_desc, dma_buf; 1103 u32 pkt_len = xdpf->len; 1104 int ret; 1105 1106 host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); 1107 if (unlikely(!host_desc)) { 1108 ndev->stats.tx_dropped++; 1109 return AM65_CPSW_XDP_CONSUMED; /* drop */ 1110 } 1111 1112 am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type); 1113 1114 dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data, 1115 pkt_len, DMA_TO_DEVICE); 1116 if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) { 1117 ndev->stats.tx_dropped++; 1118 ret = AM65_CPSW_XDP_CONSUMED; /* drop */ 1119 goto pool_free; 1120 } 1121 1122 cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 1123 AM65_CPSW_NAV_PS_DATA_SIZE); 1124 cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE); 1125 cppi5_hdesc_set_pktlen(host_desc, pkt_len); 1126 cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID); 1127 cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id); 1128 1129 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf); 1130 cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len); 1131 1132 swdata = cppi5_hdesc_get_swdata(host_desc); 1133 swdata->ndev = ndev; 1134 swdata->xdpf = xdpf; 1135 1136 /* Report BQL before sending the packet */ 1137 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); 1138 netdev_tx_sent_queue(netif_txq, pkt_len); 1139 1140 dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc); 1141 if (AM65_CPSW_IS_CPSW2G(common)) { 1142 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc, 1143 dma_desc); 1144 } else { 1145 spin_lock_bh(&tx_chn->lock); 1146 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc, 1147 dma_desc); 1148 spin_unlock_bh(&tx_chn->lock); 1149 } 1150 if (ret) { 1151 /* Inform BQL */ 1152 netdev_tx_completed_queue(netif_txq, 1, pkt_len); 1153 ndev->stats.tx_errors++; 1154 ret = AM65_CPSW_XDP_CONSUMED; /* drop */ 1155 goto dma_unmap; 1156 } 1157 1158 return 0; 1159 1160dma_unmap: 1161 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf); 1162 dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE); 1163pool_free: 1164 
k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc); 1165 return ret; 1166} 1167 1168static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, 1169 struct am65_cpsw_port *port, 1170 struct xdp_buff *xdp, int *len) 1171{ 1172 struct am65_cpsw_common *common = flow->common; 1173 struct net_device *ndev = port->ndev; 1174 int ret = AM65_CPSW_XDP_CONSUMED; 1175 struct am65_cpsw_tx_chn *tx_chn; 1176 struct netdev_queue *netif_txq; 1177 int cpu = smp_processor_id(); 1178 struct xdp_frame *xdpf; 1179 struct bpf_prog *prog; 1180 int pkt_len; 1181 u32 act; 1182 int err; 1183 1184 pkt_len = *len; 1185 prog = READ_ONCE(port->xdp_prog); 1186 if (!prog) 1187 return AM65_CPSW_XDP_PASS; 1188 1189 act = bpf_prog_run_xdp(prog, xdp); 1190 /* XDP prog might have changed packet data and boundaries */ 1191 *len = xdp->data_end - xdp->data; 1192 1193 switch (act) { 1194 case XDP_PASS: 1195 return AM65_CPSW_XDP_PASS; 1196 case XDP_TX: 1197 tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES]; 1198 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); 1199 1200 xdpf = xdp_convert_buff_to_frame(xdp); 1201 if (unlikely(!xdpf)) { 1202 ndev->stats.tx_dropped++; 1203 goto drop; 1204 } 1205 1206 __netif_tx_lock(netif_txq, cpu); 1207 err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf, 1208 AM65_CPSW_TX_BUF_TYPE_XDP_TX); 1209 __netif_tx_unlock(netif_txq); 1210 if (err) 1211 goto drop; 1212 1213 dev_sw_netstats_rx_add(ndev, pkt_len); 1214 return AM65_CPSW_XDP_TX; 1215 case XDP_REDIRECT: 1216 if (unlikely(xdp_do_redirect(ndev, xdp, prog))) 1217 goto drop; 1218 1219 dev_sw_netstats_rx_add(ndev, pkt_len); 1220 return AM65_CPSW_XDP_REDIRECT; 1221 default: 1222 bpf_warn_invalid_xdp_action(ndev, prog, act); 1223 fallthrough; 1224 case XDP_ABORTED: 1225drop: 1226 trace_xdp_exception(ndev, prog, act); 1227 fallthrough; 1228 case XDP_DROP: 1229 ndev->stats.rx_dropped++; 1230 } 1231 1232 return ret; 1233} 1234 1235/* RX psdata[2] word format - checksum information */ 1236#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0) 1237#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16) 1238#define AM65_CPSW_RX_PSD_IS_FRAGMENT BIT(17) 1239#define AM65_CPSW_RX_PSD_IS_TCP BIT(18) 1240#define AM65_CPSW_RX_PSD_IPV6_VALID BIT(19) 1241#define AM65_CPSW_RX_PSD_IPV4_VALID BIT(20) 1242 1243static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info) 1244{ 1245 /* HW can verify IPv4/IPv6 TCP/UDP packets checksum 1246 * csum information provides in psdata[2] word: 1247 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error 1248 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID 1249 * bits - indicates IPv4/IPv6 packet 1250 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet 1251 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets 1252 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR 1253 */ 1254 skb_checksum_none_assert(skb); 1255 1256 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) 1257 return; 1258 1259 if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID | 1260 AM65_CPSW_RX_PSD_IPV4_VALID)) && 1261 !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) { 1262 /* csum for fragmented packets is unsupported */ 1263 if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT)) 1264 skb->ip_summed = CHECKSUM_UNNECESSARY; 1265 } 1266} 1267 1268static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, 1269 int *xdp_state) 1270{ 1271 struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns; 1272 u32 buf_dma_len, pkt_len, port_id = 0, csum_info; 1273 struct am65_cpsw_common *common = flow->common; 1274 struct 
am65_cpsw_ndev_priv *ndev_priv; 1275 struct cppi5_host_desc_t *desc_rx; 1276 struct device *dev = common->dev; 1277 struct am65_cpsw_swdata *swdata; 1278 struct page *page, *new_page; 1279 dma_addr_t desc_dma, buf_dma; 1280 struct am65_cpsw_port *port; 1281 struct net_device *ndev; 1282 u32 flow_idx = flow->id; 1283 struct sk_buff *skb; 1284 struct xdp_buff xdp; 1285 int headroom, ret; 1286 void *page_addr; 1287 u32 *psdata; 1288 1289 *xdp_state = AM65_CPSW_XDP_PASS; 1290 ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma); 1291 if (ret) { 1292 if (ret != -ENODATA) 1293 dev_err(dev, "RX: pop chn fail %d\n", ret); 1294 return ret; 1295 } 1296 1297 if (cppi5_desc_is_tdcm(desc_dma)) { 1298 dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx); 1299 if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) 1300 complete(&common->tdown_complete); 1301 return 0; 1302 } 1303 1304 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); 1305 dev_dbg(dev, "%s flow_idx: %u desc %pad\n", 1306 __func__, flow_idx, &desc_dma); 1307 1308 swdata = cppi5_hdesc_get_swdata(desc_rx); 1309 page = swdata->page; 1310 page_addr = page_address(page); 1311 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); 1312 k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); 1313 pkt_len = cppi5_hdesc_get_pktlen(desc_rx); 1314 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); 1315 dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id); 1316 port = am65_common_get_port(common, port_id); 1317 ndev = port->ndev; 1318 psdata = cppi5_hdesc_get_psdata(desc_rx); 1319 csum_info = psdata[2]; 1320 dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info); 1321 1322 dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); 1323 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); 1324 1325 if (port->xdp_prog) { 1326 xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]); 1327 xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM, 1328 pkt_len, false); 1329 *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len); 1330 if (*xdp_state == AM65_CPSW_XDP_CONSUMED) { 1331 page = virt_to_head_page(xdp.data); 1332 am65_cpsw_put_page(flow, page, true); 1333 goto allocate; 1334 } 1335 1336 if (*xdp_state != AM65_CPSW_XDP_PASS) 1337 goto allocate; 1338 1339 headroom = xdp.data - xdp.data_hard_start; 1340 } else { 1341 headroom = AM65_CPSW_HEADROOM; 1342 } 1343 1344 skb = am65_cpsw_build_skb(page_addr, ndev, 1345 PAGE_SIZE, headroom); 1346 if (unlikely(!skb)) { 1347 new_page = page; 1348 goto requeue; 1349 } 1350 1351 ndev_priv = netdev_priv(ndev); 1352 am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark); 1353 skb_put(skb, pkt_len); 1354 if (port->rx_ts_filter) 1355 am65_cpts_rx_timestamp(common->cpts, port_id, skb); 1356 skb_mark_for_recycle(skb); 1357 skb->protocol = eth_type_trans(skb, ndev); 1358 am65_cpsw_nuss_rx_csum(skb, csum_info); 1359 napi_gro_receive(&flow->napi_rx, skb); 1360 1361 dev_sw_netstats_rx_add(ndev, pkt_len); 1362 1363allocate: 1364 new_page = page_pool_dev_alloc_pages(flow->page_pool); 1365 if (unlikely(!new_page)) { 1366 dev_err(dev, "page alloc failed\n"); 1367 return -ENOMEM; 1368 } 1369 1370 if (netif_dormant(ndev)) { 1371 am65_cpsw_put_page(flow, new_page, true); 1372 ndev->stats.rx_dropped++; 1373 return 0; 1374 } 1375 1376requeue: 1377 ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx); 1378 if (WARN_ON(ret < 0)) { 1379 am65_cpsw_put_page(flow, new_page, true); 1380 ndev->stats.rx_errors++; 1381 ndev->stats.rx_dropped++; 1382 } 
1383 1384 return ret; 1385} 1386 1387static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer) 1388{ 1389 struct am65_cpsw_rx_flow *flow = container_of(timer, 1390 struct am65_cpsw_rx_flow, 1391 rx_hrtimer); 1392 1393 enable_irq(flow->irq); 1394 return HRTIMER_NORESTART; 1395} 1396 1397static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) 1398{ 1399 struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx); 1400 struct am65_cpsw_common *common = flow->common; 1401 int xdp_state_or = 0; 1402 int cur_budget, ret; 1403 int xdp_state; 1404 int num_rx = 0; 1405 1406 /* process only this flow */ 1407 cur_budget = budget; 1408 while (cur_budget--) { 1409 ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state); 1410 xdp_state_or |= xdp_state; 1411 if (ret) 1412 break; 1413 num_rx++; 1414 } 1415 1416 if (xdp_state_or & AM65_CPSW_XDP_REDIRECT) 1417 xdp_do_flush(); 1418 1419 dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget); 1420 1421 if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { 1422 if (flow->irq_disabled) { 1423 flow->irq_disabled = false; 1424 if (unlikely(flow->rx_pace_timeout)) { 1425 hrtimer_start(&flow->rx_hrtimer, 1426 ns_to_ktime(flow->rx_pace_timeout), 1427 HRTIMER_MODE_REL_PINNED); 1428 } else { 1429 enable_irq(flow->irq); 1430 } 1431 } 1432 } 1433 1434 return num_rx; 1435} 1436 1437static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev, 1438 struct netdev_queue *netif_txq) 1439{ 1440 if (netif_tx_queue_stopped(netif_txq)) { 1441 /* Check whether the queue is stopped due to stalled 1442 * tx dma, if the queue is stopped then wake the queue 1443 * as we have free desc for tx 1444 */ 1445 __netif_tx_lock(netif_txq, smp_processor_id()); 1446 if (netif_running(ndev) && 1447 (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS)) 1448 netif_tx_wake_queue(netif_txq); 1449 1450 __netif_tx_unlock(netif_txq); 1451 } 1452} 1453 1454static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, 1455 int chn, unsigned int budget, bool *tdown) 1456{ 1457 bool single_port = AM65_CPSW_IS_CPSW2G(common); 1458 enum am65_cpsw_tx_buf_type buf_type; 1459 struct am65_cpsw_tx_swdata *swdata; 1460 struct cppi5_host_desc_t *desc_tx; 1461 struct device *dev = common->dev; 1462 struct am65_cpsw_tx_chn *tx_chn; 1463 struct netdev_queue *netif_txq; 1464 unsigned int total_bytes = 0; 1465 struct net_device *ndev; 1466 struct xdp_frame *xdpf; 1467 unsigned int pkt_len; 1468 struct sk_buff *skb; 1469 dma_addr_t desc_dma; 1470 int res, num_tx = 0; 1471 1472 tx_chn = &common->tx_chns[chn]; 1473 1474 while (true) { 1475 if (!single_port) 1476 spin_lock(&tx_chn->lock); 1477 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); 1478 if (!single_port) 1479 spin_unlock(&tx_chn->lock); 1480 1481 if (res == -ENODATA) 1482 break; 1483 1484 if (cppi5_desc_is_tdcm(desc_dma)) { 1485 if (atomic_dec_and_test(&common->tdown_cnt)) 1486 complete(&common->tdown_complete); 1487 *tdown = true; 1488 break; 1489 } 1490 1491 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, 1492 desc_dma); 1493 swdata = cppi5_hdesc_get_swdata(desc_tx); 1494 ndev = swdata->ndev; 1495 buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); 1496 if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { 1497 skb = swdata->skb; 1498 am65_cpts_tx_timestamp(tx_chn->common->cpts, skb); 1499 pkt_len = skb->len; 1500 napi_consume_skb(skb, budget); 1501 } else { 1502 xdpf = swdata->xdpf; 1503 pkt_len = xdpf->len; 1504 if (buf_type == 
AM65_CPSW_TX_BUF_TYPE_XDP_TX) 1505 xdp_return_frame_rx_napi(xdpf); 1506 else 1507 xdp_return_frame(xdpf); 1508 } 1509 1510 total_bytes += pkt_len; 1511 num_tx++; 1512 am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); 1513 dev_sw_netstats_tx_add(ndev, 1, pkt_len); 1514 if (!single_port) { 1515 /* as packets from multi ports can be interleaved 1516 * on the same channel, we have to figure out the 1517 * port/queue at every packet and report it/wake queue. 1518 */ 1519 netif_txq = netdev_get_tx_queue(ndev, chn); 1520 netdev_tx_completed_queue(netif_txq, 1, pkt_len); 1521 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); 1522 } 1523 } 1524 1525 if (single_port && num_tx) { 1526 netif_txq = netdev_get_tx_queue(ndev, chn); 1527 netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); 1528 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); 1529 } 1530 1531 dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx); 1532 1533 return num_tx; 1534} 1535 1536static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer) 1537{ 1538 struct am65_cpsw_tx_chn *tx_chns = 1539 container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer); 1540 1541 enable_irq(tx_chns->irq); 1542 return HRTIMER_NORESTART; 1543} 1544 1545static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget) 1546{ 1547 struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx); 1548 bool tdown = false; 1549 int num_tx; 1550 1551 num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, 1552 tx_chn->id, budget, &tdown); 1553 if (num_tx >= budget) 1554 return budget; 1555 1556 if (napi_complete_done(napi_tx, num_tx)) { 1557 if (unlikely(tx_chn->tx_pace_timeout && !tdown)) { 1558 hrtimer_start(&tx_chn->tx_hrtimer, 1559 ns_to_ktime(tx_chn->tx_pace_timeout), 1560 HRTIMER_MODE_REL_PINNED); 1561 } else { 1562 enable_irq(tx_chn->irq); 1563 } 1564 } 1565 1566 return 0; 1567} 1568 1569static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id) 1570{ 1571 struct am65_cpsw_rx_flow *flow = dev_id; 1572 1573 flow->irq_disabled = true; 1574 disable_irq_nosync(irq); 1575 napi_schedule(&flow->napi_rx); 1576 1577 return IRQ_HANDLED; 1578} 1579 1580static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id) 1581{ 1582 struct am65_cpsw_tx_chn *tx_chn = dev_id; 1583 1584 disable_irq_nosync(irq); 1585 napi_schedule(&tx_chn->napi_tx); 1586 1587 return IRQ_HANDLED; 1588} 1589 1590static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb, 1591 struct net_device *ndev) 1592{ 1593 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 1594 struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; 1595 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1596 struct am65_cpsw_tx_swdata *swdata; 1597 struct device *dev = common->dev; 1598 struct am65_cpsw_tx_chn *tx_chn; 1599 struct netdev_queue *netif_txq; 1600 dma_addr_t desc_dma, buf_dma; 1601 int ret, q_idx, i; 1602 u32 *psdata; 1603 u32 pkt_len; 1604 1605 /* padding enabled in hw */ 1606 pkt_len = skb_headlen(skb); 1607 1608 /* SKB TX timestamp */ 1609 if (port->tx_ts_enabled) 1610 am65_cpts_prep_tx_timestamp(common->cpts, port->port_id, skb); 1611 1612 q_idx = skb_get_queue_mapping(skb); 1613 dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx); 1614 1615 tx_chn = &common->tx_chns[q_idx]; 1616 netif_txq = netdev_get_tx_queue(ndev, q_idx); 1617 1618 /* Map the linear buffer */ 1619 buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, 1620 DMA_TO_DEVICE); 1621 if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) { 1622 dev_err(dev, "Failed to 
map tx skb buffer\n"); 1623 ndev->stats.tx_errors++; 1624 goto err_free_skb; 1625 } 1626 1627 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); 1628 if (!first_desc) { 1629 dev_dbg(dev, "Failed to allocate descriptor\n"); 1630 dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, 1631 DMA_TO_DEVICE); 1632 goto busy_stop_q; 1633 } 1634 1635 am65_cpsw_nuss_set_buf_type(tx_chn, first_desc, 1636 AM65_CPSW_TX_BUF_TYPE_SKB); 1637 1638 cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 1639 AM65_CPSW_NAV_PS_DATA_SIZE); 1640 cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID); 1641 cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE); 1642 cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id); 1643 1644 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); 1645 cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); 1646 swdata = cppi5_hdesc_get_swdata(first_desc); 1647 swdata->ndev = ndev; 1648 swdata->skb = skb; 1649 psdata = cppi5_hdesc_get_psdata(first_desc); 1650 1651 /* HW csum offload if enabled */ 1652 psdata[2] = 0; 1653 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1654 unsigned int cs_start, cs_offset; 1655 1656 cs_start = skb_transport_offset(skb); 1657 cs_offset = cs_start + skb->csum_offset; 1658 /* HW numerates bytes starting from 1 */ 1659 psdata[2] = ((cs_offset + 1) << 24) | 1660 ((cs_start + 1) << 16) | (skb->len - cs_start); 1661 dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]); 1662 } 1663 1664 if (!skb_is_nonlinear(skb)) 1665 goto done_tx; 1666 1667 dev_dbg(dev, "fragmented SKB\n"); 1668 1669 /* Handle the case where skb is fragmented in pages */ 1670 cur_desc = first_desc; 1671 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1672 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1673 u32 frag_size = skb_frag_size(frag); 1674 1675 next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); 1676 if (!next_desc) { 1677 dev_err(dev, "Failed to allocate descriptor\n"); 1678 goto busy_free_descs; 1679 } 1680 1681 am65_cpsw_nuss_set_buf_type(tx_chn, next_desc, 1682 AM65_CPSW_TX_BUF_TYPE_SKB); 1683 1684 buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size, 1685 DMA_TO_DEVICE); 1686 if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) { 1687 dev_err(dev, "Failed to map tx skb page\n"); 1688 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); 1689 ndev->stats.tx_errors++; 1690 goto err_free_descs; 1691 } 1692 1693 cppi5_hdesc_reset_hbdesc(next_desc); 1694 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); 1695 cppi5_hdesc_attach_buf(next_desc, 1696 buf_dma, frag_size, buf_dma, frag_size); 1697 1698 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, 1699 next_desc); 1700 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma); 1701 cppi5_hdesc_link_hbdesc(cur_desc, desc_dma); 1702 1703 pkt_len += frag_size; 1704 cur_desc = next_desc; 1705 } 1706 WARN_ON(pkt_len != skb->len); 1707 1708done_tx: 1709 skb_tx_timestamp(skb); 1710 1711 /* report bql before sending packet */ 1712 netdev_tx_sent_queue(netif_txq, pkt_len); 1713 1714 cppi5_hdesc_set_pktlen(first_desc, pkt_len); 1715 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); 1716 if (AM65_CPSW_IS_CPSW2G(common)) { 1717 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); 1718 } else { 1719 spin_lock_bh(&tx_chn->lock); 1720 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); 1721 spin_unlock_bh(&tx_chn->lock); 1722 } 1723 if (ret) { 1724 dev_err(dev, "can't push desc 
%d\n", ret); 1725 /* inform bql */ 1726 netdev_tx_completed_queue(netif_txq, 1, pkt_len); 1727 ndev->stats.tx_errors++; 1728 goto err_free_descs; 1729 } 1730 1731 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) { 1732 netif_tx_stop_queue(netif_txq); 1733 /* Barrier, so that stop_queue visible to other cpus */ 1734 smp_mb__after_atomic(); 1735 dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx); 1736 1737 /* re-check for smp */ 1738 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= 1739 MAX_SKB_FRAGS) { 1740 netif_tx_wake_queue(netif_txq); 1741 dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx); 1742 } 1743 } 1744 1745 return NETDEV_TX_OK; 1746 1747err_free_descs: 1748 am65_cpsw_nuss_xmit_free(tx_chn, first_desc); 1749err_free_skb: 1750 ndev->stats.tx_dropped++; 1751 dev_kfree_skb_any(skb); 1752 return NETDEV_TX_OK; 1753 1754busy_free_descs: 1755 am65_cpsw_nuss_xmit_free(tx_chn, first_desc); 1756busy_stop_q: 1757 netif_tx_stop_queue(netif_txq); 1758 return NETDEV_TX_BUSY; 1759} 1760 1761static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev, 1762 void *addr) 1763{ 1764 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 1765 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1766 struct sockaddr *sockaddr = (struct sockaddr *)addr; 1767 int ret; 1768 1769 ret = eth_prepare_mac_addr_change(ndev, addr); 1770 if (ret < 0) 1771 return ret; 1772 1773 ret = pm_runtime_resume_and_get(common->dev); 1774 if (ret < 0) 1775 return ret; 1776 1777 cpsw_ale_del_ucast(common->ale, ndev->dev_addr, 1778 HOST_PORT_NUM, 0, 0); 1779 cpsw_ale_add_ucast(common->ale, sockaddr->sa_data, 1780 HOST_PORT_NUM, ALE_SECURE, 0); 1781 1782 am65_cpsw_port_set_sl_mac(port, addr); 1783 eth_commit_mac_addr_change(ndev, sockaddr); 1784 1785 pm_runtime_put(common->dev); 1786 1787 return 0; 1788} 1789 1790static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, 1791 struct kernel_hwtstamp_config *cfg, 1792 struct netlink_ext_ack *extack) 1793{ 1794 struct am65_cpsw_port *port = am65_ndev_to_port(ndev); 1795 u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype; 1796 1797 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) { 1798 NL_SET_ERR_MSG(extack, "Time stamping is not supported"); 1799 return -EOPNOTSUPP; 1800 } 1801 1802 /* TX HW timestamp */ 1803 switch (cfg->tx_type) { 1804 case HWTSTAMP_TX_OFF: 1805 case HWTSTAMP_TX_ON: 1806 break; 1807 default: 1808 NL_SET_ERR_MSG(extack, "TX mode is not supported"); 1809 return -ERANGE; 1810 } 1811 1812 switch (cfg->rx_filter) { 1813 case HWTSTAMP_FILTER_NONE: 1814 port->rx_ts_filter = HWTSTAMP_FILTER_NONE; 1815 break; 1816 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1817 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1818 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1819 port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1820 cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1821 break; 1822 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1823 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1824 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1825 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1826 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1827 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1828 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1829 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1830 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1831 port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 1832 cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 1833 break; 1834 case HWTSTAMP_FILTER_ALL: 1835 case HWTSTAMP_FILTER_SOME: 1836 case HWTSTAMP_FILTER_NTP_ALL: 1837 NL_SET_ERR_MSG(extack, "RX filter is not supported"); 1838 return -EOPNOTSUPP; 
	default:
		NL_SET_ERR_MSG(extack, "RX filter is not supported");
		return -ERANGE;
	}

	port->tx_ts_enabled = (cfg->tx_type == HWTSTAMP_TX_ON);

	/* Configure TX/RX timestamping */
	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;

	ts_vlan_ltype = ETH_P_8021Q;

	ts_ctrl_ltype2 = ETH_P_1588 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;

	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;

	if (port->tx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;

	if (port->rx_ts_filter)
		ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;

	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
	writel(ts_vlan_ltype, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
	writel(ts_ctrl_ltype2, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);

	return 0;
}

static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
				       struct kernel_hwtstamp_config *cfg)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	cfg->flags = 0;
	cfg->tx_type = port->tx_ts_enabled ?
		       HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg->rx_filter = port->rx_ts_filter;

	return 0;
}
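
/*
 * Minimal userspace sketch of how the hwtstamp handlers above are reached.
 * This is the generic SIOCSHWTSTAMP path (linux/net_tstamp.h), not a
 * driver-specific API; "eth0" and the missing error handling are
 * illustrative only:
 *
 *	struct ifreq ifr = {};
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The core converts the request into a struct kernel_hwtstamp_config before
 * invoking the ndo_hwtstamp_set() handler registered below.
 */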

static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
					  struct ifreq *req, int cmd)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}

static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
					 struct rtnl_link_stats64 *stats)
{
	dev_fetch_sw_netstats(stats, dev->tstats);

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
				    struct bpf_prog *prog)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	bool running = netif_running(ndev);
	struct bpf_prog *old_prog;

	if (running)
		am65_cpsw_nuss_ndo_slave_stop(ndev);

	old_prog = xchg(&port->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (running)
		return am65_cpsw_nuss_ndo_slave_open(ndev);

	return 0;
}

static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
	default:
		return -EINVAL;
	}
}

static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
				  struct xdp_frame **frames, u32 flags)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	int cpu = smp_processor_id();
	int i, nxmit = 0;

	tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
	netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);

	__netif_tx_lock(netif_txq, cpu);
	for (i = 0; i < n; i++) {
		if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
					   AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
			break;
		nxmit++;
	}
	__netif_tx_unlock(netif_txq);

	return nxmit;
}

static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
	.ndo_get_stats64	= am65_cpsw_nuss_ndo_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
	.ndo_eth_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
	.ndo_setup_tc		= am65_cpsw_qos_ndo_setup_tc,
	.ndo_set_tx_maxrate	= am65_cpsw_qos_ndo_tx_p0_set_maxrate,
	.ndo_bpf		= am65_cpsw_ndo_bpf,
	.ndo_xdp_xmit		= am65_cpsw_ndo_xdp_xmit,
	.ndo_hwtstamp_get	= am65_cpsw_nuss_hwtstamp_get,
	.ndo_hwtstamp_set	= am65_cpsw_nuss_hwtstamp_set,
};

static void am65_cpsw_disable_phy(struct phy *phy)
{
	phy_power_off(phy);
	phy_exit(phy);
}

static int am65_cpsw_enable_phy(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);
	if (ret < 0)
		return ret;

	ret = phy_power_on(phy);
	if (ret < 0) {
		phy_exit(phy);
		return ret;
	}

	return 0;
}

static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	struct phy *phy;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		phy = port->slave.serdes_phy;
		if (phy)
			am65_cpsw_disable_phy(phy);
	}
}

static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
				     struct am65_cpsw_port *port)
{
	const char *name = "serdes";
	struct phy *phy;
	int ret;

	phy = devm_of_phy_optional_get(dev, port_np, name);
	if (IS_ERR_OR_NULL(phy))
		return PTR_ERR_OR_ZERO(phy);

	/* Serdes PHY exists. Store it.
*/ 2043 port->slave.serdes_phy = phy; 2044 2045 ret = am65_cpsw_enable_phy(phy); 2046 if (ret < 0) 2047 goto err_phy; 2048 2049 return 0; 2050 2051err_phy: 2052 devm_phy_put(dev, phy); 2053 return ret; 2054} 2055 2056static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode, 2057 const struct phylink_link_state *state) 2058{ 2059 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data, 2060 phylink_config); 2061 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); 2062 struct am65_cpsw_common *common = port->common; 2063 2064 if (common->pdata.extra_modes & BIT(state->interface)) { 2065 if (state->interface == PHY_INTERFACE_MODE_SGMII) { 2066 writel(ADVERTISE_SGMII, 2067 port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG); 2068 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN); 2069 } else { 2070 cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN); 2071 } 2072 2073 if (state->interface == PHY_INTERFACE_MODE_USXGMII) { 2074 cpsw_sl_ctl_set(port->slave.mac_sl, 2075 CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN); 2076 } else { 2077 cpsw_sl_ctl_clr(port->slave.mac_sl, 2078 CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN); 2079 } 2080 2081 writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE, 2082 port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG); 2083 } 2084} 2085 2086static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode, 2087 phy_interface_t interface) 2088{ 2089 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data, 2090 phylink_config); 2091 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); 2092 struct am65_cpsw_common *common = port->common; 2093 struct net_device *ndev = port->ndev; 2094 u32 mac_control; 2095 int tmo; 2096 2097 /* disable forwarding */ 2098 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 2099 2100 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE); 2101 2102 tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100); 2103 dev_dbg(common->dev, "down msc_sl %08x tmo %d\n", 2104 cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo); 2105 2106 /* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */ 2107 mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A | 2108 CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN; 2109 /* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */ 2110 if (phy_interface_mode_is_rgmii(interface)) 2111 mac_control |= CPSW_SL_CTL_EXT_EN; 2112 /* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */ 2113 cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control); 2114 2115 am65_cpsw_qos_link_down(ndev); 2116 netif_tx_stop_all_queues(ndev); 2117} 2118 2119static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy, 2120 unsigned int mode, phy_interface_t interface, int speed, 2121 int duplex, bool tx_pause, bool rx_pause) 2122{ 2123 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data, 2124 phylink_config); 2125 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave); 2126 struct am65_cpsw_common *common = port->common; 2127 u32 mac_control = CPSW_SL_CTL_GMII_EN; 2128 struct net_device *ndev = port->ndev; 2129 2130 /* Bring the port out of idle state */ 2131 cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE); 2132 2133 if (speed == SPEED_1000) 
2134 mac_control |= CPSW_SL_CTL_GIG; 2135 /* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */ 2136 if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface)) 2137 /* Can be used with in band mode only */ 2138 mac_control |= CPSW_SL_CTL_EXT_EN; 2139 if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII) 2140 mac_control |= CPSW_SL_CTL_IFCTL_A; 2141 if (duplex) 2142 mac_control |= CPSW_SL_CTL_FULLDUPLEX; 2143 2144 /* rx_pause/tx_pause */ 2145 if (rx_pause) 2146 mac_control |= CPSW_SL_CTL_TX_FLOW_EN; 2147 2148 if (tx_pause) 2149 mac_control |= CPSW_SL_CTL_RX_FLOW_EN; 2150 2151 cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); 2152 2153 /* enable forwarding */ 2154 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 2155 2156 am65_cpsw_qos_link_up(ndev, speed); 2157 netif_tx_wake_all_queues(ndev); 2158} 2159 2160static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = { 2161 .mac_config = am65_cpsw_nuss_mac_config, 2162 .mac_link_down = am65_cpsw_nuss_mac_link_down, 2163 .mac_link_up = am65_cpsw_nuss_mac_link_up, 2164}; 2165 2166static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port) 2167{ 2168 struct am65_cpsw_common *common = port->common; 2169 2170 if (!port->disabled) 2171 return; 2172 2173 cpsw_ale_control_set(common->ale, port->port_id, 2174 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 2175 2176 cpsw_sl_reset(port->slave.mac_sl, 100); 2177 cpsw_sl_ctl_reset(port->slave.mac_sl); 2178} 2179 2180static void am65_cpsw_nuss_free_tx_chns(void *data) 2181{ 2182 struct am65_cpsw_common *common = data; 2183 int i; 2184 2185 for (i = 0; i < common->tx_ch_num; i++) { 2186 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2187 2188 if (!IS_ERR_OR_NULL(tx_chn->desc_pool)) 2189 k3_cppi_desc_pool_destroy(tx_chn->desc_pool); 2190 2191 if (!IS_ERR_OR_NULL(tx_chn->tx_chn)) 2192 k3_udma_glue_release_tx_chn(tx_chn->tx_chn); 2193 2194 memset(tx_chn, 0, sizeof(*tx_chn)); 2195 } 2196} 2197 2198static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) 2199{ 2200 struct device *dev = common->dev; 2201 int i; 2202 2203 common->tx_ch_rate_msk = 0; 2204 for (i = 0; i < common->tx_ch_num; i++) { 2205 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; 2206 2207 if (tx_chn->irq > 0) 2208 devm_free_irq(dev, tx_chn->irq, tx_chn); 2209 2210 netif_napi_del(&tx_chn->napi_tx); 2211 } 2212 2213 am65_cpsw_nuss_free_tx_chns(common); 2214} 2215 2216static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) 2217{ 2218 struct device *dev = common->dev; 2219 struct am65_cpsw_tx_chn *tx_chn; 2220 int i, ret = 0; 2221 2222 for (i = 0; i < common->tx_ch_num; i++) { 2223 tx_chn = &common->tx_chns[i]; 2224 2225 hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback, 2226 CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); 2227 2228 netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, 2229 am65_cpsw_nuss_tx_poll); 2230 2231 ret = devm_request_irq(dev, tx_chn->irq, 2232 am65_cpsw_nuss_tx_irq, 2233 IRQF_TRIGGER_HIGH, 2234 tx_chn->tx_chn_name, tx_chn); 2235 if (ret) { 2236 dev_err(dev, "failure requesting tx%u irq %u, %d\n", 2237 tx_chn->id, tx_chn->irq, ret); 2238 goto err; 2239 } 2240 } 2241 2242 return 0; 2243 2244err: 2245 netif_napi_del(&tx_chn->napi_tx); 2246 for (--i; i >= 0; i--) { 2247 tx_chn = &common->tx_chns[i]; 2248 devm_free_irq(dev, tx_chn->irq, tx_chn); 2249 netif_napi_del(&tx_chn->napi_tx); 2250 } 2251 2252 return ret; 2253} 2254 2255static int am65_cpsw_nuss_init_tx_chns(struct 
am65_cpsw_common *common)
{
	u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
	struct device *dev = common->dev;
	struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0
	};
	u32 hdesc_size, hdesc_size_out;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;
	tx_cfg.tx_cfg.size = max_desc_num;
	tx_cfg.txcq_cfg.size = max_desc_num;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "tx%d", i);

		spin_lock_init(&tx_chn->lock);
		tx_chn->common = common;
		tx_chn->id = i;
		tx_chn->descs_num = max_desc_num;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev,
						    tx_chn->tx_chn_name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
					    "Failed to request tx dma channel\n");
			goto err;
		}
		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);

		tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
								  tx_chn->descs_num,
								  hdesc_size,
								  tx_chn->tx_chn_name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			dev_err(dev, "Failed to create pool %d\n", ret);
			goto err;
		}

		hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
		tx_chn->dsize_log2 = __fls(hdesc_size_out);
		WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq < 0) {
			dev_err(dev, "Failed to get tx dma irq %d\n",
				tx_chn->irq);
			ret = tx_chn->irq;
			goto err;
		}

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
	if (ret) {
		dev_err(dev, "Failed to add tx NAPI %d\n", ret);
		goto err;
	}

	return 0;

err:
	am65_cpsw_nuss_free_tx_chns(common);

	return ret;
}
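
/*
 * Note on dsize_log2 above: the CPPI descriptor size reported by the pool
 * is expected to be a power of two, so descriptor address to index
 * conversions reduce to a shift on the hot path. A minimal sketch of the
 * idea (a hypothetical helper, not this driver's exact code):
 *
 *	static inline int desc_idx(void *pool_base, void *desc, u32 dsize_log2)
 *	{
 *		return (desc - pool_base) >> dsize_log2;
 *	}
 */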

static void am65_cpsw_nuss_free_rx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	struct am65_cpsw_rx_chn *rx_chn;

	rx_chn = &common->rx_chns;

	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}

static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct am65_cpsw_rx_chn *rx_chn;
	struct am65_cpsw_rx_flow *flows;
	int i;

	rx_chn = &common->rx_chns;
	flows = rx_chn->flows;

	for (i = 0; i < common->rx_ch_num_flows; i++) {
		if (flows[i].irq >= 0)
			devm_free_irq(dev, flows[i].irq, &flows[i]);
		netif_napi_del(&flows[i].napi_rx);
	}

	am65_cpsw_nuss_free_rx_chns(common);

	common->rx_flow_id_base = -1;
}

static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
	u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
	struct device *dev = common->dev;
	struct am65_cpsw_rx_flow *flow;
	u32 hdesc_size, hdesc_size_out;
	u32 fdqring_id;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = common->rx_ch_num_flows;
	rx_cfg.flow_id_base = common->rx_flow_id_base;

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;

	for (i = 0; i < common->rx_ch_num_flows; i++) {
		flow = &rx_chn->flows[i];
		flow->page_pool = NULL;
	}

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
				    "Failed to request rx dma channel\n");
		goto err;
	}
	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);

	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size, "rx");
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		dev_err(dev, "Failed to create rx pool %d\n", ret);
		goto err;
	}

	hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
	rx_chn->dsize_log2 = __fls(hdesc_size_out);
	WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));

	common->rx_flow_id_base =
		k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		flow = &rx_chn->flows[i];
		flow->id = i;
		flow->common = common;
		flow->irq = -EINVAL;

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		/* share same FDQ for all flows */
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
			goto err_flow;
		}
		if (!i)
			fdqring_id =
				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								i);

		flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (flow->irq <= 0) {
			dev_err(dev, "Failed to get rx dma irq %d\n",
				flow->irq);
			ret = flow->irq;
			goto err_flow;
		}

		snprintf(flow->name,
			 sizeof(flow->name), "%s-rx%d",
			 dev_name(dev), i);
		hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_PINNED);

		netif_napi_add(common->dma_ndev, &flow->napi_rx,
			       am65_cpsw_nuss_rx_poll);

		ret = devm_request_irq(dev, flow->irq,
				       am65_cpsw_nuss_rx_irq,
IRQF_TRIGGER_HIGH, 2488 flow->name, flow); 2489 if (ret) { 2490 dev_err(dev, "failure requesting rx %d irq %u, %d\n", 2491 i, flow->irq, ret); 2492 flow->irq = -EINVAL; 2493 goto err_request_irq; 2494 } 2495 } 2496 2497 /* setup classifier to route priorities to flows */ 2498 cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows); 2499 2500 return 0; 2501 2502err_request_irq: 2503 netif_napi_del(&flow->napi_rx); 2504 2505err_flow: 2506 for (--i; i >= 0; i--) { 2507 flow = &rx_chn->flows[i]; 2508 devm_free_irq(dev, flow->irq, flow); 2509 netif_napi_del(&flow->napi_rx); 2510 } 2511 2512err: 2513 am65_cpsw_nuss_free_rx_chns(common); 2514 2515 return ret; 2516} 2517 2518static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common) 2519{ 2520 struct am65_cpsw_host *host_p = am65_common_get_host(common); 2521 2522 host_p->common = common; 2523 host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE; 2524 host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE; 2525 2526 return 0; 2527} 2528 2529static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node, 2530 int slave, u8 *mac_addr) 2531{ 2532 u32 mac_lo, mac_hi, offset; 2533 struct regmap *syscon; 2534 2535 syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse", 2536 1, &offset); 2537 if (IS_ERR(syscon)) { 2538 if (PTR_ERR(syscon) == -ENODEV) 2539 return 0; 2540 return PTR_ERR(syscon); 2541 } 2542 2543 regmap_read(syscon, offset, &mac_lo); 2544 regmap_read(syscon, offset + 4, &mac_hi); 2545 2546 mac_addr[0] = (mac_hi >> 8) & 0xff; 2547 mac_addr[1] = mac_hi & 0xff; 2548 mac_addr[2] = (mac_lo >> 24) & 0xff; 2549 mac_addr[3] = (mac_lo >> 16) & 0xff; 2550 mac_addr[4] = (mac_lo >> 8) & 0xff; 2551 mac_addr[5] = mac_lo & 0xff; 2552 2553 return 0; 2554} 2555 2556static int am65_cpsw_init_cpts(struct am65_cpsw_common *common) 2557{ 2558 struct device *dev = common->dev; 2559 struct device_node *node; 2560 struct am65_cpts *cpts; 2561 void __iomem *reg_base; 2562 2563 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) 2564 return 0; 2565 2566 node = of_get_child_by_name(dev->of_node, "cpts"); 2567 if (!node) { 2568 dev_err(dev, "%s cpts not found\n", __func__); 2569 return -ENOENT; 2570 } 2571 2572 reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE; 2573 cpts = am65_cpts_create(dev, reg_base, node); 2574 if (IS_ERR(cpts)) { 2575 int ret = PTR_ERR(cpts); 2576 2577 of_node_put(node); 2578 dev_err(dev, "cpts create err %d\n", ret); 2579 return ret; 2580 } 2581 common->cpts = cpts; 2582 /* Forbid PM runtime if CPTS is running. 2583 * K3 CPSWxG modules may completely lose context during ON->OFF 2584 * transitions depending on integration. 
2585 * AM65x/J721E MCU CPSW2G: false 2586 * J721E MAIN_CPSW9G: true 2587 */ 2588 pm_runtime_forbid(dev); 2589 2590 return 0; 2591} 2592 2593static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) 2594{ 2595 struct device_node *node, *port_np; 2596 struct device *dev = common->dev; 2597 int ret; 2598 2599 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 2600 if (!node) 2601 return -ENOENT; 2602 2603 for_each_child_of_node(node, port_np) { 2604 phy_interface_t phy_if; 2605 struct am65_cpsw_port *port; 2606 u32 port_id; 2607 2608 /* it is not a slave port node, continue */ 2609 if (strcmp(port_np->name, "port")) 2610 continue; 2611 2612 ret = of_property_read_u32(port_np, "reg", &port_id); 2613 if (ret < 0) { 2614 dev_err(dev, "%pOF error reading port_id %d\n", 2615 port_np, ret); 2616 goto of_node_put; 2617 } 2618 2619 if (!port_id || port_id > common->port_num) { 2620 dev_err(dev, "%pOF has invalid port_id %u %s\n", 2621 port_np, port_id, port_np->name); 2622 ret = -EINVAL; 2623 goto of_node_put; 2624 } 2625 2626 port = am65_common_get_port(common, port_id); 2627 port->port_id = port_id; 2628 port->common = common; 2629 port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE + 2630 AM65_CPSW_NU_PORTS_OFFSET * (port_id); 2631 if (common->pdata.extra_modes) 2632 port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id); 2633 port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE + 2634 (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id); 2635 port->name = of_get_property(port_np, "label", NULL); 2636 port->fetch_ram_base = 2637 common->cpsw_base + AM65_CPSW_NU_FRAM_BASE + 2638 (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); 2639 2640 port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); 2641 if (IS_ERR(port->slave.mac_sl)) { 2642 ret = PTR_ERR(port->slave.mac_sl); 2643 goto of_node_put; 2644 } 2645 2646 port->disabled = !of_device_is_available(port_np); 2647 if (port->disabled) { 2648 common->disabled_ports_mask |= BIT(port->port_id); 2649 continue; 2650 } 2651 2652 port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL); 2653 if (IS_ERR(port->slave.ifphy)) { 2654 ret = PTR_ERR(port->slave.ifphy); 2655 dev_err(dev, "%pOF error retrieving port phy: %d\n", 2656 port_np, ret); 2657 goto of_node_put; 2658 } 2659 2660 /* Initialize the Serdes PHY for the port */ 2661 ret = am65_cpsw_init_serdes_phy(dev, port_np, port); 2662 if (ret) 2663 goto of_node_put; 2664 2665 port->slave.mac_only = 2666 of_property_read_bool(port_np, "ti,mac-only"); 2667 2668 /* get phy/link info */ 2669 port->slave.port_np = of_node_get(port_np); 2670 ret = of_get_phy_mode(port_np, &phy_if); 2671 if (ret) { 2672 dev_err(dev, "%pOF read phy-mode err %d\n", 2673 port_np, ret); 2674 goto of_node_put; 2675 } 2676 2677 /* CPSW controllers supported by this driver have a fixed 2678 * internal TX delay in RGMII mode. Fix up PHY mode to account 2679 * for this and warn about Device Trees that claim to have a TX 2680 * delay on the PCB. 
	 */
		switch (phy_if) {
		case PHY_INTERFACE_MODE_RGMII_ID:
			phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
			break;
		case PHY_INTERFACE_MODE_RGMII_TXID:
			phy_if = PHY_INTERFACE_MODE_RGMII;
			break;
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_RXID:
			dev_warn(dev,
				 "RGMII mode without internal TX delay unsupported; please fix your Device Tree\n");
			break;
		default:
			break;
		}

		port->slave.phy_if = phy_if;
		ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, phy_if);
		if (ret)
			goto of_node_put;

		ret = of_get_mac_address(port_np, port->slave.mac_addr);
		if (ret == -EPROBE_DEFER) {
			goto of_node_put;
		} else if (ret) {
			am65_cpsw_am654_get_efuse_macid(port_np,
							port->port_id,
							port->slave.mac_addr);
			if (!is_valid_ether_addr(port->slave.mac_addr)) {
				eth_random_addr(port->slave.mac_addr);
				dev_info(dev, "Using random MAC address\n");
			}
		}

		/* Reset all Queue priorities to 0 */
		writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
	}
	of_node_put(node);

	/* is there at least one external port? */
	if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
		dev_err(dev, "No external ports are available\n");
		return -ENODEV;
	}

	return 0;

of_node_put:
	of_node_put(port_np);
	of_node_put(node);
	return ret;
}

static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		if (port->slave.phylink)
			phylink_destroy(port->slave.phylink);
	}
}

static void am65_cpsw_remove_dt(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		of_node_put(port->slave.port_np);
	}
}

static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	struct phylink *phylink;

	port = &common->ports[port_idx];

	if (port->disabled)
		return 0;

	/* alloc netdev */
	port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv),
					AM65_CPSW_MAX_QUEUES,
					AM65_CPSW_MAX_QUEUES);
	if (!port->ndev) {
		dev_err(dev, "error allocating slave net_device %u\n",
			port->port_id);
		return -ENOMEM;
	}

	ndev_priv = netdev_priv(port->ndev);
	ndev_priv->port = port;
	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
	mutex_init(&ndev_priv->mm_lock);
	port->qos.link_speed = SPEED_UNKNOWN;
	SET_NETDEV_DEV(port->ndev, dev);
	device_set_node(&port->ndev->dev, of_fwnode_handle(port->slave.port_np));

	eth_hw_addr_set(port->ndev, port->slave.mac_addr);

	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
			      (VLAN_ETH_HLEN + ETH_FCS_LEN);
	port->ndev->hw_features = NETIF_F_SG |
				  NETIF_F_RXCSUM |
				  NETIF_F_HW_CSUM |
				  NETIF_F_HW_TC;
	port->ndev->features = port->ndev->hw_features |
			       NETIF_F_HW_VLAN_CTAG_FILTER;
	port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
				   NETDEV_XDP_ACT_REDIRECT |
				   NETDEV_XDP_ACT_NDO_XMIT;
	port->ndev->vlan_features |= NETIF_F_SG;
	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;

	/* Configure phylink */
	port->slave.phylink_config.dev = &port->ndev->dev;
	port->slave.phylink_config.type = PHYLINK_NETDEV;
	port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
						      MAC_1000FD | MAC_5000FD;
	port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */

	switch (port->slave.phy_if) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
		break;

	case PHY_INTERFACE_MODE_RMII:
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  port->slave.phylink_config.supported_interfaces);
		break;

	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_USXGMII:
		if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
			__set_bit(port->slave.phy_if,
				  port->slave.phylink_config.supported_interfaces);
		} else {
			dev_err(dev, "selected phy-mode is not supported\n");
			return -EOPNOTSUPP;
		}
		break;

	default:
		dev_err(dev, "selected phy-mode is not supported\n");
		return -EOPNOTSUPP;
	}

	phylink = phylink_create(&port->slave.phylink_config,
				 of_fwnode_handle(port->slave.port_np),
				 port->slave.phy_if,
				 &am65_cpsw_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	port->slave.phylink = phylink;

	/* Disable TX checksum offload by default due to HW bug */
	if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
		port->ndev->features &= ~NETIF_F_HW_CSUM;

	port->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	port->xdp_prog = NULL;

	if (!common->dma_ndev)
		common->dma_ndev = port->ndev;

	return 0;
}
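
/*
 * Sketch of an "ethernet-ports" child node as consumed by
 * am65_cpsw_nuss_init_slave_ports() above. The property names match what
 * the code reads; the values and the phy handle are illustrative only,
 * see the dt-bindings schema for the authoritative description:
 *
 *	ethernet-ports {
 *		port@1 {
 *			reg = <1>;
 *			label = "port1";
 *			phy-mode = "rgmii-rxid";
 *			phy-handle = <&phy0>;
 *		};
 *	};
 */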

static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
{
	int ret;
	int i;

	for (i = 0; i < common->port_num; i++) {
		ret = am65_cpsw_nuss_init_port_ndev(common, i);
		if (ret)
			return ret;
	}

	return 0;
}

static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		if (!port->ndev)
			continue;
		if (port->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(port->ndev);
		free_netdev(port->ndev);
		port->ndev = NULL;
	}
}

static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
{
	int set_val = 0;
	int i;

	if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
		set_val = 1;

	dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);

	for (i = 1; i <= common->port_num; i++) {
		struct am65_cpsw_port *port = am65_common_get_port(common, i);
		struct am65_cpsw_ndev_priv *priv;

		if (!port->ndev)
			continue;

		priv = am65_ndev_to_priv(port->ndev);
		priv->offload_fwd_mark = set_val;
	}
}

bool am65_cpsw_port_dev_check(const struct net_device *ndev)
{
	if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
		struct am65_cpsw_common *common = am65_ndev_to_common(ndev);

		return
!common->is_emac_mode; 2924 } 2925 2926 return false; 2927} 2928 2929static int am65_cpsw_netdevice_port_link(struct net_device *ndev, 2930 struct net_device *br_ndev, 2931 struct netlink_ext_ack *extack) 2932{ 2933 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2934 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2935 int err; 2936 2937 if (!common->br_members) { 2938 common->hw_bridge_dev = br_ndev; 2939 } else { 2940 /* This is adding the port to a second bridge, this is 2941 * unsupported 2942 */ 2943 if (common->hw_bridge_dev != br_ndev) 2944 return -EOPNOTSUPP; 2945 } 2946 2947 err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, 2948 false, extack); 2949 if (err) 2950 return err; 2951 2952 common->br_members |= BIT(priv->port->port_id); 2953 2954 am65_cpsw_port_offload_fwd_mark_update(common); 2955 2956 return NOTIFY_DONE; 2957} 2958 2959static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev) 2960{ 2961 struct am65_cpsw_common *common = am65_ndev_to_common(ndev); 2962 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); 2963 2964 switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); 2965 2966 common->br_members &= ~BIT(priv->port->port_id); 2967 2968 am65_cpsw_port_offload_fwd_mark_update(common); 2969 2970 if (!common->br_members) 2971 common->hw_bridge_dev = NULL; 2972} 2973 2974/* netdev notifier */ 2975static int am65_cpsw_netdevice_event(struct notifier_block *unused, 2976 unsigned long event, void *ptr) 2977{ 2978 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); 2979 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 2980 struct netdev_notifier_changeupper_info *info; 2981 int ret = NOTIFY_DONE; 2982 2983 if (!am65_cpsw_port_dev_check(ndev)) 2984 return NOTIFY_DONE; 2985 2986 switch (event) { 2987 case NETDEV_CHANGEUPPER: 2988 info = ptr; 2989 2990 if (netif_is_bridge_master(info->upper_dev)) { 2991 if (info->linking) 2992 ret = am65_cpsw_netdevice_port_link(ndev, 2993 info->upper_dev, 2994 extack); 2995 else 2996 am65_cpsw_netdevice_port_unlink(ndev); 2997 } 2998 break; 2999 default: 3000 return NOTIFY_DONE; 3001 } 3002 3003 return notifier_from_errno(ret); 3004} 3005 3006static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw) 3007{ 3008 int ret = 0; 3009 3010 if (AM65_CPSW_IS_CPSW2G(cpsw) || 3011 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 3012 return 0; 3013 3014 cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event; 3015 ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 3016 if (ret) { 3017 dev_err(cpsw->dev, "can't register netdevice notifier\n"); 3018 return ret; 3019 } 3020 3021 ret = am65_cpsw_switchdev_register_notifiers(cpsw); 3022 if (ret) 3023 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 3024 3025 return ret; 3026} 3027 3028static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw) 3029{ 3030 if (AM65_CPSW_IS_CPSW2G(cpsw) || 3031 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) 3032 return; 3033 3034 am65_cpsw_switchdev_unregister_notifiers(cpsw); 3035 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb); 3036} 3037 3038static const struct devlink_ops am65_cpsw_devlink_ops = {}; 3039 3040static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw) 3041{ 3042 cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0, 3043 ALE_MCAST_BLOCK_LEARN_FWD); 3044} 3045 3046static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common) 3047{ 3048 struct 
am65_cpsw_host *host = am65_common_get_host(common); 3049 3050 writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3051 3052 am65_cpsw_init_stp_ale_entry(common); 3053 3054 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1); 3055 dev_dbg(common->dev, "Set P0_UNI_FLOOD\n"); 3056 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0); 3057} 3058 3059static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common) 3060{ 3061 struct am65_cpsw_host *host = am65_common_get_host(common); 3062 3063 writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3064 3065 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); 3066 dev_dbg(common->dev, "unset P0_UNI_FLOOD\n"); 3067 3068 /* learning make no sense in multi-mac mode */ 3069 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1); 3070} 3071 3072static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, 3073 struct devlink_param_gset_ctx *ctx, 3074 struct netlink_ext_ack *extack) 3075{ 3076 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 3077 struct am65_cpsw_common *common = dl_priv->common; 3078 3079 dev_dbg(common->dev, "%s id:%u\n", __func__, id); 3080 3081 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 3082 return -EOPNOTSUPP; 3083 3084 ctx->val.vbool = !common->is_emac_mode; 3085 3086 return 0; 3087} 3088 3089static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port) 3090{ 3091 struct am65_cpsw_slave_data *slave = &port->slave; 3092 struct am65_cpsw_common *common = port->common; 3093 u32 port_mask; 3094 3095 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3096 3097 if (slave->mac_only) 3098 /* enable mac-only mode on port */ 3099 cpsw_ale_control_set(common->ale, port->port_id, 3100 ALE_PORT_MACONLY, 1); 3101 3102 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1); 3103 3104 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3105 3106 cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr, 3107 HOST_PORT_NUM, ALE_SECURE, slave->port_vlan); 3108 cpsw_ale_add_mcast(common->ale, port->ndev->broadcast, 3109 port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2); 3110} 3111 3112static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port) 3113{ 3114 struct am65_cpsw_slave_data *slave = &port->slave; 3115 struct am65_cpsw_common *cpsw = port->common; 3116 u32 port_mask; 3117 3118 cpsw_ale_control_set(cpsw->ale, port->port_id, 3119 ALE_PORT_NOLEARN, 0); 3120 3121 cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr, 3122 HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN, 3123 slave->port_vlan); 3124 3125 port_mask = BIT(port->port_id) | ALE_PORT_HOST; 3126 3127 cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast, 3128 port_mask, ALE_VLAN, slave->port_vlan, 3129 ALE_MCAST_FWD_2); 3130 3131 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3132 3133 cpsw_ale_control_set(cpsw->ale, port->port_id, 3134 ALE_PORT_MACONLY, 0); 3135} 3136 3137static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, 3138 struct devlink_param_gset_ctx *ctx, 3139 struct netlink_ext_ack *extack) 3140{ 3141 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); 3142 struct am65_cpsw_common *cpsw = dl_priv->common; 3143 bool switch_en = ctx->val.vbool; 3144 bool if_running = false; 3145 int i; 3146 3147 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 3148 3149 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE) 3150 return -EOPNOTSUPP; 3151 3152 if (switch_en == 
!cpsw->is_emac_mode)
		return 0;

	if (!switch_en && cpsw->br_members) {
		dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
		return -EINVAL;
	}

	rtnl_lock();

	cpsw->is_emac_mode = !switch_en;

	for (i = 0; i < cpsw->port_num; i++) {
		struct net_device *sl_ndev = cpsw->ports[i].ndev;

		if (!sl_ndev || !netif_running(sl_ndev))
			continue;

		if_running = true;
	}

	if (!if_running) {
		/* all ndevs are down */
		for (i = 0; i < cpsw->port_num; i++) {
			struct net_device *sl_ndev = cpsw->ports[i].ndev;
			struct am65_cpsw_slave_data *slave;

			if (!sl_ndev)
				continue;

			slave = am65_ndev_to_slave(sl_ndev);
			if (switch_en)
				slave->port_vlan = cpsw->default_vlan;
			else
				slave->port_vlan = 0;
		}

		goto exit;
	}

	cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
	/* clean up ALE table */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
	cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);

	if (switch_en) {
		dev_info(cpsw->dev, "Enable switch mode\n");

		am65_cpsw_init_host_port_switch(cpsw);

		for (i = 0; i < cpsw->port_num; i++) {
			struct net_device *sl_ndev = cpsw->ports[i].ndev;
			struct am65_cpsw_slave_data *slave;
			struct am65_cpsw_port *port;

			if (!sl_ndev)
				continue;

			port = am65_ndev_to_port(sl_ndev);
			slave = am65_ndev_to_slave(sl_ndev);
			slave->port_vlan = cpsw->default_vlan;

			if (netif_running(sl_ndev))
				am65_cpsw_init_port_switch_ale(port);
		}

	} else {
		dev_info(cpsw->dev, "Disable switch mode\n");

		am65_cpsw_init_host_port_emac(cpsw);

		for (i = 0; i < cpsw->port_num; i++) {
			struct net_device *sl_ndev = cpsw->ports[i].ndev;
			struct am65_cpsw_port *port;

			if (!sl_ndev)
				continue;

			port = am65_ndev_to_port(sl_ndev);
			port->slave.port_vlan = 0;
			if (netif_running(sl_ndev))
				am65_cpsw_init_port_emac_ale(port);
		}
	}
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
exit:
	rtnl_unlock();

	return 0;
}

static const struct devlink_param am65_cpsw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     am65_cpsw_dl_switch_mode_get,
			     am65_cpsw_dl_switch_mode_set, NULL),
};
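
/*
 * Runtime usage sketch for the "switch_mode" parameter declared above,
 * along the lines of the NUSS switchdev documentation (the devlink device
 * handle is board specific; this one is illustrative):
 *
 *	devlink dev param set platform/8000000.ethernet \
 *		name switch_mode value true cmode runtime
 */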

static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
{
	struct devlink_port_attrs attrs = {};
	struct am65_cpsw_devlink *dl_priv;
	struct device *dev = common->dev;
	struct devlink_port *dl_port;
	struct am65_cpsw_port *port;
	int ret = 0;
	int i;

	common->devlink =
		devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
	if (!common->devlink)
		return -ENOMEM;

	dl_priv = devlink_priv(common->devlink);
	dl_priv->common = common;

	/* Provide devlink hook to switch mode when multiple external ports
	 * are present and the NUSS switchdev driver is enabled.
	 */
	if (!AM65_CPSW_IS_CPSW2G(common) &&
	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
		ret = devlink_params_register(common->devlink,
					      am65_cpsw_devlink_params,
					      ARRAY_SIZE(am65_cpsw_devlink_params));
		if (ret) {
			dev_err(dev, "devlink params reg fail ret:%d\n", ret);
			goto dl_unreg;
		}
	}

	for (i = 1; i <= common->port_num; i++) {
		port = am65_common_get_port(common, i);
		dl_port = &port->devlink_port;

		if (port->ndev)
			attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		else
			attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		attrs.phys.port_number = port->port_id;
		attrs.switch_id.id_len = sizeof(resource_size_t);
		memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
		devlink_port_attrs_set(dl_port, &attrs);

		ret = devlink_port_register(common->devlink, dl_port, port->port_id);
		if (ret) {
			dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
				port->port_id, ret);
			goto dl_port_unreg;
		}
	}
	devlink_register(common->devlink);
	return ret;

dl_port_unreg:
	for (i = i - 1; i >= 1; i--) {
		port = am65_common_get_port(common, i);
		dl_port = &port->devlink_port;

		devlink_port_unregister(dl_port);
	}
dl_unreg:
	devlink_free(common->devlink);
	return ret;
}

static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
{
	struct devlink_port *dl_port;
	struct am65_cpsw_port *port;
	int i;

	devlink_unregister(common->devlink);

	for (i = 1; i <= common->port_num; i++) {
		port = am65_common_get_port(common, i);
		dl_port = &port->devlink_port;

		devlink_port_unregister(dl_port);
	}

	if (!AM65_CPSW_IS_CPSW2G(common) &&
	    IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
		devlink_params_unregister(common->devlink,
					  am65_cpsw_devlink_params,
					  ARRAY_SIZE(am65_cpsw_devlink_params));

	devlink_free(common->devlink);
}

static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret = 0, i;

	/* init tx channels */
	ret = am65_cpsw_nuss_init_tx_chns(common);
	if (ret)
		return ret;
	ret = am65_cpsw_nuss_init_rx_chns(common);
	if (ret)
		goto err_remove_tx;

	/* The DMA Channels are not guaranteed to be in a clean state.
	 * Reset and disable them to ensure that they are back to the
	 * clean state and ready to be used.
3361 */ 3362 for (i = 0; i < common->tx_ch_num; i++) { 3363 k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i], 3364 am65_cpsw_nuss_tx_cleanup); 3365 k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn); 3366 } 3367 3368 for (i = 0; i < common->rx_ch_num_flows; i++) 3369 k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, 3370 rx_chan, 3371 am65_cpsw_nuss_rx_cleanup); 3372 3373 k3_udma_glue_disable_rx_chn(rx_chan->rx_chn); 3374 3375 ret = am65_cpsw_nuss_register_devlink(common); 3376 if (ret) 3377 goto err_remove_rx; 3378 3379 for (i = 0; i < common->port_num; i++) { 3380 port = &common->ports[i]; 3381 3382 if (!port->ndev) 3383 continue; 3384 3385 SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port); 3386 3387 ret = register_netdev(port->ndev); 3388 if (ret) { 3389 dev_err(dev, "error registering slave net device%i %d\n", 3390 i, ret); 3391 goto err_cleanup_ndev; 3392 } 3393 } 3394 3395 ret = am65_cpsw_register_notifiers(common); 3396 if (ret) 3397 goto err_cleanup_ndev; 3398 3399 /* can't auto unregister ndev using devm_add_action() due to 3400 * devres release sequence in DD core for DMA 3401 */ 3402 3403 return 0; 3404 3405err_cleanup_ndev: 3406 am65_cpsw_nuss_cleanup_ndev(common); 3407 am65_cpsw_unregister_devlink(common); 3408err_remove_rx: 3409 am65_cpsw_nuss_remove_rx_chns(common); 3410err_remove_tx: 3411 am65_cpsw_nuss_remove_tx_chns(common); 3412 3413 return ret; 3414} 3415 3416int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common, 3417 int num_tx, int num_rx) 3418{ 3419 int ret; 3420 3421 am65_cpsw_nuss_remove_tx_chns(common); 3422 am65_cpsw_nuss_remove_rx_chns(common); 3423 3424 common->tx_ch_num = num_tx; 3425 common->rx_ch_num_flows = num_rx; 3426 ret = am65_cpsw_nuss_init_tx_chns(common); 3427 if (ret) 3428 return ret; 3429 3430 ret = am65_cpsw_nuss_init_rx_chns(common); 3431 if (ret) 3432 am65_cpsw_nuss_remove_tx_chns(common); 3433 3434 return ret; 3435} 3436 3437struct am65_cpsw_soc_pdata { 3438 u32 quirks_dis; 3439}; 3440 3441static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = { 3442 .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3443}; 3444 3445static const struct soc_device_attribute am65_cpsw_socinfo[] = { 3446 { .family = "AM65X", 3447 .revision = "SR2.0", 3448 .data = &am65x_soc_sr2_0 3449 }, 3450 {/* sentinel */} 3451}; 3452 3453static const struct am65_cpsw_pdata am65x_sr1_0 = { 3454 .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, 3455 .ale_dev_id = "am65x-cpsw2g", 3456 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3457}; 3458 3459static const struct am65_cpsw_pdata j721e_pdata = { 3460 .quirks = 0, 3461 .ale_dev_id = "am65x-cpsw2g", 3462 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3463}; 3464 3465static const struct am65_cpsw_pdata am64x_cpswxg_pdata = { 3466 .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ, 3467 .ale_dev_id = "am64-cpswxg", 3468 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3469}; 3470 3471static const struct am65_cpsw_pdata j722s_cpswxg_pdata = { 3472 .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ, 3473 .ale_dev_id = "am64-cpswxg", 3474 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3475 .extra_modes = BIT(PHY_INTERFACE_MODE_SGMII), 3476}; 3477 3478static const struct am65_cpsw_pdata j7200_cpswxg_pdata = { 3479 .quirks = 0, 3480 .ale_dev_id = "am64-cpswxg", 3481 .fdqring_mode = K3_RINGACC_RING_MODE_RING, 3482 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | 3483 BIT(PHY_INTERFACE_MODE_USXGMII), 3484}; 3485 3486static const struct am65_cpsw_pdata j721e_cpswxg_pdata = { 3487 .quirks = 0, 3488 .ale_dev_id = 
"am64-cpswxg", 3489 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3490 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII), 3491}; 3492 3493static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = { 3494 .quirks = 0, 3495 .ale_dev_id = "am64-cpswxg", 3496 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, 3497 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | 3498 BIT(PHY_INTERFACE_MODE_USXGMII), 3499}; 3500 3501static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { 3502 { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0}, 3503 { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata}, 3504 { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata}, 3505 { .compatible = "ti,j722s-cpsw-nuss", .data = &j722s_cpswxg_pdata}, 3506 { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata}, 3507 { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata}, 3508 { .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata}, 3509 { /* sentinel */ }, 3510}; 3511MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); 3512 3513static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common) 3514{ 3515 const struct soc_device_attribute *soc; 3516 3517 soc = soc_device_match(am65_cpsw_socinfo); 3518 if (soc && soc->data) { 3519 const struct am65_cpsw_soc_pdata *socdata = soc->data; 3520 3521 /* disable quirks */ 3522 common->pdata.quirks &= ~socdata->quirks_dis; 3523 } 3524} 3525 3526static int am65_cpsw_nuss_probe(struct platform_device *pdev) 3527{ 3528 struct cpsw_ale_params ale_params = { 0 }; 3529 const struct of_device_id *of_id; 3530 struct device *dev = &pdev->dev; 3531 struct am65_cpsw_common *common; 3532 struct device_node *node; 3533 struct resource *res; 3534 struct clk *clk; 3535 int ale_entries; 3536 __be64 id_temp; 3537 int ret, i; 3538 3539 BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE, 3540 "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE"); 3541 BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE, 3542 "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE"); 3543 common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); 3544 if (!common) 3545 return -ENOMEM; 3546 common->dev = dev; 3547 3548 of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev); 3549 if (!of_id) 3550 return -EINVAL; 3551 common->pdata = *(const struct am65_cpsw_pdata *)of_id->data; 3552 3553 am65_cpsw_nuss_apply_socinfo(common); 3554 3555 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss"); 3556 common->ss_base = devm_ioremap_resource(&pdev->dev, res); 3557 if (IS_ERR(common->ss_base)) 3558 return PTR_ERR(common->ss_base); 3559 common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE; 3560 /* Use device's physical base address as switch id */ 3561 id_temp = cpu_to_be64(res->start); 3562 memcpy(common->switch_id, &id_temp, sizeof(res->start)); 3563 3564 node = of_get_child_by_name(dev->of_node, "ethernet-ports"); 3565 if (!node) 3566 return -ENOENT; 3567 common->port_num = of_get_child_count(node); 3568 of_node_put(node); 3569 if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS) 3570 return -ENOENT; 3571 3572 common->rx_flow_id_base = -1; 3573 init_completion(&common->tdown_complete); 3574 common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS; 3575 common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS; 3576 common->pf_p0_rx_ptype_rrobin = true; 3577 common->default_vlan = 1; 3578 3579 common->ports = 
devm_kcalloc(dev, common->port_num, 3580 sizeof(*common->ports), 3581 GFP_KERNEL); 3582 if (!common->ports) 3583 return -ENOMEM; 3584 3585 clk = devm_clk_get(dev, "fck"); 3586 if (IS_ERR(clk)) 3587 return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n"); 3588 common->bus_freq = clk_get_rate(clk); 3589 3590 pm_runtime_enable(dev); 3591 ret = pm_runtime_resume_and_get(dev); 3592 if (ret < 0) { 3593 pm_runtime_disable(dev); 3594 return ret; 3595 } 3596 3597 am65_cpsw_nuss_get_ver(common); 3598 3599 ret = am65_cpsw_nuss_init_host_p(common); 3600 if (ret) 3601 goto err_pm_clear; 3602 3603 ret = am65_cpsw_nuss_init_slave_ports(common); 3604 if (ret) 3605 goto err_pm_clear; 3606 3607 node = of_get_child_by_name(dev->of_node, "mdio"); 3608 if (!node) { 3609 dev_warn(dev, "MDIO node not found\n"); 3610 } else if (of_device_is_available(node)) { 3611 struct platform_device *mdio_pdev; 3612 3613 mdio_pdev = of_platform_device_create(node, NULL, dev); 3614 if (!mdio_pdev) { 3615 ret = -ENODEV; 3616 goto err_pm_clear; 3617 } 3618 3619 common->mdio_dev = &mdio_pdev->dev; 3620 } 3621 of_node_put(node); 3622 3623 /* init common data */ 3624 ale_params.dev = dev; 3625 ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; 3626 ale_params.ale_ports = common->port_num + 1; 3627 ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE; 3628 ale_params.dev_id = common->pdata.ale_dev_id; 3629 ale_params.bus_freq = common->bus_freq; 3630 3631 common->ale = cpsw_ale_create(&ale_params); 3632 if (IS_ERR(common->ale)) { 3633 dev_err(dev, "error initializing ale engine\n"); 3634 ret = PTR_ERR(common->ale); 3635 goto err_of_clear; 3636 } 3637 3638 ale_entries = common->ale->params.ale_entries; 3639 common->ale_context = devm_kzalloc(dev, 3640 ale_entries * ALE_ENTRY_WORDS * sizeof(u32), 3641 GFP_KERNEL); 3642 ret = am65_cpsw_init_cpts(common); 3643 if (ret) 3644 goto err_of_clear; 3645 3646 /* init ports */ 3647 for (i = 0; i < common->port_num; i++) 3648 am65_cpsw_nuss_slave_disable_unused(&common->ports[i]); 3649 3650 dev_set_drvdata(dev, common); 3651 3652 common->is_emac_mode = true; 3653 3654 ret = am65_cpsw_nuss_init_ndevs(common); 3655 if (ret) 3656 goto err_ndevs_clear; 3657 3658 ret = am65_cpsw_nuss_register_ndevs(common); 3659 if (ret) 3660 goto err_ndevs_clear; 3661 3662 pm_runtime_put(dev); 3663 return 0; 3664 3665err_ndevs_clear: 3666 am65_cpsw_nuss_cleanup_ndev(common); 3667 am65_cpsw_nuss_phylink_cleanup(common); 3668 am65_cpts_release(common->cpts); 3669 am65_cpsw_remove_dt(common); 3670err_of_clear: 3671 if (common->mdio_dev) 3672 of_platform_device_destroy(common->mdio_dev, NULL); 3673err_pm_clear: 3674 pm_runtime_put_sync(dev); 3675 pm_runtime_disable(dev); 3676 return ret; 3677} 3678 3679static void am65_cpsw_nuss_remove(struct platform_device *pdev) 3680{ 3681 struct device *dev = &pdev->dev; 3682 struct am65_cpsw_common *common; 3683 int ret; 3684 3685 common = dev_get_drvdata(dev); 3686 3687 ret = pm_runtime_resume_and_get(&pdev->dev); 3688 if (ret < 0) { 3689 /* Note, if this error path is taken, we're leaking some 3690 * resources. 
3691 */ 3692 dev_err(&pdev->dev, "Failed to resume device (%pe)\n", 3693 ERR_PTR(ret)); 3694 return; 3695 } 3696 3697 am65_cpsw_unregister_notifiers(common); 3698 3699 /* must unregister ndevs here because DD release_driver routine calls 3700 * dma_deconfigure(dev) before devres_release_all(dev) 3701 */ 3702 am65_cpsw_nuss_cleanup_ndev(common); 3703 am65_cpsw_unregister_devlink(common); 3704 am65_cpsw_nuss_remove_rx_chns(common); 3705 am65_cpsw_nuss_remove_tx_chns(common); 3706 am65_cpsw_nuss_phylink_cleanup(common); 3707 am65_cpts_release(common->cpts); 3708 am65_cpsw_disable_serdes_phy(common); 3709 am65_cpsw_remove_dt(common); 3710 3711 if (common->mdio_dev) 3712 of_platform_device_destroy(common->mdio_dev, NULL); 3713 3714 pm_runtime_put_sync(&pdev->dev); 3715 pm_runtime_disable(&pdev->dev); 3716} 3717 3718static int am65_cpsw_nuss_suspend(struct device *dev) 3719{ 3720 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3721 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3722 struct am65_cpsw_port *port; 3723 struct net_device *ndev; 3724 int i, ret; 3725 3726 cpsw_ale_dump(common->ale, common->ale_context); 3727 host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3728 for (i = 0; i < common->port_num; i++) { 3729 port = &common->ports[i]; 3730 ndev = port->ndev; 3731 3732 if (!ndev) 3733 continue; 3734 3735 port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3736 netif_device_detach(ndev); 3737 if (netif_running(ndev)) { 3738 rtnl_lock(); 3739 ret = am65_cpsw_nuss_ndo_slave_stop(ndev); 3740 rtnl_unlock(); 3741 if (ret < 0) { 3742 netdev_err(ndev, "failed to stop: %d", ret); 3743 return ret; 3744 } 3745 } 3746 } 3747 3748 am65_cpts_suspend(common->cpts); 3749 3750 am65_cpsw_nuss_remove_rx_chns(common); 3751 am65_cpsw_nuss_remove_tx_chns(common); 3752 3753 return 0; 3754} 3755 3756static int am65_cpsw_nuss_resume(struct device *dev) 3757{ 3758 struct am65_cpsw_common *common = dev_get_drvdata(dev); 3759 struct am65_cpsw_host *host_p = am65_common_get_host(common); 3760 struct am65_cpsw_port *port; 3761 struct net_device *ndev; 3762 int i, ret; 3763 3764 ret = am65_cpsw_nuss_init_tx_chns(common); 3765 if (ret) 3766 return ret; 3767 ret = am65_cpsw_nuss_init_rx_chns(common); 3768 if (ret) { 3769 am65_cpsw_nuss_remove_tx_chns(common); 3770 return ret; 3771 } 3772 3773 /* If RX IRQ was disabled before suspend, keep it disabled */ 3774 for (i = 0; i < common->rx_ch_num_flows; i++) { 3775 if (common->rx_chns.flows[i].irq_disabled) 3776 disable_irq(common->rx_chns.flows[i].irq); 3777 } 3778 3779 am65_cpts_resume(common->cpts); 3780 3781 for (i = 0; i < common->port_num; i++) { 3782 port = &common->ports[i]; 3783 ndev = port->ndev; 3784 3785 if (!ndev) 3786 continue; 3787 3788 if (netif_running(ndev)) { 3789 rtnl_lock(); 3790 ret = am65_cpsw_nuss_ndo_slave_open(ndev); 3791 rtnl_unlock(); 3792 if (ret < 0) { 3793 netdev_err(ndev, "failed to start: %d", ret); 3794 return ret; 3795 } 3796 } 3797 3798 netif_device_attach(ndev); 3799 writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3800 } 3801 3802 writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET); 3803 cpsw_ale_restore(common->ale, common->ale_context); 3804 3805 return 0; 3806} 3807 3808static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = { 3809 SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume) 3810}; 3811 3812static struct platform_driver am65_cpsw_nuss_driver = { 3813 .driver = { 3814 .name = 
AM65_CPSW_DRV_NAME, 3815 .of_match_table = am65_cpsw_nuss_of_mtable, 3816 .pm = &am65_cpsw_nuss_dev_pm_ops, 3817 }, 3818 .probe = am65_cpsw_nuss_probe, 3819 .remove = am65_cpsw_nuss_remove, 3820}; 3821 3822module_platform_driver(am65_cpsw_nuss_driver); 3823 3824MODULE_LICENSE("GPL v2"); 3825MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>"); 3826MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");
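
/*
 * Usage sketch: with switch_mode enabled (see the devlink parameter above),
 * the port netdevs can be bridged with standard tooling; the interface
 * names are illustrative:
 *
 *	ip link add name br0 type bridge
 *	ip link set dev eth0 master br0
 *	ip link set dev eth1 master br0
 *	ip link set dev br0 up
 */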