Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ethernet: ti: introduce cpsw switchdev based driver part 1 - dual-emac

Part 1:
Introduce basic CPSW dual_mac driver (cpsw_new.c) which is operating in
dual-emac mode by default, thus working as 2 individual network interfaces.
Main differences from legacy CPSW driver are:

- optimized promiscuous mode: The P0_UNI_FLOOD (both ports) is enabled in
addition to ALLMULTI (current port) instead of ALE_BYPASS. So, ports in
promiscuous mode will keep the possibility of mcast and vlan filtering,
which provides significant benefits when ports are joined to the same bridge,
but without enabling "switch" mode, or to different bridges.
- learning disabled on ports as it does not make much sense for
segregated ports - no forwarding in HW.
- enabled basic support for devlink.

devlink dev show
platform/48484000.switch

devlink dev param show
platform/48484000.switch:
name ale_bypass type driver-specific
values:
cmode runtime value false

- "ale_bypass" devlink driver parameter allows to enable
ALE_CONTROL(4).BYPASS mode for debug purposes.
- updated DT bindings.

Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Ilias Apalodimas and committed by
David S. Miller
ed3525ed ef63fe72

+1710 -5
+17 -2
drivers/net/ethernet/ti/Kconfig
··· 59 59 To compile this driver as a module, choose M here: the module 60 60 will be called cpsw. 61 61 62 + config TI_CPSW_SWITCHDEV 63 + tristate "TI CPSW Switch Support with switchdev" 64 + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST 65 + select NET_SWITCHDEV 66 + select TI_DAVINCI_MDIO 67 + select MFD_SYSCON 68 + select REGMAP 69 + select NET_DEVLINK 70 + imply PHY_TI_GMII_SEL 71 + help 72 + This driver supports TI's CPSW Ethernet Switch. 73 + 74 + To compile this driver as a module, choose M here: the module 75 + will be called cpsw_new. 76 + 62 77 config TI_CPTS 63 78 bool "TI Common Platform Time Sync (CPTS) Support" 64 - depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST 79 + depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST 65 80 depends on COMMON_CLK 66 81 depends on POSIX_TIMERS 67 82 ---help--- ··· 88 73 config TI_CPTS_MOD 89 74 tristate 90 75 depends on TI_CPTS 91 - default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y 76 + default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y 92 77 select NET_PTP_CLASSIFY 93 78 imply PTP_1588_CLOCK 94 79 default m
+2
drivers/net/ethernet/ti/Makefile
··· 15 15 obj-$(CONFIG_TI_CPTS_MOD) += cpts.o 16 16 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 17 17 ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o 18 + obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o 19 + ti_cpsw_new-y := cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o 18 20 19 21 obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o 20 22 keystone_netcp-y := netcp_core.o cpsw_ale.o
+1673
drivers/net/ethernet/ti/cpsw_new.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Texas Instruments Ethernet Switch Driver 4 + * 5 + * Copyright (C) 2019 Texas Instruments 6 + */ 7 + 8 + #include <linux/io.h> 9 + #include <linux/clk.h> 10 + #include <linux/timer.h> 11 + #include <linux/module.h> 12 + #include <linux/irqreturn.h> 13 + #include <linux/interrupt.h> 14 + #include <linux/if_ether.h> 15 + #include <linux/etherdevice.h> 16 + #include <linux/net_tstamp.h> 17 + #include <linux/phy.h> 18 + #include <linux/phy/phy.h> 19 + #include <linux/delay.h> 20 + #include <linux/pm_runtime.h> 21 + #include <linux/gpio/consumer.h> 22 + #include <linux/of.h> 23 + #include <linux/of_mdio.h> 24 + #include <linux/of_net.h> 25 + #include <linux/of_device.h> 26 + #include <linux/if_vlan.h> 27 + #include <linux/kmemleak.h> 28 + #include <linux/sys_soc.h> 29 + 30 + #include <net/page_pool.h> 31 + #include <net/pkt_cls.h> 32 + #include <net/devlink.h> 33 + 34 + #include "cpsw.h" 35 + #include "cpsw_ale.h" 36 + #include "cpsw_priv.h" 37 + #include "cpsw_sl.h" 38 + #include "cpts.h" 39 + #include "davinci_cpdma.h" 40 + 41 + #include <net/pkt_sched.h> 42 + 43 + static int debug_level; 44 + static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT; 45 + static int rx_packet_max = CPSW_MAX_PACKET_SIZE; 46 + static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; 47 + 48 + struct cpsw_devlink { 49 + struct cpsw_common *cpsw; 50 + }; 51 + 52 + enum cpsw_devlink_param_id { 53 + CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 54 + CPSW_DL_PARAM_ALE_BYPASS, 55 + }; 56 + 57 + /* struct cpsw_common is not needed, kept here for compatibility 58 + * reasons witrh the old driver 59 + */ 60 + static int cpsw_slave_index_priv(struct cpsw_common *cpsw, 61 + struct cpsw_priv *priv) 62 + { 63 + if (priv->emac_port == HOST_PORT_NUM) 64 + return -1; 65 + 66 + return priv->emac_port - 1; 67 + } 68 + 69 + static void cpsw_set_promiscious(struct net_device *ndev, bool enable) 70 + { 71 + struct cpsw_common *cpsw = 
ndev_to_cpsw(ndev); 72 + bool enable_uni = false; 73 + int i; 74 + 75 + /* Enabling promiscuous mode for one interface will be 76 + * common for both the interface as the interface shares 77 + * the same hardware resource. 78 + */ 79 + for (i = 0; i < cpsw->data.slaves; i++) 80 + if (cpsw->slaves[i].ndev && 81 + (cpsw->slaves[i].ndev->flags & IFF_PROMISC)) 82 + enable_uni = true; 83 + 84 + if (!enable && enable_uni) { 85 + enable = enable_uni; 86 + dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n"); 87 + } 88 + 89 + if (enable) { 90 + /* Enable unknown unicast, reg/unreg mcast */ 91 + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, 92 + ALE_P0_UNI_FLOOD, 1); 93 + 94 + dev_dbg(cpsw->dev, "promiscuity enabled\n"); 95 + } else { 96 + /* Disable unknown unicast */ 97 + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, 98 + ALE_P0_UNI_FLOOD, 0); 99 + dev_dbg(cpsw->dev, "promiscuity disabled\n"); 100 + } 101 + } 102 + 103 + /** 104 + * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes 105 + * if it's not deleted 106 + * @ndev: device to sync 107 + * @addr: address to be added or deleted 108 + * @vid: vlan id, if vid < 0 set/unset address for real device 109 + * @add: add address if the flag is set or remove otherwise 110 + */ 111 + static int cpsw_set_mc(struct net_device *ndev, const u8 *addr, 112 + int vid, int add) 113 + { 114 + struct cpsw_priv *priv = netdev_priv(ndev); 115 + struct cpsw_common *cpsw = priv->cpsw; 116 + int mask, flags, ret, slave_no; 117 + 118 + slave_no = cpsw_slave_index(cpsw, priv); 119 + if (vid < 0) 120 + vid = cpsw->slaves[slave_no].port_vlan; 121 + 122 + mask = ALE_PORT_HOST; 123 + flags = vid ? 
ALE_VLAN : 0; 124 + 125 + if (add) 126 + ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0); 127 + else 128 + ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); 129 + 130 + return ret; 131 + } 132 + 133 + static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx) 134 + { 135 + struct addr_sync_ctx *sync_ctx = ctx; 136 + struct netdev_hw_addr *ha; 137 + int found = 0, ret = 0; 138 + 139 + if (!vdev || !(vdev->flags & IFF_UP)) 140 + return 0; 141 + 142 + /* vlan address is relevant if its sync_cnt != 0 */ 143 + netdev_for_each_mc_addr(ha, vdev) { 144 + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { 145 + found = ha->sync_cnt; 146 + break; 147 + } 148 + } 149 + 150 + if (found) 151 + sync_ctx->consumed++; 152 + 153 + if (sync_ctx->flush) { 154 + if (!found) 155 + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); 156 + return 0; 157 + } 158 + 159 + if (found) 160 + ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1); 161 + 162 + return ret; 163 + } 164 + 165 + static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num) 166 + { 167 + struct addr_sync_ctx sync_ctx; 168 + int ret; 169 + 170 + sync_ctx.consumed = 0; 171 + sync_ctx.addr = addr; 172 + sync_ctx.ndev = ndev; 173 + sync_ctx.flush = 0; 174 + 175 + ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); 176 + if (sync_ctx.consumed < num && !ret) 177 + ret = cpsw_set_mc(ndev, addr, -1, 1); 178 + 179 + return ret; 180 + } 181 + 182 + static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num) 183 + { 184 + struct addr_sync_ctx sync_ctx; 185 + 186 + sync_ctx.consumed = 0; 187 + sync_ctx.addr = addr; 188 + sync_ctx.ndev = ndev; 189 + sync_ctx.flush = 1; 190 + 191 + vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); 192 + if (sync_ctx.consumed == num) 193 + cpsw_set_mc(ndev, addr, -1, 0); 194 + 195 + return 0; 196 + } 197 + 198 + static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx) 199 + { 200 + struct 
addr_sync_ctx *sync_ctx = ctx; 201 + struct netdev_hw_addr *ha; 202 + int found = 0; 203 + 204 + if (!vdev || !(vdev->flags & IFF_UP)) 205 + return 0; 206 + 207 + /* vlan address is relevant if its sync_cnt != 0 */ 208 + netdev_for_each_mc_addr(ha, vdev) { 209 + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { 210 + found = ha->sync_cnt; 211 + break; 212 + } 213 + } 214 + 215 + if (!found) 216 + return 0; 217 + 218 + sync_ctx->consumed++; 219 + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); 220 + return 0; 221 + } 222 + 223 + static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) 224 + { 225 + struct addr_sync_ctx sync_ctx; 226 + 227 + sync_ctx.addr = addr; 228 + sync_ctx.ndev = ndev; 229 + sync_ctx.consumed = 0; 230 + 231 + vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx); 232 + if (sync_ctx.consumed < num) 233 + cpsw_set_mc(ndev, addr, -1, 0); 234 + 235 + return 0; 236 + } 237 + 238 + static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 239 + { 240 + struct cpsw_priv *priv = netdev_priv(ndev); 241 + struct cpsw_common *cpsw = priv->cpsw; 242 + 243 + if (ndev->flags & IFF_PROMISC) { 244 + /* Enable promiscuous mode */ 245 + cpsw_set_promiscious(ndev, true); 246 + cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); 247 + return; 248 + } 249 + 250 + /* Disable promiscuous mode */ 251 + cpsw_set_promiscious(ndev, false); 252 + 253 + /* Restore allmulti on vlans if necessary */ 254 + cpsw_ale_set_allmulti(cpsw->ale, 255 + ndev->flags & IFF_ALLMULTI, priv->emac_port); 256 + 257 + /* add/remove mcast address either for real netdev or for vlan */ 258 + __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 259 + cpsw_del_mc_addr); 260 + } 261 + 262 + static unsigned int cpsw_rxbuf_total_len(unsigned int len) 263 + { 264 + len += CPSW_HEADROOM; 265 + len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 266 + 267 + return SKB_DATA_ALIGN(len); 268 + } 269 + 270 + static void cpsw_rx_handler(void *token, int len, int 
status) 271 + { 272 + struct page *new_page, *page = token; 273 + void *pa = page_address(page); 274 + int headroom = CPSW_HEADROOM; 275 + struct cpsw_meta_xdp *xmeta; 276 + struct cpsw_common *cpsw; 277 + struct net_device *ndev; 278 + int port, ch, pkt_size; 279 + struct cpsw_priv *priv; 280 + struct page_pool *pool; 281 + struct sk_buff *skb; 282 + struct xdp_buff xdp; 283 + int ret = 0; 284 + dma_addr_t dma; 285 + 286 + xmeta = pa + CPSW_XMETA_OFFSET; 287 + cpsw = ndev_to_cpsw(xmeta->ndev); 288 + ndev = xmeta->ndev; 289 + pkt_size = cpsw->rx_packet_max; 290 + ch = xmeta->ch; 291 + 292 + if (status >= 0) { 293 + port = CPDMA_RX_SOURCE_PORT(status); 294 + if (port) 295 + ndev = cpsw->slaves[--port].ndev; 296 + } 297 + 298 + priv = netdev_priv(ndev); 299 + pool = cpsw->page_pool[ch]; 300 + 301 + if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { 302 + /* In dual emac mode check for all interfaces */ 303 + if (cpsw->usage_count && status >= 0) { 304 + /* The packet received is for the interface which 305 + * is already down and the other interface is up 306 + * and running, instead of freeing which results 307 + * in reducing of the number of rx descriptor in 308 + * DMA engine, requeue page back to cpdma. 
309 + */ 310 + new_page = page; 311 + goto requeue; 312 + } 313 + 314 + /* the interface is going down, pages are purged */ 315 + page_pool_recycle_direct(pool, page); 316 + return; 317 + } 318 + 319 + new_page = page_pool_dev_alloc_pages(pool); 320 + if (unlikely(!new_page)) { 321 + new_page = page; 322 + ndev->stats.rx_dropped++; 323 + goto requeue; 324 + } 325 + 326 + if (priv->xdp_prog) { 327 + if (status & CPDMA_RX_VLAN_ENCAP) { 328 + xdp.data = pa + CPSW_HEADROOM + 329 + CPSW_RX_VLAN_ENCAP_HDR_SIZE; 330 + xdp.data_end = xdp.data + len - 331 + CPSW_RX_VLAN_ENCAP_HDR_SIZE; 332 + } else { 333 + xdp.data = pa + CPSW_HEADROOM; 334 + xdp.data_end = xdp.data + len; 335 + } 336 + 337 + xdp_set_data_meta_invalid(&xdp); 338 + 339 + xdp.data_hard_start = pa; 340 + xdp.rxq = &priv->xdp_rxq[ch]; 341 + 342 + ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port); 343 + if (ret != CPSW_XDP_PASS) 344 + goto requeue; 345 + 346 + /* XDP prog might have changed packet data and boundaries */ 347 + len = xdp.data_end - xdp.data; 348 + headroom = xdp.data - xdp.data_hard_start; 349 + 350 + /* XDP prog can modify vlan tag, so can't use encap header */ 351 + status &= ~CPDMA_RX_VLAN_ENCAP; 352 + } 353 + 354 + /* pass skb to netstack if no XDP prog or returned XDP_PASS */ 355 + skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size)); 356 + if (!skb) { 357 + ndev->stats.rx_dropped++; 358 + page_pool_recycle_direct(pool, page); 359 + goto requeue; 360 + } 361 + 362 + skb_reserve(skb, headroom); 363 + skb_put(skb, len); 364 + skb->dev = ndev; 365 + if (status & CPDMA_RX_VLAN_ENCAP) 366 + cpsw_rx_vlan_encap(skb); 367 + if (priv->rx_ts_enabled) 368 + cpts_rx_timestamp(cpsw->cpts, skb); 369 + skb->protocol = eth_type_trans(skb, ndev); 370 + 371 + /* unmap page as no netstack skb page recycling */ 372 + page_pool_release_page(pool, page); 373 + netif_receive_skb(skb); 374 + 375 + ndev->stats.rx_bytes += len; 376 + ndev->stats.rx_packets++; 377 + 378 + requeue: 379 + xmeta = 
page_address(new_page) + CPSW_XMETA_OFFSET; 380 + xmeta->ndev = ndev; 381 + xmeta->ch = ch; 382 + 383 + dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; 384 + ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, 385 + pkt_size, 0); 386 + if (ret < 0) { 387 + WARN_ON(ret == -ENOMEM); 388 + page_pool_recycle_direct(pool, new_page); 389 + } 390 + } 391 + 392 + static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, 393 + unsigned short vid) 394 + { 395 + struct cpsw_common *cpsw = priv->cpsw; 396 + int unreg_mcast_mask = 0; 397 + int mcast_mask; 398 + u32 port_mask; 399 + int ret; 400 + 401 + port_mask = (1 << priv->emac_port) | ALE_PORT_HOST; 402 + 403 + mcast_mask = ALE_PORT_HOST; 404 + if (priv->ndev->flags & IFF_ALLMULTI) 405 + unreg_mcast_mask = mcast_mask; 406 + 407 + ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask, 408 + unreg_mcast_mask); 409 + if (ret != 0) 410 + return ret; 411 + 412 + ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 413 + HOST_PORT_NUM, ALE_VLAN, vid); 414 + if (ret != 0) 415 + goto clean_vid; 416 + 417 + ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 418 + mcast_mask, ALE_VLAN, vid, 0); 419 + if (ret != 0) 420 + goto clean_vlan_ucast; 421 + return 0; 422 + 423 + clean_vlan_ucast: 424 + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 425 + HOST_PORT_NUM, ALE_VLAN, vid); 426 + clean_vid: 427 + cpsw_ale_del_vlan(cpsw->ale, vid, 0); 428 + return ret; 429 + } 430 + 431 + static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, 432 + __be16 proto, u16 vid) 433 + { 434 + struct cpsw_priv *priv = netdev_priv(ndev); 435 + struct cpsw_common *cpsw = priv->cpsw; 436 + int ret, i; 437 + 438 + if (vid == cpsw->data.default_vlan) 439 + return 0; 440 + 441 + ret = pm_runtime_get_sync(cpsw->dev); 442 + if (ret < 0) { 443 + pm_runtime_put_noidle(cpsw->dev); 444 + return ret; 445 + } 446 + 447 + /* In dual EMAC, reserved VLAN id should not be used for 448 + * creating VLAN interfaces as this can 
break the dual 449 + * EMAC port separation 450 + */ 451 + for (i = 0; i < cpsw->data.slaves; i++) { 452 + if (cpsw->slaves[i].ndev && 453 + vid == cpsw->slaves[i].port_vlan) { 454 + ret = -EINVAL; 455 + goto err; 456 + } 457 + } 458 + 459 + dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 460 + ret = cpsw_add_vlan_ale_entry(priv, vid); 461 + err: 462 + pm_runtime_put(cpsw->dev); 463 + return ret; 464 + } 465 + 466 + static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) 467 + { 468 + struct cpsw_priv *priv = arg; 469 + 470 + if (!vdev || !vid) 471 + return 0; 472 + 473 + cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid); 474 + return 0; 475 + } 476 + 477 + /* restore resources after port reset */ 478 + static void cpsw_restore(struct cpsw_priv *priv) 479 + { 480 + struct cpsw_common *cpsw = priv->cpsw; 481 + 482 + /* restore vlan configurations */ 483 + vlan_for_each(priv->ndev, cpsw_restore_vlans, priv); 484 + 485 + /* restore MQPRIO offload */ 486 + cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv); 487 + 488 + /* restore CBS offload */ 489 + cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv); 490 + } 491 + 492 + static void cpsw_init_host_port_dual_mac(struct cpsw_priv *priv) 493 + { 494 + struct cpsw_common *cpsw = priv->cpsw; 495 + int vlan = cpsw->data.default_vlan; 496 + 497 + writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl); 498 + 499 + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); 500 + dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n"); 501 + 502 + writel(vlan, &cpsw->host_port_regs->port_vlan); 503 + 504 + cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); 505 + /* learning make no sense in dual_mac mode */ 506 + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1); 507 + } 508 + 509 + static void cpsw_init_host_port(struct cpsw_priv *priv) 510 + { 511 + struct cpsw_common *cpsw = priv->cpsw; 512 + u32 control_reg; 513 + 514 + /* soft reset 
the controller and initialize ale */ 515 + soft_reset("cpsw", &cpsw->regs->soft_reset); 516 + cpsw_ale_start(cpsw->ale); 517 + 518 + /* switch to vlan unaware mode */ 519 + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 520 + CPSW_ALE_VLAN_AWARE); 521 + control_reg = readl(&cpsw->regs->control); 522 + control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP; 523 + writel(control_reg, &cpsw->regs->control); 524 + 525 + /* setup host port priority mapping */ 526 + writel_relaxed(CPDMA_TX_PRIORITY_MAP, 527 + &cpsw->host_port_regs->cpdma_tx_pri_map); 528 + writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map); 529 + 530 + /* disable priority elevation */ 531 + writel_relaxed(0, &cpsw->regs->ptype); 532 + 533 + /* enable statistics collection only on all ports */ 534 + writel_relaxed(0x7, &cpsw->regs->stat_port_en); 535 + 536 + /* Enable internal fifo flow control */ 537 + writel(0x7, &cpsw->regs->flow_control); 538 + 539 + cpsw_init_host_port_dual_mac(priv); 540 + 541 + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, 542 + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 543 + } 544 + 545 + static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv, 546 + struct cpsw_slave *slave) 547 + { 548 + u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST; 549 + struct cpsw_common *cpsw = priv->cpsw; 550 + u32 reg; 551 + 552 + reg = (cpsw->version == CPSW_VERSION_1) ? 
CPSW1_PORT_VLAN : 553 + CPSW2_PORT_VLAN; 554 + slave_write(slave, slave->port_vlan, reg); 555 + 556 + cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, 557 + port_mask, port_mask, 0); 558 + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 559 + ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 560 + ALE_MCAST_FWD); 561 + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 562 + HOST_PORT_NUM, ALE_VLAN | 563 + ALE_SECURE, slave->port_vlan); 564 + cpsw_ale_control_set(cpsw->ale, priv->emac_port, 565 + ALE_PORT_DROP_UNKNOWN_VLAN, 1); 566 + /* learning make no sense in dual_mac mode */ 567 + cpsw_ale_control_set(cpsw->ale, priv->emac_port, 568 + ALE_PORT_NOLEARN, 1); 569 + } 570 + 571 + static void cpsw_adjust_link(struct net_device *ndev) 572 + { 573 + struct cpsw_priv *priv = netdev_priv(ndev); 574 + struct cpsw_common *cpsw = priv->cpsw; 575 + struct cpsw_slave *slave; 576 + struct phy_device *phy; 577 + u32 mac_control = 0; 578 + 579 + slave = &cpsw->slaves[priv->emac_port - 1]; 580 + phy = slave->phy; 581 + 582 + if (!phy) 583 + return; 584 + 585 + if (phy->link) { 586 + mac_control = CPSW_SL_CTL_GMII_EN; 587 + 588 + if (phy->speed == 1000) 589 + mac_control |= CPSW_SL_CTL_GIG; 590 + if (phy->duplex) 591 + mac_control |= CPSW_SL_CTL_FULLDUPLEX; 592 + 593 + /* set speed_in input in case RMII mode is used in 100Mbps */ 594 + if (phy->speed == 100) 595 + mac_control |= CPSW_SL_CTL_IFCTL_A; 596 + /* in band mode only works in 10Mbps RGMII mode */ 597 + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) 598 + mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */ 599 + 600 + if (priv->rx_pause) 601 + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; 602 + 603 + if (priv->tx_pause) 604 + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; 605 + 606 + if (mac_control != slave->mac_control) 607 + cpsw_sl_ctl_set(slave->mac_sl, mac_control); 608 + 609 + /* enable forwarding */ 610 + cpsw_ale_control_set(cpsw->ale, priv->emac_port, 611 + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 612 + 613 + 
netif_tx_wake_all_queues(ndev); 614 + 615 + if (priv->shp_cfg_speed && 616 + priv->shp_cfg_speed != slave->phy->speed && 617 + !cpsw_shp_is_off(priv)) 618 + dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!"); 619 + } else { 620 + netif_tx_stop_all_queues(ndev); 621 + 622 + mac_control = 0; 623 + /* disable forwarding */ 624 + cpsw_ale_control_set(cpsw->ale, priv->emac_port, 625 + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 626 + 627 + cpsw_sl_wait_for_idle(slave->mac_sl, 100); 628 + 629 + cpsw_sl_ctl_reset(slave->mac_sl); 630 + } 631 + 632 + if (mac_control != slave->mac_control) 633 + phy_print_status(phy); 634 + 635 + slave->mac_control = mac_control; 636 + 637 + if (phy->link && cpsw_need_resplit(cpsw)) 638 + cpsw_split_res(cpsw); 639 + } 640 + 641 + static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 642 + { 643 + struct cpsw_common *cpsw = priv->cpsw; 644 + struct phy_device *phy; 645 + 646 + cpsw_sl_reset(slave->mac_sl, 100); 647 + cpsw_sl_ctl_reset(slave->mac_sl); 648 + 649 + /* setup priority mapping */ 650 + cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP, 651 + RX_PRIORITY_MAPPING); 652 + 653 + switch (cpsw->version) { 654 + case CPSW_VERSION_1: 655 + slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); 656 + /* Increase RX FIFO size to 5 for supporting fullduplex 657 + * flow control mode 658 + */ 659 + slave_write(slave, 660 + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | 661 + CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); 662 + break; 663 + case CPSW_VERSION_2: 664 + case CPSW_VERSION_3: 665 + case CPSW_VERSION_4: 666 + slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); 667 + /* Increase RX FIFO size to 5 for supporting fullduplex 668 + * flow control mode 669 + */ 670 + slave_write(slave, 671 + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | 672 + CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); 673 + break; 674 + } 675 + 676 + /* setup max packet size, and mac address */ 677 + cpsw_sl_reg_write(slave->mac_sl, 
CPSW_SL_RX_MAXLEN, 678 + cpsw->rx_packet_max); 679 + cpsw_set_slave_mac(slave, priv); 680 + 681 + slave->mac_control = 0; /* no link yet */ 682 + 683 + cpsw_port_add_dual_emac_def_ale_entries(priv, slave); 684 + 685 + if (!slave->data->phy_node) 686 + dev_err(priv->dev, "no phy found on slave %d\n", 687 + slave->slave_num); 688 + phy = of_phy_connect(priv->ndev, slave->data->phy_node, 689 + &cpsw_adjust_link, 0, slave->data->phy_if); 690 + if (!phy) { 691 + dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", 692 + slave->data->phy_node, 693 + slave->slave_num); 694 + return; 695 + } 696 + slave->phy = phy; 697 + 698 + phy_attached_info(slave->phy); 699 + 700 + phy_start(slave->phy); 701 + 702 + /* Configure GMII_SEL register */ 703 + phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET, 704 + slave->data->phy_if); 705 + } 706 + 707 + static int cpsw_ndo_stop(struct net_device *ndev) 708 + { 709 + struct cpsw_priv *priv = netdev_priv(ndev); 710 + struct cpsw_common *cpsw = priv->cpsw; 711 + struct cpsw_slave *slave; 712 + 713 + cpsw_info(priv, ifdown, "shutting down ndev\n"); 714 + slave = &cpsw->slaves[priv->emac_port - 1]; 715 + if (slave->phy) 716 + phy_stop(slave->phy); 717 + 718 + netif_tx_stop_all_queues(priv->ndev); 719 + 720 + if (slave->phy) { 721 + phy_disconnect(slave->phy); 722 + slave->phy = NULL; 723 + } 724 + 725 + __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc); 726 + 727 + if (cpsw->usage_count <= 1) { 728 + napi_disable(&cpsw->napi_rx); 729 + napi_disable(&cpsw->napi_tx); 730 + cpts_unregister(cpsw->cpts); 731 + cpsw_intr_disable(cpsw); 732 + cpdma_ctlr_stop(cpsw->dma); 733 + cpsw_ale_stop(cpsw->ale); 734 + cpsw_destroy_xdp_rxqs(cpsw); 735 + } 736 + 737 + if (cpsw_need_resplit(cpsw)) 738 + cpsw_split_res(cpsw); 739 + 740 + cpsw->usage_count--; 741 + pm_runtime_put_sync(cpsw->dev); 742 + return 0; 743 + } 744 + 745 + static int cpsw_ndo_open(struct net_device *ndev) 746 + { 747 + struct cpsw_priv *priv = netdev_priv(ndev); 748 
+ struct cpsw_common *cpsw = priv->cpsw; 749 + int ret; 750 + 751 + cpsw_info(priv, ifdown, "starting ndev\n"); 752 + ret = pm_runtime_get_sync(cpsw->dev); 753 + if (ret < 0) { 754 + pm_runtime_put_noidle(cpsw->dev); 755 + return ret; 756 + } 757 + 758 + /* Notify the stack of the actual queue counts. */ 759 + ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); 760 + if (ret) { 761 + dev_err(priv->dev, "cannot set real number of tx queues\n"); 762 + goto pm_cleanup; 763 + } 764 + 765 + ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); 766 + if (ret) { 767 + dev_err(priv->dev, "cannot set real number of rx queues\n"); 768 + goto pm_cleanup; 769 + } 770 + 771 + /* Initialize host and slave ports */ 772 + if (!cpsw->usage_count) 773 + cpsw_init_host_port(priv); 774 + cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv); 775 + 776 + /* initialize shared resources for every ndev */ 777 + if (!cpsw->usage_count) { 778 + /* create rxqs for both infs in dual mac as they use same pool 779 + * and must be destroyed together when no users. 
780 + */ 781 + ret = cpsw_create_xdp_rxqs(cpsw); 782 + if (ret < 0) 783 + goto err_cleanup; 784 + 785 + ret = cpsw_fill_rx_channels(priv); 786 + if (ret < 0) 787 + goto err_cleanup; 788 + 789 + if (cpts_register(cpsw->cpts)) 790 + dev_err(priv->dev, "error registering cpts device\n"); 791 + 792 + napi_enable(&cpsw->napi_rx); 793 + napi_enable(&cpsw->napi_tx); 794 + 795 + if (cpsw->tx_irq_disabled) { 796 + cpsw->tx_irq_disabled = false; 797 + enable_irq(cpsw->irqs_table[1]); 798 + } 799 + 800 + if (cpsw->rx_irq_disabled) { 801 + cpsw->rx_irq_disabled = false; 802 + enable_irq(cpsw->irqs_table[0]); 803 + } 804 + } 805 + 806 + cpsw_restore(priv); 807 + 808 + /* Enable Interrupt pacing if configured */ 809 + if (cpsw->coal_intvl != 0) { 810 + struct ethtool_coalesce coal; 811 + 812 + coal.rx_coalesce_usecs = cpsw->coal_intvl; 813 + cpsw_set_coalesce(ndev, &coal); 814 + } 815 + 816 + cpdma_ctlr_start(cpsw->dma); 817 + cpsw_intr_enable(cpsw); 818 + cpsw->usage_count++; 819 + 820 + return 0; 821 + 822 + err_cleanup: 823 + cpsw_ndo_stop(ndev); 824 + 825 + pm_cleanup: 826 + pm_runtime_put_sync(cpsw->dev); 827 + return ret; 828 + } 829 + 830 + static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, 831 + struct net_device *ndev) 832 + { 833 + struct cpsw_priv *priv = netdev_priv(ndev); 834 + struct cpsw_common *cpsw = priv->cpsw; 835 + struct cpts *cpts = cpsw->cpts; 836 + struct netdev_queue *txq; 837 + struct cpdma_chan *txch; 838 + int ret, q_idx; 839 + 840 + if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { 841 + cpsw_err(priv, tx_err, "packet pad failed\n"); 842 + ndev->stats.tx_dropped++; 843 + return NET_XMIT_DROP; 844 + } 845 + 846 + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 847 + priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb)) 848 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 849 + 850 + q_idx = skb_get_queue_mapping(skb); 851 + if (q_idx >= cpsw->tx_ch_num) 852 + q_idx = q_idx % cpsw->tx_ch_num; 853 + 854 + txch = cpsw->txv[q_idx].ch; 855 + txq = 
netdev_get_tx_queue(ndev, q_idx); 856 + skb_tx_timestamp(skb); 857 + ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 858 + priv->emac_port); 859 + if (unlikely(ret != 0)) { 860 + cpsw_err(priv, tx_err, "desc submit failed\n"); 861 + goto fail; 862 + } 863 + 864 + /* If there is no more tx desc left free then we need to 865 + * tell the kernel to stop sending us tx frames. 866 + */ 867 + if (unlikely(!cpdma_check_free_tx_desc(txch))) { 868 + netif_tx_stop_queue(txq); 869 + 870 + /* Barrier, so that stop_queue visible to other cpus */ 871 + smp_mb__after_atomic(); 872 + 873 + if (cpdma_check_free_tx_desc(txch)) 874 + netif_tx_wake_queue(txq); 875 + } 876 + 877 + return NETDEV_TX_OK; 878 + fail: 879 + ndev->stats.tx_dropped++; 880 + netif_tx_stop_queue(txq); 881 + 882 + /* Barrier, so that stop_queue visible to other cpus */ 883 + smp_mb__after_atomic(); 884 + 885 + if (cpdma_check_free_tx_desc(txch)) 886 + netif_tx_wake_queue(txq); 887 + 888 + return NETDEV_TX_BUSY; 889 + } 890 + 891 + static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 892 + { 893 + struct sockaddr *addr = (struct sockaddr *)p; 894 + struct cpsw_priv *priv = netdev_priv(ndev); 895 + struct cpsw_common *cpsw = priv->cpsw; 896 + int ret, slave_no; 897 + int flags = 0; 898 + u16 vid = 0; 899 + 900 + slave_no = cpsw_slave_index(cpsw, priv); 901 + if (!is_valid_ether_addr(addr->sa_data)) 902 + return -EADDRNOTAVAIL; 903 + 904 + ret = pm_runtime_get_sync(cpsw->dev); 905 + if (ret < 0) { 906 + pm_runtime_put_noidle(cpsw->dev); 907 + return ret; 908 + } 909 + 910 + vid = cpsw->slaves[slave_no].port_vlan; 911 + flags = ALE_VLAN | ALE_SECURE; 912 + 913 + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, 914 + flags, vid); 915 + cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM, 916 + flags, vid); 917 + 918 + ether_addr_copy(priv->mac_addr, addr->sa_data); 919 + ether_addr_copy(ndev->dev_addr, priv->mac_addr); 920 + cpsw_set_slave_mac(&cpsw->slaves[slave_no], 
priv); 921 + 922 + pm_runtime_put(cpsw->dev); 923 + 924 + return 0; 925 + } 926 + 927 + static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, 928 + __be16 proto, u16 vid) 929 + { 930 + struct cpsw_priv *priv = netdev_priv(ndev); 931 + struct cpsw_common *cpsw = priv->cpsw; 932 + int ret; 933 + int i; 934 + 935 + if (vid == cpsw->data.default_vlan) 936 + return 0; 937 + 938 + ret = pm_runtime_get_sync(cpsw->dev); 939 + if (ret < 0) { 940 + pm_runtime_put_noidle(cpsw->dev); 941 + return ret; 942 + } 943 + 944 + for (i = 0; i < cpsw->data.slaves; i++) { 945 + if (cpsw->slaves[i].ndev && 946 + vid == cpsw->slaves[i].port_vlan) 947 + goto err; 948 + } 949 + 950 + dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid); 951 + cpsw_ale_del_vlan(cpsw->ale, vid, 0); 952 + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 953 + HOST_PORT_NUM, ALE_VLAN, vid); 954 + cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 955 + 0, ALE_VLAN, vid); 956 + cpsw_ale_flush_multicast(cpsw->ale, 0, vid); 957 + err: 958 + pm_runtime_put(cpsw->dev); 959 + return ret; 960 + } 961 + 962 + static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name, 963 + size_t len) 964 + { 965 + struct cpsw_priv *priv = netdev_priv(ndev); 966 + int err; 967 + 968 + err = snprintf(name, len, "p%d", priv->emac_port); 969 + 970 + if (err >= len) 971 + return -EINVAL; 972 + 973 + return 0; 974 + } 975 + 976 + #ifdef CONFIG_NET_POLL_CONTROLLER 977 + static void cpsw_ndo_poll_controller(struct net_device *ndev) 978 + { 979 + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 980 + 981 + cpsw_intr_disable(cpsw); 982 + cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); 983 + cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); 984 + cpsw_intr_enable(cpsw); 985 + } 986 + #endif 987 + 988 + static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, 989 + struct xdp_frame **frames, u32 flags) 990 + { 991 + struct cpsw_priv *priv = netdev_priv(ndev); 992 + struct xdp_frame *xdpf; 993 + int i, drops = 0; 

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		xdpf = frames[i];
		/* Frames shorter than the minimum CPSW packet size cannot
		 * be sent; recycle the frame and account it as dropped.
		 */
		if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
			continue;
		}

		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
			drops++;
	}

	/* Per the ndo_xdp_xmit contract: number of frames actually queued */
	return n - drops;
}

/* net_device callbacks shared by both CPSW port netdevs */
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open = cpsw_ndo_open,
	.ndo_stop = cpsw_ndo_stop,
	.ndo_start_xmit = cpsw_ndo_start_xmit,
	.ndo_set_mac_address = cpsw_ndo_set_mac_address,
	.ndo_do_ioctl = cpsw_ndo_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc = cpsw_ndo_setup_tc,
	.ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
	.ndo_bpf = cpsw_ndo_bpf,
	.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
};

/* ethtool -i: identify the switchdev driver behind this netdev */
static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct platform_device *pdev;

	pdev = to_platform_device(cpsw->dev);
	strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
	strlcpy(info->version, "2.0", sizeof(info->version));
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}

/* ethtool -A: validate and propagate pause settings to the port's PHY */
static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv *priv = netdev_priv(ndev);
	int slave_no;
1052 + 1053 + slave_no = cpsw_slave_index(cpsw, priv); 1054 + if (!cpsw->slaves[slave_no].phy) 1055 + return -EINVAL; 1056 + 1057 + if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause)) 1058 + return -EINVAL; 1059 + 1060 + priv->rx_pause = pause->rx_pause ? true : false; 1061 + priv->tx_pause = pause->tx_pause ? true : false; 1062 + 1063 + phy_set_asym_pause(cpsw->slaves[slave_no].phy, 1064 + priv->rx_pause, priv->tx_pause); 1065 + 1066 + return 0; 1067 + } 1068 + 1069 + static int cpsw_set_channels(struct net_device *ndev, 1070 + struct ethtool_channels *chs) 1071 + { 1072 + return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler); 1073 + } 1074 + 1075 + static const struct ethtool_ops cpsw_ethtool_ops = { 1076 + .get_drvinfo = cpsw_get_drvinfo, 1077 + .get_msglevel = cpsw_get_msglevel, 1078 + .set_msglevel = cpsw_set_msglevel, 1079 + .get_link = ethtool_op_get_link, 1080 + .get_ts_info = cpsw_get_ts_info, 1081 + .get_coalesce = cpsw_get_coalesce, 1082 + .set_coalesce = cpsw_set_coalesce, 1083 + .get_sset_count = cpsw_get_sset_count, 1084 + .get_strings = cpsw_get_strings, 1085 + .get_ethtool_stats = cpsw_get_ethtool_stats, 1086 + .get_pauseparam = cpsw_get_pauseparam, 1087 + .set_pauseparam = cpsw_set_pauseparam, 1088 + .get_wol = cpsw_get_wol, 1089 + .set_wol = cpsw_set_wol, 1090 + .get_regs_len = cpsw_get_regs_len, 1091 + .get_regs = cpsw_get_regs, 1092 + .begin = cpsw_ethtool_op_begin, 1093 + .complete = cpsw_ethtool_op_complete, 1094 + .get_channels = cpsw_get_channels, 1095 + .set_channels = cpsw_set_channels, 1096 + .get_link_ksettings = cpsw_get_link_ksettings, 1097 + .set_link_ksettings = cpsw_set_link_ksettings, 1098 + .get_eee = cpsw_get_eee, 1099 + .set_eee = cpsw_set_eee, 1100 + .nway_reset = cpsw_nway_reset, 1101 + .get_ringparam = cpsw_get_ringparam, 1102 + .set_ringparam = cpsw_set_ringparam, 1103 + }; 1104 + 1105 + static int cpsw_probe_dt(struct cpsw_common *cpsw) 1106 + { 1107 + struct device_node *node = cpsw->dev->of_node, *tmp_node, 
*port_np; 1108 + struct cpsw_platform_data *data = &cpsw->data; 1109 + struct device *dev = cpsw->dev; 1110 + int ret; 1111 + u32 prop; 1112 + 1113 + if (!node) 1114 + return -EINVAL; 1115 + 1116 + tmp_node = of_get_child_by_name(node, "ethernet-ports"); 1117 + if (!tmp_node) 1118 + return -ENOENT; 1119 + data->slaves = of_get_child_count(tmp_node); 1120 + if (data->slaves != CPSW_SLAVE_PORTS_NUM) { 1121 + of_node_put(tmp_node); 1122 + return -ENOENT; 1123 + } 1124 + 1125 + data->active_slave = 0; 1126 + data->channels = CPSW_MAX_QUEUES; 1127 + data->ale_entries = CPSW_ALE_NUM_ENTRIES; 1128 + data->dual_emac = 1; 1129 + data->bd_ram_size = CPSW_BD_RAM_SIZE; 1130 + data->mac_control = 0; 1131 + 1132 + data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, 1133 + sizeof(struct cpsw_slave_data), 1134 + GFP_KERNEL); 1135 + if (!data->slave_data) 1136 + return -ENOMEM; 1137 + 1138 + /* Populate all the child nodes here... 1139 + */ 1140 + ret = devm_of_platform_populate(dev); 1141 + /* We do not want to force this, as in some cases may not have child */ 1142 + if (ret) 1143 + dev_warn(dev, "Doesn't have any child node\n"); 1144 + 1145 + for_each_child_of_node(tmp_node, port_np) { 1146 + struct cpsw_slave_data *slave_data; 1147 + const void *mac_addr; 1148 + u32 port_id; 1149 + 1150 + ret = of_property_read_u32(port_np, "reg", &port_id); 1151 + if (ret < 0) { 1152 + dev_err(dev, "%pOF error reading port_id %d\n", 1153 + port_np, ret); 1154 + goto err_node_put; 1155 + } 1156 + 1157 + if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) { 1158 + dev_err(dev, "%pOF has invalid port_id %u\n", 1159 + port_np, port_id); 1160 + ret = -EINVAL; 1161 + goto err_node_put; 1162 + } 1163 + 1164 + slave_data = &data->slave_data[port_id - 1]; 1165 + 1166 + slave_data->disabled = !of_device_is_available(port_np); 1167 + if (slave_data->disabled) 1168 + continue; 1169 + 1170 + slave_data->slave_node = port_np; 1171 + slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL); 1172 + if 
(IS_ERR(slave_data->ifphy)) { 1173 + ret = PTR_ERR(slave_data->ifphy); 1174 + dev_err(dev, "%pOF: Error retrieving port phy: %d\n", 1175 + port_np, ret); 1176 + goto err_node_put; 1177 + } 1178 + 1179 + if (of_phy_is_fixed_link(port_np)) { 1180 + ret = of_phy_register_fixed_link(port_np); 1181 + if (ret) { 1182 + if (ret != -EPROBE_DEFER) 1183 + dev_err(dev, "%pOF failed to register fixed-link phy: %d\n", 1184 + port_np, ret); 1185 + goto err_node_put; 1186 + } 1187 + slave_data->phy_node = of_node_get(port_np); 1188 + } else { 1189 + slave_data->phy_node = 1190 + of_parse_phandle(port_np, "phy-handle", 0); 1191 + } 1192 + 1193 + if (!slave_data->phy_node) { 1194 + dev_err(dev, "%pOF no phy found\n", port_np); 1195 + ret = -ENODEV; 1196 + goto err_node_put; 1197 + } 1198 + 1199 + ret = of_get_phy_mode(port_np, &slave_data->phy_if); 1200 + if (ret) { 1201 + dev_err(dev, "%pOF read phy-mode err %d\n", 1202 + port_np, ret); 1203 + goto err_node_put; 1204 + } 1205 + 1206 + mac_addr = of_get_mac_address(port_np); 1207 + if (!IS_ERR(mac_addr)) { 1208 + ether_addr_copy(slave_data->mac_addr, mac_addr); 1209 + } else { 1210 + ret = ti_cm_get_macid(dev, port_id - 1, 1211 + slave_data->mac_addr); 1212 + if (ret) 1213 + goto err_node_put; 1214 + } 1215 + 1216 + if (of_property_read_u32(port_np, "ti,dual-emac-pvid", 1217 + &prop)) { 1218 + dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n", 1219 + port_np); 1220 + slave_data->dual_emac_res_vlan = port_id; 1221 + dev_err(dev, "%pOF Using %d as Reserved VLAN\n", 1222 + port_np, slave_data->dual_emac_res_vlan); 1223 + } else { 1224 + slave_data->dual_emac_res_vlan = prop; 1225 + } 1226 + } 1227 + 1228 + of_node_put(tmp_node); 1229 + return 0; 1230 + 1231 + err_node_put: 1232 + of_node_put(port_np); 1233 + return ret; 1234 + } 1235 + 1236 + static void cpsw_remove_dt(struct cpsw_common *cpsw) 1237 + { 1238 + struct cpsw_platform_data *data = &cpsw->data; 1239 + int i = 0; 1240 + 1241 + for (i = 0; i < cpsw->data.slaves; i++) { 
1242 + struct cpsw_slave_data *slave_data = &data->slave_data[i]; 1243 + struct device_node *port_np = slave_data->phy_node; 1244 + 1245 + if (port_np) { 1246 + if (of_phy_is_fixed_link(port_np)) 1247 + of_phy_deregister_fixed_link(port_np); 1248 + 1249 + of_node_put(port_np); 1250 + } 1251 + } 1252 + } 1253 + 1254 + static int cpsw_create_ports(struct cpsw_common *cpsw) 1255 + { 1256 + struct cpsw_platform_data *data = &cpsw->data; 1257 + struct net_device *ndev, *napi_ndev = NULL; 1258 + struct device *dev = cpsw->dev; 1259 + struct cpsw_priv *priv; 1260 + int ret = 0, i = 0; 1261 + 1262 + for (i = 0; i < cpsw->data.slaves; i++) { 1263 + struct cpsw_slave_data *slave_data = &data->slave_data[i]; 1264 + 1265 + if (slave_data->disabled) 1266 + continue; 1267 + 1268 + ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv), 1269 + CPSW_MAX_QUEUES, 1270 + CPSW_MAX_QUEUES); 1271 + if (!ndev) { 1272 + dev_err(dev, "error allocating net_device\n"); 1273 + return -ENOMEM; 1274 + } 1275 + 1276 + priv = netdev_priv(ndev); 1277 + priv->cpsw = cpsw; 1278 + priv->ndev = ndev; 1279 + priv->dev = dev; 1280 + priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1281 + priv->emac_port = i + 1; 1282 + 1283 + if (is_valid_ether_addr(slave_data->mac_addr)) { 1284 + ether_addr_copy(priv->mac_addr, slave_data->mac_addr); 1285 + dev_info(cpsw->dev, "Detected MACID = %pM\n", 1286 + priv->mac_addr); 1287 + } else { 1288 + eth_random_addr(slave_data->mac_addr); 1289 + dev_info(cpsw->dev, "Random MACID = %pM\n", 1290 + priv->mac_addr); 1291 + } 1292 + ether_addr_copy(ndev->dev_addr, slave_data->mac_addr); 1293 + ether_addr_copy(priv->mac_addr, slave_data->mac_addr); 1294 + 1295 + cpsw->slaves[i].ndev = ndev; 1296 + 1297 + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 1298 + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL; 1299 + 1300 + ndev->netdev_ops = &cpsw_netdev_ops; 1301 + ndev->ethtool_ops = &cpsw_ethtool_ops; 1302 + SET_NETDEV_DEV(ndev, dev); 1303 + 1304 + if 
(!napi_ndev) { 1305 + /* CPSW Host port CPDMA interface is shared between 1306 + * ports and there is only one TX and one RX IRQs 1307 + * available for all possible TX and RX channels 1308 + * accordingly. 1309 + */ 1310 + netif_napi_add(ndev, &cpsw->napi_rx, 1311 + cpsw->quirk_irq ? 1312 + cpsw_rx_poll : cpsw_rx_mq_poll, 1313 + CPSW_POLL_WEIGHT); 1314 + netif_tx_napi_add(ndev, &cpsw->napi_tx, 1315 + cpsw->quirk_irq ? 1316 + cpsw_tx_poll : cpsw_tx_mq_poll, 1317 + CPSW_POLL_WEIGHT); 1318 + } 1319 + 1320 + napi_ndev = ndev; 1321 + } 1322 + 1323 + return ret; 1324 + } 1325 + 1326 + static void cpsw_unregister_ports(struct cpsw_common *cpsw) 1327 + { 1328 + int i = 0; 1329 + 1330 + for (i = 0; i < cpsw->data.slaves; i++) { 1331 + if (!cpsw->slaves[i].ndev) 1332 + continue; 1333 + 1334 + unregister_netdev(cpsw->slaves[i].ndev); 1335 + } 1336 + } 1337 + 1338 + static int cpsw_register_ports(struct cpsw_common *cpsw) 1339 + { 1340 + int ret = 0, i = 0; 1341 + 1342 + for (i = 0; i < cpsw->data.slaves; i++) { 1343 + if (!cpsw->slaves[i].ndev) 1344 + continue; 1345 + 1346 + /* register the network device */ 1347 + ret = register_netdev(cpsw->slaves[i].ndev); 1348 + if (ret) { 1349 + dev_err(cpsw->dev, 1350 + "cpsw: err registering net device%d\n", i); 1351 + cpsw->slaves[i].ndev = NULL; 1352 + break; 1353 + } 1354 + } 1355 + 1356 + if (ret) 1357 + cpsw_unregister_ports(cpsw); 1358 + return ret; 1359 + } 1360 + 1361 + static const struct devlink_ops cpsw_devlink_ops; 1362 + 1363 + static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id, 1364 + struct devlink_param_gset_ctx *ctx) 1365 + { 1366 + struct cpsw_devlink *dl_priv = devlink_priv(dl); 1367 + struct cpsw_common *cpsw = dl_priv->cpsw; 1368 + 1369 + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 1370 + 1371 + switch (id) { 1372 + case CPSW_DL_PARAM_ALE_BYPASS: 1373 + ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS); 1374 + break; 1375 + default: 1376 + return -EOPNOTSUPP; 1377 + } 1378 + 1379 + return 
0; 1380 + } 1381 + 1382 + static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id, 1383 + struct devlink_param_gset_ctx *ctx) 1384 + { 1385 + struct cpsw_devlink *dl_priv = devlink_priv(dl); 1386 + struct cpsw_common *cpsw = dl_priv->cpsw; 1387 + int ret = -EOPNOTSUPP; 1388 + 1389 + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 1390 + 1391 + switch (id) { 1392 + case CPSW_DL_PARAM_ALE_BYPASS: 1393 + ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1394 + ctx->val.vbool); 1395 + break; 1396 + default: 1397 + return -EOPNOTSUPP; 1398 + } 1399 + 1400 + return 0; 1401 + } 1402 + 1403 + static const struct devlink_param cpsw_devlink_params[] = { 1404 + DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS, 1405 + "ale_bypass", DEVLINK_PARAM_TYPE_BOOL, 1406 + BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1407 + cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL), 1408 + }; 1409 + 1410 + static int cpsw_register_devlink(struct cpsw_common *cpsw) 1411 + { 1412 + struct device *dev = cpsw->dev; 1413 + struct cpsw_devlink *dl_priv; 1414 + int ret = 0; 1415 + 1416 + cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv)); 1417 + if (!cpsw->devlink) 1418 + return -ENOMEM; 1419 + 1420 + dl_priv = devlink_priv(cpsw->devlink); 1421 + dl_priv->cpsw = cpsw; 1422 + 1423 + ret = devlink_register(cpsw->devlink, dev); 1424 + if (ret) { 1425 + dev_err(dev, "DL reg fail ret:%d\n", ret); 1426 + goto dl_free; 1427 + } 1428 + 1429 + ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params, 1430 + ARRAY_SIZE(cpsw_devlink_params)); 1431 + if (ret) { 1432 + dev_err(dev, "DL params reg fail ret:%d\n", ret); 1433 + goto dl_unreg; 1434 + } 1435 + 1436 + devlink_params_publish(cpsw->devlink); 1437 + return ret; 1438 + 1439 + dl_unreg: 1440 + devlink_unregister(cpsw->devlink); 1441 + dl_free: 1442 + devlink_free(cpsw->devlink); 1443 + return ret; 1444 + } 1445 + 1446 + static void cpsw_unregister_devlink(struct cpsw_common *cpsw) 1447 + { 1448 + devlink_params_unpublish(cpsw->devlink); 1449 + 
	devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
				  ARRAY_SIZE(cpsw_devlink_params));
	devlink_unregister(cpsw->devlink);
	devlink_free(cpsw->devlink);
}

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw-switch"},
	{ .compatible = "ti,am335x-cpsw-switch"},
	{ .compatible = "ti,am4372-cpsw-switch"},
	{ .compatible = "ti,dra7-cpsw-switch"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

/* SoCs for which quirk_irq is set (forces single-channel, non-mq NAPI
 * polling elsewhere in the driver) — presumably an early-silicon IRQ
 * erratum on AM33xx ES1.0; TODO confirm against the errata sheet.
 */
static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};

/* Probe: map registers, parse DT, init the common CPSW core, create one
 * CPDMA TX and one RX channel, allocate port netdevs, request IRQs, and
 * finally register devlink and the netdevs. Cleanup is strictly via the
 * goto chain at the bottom, in reverse order of acquisition.
 */
static int cpsw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct cpsw_common *cpsw;
	struct resource *ss_res;
	struct gpio_descs *mode;
	void __iomem *ss_regs;
	int ret = 0, ch;
	struct clk *clk;
	int irq;

	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	/* select the dual-EMAC slave indexing helper for shared cpsw code */
	cpsw_slave_index = cpsw_slave_index_priv;

	cpsw->dev = dev;

	cpsw->slaves = devm_kcalloc(dev,
				    CPSW_SLAVE_PORTS_NUM,
				    sizeof(struct cpsw_slave),
				    GFP_KERNEL);
	if (!cpsw->slaves)
		return -ENOMEM;

	/* optional board-level mode GPIOs; only claimed, driven low */
	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(dev, "gpio request failed, ret %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "fck is not found %d\n", ret);
		return ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;

	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ss_regs = devm_ioremap_resource(dev, ss_res);
	if (IS_ERR(ss_regs)) {
		ret = PTR_ERR(ss_regs);
		return ret;
	}
	cpsw->regs = ss_regs;

	irq = platform_get_irq_byname(pdev, "rx");
	if (irq < 0)
		return irq;
	cpsw->irqs_table[0] = irq;

	irq = platform_get_irq_byname(pdev, "tx");
	if (irq < 0)
		return irq;
	cpsw->irqs_table[1] = irq;

	platform_set_drvdata(pdev, cpsw);
	/* This may be required here for child devices. */
	pm_runtime_enable(dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	ret = cpsw_probe_dt(cpsw);
	if (ret)
		goto clean_dt_ret;

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = 1;

	cpsw->rx_packet_max = rx_packet_max;
	cpsw->descs_pool_size = descs_pool_size;

	/* creates ALE, CPTS and the CPDMA controller */
	ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
			       (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
			       descs_pool_size);
	if (ret)
		goto clean_dt_ret;

	/* wrapper (WR) register block location depends on IP version */
	cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
			ss_regs + CPSW1_WR_OFFSET :
			ss_regs + CPSW2_WR_OFFSET;

	/* quirk_irq SoCs use channel 0; otherwise the highest-priority
	 * channel 7 is used for TX — TODO confirm the priority rationale
	 */
	ch = cpsw->quirk_irq ? 0 : 7;
	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
	if (IS_ERR(cpsw->txv[0].ch)) {
		dev_err(dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_cpts;
	}

	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
	if (IS_ERR(cpsw->rxv[0].ch)) {
		dev_err(dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_cpts;
	}
	cpsw_split_res(cpsw);

	/* setup netdevs */
	ret = cpsw_create_ports(cpsw);
	if (ret)
		goto clean_unregister_netdev;

	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */

	ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev;
	}

	ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev;
	}

	ret = cpsw_register_devlink(cpsw);
	if (ret)
		goto clean_unregister_notifiers;

	ret = cpsw_register_ports(cpsw);
	if (ret)
		goto clean_unregister_notifiers;

	dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
		   &ss_res->start, descs_pool_size,
		   cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
		   CPSW_MINOR_VERSION(cpsw->version),
		   CPSW_RTL_VERSION(cpsw->version));

	pm_runtime_put(dev);

	return 0;

	/* NOTE(review): the clean_unregister_netdev label runs before any
	 * register_netdev() has happened, so cpsw_unregister_ports() may
	 * call unregister_netdev() on never-registered devices — verify.
	 */
clean_unregister_notifiers:
	cpsw_unregister_notifiers(cpsw);
clean_unregister_netdev:
	cpsw_unregister_ports(cpsw);
clean_cpts:
	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(cpsw);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}

/* Remove: tear down in reverse probe order; the device must be resumed
 * first so register-touching teardown (ALE/CPDMA) can run.
 */
static int cpsw_remove(struct platform_device *pdev)
{
	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	cpsw_unregister_devlink(cpsw);
	cpsw_unregister_ports(cpsw);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(cpsw);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver cpsw_driver = {
	.driver = {
		.name = "cpsw-switch",
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
+8 -1
drivers/net/ethernet/ti/cpsw_priv.c
··· 13 13 #include <linux/module.h> 14 14 #include <linux/netdevice.h> 15 15 #include <linux/net_tstamp.h> 16 + #include <linux/of.h> 16 17 #include <linux/phy.h> 17 18 #include <linux/platform_device.h> 18 19 #include <linux/pm_runtime.h> ··· 423 422 struct cpsw_platform_data *data; 424 423 struct cpdma_params dma_params; 425 424 struct device *dev = cpsw->dev; 425 + struct device_node *cpts_node; 426 426 void __iomem *cpts_regs; 427 427 int ret = 0, i; 428 428 ··· 518 516 return -ENOMEM; 519 517 } 520 518 521 - cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node); 519 + cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts"); 520 + if (!cpts_node) 521 + cpts_node = cpsw->dev->of_node; 522 + 523 + cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node); 522 524 if (IS_ERR(cpsw->cpts)) { 523 525 ret = PTR_ERR(cpsw->cpts); 524 526 cpdma_ctlr_destroy(cpsw->dma); 525 527 } 528 + of_node_put(cpts_node); 526 529 527 530 return ret; 528 531 }
+10 -2
drivers/net/ethernet/ti/cpsw_priv.h
··· 54 54 55 55 #define HOST_PORT_NUM 0 56 56 #define CPSW_ALE_PORTS_NUM 3 57 + #define CPSW_SLAVE_PORTS_NUM 2 57 58 #define SLIVER_SIZE 0x40 58 59 59 60 #define CPSW1_HOST_PORT_OFFSET 0x028 ··· 66 65 #define CPSW1_CPTS_OFFSET 0x500 67 66 #define CPSW1_ALE_OFFSET 0x600 68 67 #define CPSW1_SLIVER_OFFSET 0x700 68 + #define CPSW1_WR_OFFSET 0x900 69 69 70 70 #define CPSW2_HOST_PORT_OFFSET 0x108 71 71 #define CPSW2_SLAVE_OFFSET 0x200 ··· 78 76 #define CPSW2_ALE_OFFSET 0xd00 79 77 #define CPSW2_SLIVER_OFFSET 0xd80 80 78 #define CPSW2_BD_OFFSET 0x2000 79 + #define CPSW2_WR_OFFSET 0x1200 81 80 82 81 #define CPDMA_RXTHRESH 0x0c0 83 82 #define CPDMA_RXFREE 0x0e0 ··· 116 113 #define IRQ_NUM 2 117 114 #define CPSW_MAX_QUEUES 8 118 115 #define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 116 + #define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */ 117 + #define CPSW_ALE_NUM_ENTRIES 1024 119 118 #define CPSW_FIFO_QUEUE_TYPE_SHIFT 16 120 119 #define CPSW_FIFO_SHAPE_EN_SHIFT 16 121 120 #define CPSW_FIFO_RATE_EN_SHIFT 20 122 121 #define CPSW_TC_NUM 4 123 122 #define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) 124 123 #define CPSW_PCT_MASK 0x7f 124 + #define CPSW_BD_RAM_SIZE 0x2000 125 125 126 126 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 127 127 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) ··· 285 279 u8 mac_addr[ETH_ALEN]; 286 280 u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ 287 281 struct phy *ifphy; 282 + bool disabled; 288 283 }; 289 284 290 285 struct cpsw_platform_data { ··· 293 286 u32 ss_reg_ofs; /* Subsystem control register offset */ 294 287 u32 channels; /* number of cpdma channels (symmetric) */ 295 288 u32 slaves; /* number of slave cpgmac ports */ 296 - u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ 289 + u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */ 297 290 u32 ale_entries; /* ale table size */ 298 - u32 bd_ram_size; /*buffer descriptor ram size */ 291 + u32 bd_ram_size; /*buffer descriptor ram size */ 299 292 u32 
mac_control; /* Mac control register */ 300 293 u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ 301 294 bool dual_emac; /* Enable Dual EMAC mode */ ··· 351 344 bool tx_irq_disabled; 352 345 u32 irqs_table[IRQ_NUM]; 353 346 struct cpts *cpts; 347 + struct devlink *devlink; 354 348 int rx_ch_num, tx_ch_num; 355 349 int speed; 356 350 int usage_count;