/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
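
/* A hedged usage sketch: loading the module with e.g.
 * "modprobe mtk_eth_soc msg_level=14" raises the verbosity checked by the
 * netif_msg_*() tests below, while -1 keeps the driver defaults. The exact
 * module name depends on the kernel build and is an assumption here.
 */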

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
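
/* For illustration, MTK_ETHTOOL_STAT(tx_bytes) expands to
 * { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) },
 * i.e. the ethtool string plus the counter's u64 index inside
 * struct mtk_hw_stats.
 */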

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
	"sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}
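
/* The MTK_PHY_IAC word packs one MDIO transaction: the opcode bits
 * (PHY_IAC_START plus PHY_IAC_READ or PHY_IAC_WRITE), the register number
 * at PHY_IAC_REG_SHIFT, the PHY address at PHY_IAC_ADDR_SHIFT and, for
 * writes, the 16-bit payload in the low bits. PHY_IAC_ACCESS doubles as
 * the busy flag, which is why both helpers above bracket the access with
 * mtk_mdio_busy_wait().
 */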

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
{
	u32 val;

	/* Set up the link timer and power up the QPHY inside SGMIISYS */
	regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
		     SGMII_LINK_TIMER_DEFAULT);

	regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
	val |= SGMII_REMOTE_FAULT_DIS;
	regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);

	regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
	val |= SGMII_AN_RESTART;
	regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);

	regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
	val &= ~SGMII_PHYA_PWD;
	regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);

	/* Set the MUX that determines which GMAC uses the SGMII interface */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_SGMII_MASK;
		val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
			 mac_id);
	}

	/* Set up GMAC1 to go through the SGMII path when the SoC also
	 * supports an ESW on GMAC1
	 */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
	    !mac_id) {
		mtk_w32(eth, 0, MTK_MAC_MISC);
		dev_info(eth->dev, "setup gmac1 going through sgmii\n");
	}
}

static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
	    !mac->id && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (dev->phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	if (!of_phy_is_fixed_link(mac->of_node))
		phy_print_status(dev->phydev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	mac->ge_mode = 0;
	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
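		/* fall through - TRGMII is otherwise handled like RGMII */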
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
			mtk_gmac_sgmii_hw_setup(eth, mac->id);
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	if (mtk_phy_connect_node(eth, mac, np))
		goto err_phy;

	dev->phydev->autoneg = AUTONEG_ENABLE;
	dev->phydev->speed = 0;
	dev->phydev->duplex = 0;

	phy_set_max_speed(dev->phydev, SPEED_1000);
	phy_support_asym_pause(dev->phydev);
	dev->phydev->advertising = dev->phydev->supported |
				   ADVERTISED_Autoneg;
	phy_start_aneg(dev->phydev);

	of_node_put(np);

	return 0;

err_phy:
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	of_node_put(np);
	dev_err(eth->dev, "%s: invalid phy\n", __func__);
	return -EINVAL;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}
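
/* Worked example of the register split above: for the address
 * 00:11:22:33:44:55, ADRH holds 0x0011 (the two high bytes) and ADRL
 * holds 0x22334455 (the remaining four), matching the shifts used in
 * mtk_set_mac_address().
 */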

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}
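
/* The two helpers above are near-inverses: mtk_max_frag_size() pads the
 * MTU up so the fragment can hold headroom, payload and the trailing
 * struct skb_shared_info, while mtk_max_buf_size() subtracts NET_SKB_PAD,
 * NET_IP_ALIGN and the shared-info overhead again to recover the usable
 * DMA buffer length. The WARN_ON documents the invariant stated in
 * mtk_max_frag_size(): buf_size never drops below MTK_MAX_RX_LENGTH.
 */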

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_zalloc_coherent(eth->dev,
						cnt * sizeof(struct mtk_tx_dma),
						&eth->phy_scratch_ring,
						GFP_ATOMIC);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
			(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
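
/* Layout sketch of the "free queue" built above: descriptor i's txd1
 * points at page i of the scratch buffer and txd2 links to descriptor
 * i + 1, so with cnt == MTK_DMA_SIZE the hardware sees a singly linked
 * list of cnt page-sized buffers between MTK_QDMA_FQ_HEAD and
 * MTK_QDMA_FQ_TAIL, i.e. desc0 -> desc1 -> ... -> desc(cnt - 1).
 */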

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}
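
/* Worked example: for a GSO skb whose single page fragment holds 40000
 * bytes, and assuming MTK_TX_DMA_BUF_LEN is 0x3fff (16383) as defined in
 * mtk_eth_soc.h, the loop above yields 1 + DIV_ROUND_UP(40000, 16383) = 4
 * descriptors, matching the per-descriptor split performed by the
 * while (frag_size) loop in mtk_tx_map().
 */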

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}
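
/* RX refill pattern used above: the filled fragment is handed to the
 * stack via build_skb() and immediately replaced in the ring by new_data,
 * so a descriptor always owns a mapped buffer. Writing
 * RX_DMA_PLEN0(buf_size) back to rxd2 clears RX_DMA_DONE and returns
 * ownership of the slot to the hardware; the CPU index register is only
 * advanced once per poll, after the wmb().
 */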

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}
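
/* Completion walk above: MTK_QTX_CRX_PTR is the CPU's release pointer and
 * MTK_QTX_DRX_PTR is how far the DMA engine has transmitted, so every
 * descriptor in between is reclaimable. Only descriptors holding a real
 * skb (not the MTK_DMA_DUMMY_DESC marker used for fragment descriptors)
 * count against the NAPI budget and the BQL totals.
 */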

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}

static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
					&ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;

no_tx_mem:
	return -ENOMEM;
}
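
/* Topology note: the txd2 links make the TX ring a circular singly linked
 * list in DMA-address space, i.e. desc0 -> desc1 -> ... ->
 * desc(MTK_DMA_SIZE - 1) -> desc0. free_count starts at MTK_DMA_SIZE - 2
 * because next_free and last_free each pin one descriptor, and the ring
 * counts as full when next_free would catch up to last_free.
 */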

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u32 offset = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
		offset = 0x1000;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_zalloc_coherent(eth->dev,
					rx_dma_size * sizeof(*ring->dma),
					&ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn mode */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}
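
/* A hedged usage sketch from userspace (exact ethtool syntax may vary by
 * version): "ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0"
 * steers TCP flows to that destination IP through a HW LRO ring; the rule
 * location (0 or 1 per MAC) selects the hwlro_ip slot validated above.
 */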

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only the TCP dst IPv4 address is meaningful; all other fields
	 * are masked out
	 */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
	if (err)
		return err;

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0]);
	mtk_rx_clean(eth, &eth->rx_ring_qdma);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i]);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
		MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
		MTK_RX_BT_32DWORDS,
		MTK_QDMA_GLO_CFG);

	mtk_w32(eth,
		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
		MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
		MTK_PDMA_GLO_CFG);

	return 0;
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	phy_start(dev->phydev);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(dev->phydev);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	ret = mtk_clk_enable(eth);
	if (ret)
		goto err_disable_pm;

	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i])
			continue;
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
		val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
	}
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Each MCR is set up
	 * with the appropriate value when mtk_phy_link_adjust() is invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, 0, MTK_MAC_MCR(i));

	/* Tell the CDM to parse the MTK special tag from CPU-bound
	 * packets; this also works for untagged packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);

	/* Enable RX VLAN offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX */
	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return mtk_phy_connect(dev);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(dev->phydev);
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		err = phy_init_hw(eth->netdev[i]->phydev);
		if (err)
			dev_err(eth->dev, "%s: PHY init failed.\n",
				eth->netdev[i]->name);
	}

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}
2129
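/* Teardown helpers: mtk_unreg_dev() unregisters every netdev created by
 * mtk_add_mac(), mtk_free_dev() frees them, and mtk_cleanup() runs both and
 * cancels any pending reset work.
 */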
static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

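/* ethtool operations. Handlers that touch the PHY or the hardware counters
 * back off while MTK_RESETTING is set, as the state is not stable during a
 * reset cycle.
 */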
static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	phy_ethtool_ksettings_get(ndev->phydev, cmd);

	return 0;
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return genphy_restart_aneg(dev->phydev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	/* .get_link returns a u32, so a negative errno here would be
	 * misread as "link up"; report the cached link state instead
	 * while a reset is in flight.
	 */
	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return ethtool_op_get_link(dev);

	err = genphy_update_link(dev->phydev);
	if (err)
		return ethtool_op_get_link(dev);

	return dev->phydev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

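/* Snapshot the hardware counters for ethtool. The counters are refreshed
 * opportunistically (spin_trylock_bh) when the device is up, then copied out
 * under the u64_stats seqcount loop so a concurrent update forces a retry
 * instead of a torn read.
 */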
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

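/* RX flow classification is only exposed when hardware LRO is enabled; the
 * "rules" are the destination IP addresses steered into the LRO rings.
 */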
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings = mtk_get_link_ksettings,
	.set_link_ksettings = mtk_set_link_ksettings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = mtk_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
	.get_rxnfc = mtk_get_rxnfc,
	.set_rxnfc = mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_fix_features = mtk_fix_features,
	.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};

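/* Create one netdev per GMAC described in the device tree: read the mac id
 * from the "reg" property, allocate the ether device and its per-MAC counter
 * block, and wire up ops, features and IRQ. Registration happens later in
 * mtk_probe().
 */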
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = MTK_HW_FEATURES;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

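/* Platform probe: map the frame engine registers, look up the syscon
 * regmaps, IRQs and clocks, bring the hardware up, create a netdev for each
 * enabled "mediatek,eth-mac" child node, and attach both NAPI contexts to a
 * dummy device since the netdevs share a single set of DMA rings.
 */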
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err;
	int i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmiisys =
		syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						"mediatek,sgmiisys");
		if (IS_ERR(eth->sgmiisys)) {
			dev_err(&pdev->dev, "no sgmiisys regmap found\n");
			return PTR_ERR(eth->sgmiisys);
		}
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_deinit_hw;
	}

	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = mtk_mdio_init(eth);
	if (err)
		goto err_free_dev;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

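/* Platform remove: stop the netdevs so DMA is quiesced, power the frame
 * engine down, then tear down NAPI, the netdevs and the MDIO bus.
 */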
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

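/* Per-SoC configuration: capability flags, the bitmap of clocks that must
 * be present, and whether the "mediatek,pctl" regmap is required.
 */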
static const struct mtk_soc_data mt2701_data = {
	.caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7622_data = {
	.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");