
net: ethernet: mediatek: Add MT7628/88 SoC support

This patch adds support for the MediaTek MT7628/88 SoCs to the common
MediaTek ethernet driver. Mostly minor changes are needed, plus one
bigger change, since the MT7628 does not support QDMA (only PDMA).

Signed-off-by: Stefan Roese <sr@denx.de>
Cc: René van Dorst <opensource@vdorst.com>
Cc: Daniel Golle <daniel@makrotopia.org>
Cc: Sean Wang <sean.wang@mediatek.com>
Cc: John Crispin <john@phrozen.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by Stefan Roese, committed by David S. Miller
296c9120 08df5fa6
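
The whole patch hinges on per-SoC capability bits tested with MTK_HAS_CAPS():
QDMA-capable SoCs keep the existing code paths, while the PDMA-only MT7628/88
branches into new ones. A minimal userspace sketch of that dispatch follows;
MTK_HAS_CAPS() is copied from mtk_eth_soc.h below, but the flag bit positions
here are made up for the sketch and are not the driver's real enum values.

/* Illustrative only: MTK_HAS_CAPS() comes from mtk_eth_soc.h, the
 * bit positions of the flags are invented for this sketch.
 */
#include <stdio.h>

#define BIT(n)                 (1U << (n))
#define MTK_QDMA               BIT(0)
#define MTK_SOC_MT7628         BIT(1)
/* true only if ALL bits of _x are set in caps, not just any of them */
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))

int main(void)
{
        unsigned int mt7621_caps = MTK_QDMA;       /* QDMA-capable SoC */
        unsigned int mt7628_caps = MTK_SOC_MT7628; /* PDMA-only SoC */

        /* mirrors the driver's branch points, e.g. mtk_tx_unmap() */
        printf("mt7621 uses %s\n",
               MTK_HAS_CAPS(mt7621_caps, MTK_QDMA) ? "QDMA" : "PDMA");
        printf("mt7628 uses %s\n",
               MTK_HAS_CAPS(mt7628_caps, MTK_QDMA) ? "QDMA" : "PDMA");
        return 0;
}

Because MTK_HAS_CAPS() requires every bit of the queried mask to be set,
compound capability masks such as MT7628_CAPS can be tested with one call.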

+425 -112
+1 -1
drivers/net/ethernet/mediatek/Kconfig
···
  # SPDX-License-Identifier: GPL-2.0-only
  config NET_VENDOR_MEDIATEK
          bool "MediaTek ethernet driver"
 -        depends on ARCH_MEDIATEK || SOC_MT7621
 +        depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
          ---help---
            If you have a Mediatek SoC with ethernet, say Y.
+4
drivers/net/ethernet/mediatek/mtk_eth_path.c
···
  {
          int err;

 +        /* No mux'ing for MT7628/88 */
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 +                return 0;
 +
          switch (phymode) {
          case PHY_INTERFACE_MODE_TRGMII:
          case PHY_INTERFACE_MODE_RGMII_TXID:
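
The biggest structural change, in mtk_eth_soc.c below, is a second descriptor
array ring->dma_pdma kept parallel to ring->dma; the new qdma_to_pdma() helper
translates a pointer into one array into the matching slot of the other purely
by address arithmetic. A standalone sketch of that translation, with a
simplified descriptor struct and ring size, where only the pointer arithmetic
mirrors the driver:

/* Sketch of the dma/dma_pdma parallel-ring translation; struct desc
 * stands in for struct mtk_tx_dma, everything else is simplified.
 */
#include <stdio.h>

struct desc { unsigned int txd1, txd2, txd3, txd4; };

#define RING_SIZE 4

static struct desc dma[RING_SIZE];      /* software "framework" ring */
static struct desc dma_pdma[RING_SIZE]; /* ring the MT7628 HW really reads */

/* same trick as qdma_to_pdma(): shift the pointer by the distance
 * between the two ring bases, preserving the descriptor index
 */
static struct desc *to_pdma(struct desc *d)
{
        return dma_pdma - dma + d;
}

int main(void)
{
        struct desc *d = &dma[2];
        struct desc *p = to_pdma(d);

        /* both indices print 2 */
        printf("dma idx %td, dma_pdma idx %td\n", d - dma, p - dma_pdma);
        return 0;
}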
+373 -107
drivers/net/ethernet/mediatek/mtk_eth_soc.c
···
                  goto err_phy;
          }

 -        /* put the gmac into the right mode */
 -        regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
 -        val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
 -        val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
 -        regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 +        /* No MT7628/88 support for now */
 +        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 +                /* put the gmac into the right mode */
 +                regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
 +                val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
 +                val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
 +                regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 +        }

          /* couple phydev to net_device */
          if (mtk_phy_connect_node(eth, mac, np))
···
          u32 val;

          spin_lock_irqsave(&eth->tx_irq_lock, flags);
 -        val = mtk_r32(eth, MTK_QDMA_INT_MASK);
 -        mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
 +        val = mtk_r32(eth, eth->tx_int_mask_reg);
 +        mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
          spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
  }
···
          u32 val;

          spin_lock_irqsave(&eth->tx_irq_lock, flags);
 -        val = mtk_r32(eth, MTK_QDMA_INT_MASK);
 -        mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
 +        val = mtk_r32(eth, eth->tx_int_mask_reg);
 +        mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
          spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
  }
···
  {
          int ret = eth_mac_addr(dev, p);
          struct mtk_mac *mac = netdev_priv(dev);
 +        struct mtk_eth *eth = mac->hw;
          const char *macaddr = dev->dev_addr;

          if (ret)
···
                  return -EBUSY;

          spin_lock_bh(&mac->hw->page_lock);
 -        mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
 -                MTK_GDMA_MAC_ADRH(mac->id));
 -        mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
 -                (macaddr[4] << 8) | macaddr[5],
 -                MTK_GDMA_MAC_ADRL(mac->id));
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 +                mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
 +                        MT7628_SDM_MAC_ADRH);
 +                mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
 +                        (macaddr[4] << 8) | macaddr[5],
 +                        MT7628_SDM_MAC_ADRL);
 +        } else {
 +                mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
 +                        MTK_GDMA_MAC_ADRH(mac->id));
 +                mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
 +                        (macaddr[4] << 8) | macaddr[5],
 +                        MTK_GDMA_MAC_ADRL(mac->id));
 +        }
          spin_unlock_bh(&mac->hw->page_lock);

          return 0;
···
          return &ring->buf[idx];
  }

 +static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
 +                                       struct mtk_tx_dma *dma)
 +{
 +        return ring->dma_pdma - ring->dma + dma;
 +}
 +
 +static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
 +{
 +        return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
 +}
 +
  static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
  {
 -        if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
 -                dma_unmap_single(eth->dev,
 -                                 dma_unmap_addr(tx_buf, dma_addr0),
 -                                 dma_unmap_len(tx_buf, dma_len0),
 -                                 DMA_TO_DEVICE);
 -        } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
 -                dma_unmap_page(eth->dev,
 -                               dma_unmap_addr(tx_buf, dma_addr0),
 -                               dma_unmap_len(tx_buf, dma_len0),
 -                               DMA_TO_DEVICE);
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
 +                        dma_unmap_single(eth->dev,
 +                                         dma_unmap_addr(tx_buf, dma_addr0),
 +                                         dma_unmap_len(tx_buf, dma_len0),
 +                                         DMA_TO_DEVICE);
 +                } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
 +                        dma_unmap_page(eth->dev,
 +                                       dma_unmap_addr(tx_buf, dma_addr0),
 +                                       dma_unmap_len(tx_buf, dma_len0),
 +                                       DMA_TO_DEVICE);
 +                }
 +        } else {
 +                if (dma_unmap_len(tx_buf, dma_len0)) {
 +                        dma_unmap_page(eth->dev,
 +                                       dma_unmap_addr(tx_buf, dma_addr0),
 +                                       dma_unmap_len(tx_buf, dma_len0),
 +                                       DMA_TO_DEVICE);
 +                }
 +
 +                if (dma_unmap_len(tx_buf, dma_len1)) {
 +                        dma_unmap_page(eth->dev,
 +                                       dma_unmap_addr(tx_buf, dma_addr1),
 +                                       dma_unmap_len(tx_buf, dma_len1),
 +                                       DMA_TO_DEVICE);
 +                }
          }
 +
          tx_buf->flags = 0;
          if (tx_buf->skb &&
              (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
                  dev_kfree_skb_any(tx_buf->skb);
          tx_buf->skb = NULL;
 +}
 +
 +static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
 +                         struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
 +                         size_t size, int idx)
 +{
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 +                dma_unmap_len_set(tx_buf, dma_len0, size);
 +        } else {
 +                if (idx & 1) {
 +                        txd->txd3 = mapped_addr;
 +                        txd->txd2 |= TX_DMA_PLEN1(size);
 +                        dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
 +                        dma_unmap_len_set(tx_buf, dma_len1, size);
 +                } else {
 +                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 +                        txd->txd1 = mapped_addr;
 +                        txd->txd2 = TX_DMA_PLEN0(size);
 +                        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 +                        dma_unmap_len_set(tx_buf, dma_len0, size);
 +                }
 +        }
  }

  static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
···
          struct mtk_mac *mac = netdev_priv(dev);
          struct mtk_eth *eth = mac->hw;
          struct mtk_tx_dma *itxd, *txd;
 +        struct mtk_tx_dma *itxd_pdma, *txd_pdma;
          struct mtk_tx_buf *itx_buf, *tx_buf;
          dma_addr_t mapped_addr;
          unsigned int nr_frags;
          int i, n_desc = 1;
          u32 txd4 = 0, fport;
 +        int k = 0;

          itxd = ring->next_free;
 +        itxd_pdma = qdma_to_pdma(ring, itxd);
          if (itxd == ring->last_free)
                  return -ENOMEM;
···
          itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
          itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
                            MTK_TX_FLAGS_FPORT1;
 -        dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
 -        dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
 +        setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
 +                     k++);

          /* TX SG offload */
          txd = itxd;
 +        txd_pdma = qdma_to_pdma(ring, txd);
          nr_frags = skb_shinfo(skb)->nr_frags;
 +
          for (i = 0; i < nr_frags; i++) {
                  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                  unsigned int offset = 0;
···
                  while (frag_size) {
                          bool last_frag = false;
                          unsigned int frag_map_size;
 +                        bool new_desc = true;

 -                        txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
 -                        if (txd == ring->last_free)
 -                                goto err_dma;
 +                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
 +                            (i & 0x1)) {
 +                                txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
 +                                txd_pdma = qdma_to_pdma(ring, txd);
 +                                if (txd == ring->last_free)
 +                                        goto err_dma;

 -                        n_desc++;
 +                                n_desc++;
 +                        } else {
 +                                new_desc = false;
 +                        }
 +
 +
                          frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
                          mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
                                                         frag_map_size,
···
                          WRITE_ONCE(txd->txd4, fport);

                          tx_buf = mtk_desc_to_tx_buf(ring, txd);
 -                        memset(tx_buf, 0, sizeof(*tx_buf));
 +                        if (new_desc)
 +                                memset(tx_buf, 0, sizeof(*tx_buf));
                          tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                          tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
                          tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
                                           MTK_TX_FLAGS_FPORT1;

 -                        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 -                        dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
 +                        setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
 +                                     frag_map_size, k++);
 +
                          frag_size -= frag_map_size;
                          offset += frag_map_size;
                  }
···
          WRITE_ONCE(itxd->txd4, txd4);
          WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
                                  (!nr_frags * TX_DMA_LS0)));
 +        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                if (k & 0x1)
 +                        txd_pdma->txd2 |= TX_DMA_LS0;
 +                else
 +                        txd_pdma->txd2 |= TX_DMA_LS1;
 +        }

          netdev_sent_queue(dev, skb->len);
          skb_tx_timestamp(skb);
···
           */
          wmb();

 -        if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
 -            !netdev_xmit_more())
 -                mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
 +                    !netdev_xmit_more())
 +                        mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
 +        } else {
 +                int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
 +                                             ring->dma_size);
 +                mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
 +        }

          return 0;
···
                  mtk_tx_unmap(eth, tx_buf);

                  itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 +                if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 +                        itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
 +
                  itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
 +                itxd_pdma = qdma_to_pdma(ring, itxd);
          } while (itxd != txd);

          return -ENOMEM;
···
          struct net_device *netdev;
          unsigned int pktlen;
          dma_addr_t dma_addr;
 -        int mac = 0;
 +        int mac;

          ring = mtk_get_rx_ring(eth);
          if (unlikely(!ring))
···
                          break;

                  /* find out which mac the packet come from. values start at 1 */
 -                mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
 -                      RX_DMA_FPORT_MASK;
 -                mac--;
 +                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 +                        mac = 0;
 +                } else {
 +                        mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
 +                              RX_DMA_FPORT_MASK;
 +                        mac--;
 +                }

                  if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
                               !eth->netdev[mac]))
···
                          goto release_desc;
                  }
                  dma_addr = dma_map_single(eth->dev,
 -                                          new_data + NET_SKB_PAD,
 +                                          new_data + NET_SKB_PAD +
 +                                          eth->ip_align,
                                            ring->buf_size,
                                            DMA_FROM_DEVICE);
                  if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
···
                  pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
                  skb->dev = netdev;
                  skb_put(skb, pktlen);
 -                if (trxd.rxd4 & RX_DMA_L4_VALID)
 +                if (trxd.rxd4 & eth->rx_dma_l4_valid)
                          skb->ip_summed = CHECKSUM_UNNECESSARY;
                  else
                          skb_checksum_none_assert(skb);
···
                  rxd->rxd1 = (unsigned int)dma_addr;

  release_desc:
 -                rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
 +                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 +                        rxd->rxd2 = RX_DMA_LSO;
 +                else
 +                        rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

                  ring->calc_idx = idx;
···
          return done;
  }

 -static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 +static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
 +                            unsigned int *done, unsigned int *bytes)
  {
          struct mtk_tx_ring *ring = &eth->tx_ring;
          struct mtk_tx_dma *desc;
          struct sk_buff *skb;
          struct mtk_tx_buf *tx_buf;
 -        unsigned int done[MTK_MAX_DEVS];
 -        unsigned int bytes[MTK_MAX_DEVS];
          u32 cpu, dma;
 -        int total = 0, i;
 -
 -        memset(done, 0, sizeof(done));
 -        memset(bytes, 0, sizeof(bytes));

          cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
          dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
···
          mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

 +        return budget;
 +}
 +
 +static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
 +                            unsigned int *done, unsigned int *bytes)
 +{
 +        struct mtk_tx_ring *ring = &eth->tx_ring;
 +        struct mtk_tx_dma *desc;
 +        struct sk_buff *skb;
 +        struct mtk_tx_buf *tx_buf;
 +        u32 cpu, dma;
 +
 +        cpu = ring->cpu_idx;
 +        dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
 +
 +        while ((cpu != dma) && budget) {
 +                tx_buf = &ring->buf[cpu];
 +                skb = tx_buf->skb;
 +                if (!skb)
 +                        break;
 +
 +                if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
 +                        bytes[0] += skb->len;
 +                        done[0]++;
 +                        budget--;
 +                }
 +
 +                mtk_tx_unmap(eth, tx_buf);
 +
 +                desc = &ring->dma[cpu];
 +                ring->last_free = desc;
 +                atomic_inc(&ring->free_count);
 +
 +                cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
 +        }
 +
 +        ring->cpu_idx = cpu;
 +
 +        return budget;
 +}
 +
 +static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 +{
 +        struct mtk_tx_ring *ring = &eth->tx_ring;
 +        unsigned int done[MTK_MAX_DEVS];
 +        unsigned int bytes[MTK_MAX_DEVS];
 +        int total = 0, i;
 +
 +        memset(done, 0, sizeof(done));
 +        memset(bytes, 0, sizeof(bytes));
 +
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 +                budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
 +        else
 +                budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
 +
          for (i = 0; i < MTK_MAC_COUNT; i++) {
                  if (!eth->netdev[i] || !done[i])
                          continue;
···
          u32 status, mask;
          int tx_done = 0;

 -        mtk_handle_status_irq(eth);
 -        mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_STATUS);
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 +                mtk_handle_status_irq(eth);
 +        mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
          tx_done = mtk_poll_tx(eth, budget);

          if (unlikely(netif_msg_intr(eth))) {
 -                status = mtk_r32(eth, MTK_QDMA_INT_STATUS);
 -                mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
 +                status = mtk_r32(eth, eth->tx_int_status_reg);
 +                mask = mtk_r32(eth, eth->tx_int_mask_reg);
                  dev_info(eth->dev,
                           "done tx %d, intr 0x%08x/0x%x\n",
                           tx_done, status, mask);
···
          if (tx_done == budget)
                  return budget;

 -        status = mtk_r32(eth, MTK_QDMA_INT_STATUS);
 +        status = mtk_r32(eth, eth->tx_int_status_reg);
          if (status & MTK_TX_DONE_INT)
                  return budget;
···
                  ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
          }

 +        /* On MT7688 (PDMA only) this driver uses the ring->dma structs
 +         * only as the framework. The real HW descriptors are the PDMA
 +         * descriptors in ring->dma_pdma.
 +         */
 +        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
 +                                                    &ring->phys_pdma,
 +                                                    GFP_ATOMIC);
 +                if (!ring->dma_pdma)
 +                        goto no_tx_mem;
 +
 +                for (i = 0; i < MTK_DMA_SIZE; i++) {
 +                        ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
 +                        ring->dma_pdma[i].txd4 = 0;
 +                }
 +        }
 +
 +        ring->dma_size = MTK_DMA_SIZE;
          atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
          ring->next_free = &ring->dma[0];
          ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
···
           */
          wmb();

 -        mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
 -        mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
 -        mtk_w32(eth,
 -                ring->phys + ((MTK_DMA_SIZE - 1) * sz),
 -                MTK_QTX_CRX_PTR);
 -        mtk_w32(eth,
 -                ring->phys + ((MTK_DMA_SIZE - 1) * sz),
 -                MTK_QTX_DRX_PTR);
 -        mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
 +                mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
 +                mtk_w32(eth,
 +                        ring->phys + ((MTK_DMA_SIZE - 1) * sz),
 +                        MTK_QTX_CRX_PTR);
 +                mtk_w32(eth,
 +                        ring->phys + ((MTK_DMA_SIZE - 1) * sz),
 +                        MTK_QTX_DRX_PTR);
 +                mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
 +                        MTK_QTX_CFG(0));
 +        } else {
 +                mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
 +                mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
 +                mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
 +                mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
 +        }

          return 0;
···
                                    ring->dma,
                                    ring->phys);
                  ring->dma = NULL;
 +        }
 +
 +        if (ring->dma_pdma) {
 +                dma_free_coherent(eth->dev,
 +                                  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
 +                                  ring->dma_pdma,
 +                                  ring->phys_pdma);
 +                ring->dma_pdma = NULL;
          }
  }
···
          for (i = 0; i < rx_dma_size; i++) {
                  dma_addr_t dma_addr = dma_map_single(eth->dev,
 -                                ring->data[i] + NET_SKB_PAD,
 +                                ring->data[i] + NET_SKB_PAD + eth->ip_align,
                                  ring->buf_size,
                                  DMA_FROM_DEVICE);
                  if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
                          return -ENOMEM;
                  ring->dma[i].rxd1 = (unsigned int)dma_addr;

 -                ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
 +                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 +                        ring->dma[i].rxd2 = RX_DMA_LSO;
 +                else
 +                        ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
          }
          ring->dma_size = rx_dma_size;
          ring->calc_idx_update = false;
···
          unsigned long t_start = jiffies;

          while (1) {
 -                if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
 -                      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
 -                        return 0;
 +                if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                        if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
 +                              (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
 +                                return 0;
 +                } else {
 +                        if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
 +                              (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
 +                                return 0;
 +                }
 +
                  if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
                          break;
          }
···
          if (mtk_dma_busy_wait(eth))
                  return -EBUSY;

 -        /* QDMA needs scratch memory for internal reordering of the
 -         * descriptors
 -         */
 -        err = mtk_init_fq_dma(eth);
 -        if (err)
 -                return err;
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                /* QDMA needs scratch memory for internal reordering of the
 +                 * descriptors
 +                 */
 +                err = mtk_init_fq_dma(eth);
 +                if (err)
 +                        return err;
 +        }

          err = mtk_tx_alloc(eth);
          if (err)
                  return err;

 -        err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
 -        if (err)
 -                return err;
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
 +                if (err)
 +                        return err;
 +        }

          err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
          if (err)
···
                  return err;
          }

 -        /* Enable random early drop and set drop threshold automatically */
 -        mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
 -                MTK_QDMA_FC_THRES);
 -        mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                /* Enable random early drop and set drop threshold
 +                 * automatically
 +                 */
 +                mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
 +                        FC_THRES_MIN, MTK_QDMA_FC_THRES);
 +                mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
 +        }

          return 0;
  }
···
  static irqreturn_t mtk_handle_irq(int irq, void *_eth)
  {
          struct mtk_eth *eth = _eth;
 +        u32 status;

 +        status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
          if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
                  if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
                          mtk_handle_irq_rx(irq, _eth);
          }
 -        if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) {
 -                if (mtk_r32(eth, MTK_QDMA_INT_STATUS) & MTK_TX_DONE_INT)
 +        if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
 +                if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
                          mtk_handle_irq_tx(irq, _eth);
          }
···
                  return err;
          }

 -        mtk_w32(eth,
 -                MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
 -                MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
 -                MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
 -                MTK_RX_BT_32DWORDS,
 -                MTK_QDMA_GLO_CFG);
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                mtk_w32(eth,
 +                        MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
 +                        MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
 +                        MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
 +                        MTK_RX_BT_32DWORDS,
 +                        MTK_QDMA_GLO_CFG);

 -        mtk_w32(eth,
 -                MTK_RX_DMA_EN | rx_2b_offset |
 -                MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
 -                MTK_PDMA_GLO_CFG);
 +                mtk_w32(eth,
 +                        MTK_RX_DMA_EN | rx_2b_offset |
 +                        MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
 +                        MTK_PDMA_GLO_CFG);
 +        } else {
 +                mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
 +                        MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
 +                        MTK_PDMA_GLO_CFG);
 +        }

          return 0;
  }
···
          phy_start(dev->phydev);
          netif_start_queue(dev);
 -
          return 0;
  }
···
          napi_disable(&eth->tx_napi);
          napi_disable(&eth->rx_napi);

 -        mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 +                mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
          mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

          mtk_dma_free(eth);
···
          if (ret)
                  goto err_disable_pm;

 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 +                ret = device_reset(eth->dev);
 +                if (ret) {
 +                        dev_err(eth->dev, "MAC reset failed!\n");
 +                        goto err_disable_pm;
 +                }
 +
 +                /* enable interrupt delay for RX */
 +                mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
 +
 +                /* disable delay and normal interrupt */
 +                mtk_tx_irq_disable(eth, ~0);
 +                mtk_rx_irq_disable(eth, ~0);
 +
 +                return 0;
 +        }
 +
 +        /* Non-MT7628 handling... */
          ethsys_reset(eth, RSTCTRL_FE);
          ethsys_reset(eth, RSTCTRL_PPE);
···
          eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
          eth->netdev[id]->base_addr = (unsigned long)eth->base;

 -        eth->netdev[id]->hw_features = MTK_HW_FEATURES;
 +        eth->netdev[id]->hw_features = eth->soc->hw_features;
          if (eth->hwlro)
                  eth->netdev[id]->hw_features |= NETIF_F_LRO;

 -        eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
 +        eth->netdev[id]->vlan_features = eth->soc->hw_features &
                  ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 -        eth->netdev[id]->features |= MTK_HW_FEATURES;
 +        eth->netdev[id]->features |= eth->soc->hw_features;
          eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

          eth->netdev[id]->irq = eth->irq[0];
···
          if (IS_ERR(eth->base))
                  return PTR_ERR(eth->base);

 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 +                eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
 +                eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
 +        } else {
 +                eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
 +                eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
 +        }
 +
 +        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 +                eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
 +                eth->ip_align = NET_IP_ALIGN;
 +        } else {
 +                eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
 +        }
 +
          spin_lock_init(&eth->page_lock);
          spin_lock_init(&eth->tx_irq_lock);
          spin_lock_init(&eth->rx_irq_lock);

 -        eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 -                                                      "mediatek,ethsys");
 -        if (IS_ERR(eth->ethsys)) {
 -                dev_err(&pdev->dev, "no ethsys regmap found\n");
 -                return PTR_ERR(eth->ethsys);
 +        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 +                eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 +                                                              "mediatek,ethsys");
 +                if (IS_ERR(eth->ethsys)) {
 +                        dev_err(&pdev->dev, "no ethsys regmap found\n");
 +                        return PTR_ERR(eth->ethsys);
 +                }
          }

          if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
···
          if (err)
                  goto err_free_dev;

 -        err = mtk_mdio_init(eth);
 -        if (err)
 -                goto err_free_dev;
 +        /* No MT7628/88 support yet */
 +        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 +                err = mtk_mdio_init(eth);
 +                if (err)
 +                        goto err_free_dev;
 +        }

          for (i = 0; i < MTK_MAX_DEVS; i++) {
                  if (!eth->netdev[i])
···
  static const struct mtk_soc_data mt2701_data = {
          .caps = MT7623_CAPS | MTK_HWLRO,
 +        .hw_features = MTK_HW_FEATURES,
          .required_clks = MT7623_CLKS_BITMAP,
          .required_pctl = true,
  };

  static const struct mtk_soc_data mt7621_data = {
          .caps = MT7621_CAPS,
 +        .hw_features = MTK_HW_FEATURES,
          .required_clks = MT7621_CLKS_BITMAP,
          .required_pctl = false,
  };
···
  static const struct mtk_soc_data mt7622_data = {
          .ana_rgc3 = 0x2028,
          .caps = MT7622_CAPS | MTK_HWLRO,
 +        .hw_features = MTK_HW_FEATURES,
          .required_clks = MT7622_CLKS_BITMAP,
          .required_pctl = false,
  };

  static const struct mtk_soc_data mt7623_data = {
          .caps = MT7623_CAPS | MTK_HWLRO,
 +        .hw_features = MTK_HW_FEATURES,
          .required_clks = MT7623_CLKS_BITMAP,
          .required_pctl = true,
  };
···
  static const struct mtk_soc_data mt7629_data = {
          .ana_rgc3 = 0x128,
          .caps = MT7629_CAPS | MTK_HWLRO,
 +        .hw_features = MTK_HW_FEATURES,
          .required_clks = MT7629_CLKS_BITMAP,
          .required_pctl = false,
  };
 +
 +static const struct mtk_soc_data rt5350_data = {
 +        .caps = MT7628_CAPS,
 +        .hw_features = MTK_HW_FEATURES_MT7628,
 +        .required_clks = MT7628_CLKS_BITMAP,
 +        .required_pctl = false,
 +};
···
          { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
          { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
          { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
 +        { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
          {},
  };
  MODULE_DEVICE_TABLE(of, of_mtk_match);
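
Worth noting from the mtk_set_mac_address() hunk above: the MT7628 SDM
registers take the same byte packing as the GDMA ones, two address bytes in
the high register and four in the low. A small sketch of that packing, using
an arbitrary example address:

/* Sketch of the ADRH/ADRL packing; the MAC address value is made up. */
#include <stdio.h>

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x0c, 0x43, 0x76, 0x28, 0x88 };

        /* high register: bytes 0-1; low register: bytes 2-5 */
        unsigned int adrh = (mac[0] << 8) | mac[1];
        unsigned int adrl = ((unsigned int)mac[2] << 24) | (mac[3] << 16) |
                            (mac[4] << 8) | mac[5];

        printf("ADRH = 0x%04x\n", adrh); /* 0x000c */
        printf("ADRL = 0x%08x\n", adrl); /* 0x43762888 */
        return 0;
}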
+47 -4
drivers/net/ethernet/mediatek/mtk_eth_soc.h
···
                   NETIF_F_SG | NETIF_F_TSO | \
                   NETIF_F_TSO6 | \
                   NETIF_F_IPV6_CSUM)
 +#define MTK_HW_FEATURES_MT7628  (NETIF_F_SG | NETIF_F_RXCSUM)
  #define NEXT_DESP_IDX(X, Y)     (((X) + 1) & ((Y) - 1))

  #define MTK_MAX_RX_RING_NUM     4
···
  /* PDMA Global Configuration Register */
  #define MTK_PDMA_GLO_CFG        0xa04
  #define MTK_MULTI_EN            BIT(10)
 +#define MTK_PDMA_SIZE_8DWORDS   (1 << 4)

  /* PDMA Reset Index Register */
  #define MTK_PDMA_RST_IDX        0xa08
···
  #define TX_DMA_OWNER_CPU        BIT(31)
  #define TX_DMA_LS0              BIT(30)
  #define TX_DMA_PLEN0(_x)        (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
 +#define TX_DMA_PLEN1(_x)        ((_x) & MTK_TX_DMA_BUF_LEN)
  #define TX_DMA_SWC              BIT(14)
  #define TX_DMA_SDL(_x)          (((_x) & 0x3fff) << 16)

 +/* PDMA on MT7628 */
 +#define TX_DMA_DONE             BIT(31)
 +#define TX_DMA_LS1              BIT(14)
 +#define TX_DMA_DESP2_DEF        (TX_DMA_LS0 | TX_DMA_DONE)
 +
  /* QDMA descriptor rxd2 */
  #define RX_DMA_DONE             BIT(31)
 +#define RX_DMA_LSO              BIT(30)
  #define RX_DMA_PLEN0(_x)        (((_x) & 0x3fff) << 16)
  #define RX_DMA_GET_PLEN0(_x)    (((_x) >> 16) & 0x3fff)
···
  /* QDMA descriptor rxd4 */
  #define RX_DMA_L4_VALID         BIT(24)
 +#define RX_DMA_L4_VALID_PDMA    BIT(30)         /* when PDMA is used */
  #define RX_DMA_FPORT_SHIFT      19
  #define RX_DMA_FPORT_MASK       0x7
···
  #define CO_QPHY_SEL             BIT(0)
  #define GEPHY_MAC_SEL           BIT(1)

 +/* MT7628/88 specific stuff */
 +#define MT7628_PDMA_OFFSET      0x0800
 +#define MT7628_SDM_OFFSET       0x0c00
 +
 +#define MT7628_TX_BASE_PTR0     (MT7628_PDMA_OFFSET + 0x00)
 +#define MT7628_TX_MAX_CNT0      (MT7628_PDMA_OFFSET + 0x04)
 +#define MT7628_TX_CTX_IDX0      (MT7628_PDMA_OFFSET + 0x08)
 +#define MT7628_TX_DTX_IDX0      (MT7628_PDMA_OFFSET + 0x0c)
 +#define MT7628_PST_DTX_IDX0     BIT(0)
 +
 +#define MT7628_SDM_MAC_ADRL     (MT7628_SDM_OFFSET + 0x0c)
 +#define MT7628_SDM_MAC_ADRH     (MT7628_SDM_OFFSET + 0x10)
 +
  struct mtk_rx_dma {
          unsigned int rxd1;
          unsigned int rxd2;
···
                                   BIT(MTK_CLK_SGMII_CK) | \
                                   BIT(MTK_CLK_ETH2PLL))
  #define MT7621_CLKS_BITMAP      (0)
 +#define MT7628_CLKS_BITMAP      (0)
  #define MT7629_CLKS_BITMAP      (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
                                   BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
                                   BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
···
          struct mtk_tx_dma *last_free;
          u16 thresh;
          atomic_t free_count;
 +        int dma_size;
 +        struct mtk_tx_dma *dma_pdma;    /* For MT7628/88 PDMA handling */
 +        dma_addr_t phys_pdma;
 +        int cpu_idx;
  };

  /* PDMA rx ring mode */
···
          MTK_HWLRO_BIT,
          MTK_SHARED_INT_BIT,
          MTK_TRGMII_MT7621_CLK_BIT,
 +        MTK_QDMA_BIT,
 +        MTK_SOC_MT7628_BIT,

          /* MUX BITS*/
          MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
···
  #define MTK_HWLRO               BIT(MTK_HWLRO_BIT)
  #define MTK_SHARED_INT          BIT(MTK_SHARED_INT_BIT)
  #define MTK_TRGMII_MT7621_CLK   BIT(MTK_TRGMII_MT7621_CLK_BIT)
 +#define MTK_QDMA                BIT(MTK_QDMA_BIT)
 +#define MTK_SOC_MT7628          BIT(MTK_SOC_MT7628_BIT)

  #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW   \
          BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
···
  #define MTK_HAS_CAPS(caps, _x)  (((caps) & (_x)) == (_x))

  #define MT7621_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
 -                      MTK_GMAC2_RGMII | MTK_SHARED_INT | MTK_TRGMII_MT7621_CLK)
 +                      MTK_GMAC2_RGMII | MTK_SHARED_INT | \
 +                      MTK_TRGMII_MT7621_CLK | MTK_QDMA)

  #define MT7622_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
                        MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
                        MTK_MUX_GDM1_TO_GMAC1_ESW | \
 -                      MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
 +                      MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)

 -#define MT7623_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
 +#define MT7623_CAPS  (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
 +                      MTK_QDMA)
 +
 +#define MT7628_CAPS  (MTK_SHARED_INT | MTK_SOC_MT7628)

  #define MT7629_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
                        MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
                        MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
                        MTK_MUX_U3_GMAC2_TO_QPHY | \
 -                      MTK_MUX_GMAC12_TO_GEPHY_SGMII)
 +                      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)

  /* struct mtk_eth_data - This is the structure holding all differences
   * among various plaforms
   * @ana_rgc3:                   The offset for register ANA_RGC3 related to
   *                              sgmiisys syscon
   * @caps                        Flags shown the extra capability for the SoC
 + * @hw_features                 Flags shown HW features
   * @required_clks               Flags shown the bitmap for required clocks on
   *                              the target SoC
   * @required_pctl               A bool value to show whether the SoC requires
···
          u32             caps;
          u32             required_clks;
          bool            required_pctl;
 +        netdev_features_t       hw_features;
  };

  /* currently no SoC has more than 2 macs */
···
          unsigned long                   state;

          const struct mtk_soc_data       *soc;
 +
 +        u32                             tx_int_mask_reg;
 +        u32                             tx_int_status_reg;
 +        u32                             rx_dma_l4_valid;
 +        int                             ip_align;
  };

  /* struct mtk_mac - the structure that holds the info about the MACs of the
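
NEXT_DESP_IDX() above is the PDMA ring-walking primitive used by
mtk_tx_map() and mtk_poll_tx_pdma(): it assumes the ring size is a power of
two, so masking with size - 1 is a cheap modulo. A short sketch, with ring
size 4 chosen purely for demonstration:

/* Sketch of NEXT_DESP_IDX(), copied from mtk_eth_soc.h; it relies on
 * the ring size being a power of two.
 */
#include <stdio.h>

#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))

int main(void)
{
        int ring_size = 4; /* must be a power of two */
        int idx = 0;
        int i;

        /* walk the ring twice; prints 1 2 3 0 1 2 3 0 */
        for (i = 0; i < 8; i++) {
                idx = NEXT_DESP_IDX(idx, ring_size);
                printf("%d ", idx);
        }
        printf("\n");
        return 0;
}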