Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-airoha-add-an7583-ethernet-controller-support'

Lorenzo Bianconi says:

====================
net: airoha: Add AN7583 ethernet controller support

Introduce support for AN7583 ethernet controller to airoha-eth driver.
The main difference between EN7581 and AN7583 is that the latter runs a
single PPE module while EN7581 runs two of them. Moreover, PPE SRAM in
AN7583 SoC is reduced to 8K (while SRAM is 16K on EN7581).

v2: https://lore.kernel.org/r/20251016-an7583-eth-support-v2-0-ea6e7e9acbdb@kernel.org
v1: https://lore.kernel.org/r/20251015-an7583-eth-support-v1-0-064855f05923@kernel.org
====================

Link: https://patch.msgid.link/20251017-an7583-eth-support-v3-0-f28319666667@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+439 -183
+34 -1
Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
··· 17 17 compatible: 18 18 enum: 19 19 - airoha,en7581-eth 20 + - airoha,an7583-eth 20 21 21 22 reg: 22 23 items: ··· 45 44 - description: PDMA irq 46 45 47 46 resets: 47 + minItems: 7 48 48 maxItems: 8 49 49 50 50 reset-names: ··· 56 54 - const: xsi-mac 57 55 - const: hsi0-mac 58 56 - const: hsi1-mac 59 - - const: hsi-mac 57 + - enum: [ hsi-mac, xfp-mac ] 60 58 - const: xfp-mac 59 + minItems: 7 61 60 62 61 memory-region: 63 62 items: ··· 83 80 The Airoha Network Processor Unit (NPU) provides a configuration 84 81 interface to implement hardware flow offloading programming Packet 85 82 Processor Engine (PPE) flow table. 83 + 84 + allOf: 85 + - $ref: ethernet-controller.yaml# 86 + - if: 87 + properties: 88 + compatible: 89 + contains: 90 + enum: 91 + - airoha,en7581-eth 92 + then: 93 + properties: 94 + resets: 95 + minItems: 8 96 + 97 + reset-names: 98 + minItems: 8 99 + 100 + - if: 101 + properties: 102 + compatible: 103 + contains: 104 + enum: 105 + - airoha,an7583-eth 106 + then: 107 + properties: 108 + resets: 109 + maxItems: 7 110 + 111 + reset-names: 112 + maxItems: 7 86 113 87 114 patternProperties: 88 115 "^ethernet@[1-4]$":
+186 -68
drivers/net/ethernet/airoha/airoha_eth.c
··· 297 297 int q; 298 298 299 299 all_rsv = airoha_fe_get_pse_all_rsv(eth); 300 - /* hw misses PPE2 oq rsv */ 301 - all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]; 300 + if (airoha_ppe_is_enabled(eth, 1)) { 301 + /* hw misses PPE2 oq rsv */ 302 + all_rsv += PSE_RSV_PAGES * 303 + pse_port_num_queues[FE_PSE_PORT_PPE2]; 304 + } 302 305 airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv); 303 306 304 307 /* CMD1 */ ··· 338 335 for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++) 339 336 airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q, 340 337 PSE_QUEUE_RSV_PAGES); 341 - /* PPE2 */ 342 - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { 343 - if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) 344 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 345 - PSE_QUEUE_RSV_PAGES); 346 - else 347 - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0); 338 + if (airoha_ppe_is_enabled(eth, 1)) { 339 + /* PPE2 */ 340 + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { 341 + if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) 342 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, 343 + q, 344 + PSE_QUEUE_RSV_PAGES); 345 + else 346 + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, 347 + q, 0); 348 + } 348 349 } 349 350 /* GMD4 */ 350 351 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++) ··· 530 523 531 524 /* disable IFC by default */ 532 525 airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); 533 - 534 - airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 535 - FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) | 536 - FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) | 537 - FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) | 538 - FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) | 539 - FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) | 540 - FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) | 541 - FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) | 542 - FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1)); 543 - airoha_fe_wr(eth, 
REG_PPE_DFT_CPORT0(1), 544 - FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) | 545 - FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) | 546 - FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) | 547 - FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) | 548 - FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) | 549 - FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) | 550 - FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) | 551 - FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2)); 552 526 553 527 /* enable 1:N vlan action, init vlan table */ 554 528 airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); ··· 1375 1387 int err, i; 1376 1388 1377 1389 /* disable xsi */ 1378 - err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), 1379 - eth->xsi_rsts); 1390 + err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts); 1380 1391 if (err) 1381 1392 return err; 1382 1393 ··· 1682 1695 return 0; 1683 1696 } 1684 1697 1685 - static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) 1698 + static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) 1686 1699 { 1687 - u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; 1688 1700 struct airoha_eth *eth = port->qdma->eth; 1689 - u32 chan = port->id == 3 ? 4 : 0; 1701 + u32 val, pse_port, chan, nbq; 1702 + int src_port; 1690 1703 1691 1704 /* Forward the traffic to the proper GDM port */ 1705 + pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3 1706 + : FE_PSE_PORT_GDM4; 1692 1707 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port); 1693 1708 airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC); 1694 1709 1695 1710 /* Enable GDM2 loopback */ 1696 1711 airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff); 1697 1712 airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff); 1713 + 1714 + chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 
4 : 3 : 0; 1698 1715 airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2), 1699 1716 LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK, 1700 1717 FIELD_PREP(LPBK_CHAN_MASK, chan) | ··· 1713 1722 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2)); 1714 1723 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2)); 1715 1724 1716 - if (port->id == 3) { 1717 - /* FIXME: handle XSI_PCE1_PORT */ 1718 - airoha_fe_rmw(eth, REG_FE_WAN_PORT, 1719 - WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, 1720 - FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); 1721 - airoha_fe_rmw(eth, 1722 - REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3), 1723 - SP_CPORT_PCIE0_MASK, 1724 - FIELD_PREP(SP_CPORT_PCIE0_MASK, 1725 - FE_PSE_PORT_CDM2)); 1726 - } else { 1727 - /* FIXME: handle XSI_USB_PORT */ 1725 + /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */ 1726 + nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0; 1727 + src_port = eth->soc->ops.get_src_port_id(port, nbq); 1728 + if (src_port < 0) 1729 + return src_port; 1730 + 1731 + airoha_fe_rmw(eth, REG_FE_WAN_PORT, 1732 + WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, 1733 + FIELD_PREP(WAN0_MASK, src_port)); 1734 + val = src_port & SP_CPORT_DFT_MASK; 1735 + airoha_fe_rmw(eth, 1736 + REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)), 1737 + SP_CPORT_MASK(val), 1738 + FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val))); 1739 + 1740 + if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth)) 1728 1741 airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, 1729 1742 FC_ID_OF_SRC_PORT24_MASK, 1730 1743 FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2)); 1731 - airoha_fe_rmw(eth, REG_FE_WAN_PORT, 1732 - WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, 1733 - FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT)); 1734 - airoha_fe_rmw(eth, 1735 - REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3), 1736 - SP_CPORT_ETH_MASK, 1737 - FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2)); 1738 - } 1744 + 1745 + return 0; 1739 1746 } 1740 1747 1741 1748 static int airoha_dev_init(struct net_device *dev) 1742 1749 { 1743 1750 struct 
airoha_gdm_port *port = netdev_priv(dev); 1744 - struct airoha_eth *eth = port->qdma->eth; 1745 - u32 pse_port; 1751 + struct airoha_qdma *qdma = port->qdma; 1752 + struct airoha_eth *eth = qdma->eth; 1753 + u32 pse_port, fe_cpu_port; 1754 + u8 ppe_id; 1746 1755 1747 1756 airoha_set_macaddr(port, dev->dev_addr); 1748 1757 ··· 1750 1759 case 3: 1751 1760 case 4: 1752 1761 /* If GDM2 is active we can't enable loopback */ 1753 - if (!eth->ports[1]) 1754 - airhoha_set_gdm2_loopback(port); 1762 + if (!eth->ports[1]) { 1763 + int err; 1764 + 1765 + err = airhoha_set_gdm2_loopback(port); 1766 + if (err) 1767 + return err; 1768 + } 1755 1769 fallthrough; 1756 1770 case 2: 1757 - pse_port = FE_PSE_PORT_PPE2; 1758 - break; 1759 - default: 1771 + if (airoha_ppe_is_enabled(eth, 1)) { 1772 + /* For PPE2 always use secondary cpu port. */ 1773 + fe_cpu_port = FE_PSE_PORT_CDM2; 1774 + pse_port = FE_PSE_PORT_PPE2; 1775 + break; 1776 + } 1777 + fallthrough; 1778 + default: { 1779 + u8 qdma_id = qdma - &eth->qdma[0]; 1780 + 1781 + /* For PPE1 select cpu port according to the running QDMA. */ 1782 + fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1; 1760 1783 pse_port = FE_PSE_PORT_PPE1; 1761 1784 break; 1762 1785 } 1786 + } 1763 1787 1764 1788 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); 1789 + ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0; 1790 + airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id), 1791 + DFT_CPORT_MASK(port->id), 1792 + fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id))); 1765 1793 1766 1794 return 0; 1767 1795 } ··· 1897 1887 return index >= tail; 1898 1888 } 1899 1889 1890 + static int airoha_get_fe_port(struct airoha_gdm_port *port) 1891 + { 1892 + struct airoha_qdma *qdma = port->qdma; 1893 + struct airoha_eth *eth = qdma->eth; 1894 + 1895 + switch (eth->soc->version) { 1896 + case 0x7583: 1897 + return port->id == AIROHA_GDM3_IDX ? 
FE_PSE_PORT_GDM3 1898 + : port->id; 1899 + case 0x7581: 1900 + default: 1901 + return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4 1902 + : port->id; 1903 + } 1904 + } 1905 + 1900 1906 static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, 1901 1907 struct net_device *dev) 1902 1908 { ··· 1953 1927 } 1954 1928 } 1955 1929 1956 - fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; 1930 + fport = airoha_get_fe_port(port); 1957 1931 msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | 1958 1932 FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); 1959 1933 ··· 2948 2922 2949 2923 static int airoha_probe(struct platform_device *pdev) 2950 2924 { 2925 + struct reset_control_bulk_data *xsi_rsts; 2951 2926 struct device_node *np; 2952 2927 struct airoha_eth *eth; 2953 2928 int i, err; ··· 2956 2929 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); 2957 2930 if (!eth) 2958 2931 return -ENOMEM; 2932 + 2933 + eth->soc = of_device_get_match_data(&pdev->dev); 2934 + if (!eth->soc) 2935 + return -EINVAL; 2959 2936 2960 2937 eth->dev = &pdev->dev; 2961 2938 ··· 2985 2954 return err; 2986 2955 } 2987 2956 2988 - eth->xsi_rsts[0].id = "xsi-mac"; 2989 - eth->xsi_rsts[1].id = "hsi0-mac"; 2990 - eth->xsi_rsts[2].id = "hsi1-mac"; 2991 - eth->xsi_rsts[3].id = "hsi-mac"; 2992 - eth->xsi_rsts[4].id = "xfp-mac"; 2957 + xsi_rsts = devm_kzalloc(eth->dev, 2958 + eth->soc->num_xsi_rsts * sizeof(*xsi_rsts), 2959 + GFP_KERNEL); 2960 + if (err) 2961 + return err; 2962 + 2963 + eth->xsi_rsts = xsi_rsts; 2964 + for (i = 0; i < eth->soc->num_xsi_rsts; i++) 2965 + eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i]; 2966 + 2993 2967 err = devm_reset_control_bulk_get_exclusive(eth->dev, 2994 - ARRAY_SIZE(eth->xsi_rsts), 2968 + eth->soc->num_xsi_rsts, 2995 2969 eth->xsi_rsts); 2996 2970 if (err) { 2997 2971 dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); ··· 3084 3048 platform_set_drvdata(pdev, NULL); 3085 3049 } 3086 3050 3051 + static const char * const en7581_xsi_rsts_names[] = { 3052 
+ "xsi-mac", 3053 + "hsi0-mac", 3054 + "hsi1-mac", 3055 + "hsi-mac", 3056 + "xfp-mac", 3057 + }; 3058 + 3059 + static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq) 3060 + { 3061 + switch (port->id) { 3062 + case 3: 3063 + /* 7581 SoC supports PCIe serdes on GDM3 port */ 3064 + if (nbq == 4) 3065 + return HSGMII_LAN_7581_PCIE0_SRCPORT; 3066 + if (nbq == 5) 3067 + return HSGMII_LAN_7581_PCIE1_SRCPORT; 3068 + break; 3069 + case 4: 3070 + /* 7581 SoC supports eth and usb serdes on GDM4 port */ 3071 + if (!nbq) 3072 + return HSGMII_LAN_7581_ETH_SRCPORT; 3073 + if (nbq == 1) 3074 + return HSGMII_LAN_7581_USB_SRCPORT; 3075 + break; 3076 + default: 3077 + break; 3078 + } 3079 + 3080 + return -EINVAL; 3081 + } 3082 + 3083 + static const char * const an7583_xsi_rsts_names[] = { 3084 + "xsi-mac", 3085 + "hsi0-mac", 3086 + "hsi1-mac", 3087 + "xfp-mac", 3088 + }; 3089 + 3090 + static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq) 3091 + { 3092 + switch (port->id) { 3093 + case 3: 3094 + /* 7583 SoC supports eth serdes on GDM3 port */ 3095 + if (!nbq) 3096 + return HSGMII_LAN_7583_ETH_SRCPORT; 3097 + break; 3098 + case 4: 3099 + /* 7583 SoC supports PCIe and USB serdes on GDM4 port */ 3100 + if (!nbq) 3101 + return HSGMII_LAN_7583_PCIE_SRCPORT; 3102 + if (nbq == 1) 3103 + return HSGMII_LAN_7583_USB_SRCPORT; 3104 + break; 3105 + default: 3106 + break; 3107 + } 3108 + 3109 + return -EINVAL; 3110 + } 3111 + 3112 + static const struct airoha_eth_soc_data en7581_soc_data = { 3113 + .version = 0x7581, 3114 + .xsi_rsts_names = en7581_xsi_rsts_names, 3115 + .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names), 3116 + .num_ppe = 2, 3117 + .ops = { 3118 + .get_src_port_id = airoha_en7581_get_src_port_id, 3119 + }, 3120 + }; 3121 + 3122 + static const struct airoha_eth_soc_data an7583_soc_data = { 3123 + .version = 0x7583, 3124 + .xsi_rsts_names = an7583_xsi_rsts_names, 3125 + .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names), 3126 + 
.num_ppe = 1, 3127 + .ops = { 3128 + .get_src_port_id = airoha_an7583_get_src_port_id, 3129 + }, 3130 + }; 3131 + 3087 3132 static const struct of_device_id of_airoha_match[] = { 3088 - { .compatible = "airoha,en7581-eth" }, 3133 + { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data }, 3134 + { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data }, 3089 3135 { /* sentinel */ } 3090 3136 }; 3091 3137 MODULE_DEVICE_TABLE(of, of_airoha_match);
+45 -20
drivers/net/ethernet/airoha/airoha_eth.h
··· 21 21 #define AIROHA_MAX_NUM_IRQ_BANKS 4 22 22 #define AIROHA_MAX_DSA_PORTS 7 23 23 #define AIROHA_MAX_NUM_RSTS 3 24 - #define AIROHA_MAX_NUM_XSI_RSTS 5 25 24 #define AIROHA_MAX_MTU 9216 26 25 #define AIROHA_MAX_PACKET_SIZE 2048 27 26 #define AIROHA_NUM_QOS_CHANNELS 4 ··· 47 48 #define QDMA_METER_IDX(_n) ((_n) & 0xff) 48 49 #define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3) 49 50 50 - #define PPE_NUM 2 51 - #define PPE1_SRAM_NUM_ENTRIES (8 * 1024) 52 - #define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES) 53 - #ifdef CONFIG_NET_AIROHA_FLOW_STATS 54 - #define PPE1_STATS_NUM_ENTRIES (4 * 1024) 55 - #else 56 - #define PPE1_STATS_NUM_ENTRIES 0 57 - #endif /* CONFIG_NET_AIROHA_FLOW_STATS */ 58 - #define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES) 59 - #define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES) 60 - #define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES) 51 + #define PPE_SRAM_NUM_ENTRIES (8 * 1024) 52 + #define PPE_STATS_NUM_ENTRIES (4 * 1024) 61 53 #define PPE_DRAM_NUM_ENTRIES (16 * 1024) 62 - #define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES) 63 - #define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1) 64 54 #define PPE_ENTRY_SIZE 80 65 55 #define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10)) 66 56 ··· 67 79 }; 68 80 69 81 enum { 70 - HSGMII_LAN_PCIE0_SRCPORT = 0x16, 71 - HSGMII_LAN_PCIE1_SRCPORT, 72 - HSGMII_LAN_ETH_SRCPORT, 73 - HSGMII_LAN_USB_SRCPORT, 82 + HSGMII_LAN_7581_PCIE0_SRCPORT = 0x16, 83 + HSGMII_LAN_7581_PCIE1_SRCPORT, 84 + HSGMII_LAN_7581_ETH_SRCPORT, 85 + HSGMII_LAN_7581_USB_SRCPORT, 86 + }; 87 + 88 + enum { 89 + HSGMII_LAN_7583_ETH_SRCPORT = 0x16, 90 + HSGMII_LAN_7583_PCIE_SRCPORT = 0x18, 91 + HSGMII_LAN_7583_USB_SRCPORT, 74 92 }; 75 93 76 94 enum { ··· 103 109 CRSN_22 = 0x16, /* hit bind and force route to CPU */ 104 110 CRSN_24 = 0x18, 105 111 CRSN_25 = 0x19, 112 + }; 113 + 114 + enum airoha_gdm_index { 115 + AIROHA_GDM1_IDX = 1, 116 + AIROHA_GDM2_IDX = 2, 117 + AIROHA_GDM3_IDX 
= 3, 118 + AIROHA_GDM4_IDX = 4, 106 119 }; 107 120 108 121 enum { ··· 555 554 struct rhashtable l2_flows; 556 555 557 556 struct hlist_head *foe_flow; 558 - u16 foe_check_time[PPE_NUM_ENTRIES]; 557 + u16 *foe_check_time; 559 558 560 559 struct airoha_foe_stats *foe_stats; 561 560 dma_addr_t foe_stats_dma; ··· 563 562 struct dentry *debugfs_dir; 564 563 }; 565 564 565 + struct airoha_eth_soc_data { 566 + u16 version; 567 + const char * const *xsi_rsts_names; 568 + int num_xsi_rsts; 569 + int num_ppe; 570 + struct { 571 + int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq); 572 + } ops; 573 + }; 574 + 566 575 struct airoha_eth { 567 576 struct device *dev; 577 + 578 + const struct airoha_eth_soc_data *soc; 568 579 569 580 unsigned long state; 570 581 void __iomem *fe_regs; ··· 587 574 struct rhashtable flow_table; 588 575 589 576 struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; 590 - struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; 577 + struct reset_control_bulk_data *xsi_rsts; 591 578 592 579 struct net_device *napi_dev; 593 580 ··· 630 617 return port->id == 1; 631 618 } 632 619 620 + static inline bool airoha_is_7581(struct airoha_eth *eth) 621 + { 622 + return eth->soc->version == 0x7581; 623 + } 624 + 625 + static inline bool airoha_is_7583(struct airoha_eth *eth) 626 + { 627 + return eth->soc->version == 0x7583; 628 + } 629 + 633 630 bool airoha_is_valid_gdm_port(struct airoha_eth *eth, 634 631 struct airoha_gdm_port *port); 635 632 633 + bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index); 636 634 void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb, 637 635 u16 hash, bool rx_wlan); 638 636 int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data); 639 637 int airoha_ppe_init(struct airoha_eth *eth); 640 638 void airoha_ppe_deinit(struct airoha_eth *eth); 641 639 void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port); 640 + u32 airoha_ppe_get_total_num_entries(struct 
airoha_ppe *ppe); 642 641 struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, 643 642 u32 hash); 644 643 void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+170 -89
drivers/net/ethernet/airoha/airoha_ppe.c
··· 32 32 .automatic_shrinking = true, 33 33 }; 34 34 35 - static bool airoha_ppe2_is_enabled(struct airoha_eth *eth) 35 + static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe) 36 36 { 37 - return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK; 37 + if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS)) 38 + return -EOPNOTSUPP; 39 + 40 + if (airoha_is_7583(ppe->eth)) 41 + return -EOPNOTSUPP; 42 + 43 + return PPE_STATS_NUM_ENTRIES; 44 + } 45 + 46 + static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe) 47 + { 48 + int num_stats = airoha_ppe_get_num_stats_entries(ppe); 49 + 50 + if (num_stats > 0) { 51 + struct airoha_eth *eth = ppe->eth; 52 + 53 + num_stats = num_stats * eth->soc->num_ppe; 54 + } 55 + 56 + return num_stats; 57 + } 58 + 59 + static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe) 60 + { 61 + struct airoha_eth *eth = ppe->eth; 62 + 63 + return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe; 64 + } 65 + 66 + u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe) 67 + { 68 + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); 69 + 70 + return sram_num_entries + PPE_DRAM_NUM_ENTRIES; 71 + } 72 + 73 + bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index) 74 + { 75 + if (index >= eth->soc->num_ppe) 76 + return false; 77 + 78 + return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK; 38 79 } 39 80 40 81 static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe) ··· 87 46 88 47 static void airoha_ppe_hw_init(struct airoha_ppe *ppe) 89 48 { 90 - u32 sram_tb_size, sram_num_entries, dram_num_entries; 49 + u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries; 50 + u32 sram_tb_size, dram_num_entries; 91 51 struct airoha_eth *eth = ppe->eth; 92 - int i; 52 + int i, sram_num_stats_entries; 93 53 94 - sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry); 54 + sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); 55 + sram_tb_size 
= sram_num_entries * sizeof(struct airoha_foe_entry); 95 56 dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES); 96 57 97 - for (i = 0; i < PPE_NUM; i++) { 58 + sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe); 59 + if (sram_num_stats_entries > 0) 60 + sram_ppe_num_data_entries -= sram_num_stats_entries; 61 + sram_ppe_num_data_entries = 62 + PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries); 63 + 64 + for (i = 0; i < eth->soc->num_ppe; i++) { 98 65 int p; 99 66 100 67 airoha_fe_wr(eth, REG_PPE_TB_BASE(i), ··· 134 85 135 86 airoha_fe_rmw(eth, REG_PPE_TB_CFG(i), 136 87 PPE_TB_CFG_SEARCH_MISS_MASK | 88 + PPE_SRAM_TB_NUM_ENTRY_MASK | 89 + PPE_DRAM_TB_NUM_ENTRY_MASK | 137 90 PPE_TB_CFG_KEEPALIVE_MASK | 138 91 PPE_TB_ENTRY_SIZE_MASK, 139 92 FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) | 140 - FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0)); 93 + FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) | 94 + FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, 95 + sram_ppe_num_data_entries) | 96 + FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, 97 + dram_num_entries)); 141 98 142 99 airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED); 143 100 ··· 155 100 AIROHA_MAX_MTU) | 156 101 FIELD_PREP(FP1_EGRESS_MTU_MASK, 157 102 AIROHA_MAX_MTU)); 158 - } 159 - 160 - if (airoha_ppe2_is_enabled(eth)) { 161 - sram_num_entries = 162 - PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES); 163 - airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), 164 - PPE_SRAM_TB_NUM_ENTRY_MASK | 165 - PPE_DRAM_TB_NUM_ENTRY_MASK, 166 - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, 167 - sram_num_entries) | 168 - FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, 169 - dram_num_entries)); 170 - airoha_fe_rmw(eth, REG_PPE_TB_CFG(1), 171 - PPE_SRAM_TB_NUM_ENTRY_MASK | 172 - PPE_DRAM_TB_NUM_ENTRY_MASK, 173 - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, 174 - sram_num_entries) | 175 - FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, 176 - dram_num_entries)); 177 - } else { 178 - sram_num_entries = 179 - PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES); 180 - 
airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), 181 - PPE_SRAM_TB_NUM_ENTRY_MASK | 182 - PPE_DRAM_TB_NUM_ENTRY_MASK, 183 - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, 184 - sram_num_entries) | 185 - FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, 186 - dram_num_entries)); 187 103 } 188 104 } 189 105 ··· 454 428 return 0; 455 429 } 456 430 457 - static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) 431 + static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe, 432 + struct airoha_foe_entry *hwe) 458 433 { 459 434 int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); 435 + u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1; 460 436 u32 hash, hv1, hv2, hv3; 461 437 462 438 switch (type) { ··· 496 468 case PPE_PKT_TYPE_IPV6_6RD: 497 469 default: 498 470 WARN_ON_ONCE(1); 499 - return PPE_HASH_MASK; 471 + return ppe_hash_mask; 500 472 } 501 473 502 474 hash = (hv1 & hv2) | ((~hv1) & hv3); 503 475 hash = (hash >> 24) | ((hash & 0xffffff) << 8); 504 476 hash ^= hv1 ^ hv2 ^ hv3; 505 477 hash ^= hash >> 16; 506 - hash &= PPE_NUM_ENTRIES - 1; 478 + hash &= ppe_hash_mask; 507 479 508 480 return hash; 509 481 } 510 482 511 - static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash) 483 + static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, 484 + u32 hash, u32 *index) 512 485 { 513 - if (!airoha_ppe2_is_enabled(ppe->eth)) 514 - return hash; 486 + int ppe_num_stats_entries; 515 487 516 - return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES 517 - : hash; 488 + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); 489 + if (ppe_num_stats_entries < 0) 490 + return ppe_num_stats_entries; 491 + 492 + *index = hash >= ppe_num_stats_entries ? 
hash - PPE_STATS_NUM_ENTRIES 493 + : hash; 494 + 495 + return 0; 518 496 } 519 497 520 498 static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, ··· 534 500 static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe, 535 501 struct airoha_npu *npu) 536 502 { 537 - int i; 503 + int i, ppe_num_stats_entries; 538 504 539 - for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++) 505 + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); 506 + if (ppe_num_stats_entries < 0) 507 + return; 508 + 509 + for (i = 0; i < ppe_num_stats_entries; i++) 540 510 airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i); 541 511 } 542 512 ··· 551 513 { 552 514 int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); 553 515 u32 index, pse_port, val, *data, *ib2, *meter; 516 + int ppe_num_stats_entries; 554 517 u8 nbq; 555 518 556 - index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); 557 - if (index >= PPE_STATS_NUM_ENTRIES) 519 + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); 520 + if (ppe_num_stats_entries < 0) 521 + return; 522 + 523 + if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index)) 524 + return; 525 + 526 + if (index >= ppe_num_stats_entries) 558 527 return; 559 528 560 529 if (type == PPE_PKT_TYPE_BRIDGE) { ··· 602 557 static struct airoha_foe_entry * 603 558 airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash) 604 559 { 560 + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); 561 + 605 562 lockdep_assert_held(&ppe_lock); 606 563 607 - if (hash < PPE_SRAM_NUM_ENTRIES) { 564 + if (hash < sram_num_entries) { 608 565 u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry); 566 + bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES; 609 567 struct airoha_eth *eth = ppe->eth; 610 - bool ppe2; 611 568 u32 val; 612 569 int i; 613 570 614 - ppe2 = airoha_ppe2_is_enabled(ppe->eth) && 615 - hash >= PPE1_SRAM_NUM_ENTRIES; 616 571 airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2), 617 572 
FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) | 618 573 PPE_SRAM_CTRL_REQ_MASK); ··· 622 577 REG_PPE_RAM_CTRL(ppe2))) 623 578 return NULL; 624 579 625 - for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++) 580 + for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe); 581 + i++) 626 582 hwe[i] = airoha_fe_rr(eth, 627 583 REG_PPE_RAM_ENTRY(ppe2, i)); 628 584 } ··· 660 614 return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1)); 661 615 } 662 616 617 + static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash) 618 + { 619 + struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); 620 + bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES; 621 + u32 *ptr = (u32 *)hwe, val; 622 + int i; 623 + 624 + for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++) 625 + airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]); 626 + 627 + wmb(); 628 + airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2), 629 + FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) | 630 + PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK); 631 + 632 + return read_poll_timeout_atomic(airoha_fe_rr, val, 633 + val & PPE_SRAM_CTRL_ACK_MASK, 634 + 10, 100, false, ppe->eth, 635 + REG_PPE_RAM_CTRL(ppe2)); 636 + } 637 + 663 638 static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, 664 639 struct airoha_foe_entry *e, 665 640 u32 hash, bool rx_wlan) 666 641 { 642 + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); 667 643 struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); 668 644 u32 ts = airoha_ppe_get_timestamp(ppe); 669 645 struct airoha_eth *eth = ppe->eth; ··· 710 642 if (!rx_wlan) 711 643 airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash); 712 644 713 - if (hash < PPE_SRAM_NUM_ENTRIES) { 714 - dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe); 715 - bool ppe2 = airoha_ppe2_is_enabled(eth) && 716 - hash >= PPE1_SRAM_NUM_ENTRIES; 717 - 718 - err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe), 719 - hash, ppe2); 720 - } 645 + if (hash < 
sram_num_entries) 646 + err = airoha_ppe_foe_commit_sram_entry(ppe, hash); 721 647 unlock: 722 648 rcu_read_unlock(); 723 649 ··· 834 772 if (state == AIROHA_FOE_STATE_BIND) 835 773 goto unlock; 836 774 837 - index = airoha_ppe_foe_get_entry_hash(hwe); 775 + index = airoha_ppe_foe_get_entry_hash(ppe, hwe); 838 776 hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) { 839 777 if (e->type == FLOW_TYPE_L2_SUBFLOW) { 840 778 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1); ··· 894 832 if (type == PPE_PKT_TYPE_BRIDGE) 895 833 return airoha_ppe_foe_l2_flow_commit_entry(ppe, e); 896 834 897 - hash = airoha_ppe_foe_get_entry_hash(&e->data); 835 + hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data); 898 836 e->type = FLOW_TYPE_L4; 899 837 e->hash = 0xffff; 900 838 ··· 1220 1158 void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, 1221 1159 struct airoha_foe_stats64 *stats) 1222 1160 { 1223 - u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); 1224 1161 struct airoha_eth *eth = ppe->eth; 1162 + int ppe_num_stats_entries; 1225 1163 struct airoha_npu *npu; 1164 + u32 index; 1226 1165 1227 - if (index >= PPE_STATS_NUM_ENTRIES) 1166 + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); 1167 + if (ppe_num_stats_entries < 0) 1168 + return; 1169 + 1170 + if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index)) 1171 + return; 1172 + 1173 + if (index >= ppe_num_stats_entries) 1228 1174 return; 1229 1175 1230 1176 rcu_read_lock(); ··· 1295 1225 return -EOPNOTSUPP; 1296 1226 } 1297 1227 1298 - static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe, 1299 - struct airoha_npu *npu) 1228 + static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe) 1300 1229 { 1301 - int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES; 1230 + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); 1302 1231 struct airoha_foe_entry *hwe = ppe->foe; 1232 + int i, err = 0; 1303 1233 1304 - if (airoha_ppe2_is_enabled(ppe->eth)) 
1305 - sram_num_entries = sram_num_entries / 2; 1234 + for (i = 0; i < sram_num_entries; i++) { 1235 + int err; 1306 1236 1307 - for (i = 0; i < sram_num_entries; i++) 1308 1237 memset(&hwe[i], 0, sizeof(*hwe)); 1238 + err = airoha_ppe_foe_commit_sram_entry(ppe, i); 1239 + if (err) 1240 + break; 1241 + } 1309 1242 1310 - return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma, 1311 - PPE_SRAM_NUM_ENTRIES); 1243 + return err; 1312 1244 } 1313 1245 1314 1246 static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth) ··· 1329 1257 { 1330 1258 struct airoha_npu *npu = airoha_ppe_npu_get(eth); 1331 1259 struct airoha_ppe *ppe = eth->ppe; 1332 - int err; 1260 + int err, ppe_num_stats_entries; 1333 1261 1334 1262 if (IS_ERR(npu)) 1335 1263 return PTR_ERR(npu); ··· 1338 1266 if (err) 1339 1267 goto error_npu_put; 1340 1268 1341 - if (PPE_STATS_NUM_ENTRIES) { 1269 + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); 1270 + if (ppe_num_stats_entries > 0) { 1342 1271 err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma, 1343 - PPE_STATS_NUM_ENTRIES); 1272 + ppe_num_stats_entries); 1344 1273 if (err) 1345 1274 goto error_npu_put; 1346 1275 } 1347 1276 1348 1277 airoha_ppe_hw_init(ppe); 1349 - err = airoha_ppe_flush_sram_entries(ppe, npu); 1350 - if (err) 1351 - goto error_npu_put; 1352 - 1353 1278 airoha_ppe_foe_flow_stats_reset(ppe, npu); 1354 1279 1355 1280 rcu_assign_pointer(eth->npu, npu); ··· 1382 1313 u16 hash, bool rx_wlan) 1383 1314 { 1384 1315 struct airoha_ppe *ppe = dev->priv; 1316 + u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1; 1385 1317 u16 now, diff; 1386 1318 1387 - if (hash > PPE_HASH_MASK) 1319 + if (hash > ppe_hash_mask) 1388 1320 return; 1389 1321 1390 1322 now = (u16)jiffies; ··· 1475 1405 1476 1406 int airoha_ppe_init(struct airoha_eth *eth) 1477 1407 { 1408 + int foe_size, err, ppe_num_stats_entries; 1409 + u32 ppe_num_entries; 1478 1410 struct airoha_ppe *ppe; 1479 - int foe_size, err; 1480 1411 1481 1412 
ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL); 1482 1413 if (!ppe) ··· 1486 1415 ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb; 1487 1416 ppe->dev.ops.check_skb = airoha_ppe_check_skb; 1488 1417 ppe->dev.priv = ppe; 1418 + ppe->eth = eth; 1419 + eth->ppe = ppe; 1489 1420 1490 - foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry); 1421 + ppe_num_entries = airoha_ppe_get_total_num_entries(ppe); 1422 + foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry); 1491 1423 ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma, 1492 1424 GFP_KERNEL); 1493 1425 if (!ppe->foe) 1494 1426 return -ENOMEM; 1495 1427 1496 - ppe->eth = eth; 1497 - eth->ppe = ppe; 1498 - 1499 1428 ppe->foe_flow = devm_kzalloc(eth->dev, 1500 - PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow), 1429 + ppe_num_entries * sizeof(*ppe->foe_flow), 1501 1430 GFP_KERNEL); 1502 1431 if (!ppe->foe_flow) 1503 1432 return -ENOMEM; 1504 1433 1505 - foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats); 1506 - if (foe_size) { 1434 + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); 1435 + if (ppe_num_stats_entries > 0) { 1436 + foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats); 1507 1437 ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size, 1508 1438 &ppe->foe_stats_dma, 1509 1439 GFP_KERNEL); 1510 1440 if (!ppe->foe_stats) 1511 1441 return -ENOMEM; 1512 1442 } 1443 + 1444 + ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries, 1445 + GFP_KERNEL); 1446 + if (!ppe->foe_check_time) 1447 + return -ENOMEM; 1448 + 1449 + err = airoha_ppe_flush_sram_entries(ppe); 1450 + if (err) 1451 + return err; 1513 1452 1514 1453 err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params); 1515 1454 if (err)
+2 -1
drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
··· 53 53 [AIROHA_FOE_STATE_FIN] = "FIN", 54 54 }; 55 55 struct airoha_ppe *ppe = m->private; 56 + u32 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe); 56 57 int i; 57 58 58 - for (i = 0; i < PPE_NUM_ENTRIES; i++) { 59 + for (i = 0; i < ppe_num_entries; i++) { 59 60 const char *state_str, *type_str = "UNKNOWN"; 60 61 void *src_addr = NULL, *dest_addr = NULL; 61 62 u16 *src_port = NULL, *dest_port = NULL;
+2 -4
drivers/net/ethernet/airoha/airoha_regs.h
··· 383 383 #define REG_MC_VLAN_DATA 0x2108 384 384 385 385 #define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2)) 386 - #define SP_CPORT_PCIE1_MASK GENMASK(31, 28) 387 - #define SP_CPORT_PCIE0_MASK GENMASK(27, 24) 388 - #define SP_CPORT_USB_MASK GENMASK(7, 4) 389 - #define SP_CPORT_ETH_MASK GENMASK(7, 4) 386 + #define SP_CPORT_DFT_MASK GENMASK(2, 0) 387 + #define SP_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2)) 390 388 391 389 #define REG_SRC_PORT_FC_MAP6 0x2298 392 390 #define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)