Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ethernet: mtk_eth_soc: ppe: add support for multiple PPEs

Add the missing pieces to allow multiple PPE units, one for each GMAC.
mtk_gdm_config has been modified to work on the targeted mac ID;
the inner loop was moved outside of the function to allow unrelated
operations like setting the MAC's PPE index.
Introduce a sanity check in flow_offload_replace to account for
non-MTK ingress devices.
An additional field 'ppe_idx' was added to struct mtk_mac in order
to keep track of the assigned PPE unit.

Signed-off-by: Elad Yifee <eladwf@gmail.com>
Link: https://lore.kernel.org/r/20240607082155.20021-1-eladwf@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Elad Yifee and committed by
Jakub Kicinski
dee4dd10 05f43db7

+92 -45
+72 -40
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 80 80 .fq_blen = 0x1b2c, 81 81 }, 82 82 .gdm1_cnt = 0x2400, 83 - .gdma_to_ppe = 0x4444, 83 + .gdma_to_ppe = { 84 + [0] = 0x4444, 85 + }, 84 86 .ppe_base = 0x0c00, 85 87 .wdma_base = { 86 88 [0] = 0x2800, ··· 146 144 .tx_sch_rate = 0x4798, 147 145 }, 148 146 .gdm1_cnt = 0x1c00, 149 - .gdma_to_ppe = 0x3333, 147 + .gdma_to_ppe = { 148 + [0] = 0x3333, 149 + [1] = 0x4444, 150 + }, 150 151 .ppe_base = 0x2000, 151 152 .wdma_base = { 152 153 [0] = 0x4800, ··· 197 192 .tx_sch_rate = 0x4798, 198 193 }, 199 194 .gdm1_cnt = 0x1c00, 200 - .gdma_to_ppe = 0x3333, 195 + .gdma_to_ppe = { 196 + [0] = 0x3333, 197 + [1] = 0x4444, 198 + [2] = 0xcccc, 199 + }, 201 200 .ppe_base = 0x2000, 202 201 .wdma_base = { 203 202 [0] = 0x4800, ··· 2024 2015 struct mtk_rx_dma_v2 *rxd, trxd; 2025 2016 int done = 0, bytes = 0; 2026 2017 dma_addr_t dma_addr = DMA_MAPPING_ERROR; 2018 + int ppe_idx = 0; 2027 2019 2028 2020 while (done < budget) { 2029 2021 unsigned int pktlen, *rxdcsum; ··· 2068 2058 goto release_desc; 2069 2059 2070 2060 netdev = eth->netdev[mac]; 2061 + ppe_idx = eth->mac[mac]->ppe_idx; 2071 2062 2072 2063 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) 2073 2064 goto release_desc; ··· 2192 2181 } 2193 2182 2194 2183 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) 2195 - mtk_ppe_check_skb(eth->ppe[0], skb, hash); 2184 + mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash); 2196 2185 2197 2186 skb_record_rx_queue(skb, 0); 2198 2187 napi_gro_receive(napi, skb); ··· 3287 3276 return 0; 3288 3277 } 3289 3278 3290 - static void mtk_gdm_config(struct mtk_eth *eth, u32 config) 3279 + static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config) 3291 3280 { 3292 - int i; 3281 + u32 val; 3293 3282 3294 3283 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) 3295 3284 return; 3296 3285 3297 - for (i = 0; i < MTK_MAX_DEVS; i++) { 3298 - u32 val; 3286 + val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id)); 3299 3287 3300 - if (!eth->netdev[i]) 3301 - continue; 3288 + /* default setup the 
forward port to send frame to PDMA */ 3289 + val &= ~0xffff; 3302 3290 3303 - val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); 3291 + /* Enable RX checksum */ 3292 + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; 3304 3293 3305 - /* default setup the forward port to send frame to PDMA */ 3306 - val &= ~0xffff; 3294 + val |= config; 3307 3295 3308 - /* Enable RX checksum */ 3309 - val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; 3296 + if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id])) 3297 + val |= MTK_GDMA_SPECIAL_TAG; 3310 3298 3311 - val |= config; 3312 - 3313 - if (netdev_uses_dsa(eth->netdev[i])) 3314 - val |= MTK_GDMA_SPECIAL_TAG; 3315 - 3316 - mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); 3317 - } 3318 - /* Reset and enable PSE */ 3319 - mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); 3320 - mtk_w32(eth, 0, MTK_RST_GL); 3299 + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id)); 3321 3300 } 3322 3301 3323 3302 ··· 3367 3366 { 3368 3367 struct mtk_mac *mac = netdev_priv(dev); 3369 3368 struct mtk_eth *eth = mac->hw; 3370 - int i, err; 3369 + struct mtk_mac *target_mac; 3370 + int i, err, ppe_num; 3371 + 3372 + ppe_num = eth->soc->ppe_num; 3371 3373 3372 3374 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); 3373 3375 if (err) { ··· 3394 3390 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) 3395 3391 mtk_ppe_start(eth->ppe[i]); 3396 3392 3397 - gdm_config = soc->offload_version ? 
soc->reg_map->gdma_to_ppe 3398 - : MTK_GDMA_TO_PDMA; 3399 - mtk_gdm_config(eth, gdm_config); 3393 + for (i = 0; i < MTK_MAX_DEVS; i++) { 3394 + if (!eth->netdev[i]) 3395 + break; 3396 + 3397 + target_mac = netdev_priv(eth->netdev[i]); 3398 + if (!soc->offload_version) { 3399 + target_mac->ppe_idx = 0; 3400 + gdm_config = MTK_GDMA_TO_PDMA; 3401 + } else if (ppe_num >= 3 && target_mac->id == 2) { 3402 + target_mac->ppe_idx = 2; 3403 + gdm_config = soc->reg_map->gdma_to_ppe[2]; 3404 + } else if (ppe_num >= 2 && target_mac->id == 1) { 3405 + target_mac->ppe_idx = 1; 3406 + gdm_config = soc->reg_map->gdma_to_ppe[1]; 3407 + } else { 3408 + target_mac->ppe_idx = 0; 3409 + gdm_config = soc->reg_map->gdma_to_ppe[0]; 3410 + } 3411 + mtk_gdm_config(eth, target_mac->id, gdm_config); 3412 + } 3413 + /* Reset and enable PSE */ 3414 + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); 3415 + mtk_w32(eth, 0, MTK_RST_GL); 3400 3416 3401 3417 napi_enable(&eth->tx_napi); 3402 3418 napi_enable(&eth->rx_napi); 3403 3419 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); 3404 3420 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); 3405 3421 refcount_set(&eth->dma_refcnt, 1); 3406 - } 3407 - else 3422 + } else { 3408 3423 refcount_inc(&eth->dma_refcnt); 3424 + } 3409 3425 3410 3426 phylink_start(mac->phylink); 3411 3427 netif_tx_start_all_queues(dev); ··· 3502 3478 if (!refcount_dec_and_test(&eth->dma_refcnt)) 3503 3479 return 0; 3504 3480 3505 - mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); 3481 + for (i = 0; i < MTK_MAX_DEVS; i++) 3482 + mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL); 3506 3483 3507 3484 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); 3508 3485 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); ··· 4984 4959 } 4985 4960 4986 4961 if (eth->soc->offload_version) { 4987 - u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 
2 : 1; 4962 + u8 ppe_num = eth->soc->ppe_num; 4988 4963 4989 - num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); 4990 - for (i = 0; i < num_ppe; i++) { 4991 - u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; 4964 + ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num); 4965 + for (i = 0; i < ppe_num; i++) { 4966 + u32 ppe_addr = eth->soc->reg_map->ppe_base; 4992 4967 4968 + ppe_addr += (i == 2 ? 0xc00 : i * 0x400); 4993 4969 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); 4994 4970 4995 4971 if (!eth->ppe[i]) { 4996 4972 err = -ENOMEM; 4997 4973 goto err_deinit_ppe; 4998 4974 } 4999 - } 4975 + err = mtk_eth_offload_init(eth, i); 5000 4976 5001 - err = mtk_eth_offload_init(eth); 5002 - if (err) 5003 - goto err_deinit_ppe; 4977 + if (err) 4978 + goto err_deinit_ppe; 4979 + } 5004 4980 } 5005 4981 5006 4982 for (i = 0; i < MTK_MAX_DEVS; i++) { ··· 5109 5083 .required_pctl = false, 5110 5084 .version = 1, 5111 5085 .offload_version = 1, 5086 + .ppe_num = 1, 5112 5087 .hash_offset = 2, 5113 5088 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, 5114 5089 .tx = { ··· 5138 5111 .required_pctl = false, 5139 5112 .version = 1, 5140 5113 .offload_version = 2, 5114 + .ppe_num = 1, 5141 5115 .hash_offset = 2, 5142 5116 .has_accounting = true, 5143 5117 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, ··· 5167 5139 .required_pctl = true, 5168 5140 .version = 1, 5169 5141 .offload_version = 1, 5142 + .ppe_num = 1, 5170 5143 .hash_offset = 2, 5171 5144 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, 5172 5145 .disable_pll_modes = true, ··· 5223 5194 .required_pctl = false, 5224 5195 .version = 2, 5225 5196 .offload_version = 2, 5197 + .ppe_num = 2, 5226 5198 .hash_offset = 4, 5227 5199 .has_accounting = true, 5228 5200 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, ··· 5253 5223 .required_pctl = false, 5254 5224 .version = 2, 5255 5225 .offload_version = 2, 5226 + .ppe_num = 2, 5256 5227 .hash_offset = 4, 5257 5228 .has_accounting = true, 5258 5229 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, ··· 
5283 5252 .required_pctl = false, 5284 5253 .version = 3, 5285 5254 .offload_version = 2, 5255 + .ppe_num = 3, 5286 5256 .hash_offset = 4, 5287 5257 .has_accounting = true, 5288 5258 .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+5 -3
drivers/net/ethernet/mediatek/mtk_eth_soc.h
··· 1132 1132 u32 tx_sch_rate; /* tx scheduler rate control registers */ 1133 1133 } qdma; 1134 1134 u32 gdm1_cnt; 1135 - u32 gdma_to_ppe; 1135 + u32 gdma_to_ppe[3]; 1136 1136 u32 ppe_base; 1137 1137 u32 wdma_base[3]; 1138 1138 u32 pse_iq_sta; ··· 1170 1170 u8 offload_version; 1171 1171 u8 hash_offset; 1172 1172 u8 version; 1173 + u8 ppe_num; 1173 1174 u16 foe_entry_size; 1174 1175 netdev_features_t hw_features; 1175 1176 bool has_accounting; ··· 1295 1294 1296 1295 struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS]; 1297 1296 1298 - struct mtk_ppe *ppe[2]; 1297 + struct mtk_ppe *ppe[3]; 1299 1298 struct rhashtable flow_table; 1300 1299 1301 1300 struct bpf_prog __rcu *prog; ··· 1320 1319 struct mtk_mac { 1321 1320 int id; 1322 1321 phy_interface_t interface; 1322 + u8 ppe_idx; 1323 1323 int speed; 1324 1324 struct device_node *of_node; 1325 1325 struct phylink *phylink; ··· 1442 1440 int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); 1443 1441 int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); 1444 1442 1445 - int mtk_eth_offload_init(struct mtk_eth *eth); 1443 + int mtk_eth_offload_init(struct mtk_eth *eth, u8 id); 1446 1444 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, 1447 1445 void *type_data); 1448 1446 int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+15 -2
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
··· 245 245 int ppe_index) 246 246 { 247 247 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 248 + struct net_device *idev = NULL, *odev = NULL; 248 249 struct flow_action_entry *act; 249 250 struct mtk_flow_data data = {}; 250 251 struct mtk_foe_entry foe; 251 - struct net_device *odev = NULL; 252 252 struct mtk_flow_entry *entry; 253 253 int offload_type = 0; 254 254 int wed_index = -1; ··· 264 264 struct flow_match_meta match; 265 265 266 266 flow_rule_match_meta(rule, &match); 267 + if (mtk_is_netsys_v2_or_greater(eth)) { 268 + idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex); 269 + if (idev) { 270 + struct mtk_mac *mac = netdev_priv(idev); 271 + 272 + if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num)) 273 + return -EINVAL; 274 + 275 + ppe_index = mac->ppe_idx; 276 + } 277 + } 267 278 } else { 268 279 return -EOPNOTSUPP; 269 280 } ··· 648 637 } 649 638 } 650 639 651 - int mtk_eth_offload_init(struct mtk_eth *eth) 640 + int mtk_eth_offload_init(struct mtk_eth *eth, u8 id) 652 641 { 642 + if (!eth->ppe[id] || !eth->ppe[id]->foe_table) 643 + return 0; 653 644 return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params); 654 645 }