Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: airoha: Add FLOW_CLS_STATS callback support

Introduce per-flow stats accounting to the flowtable hw offload in
the airoha_eth driver. Flow stats are split in the PPE and NPU modules:
- PPE: accounts for high 32bit of per-flow stats
- NPU: accounts for low 32bit of per-flow stats

FLOW_CLS_STATS can be enabled or disabled at compile time.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250516-airoha-en7581-flowstats-v2-2-06d5fbf28984@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Lorenzo Bianconi and committed by Jakub Kicinski
b81e0f2b c5291874

+354 -20
+7
drivers/net/ethernet/airoha/Kconfig
··· 24 24 This driver supports the gigabit ethernet MACs in the 25 25 Airoha SoC family. 26 26 27 + config NET_AIROHA_FLOW_STATS 28 + default y 29 + bool "Airoha flow stats" 30 + depends on NET_AIROHA && NET_AIROHA_NPU 31 + help 32 + Enable Airoha flowtable statistic counters. 33 + 27 34 endif #NET_VENDOR_AIROHA
+33
drivers/net/ethernet/airoha/airoha_eth.h
··· 50 50 #define PPE_NUM 2 51 51 #define PPE1_SRAM_NUM_ENTRIES (8 * 1024) 52 52 #define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES) 53 + #ifdef CONFIG_NET_AIROHA_FLOW_STATS 54 + #define PPE1_STATS_NUM_ENTRIES (4 * 1024) 55 + #else 56 + #define PPE1_STATS_NUM_ENTRIES 0 57 + #endif /* CONFIG_NET_AIROHA_FLOW_STATS */ 58 + #define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES) 59 + #define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES) 60 + #define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES) 53 61 #define PPE_DRAM_NUM_ENTRIES (16 * 1024) 54 62 #define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES) 55 63 #define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1) ··· 269 261 270 262 u16 pppoe_id; 271 263 u16 src_mac_lo; 264 + 265 + u32 meter; 272 266 }; 273 267 274 268 #define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24) ··· 305 295 #define AIROHA_FOE_DPI BIT(7) 306 296 #define AIROHA_FOE_TUNNEL BIT(6) 307 297 #define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0) 298 + 299 + #define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16) 300 + #define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9) 301 + #define AIROHA_FOE_METER_GRP3 GENMASK(8, 5) 302 + #define AIROHA_FOE_METER_GRP2 GENMASK(4, 0) 308 303 309 304 struct airoha_foe_bridge { 310 305 u32 dest_mac_hi; ··· 394 379 u32 ib2; 395 380 396 381 struct airoha_foe_mac_info_common l2; 382 + 383 + u32 meter; 397 384 }; 398 385 399 386 struct airoha_foe_entry { ··· 412 395 }; 413 396 u8 data[PPE_ENTRY_SIZE]; 414 397 }; 398 + }; 399 + 400 + struct airoha_foe_stats { 401 + u32 bytes; 402 + u32 packets; 403 + }; 404 + 405 + struct airoha_foe_stats64 { 406 + u64 bytes; 407 + u64 packets; 415 408 }; 416 409 417 410 struct airoha_flow_data { ··· 474 447 struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */ 475 448 u32 hash; 476 449 450 + struct airoha_foe_stats64 stats; 477 451 enum airoha_flow_entry_type type; 478 452 479 453 struct rhash_head node; ··· 551 523 struct hlist_head *foe_flow; 552 524 u16 
foe_check_time[PPE_NUM_ENTRIES]; 553 525 526 + struct airoha_foe_stats *foe_stats; 527 + dma_addr_t foe_stats_dma; 528 + 554 529 struct dentry *debugfs_dir; 555 530 }; 556 531 ··· 613 582 void airoha_ppe_deinit(struct airoha_eth *eth); 614 583 struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, 615 584 u32 hash); 585 + void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, 586 + struct airoha_foe_stats64 *stats); 616 587 617 588 #ifdef CONFIG_DEBUG_FS 618 589 int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+51 -1
drivers/net/ethernet/airoha/airoha_npu.c
··· 12 12 #include <linux/of_reserved_mem.h> 13 13 #include <linux/regmap.h> 14 14 15 + #include "airoha_eth.h" 15 16 #include "airoha_npu.h" 16 17 17 18 #define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin" ··· 73 72 PPE_FUNC_SET_WAIT_HWNAT_INIT, 74 73 PPE_FUNC_SET_WAIT_HWNAT_DEINIT, 75 74 PPE_FUNC_SET_WAIT_API, 75 + PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP, 76 76 }; 77 77 78 78 enum { ··· 117 115 u32 size; 118 116 u32 data; 119 117 } set_info; 118 + struct { 119 + u32 npu_stats_addr; 120 + u32 foe_stats_addr; 121 + } stats_info; 120 122 }; 121 123 }; 122 124 ··· 357 351 return err; 358 352 } 359 353 360 - struct airoha_npu *airoha_npu_get(struct device *dev) 354 + static int airoha_npu_stats_setup(struct airoha_npu *npu, 355 + dma_addr_t foe_stats_addr) 356 + { 357 + int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats); 358 + struct ppe_mbox_data *ppe_data; 359 + 360 + if (!size) /* flow stats are disabled */ 361 + return 0; 362 + 363 + ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC); 364 + if (!ppe_data) 365 + return -ENOMEM; 366 + 367 + ppe_data->func_type = NPU_OP_SET; 368 + ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP; 369 + ppe_data->stats_info.foe_stats_addr = foe_stats_addr; 370 + 371 + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, 372 + sizeof(*ppe_data)); 373 + if (err) 374 + goto out; 375 + 376 + npu->stats = devm_ioremap(npu->dev, 377 + ppe_data->stats_info.npu_stats_addr, 378 + size); 379 + if (!npu->stats) 380 + err = -ENOMEM; 381 + out: 382 + kfree(ppe_data); 383 + 384 + return err; 385 + } 386 + 387 + struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr) 361 388 { 362 389 struct platform_device *pdev; 363 390 struct device_node *np; ··· 426 387 dev_name(dev)); 427 388 npu = ERR_PTR(-EINVAL); 428 389 goto error_module_put; 390 + } 391 + 392 + if (stats_addr) { 393 + int err; 394 + 395 + err = airoha_npu_stats_setup(npu, *stats_addr); 396 + if (err) { 397 + dev_err(dev, "failed to allocate npu 
stats buffer\n"); 398 + npu = ERR_PTR(err); 399 + goto error_module_put; 400 + } 429 401 } 430 402 431 403 return npu;
+3 -1
drivers/net/ethernet/airoha/airoha_npu.h
··· 17 17 struct work_struct wdt_work; 18 18 } cores[NPU_NUM_CORES]; 19 19 20 + struct airoha_foe_stats __iomem *stats; 21 + 20 22 struct { 21 23 int (*ppe_init)(struct airoha_npu *npu); 22 24 int (*ppe_deinit)(struct airoha_npu *npu); ··· 32 30 } ops; 33 31 }; 34 32 35 - struct airoha_npu *airoha_npu_get(struct device *dev); 33 + struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr); 36 34 void airoha_npu_put(struct airoha_npu *npu);
+253 -16
drivers/net/ethernet/airoha/airoha_ppe.c
··· 102 102 103 103 if (airoha_ppe2_is_enabled(eth)) { 104 104 sram_num_entries = 105 - PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES); 105 + PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES); 106 106 airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), 107 107 PPE_SRAM_TB_NUM_ENTRY_MASK | 108 108 PPE_DRAM_TB_NUM_ENTRY_MASK, ··· 119 119 dram_num_entries)); 120 120 } else { 121 121 sram_num_entries = 122 - PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES); 122 + PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES); 123 123 airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), 124 124 PPE_SRAM_TB_NUM_ENTRY_MASK | 125 125 PPE_DRAM_TB_NUM_ENTRY_MASK, ··· 417 417 return hash; 418 418 } 419 419 420 + static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash) 421 + { 422 + if (!airoha_ppe2_is_enabled(ppe->eth)) 423 + return hash; 424 + 425 + return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES 426 + : hash; 427 + } 428 + 429 + static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, 430 + struct airoha_npu *npu, 431 + int index) 432 + { 433 + memset_io(&npu->stats[index], 0, sizeof(*npu->stats)); 434 + memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats)); 435 + } 436 + 437 + static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe, 438 + struct airoha_npu *npu) 439 + { 440 + int i; 441 + 442 + for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++) 443 + airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i); 444 + } 445 + 446 + static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, 447 + struct airoha_npu *npu, 448 + struct airoha_foe_entry *hwe, 449 + u32 hash) 450 + { 451 + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); 452 + u32 index, pse_port, val, *data, *ib2, *meter; 453 + u8 nbq; 454 + 455 + index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); 456 + if (index >= PPE_STATS_NUM_ENTRIES) 457 + return; 458 + 459 + if (type == PPE_PKT_TYPE_BRIDGE) { 460 + data = &hwe->bridge.data; 461 + ib2 = 
&hwe->bridge.ib2; 462 + meter = &hwe->bridge.l2.meter; 463 + } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) { 464 + data = &hwe->ipv6.data; 465 + ib2 = &hwe->ipv6.ib2; 466 + meter = &hwe->ipv6.meter; 467 + } else { 468 + data = &hwe->ipv4.data; 469 + ib2 = &hwe->ipv4.ib2; 470 + meter = &hwe->ipv4.l2.meter; 471 + } 472 + 473 + airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index); 474 + 475 + val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data); 476 + *data = (*data & ~AIROHA_FOE_ACTDP) | 477 + FIELD_PREP(AIROHA_FOE_ACTDP, val); 478 + 479 + val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT | 480 + AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH); 481 + *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val); 482 + 483 + pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2); 484 + nbq = pse_port == 1 ? 6 : 5; 485 + *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT | 486 + AIROHA_FOE_IB2_PSE_QOS); 487 + *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) | 488 + FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq); 489 + } 490 + 420 491 struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, 421 492 u32 hash) 422 493 { ··· 541 470 struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); 542 471 u32 ts = airoha_ppe_get_timestamp(ppe); 543 472 struct airoha_eth *eth = ppe->eth; 473 + struct airoha_npu *npu; 474 + int err = 0; 544 475 545 476 memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1)); 546 477 wmb(); ··· 551 478 e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts); 552 479 hwe->ib1 = e->ib1; 553 480 481 + rcu_read_lock(); 482 + 483 + npu = rcu_dereference(eth->npu); 484 + if (!npu) { 485 + err = -ENODEV; 486 + goto unlock; 487 + } 488 + 489 + airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash); 490 + 554 491 if (hash < PPE_SRAM_NUM_ENTRIES) { 555 492 dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe); 556 493 bool ppe2 = airoha_ppe2_is_enabled(eth) && 557 494 hash >= PPE1_SRAM_NUM_ENTRIES; 558 - struct airoha_npu *npu; 559 - int err = 
-ENODEV; 560 495 561 - rcu_read_lock(); 562 - npu = rcu_dereference(eth->npu); 563 - if (npu) 564 - err = npu->ops.ppe_foe_commit_entry(npu, addr, 565 - sizeof(*hwe), hash, 566 - ppe2); 567 - rcu_read_unlock(); 568 - 569 - return err; 496 + err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe), 497 + hash, ppe2); 570 498 } 499 + unlock: 500 + rcu_read_unlock(); 571 501 572 - return 0; 502 + return err; 573 503 } 574 504 575 505 static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe, ··· 658 582 l2->common.etype = ETH_P_IPV6; 659 583 660 584 hwe.bridge.ib2 = e->data.bridge.ib2; 585 + hwe.bridge.data = e->data.bridge.data; 661 586 airoha_ppe_foe_commit_entry(ppe, &hwe, hash); 662 587 663 588 return 0; ··· 756 679 spin_unlock_bh(&ppe_lock); 757 680 758 681 return 0; 682 + } 683 + 684 + static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1) 685 + { 686 + u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1); 687 + u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe); 688 + int idle; 689 + 690 + if (state == AIROHA_FOE_STATE_BIND) { 691 + ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1); 692 + ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP; 693 + } else { 694 + ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1); 695 + now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now); 696 + ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP; 697 + } 698 + idle = now - ts; 699 + 700 + return idle < 0 ? 
idle + ts_mask + 1 : idle; 701 + } 702 + 703 + static void 704 + airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe, 705 + struct airoha_flow_table_entry *e) 706 + { 707 + int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1); 708 + struct airoha_flow_table_entry *iter; 709 + struct hlist_node *n; 710 + 711 + lockdep_assert_held(&ppe_lock); 712 + 713 + hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) { 714 + struct airoha_foe_entry *hwe; 715 + u32 ib1, state; 716 + int idle; 717 + 718 + hwe = airoha_ppe_foe_get_entry(ppe, iter->hash); 719 + ib1 = READ_ONCE(hwe->ib1); 720 + 721 + state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1); 722 + if (state != AIROHA_FOE_STATE_BIND) { 723 + iter->hash = 0xffff; 724 + airoha_ppe_foe_remove_flow(ppe, iter); 725 + continue; 726 + } 727 + 728 + idle = airoha_ppe_get_entry_idle_time(ppe, ib1); 729 + if (idle >= min_idle) 730 + continue; 731 + 732 + min_idle = idle; 733 + e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP; 734 + e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP; 735 + } 736 + } 737 + 738 + static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe, 739 + struct airoha_flow_table_entry *e) 740 + { 741 + struct airoha_foe_entry *hwe_p, hwe = {}; 742 + 743 + spin_lock_bh(&ppe_lock); 744 + 745 + if (e->type == FLOW_TYPE_L2) { 746 + airoha_ppe_foe_flow_l2_entry_update(ppe, e); 747 + goto unlock; 748 + } 749 + 750 + if (e->hash == 0xffff) 751 + goto unlock; 752 + 753 + hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash); 754 + if (!hwe_p) 755 + goto unlock; 756 + 757 + memcpy(&hwe, hwe_p, sizeof(*hwe_p)); 758 + if (!airoha_ppe_foe_compare_entry(e, &hwe)) { 759 + e->hash = 0xffff; 760 + goto unlock; 761 + } 762 + 763 + e->data.ib1 = hwe.ib1; 764 + unlock: 765 + spin_unlock_bh(&ppe_lock); 766 + } 767 + 768 + static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe, 769 + struct airoha_flow_table_entry *e) 770 + { 771 + airoha_ppe_foe_flow_entry_update(ppe, e); 772 + 773 + return 
airoha_ppe_get_entry_idle_time(ppe, e->data.ib1); 759 774 } 760 775 761 776 static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port, ··· 1065 896 return 0; 1066 897 } 1067 898 899 + void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, 900 + struct airoha_foe_stats64 *stats) 901 + { 902 + u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); 903 + struct airoha_eth *eth = ppe->eth; 904 + struct airoha_npu *npu; 905 + 906 + if (index >= PPE_STATS_NUM_ENTRIES) 907 + return; 908 + 909 + rcu_read_lock(); 910 + 911 + npu = rcu_dereference(eth->npu); 912 + if (npu) { 913 + u64 packets = ppe->foe_stats[index].packets; 914 + u64 bytes = ppe->foe_stats[index].bytes; 915 + struct airoha_foe_stats npu_stats; 916 + 917 + memcpy_fromio(&npu_stats, &npu->stats[index], 918 + sizeof(*npu->stats)); 919 + stats->packets = packets << 32 | npu_stats.packets; 920 + stats->bytes = bytes << 32 | npu_stats.bytes; 921 + } 922 + 923 + rcu_read_unlock(); 924 + } 925 + 926 + static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port, 927 + struct flow_cls_offload *f) 928 + { 929 + struct airoha_eth *eth = port->qdma->eth; 930 + struct airoha_flow_table_entry *e; 931 + u32 idle; 932 + 933 + e = rhashtable_lookup(&eth->flow_table, &f->cookie, 934 + airoha_flow_table_params); 935 + if (!e) 936 + return -ENOENT; 937 + 938 + idle = airoha_ppe_entry_idle_time(eth->ppe, e); 939 + f->stats.lastused = jiffies - idle * HZ; 940 + 941 + if (e->hash != 0xffff) { 942 + struct airoha_foe_stats64 stats = {}; 943 + 944 + airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats); 945 + f->stats.pkts += (stats.packets - e->stats.packets); 946 + f->stats.bytes += (stats.bytes - e->stats.bytes); 947 + e->stats = stats; 948 + } 949 + 950 + return 0; 951 + } 952 + 1068 953 static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port, 1069 954 struct flow_cls_offload *f) 1070 955 { ··· 1127 904 return airoha_ppe_flow_offload_replace(port, f); 1128 905 case 
FLOW_CLS_DESTROY: 1129 906 return airoha_ppe_flow_offload_destroy(port, f); 907 + case FLOW_CLS_STATS: 908 + return airoha_ppe_flow_offload_stats(port, f); 1130 909 default: 1131 910 break; 1132 911 } ··· 1154 929 1155 930 static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth) 1156 931 { 1157 - struct airoha_npu *npu = airoha_npu_get(eth->dev); 932 + struct airoha_npu *npu = airoha_npu_get(eth->dev, 933 + &eth->ppe->foe_stats_dma); 1158 934 1159 935 if (IS_ERR(npu)) { 1160 936 request_module("airoha-npu"); 1161 - npu = airoha_npu_get(eth->dev); 937 + npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma); 1162 938 } 1163 939 1164 940 return npu; ··· 1181 955 err = airoha_ppe_flush_sram_entries(eth->ppe, npu); 1182 956 if (err) 1183 957 goto error_npu_put; 958 + 959 + airoha_ppe_foe_flow_stats_reset(eth->ppe, npu); 1184 960 1185 961 rcu_assign_pointer(eth->npu, npu); 1186 962 synchronize_rcu(); ··· 1254 1026 GFP_KERNEL); 1255 1027 if (!ppe->foe_flow) 1256 1028 return -ENOMEM; 1029 + 1030 + foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats); 1031 + if (foe_size) { 1032 + ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size, 1033 + &ppe->foe_stats_dma, 1034 + GFP_KERNEL); 1035 + if (!ppe->foe_stats) 1036 + return -ENOMEM; 1037 + } 1257 1038 1258 1039 err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params); 1259 1040 if (err)
+7 -2
drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
··· 61 61 u16 *src_port = NULL, *dest_port = NULL; 62 62 struct airoha_foe_mac_info_common *l2; 63 63 unsigned char h_source[ETH_ALEN] = {}; 64 + struct airoha_foe_stats64 stats = {}; 64 65 unsigned char h_dest[ETH_ALEN]; 65 66 struct airoha_foe_entry *hwe; 66 67 u32 type, state, ib2, data; ··· 145 144 cpu_to_be16(hwe->ipv4.l2.src_mac_lo); 146 145 } 147 146 147 + airoha_ppe_foe_entry_get_stats(ppe, i, &stats); 148 + 148 149 *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi); 149 150 *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo); 150 151 *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi); 151 152 152 153 seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x" 153 - " vlan=%d,%d ib1=%08x ib2=%08x\n", 154 + " vlan=%d,%d ib1=%08x ib2=%08x" 155 + " packets=%llu bytes=%llu\n", 154 156 h_source, h_dest, l2->etype, data, 155 - l2->vlan1, l2->vlan2, hwe->ib1, ib2); 157 + l2->vlan1, l2->vlan2, hwe->ib1, ib2, 158 + stats.packets, stats.bytes); 156 159 } 157 160 158 161 return 0;