Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'octeontx2-af-next'

Ratheesh Kannoth says:

====================
octeontx2: *** Exact Match Table and Field hash ***

*** Exact match table and Field hash support for CN10KB silicon ***

Ratheesh Kannoth (11):

This patch series enables the exact match table in CN10KB silicon. Legacy
silicon used NPC mcam to do packet fields/channel matching for NPC rules.
NPC mcam resources were exhausted as customer use cases increased.
Supporting many DMAC filters becomes a challenge, as the RPM-based filter
count is limited. The exact match table has a 4-way 2K entry table and a 32 entry
fully associative cam table. The second table handles hash
table collision overflows from the 4-way 2K entry table. Enabling the exact match table
results in the KEX key being appended with Hit/Miss status. This can be used
to match in NPC mcam for a more generic rule and drop those packets, rather than
having DMAC drop rules for each DMAC entry in NPC mcam.

octeontx2-af: Exact match support
octeontx2-af: Exact match scan from kex profile
octeontx2-af: devlink configuration support
octeontx2-af: FLR handler for exact match table.
octeontx2-af: Drop rules for NPC MCAM
octeontx2-af: Debugsfs support for exact match.
octeontx2: Modify mbox request and response structures
octeontx2-af: Wrapper functions for mac addr add/del/update/reset
octeontx2-af: Invoke exact match functions if supported
octeontx2-pf: Add support for exact match table.
octeontx2-af: Enable Exact match flag in kex profile

Suman Ghosh (1):

The CN10KB variant of the CN10K series of silicons supports
a new feature wherein a large protocol field
(e.g. a 128-bit IPv6 DIP) can be condensed into a small
hashed 32-bit value. This saves a lot of space in the MCAM key
and allows the user to add more protocol fields into the filter.
A maximum of two such protocol fields can be hashed.
This patch adds support for hashing IPv6 SIP and/or DIP.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+2877 -68
+1 -1
drivers/net/ethernet/marvell/octeontx2/af/Makefile
··· 11 11 rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ 12 12 rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ 13 13 rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ 14 - rvu_sdp.o 14 + rvu_sdp.o rvu_npc_hash.o
+35 -6
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
··· 169 169 M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ 170 170 cgx_features_info_msg) \ 171 171 M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ 172 - M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \ 172 + M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \ 173 + msg_rsp) \ 173 174 M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \ 174 - msg_rsp) \ 175 + cgx_mac_addr_update_rsp) \ 175 176 M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \ 176 177 cgx_pfc_rsp) \ 177 178 /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ ··· 242 241 M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ 243 242 npc_mcam_get_stats_req, \ 244 243 npc_mcam_get_stats_rsp) \ 244 + M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \ 245 + npc_get_secret_key_req, \ 246 + npc_get_secret_key_rsp) \ 245 247 /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ 246 248 M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ 247 249 nix_lf_alloc_req, nix_lf_alloc_rsp) \ ··· 432 428 struct mbox_msghdr hdr; 433 429 u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ 434 430 u8 nix_shaping; /* Is shaping and coloring supported */ 431 + u8 npc_hash_extract; /* Is hash extract supported */ 435 432 }; 436 433 437 434 /* CGX mbox message formats */ ··· 456 451 struct cgx_mac_addr_set_or_get { 457 452 struct mbox_msghdr hdr; 458 453 u8 mac_addr[ETH_ALEN]; 454 + u32 index; 459 455 }; 460 456 461 457 /* Structure for requesting the operation to ··· 472 466 */ 473 467 struct cgx_mac_addr_add_rsp { 474 468 struct mbox_msghdr hdr; 475 - u8 index; 469 + u32 index; 476 470 }; 477 471 478 472 /* Structure for requesting the operation to ··· 480 474 */ 481 475 struct cgx_mac_addr_del_req { 482 476 struct mbox_msghdr hdr; 483 - u8 index; 477 + u32 index; 484 478 }; 485 479 486 480 /* Structure for response against the operation to ··· 488 482 */ 489 483 struct cgx_max_dmac_entries_get_rsp { 490 484 struct mbox_msghdr hdr; 
491 - u8 max_dmac_filters; 485 + u32 max_dmac_filters; 492 486 }; 493 487 494 488 struct cgx_link_user_info { ··· 589 583 int status; 590 584 }; 591 585 586 + struct cgx_mac_addr_reset_req { 587 + struct mbox_msghdr hdr; 588 + u32 index; 589 + }; 590 + 592 591 struct cgx_mac_addr_update_req { 593 592 struct mbox_msghdr hdr; 594 593 u8 mac_addr[ETH_ALEN]; 595 - u8 index; 594 + u32 index; 595 + }; 596 + 597 + struct cgx_mac_addr_update_rsp { 598 + struct mbox_msghdr hdr; 599 + u32 index; 596 600 }; 597 601 598 602 #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ ··· 1456 1440 u8 stat_ena; /* enabled */ 1457 1441 }; 1458 1442 1443 + struct npc_get_secret_key_req { 1444 + struct mbox_msghdr hdr; 1445 + u8 intf; 1446 + }; 1447 + 1448 + struct npc_get_secret_key_rsp { 1449 + struct mbox_msghdr hdr; 1450 + u64 secret_key[3]; 1451 + }; 1452 + 1459 1453 enum ptp_op { 1460 1454 PTP_OP_ADJFINE = 0, 1461 1455 PTP_OP_GET_CLOCK = 1, ··· 1648 1622 LMAC_AF_ERR_PERM_DENIED = -1103, 1649 1623 LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104, 1650 1624 LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105, 1625 + LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108, 1626 + LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109, 1627 + LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110, 1651 1628 }; 1652 1629 1653 1630 #endif /* MBOX_H */
+25
drivers/net/ethernet/marvell/octeontx2/af/npc.h
··· 10 10 11 11 #define NPC_KEX_CHAN_MASK 0xFFFULL 12 12 13 + #define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ 14 + rvu_write64(rvu, blkaddr, \ 15 + NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) 16 + 17 + #define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ 18 + rvu_write64(rvu, blkaddr, \ 19 + NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) 20 + 13 21 enum NPC_LID_E { 14 22 NPC_LID_LA = 0, 15 23 NPC_LID_LB, ··· 208 200 NPC_ERRLEV, 209 201 NPC_ERRCODE, 210 202 NPC_LXMB, 203 + NPC_EXACT_RESULT, 211 204 NPC_LA, 212 205 NPC_LB, 213 206 NPC_LC, ··· 388 379 u64 rsvd_63_61 :3; 389 380 #endif 390 381 }; 382 + 383 + /* NPC_AF_INTFX_KEX_CFG field masks */ 384 + #define NPC_EXACT_NIBBLE_START 40 385 + #define NPC_EXACT_NIBBLE_END 43 386 + #define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40) 387 + 388 + /* NPC_EXACT_KEX_S nibble definitions for each field */ 389 + #define NPC_EXACT_NIBBLE_HIT BIT_ULL(40) 390 + #define NPC_EXACT_NIBBLE_OPC BIT_ULL(40) 391 + #define NPC_EXACT_NIBBLE_WAY BIT_ULL(40) 392 + #define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41) 393 + 394 + #define NPC_EXACT_RESULT_HIT BIT_ULL(0) 395 + #define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1) 396 + #define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3) 397 + #define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5) 391 398 392 399 /* NPC_AF_INTFX_KEX_CFG field masks */ 393 400 #define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+3 -2
drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
··· 155 155 156 156 /* Rx parse key extract nibble enable */ 157 157 #define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \ 158 - NPC_PARSE_NIBBLE_ERRCODE | \ 158 + NPC_PARSE_NIBBLE_L2L3_BCAST | \ 159 159 NPC_PARSE_NIBBLE_LA_LTYPE | \ 160 160 NPC_PARSE_NIBBLE_LB_LTYPE | \ 161 161 NPC_PARSE_NIBBLE_LC_LTYPE | \ ··· 15123 15123 .kpu_version = NPC_KPU_PROFILE_VER, 15124 15124 .keyx_cfg = { 15125 15125 /* nibble: LA..LE (ltype only) + Error code + Channel */ 15126 - [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX, 15126 + [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX | 15127 + (u64)NPC_EXACT_NIBBLE_HIT, 15127 15128 /* nibble: LA..LE (ltype only) */ 15128 15129 [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX, 15129 15130 },
+16
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
··· 18 18 #include "ptp.h" 19 19 20 20 #include "rvu_trace.h" 21 + #include "rvu_npc_hash.h" 21 22 22 23 #define DRV_NAME "rvu_af" 23 24 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" ··· 69 68 hw->cap.nix_tx_link_bp = true; 70 69 hw->cap.nix_rx_multicast = true; 71 70 hw->cap.nix_shaper_toggle_wait = false; 71 + hw->cap.npc_hash_extract = false; 72 + hw->cap.npc_exact_match_enabled = false; 72 73 hw->rvu = rvu; 73 74 74 75 if (is_rvu_pre_96xx_C0(rvu)) { ··· 88 85 89 86 if (!is_rvu_otx2(rvu)) 90 87 hw->cap.per_pf_mbox_regs = true; 88 + 89 + if (is_rvu_npc_hash_extract_en(rvu)) 90 + hw->cap.npc_hash_extract = true; 91 91 } 92 92 93 93 /* Poll a RVU block's register 'offset', for a 'zero' ··· 1128 1122 goto cgx_err; 1129 1123 } 1130 1124 1125 + err = rvu_npc_exact_init(rvu); 1126 + if (err) { 1127 + dev_err(rvu->dev, "failed to initialize exact match table\n"); 1128 + return err; 1129 + } 1130 + 1131 1131 /* Assign MACs for CGX mapped functions */ 1132 1132 rvu_setup_pfvf_macaddress(rvu); 1133 1133 ··· 2003 1991 2004 1992 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; 2005 1993 rsp->nix_shaping = hw->cap.nix_shaping; 1994 + rsp->npc_hash_extract = hw->cap.npc_hash_extract; 2006 1995 2007 1996 return 0; 2008 1997 } ··· 2561 2548 2562 2549 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) 2563 2550 { 2551 + if (rvu_npc_exact_has_match_table(rvu)) 2552 + rvu_npc_exact_reset(rvu, pcifunc); 2553 + 2564 2554 mutex_lock(&rvu->flr_lock); 2565 2555 /* Reset order should reflect inter-block dependencies: 2566 2556 * 1. Reset any packet/work sources (NIX, CPT, TIM)
+23 -1
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
··· 338 338 bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ 339 339 bool programmable_chans; /* Channels programmable ? */ 340 340 bool ipolicer; 341 + bool npc_hash_extract; /* Hash extract enabled ? */ 342 + bool npc_exact_match_enabled; /* Exact match supported ? */ 341 343 }; 342 344 343 345 struct rvu_hwinfo { ··· 371 369 struct rvu *rvu; 372 370 struct npc_pkind pkind; 373 371 struct npc_mcam mcam; 372 + struct npc_exact_table *table; 374 373 }; 375 374 376 375 struct mbox_wq_info { ··· 422 419 const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */ 423 420 const struct npc_kpu_profile *kpu; /* array[kpus] */ 424 421 struct npc_mcam_kex *mkex; 422 + struct npc_mcam_kex_hash *mkex_hash; 425 423 bool custom; 426 424 size_t pkinds; 427 425 size_t kpus; ··· 577 573 return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || 578 574 midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX || 579 575 midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); 576 + } 577 + 578 + static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu) 579 + { 580 + u64 npc_const3; 581 + 582 + npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3); 583 + if (!(npc_const3 & BIT_ULL(62))) 584 + return false; 585 + 586 + return true; 580 587 } 581 588 582 589 static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, ··· 769 754 u32 convert_bytes_to_dwrr_mtu(u32 bytes); 770 755 771 756 /* NPC APIs */ 772 - int rvu_npc_init(struct rvu *rvu); 773 757 void rvu_npc_freemem(struct rvu *rvu); 774 758 int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); 775 759 void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf); ··· 787 773 u64 chan); 788 774 void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 789 775 bool enable); 776 + 790 777 void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, 791 778 int nixlf, int type, bool enable); 792 779 void rvu_npc_disable_mcam_entries(struct rvu 
*rvu, u16 pcifunc, int nixlf); 780 + bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable); 793 781 void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); 794 782 void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); 795 783 void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); 796 784 void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, 797 785 int group, int alg_idx, int mcam_index); 786 + 798 787 void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, 799 788 int blkaddr, int *alloc_cnt, 800 789 int *enable_cnt); ··· 832 815 int type); 833 816 bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, 834 817 int index); 818 + int rvu_npc_init(struct rvu *rvu); 819 + int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx, 820 + u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask, 821 + u64 bcast_mcast_val, u64 bcast_mcast_mask); 822 + void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx); 835 823 836 824 /* CPT APIs */ 837 825 int rvu_cpt_register_interrupts(struct rvu *rvu);
+37 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
··· 14 14 #include "lmac_common.h" 15 15 #include "rvu_reg.h" 16 16 #include "rvu_trace.h" 17 + #include "rvu_npc_hash.h" 17 18 18 19 struct cgx_evq_entry { 19 20 struct list_head evq_node; ··· 475 474 if (!is_cgx_config_permitted(rvu, pcifunc)) 476 475 return; 477 476 477 + if (rvu_npc_exact_has_match_table(rvu)) { 478 + rvu_npc_exact_reset(rvu, pcifunc); 479 + return; 480 + } 481 + 478 482 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 479 483 cgx_dev = cgx_get_pdata(cgx_id); 480 484 lmac_count = cgx_get_lmac_cnt(cgx_dev); ··· 590 584 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 591 585 return -EPERM; 592 586 587 + if (rvu_npc_exact_has_match_table(rvu)) 588 + return rvu_npc_exact_mac_addr_set(rvu, req, rsp); 589 + 593 590 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 594 591 595 592 cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); ··· 610 601 611 602 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 612 603 return -EPERM; 604 + 605 + if (rvu_npc_exact_has_match_table(rvu)) 606 + return rvu_npc_exact_mac_addr_add(rvu, req, rsp); 613 607 614 608 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 615 609 rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); ··· 634 622 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 635 623 return -EPERM; 636 624 625 + if (rvu_npc_exact_has_match_table(rvu)) 626 + return rvu_npc_exact_mac_addr_del(rvu, req, rsp); 627 + 637 628 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 638 629 return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); 639 630 } ··· 655 640 */ 656 641 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { 657 642 rsp->max_dmac_filters = 0; 643 + return 0; 644 + } 645 + 646 + if (rvu_npc_exact_has_match_table(rvu)) { 647 + rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu); 658 648 return 0; 659 649 } 660 650 ··· 700 680 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 701 681 return -EPERM; 702 682 683 + /* Disable 
drop on non hit rule */ 684 + if (rvu_npc_exact_has_match_table(rvu)) 685 + return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc); 686 + 703 687 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 704 688 705 689 cgx_lmac_promisc_config(cgx_id, lmac_id, true); ··· 718 694 719 695 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 720 696 return -EPERM; 697 + 698 + /* Disable drop on non hit rule */ 699 + if (rvu_npc_exact_has_match_table(rvu)) 700 + return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc); 721 701 722 702 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 723 703 ··· 1116 1088 return 0; 1117 1089 } 1118 1090 1119 - int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, 1091 + int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, 1120 1092 struct msg_rsp *rsp) 1121 1093 { 1122 1094 int pf = rvu_get_pf(req->hdr.pcifunc); ··· 1126 1098 return LMAC_AF_ERR_PERM_DENIED; 1127 1099 1128 1100 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1101 + 1102 + if (rvu_npc_exact_has_match_table(rvu)) 1103 + return rvu_npc_exact_mac_addr_reset(rvu, req, rsp); 1104 + 1129 1105 return cgx_lmac_addr_reset(cgx_id, lmac_id); 1130 1106 } 1131 1107 1132 1108 int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, 1133 1109 struct cgx_mac_addr_update_req *req, 1134 - struct msg_rsp *rsp) 1110 + struct cgx_mac_addr_update_rsp *rsp) 1135 1111 { 1136 1112 int pf = rvu_get_pf(req->hdr.pcifunc); 1137 1113 u8 cgx_id, lmac_id; 1138 1114 1139 1115 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 1140 1116 return LMAC_AF_ERR_PERM_DENIED; 1117 + 1118 + if (rvu_npc_exact_has_match_table(rvu)) 1119 + return rvu_npc_exact_mac_addr_update(rvu, req, rsp); 1141 1120 1142 1121 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1143 1122 return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+179
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
··· 18 18 #include "cgx.h" 19 19 #include "lmac_common.h" 20 20 #include "npc.h" 21 + #include "rvu_npc_hash.h" 21 22 22 23 #define DEBUGFS_DIR_NAME "octeontx2" 23 24 ··· 2601 2600 2602 2601 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL); 2603 2602 2603 + static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused) 2604 + { 2605 + struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 }; 2606 + struct npc_exact_table_entry *cam_entry; 2607 + struct npc_exact_table *table; 2608 + struct rvu *rvu = s->private; 2609 + int i, j; 2610 + 2611 + u8 bitmap = 0; 2612 + 2613 + table = rvu->hw->table; 2614 + 2615 + mutex_lock(&table->lock); 2616 + 2617 + /* Check if there is at least one entry in mem table */ 2618 + if (!table->mem_tbl_entry_cnt) 2619 + goto dump_cam_table; 2620 + 2621 + /* Print table headers */ 2622 + seq_puts(s, "\n\tExact Match MEM Table\n"); 2623 + seq_puts(s, "Index\t"); 2624 + 2625 + for (i = 0; i < table->mem_table.ways; i++) { 2626 + mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i], 2627 + struct npc_exact_table_entry, list); 2628 + 2629 + seq_printf(s, "Way-%d\t\t\t\t\t", i); 2630 + } 2631 + 2632 + seq_puts(s, "\n"); 2633 + for (i = 0; i < table->mem_table.ways; i++) 2634 + seq_puts(s, "\tChan MAC \t"); 2635 + 2636 + seq_puts(s, "\n\n"); 2637 + 2638 + /* Print mem table entries */ 2639 + for (i = 0; i < table->mem_table.depth; i++) { 2640 + bitmap = 0; 2641 + for (j = 0; j < table->mem_table.ways; j++) { 2642 + if (!mem_entry[j]) 2643 + continue; 2644 + 2645 + if (mem_entry[j]->index != i) 2646 + continue; 2647 + 2648 + bitmap |= BIT(j); 2649 + } 2650 + 2651 + /* No valid entries */ 2652 + if (!bitmap) 2653 + continue; 2654 + 2655 + seq_printf(s, "%d\t", i); 2656 + for (j = 0; j < table->mem_table.ways; j++) { 2657 + if (!(bitmap & BIT(j))) { 2658 + seq_puts(s, "nil\t\t\t\t\t"); 2659 + continue; 2660 + } 2661 + 2662 + seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan, 2663 + 
mem_entry[j]->mac); 2664 + mem_entry[j] = list_next_entry(mem_entry[j], list); 2665 + } 2666 + seq_puts(s, "\n"); 2667 + } 2668 + 2669 + dump_cam_table: 2670 + 2671 + if (!table->cam_tbl_entry_cnt) 2672 + goto done; 2673 + 2674 + seq_puts(s, "\n\tExact Match CAM Table\n"); 2675 + seq_puts(s, "index\tchan\tMAC\n"); 2676 + 2677 + /* Traverse cam table entries */ 2678 + list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) { 2679 + seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan, 2680 + cam_entry->mac); 2681 + } 2682 + 2683 + done: 2684 + mutex_unlock(&table->lock); 2685 + return 0; 2686 + } 2687 + 2688 + RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL); 2689 + 2690 + static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused) 2691 + { 2692 + struct npc_exact_table *table; 2693 + struct rvu *rvu = s->private; 2694 + int i; 2695 + 2696 + table = rvu->hw->table; 2697 + 2698 + seq_puts(s, "\n\tExact Table Info\n"); 2699 + seq_printf(s, "Exact Match Feature : %s\n", 2700 + rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable"); 2701 + if (!rvu->hw->cap.npc_exact_match_enabled) 2702 + return 0; 2703 + 2704 + seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n"); 2705 + for (i = 0; i < table->num_drop_rules; i++) 2706 + seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]); 2707 + 2708 + seq_puts(s, "\nMcam Index\tPromisc Mode Status\n"); 2709 + for (i = 0; i < table->num_drop_rules; i++) 2710 + seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? 
"on" : "off"); 2711 + 2712 + seq_puts(s, "\n\tMEM Table Info\n"); 2713 + seq_printf(s, "Ways : %d\n", table->mem_table.ways); 2714 + seq_printf(s, "Depth : %d\n", table->mem_table.depth); 2715 + seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask); 2716 + seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask); 2717 + seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset); 2718 + 2719 + seq_puts(s, "\n\tCAM Table Info\n"); 2720 + seq_printf(s, "Depth : %d\n", table->cam_table.depth); 2721 + 2722 + return 0; 2723 + } 2724 + 2725 + RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL); 2726 + 2727 + static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused) 2728 + { 2729 + struct npc_exact_table *table; 2730 + struct rvu *rvu = s->private; 2731 + struct npc_key_field *field; 2732 + u16 chan, pcifunc; 2733 + int blkaddr, i; 2734 + u64 cfg, cam1; 2735 + char *str; 2736 + 2737 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2738 + table = rvu->hw->table; 2739 + 2740 + field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN]; 2741 + 2742 + seq_puts(s, "\n\t Exact Hit on drop status\n"); 2743 + seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n"); 2744 + 2745 + for (i = 0; i < table->num_drop_rules; i++) { 2746 + pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i); 2747 + cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0)); 2748 + 2749 + /* channel will be always in keyword 0 */ 2750 + cam1 = rvu_read64(rvu, blkaddr, 2751 + NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1)); 2752 + chan = field->kw_mask[0] & cam1; 2753 + 2754 + str = (cfg & 1) ? 
"enabled" : "disabled"; 2755 + 2756 + seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i, 2757 + rvu_read64(rvu, blkaddr, 2758 + NPC_AF_MATCH_STATX(table->counter_idx[i])), 2759 + chan, str); 2760 + } 2761 + 2762 + return 0; 2763 + } 2764 + 2765 + RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL); 2766 + 2604 2767 static void rvu_dbg_npc_init(struct rvu *rvu) 2605 2768 { 2606 2769 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); ··· 2773 2608 &rvu_dbg_npc_mcam_info_fops); 2774 2609 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu, 2775 2610 &rvu_dbg_npc_mcam_rules_fops); 2611 + 2776 2612 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu, 2777 2613 &rvu_dbg_npc_rx_miss_act_fops); 2614 + 2615 + if (!rvu->hw->cap.npc_exact_match_enabled) 2616 + return; 2617 + 2618 + debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu, 2619 + &rvu_dbg_npc_exact_entries_fops); 2620 + 2621 + debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu, 2622 + &rvu_dbg_npc_exact_info_fops); 2623 + 2624 + debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu, 2625 + &rvu_dbg_npc_exact_drop_cnt_fops); 2626 + 2778 2627 } 2779 2628 2780 2629 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
+69 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
··· 10 10 #include "rvu.h" 11 11 #include "rvu_reg.h" 12 12 #include "rvu_struct.h" 13 + #include "rvu_npc_hash.h" 13 14 14 15 #define DRV_NAME "octeontx2-af" 15 16 ··· 1437 1436 enum rvu_af_dl_param_id { 1438 1437 RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 1439 1438 RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, 1439 + RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, 1440 1440 }; 1441 + 1442 + static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id, 1443 + struct devlink_param_gset_ctx *ctx) 1444 + { 1445 + struct rvu_devlink *rvu_dl = devlink_priv(devlink); 1446 + struct rvu *rvu = rvu_dl->rvu; 1447 + bool enabled; 1448 + 1449 + enabled = rvu_npc_exact_has_match_table(rvu); 1450 + 1451 + snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", 1452 + enabled ? "enabled" : "disabled"); 1453 + 1454 + return 0; 1455 + } 1456 + 1457 + static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id, 1458 + struct devlink_param_gset_ctx *ctx) 1459 + { 1460 + struct rvu_devlink *rvu_dl = devlink_priv(devlink); 1461 + struct rvu *rvu = rvu_dl->rvu; 1462 + 1463 + rvu_npc_exact_disable_feature(rvu); 1464 + 1465 + return 0; 1466 + } 1467 + 1468 + static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id, 1469 + union devlink_param_value val, 1470 + struct netlink_ext_ack *extack) 1471 + { 1472 + struct rvu_devlink *rvu_dl = devlink_priv(devlink); 1473 + struct rvu *rvu = rvu_dl->rvu; 1474 + u64 enable; 1475 + 1476 + if (kstrtoull(val.vstr, 10, &enable)) { 1477 + NL_SET_ERR_MSG_MOD(extack, 1478 + "Only 1 value is supported"); 1479 + return -EINVAL; 1480 + } 1481 + 1482 + if (enable != 1) { 1483 + NL_SET_ERR_MSG_MOD(extack, 1484 + "Only disabling exact match feature is supported"); 1485 + return -EINVAL; 1486 + } 1487 + 1488 + if (rvu_npc_exact_can_disable_feature(rvu)) 1489 + return 0; 1490 + 1491 + NL_SET_ERR_MSG_MOD(extack, 1492 + "Can't disable exact match feature; Please try before any configuration"); 1493 + return 
-EFAULT; 1494 + } 1441 1495 1442 1496 static const struct devlink_param rvu_af_dl_params[] = { 1443 1497 DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, ··· 1500 1444 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1501 1445 rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set, 1502 1446 rvu_af_dl_dwrr_mtu_validate), 1447 + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, 1448 + "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING, 1449 + BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1450 + rvu_af_npc_exact_feature_get, 1451 + rvu_af_npc_exact_feature_disable, 1452 + rvu_af_npc_exact_feature_validate), 1503 1453 }; 1504 1454 1505 1455 /* Devlink switch mode */ ··· 1563 1501 { 1564 1502 struct rvu_devlink *rvu_dl; 1565 1503 struct devlink *dl; 1504 + size_t size; 1566 1505 int err; 1567 1506 1568 1507 dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink), ··· 1585 1522 goto err_dl_health; 1586 1523 } 1587 1524 1588 - err = devlink_params_register(dl, rvu_af_dl_params, 1589 - ARRAY_SIZE(rvu_af_dl_params)); 1525 + /* Register exact match devlink only for CN10K-B */ 1526 + size = ARRAY_SIZE(rvu_af_dl_params); 1527 + if (!rvu_npc_exact_has_match_table(rvu)) 1528 + size -= 1; 1529 + 1530 + err = devlink_params_register(dl, rvu_af_dl_params, size); 1590 1531 if (err) { 1591 1532 dev_err(rvu->dev, 1592 1533 "devlink params register failed with error %d", err);
+7
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 14 14 #include "npc.h" 15 15 #include "cgx.h" 16 16 #include "lmac_common.h" 17 + #include "rvu_npc_hash.h" 17 18 18 19 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 19 20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, ··· 3793 3792 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3794 3793 pfvf->rx_chan_base, 3795 3794 pfvf->rx_chan_cnt); 3795 + 3796 + if (rvu_npc_exact_has_match_table(rvu)) 3797 + rvu_npc_exact_promisc_enable(rvu, pcifunc); 3796 3798 } else { 3797 3799 if (!nix_rx_multicast) 3798 3800 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 3801 + 3802 + if (rvu_npc_exact_has_match_table(rvu)) 3803 + rvu_npc_exact_promisc_disable(rvu, pcifunc); 3799 3804 } 3800 3805 3801 3806 return 0;
+42 -9
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
··· 15 15 #include "npc.h" 16 16 #include "cgx.h" 17 17 #include "npc_profile.h" 18 + #include "rvu_npc_hash.h" 18 19 19 20 #define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */ 20 21 #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ ··· 1106 1105 NIXLF_PROMISC_ENTRY, false); 1107 1106 } 1108 1107 1108 + bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable) 1109 + { 1110 + int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1111 + struct npc_mcam *mcam = &rvu->hw->mcam; 1112 + struct rvu_npc_mcam_rule *rule, *tmp; 1113 + 1114 + mutex_lock(&mcam->lock); 1115 + 1116 + list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { 1117 + if (rule->intf != intf) 1118 + continue; 1119 + 1120 + if (rule->entry != entry) 1121 + continue; 1122 + 1123 + rule->enable = enable; 1124 + mutex_unlock(&mcam->lock); 1125 + 1126 + npc_enable_mcam_entry(rvu, mcam, blkaddr, 1127 + entry, enable); 1128 + 1129 + return true; 1130 + } 1131 + 1132 + mutex_unlock(&mcam->lock); 1133 + return false; 1134 + } 1135 + 1109 1136 void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) 1110 1137 { 1111 1138 /* Enables only broadcast match entry. 
Promisc/Allmulti are enabled ··· 1210 1181 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1211 1182 } 1212 1183 1213 - #define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ 1214 - rvu_write64(rvu, blkaddr, \ 1215 - NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) 1216 - 1217 - #define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ 1218 - rvu_write64(rvu, blkaddr, \ 1219 - NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) 1220 - 1221 1184 static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr, 1222 1185 struct npc_mcam_kex *mkex, u8 intf) 1223 1186 { ··· 1283 1262 npc_program_mkex_rx(rvu, blkaddr, mkex, intf); 1284 1263 npc_program_mkex_tx(rvu, blkaddr, mkex, intf); 1285 1264 } 1265 + 1266 + /* Programme mkex hash profile */ 1267 + npc_program_mkex_hash(rvu, blkaddr); 1286 1268 } 1287 1269 1288 1270 static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr, ··· 1487 1463 profile->kpus = ARRAY_SIZE(npc_kpu_profiles); 1488 1464 profile->lt_def = &npc_lt_defaults; 1489 1465 profile->mkex = &npc_mkex_default; 1466 + profile->mkex_hash = &npc_mkex_hash_default; 1490 1467 1491 1468 return 0; 1492 1469 } ··· 1844 1819 mcam->hprio_count = mcam->lprio_count; 1845 1820 mcam->hprio_end = mcam->hprio_count; 1846 1821 1847 - 1848 1822 /* Allocate bitmap for managing MCAM counters and memory 1849 1823 * for saving counter to RVU PFFUNC allocation mapping. 
1850 1824 */ ··· 2071 2047 2072 2048 rvu_npc_setup_interfaces(rvu, blkaddr); 2073 2049 2050 + npc_config_secret_key(rvu, blkaddr); 2074 2051 /* Configure MKEX profile */ 2075 2052 npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name); 2076 2053 ··· 2585 2560 2586 2561 mutex_unlock(&mcam->lock); 2587 2562 return 0; 2563 + } 2564 + 2565 + /* Marks bitmaps to reserved the mcam slot */ 2566 + void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx) 2567 + { 2568 + struct npc_mcam *mcam = &rvu->hw->mcam; 2569 + 2570 + npc_mcam_set_bit(mcam, entry_idx); 2588 2571 } 2589 2572 2590 2573 int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+148 -14
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
··· 10 10 #include "rvu_reg.h" 11 11 #include "rvu.h" 12 12 #include "npc.h" 13 + #include "rvu_npc_fs.h" 14 + #include "npc_profile.h" 15 + #include "rvu_npc_hash.h" 13 16 14 17 #define NPC_BYTESM GENMASK_ULL(19, 16) 15 18 #define NPC_HDR_OFFSET GENMASK_ULL(15, 8) ··· 230 227 return true; 231 228 } 232 229 230 + static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number, 231 + u8 key_nibble, u8 intf) 232 + { 233 + u8 offset = (key_nibble * 4) % 64; /* offset within key word */ 234 + u8 kwi = (key_nibble * 4) / 64; /* which word in key */ 235 + u8 nr_bits = 4; /* bits in a nibble */ 236 + u8 type; 237 + 238 + switch (bit_number) { 239 + case 40 ... 43: 240 + type = NPC_EXACT_RESULT; 241 + break; 242 + 243 + default: 244 + return; 245 + } 246 + npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); 247 + } 248 + 233 249 static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number, 234 250 u8 key_nibble, u8 intf) 235 251 { ··· 298 276 default: 299 277 return; 300 278 } 279 + 301 280 npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); 302 281 } 303 282 ··· 532 509 { 533 510 struct npc_mcam *mcam = &rvu->hw->mcam; 534 511 u8 lid, lt, ld, bitnr; 512 + u64 cfg, masked_cfg; 535 513 u8 key_nibble = 0; 536 - u64 cfg; 537 514 538 515 /* Scan and note how parse result is going to be in key. 539 516 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from ··· 541 518 * will be concatenated in key. 542 519 */ 543 520 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf)); 544 - cfg &= NPC_PARSE_NIBBLE; 545 - for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) { 521 + masked_cfg = cfg & NPC_PARSE_NIBBLE; 522 + for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) { 546 523 npc_scan_parse_result(mcam, bitnr, key_nibble, intf); 524 + key_nibble++; 525 + } 526 + 527 + /* Ignore exact match bits for mcam entries except the first rule 528 + * which is drop on hit. This first rule is configured explitcitly by 529 + * exact match code. 
530 + */ 531 + masked_cfg = cfg & NPC_EXACT_NIBBLE; 532 + bitnr = NPC_EXACT_NIBBLE_START; 533 + for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, 534 + NPC_EXACT_NIBBLE_START) { 535 + npc_scan_exact_result(mcam, bitnr, key_nibble, intf); 547 536 key_nibble++; 548 537 } 549 538 ··· 659 624 * If any bits in mask are 0 then corresponding bits in value are 660 625 * dont care. 661 626 */ 662 - static void npc_update_entry(struct rvu *rvu, enum key_fields type, 663 - struct mcam_entry *entry, u64 val_lo, 664 - u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf) 627 + void npc_update_entry(struct rvu *rvu, enum key_fields type, 628 + struct mcam_entry *entry, u64 val_lo, 629 + u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf) 665 630 { 666 631 struct npc_mcam *mcam = &rvu->hw->mcam; 667 632 struct mcam_entry dummy = { {0} }; ··· 740 705 } 741 706 } 742 707 743 - #define IPV6_WORDS 4 744 - 745 708 static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, 746 709 u64 features, struct flow_msg *pkt, 747 710 struct flow_msg *mask, ··· 812 779 static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, 813 780 u64 features, struct flow_msg *pkt, 814 781 struct flow_msg *mask, 815 - struct rvu_npc_mcam_rule *output, u8 intf) 782 + struct rvu_npc_mcam_rule *output, u8 intf, 783 + int blkaddr) 816 784 { 817 785 u64 dmac_mask = ether_addr_to_u64(mask->dmac); 818 786 u64 smac_mask = ether_addr_to_u64(mask->smac); ··· 862 828 } while (0) 863 829 864 830 NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0); 831 + 865 832 NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); 866 833 NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, 867 834 ntohs(mask->etype), 0); ··· 889 854 890 855 npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); 891 856 npc_update_vlan_features(rvu, entry, features, intf); 857 + 858 + npc_update_field_hash(rvu, intf, entry, blkaddr, features, 859 + pkt, mask, opkt, omask); 892 860 } 893 861 894 - 
static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, 895 - u16 entry) 862 + static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry) 896 863 { 897 864 struct rvu_npc_mcam_rule *iter; 898 865 ··· 1060 1023 u16 owner = req->hdr.pcifunc; 1061 1024 struct msg_rsp write_rsp; 1062 1025 struct mcam_entry *entry; 1063 - int entry_index, err; 1064 1026 bool new = false; 1027 + u16 entry_index; 1028 + int err; 1065 1029 1066 1030 installed_features = req->features; 1067 1031 features = req->features; ··· 1070 1032 entry_index = req->entry; 1071 1033 1072 1034 npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy, 1073 - req->intf); 1035 + req->intf, blkaddr); 1074 1036 1075 1037 if (is_npc_intf_rx(req->intf)) 1076 1038 npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); ··· 1095 1057 npc_update_flow(rvu, entry, missing_features, 1096 1058 &def_ucast_rule->packet, 1097 1059 &def_ucast_rule->mask, 1098 - &dummy, req->intf); 1060 + &dummy, req->intf, 1061 + blkaddr); 1099 1062 installed_features = req->features | missing_features; 1100 1063 } 1101 1064 ··· 1462 1423 index, false); 1463 1424 } 1464 1425 mutex_unlock(&mcam->lock); 1426 + } 1427 + 1428 + /* single drop on non hit rule starting from 0th index. This an extension 1429 + * to RPM mac filter to support more rules. 
1430 + */ 1431 + int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx, 1432 + u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask, 1433 + u64 bcast_mcast_val, u64 bcast_mcast_mask) 1434 + { 1435 + struct npc_mcam_alloc_counter_req cntr_req = { 0 }; 1436 + struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; 1437 + struct npc_mcam_write_entry_req req = { 0 }; 1438 + struct npc_mcam *mcam = &rvu->hw->mcam; 1439 + struct rvu_npc_mcam_rule *rule; 1440 + struct msg_rsp rsp; 1441 + bool enabled; 1442 + int blkaddr; 1443 + int err; 1444 + 1445 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1446 + if (blkaddr < 0) { 1447 + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1448 + return -ENODEV; 1449 + } 1450 + 1451 + /* Bail out if no exact match support */ 1452 + if (!rvu_npc_exact_has_match_table(rvu)) { 1453 + dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__); 1454 + return -EINVAL; 1455 + } 1456 + 1457 + /* If 0th entry is already used, return err */ 1458 + enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx); 1459 + if (enabled) { 1460 + dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n", 1461 + __func__, mcam_idx); 1462 + return -EINVAL; 1463 + } 1464 + 1465 + /* Add this entry to mcam rules list */ 1466 + rule = kzalloc(sizeof(*rule), GFP_KERNEL); 1467 + if (!rule) 1468 + return -ENOMEM; 1469 + 1470 + /* Disable rule by default. 
Enable rule when first dmac filter is 1471 + * installed 1472 + */ 1473 + rule->enable = false; 1474 + rule->chan = chan_val; 1475 + rule->chan_mask = chan_mask; 1476 + rule->entry = mcam_idx; 1477 + rvu_mcam_add_rule(mcam, rule); 1478 + 1479 + /* Reserve slot 0 */ 1480 + npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx); 1481 + 1482 + /* Allocate counter for this single drop on non hit rule */ 1483 + cntr_req.hdr.pcifunc = 0; /* AF request */ 1484 + cntr_req.contig = true; 1485 + cntr_req.count = 1; 1486 + err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); 1487 + if (err) { 1488 + dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n", 1489 + __func__, err); 1490 + return -EFAULT; 1491 + } 1492 + *counter_idx = cntr_rsp.cntr; 1493 + 1494 + /* Fill in fields for this mcam entry */ 1495 + npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0, 1496 + exact_mask, 0, NIX_INTF_RX); 1497 + npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0, 1498 + chan_mask, 0, NIX_INTF_RX); 1499 + npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0, 1500 + bcast_mcast_mask, 0, NIX_INTF_RX); 1501 + 1502 + req.intf = NIX_INTF_RX; 1503 + req.set_cntr = true; 1504 + req.cntr = cntr_rsp.cntr; 1505 + req.entry = mcam_idx; 1506 + 1507 + err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp); 1508 + if (err) { 1509 + dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n", 1510 + __func__, mcam_idx); 1511 + return err; 1512 + } 1513 + 1514 + dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n", 1515 + __func__, mcam_idx, req.cntr); 1516 + 1517 + /* disable entry at Bank 0, index 0 */ 1518 + npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false); 1519 + 1520 + return 0; 1465 1521 }
+17
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2022 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef __RVU_NPC_FS_H 9 + #define __RVU_NPC_FS_H 10 + 11 + #define IPV6_WORDS 4 12 + 13 + void npc_update_entry(struct rvu *rvu, enum key_fields type, 14 + struct mcam_entry *entry, u64 val_lo, 15 + u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf); 16 + 17 + #endif /* RVU_NPC_FS_H */
+1958
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2022 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/bitfield.h> 9 + #include <linux/module.h> 10 + #include <linux/pci.h> 11 + #include <linux/firmware.h> 12 + #include <linux/stddef.h> 13 + #include <linux/debugfs.h> 14 + #include <linux/bitfield.h> 15 + 16 + #include "rvu_struct.h" 17 + #include "rvu_reg.h" 18 + #include "rvu.h" 19 + #include "npc.h" 20 + #include "cgx.h" 21 + #include "rvu_npc_hash.h" 22 + #include "rvu_npc_fs.h" 23 + #include "rvu_npc_hash.h" 24 + 25 + static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit, 26 + size_t width_bits) 27 + { 28 + const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits); 29 + const size_t msb = start_bit + width_bits - 1; 30 + const size_t lword = start_bit >> 6; 31 + const size_t uword = msb >> 6; 32 + size_t lbits; 33 + u64 hi, lo; 34 + 35 + if (lword == uword) 36 + return (input[lword] >> (start_bit & 63)) & mask; 37 + 38 + lbits = 64 - (start_bit & 63); 39 + hi = input[uword]; 40 + lo = (input[lword] >> (start_bit & 63)); 41 + return ((hi << lbits) | lo) & mask; 42 + } 43 + 44 + static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len) 45 + { 46 + u64 prev_orig_word = 0; 47 + u64 cur_orig_word = 0; 48 + size_t extra = key_bit_len % 64; 49 + size_t max_idx = key_bit_len / 64; 50 + size_t i; 51 + 52 + if (extra) 53 + max_idx++; 54 + 55 + for (i = 0; i < max_idx; i++) { 56 + cur_orig_word = key[i]; 57 + key[i] = key[i] << 1; 58 + key[i] |= ((prev_orig_word >> 63) & 0x1); 59 + prev_orig_word = cur_orig_word; 60 + } 61 + } 62 + 63 + static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len, 64 + size_t key_bit_len) 65 + { 66 + u32 hash_out = 0; 67 + u64 temp_data = 0; 68 + int i; 69 + 70 + for (i = data_bit_len - 1; i >= 0; i--) { 71 + temp_data = (data[i / 64]); 72 + temp_data = temp_data >> (i % 64); 73 + temp_data &= 0x1; 74 + if (temp_data) 75 + hash_out ^= 
(u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32)); 76 + 77 + rvu_npc_lshift_key(key, key_bit_len); 78 + } 79 + 80 + return hash_out; 81 + } 82 + 83 + u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash, 84 + u64 *secret_key, u8 intf, u8 hash_idx) 85 + { 86 + u64 hash_key[3]; 87 + u64 data_padded[2]; 88 + u32 field_hash; 89 + 90 + hash_key[0] = secret_key[1] << 31; 91 + hash_key[0] |= secret_key[2]; 92 + hash_key[1] = secret_key[1] >> 33; 93 + hash_key[1] |= secret_key[0] << 31; 94 + hash_key[2] = secret_key[0] >> 33; 95 + 96 + data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0]; 97 + data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1]; 98 + field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159); 99 + 100 + field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32; 101 + field_hash |= mkex_hash->hash_ctrl[intf][hash_idx]; 102 + return field_hash; 103 + } 104 + 105 + static u64 npc_update_use_hash(int lt, int ld) 106 + { 107 + u64 cfg = 0; 108 + 109 + switch (lt) { 110 + case NPC_LT_LC_IP6: 111 + /* Update use_hash(bit-20) and bytesm1 (bit-16:19) 112 + * in KEX_LD_CFG 113 + */ 114 + cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03, 115 + ld ? 
0x8 : 0x18, 116 + 0x1, 0x0, 0x10); 117 + break; 118 + } 119 + 120 + return cfg; 121 + } 122 + 123 + static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr, 124 + u8 intf) 125 + { 126 + struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 127 + int lid, lt, ld, hash_cnt = 0; 128 + 129 + if (is_npc_intf_tx(intf)) 130 + return; 131 + 132 + /* Program HASH_CFG */ 133 + for (lid = 0; lid < NPC_MAX_LID; lid++) { 134 + for (lt = 0; lt < NPC_MAX_LT; lt++) { 135 + for (ld = 0; ld < NPC_MAX_LD; ld++) { 136 + if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { 137 + u64 cfg = npc_update_use_hash(lt, ld); 138 + 139 + hash_cnt++; 140 + if (hash_cnt == NPC_MAX_HASH) 141 + return; 142 + 143 + /* Set updated KEX configuration */ 144 + SET_KEX_LD(intf, lid, lt, ld, cfg); 145 + /* Set HASH configuration */ 146 + SET_KEX_LD_HASH(intf, ld, 147 + mkex_hash->hash[intf][ld]); 148 + SET_KEX_LD_HASH_MASK(intf, ld, 0, 149 + mkex_hash->hash_mask[intf][ld][0]); 150 + SET_KEX_LD_HASH_MASK(intf, ld, 1, 151 + mkex_hash->hash_mask[intf][ld][1]); 152 + SET_KEX_LD_HASH_CTRL(intf, ld, 153 + mkex_hash->hash_ctrl[intf][ld]); 154 + } 155 + } 156 + } 157 + } 158 + } 159 + 160 + static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr, 161 + u8 intf) 162 + { 163 + struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 164 + int lid, lt, ld, hash_cnt = 0; 165 + 166 + if (is_npc_intf_rx(intf)) 167 + return; 168 + 169 + /* Program HASH_CFG */ 170 + for (lid = 0; lid < NPC_MAX_LID; lid++) { 171 + for (lt = 0; lt < NPC_MAX_LT; lt++) { 172 + for (ld = 0; ld < NPC_MAX_LD; ld++) 173 + if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { 174 + u64 cfg = npc_update_use_hash(lt, ld); 175 + 176 + hash_cnt++; 177 + if (hash_cnt == NPC_MAX_HASH) 178 + return; 179 + 180 + /* Set updated KEX configuration */ 181 + SET_KEX_LD(intf, lid, lt, ld, cfg); 182 + /* Set HASH configuration */ 183 + SET_KEX_LD_HASH(intf, ld, 184 + mkex_hash->hash[intf][ld]); 185 + SET_KEX_LD_HASH_MASK(intf, ld, 0, 
186 + mkex_hash->hash_mask[intf][ld][0]); 187 + SET_KEX_LD_HASH_MASK(intf, ld, 1, 188 + mkex_hash->hash_mask[intf][ld][1]); 189 + SET_KEX_LD_HASH_CTRL(intf, ld, 190 + mkex_hash->hash_ctrl[intf][ld]); 191 + hash_cnt++; 192 + if (hash_cnt == NPC_MAX_HASH) 193 + return; 194 + } 195 + } 196 + } 197 + } 198 + 199 + void npc_config_secret_key(struct rvu *rvu, int blkaddr) 200 + { 201 + struct hw_cap *hwcap = &rvu->hw->cap; 202 + struct rvu_hwinfo *hw = rvu->hw; 203 + u8 intf; 204 + 205 + if (!hwcap->npc_hash_extract) { 206 + dev_info(rvu->dev, "HW does not support secret key configuration\n"); 207 + return; 208 + } 209 + 210 + for (intf = 0; intf < hw->npc_intfs; intf++) { 211 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf), 212 + RVU_NPC_HASH_SECRET_KEY0); 213 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf), 214 + RVU_NPC_HASH_SECRET_KEY1); 215 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf), 216 + RVU_NPC_HASH_SECRET_KEY2); 217 + } 218 + } 219 + 220 + void npc_program_mkex_hash(struct rvu *rvu, int blkaddr) 221 + { 222 + struct hw_cap *hwcap = &rvu->hw->cap; 223 + struct rvu_hwinfo *hw = rvu->hw; 224 + u8 intf; 225 + 226 + if (!hwcap->npc_hash_extract) { 227 + dev_dbg(rvu->dev, "Field hash extract feature is not supported\n"); 228 + return; 229 + } 230 + 231 + for (intf = 0; intf < hw->npc_intfs; intf++) { 232 + npc_program_mkex_hash_rx(rvu, blkaddr, intf); 233 + npc_program_mkex_hash_tx(rvu, blkaddr, intf); 234 + } 235 + } 236 + 237 + void npc_update_field_hash(struct rvu *rvu, u8 intf, 238 + struct mcam_entry *entry, 239 + int blkaddr, 240 + u64 features, 241 + struct flow_msg *pkt, 242 + struct flow_msg *mask, 243 + struct flow_msg *opkt, 244 + struct flow_msg *omask) 245 + { 246 + struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 247 + struct npc_get_secret_key_req req; 248 + struct npc_get_secret_key_rsp rsp; 249 + u64 ldata[2], cfg; 250 + u32 field_hash; 251 + u8 hash_idx; 252 + 253 + if (!rvu->hw->cap.npc_hash_extract) { 
254 + dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__); 255 + return; 256 + } 257 + 258 + req.intf = intf; 259 + rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp); 260 + 261 + for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) { 262 + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx)); 263 + if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) { 264 + u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8; 265 + u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4; 266 + u8 ltype_mask = cfg & GENMASK_ULL(3, 0); 267 + 268 + if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) { 269 + switch (ltype & ltype_mask) { 270 + /* If hash extract enabled is supported for IPv6 then 271 + * 128 bit IPv6 source and destination addressed 272 + * is hashed to 32 bit value. 273 + */ 274 + case NPC_LT_LC_IP6: 275 + if (features & BIT_ULL(NPC_SIP_IPV6)) { 276 + u32 src_ip[IPV6_WORDS]; 277 + 278 + be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS); 279 + ldata[0] = (u64)src_ip[0] << 32 | src_ip[1]; 280 + ldata[1] = (u64)src_ip[2] << 32 | src_ip[3]; 281 + field_hash = npc_field_hash_calc(ldata, 282 + mkex_hash, 283 + rsp.secret_key, 284 + intf, 285 + hash_idx); 286 + npc_update_entry(rvu, NPC_SIP_IPV6, entry, 287 + field_hash, 0, 32, 0, intf); 288 + memcpy(&opkt->ip6src, &pkt->ip6src, 289 + sizeof(pkt->ip6src)); 290 + memcpy(&omask->ip6src, &mask->ip6src, 291 + sizeof(mask->ip6src)); 292 + break; 293 + } 294 + 295 + if (features & BIT_ULL(NPC_DIP_IPV6)) { 296 + u32 dst_ip[IPV6_WORDS]; 297 + 298 + be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS); 299 + ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1]; 300 + ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3]; 301 + field_hash = npc_field_hash_calc(ldata, 302 + mkex_hash, 303 + rsp.secret_key, 304 + intf, 305 + hash_idx); 306 + npc_update_entry(rvu, NPC_DIP_IPV6, entry, 307 + field_hash, 0, 32, 0, intf); 308 + memcpy(&opkt->ip6dst, &pkt->ip6dst, 309 + sizeof(pkt->ip6dst)); 310 + memcpy(&omask->ip6dst, 
&mask->ip6dst, 311 + sizeof(mask->ip6dst)); 312 + } 313 + break; 314 + } 315 + } 316 + } 317 + } 318 + } 319 + 320 + int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu, 321 + struct npc_get_secret_key_req *req, 322 + struct npc_get_secret_key_rsp *rsp) 323 + { 324 + u64 *secret_key = rsp->secret_key; 325 + u8 intf = req->intf; 326 + int blkaddr; 327 + 328 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 329 + if (blkaddr < 0) { 330 + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 331 + return -EINVAL; 332 + } 333 + 334 + secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf)); 335 + secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf)); 336 + secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf)); 337 + 338 + return 0; 339 + } 340 + 341 + /** 342 + * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64. 343 + * @macaddr: MAC address. 344 + * Returns mdata for exact match table. 345 + */ 346 + static u64 rvu_npc_exact_mac2u64(u8 *mac_addr) 347 + { 348 + u64 mac = 0; 349 + int index; 350 + 351 + for (index = ETH_ALEN - 1; index >= 0; index--) 352 + mac |= ((u64)*mac_addr++) << (8 * index); 353 + 354 + return mac; 355 + } 356 + 357 + /** 358 + * rvu_exact_prepare_mdata - Make mdata for mcam entry 359 + * @mac: MAC address 360 + * @chan: Channel number. 361 + * @ctype: Channel Type. 362 + * @mask: LDATA mask. 363 + */ 364 + static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask) 365 + { 366 + u64 ldata = rvu_npc_exact_mac2u64(mac); 367 + 368 + /* Please note that mask is 48bit which excludes chan and ctype. 369 + * Increase mask bits if we need to include them as well. 370 + */ 371 + ldata |= ((u64)chan << 48); 372 + ldata |= ((u64)ctype << 60); 373 + ldata &= mask; 374 + ldata = ldata << 2; 375 + 376 + return ldata; 377 + } 378 + 379 + /** 380 + * rvu_exact_calculate_hash - calculate hash index to mem table. 381 + * @rvu: resource virtualization unit. 
382 + * @chan: Channel number 383 + * @ctype: Channel type. 384 + * @mac: MAC address 385 + * @mask: HASH mask. 386 + * @table_depth: Depth of table. 387 + */ 388 + u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac, 389 + u64 mask, u32 table_depth) 390 + { 391 + struct npc_exact_table *table = rvu->hw->table; 392 + u64 hash_key[2]; 393 + u64 key_in[2]; 394 + u64 ldata; 395 + u32 hash; 396 + 397 + key_in[0] = RVU_NPC_HASH_SECRET_KEY0; 398 + key_in[1] = RVU_NPC_HASH_SECRET_KEY2; 399 + 400 + hash_key[0] = key_in[0] << 31; 401 + hash_key[0] |= key_in[1]; 402 + hash_key[1] = key_in[0] >> 33; 403 + 404 + ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask); 405 + 406 + dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__, 407 + ldata, hash_key[1], hash_key[0]); 408 + hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95); 409 + 410 + hash &= table->mem_table.hash_mask; 411 + hash += table->mem_table.hash_offset; 412 + dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash); 413 + 414 + return hash; 415 + } 416 + 417 + /** 418 + * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table. 419 + * @rvu: resource virtualization unit. 420 + * @way: Indicate way to table. 421 + * @index: Hash index to 4 way table. 422 + * 423 + * Searches 4 way table using hash index. Returns 0 on success. 424 + */ 425 + static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way, 426 + u32 *index, unsigned int hash) 427 + { 428 + struct npc_exact_table *table; 429 + int depth, i; 430 + 431 + table = rvu->hw->table; 432 + depth = table->mem_table.depth; 433 + 434 + /* Check all the 4 ways for a free slot. 
*/ 435 + mutex_lock(&table->lock); 436 + for (i = 0; i < table->mem_table.ways; i++) { 437 + if (test_bit(hash + i * depth, table->mem_table.bmap)) 438 + continue; 439 + 440 + set_bit(hash + i * depth, table->mem_table.bmap); 441 + mutex_unlock(&table->lock); 442 + 443 + dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n", 444 + __func__, i, hash); 445 + 446 + *way = i; 447 + *index = hash; 448 + return 0; 449 + } 450 + mutex_unlock(&table->lock); 451 + 452 + dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__, 453 + bitmap_weight(table->mem_table.bmap, table->mem_table.depth)); 454 + return -ENOSPC; 455 + } 456 + 457 + /** 458 + * rvu_npc_exact_free_id - Free seq id from bitmat. 459 + * @rvu: Resource virtualization unit. 460 + * @seq_id: Sequence identifier to be freed. 461 + */ 462 + static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id) 463 + { 464 + struct npc_exact_table *table; 465 + 466 + table = rvu->hw->table; 467 + mutex_lock(&table->lock); 468 + clear_bit(seq_id, table->id_bmap); 469 + mutex_unlock(&table->lock); 470 + dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id); 471 + } 472 + 473 + /** 474 + * rvu_npc_exact_alloc_id - Alloc seq id from bitmap. 475 + * @rvu: Resource virtualization unit. 476 + * @seq_id: Sequence identifier. 
477 + */ 478 + static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id) 479 + { 480 + struct npc_exact_table *table; 481 + u32 idx; 482 + 483 + table = rvu->hw->table; 484 + 485 + mutex_lock(&table->lock); 486 + idx = find_first_zero_bit(table->id_bmap, table->tot_ids); 487 + if (idx == table->tot_ids) { 488 + mutex_unlock(&table->lock); 489 + dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n", 490 + __func__, bitmap_weight(table->id_bmap, table->tot_ids)); 491 + 492 + return false; 493 + } 494 + 495 + /* Mark bit map to indicate that slot is used.*/ 496 + set_bit(idx, table->id_bmap); 497 + mutex_unlock(&table->lock); 498 + 499 + *seq_id = idx; 500 + dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id); 501 + 502 + return true; 503 + } 504 + 505 + /** 506 + * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table. 507 + * @rvu: resource virtualization unit. 508 + * @index: Index to exact CAM table. 509 + */ 510 + static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index) 511 + { 512 + struct npc_exact_table *table; 513 + u32 idx; 514 + 515 + table = rvu->hw->table; 516 + 517 + mutex_lock(&table->lock); 518 + idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth); 519 + if (idx == table->cam_table.depth) { 520 + mutex_unlock(&table->lock); 521 + dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__, 522 + bitmap_weight(table->cam_table.bmap, table->cam_table.depth)); 523 + return -ENOSPC; 524 + } 525 + 526 + /* Mark bit map to indicate that slot is used.*/ 527 + set_bit(idx, table->cam_table.bmap); 528 + mutex_unlock(&table->lock); 529 + 530 + *index = idx; 531 + dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n", 532 + __func__, idx); 533 + return 0; 534 + } 535 + 536 + /** 537 + * rvu_exact_prepare_table_entry - Data for exact match table entry. 538 + * @rvu: Resource virtualization unit. 
539 + * @enable: Enable/Disable entry 540 + * @ctype: Software defined channel type. Currently set as 0. 541 + * @chan: Channel number. 542 + * @mac_addr: Destination mac address. 543 + * returns mdata for exact match table. 544 + */ 545 + static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable, 546 + u8 ctype, u16 chan, u8 *mac_addr) 547 + 548 + { 549 + u64 ldata = rvu_npc_exact_mac2u64(mac_addr); 550 + 551 + /* Enable or disable */ 552 + u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), !!enable); 553 + 554 + /* Set Ctype */ 555 + mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype); 556 + 557 + /* Set chan */ 558 + mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan); 559 + 560 + /* MAC address */ 561 + mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata); 562 + 563 + return mdata; 564 + } 565 + 566 + /** 567 + * rvu_exact_config_secret_key - Configure secret key. 568 + * Returns mdata for exact match table. 569 + */ 570 + static void rvu_exact_config_secret_key(struct rvu *rvu) 571 + { 572 + int blkaddr; 573 + 574 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 575 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX), 576 + RVU_NPC_HASH_SECRET_KEY0); 577 + 578 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX), 579 + RVU_NPC_HASH_SECRET_KEY1); 580 + 581 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX), 582 + RVU_NPC_HASH_SECRET_KEY2); 583 + } 584 + 585 + /** 586 + * rvu_exact_config_search_key - Configure search key 587 + * Returns mdata for exact match table. 
588 + */ 589 + static void rvu_exact_config_search_key(struct rvu *rvu) 590 + { 591 + int blkaddr; 592 + u64 reg_val; 593 + 594 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 595 + 596 + /* HDR offset */ 597 + reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0); 598 + 599 + /* BYTESM1, number of bytes - 1 */ 600 + reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1); 601 + 602 + /* Enable LID and set LID to NPC_LID_LA */ 603 + reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1); 604 + reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA); 605 + 606 + /* Clear layer type based extraction */ 607 + 608 + /* Disable LT_EN */ 609 + reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0); 610 + 611 + /* Set LTYPE_MATCH to 0 */ 612 + reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0); 613 + 614 + /* Set LTYPE_MASK to 0 */ 615 + reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0); 616 + 617 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val); 618 + } 619 + 620 + /** 621 + * rvu_exact_config_result_ctrl - Set exact table hash control 622 + * @rvu: Resource virtualization unit. 623 + * @depth: Depth of Exact match table. 624 + * 625 + * Sets mask and offset for hash for mem table. 626 + */ 627 + static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth) 628 + { 629 + int blkaddr; 630 + u64 reg = 0; 631 + 632 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 633 + 634 + /* Set mask. Note that depth is a power of 2 */ 635 + rvu->hw->table->mem_table.hash_mask = (depth - 1); 636 + reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1)); 637 + 638 + /* Set offset as 0 */ 639 + rvu->hw->table->mem_table.hash_offset = 0; 640 + reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0); 641 + 642 + /* Set reg for RX */ 643 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg); 644 + /* Store hash mask and offset for s/w algorithm */ 645 + } 646 + 647 + /** 648 + * rvu_exact_config_table_mask - Set exact table mask. 649 + * @rvu: Resource virtualization unit. 
650 + */ 651 + static void rvu_exact_config_table_mask(struct rvu *rvu) 652 + { 653 + int blkaddr; 654 + u64 mask = 0; 655 + 656 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 657 + 658 + /* Don't use Ctype */ 659 + mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0); 660 + 661 + /* Set chan */ 662 + mask |= GENMASK_ULL(59, 48); 663 + 664 + /* Full ldata */ 665 + mask |= GENMASK_ULL(47, 0); 666 + 667 + /* Store mask for s/w hash calcualtion */ 668 + rvu->hw->table->mem_table.mask = mask; 669 + 670 + /* Set mask for RX.*/ 671 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask); 672 + } 673 + 674 + /** 675 + * rvu_npc_exact_get_max_entries - Get total number of entries in table. 676 + * @rvu: resource virtualization unit. 677 + */ 678 + u32 rvu_npc_exact_get_max_entries(struct rvu *rvu) 679 + { 680 + struct npc_exact_table *table; 681 + 682 + table = rvu->hw->table; 683 + return table->tot_ids; 684 + } 685 + 686 + /** 687 + * rvu_npc_exact_has_match_table - Checks support for exact match. 688 + * @rvu: resource virtualization unit. 689 + * 690 + */ 691 + bool rvu_npc_exact_has_match_table(struct rvu *rvu) 692 + { 693 + return rvu->hw->cap.npc_exact_match_enabled; 694 + } 695 + 696 + /** 697 + * __rvu_npc_exact_find_entry_by_seq_id - find entry by id 698 + * @rvu: resource virtualization unit. 699 + * @seq_id: Sequence identifier. 700 + * 701 + * Caller should acquire the lock. 
702 + */ 703 + static struct npc_exact_table_entry * 704 + __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id) 705 + { 706 + struct npc_exact_table *table = rvu->hw->table; 707 + struct npc_exact_table_entry *entry = NULL; 708 + struct list_head *lhead; 709 + 710 + lhead = &table->lhead_gbl; 711 + 712 + /* traverse to find the matching entry */ 713 + list_for_each_entry(entry, lhead, glist) { 714 + if (entry->seq_id != seq_id) 715 + continue; 716 + 717 + return entry; 718 + } 719 + 720 + return NULL; 721 + } 722 + 723 + /** 724 + * rvu_npc_exact_add_to_list - Add entry to list 725 + * @rvu: resource virtualization unit. 726 + * @opc_type: OPCODE to select MEM/CAM table. 727 + * @ways: MEM table ways. 728 + * @index: Index in MEM/CAM table. 729 + * @cgx_id: CGX identifier. 730 + * @lamc_id: LMAC identifier. 731 + * @mac_addr: MAC address. 732 + * @chan: Channel number. 733 + * @ctype: Channel Type. 734 + * @seq_id: Sequence identifier 735 + * @cmd: True if function is called by ethtool cmd 736 + * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam. 
737 + * @pcifunc: pci function 738 + */ 739 + static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways, 740 + u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan, 741 + u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc) 742 + { 743 + struct npc_exact_table_entry *entry, *tmp, *iter; 744 + struct npc_exact_table *table = rvu->hw->table; 745 + struct list_head *lhead, *pprev; 746 + 747 + WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS); 748 + 749 + if (!rvu_npc_exact_alloc_id(rvu, seq_id)) { 750 + dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__); 751 + return -EFAULT; 752 + } 753 + 754 + entry = kmalloc(sizeof(*entry), GFP_KERNEL); 755 + if (!entry) { 756 + rvu_npc_exact_free_id(rvu, *seq_id); 757 + dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__); 758 + return -ENOMEM; 759 + } 760 + 761 + mutex_lock(&table->lock); 762 + switch (opc_type) { 763 + case NPC_EXACT_OPC_CAM: 764 + lhead = &table->lhead_cam_tbl_entry; 765 + table->cam_tbl_entry_cnt++; 766 + break; 767 + 768 + case NPC_EXACT_OPC_MEM: 769 + lhead = &table->lhead_mem_tbl_entry[ways]; 770 + table->mem_tbl_entry_cnt++; 771 + break; 772 + 773 + default: 774 + mutex_unlock(&table->lock); 775 + kfree(entry); 776 + rvu_npc_exact_free_id(rvu, *seq_id); 777 + 778 + dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type); 779 + return -EINVAL; 780 + } 781 + 782 + /* Add to global list */ 783 + INIT_LIST_HEAD(&entry->glist); 784 + list_add_tail(&entry->glist, &table->lhead_gbl); 785 + INIT_LIST_HEAD(&entry->list); 786 + entry->index = index; 787 + entry->ways = ways; 788 + entry->opc_type = opc_type; 789 + 790 + entry->pcifunc = pcifunc; 791 + 792 + ether_addr_copy(entry->mac, mac_addr); 793 + entry->chan = chan; 794 + entry->ctype = ctype; 795 + entry->cgx_id = cgx_id; 796 + entry->lmac_id = lmac_id; 797 + 798 + entry->seq_id = *seq_id; 799 + 800 + entry->mcam_idx = mcam_idx; 801 + entry->cmd = cmd; 802 + 803 + pprev = lhead; 804 + 805 + /* 
Insert entry in ascending order of index */ 806 + list_for_each_entry_safe(iter, tmp, lhead, list) { 807 + if (index < iter->index) 808 + break; 809 + 810 + pprev = &iter->list; 811 + } 812 + 813 + /* Add to each table list */ 814 + list_add(&entry->list, pprev); 815 + mutex_unlock(&table->lock); 816 + return 0; 817 + } 818 + 819 + /** 820 + * rvu_npc_exact_mem_table_write - Wrapper for register write 821 + * @rvu: resource virtualization unit. 822 + * @blkaddr: Block address 823 + * @ways: ways for MEM table. 824 + * @index: Index in MEM 825 + * @mdata: Meta data to be written to register. 826 + */ 827 + static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways, 828 + u32 index, u64 mdata) 829 + { 830 + rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata); 831 + } 832 + 833 + /** 834 + * rvu_npc_exact_cam_table_write - Wrapper for register write 835 + * @rvu: resource virtualization unit. 836 + * @blkaddr: Block address 837 + * @index: Index in MEM 838 + * @mdata: Meta data to be written to register. 839 + */ 840 + static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr, 841 + u32 index, u64 mdata) 842 + { 843 + rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata); 844 + } 845 + 846 + /** 847 + * rvu_npc_exact_dealloc_table_entry - dealloc table entry 848 + * @rvu: resource virtualization unit. 849 + * @opc_type: OPCODE for selection of table(MEM or CAM) 850 + * @ways: ways if opc_type is MEM table. 851 + * @index: Index of MEM or CAM table. 
852 + */ 853 + static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type, 854 + u8 ways, u32 index) 855 + { 856 + int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 857 + struct npc_exact_table *table; 858 + u8 null_dmac[6] = { 0 }; 859 + int depth; 860 + 861 + /* Prepare entry with all fields set to zero */ 862 + u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac); 863 + 864 + table = rvu->hw->table; 865 + depth = table->mem_table.depth; 866 + 867 + mutex_lock(&table->lock); 868 + 869 + switch (opc_type) { 870 + case NPC_EXACT_OPC_CAM: 871 + 872 + /* Check whether entry is used already */ 873 + if (!test_bit(index, table->cam_table.bmap)) { 874 + mutex_unlock(&table->lock); 875 + dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n", 876 + __func__, ways, index); 877 + return -EINVAL; 878 + } 879 + 880 + rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata); 881 + clear_bit(index, table->cam_table.bmap); 882 + break; 883 + 884 + case NPC_EXACT_OPC_MEM: 885 + 886 + /* Check whether entry is used already */ 887 + if (!test_bit(index + ways * depth, table->mem_table.bmap)) { 888 + mutex_unlock(&table->lock); 889 + dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n", 890 + __func__, index); 891 + return -EINVAL; 892 + } 893 + 894 + rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata); 895 + clear_bit(index + ways * depth, table->mem_table.bmap); 896 + break; 897 + 898 + default: 899 + mutex_unlock(&table->lock); 900 + dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type); 901 + return -ENOSPC; 902 + } 903 + 904 + mutex_unlock(&table->lock); 905 + 906 + dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n", 907 + __func__, index, ways, opc_type); 908 + 909 + return 0; 910 + } 911 + 912 + /** 913 + * rvu_npc_exact_alloc_table_entry - Allociate an entry 914 + * @rvu: resource virtualization unit. 
 * @mac: MAC address.
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @index: Index of MEM table or CAM table.
 * @ways: Ways. Only valid for MEM table.
 * @opc_type: OPCODE to select table (MEM or CAM)
 *
 * Try allocating a slot from MEM table. If all 4 ways
 * slot are full for a hash index, check availability in
 * 32-entry CAM table for allocation.
 * Return: 0 on success, -ENOSPC if both tables are full.
 */
static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
					   u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
{
	struct npc_exact_table *table;
	unsigned int hash;
	int err;

	table = rvu->hw->table;

	/* Check in 4-ways mem entry for a free slot */
	hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
					table->mem_table.depth);
	err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
	if (!err) {
		*opc_type = NPC_EXACT_OPC_MEM;
		dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
			__func__, *ways, *index);
		return 0;
	}

	dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);

	/* ways is 0 for cam table; fall back to the fully associative CAM */
	*ways = 0;
	err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
	if (!err) {
		*opc_type = NPC_EXACT_OPC_CAM;
		dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
			__func__, *index);
		return 0;
	}

	dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
	return -ENOSPC;
}

/**
 * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
 * @rvu: resource virtualization unit.
 * @drop_mcam_idx: Drop rule index in NPC mcam.
 * @chan_val: Channel value.
 * @chan_mask: Channel Mask.
 * @pcifunc: pcifunc of interface.
 * Return: true if the rule was saved, false on duplicate or a full map.
 */
static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
						       u64 chan_val, u64 chan_mask, u16 pcifunc)
{
	struct npc_exact_table *table;
	int i;

	table = rvu->hw->table;

	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		/* First invalid slot marks the end of the used region */
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
			continue;

		if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
			continue;

		/* Duplicate channel/mask pair; nothing to save */
		return false;
	}

	if (i == NPC_MCAM_DROP_RULE_MAX)
		return false;

	/* 'i' is the first free slot found above */
	table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
	table->drop_rule_map[i].chan_val = (u16)chan_val;
	table->drop_rule_map[i].chan_mask = (u16)chan_mask;
	table->drop_rule_map[i].pcifunc = pcifunc;
	table->drop_rule_map[i].valid = true;
	return true;
}

/**
 * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
 * @rvu: resource virtualization unit.
 * @intf_type: Interface type (SDP, LBK or CGX)
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @val: Channel number.
 * @mask: Channel mask.
 * Return: true if channel/mask were computed (CGX only), false otherwise.
 */
static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
						       u8 cgx_id, u8 lmac_id,
						       u64 *val, u64 *mask)
{
	u16 chan_val, chan_mask;

	/* No support for SDP and LBK */
	if (intf_type != NIX_INTF_TYPE_CGX)
		return false;

	chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
	chan_mask = 0xfff;

	if (val)
		*val = chan_val;

	if (mask)
		*mask = chan_mask;

	return true;
}

/**
 * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
 * @rvu: resource virtualization unit.
 * @drop_rule_idx: Drop rule index in NPC mcam.
 *
 * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
 * by retrieving the pcifunc value from data base.
 * Return: pcifunc of the matching rule, or (u16)-1 (0xFFFF) if not found.
 */
u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
{
	struct npc_exact_table *table;
	int i;

	table = rvu->hw->table;

	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		/* First invalid slot marks the end of the used region */
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
			continue;

		return table->drop_rule_map[i].pcifunc;
	}

	dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
		__func__, drop_rule_idx);
	/* Wraps to 0xFFFF in the u16 return type; callers treat it as "not found" */
	return -1;
}

/**
 * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
 * @rvu: resource virtualization unit.
 * @intf_type: Interface type (CGX, SDP or LBK)
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @drop_mcam_idx: NPC mcam drop rule index.
 * @val: Channel value.
 * @mask: Channel mask.
 * @pcifunc: pcifunc of interface corresponding to the drop rule.
 * Return: true if a matching drop rule was found.
 */
static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
					     u64 *mask, u16 *pcifunc)
{
	struct npc_exact_table *table;
	u64 chan_val, chan_mask;
	bool rc;
	int i;

	table = rvu->hw->table;

	if (intf_type != NIX_INTF_TYPE_CGX) {
		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
		return false;
	}

	/* NOTE(review): rc is not checked; calc only fails for non-CGX
	 * interfaces, which were already rejected above.
	 */
	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
							lmac_id, &chan_val, &chan_mask);

	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
			continue;

		if (val)
			*val = table->drop_rule_map[i].chan_val;
		if (mask)
			*mask = table->drop_rule_map[i].chan_mask;
		if (pcifunc)
			*pcifunc = table->drop_rule_map[i].pcifunc;

		*drop_mcam_idx = i;
		return true;
	}

	if (i == NPC_MCAM_DROP_RULE_MAX) {
		/* NOTE(review): *drop_mcam_idx has not been set on this path,
		 * so the value printed here is the caller's stale input.
		 */
		dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
			__func__, *drop_mcam_idx);
		return false;
	}

	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
		__func__, cgx_id, lmac_id);
	return false;
}

/**
 * __rvu_npc_exact_cmd_rules_cnt_update - Update number of dmac rules against a drop rule.
 * @rvu: resource virtualization unit.
 * @drop_mcam_idx: NPC mcam drop rule index.
 * @val: +1 or -1.
 * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
 *
 * when first exact match entry against a drop rule is added, enable_or_disable_cam
 * is set to true. When last exact match entry against a drop rule is deleted,
 * enable_or_disable_cam is set to true.
1132 + */ 1133 + static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx, 1134 + int val, bool *enable_or_disable_cam) 1135 + { 1136 + struct npc_exact_table *table; 1137 + u16 *cnt, old_cnt; 1138 + bool promisc; 1139 + 1140 + table = rvu->hw->table; 1141 + promisc = table->promisc_mode[drop_mcam_idx]; 1142 + 1143 + cnt = &table->cnt_cmd_rules[drop_mcam_idx]; 1144 + old_cnt = *cnt; 1145 + 1146 + *cnt += val; 1147 + 1148 + if (!enable_or_disable_cam) 1149 + goto done; 1150 + 1151 + *enable_or_disable_cam = false; 1152 + 1153 + if (promisc) 1154 + goto done; 1155 + 1156 + /* If all rules are deleted and not already in promisc mode; disable cam */ 1157 + if (!*cnt && val < 0) { 1158 + *enable_or_disable_cam = true; 1159 + goto done; 1160 + } 1161 + 1162 + /* If rule got added and not already in promisc mode; enable cam */ 1163 + if (!old_cnt && val > 0) { 1164 + *enable_or_disable_cam = true; 1165 + goto done; 1166 + } 1167 + 1168 + done: 1169 + return *cnt; 1170 + } 1171 + 1172 + /** 1173 + * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry. 1174 + * @rvu: resource virtualization unit. 1175 + * @seq_id: Sequence identifier of the entry. 1176 + * 1177 + * Deletes entry from linked lists and free up slot in HW MEM or CAM 1178 + * table. 1179 + */ 1180 + static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id) 1181 + { 1182 + struct npc_exact_table_entry *entry = NULL; 1183 + struct npc_exact_table *table; 1184 + u32 drop_mcam_idx; 1185 + bool disable_cam; 1186 + int *cnt; 1187 + 1188 + table = rvu->hw->table; 1189 + 1190 + mutex_lock(&table->lock); 1191 + 1192 + /* Lookup for entry which needs to be updated */ 1193 + entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id); 1194 + if (!entry) { 1195 + dev_dbg(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, seq_id); 1196 + mutex_unlock(&table->lock); 1197 + return -ENODATA; 1198 + } 1199 + 1200 + cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? 
&table->cam_tbl_entry_cnt : 1201 + &table->mem_tbl_entry_cnt; 1202 + 1203 + /* delete from lists */ 1204 + list_del_init(&entry->list); 1205 + list_del_init(&entry->glist); 1206 + 1207 + (*cnt)--; 1208 + 1209 + rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id, entry->lmac_id, 1210 + &drop_mcam_idx, NULL, NULL, NULL); 1211 + 1212 + if (entry->cmd) 1213 + __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam); 1214 + 1215 + /* No dmac filter rules; disable drop on hit rule */ 1216 + if (disable_cam) { 1217 + rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); 1218 + dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n", 1219 + __func__, drop_mcam_idx); 1220 + } 1221 + 1222 + mutex_unlock(&table->lock); 1223 + 1224 + rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index); 1225 + 1226 + rvu_npc_exact_free_id(rvu, seq_id); 1227 + 1228 + dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n", 1229 + __func__, seq_id, entry->mac); 1230 + kfree(entry); 1231 + 1232 + return 0; 1233 + } 1234 + 1235 + /** 1236 + * rvu_npc_exact_add_table_entry - Adds a table entry 1237 + * @rvu: resource virtualization unit. 1238 + * @cgx_id: cgx identifier. 1239 + * @lmac_id: lmac identifier. 1240 + * @mac: MAC address. 1241 + * @chan: Channel number. 1242 + * @ctype: Channel Type. 1243 + * @seq_id: Sequence number. 1244 + * @cmd: Whether it is invoked by ethtool cmd. 1245 + * @mcam_idx: NPC mcam index corresponding to MAC 1246 + * @pcifunc: PCI func. 1247 + * 1248 + * Creates a new exact match table entry in either CAM or 1249 + * MEM table. 
1250 + */ 1251 + static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac, 1252 + u16 chan, u8 ctype, u32 *seq_id, bool cmd, 1253 + u32 mcam_idx, u16 pcifunc) 1254 + { 1255 + int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1256 + enum npc_exact_opc_type opc_type; 1257 + struct npc_exact_table *table; 1258 + u32 drop_mcam_idx; 1259 + bool enable_cam; 1260 + u32 index; 1261 + u64 mdata; 1262 + int err; 1263 + u8 ways; 1264 + 1265 + table = rvu->hw->table; 1266 + 1267 + ctype = 0; 1268 + 1269 + err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type); 1270 + if (err) { 1271 + dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__); 1272 + return err; 1273 + } 1274 + 1275 + /* Write mdata to table */ 1276 + mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac); 1277 + 1278 + if (opc_type == NPC_EXACT_OPC_CAM) 1279 + rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata); 1280 + else 1281 + rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata); 1282 + 1283 + /* Insert entry to linked list */ 1284 + err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id, 1285 + mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc); 1286 + if (err) { 1287 + rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index); 1288 + dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__); 1289 + return err; 1290 + } 1291 + 1292 + rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id, 1293 + &drop_mcam_idx, NULL, NULL, NULL); 1294 + if (cmd) 1295 + __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam); 1296 + 1297 + /* First command rule; enable drop on hit rule */ 1298 + if (enable_cam) { 1299 + rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true); 1300 + dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n", 1301 + __func__, drop_mcam_idx); 1302 + } 1303 + 1304 + dev_dbg(rvu->dev, 1305 + "%s: 
Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n", 1306 + __func__, index, mac, ways, opc_type); 1307 + 1308 + return 0; 1309 + } 1310 + 1311 + /** 1312 + * rvu_npc_exact_update_table_entry - Update exact match table. 1313 + * @rvu: resource virtualization unit. 1314 + * @cgx_id: CGX identifier. 1315 + * @lamc_id: LMAC identifier. 1316 + * @old_mac: Existing MAC address entry. 1317 + * @new_mac: New MAC address entry. 1318 + * @seq_id: Sequence identifier of the entry. 1319 + * 1320 + * Updates MAC address of an entry. If entry is in MEM table, new 1321 + * hash value may not match with old one. 1322 + */ 1323 + static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, 1324 + u8 *old_mac, u8 *new_mac, u32 *seq_id) 1325 + { 1326 + int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1327 + struct npc_exact_table_entry *entry; 1328 + struct npc_exact_table *table; 1329 + u32 hash_index; 1330 + u64 mdata; 1331 + 1332 + table = rvu->hw->table; 1333 + 1334 + mutex_lock(&table->lock); 1335 + 1336 + /* Lookup for entry which needs to be updated */ 1337 + entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id); 1338 + if (!entry) { 1339 + mutex_unlock(&table->lock); 1340 + dev_dbg(rvu->dev, 1341 + "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n", 1342 + __func__, cgx_id, lmac_id, old_mac); 1343 + return -ENODATA; 1344 + } 1345 + 1346 + /* If entry is in mem table and new hash index is different than old 1347 + * hash index, we cannot update the entry. Fail in these scenarios. 
1348 + */ 1349 + if (entry->opc_type == NPC_EXACT_OPC_MEM) { 1350 + hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype, 1351 + new_mac, table->mem_table.mask, 1352 + table->mem_table.depth); 1353 + if (hash_index != entry->index) { 1354 + dev_dbg(rvu->dev, 1355 + "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n", 1356 + __func__, hash_index, entry->index); 1357 + mutex_unlock(&table->lock); 1358 + return -EINVAL; 1359 + } 1360 + } 1361 + 1362 + mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac); 1363 + 1364 + if (entry->opc_type == NPC_EXACT_OPC_MEM) 1365 + rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata); 1366 + else 1367 + rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata); 1368 + 1369 + /* Update entry fields */ 1370 + ether_addr_copy(entry->mac, new_mac); 1371 + *seq_id = entry->seq_id; 1372 + 1373 + dev_dbg(rvu->dev, 1374 + "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n", 1375 + __func__, hash_index, entry->mac, entry->ways, entry->opc_type); 1376 + 1377 + dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n", 1378 + __func__, old_mac, new_mac); 1379 + 1380 + mutex_unlock(&table->lock); 1381 + return 0; 1382 + } 1383 + 1384 + /** 1385 + * rvu_npc_exact_promisc_disable - Disable promiscuous mode. 1386 + * @rvu: resource virtualization unit. 1387 + * @pcifunc: pcifunc 1388 + * 1389 + * Drop rule is against each PF. We dont support DMAC filter for 1390 + * VF. 
 * Return: 0 upon success, LMAC_AF_ERR_INVALID_PARAM if already disabled.
 */

int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	u32 drop_mcam_idx;
	bool *promisc;
	u32 cnt;

	table = rvu->hw->table;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					 &drop_mcam_idx, NULL, NULL, NULL);

	mutex_lock(&table->lock);
	promisc = &table->promisc_mode[drop_mcam_idx];

	if (!*promisc) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
			__func__, cgx_id, lmac_id);
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = false;
	/* val=0: only fetch the current rule count, no increment/decrement */
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* If no dmac filter entries configured, disable drop rule */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		/* NOTE(review): *promisc is re-read after the lock was dropped */
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}

/**
 * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
 * @rvu: resource virtualization unit.
 * @pcifunc: pcifunc.
 * Return: 0 upon success, LMAC_AF_ERR_INVALID_PARAM if already enabled.
 */
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	u32 drop_mcam_idx;
	bool *promisc;
	u32 cnt;

	table = rvu->hw->table;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					 &drop_mcam_idx, NULL, NULL, NULL);

	mutex_lock(&table->lock);
	promisc = &table->promisc_mode[drop_mcam_idx];

	if (*promisc) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
			__func__, cgx_id, lmac_id);
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = true;
	/* val=0: only fetch the current rule count, no increment/decrement */
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* If no dmac filter entries configured, disable drop rule.
	 * Otherwise !*promisc is false here, so the drop rule is disabled
	 * too — in promisc mode all packets must pass.
	 */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
 * @rvu: resource virtualization unit.
 * @req: Reset request
 * @rsp: Reset response.
 * Return: always 0 (mbox handler; delete failure is only logged).
 */
int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
				 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	int rc;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
	if (rc) {
		/* TODO: how to handle this error case ? */
		dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
		return 0;
	}

	dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
		__func__, pfvf->mac_addr, pf, seq_id);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
 * @rvu: resource virtualization unit.
 * @req: Update request.
 * @rsp: Update response.
 * Return: 0 upon success, LMAC_AF_ERR_* on failure.
 */
int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
				  struct cgx_mac_addr_update_req *req,
				  struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	struct rvu_pfvf *pfvf;
	u32 seq_id, mcam_idx;
	u8 old_mac[ETH_ALEN];
	u8 cgx_id, lmac_id;
	int rc;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
		__func__, req->index, req->mac_addr);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
	if (!entry) {
		dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
		mutex_unlock(&table->lock);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
	}
	/* Snapshot fields needed after dropping the lock */
	ether_addr_copy(old_mac, entry->mac);
	seq_id = entry->seq_id;
	mcam_idx = entry->mcam_idx;
	mutex_unlock(&table->lock);

	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		return 0;
	}

	/* In-place update failed (e.g. MEM-table hash mismatch);
	 * try deleting and adding it again.
	 */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		/* This could be a new entry */
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
			pfvf->mac_addr, pf);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id, true,
					   mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
			req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	dev_dbg(rvu->dev,
		"%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
 * @rvu: resource virtualization unit.
 * @req: Add request.
 * @rsp: Add response.
 * Return: 0 upon success, LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED on failure.
 */
int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
			       struct cgx_mac_addr_add_req *req,
			       struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	int rc = 0;
	u32 seq_id;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* mcam_idx of -1: no associated unicast NPC mcam entry */
	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id,
					   true, -1, req->hdr.pcifunc);

	if (!rc) {
		rsp->index = seq_id;
		dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
			__func__, req->mac_addr, pf, seq_id);
		return 0;
	}

	dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
		req->mac_addr, pf);
	return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
}

/**
 * rvu_npc_exact_mac_addr_del - Delete DMAC filter
 * @rvu: resource virtualization unit.
 * @req: Delete request.
 * @rsp: Delete response.
 * Return: 0 upon success, LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED on failure.
 */
int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
			       struct cgx_mac_addr_del_req *req,
			       struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	int rc;

	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (!rc) {
		dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
			__func__, pf, req->index);
		return 0;
	}

	dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
		__func__, pf, req->index);
	return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
}

/**
 * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
 * @rvu: resource virtualization unit.
 * @req: Set request.
 * @rsp: Set response.
 * Return: 0 upon success, LMAC_AF_ERR_* on failure.
 */
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
			       struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u32 mcam_idx = -1;
	int rc, nixlf;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = &rvu->pf[pf];

	/* If table does not have an entry; both update entry and del table entry API
	 * below fails. Those are not failure conditions.
 */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		ether_addr_copy(rsp->mac_addr, req->mac_addr);
		dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pf);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
			__func__, pfvf->mac_addr, pf);
	}

	/* find mcam entry if exist; otherwise mcam_idx stays -1 */
	rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
	if (!rc) {
		mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
						    nixlf, NIXLF_UCAST_ENTRY);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id,
					   true, mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
			__func__, req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	ether_addr_copy(rsp->mac_addr, req->mac_addr);
	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	dev_dbg(rvu->dev,
		"%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pf, seq_id);
	return 0;
}

/**
 * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
 * @rvu: resource virtualization unit.
 * Return: true if the exact match table is enabled and empty.
 */
bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
{
	struct npc_exact_table *table = rvu->hw->table;
	bool empty;

	if (!rvu->hw->cap.npc_exact_match_enabled)
		return false;

	mutex_lock(&table->lock);
	empty = list_empty(&table->lhead_gbl);
	mutex_unlock(&table->lock);

	return empty;
}

/**
 * rvu_npc_exact_disable_feature - Disable feature.
 * @rvu: resource virtualization unit.
 */
void rvu_npc_exact_disable_feature(struct rvu *rvu)
{
	rvu->hw->cap.npc_exact_match_enabled = false;
}

/**
 * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
 * @rvu: resource virtualization unit.
 * @pcifunc: PCI func to match.
 */
void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table = rvu->hw->table;
	struct npc_exact_table_entry *tmp, *iter;
	u32 seq_id;

	mutex_lock(&table->lock);
	list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
		if (pcifunc != iter->pcifunc)
			continue;

		seq_id = iter->seq_id;
		dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
			pcifunc, seq_id);

		/* NOTE(review): the lock is dropped around the delete, so
		 * 'tmp' may be freed by a concurrent deletion before it is
		 * re-taken — confirm callers serialize this path (FLR).
		 */
		mutex_unlock(&table->lock);
		rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
		mutex_lock(&table->lock);
	}
	mutex_unlock(&table->lock);
}

/**
 * rvu_npc_exact_init - initialize exact match table
 * @rvu: resource virtualization unit.
 *
 * Initialize HW and SW resources to manage 4way-2K table and fully
 * associative 32-entry mcam table.
1776 + */ 1777 + int rvu_npc_exact_init(struct rvu *rvu) 1778 + { 1779 + u64 bcast_mcast_val, bcast_mcast_mask; 1780 + struct npc_exact_table *table; 1781 + u64 exact_val, exact_mask; 1782 + u64 chan_val, chan_mask; 1783 + u8 cgx_id, lmac_id; 1784 + u32 *drop_mcam_idx; 1785 + u16 max_lmac_cnt; 1786 + u64 npc_const3; 1787 + int table_size; 1788 + int blkaddr; 1789 + u16 pcifunc; 1790 + int err, i; 1791 + u64 cfg; 1792 + bool rc; 1793 + 1794 + /* Read NPC_AF_CONST3 and check for have exact 1795 + * match functionality is present 1796 + */ 1797 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1798 + if (blkaddr < 0) { 1799 + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1800 + return -EINVAL; 1801 + } 1802 + 1803 + /* Check exact match feature is supported */ 1804 + npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3); 1805 + if (!(npc_const3 & BIT_ULL(62))) { 1806 + dev_info(rvu->dev, "%s: No support for exact match support\n", 1807 + __func__); 1808 + return 0; 1809 + } 1810 + 1811 + /* Check if kex profile has enabled EXACT match nibble */ 1812 + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); 1813 + if (!(cfg & NPC_EXACT_NIBBLE_HIT)) { 1814 + dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n", 1815 + __func__); 1816 + return 0; 1817 + } 1818 + 1819 + /* Set capability to true */ 1820 + rvu->hw->cap.npc_exact_match_enabled = true; 1821 + 1822 + table = kmalloc(sizeof(*table), GFP_KERNEL); 1823 + if (!table) 1824 + return -ENOMEM; 1825 + 1826 + dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__); 1827 + memset(table, 0, sizeof(*table)); 1828 + rvu->hw->table = table; 1829 + 1830 + /* Read table size, ways and depth */ 1831 + table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3); 1832 + table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3); 1833 + table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3); 1834 + 1835 + dev_dbg(rvu->dev, "%s: NPC 
exact match 4way_2k table(ways=%d, depth=%d)\n", 1836 + __func__, table->mem_table.ways, table->cam_table.depth); 1837 + 1838 + /* Check if depth of table is not a sequre of 2 1839 + * TODO: why _builtin_popcount() is not working ? 1840 + */ 1841 + if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) { 1842 + dev_err(rvu->dev, 1843 + "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n", 1844 + __func__, table->mem_table.depth); 1845 + return -EINVAL; 1846 + } 1847 + 1848 + table_size = table->mem_table.depth * table->mem_table.ways; 1849 + 1850 + /* Allocate bitmap for 4way 2K table */ 1851 + table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size), 1852 + sizeof(long), GFP_KERNEL); 1853 + if (!table->mem_table.bmap) 1854 + return -ENOMEM; 1855 + 1856 + dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__); 1857 + 1858 + /* Allocate bitmap for 32 entry mcam */ 1859 + table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL); 1860 + 1861 + if (!table->cam_table.bmap) 1862 + return -ENOMEM; 1863 + 1864 + dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__); 1865 + 1866 + table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth; 1867 + table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids), 1868 + table->tot_ids, GFP_KERNEL); 1869 + 1870 + if (!table->id_bmap) 1871 + return -ENOMEM; 1872 + 1873 + dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n", 1874 + __func__, table->tot_ids); 1875 + 1876 + /* Initialize list heads for npc_exact_table entries. 1877 + * This entry is used by debugfs to show entries in 1878 + * exact match table. 
1879 + */ 1880 + for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++) 1881 + INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]); 1882 + 1883 + INIT_LIST_HEAD(&table->lhead_cam_tbl_entry); 1884 + INIT_LIST_HEAD(&table->lhead_gbl); 1885 + 1886 + mutex_init(&table->lock); 1887 + 1888 + rvu_exact_config_secret_key(rvu); 1889 + rvu_exact_config_search_key(rvu); 1890 + 1891 + rvu_exact_config_table_mask(rvu); 1892 + rvu_exact_config_result_ctrl(rvu, table->mem_table.depth); 1893 + 1894 + /* - No drop rule for LBK 1895 + * - Drop rules for SDP and each LMAC. 1896 + */ 1897 + exact_val = !NPC_EXACT_RESULT_HIT; 1898 + exact_mask = NPC_EXACT_RESULT_HIT; 1899 + 1900 + /* nibble - 3 2 1 0 1901 + * L3B L3M L2B L2M 1902 + */ 1903 + bcast_mcast_val = 0b0000; 1904 + bcast_mcast_mask = 0b0011; 1905 + 1906 + /* Install SDP drop rule */ 1907 + drop_mcam_idx = &table->num_drop_rules; 1908 + 1909 + max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE; 1910 + for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) { 1911 + if (rvu->pf2cgxlmac_map[i] == 0xFF) 1912 + continue; 1913 + 1914 + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id); 1915 + 1916 + rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id, 1917 + lmac_id, &chan_val, &chan_mask); 1918 + if (!rc) { 1919 + dev_err(rvu->dev, 1920 + "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n", 1921 + __func__, chan_val, chan_mask, *drop_mcam_idx); 1922 + return -EINVAL; 1923 + } 1924 + 1925 + /* Filter rules are only for PF */ 1926 + pcifunc = RVU_PFFUNC(i, 0); 1927 + 1928 + dev_dbg(rvu->dev, 1929 + "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n", 1930 + __func__, cgx_id, lmac_id, chan_val, chan_mask); 1931 + 1932 + rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules, 1933 + chan_val, chan_mask, pcifunc); 1934 + if (!rc) { 1935 + dev_err(rvu->dev, 1936 + "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n", 1937 + __func__, cgx_id, lmac_id, 
chan_val); 1938 + return err; 1939 + } 1940 + 1941 + err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx, 1942 + &table->counter_idx[*drop_mcam_idx], 1943 + chan_val, chan_mask, 1944 + exact_val, exact_mask, 1945 + bcast_mcast_val, bcast_mcast_mask); 1946 + if (err) { 1947 + dev_err(rvu->dev, 1948 + "failed to configure drop rule (cgx=%d lmac=%d)\n", 1949 + cgx_id, lmac_id); 1950 + return err; 1951 + } 1952 + 1953 + (*drop_mcam_idx)++; 1954 + } 1955 + 1956 + dev_info(rvu->dev, "initialized exact match table successfully\n"); 1957 + return 0; 1958 + }
+233
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2022 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef __RVU_NPC_HASH_H 9 + #define __RVU_NPC_HASH_H 10 + 11 + #define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1 12 + #define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4 13 + #define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7 14 + 15 + #define NPC_MAX_HASH 2 16 + #define NPC_MAX_HASH_MASK 2 17 + 18 + #define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \ 19 + ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \ 20 + ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F)) 21 + #define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \ 22 + (((hdr_ofs) << 32) | ((bytesm1) << 16) | \ 23 + ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \ 24 + ((ltype_match) << 4) | ((ltype_mask) & 0xF)) 25 + 26 + #define SET_KEX_LD_HASH(intf, ld, cfg) \ 27 + rvu_write64(rvu, blkaddr, \ 28 + NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg) 29 + 30 + #define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \ 31 + rvu_write64(rvu, blkaddr, \ 32 + NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg) 33 + 34 + #define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \ 35 + rvu_write64(rvu, blkaddr, \ 36 + NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg) 37 + 38 + struct npc_mcam_kex_hash { 39 + /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */ 40 + bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; 41 + /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */ 42 + u64 hash[NPC_MAX_INTF][NPC_MAX_HASH]; 43 + /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */ 44 + u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK]; 45 + /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */ 46 + u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH]; 47 + } __packed; 48 + 49 + void npc_update_field_hash(struct rvu *rvu, u8 intf, 50 + struct mcam_entry *entry, 51 + int blkaddr, 52 + u64 features, 53 + struct flow_msg *pkt, 54 + 
struct flow_msg *mask, 55 + struct flow_msg *opkt, 56 + struct flow_msg *omask); 57 + void npc_config_secret_key(struct rvu *rvu, int blkaddr); 58 + void npc_program_mkex_hash(struct rvu *rvu, int blkaddr); 59 + u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash, 60 + u64 *secret_key, u8 intf, u8 hash_idx); 61 + 62 + static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = { 63 + .lid_lt_ld_hash_en = { 64 + [NIX_INTF_RX] = { 65 + [NPC_LID_LC] = { 66 + [NPC_LT_LC_IP6] = { 67 + true, 68 + true, 69 + }, 70 + }, 71 + }, 72 + 73 + [NIX_INTF_TX] = { 74 + [NPC_LID_LC] = { 75 + [NPC_LT_LC_IP6] = { 76 + true, 77 + true, 78 + }, 79 + }, 80 + }, 81 + }, 82 + 83 + .hash = { 84 + [NIX_INTF_RX] = { 85 + KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 86 + KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 87 + }, 88 + 89 + [NIX_INTF_TX] = { 90 + KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 91 + KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 92 + }, 93 + }, 94 + 95 + .hash_mask = { 96 + [NIX_INTF_RX] = { 97 + [0] = { 98 + GENMASK_ULL(63, 0), 99 + GENMASK_ULL(63, 0), 100 + }, 101 + [1] = { 102 + GENMASK_ULL(63, 0), 103 + GENMASK_ULL(63, 0), 104 + }, 105 + }, 106 + 107 + [NIX_INTF_TX] = { 108 + [0] = { 109 + GENMASK_ULL(63, 0), 110 + GENMASK_ULL(63, 0), 111 + }, 112 + [1] = { 113 + GENMASK_ULL(63, 0), 114 + GENMASK_ULL(63, 0), 115 + }, 116 + }, 117 + }, 118 + 119 + .hash_ctrl = { 120 + [NIX_INTF_RX] = { 121 + [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */ 122 + [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */ 123 + }, 124 + 125 + [NIX_INTF_TX] = { 126 + [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */ 127 + [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. 
*/ 128 + }, 129 + }, 130 + }; 131 + 132 + /* If exact match table support is enabled, enable drop rules */ 133 + #define NPC_MCAM_DROP_RULE_MAX 30 134 + #define NPC_MCAM_SDP_DROP_RULE_IDX 0 135 + 136 + #define RVU_PFFUNC(pf, func) \ 137 + ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \ 138 + (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT)) 139 + 140 + enum npc_exact_opc_type { 141 + NPC_EXACT_OPC_MEM, 142 + NPC_EXACT_OPC_CAM, 143 + }; 144 + 145 + struct npc_exact_table_entry { 146 + struct list_head list; 147 + struct list_head glist; 148 + u32 seq_id; /* Sequence number of entry */ 149 + u32 index; /* Mem table or cam table index */ 150 + u32 mcam_idx; 151 + /* Mcam index. This is valid only if "cmd" field is false */ 152 + enum npc_exact_opc_type opc_type; 153 + u16 chan; 154 + u16 pcifunc; 155 + u8 ways; 156 + u8 mac[ETH_ALEN]; 157 + u8 ctype; 158 + u8 cgx_id; 159 + u8 lmac_id; 160 + bool cmd; /* Is added by ethtool command ? */ 161 + }; 162 + 163 + struct npc_exact_table { 164 + struct mutex lock; /* entries update lock */ 165 + unsigned long *id_bmap; 166 + int num_drop_rules; 167 + u32 tot_ids; 168 + u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX]; 169 + u16 counter_idx[NPC_MCAM_DROP_RULE_MAX]; 170 + bool promisc_mode[NPC_MCAM_DROP_RULE_MAX]; 171 + struct { 172 + int ways; 173 + int depth; 174 + unsigned long *bmap; 175 + u64 mask; // Masks before hash calculation. 
176 + u16 hash_mask; // 11 bits for hash mask 177 + u16 hash_offset; // 11 bits offset 178 + } mem_table; 179 + 180 + struct { 181 + int depth; 182 + unsigned long *bmap; 183 + } cam_table; 184 + 185 + struct { 186 + bool valid; 187 + u16 chan_val; 188 + u16 chan_mask; 189 + u16 pcifunc; 190 + u8 drop_rule_idx; 191 + } drop_rule_map[NPC_MCAM_DROP_RULE_MAX]; 192 + 193 + #define NPC_EXACT_TBL_MAX_WAYS 4 194 + 195 + struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS]; 196 + int mem_tbl_entry_cnt; 197 + 198 + struct list_head lhead_cam_tbl_entry; 199 + int cam_tbl_entry_cnt; 200 + 201 + struct list_head lhead_gbl; 202 + }; 203 + 204 + bool rvu_npc_exact_has_match_table(struct rvu *rvu); 205 + u32 rvu_npc_exact_get_max_entries(struct rvu *rvu); 206 + int rvu_npc_exact_init(struct rvu *rvu); 207 + int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, 208 + struct msg_rsp *rsp); 209 + 210 + int rvu_npc_exact_mac_addr_update(struct rvu *rvu, 211 + struct cgx_mac_addr_update_req *req, 212 + struct cgx_mac_addr_update_rsp *rsp); 213 + 214 + int rvu_npc_exact_mac_addr_add(struct rvu *rvu, 215 + struct cgx_mac_addr_add_req *req, 216 + struct cgx_mac_addr_add_rsp *rsp); 217 + 218 + int rvu_npc_exact_mac_addr_del(struct rvu *rvu, 219 + struct cgx_mac_addr_del_req *req, 220 + struct msg_rsp *rsp); 221 + 222 + int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, 223 + struct cgx_mac_addr_set_or_get *rsp); 224 + 225 + void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc); 226 + 227 + bool rvu_npc_exact_can_disable_feature(struct rvu *rvu); 228 + void rvu_npc_exact_disable_feature(struct rvu *rvu); 229 + void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc); 230 + u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx); 231 + int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc); 232 + int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc); 233 + #endif /* RVU_NPC_HASH_H */
+15
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
··· 565 565 #define NPC_AF_PCK_DEF_OIP4 (0x00620) 566 566 #define NPC_AF_PCK_DEF_OIP6 (0x00630) 567 567 #define NPC_AF_PCK_DEF_IIP4 (0x00640) 568 + #define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3) 569 + #define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3) 568 570 #define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3) 571 + #define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4) 572 + #define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3) 573 + #define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3) 574 + #define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3) 569 575 #define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8) 570 576 #define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6) 571 577 #define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6) ··· 604 598 #define NPC_AF_MCAM_DBG (0x3001000) 605 599 #define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4) 606 600 #define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4) 601 + 602 + #define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3) 603 + #define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3) 604 + #define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3) 605 + #define NPC_AF_INTFX_EXACT_RESULT_CTL(a)(0x680 | (a) << 3) 606 + #define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3) 607 + #define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3) 608 + #define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3) 609 + #define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3) 607 610 608 611 #define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \ 609 612 u64 offset; \
+5 -5
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 314 314 #define OTX2_VF_VLAN_TX_INDEX 1 315 315 u16 max_flows; 316 316 u8 dmacflt_max_flows; 317 - u8 *bmap_to_dmacindex; 318 - unsigned long dmacflt_bmap; 317 + u32 *bmap_to_dmacindex; 318 + unsigned long *dmacflt_bmap; 319 319 struct list_head flow_list; 320 320 }; 321 321 ··· 895 895 int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic); 896 896 /* CGX/RPM DMAC filters support */ 897 897 int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); 898 - int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); 899 - int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); 900 - int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos); 898 + int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); 899 + int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); 900 + int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos); 901 901 void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf); 902 902 void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf); 903 903
+35 -13
drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
··· 8 8 #include "otx2_common.h" 9 9 10 10 static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, 11 - u8 *dmac_index) 11 + u32 *dmac_index) 12 12 { 13 13 struct cgx_mac_addr_add_req *req; 14 14 struct cgx_mac_addr_add_rsp *rsp; ··· 35 35 return err; 36 36 } 37 37 38 - static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) 38 + static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index) 39 39 { 40 40 struct cgx_mac_addr_set_or_get *req; 41 + struct cgx_mac_addr_set_or_get *rsp; 41 42 int err; 42 43 43 44 mutex_lock(&pf->mbox.lock); ··· 49 48 return -ENOMEM; 50 49 } 51 50 51 + req->index = *dmac_index; 52 + 52 53 ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); 53 54 err = otx2_sync_mbox_msg(&pf->mbox); 55 + 56 + if (!err) { 57 + rsp = (struct cgx_mac_addr_set_or_get *) 58 + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); 59 + *dmac_index = rsp->index; 60 + } 54 61 55 62 mutex_unlock(&pf->mbox.lock); 56 63 return err; 57 64 } 58 65 59 - int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) 66 + int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos) 60 67 { 61 - u8 *dmacindex; 68 + u32 *dmacindex; 62 69 63 70 /* Store dmacindex returned by CGX/RPM driver which will 64 71 * be used for macaddr update/remove ··· 74 65 dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 75 66 76 67 if (ether_addr_equal(mac, pf->netdev->dev_addr)) 77 - return otx2_dmacflt_add_pfmac(pf); 68 + return otx2_dmacflt_add_pfmac(pf, dmacindex); 78 69 else 79 70 return otx2_dmacflt_do_add(pf, mac, dmacindex); 80 71 } 81 72 82 73 static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, 83 - u8 dmac_index) 74 + u32 dmac_index) 84 75 { 85 76 struct cgx_mac_addr_del_req *req; 86 77 int err; ··· 100 91 return err; 101 92 } 102 93 103 - static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) 94 + static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index) 104 95 { 105 - struct msg_req *req; 96 + struct 
cgx_mac_addr_reset_req *req; 106 97 int err; 107 98 108 99 mutex_lock(&pf->mbox.lock); ··· 111 102 mutex_unlock(&pf->mbox.lock); 112 103 return -ENOMEM; 113 104 } 105 + req->index = dmac_index; 114 106 115 107 err = otx2_sync_mbox_msg(&pf->mbox); 116 108 ··· 120 110 } 121 111 122 112 int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, 123 - u8 bit_pos) 113 + u32 bit_pos) 124 114 { 125 - u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 115 + u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 126 116 127 117 if (ether_addr_equal(mac, pf->netdev->dev_addr)) 128 - return otx2_dmacflt_remove_pfmac(pf); 118 + return otx2_dmacflt_remove_pfmac(pf, dmacindex); 129 119 else 130 120 return otx2_dmacflt_do_remove(pf, mac, dmacindex); 131 121 } ··· 161 151 return err; 162 152 } 163 153 164 - int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) 154 + int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos) 165 155 { 166 156 struct cgx_mac_addr_update_req *req; 157 + struct cgx_mac_addr_update_rsp *rsp; 167 158 int rc; 168 159 169 160 mutex_lock(&pf->mbox.lock); ··· 178 167 179 168 ether_addr_copy(req->mac_addr, mac); 180 169 req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 181 - rc = otx2_sync_mbox_msg(&pf->mbox); 182 170 171 + /* check the response and change index */ 172 + 173 + rc = otx2_sync_mbox_msg(&pf->mbox); 174 + if (rc) 175 + goto out; 176 + 177 + rsp = (struct cgx_mac_addr_update_rsp *) 178 + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); 179 + 180 + pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index; 181 + 182 + out: 183 183 mutex_unlock(&pf->mbox.lock); 184 184 return rc; 185 185 }
+28 -12
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
··· 18 18 struct ethtool_rx_flow_spec flow_spec; 19 19 struct list_head list; 20 20 u32 location; 21 - u16 entry; 21 + u32 entry; 22 22 bool is_vf; 23 23 u8 rss_ctx_id; 24 24 #define DMAC_FILTER_RULE BIT(0) ··· 232 232 return 0; 233 233 } 234 234 235 + /* TODO : revisit on size */ 236 + #define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32) 237 + 235 238 int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) 236 239 { 237 240 struct otx2_flow_config *flow_cfg; ··· 243 240 sizeof(struct otx2_flow_config), 244 241 GFP_KERNEL); 245 242 if (!pfvf->flow_cfg) 243 + return -ENOMEM; 244 + 245 + pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev, 246 + BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), 247 + sizeof(long), GFP_KERNEL); 248 + if (!pfvf->flow_cfg->dmacflt_bmap) 246 249 return -ENOMEM; 247 250 248 251 flow_cfg = pfvf->flow_cfg; ··· 266 257 pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config), 267 258 GFP_KERNEL); 268 259 if (!pf->flow_cfg) 260 + return -ENOMEM; 261 + 262 + pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev, 263 + BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), 264 + sizeof(long), GFP_KERNEL); 265 + if (!pf->flow_cfg->dmacflt_bmap) 269 266 return -ENOMEM; 270 267 271 268 INIT_LIST_HEAD(&pf->flow_cfg->flow_list); ··· 299 284 return 0; 300 285 301 286 pf->flow_cfg->bmap_to_dmacindex = 302 - devm_kzalloc(pf->dev, sizeof(u8) * 287 + devm_kzalloc(pf->dev, sizeof(u32) * 303 288 pf->flow_cfg->dmacflt_max_flows, 304 289 GFP_KERNEL); 305 290 ··· 370 355 { 371 356 struct otx2_nic *pf = netdev_priv(netdev); 372 357 373 - if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap, 358 + if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap, 374 359 pf->flow_cfg->dmacflt_max_flows)) 375 360 netdev_warn(netdev, 376 361 "Add %pM to CGX/RPM DMAC filters list as well\n", ··· 453 438 return 0; 454 439 455 440 if (flow_cfg->nr_flows == flow_cfg->max_flows || 456 - !bitmap_empty(&flow_cfg->dmacflt_bmap, 441 + !bitmap_empty(flow_cfg->dmacflt_bmap, 457 442 flow_cfg->dmacflt_max_flows)) 458 443 
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows; 459 444 else ··· 1025 1010 1026 1011 otx2_add_flow_to_list(pfvf, pf_mac); 1027 1012 pfvf->flow_cfg->nr_flows++; 1028 - set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); 1013 + set_bit(0, pfvf->flow_cfg->dmacflt_bmap); 1029 1014 1030 1015 return 0; 1031 1016 } ··· 1079 1064 return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 1080 1065 flow->entry); 1081 1066 1082 - if (bitmap_full(&flow_cfg->dmacflt_bmap, 1067 + if (bitmap_full(flow_cfg->dmacflt_bmap, 1083 1068 flow_cfg->dmacflt_max_flows)) { 1084 1069 netdev_warn(pfvf->netdev, 1085 1070 "Can't insert the rule %d as max allowed dmac filters are %d\n", ··· 1093 1078 } 1094 1079 1095 1080 /* Install PF mac address to DMAC filter list */ 1096 - if (!test_bit(0, &flow_cfg->dmacflt_bmap)) 1081 + if (!test_bit(0, flow_cfg->dmacflt_bmap)) 1097 1082 otx2_add_flow_with_pfmac(pfvf, flow); 1098 1083 1099 1084 flow->rule_type |= DMAC_FILTER_RULE; 1100 - flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, 1085 + flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap, 1101 1086 flow_cfg->dmacflt_max_flows); 1102 1087 fsp->location = flow_cfg->max_flows + flow->entry; 1103 1088 flow->flow_spec.location = fsp->location; 1104 1089 flow->location = fsp->location; 1105 1090 1106 - set_bit(flow->entry, &flow_cfg->dmacflt_bmap); 1091 + set_bit(flow->entry, flow_cfg->dmacflt_bmap); 1107 1092 otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); 1108 1093 1109 1094 } else { ··· 1169 1154 if (req == DMAC_ADDR_DEL) { 1170 1155 otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, 1171 1156 0); 1172 - clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); 1157 + clear_bit(0, pfvf->flow_cfg->dmacflt_bmap); 1173 1158 found = true; 1174 1159 } else { 1175 1160 ether_addr_copy(eth_hdr->h_dest, 1176 1161 pfvf->netdev->dev_addr); 1162 + 1177 1163 otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); 1178 1164 } 1179 1165 break; ··· 1210 1194 1211 1195 err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, 1212 1196 
flow->entry); 1213 - clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); 1197 + clear_bit(flow->entry, flow_cfg->dmacflt_bmap); 1214 1198 /* If all dmac filters are removed delete macfilter with 1215 1199 * interface mac address and configure CGX/RPM block in 1216 1200 * promiscuous mode 1217 1201 */ 1218 - if (bitmap_weight(&flow_cfg->dmacflt_bmap, 1202 + if (bitmap_weight(flow_cfg->dmacflt_bmap, 1219 1203 flow_cfg->dmacflt_max_flows) == 1) 1220 1204 otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); 1221 1205 } else {
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1120 1120 struct msg_req *msg; 1121 1121 int err; 1122 1122 1123 - if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap, 1123 + if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap, 1124 1124 pf->flow_cfg->dmacflt_max_flows)) 1125 1125 netdev_warn(pf->netdev, 1126 1126 "CGX/RPM internal loopback might not work as DMAC filters are active\n");