Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'octeontx2-exact-match-table'

Ratheesh Kannoth says:

====================
octeontx2: Exact Match Table.

Exact match table and Field hash support for CN10KB silicon
====================

Link: https://lore.kernel.org/r/20220708044151.2972645-1-rkannoth@marvell.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2902 -68
+1 -1
drivers/net/ethernet/marvell/octeontx2/af/Makefile
··· 11 11 rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ 12 12 rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ 13 13 rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ 14 - rvu_sdp.o 14 + rvu_sdp.o rvu_npc_hash.o
+35 -6
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
··· 169 169 M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ 170 170 cgx_features_info_msg) \ 171 171 M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ 172 - M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \ 172 + M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \ 173 + msg_rsp) \ 173 174 M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \ 174 - msg_rsp) \ 175 + cgx_mac_addr_update_rsp) \ 175 176 M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \ 176 177 cgx_pfc_rsp) \ 177 178 /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ ··· 242 241 M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ 243 242 npc_mcam_get_stats_req, \ 244 243 npc_mcam_get_stats_rsp) \ 244 + M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key, \ 245 + npc_get_secret_key_req, \ 246 + npc_get_secret_key_rsp) \ 245 247 /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ 246 248 M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ 247 249 nix_lf_alloc_req, nix_lf_alloc_rsp) \ ··· 432 428 struct mbox_msghdr hdr; 433 429 u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ 434 430 u8 nix_shaping; /* Is shaping and coloring supported */ 431 + u8 npc_hash_extract; /* Is hash extract supported */ 435 432 }; 436 433 437 434 /* CGX mbox message formats */ ··· 456 451 struct cgx_mac_addr_set_or_get { 457 452 struct mbox_msghdr hdr; 458 453 u8 mac_addr[ETH_ALEN]; 454 + u32 index; 459 455 }; 460 456 461 457 /* Structure for requesting the operation to ··· 472 466 */ 473 467 struct cgx_mac_addr_add_rsp { 474 468 struct mbox_msghdr hdr; 475 - u8 index; 469 + u32 index; 476 470 }; 477 471 478 472 /* Structure for requesting the operation to ··· 480 474 */ 481 475 struct cgx_mac_addr_del_req { 482 476 struct mbox_msghdr hdr; 483 - u8 index; 477 + u32 index; 484 478 }; 485 479 486 480 /* Structure for response against the operation to ··· 488 482 */ 489 483 struct cgx_max_dmac_entries_get_rsp { 490 484 struct mbox_msghdr hdr; 
491 - u8 max_dmac_filters; 485 + u32 max_dmac_filters; 492 486 }; 493 487 494 488 struct cgx_link_user_info { ··· 589 583 int status; 590 584 }; 591 585 586 + struct cgx_mac_addr_reset_req { 587 + struct mbox_msghdr hdr; 588 + u32 index; 589 + }; 590 + 592 591 struct cgx_mac_addr_update_req { 593 592 struct mbox_msghdr hdr; 594 593 u8 mac_addr[ETH_ALEN]; 595 - u8 index; 594 + u32 index; 595 + }; 596 + 597 + struct cgx_mac_addr_update_rsp { 598 + struct mbox_msghdr hdr; 599 + u32 index; 596 600 }; 597 601 598 602 #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ ··· 1456 1440 u8 stat_ena; /* enabled */ 1457 1441 }; 1458 1442 1443 + struct npc_get_secret_key_req { 1444 + struct mbox_msghdr hdr; 1445 + u8 intf; 1446 + }; 1447 + 1448 + struct npc_get_secret_key_rsp { 1449 + struct mbox_msghdr hdr; 1450 + u64 secret_key[3]; 1451 + }; 1452 + 1459 1453 enum ptp_op { 1460 1454 PTP_OP_ADJFINE = 0, 1461 1455 PTP_OP_GET_CLOCK = 1, ··· 1648 1622 LMAC_AF_ERR_PERM_DENIED = -1103, 1649 1623 LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104, 1650 1624 LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105, 1625 + LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108, 1626 + LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109, 1627 + LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110, 1651 1628 }; 1652 1629 1653 1630 #endif /* MBOX_H */
+25
drivers/net/ethernet/marvell/octeontx2/af/npc.h
··· 10 10 11 11 #define NPC_KEX_CHAN_MASK 0xFFFULL 12 12 13 + #define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ 14 + rvu_write64(rvu, blkaddr, \ 15 + NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) 16 + 17 + #define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ 18 + rvu_write64(rvu, blkaddr, \ 19 + NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) 20 + 13 21 enum NPC_LID_E { 14 22 NPC_LID_LA = 0, 15 23 NPC_LID_LB, ··· 208 200 NPC_ERRLEV, 209 201 NPC_ERRCODE, 210 202 NPC_LXMB, 203 + NPC_EXACT_RESULT, 211 204 NPC_LA, 212 205 NPC_LB, 213 206 NPC_LC, ··· 388 379 u64 rsvd_63_61 :3; 389 380 #endif 390 381 }; 382 + 383 + /* NPC_AF_INTFX_KEX_CFG field masks */ 384 + #define NPC_EXACT_NIBBLE_START 40 385 + #define NPC_EXACT_NIBBLE_END 43 386 + #define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40) 387 + 388 + /* NPC_EXACT_KEX_S nibble definitions for each field */ 389 + #define NPC_EXACT_NIBBLE_HIT BIT_ULL(40) 390 + #define NPC_EXACT_NIBBLE_OPC BIT_ULL(40) 391 + #define NPC_EXACT_NIBBLE_WAY BIT_ULL(40) 392 + #define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41) 393 + 394 + #define NPC_EXACT_RESULT_HIT BIT_ULL(0) 395 + #define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1) 396 + #define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3) 397 + #define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5) 391 398 392 399 /* NPC_AF_INTFX_KEX_CFG field masks */ 393 400 #define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+3 -2
drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
··· 155 155 156 156 /* Rx parse key extract nibble enable */ 157 157 #define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \ 158 - NPC_PARSE_NIBBLE_ERRCODE | \ 158 + NPC_PARSE_NIBBLE_L2L3_BCAST | \ 159 159 NPC_PARSE_NIBBLE_LA_LTYPE | \ 160 160 NPC_PARSE_NIBBLE_LB_LTYPE | \ 161 161 NPC_PARSE_NIBBLE_LC_LTYPE | \ ··· 15123 15123 .kpu_version = NPC_KPU_PROFILE_VER, 15124 15124 .keyx_cfg = { 15125 15125 /* nibble: LA..LE (ltype only) + Error code + Channel */ 15126 - [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX, 15126 + [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX | 15127 + (u64)NPC_EXACT_NIBBLE_HIT, 15127 15128 /* nibble: LA..LE (ltype only) */ 15128 15129 [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX, 15129 15130 },
+16
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
··· 18 18 #include "ptp.h" 19 19 20 20 #include "rvu_trace.h" 21 + #include "rvu_npc_hash.h" 21 22 22 23 #define DRV_NAME "rvu_af" 23 24 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" ··· 69 68 hw->cap.nix_tx_link_bp = true; 70 69 hw->cap.nix_rx_multicast = true; 71 70 hw->cap.nix_shaper_toggle_wait = false; 71 + hw->cap.npc_hash_extract = false; 72 + hw->cap.npc_exact_match_enabled = false; 72 73 hw->rvu = rvu; 73 74 74 75 if (is_rvu_pre_96xx_C0(rvu)) { ··· 88 85 89 86 if (!is_rvu_otx2(rvu)) 90 87 hw->cap.per_pf_mbox_regs = true; 88 + 89 + if (is_rvu_npc_hash_extract_en(rvu)) 90 + hw->cap.npc_hash_extract = true; 91 91 } 92 92 93 93 /* Poll a RVU block's register 'offset', for a 'zero' ··· 1128 1122 goto cgx_err; 1129 1123 } 1130 1124 1125 + err = rvu_npc_exact_init(rvu); 1126 + if (err) { 1127 + dev_err(rvu->dev, "failed to initialize exact match table\n"); 1128 + return err; 1129 + } 1130 + 1131 1131 /* Assign MACs for CGX mapped functions */ 1132 1132 rvu_setup_pfvf_macaddress(rvu); 1133 1133 ··· 2003 1991 2004 1992 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; 2005 1993 rsp->nix_shaping = hw->cap.nix_shaping; 1994 + rsp->npc_hash_extract = hw->cap.npc_hash_extract; 2006 1995 2007 1996 return 0; 2008 1997 } ··· 2561 2548 2562 2549 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) 2563 2550 { 2551 + if (rvu_npc_exact_has_match_table(rvu)) 2552 + rvu_npc_exact_reset(rvu, pcifunc); 2553 + 2564 2554 mutex_lock(&rvu->flr_lock); 2565 2555 /* Reset order should reflect inter-block dependencies: 2566 2556 * 1. Reset any packet/work sources (NIX, CPT, TIM)
+23 -1
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
··· 338 338 bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ 339 339 bool programmable_chans; /* Channels programmable ? */ 340 340 bool ipolicer; 341 + bool npc_hash_extract; /* Hash extract enabled ? */ 342 + bool npc_exact_match_enabled; /* Exact match supported ? */ 341 343 }; 342 344 343 345 struct rvu_hwinfo { ··· 371 369 struct rvu *rvu; 372 370 struct npc_pkind pkind; 373 371 struct npc_mcam mcam; 372 + struct npc_exact_table *table; 374 373 }; 375 374 376 375 struct mbox_wq_info { ··· 422 419 const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */ 423 420 const struct npc_kpu_profile *kpu; /* array[kpus] */ 424 421 struct npc_mcam_kex *mkex; 422 + struct npc_mcam_kex_hash *mkex_hash; 425 423 bool custom; 426 424 size_t pkinds; 427 425 size_t kpus; ··· 577 573 return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || 578 574 midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX || 579 575 midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); 576 + } 577 + 578 + static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu) 579 + { 580 + u64 npc_const3; 581 + 582 + npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3); 583 + if (!(npc_const3 & BIT_ULL(62))) 584 + return false; 585 + 586 + return true; 580 587 } 581 588 582 589 static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, ··· 769 754 u32 convert_bytes_to_dwrr_mtu(u32 bytes); 770 755 771 756 /* NPC APIs */ 772 - int rvu_npc_init(struct rvu *rvu); 773 757 void rvu_npc_freemem(struct rvu *rvu); 774 758 int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); 775 759 void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf); ··· 787 773 u64 chan); 788 774 void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, 789 775 bool enable); 776 + 790 777 void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, 791 778 int nixlf, int type, bool enable); 792 779 void rvu_npc_disable_mcam_entries(struct rvu 
*rvu, u16 pcifunc, int nixlf); 780 + bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable); 793 781 void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); 794 782 void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); 795 783 void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); 796 784 void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, 797 785 int group, int alg_idx, int mcam_index); 786 + 798 787 void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, 799 788 int blkaddr, int *alloc_cnt, 800 789 int *enable_cnt); ··· 832 815 int type); 833 816 bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, 834 817 int index); 818 + int rvu_npc_init(struct rvu *rvu); 819 + int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx, 820 + u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask, 821 + u64 bcast_mcast_val, u64 bcast_mcast_mask); 822 + void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx); 835 823 836 824 /* CPT APIs */ 837 825 int rvu_cpt_register_interrupts(struct rvu *rvu);
+37 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
··· 14 14 #include "lmac_common.h" 15 15 #include "rvu_reg.h" 16 16 #include "rvu_trace.h" 17 + #include "rvu_npc_hash.h" 17 18 18 19 struct cgx_evq_entry { 19 20 struct list_head evq_node; ··· 475 474 if (!is_cgx_config_permitted(rvu, pcifunc)) 476 475 return; 477 476 477 + if (rvu_npc_exact_has_match_table(rvu)) { 478 + rvu_npc_exact_reset(rvu, pcifunc); 479 + return; 480 + } 481 + 478 482 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 479 483 cgx_dev = cgx_get_pdata(cgx_id); 480 484 lmac_count = cgx_get_lmac_cnt(cgx_dev); ··· 590 584 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 591 585 return -EPERM; 592 586 587 + if (rvu_npc_exact_has_match_table(rvu)) 588 + return rvu_npc_exact_mac_addr_set(rvu, req, rsp); 589 + 593 590 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 594 591 595 592 cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); ··· 610 601 611 602 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 612 603 return -EPERM; 604 + 605 + if (rvu_npc_exact_has_match_table(rvu)) 606 + return rvu_npc_exact_mac_addr_add(rvu, req, rsp); 613 607 614 608 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 615 609 rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); ··· 634 622 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 635 623 return -EPERM; 636 624 625 + if (rvu_npc_exact_has_match_table(rvu)) 626 + return rvu_npc_exact_mac_addr_del(rvu, req, rsp); 627 + 637 628 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 638 629 return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); 639 630 } ··· 655 640 */ 656 641 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { 657 642 rsp->max_dmac_filters = 0; 643 + return 0; 644 + } 645 + 646 + if (rvu_npc_exact_has_match_table(rvu)) { 647 + rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu); 658 648 return 0; 659 649 } 660 650 ··· 700 680 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 701 681 return -EPERM; 702 682 683 + /* Disable 
drop on non hit rule */ 684 + if (rvu_npc_exact_has_match_table(rvu)) 685 + return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc); 686 + 703 687 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 704 688 705 689 cgx_lmac_promisc_config(cgx_id, lmac_id, true); ··· 718 694 719 695 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 720 696 return -EPERM; 697 + 698 + /* Disable drop on non hit rule */ 699 + if (rvu_npc_exact_has_match_table(rvu)) 700 + return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc); 721 701 722 702 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 723 703 ··· 1116 1088 return 0; 1117 1089 } 1118 1090 1119 - int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, 1091 + int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, 1120 1092 struct msg_rsp *rsp) 1121 1093 { 1122 1094 int pf = rvu_get_pf(req->hdr.pcifunc); ··· 1126 1098 return LMAC_AF_ERR_PERM_DENIED; 1127 1099 1128 1100 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1101 + 1102 + if (rvu_npc_exact_has_match_table(rvu)) 1103 + return rvu_npc_exact_mac_addr_reset(rvu, req, rsp); 1104 + 1129 1105 return cgx_lmac_addr_reset(cgx_id, lmac_id); 1130 1106 } 1131 1107 1132 1108 int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, 1133 1109 struct cgx_mac_addr_update_req *req, 1134 - struct msg_rsp *rsp) 1110 + struct cgx_mac_addr_update_rsp *rsp) 1135 1111 { 1136 1112 int pf = rvu_get_pf(req->hdr.pcifunc); 1137 1113 u8 cgx_id, lmac_id; 1138 1114 1139 1115 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) 1140 1116 return LMAC_AF_ERR_PERM_DENIED; 1117 + 1118 + if (rvu_npc_exact_has_match_table(rvu)) 1119 + return rvu_npc_exact_mac_addr_update(rvu, req, rsp); 1141 1120 1142 1121 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 1143 1122 return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+179
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
··· 18 18 #include "cgx.h" 19 19 #include "lmac_common.h" 20 20 #include "npc.h" 21 + #include "rvu_npc_hash.h" 21 22 22 23 #define DEBUGFS_DIR_NAME "octeontx2" 23 24 ··· 2601 2600 2602 2601 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL); 2603 2602 2603 + static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused) 2604 + { 2605 + struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 }; 2606 + struct npc_exact_table_entry *cam_entry; 2607 + struct npc_exact_table *table; 2608 + struct rvu *rvu = s->private; 2609 + int i, j; 2610 + 2611 + u8 bitmap = 0; 2612 + 2613 + table = rvu->hw->table; 2614 + 2615 + mutex_lock(&table->lock); 2616 + 2617 + /* Check if there is at least one entry in mem table */ 2618 + if (!table->mem_tbl_entry_cnt) 2619 + goto dump_cam_table; 2620 + 2621 + /* Print table headers */ 2622 + seq_puts(s, "\n\tExact Match MEM Table\n"); 2623 + seq_puts(s, "Index\t"); 2624 + 2625 + for (i = 0; i < table->mem_table.ways; i++) { 2626 + mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i], 2627 + struct npc_exact_table_entry, list); 2628 + 2629 + seq_printf(s, "Way-%d\t\t\t\t\t", i); 2630 + } 2631 + 2632 + seq_puts(s, "\n"); 2633 + for (i = 0; i < table->mem_table.ways; i++) 2634 + seq_puts(s, "\tChan MAC \t"); 2635 + 2636 + seq_puts(s, "\n\n"); 2637 + 2638 + /* Print mem table entries */ 2639 + for (i = 0; i < table->mem_table.depth; i++) { 2640 + bitmap = 0; 2641 + for (j = 0; j < table->mem_table.ways; j++) { 2642 + if (!mem_entry[j]) 2643 + continue; 2644 + 2645 + if (mem_entry[j]->index != i) 2646 + continue; 2647 + 2648 + bitmap |= BIT(j); 2649 + } 2650 + 2651 + /* No valid entries */ 2652 + if (!bitmap) 2653 + continue; 2654 + 2655 + seq_printf(s, "%d\t", i); 2656 + for (j = 0; j < table->mem_table.ways; j++) { 2657 + if (!(bitmap & BIT(j))) { 2658 + seq_puts(s, "nil\t\t\t\t\t"); 2659 + continue; 2660 + } 2661 + 2662 + seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan, 2663 + 
mem_entry[j]->mac); 2664 + mem_entry[j] = list_next_entry(mem_entry[j], list); 2665 + } 2666 + seq_puts(s, "\n"); 2667 + } 2668 + 2669 + dump_cam_table: 2670 + 2671 + if (!table->cam_tbl_entry_cnt) 2672 + goto done; 2673 + 2674 + seq_puts(s, "\n\tExact Match CAM Table\n"); 2675 + seq_puts(s, "index\tchan\tMAC\n"); 2676 + 2677 + /* Traverse cam table entries */ 2678 + list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) { 2679 + seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan, 2680 + cam_entry->mac); 2681 + } 2682 + 2683 + done: 2684 + mutex_unlock(&table->lock); 2685 + return 0; 2686 + } 2687 + 2688 + RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL); 2689 + 2690 + static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused) 2691 + { 2692 + struct npc_exact_table *table; 2693 + struct rvu *rvu = s->private; 2694 + int i; 2695 + 2696 + table = rvu->hw->table; 2697 + 2698 + seq_puts(s, "\n\tExact Table Info\n"); 2699 + seq_printf(s, "Exact Match Feature : %s\n", 2700 + rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable"); 2701 + if (!rvu->hw->cap.npc_exact_match_enabled) 2702 + return 0; 2703 + 2704 + seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n"); 2705 + for (i = 0; i < table->num_drop_rules; i++) 2706 + seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]); 2707 + 2708 + seq_puts(s, "\nMcam Index\tPromisc Mode Status\n"); 2709 + for (i = 0; i < table->num_drop_rules; i++) 2710 + seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? 
"on" : "off"); 2711 + 2712 + seq_puts(s, "\n\tMEM Table Info\n"); 2713 + seq_printf(s, "Ways : %d\n", table->mem_table.ways); 2714 + seq_printf(s, "Depth : %d\n", table->mem_table.depth); 2715 + seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask); 2716 + seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask); 2717 + seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset); 2718 + 2719 + seq_puts(s, "\n\tCAM Table Info\n"); 2720 + seq_printf(s, "Depth : %d\n", table->cam_table.depth); 2721 + 2722 + return 0; 2723 + } 2724 + 2725 + RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL); 2726 + 2727 + static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused) 2728 + { 2729 + struct npc_exact_table *table; 2730 + struct rvu *rvu = s->private; 2731 + struct npc_key_field *field; 2732 + u16 chan, pcifunc; 2733 + int blkaddr, i; 2734 + u64 cfg, cam1; 2735 + char *str; 2736 + 2737 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 2738 + table = rvu->hw->table; 2739 + 2740 + field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN]; 2741 + 2742 + seq_puts(s, "\n\t Exact Hit on drop status\n"); 2743 + seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n"); 2744 + 2745 + for (i = 0; i < table->num_drop_rules; i++) { 2746 + pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i); 2747 + cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0)); 2748 + 2749 + /* channel will be always in keyword 0 */ 2750 + cam1 = rvu_read64(rvu, blkaddr, 2751 + NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1)); 2752 + chan = field->kw_mask[0] & cam1; 2753 + 2754 + str = (cfg & 1) ? 
"enabled" : "disabled"; 2755 + 2756 + seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i, 2757 + rvu_read64(rvu, blkaddr, 2758 + NPC_AF_MATCH_STATX(table->counter_idx[i])), 2759 + chan, str); 2760 + } 2761 + 2762 + return 0; 2763 + } 2764 + 2765 + RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL); 2766 + 2604 2767 static void rvu_dbg_npc_init(struct rvu *rvu) 2605 2768 { 2606 2769 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); ··· 2773 2608 &rvu_dbg_npc_mcam_info_fops); 2774 2609 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu, 2775 2610 &rvu_dbg_npc_mcam_rules_fops); 2611 + 2776 2612 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu, 2777 2613 &rvu_dbg_npc_rx_miss_act_fops); 2614 + 2615 + if (!rvu->hw->cap.npc_exact_match_enabled) 2616 + return; 2617 + 2618 + debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu, 2619 + &rvu_dbg_npc_exact_entries_fops); 2620 + 2621 + debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu, 2622 + &rvu_dbg_npc_exact_info_fops); 2623 + 2624 + debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu, 2625 + &rvu_dbg_npc_exact_drop_cnt_fops); 2626 + 2778 2627 } 2779 2628 2780 2629 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
+69 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
··· 10 10 #include "rvu.h" 11 11 #include "rvu_reg.h" 12 12 #include "rvu_struct.h" 13 + #include "rvu_npc_hash.h" 13 14 14 15 #define DRV_NAME "octeontx2-af" 15 16 ··· 1437 1436 enum rvu_af_dl_param_id { 1438 1437 RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 1439 1438 RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, 1439 + RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, 1440 1440 }; 1441 + 1442 + static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id, 1443 + struct devlink_param_gset_ctx *ctx) 1444 + { 1445 + struct rvu_devlink *rvu_dl = devlink_priv(devlink); 1446 + struct rvu *rvu = rvu_dl->rvu; 1447 + bool enabled; 1448 + 1449 + enabled = rvu_npc_exact_has_match_table(rvu); 1450 + 1451 + snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", 1452 + enabled ? "enabled" : "disabled"); 1453 + 1454 + return 0; 1455 + } 1456 + 1457 + static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id, 1458 + struct devlink_param_gset_ctx *ctx) 1459 + { 1460 + struct rvu_devlink *rvu_dl = devlink_priv(devlink); 1461 + struct rvu *rvu = rvu_dl->rvu; 1462 + 1463 + rvu_npc_exact_disable_feature(rvu); 1464 + 1465 + return 0; 1466 + } 1467 + 1468 + static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id, 1469 + union devlink_param_value val, 1470 + struct netlink_ext_ack *extack) 1471 + { 1472 + struct rvu_devlink *rvu_dl = devlink_priv(devlink); 1473 + struct rvu *rvu = rvu_dl->rvu; 1474 + u64 enable; 1475 + 1476 + if (kstrtoull(val.vstr, 10, &enable)) { 1477 + NL_SET_ERR_MSG_MOD(extack, 1478 + "Only 1 value is supported"); 1479 + return -EINVAL; 1480 + } 1481 + 1482 + if (enable != 1) { 1483 + NL_SET_ERR_MSG_MOD(extack, 1484 + "Only disabling exact match feature is supported"); 1485 + return -EINVAL; 1486 + } 1487 + 1488 + if (rvu_npc_exact_can_disable_feature(rvu)) 1489 + return 0; 1490 + 1491 + NL_SET_ERR_MSG_MOD(extack, 1492 + "Can't disable exact match feature; Please try before any configuration"); 1493 + return 
-EFAULT; 1494 + } 1441 1495 1442 1496 static const struct devlink_param rvu_af_dl_params[] = { 1443 1497 DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, ··· 1500 1444 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1501 1445 rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set, 1502 1446 rvu_af_dl_dwrr_mtu_validate), 1447 + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, 1448 + "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING, 1449 + BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1450 + rvu_af_npc_exact_feature_get, 1451 + rvu_af_npc_exact_feature_disable, 1452 + rvu_af_npc_exact_feature_validate), 1503 1453 }; 1504 1454 1505 1455 /* Devlink switch mode */ ··· 1563 1501 { 1564 1502 struct rvu_devlink *rvu_dl; 1565 1503 struct devlink *dl; 1504 + size_t size; 1566 1505 int err; 1567 1506 1568 1507 dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink), ··· 1585 1522 goto err_dl_health; 1586 1523 } 1587 1524 1588 - err = devlink_params_register(dl, rvu_af_dl_params, 1589 - ARRAY_SIZE(rvu_af_dl_params)); 1525 + /* Register exact match devlink only for CN10K-B */ 1526 + size = ARRAY_SIZE(rvu_af_dl_params); 1527 + if (!rvu_npc_exact_has_match_table(rvu)) 1528 + size -= 1; 1529 + 1530 + err = devlink_params_register(dl, rvu_af_dl_params, size); 1590 1531 if (err) { 1591 1532 dev_err(rvu->dev, 1592 1533 "devlink params register failed with error %d", err);
+7
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 14 14 #include "npc.h" 15 15 #include "cgx.h" 16 16 #include "lmac_common.h" 17 + #include "rvu_npc_hash.h" 17 18 18 19 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 19 20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, ··· 3793 3792 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, 3794 3793 pfvf->rx_chan_base, 3795 3794 pfvf->rx_chan_cnt); 3795 + 3796 + if (rvu_npc_exact_has_match_table(rvu)) 3797 + rvu_npc_exact_promisc_enable(rvu, pcifunc); 3796 3798 } else { 3797 3799 if (!nix_rx_multicast) 3798 3800 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); 3801 + 3802 + if (rvu_npc_exact_has_match_table(rvu)) 3803 + rvu_npc_exact_promisc_disable(rvu, pcifunc); 3799 3804 } 3800 3805 3801 3806 return 0;
+42 -9
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
··· 15 15 #include "npc.h" 16 16 #include "cgx.h" 17 17 #include "npc_profile.h" 18 + #include "rvu_npc_hash.h" 18 19 19 20 #define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */ 20 21 #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ ··· 1106 1105 NIXLF_PROMISC_ENTRY, false); 1107 1106 } 1108 1107 1108 + bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable) 1109 + { 1110 + int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1111 + struct npc_mcam *mcam = &rvu->hw->mcam; 1112 + struct rvu_npc_mcam_rule *rule, *tmp; 1113 + 1114 + mutex_lock(&mcam->lock); 1115 + 1116 + list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { 1117 + if (rule->intf != intf) 1118 + continue; 1119 + 1120 + if (rule->entry != entry) 1121 + continue; 1122 + 1123 + rule->enable = enable; 1124 + mutex_unlock(&mcam->lock); 1125 + 1126 + npc_enable_mcam_entry(rvu, mcam, blkaddr, 1127 + entry, enable); 1128 + 1129 + return true; 1130 + } 1131 + 1132 + mutex_unlock(&mcam->lock); 1133 + return false; 1134 + } 1135 + 1109 1136 void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) 1110 1137 { 1111 1138 /* Enables only broadcast match entry. 
Promisc/Allmulti are enabled ··· 1210 1181 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); 1211 1182 } 1212 1183 1213 - #define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ 1214 - rvu_write64(rvu, blkaddr, \ 1215 - NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) 1216 - 1217 - #define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ 1218 - rvu_write64(rvu, blkaddr, \ 1219 - NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) 1220 - 1221 1184 static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr, 1222 1185 struct npc_mcam_kex *mkex, u8 intf) 1223 1186 { ··· 1283 1262 npc_program_mkex_rx(rvu, blkaddr, mkex, intf); 1284 1263 npc_program_mkex_tx(rvu, blkaddr, mkex, intf); 1285 1264 } 1265 + 1266 + /* Programme mkex hash profile */ 1267 + npc_program_mkex_hash(rvu, blkaddr); 1286 1268 } 1287 1269 1288 1270 static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr, ··· 1487 1463 profile->kpus = ARRAY_SIZE(npc_kpu_profiles); 1488 1464 profile->lt_def = &npc_lt_defaults; 1489 1465 profile->mkex = &npc_mkex_default; 1466 + profile->mkex_hash = &npc_mkex_hash_default; 1490 1467 1491 1468 return 0; 1492 1469 } ··· 1844 1819 mcam->hprio_count = mcam->lprio_count; 1845 1820 mcam->hprio_end = mcam->hprio_count; 1846 1821 1847 - 1848 1822 /* Allocate bitmap for managing MCAM counters and memory 1849 1823 * for saving counter to RVU PFFUNC allocation mapping. 
1850 1824 */ ··· 2071 2047 2072 2048 rvu_npc_setup_interfaces(rvu, blkaddr); 2073 2049 2050 + npc_config_secret_key(rvu, blkaddr); 2074 2051 /* Configure MKEX profile */ 2075 2052 npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name); 2076 2053 ··· 2585 2560 2586 2561 mutex_unlock(&mcam->lock); 2587 2562 return 0; 2563 + } 2564 + 2565 + /* Marks bitmaps to reserved the mcam slot */ 2566 + void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx) 2567 + { 2568 + struct npc_mcam *mcam = &rvu->hw->mcam; 2569 + 2570 + npc_mcam_set_bit(mcam, entry_idx); 2588 2571 } 2589 2572 2590 2573 int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+147 -14
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
··· 10 10 #include "rvu_reg.h" 11 11 #include "rvu.h" 12 12 #include "npc.h" 13 + #include "rvu_npc_fs.h" 14 + #include "rvu_npc_hash.h" 13 15 14 16 #define NPC_BYTESM GENMASK_ULL(19, 16) 15 17 #define NPC_HDR_OFFSET GENMASK_ULL(15, 8) ··· 229 227 return true; 230 228 } 231 229 230 + static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number, 231 + u8 key_nibble, u8 intf) 232 + { 233 + u8 offset = (key_nibble * 4) % 64; /* offset within key word */ 234 + u8 kwi = (key_nibble * 4) / 64; /* which word in key */ 235 + u8 nr_bits = 4; /* bits in a nibble */ 236 + u8 type; 237 + 238 + switch (bit_number) { 239 + case 40 ... 43: 240 + type = NPC_EXACT_RESULT; 241 + break; 242 + 243 + default: 244 + return; 245 + } 246 + npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); 247 + } 248 + 232 249 static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number, 233 250 u8 key_nibble, u8 intf) 234 251 { ··· 297 276 default: 298 277 return; 299 278 } 279 + 300 280 npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); 301 281 } 302 282 ··· 531 509 { 532 510 struct npc_mcam *mcam = &rvu->hw->mcam; 533 511 u8 lid, lt, ld, bitnr; 512 + u64 cfg, masked_cfg; 534 513 u8 key_nibble = 0; 535 - u64 cfg; 536 514 537 515 /* Scan and note how parse result is going to be in key. 538 516 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from ··· 540 518 * will be concatenated in key. 541 519 */ 542 520 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf)); 543 - cfg &= NPC_PARSE_NIBBLE; 544 - for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) { 521 + masked_cfg = cfg & NPC_PARSE_NIBBLE; 522 + for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) { 545 523 npc_scan_parse_result(mcam, bitnr, key_nibble, intf); 524 + key_nibble++; 525 + } 526 + 527 + /* Ignore exact match bits for mcam entries except the first rule 528 + * which is drop on hit. This first rule is configured explitcitly by 529 + * exact match code. 
530 + */ 531 + masked_cfg = cfg & NPC_EXACT_NIBBLE; 532 + bitnr = NPC_EXACT_NIBBLE_START; 533 + for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, 534 + NPC_EXACT_NIBBLE_START) { 535 + npc_scan_exact_result(mcam, bitnr, key_nibble, intf); 546 536 key_nibble++; 547 537 } 548 538 ··· 658 624 * If any bits in mask are 0 then corresponding bits in value are 659 625 * dont care. 660 626 */ 661 - static void npc_update_entry(struct rvu *rvu, enum key_fields type, 662 - struct mcam_entry *entry, u64 val_lo, 663 - u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf) 627 + void npc_update_entry(struct rvu *rvu, enum key_fields type, 628 + struct mcam_entry *entry, u64 val_lo, 629 + u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf) 664 630 { 665 631 struct npc_mcam *mcam = &rvu->hw->mcam; 666 632 struct mcam_entry dummy = { {0} }; ··· 739 705 } 740 706 } 741 707 742 - #define IPV6_WORDS 4 743 - 744 708 static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, 745 709 u64 features, struct flow_msg *pkt, 746 710 struct flow_msg *mask, ··· 811 779 static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, 812 780 u64 features, struct flow_msg *pkt, 813 781 struct flow_msg *mask, 814 - struct rvu_npc_mcam_rule *output, u8 intf) 782 + struct rvu_npc_mcam_rule *output, u8 intf, 783 + int blkaddr) 815 784 { 816 785 u64 dmac_mask = ether_addr_to_u64(mask->dmac); 817 786 u64 smac_mask = ether_addr_to_u64(mask->smac); ··· 861 828 } while (0) 862 829 863 830 NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0); 831 + 864 832 NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); 865 833 NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, 866 834 ntohs(mask->etype), 0); ··· 888 854 889 855 npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); 890 856 npc_update_vlan_features(rvu, entry, features, intf); 857 + 858 + npc_update_field_hash(rvu, intf, entry, blkaddr, features, 859 + pkt, mask, opkt, omask); 891 860 } 892 861 893 - 
static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, 894 - u16 entry) 862 + static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry) 895 863 { 896 864 struct rvu_npc_mcam_rule *iter; 897 865 ··· 1059 1023 u16 owner = req->hdr.pcifunc; 1060 1024 struct msg_rsp write_rsp; 1061 1025 struct mcam_entry *entry; 1062 - int entry_index, err; 1063 1026 bool new = false; 1027 + u16 entry_index; 1028 + int err; 1064 1029 1065 1030 installed_features = req->features; 1066 1031 features = req->features; ··· 1069 1032 entry_index = req->entry; 1070 1033 1071 1034 npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy, 1072 - req->intf); 1035 + req->intf, blkaddr); 1073 1036 1074 1037 if (is_npc_intf_rx(req->intf)) 1075 1038 npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); ··· 1094 1057 npc_update_flow(rvu, entry, missing_features, 1095 1058 &def_ucast_rule->packet, 1096 1059 &def_ucast_rule->mask, 1097 - &dummy, req->intf); 1060 + &dummy, req->intf, 1061 + blkaddr); 1098 1062 installed_features = req->features | missing_features; 1099 1063 } 1100 1064 ··· 1461 1423 index, false); 1462 1424 } 1463 1425 mutex_unlock(&mcam->lock); 1426 + } 1427 + 1428 + /* single drop on non hit rule starting from 0th index. This an extension 1429 + * to RPM mac filter to support more rules. 
1430 + */ 1431 + int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx, 1432 + u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask, 1433 + u64 bcast_mcast_val, u64 bcast_mcast_mask) 1434 + { 1435 + struct npc_mcam_alloc_counter_req cntr_req = { 0 }; 1436 + struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; 1437 + struct npc_mcam_write_entry_req req = { 0 }; 1438 + struct npc_mcam *mcam = &rvu->hw->mcam; 1439 + struct rvu_npc_mcam_rule *rule; 1440 + struct msg_rsp rsp; 1441 + bool enabled; 1442 + int blkaddr; 1443 + int err; 1444 + 1445 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1446 + if (blkaddr < 0) { 1447 + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1448 + return -ENODEV; 1449 + } 1450 + 1451 + /* Bail out if no exact match support */ 1452 + if (!rvu_npc_exact_has_match_table(rvu)) { 1453 + dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__); 1454 + return -EINVAL; 1455 + } 1456 + 1457 + /* If 0th entry is already used, return err */ 1458 + enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx); 1459 + if (enabled) { 1460 + dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n", 1461 + __func__, mcam_idx); 1462 + return -EINVAL; 1463 + } 1464 + 1465 + /* Add this entry to mcam rules list */ 1466 + rule = kzalloc(sizeof(*rule), GFP_KERNEL); 1467 + if (!rule) 1468 + return -ENOMEM; 1469 + 1470 + /* Disable rule by default. 
Enable rule when first dmac filter is 1471 + * installed 1472 + */ 1473 + rule->enable = false; 1474 + rule->chan = chan_val; 1475 + rule->chan_mask = chan_mask; 1476 + rule->entry = mcam_idx; 1477 + rvu_mcam_add_rule(mcam, rule); 1478 + 1479 + /* Reserve slot 0 */ 1480 + npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx); 1481 + 1482 + /* Allocate counter for this single drop on non hit rule */ 1483 + cntr_req.hdr.pcifunc = 0; /* AF request */ 1484 + cntr_req.contig = true; 1485 + cntr_req.count = 1; 1486 + err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); 1487 + if (err) { 1488 + dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n", 1489 + __func__, err); 1490 + return -EFAULT; 1491 + } 1492 + *counter_idx = cntr_rsp.cntr; 1493 + 1494 + /* Fill in fields for this mcam entry */ 1495 + npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0, 1496 + exact_mask, 0, NIX_INTF_RX); 1497 + npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0, 1498 + chan_mask, 0, NIX_INTF_RX); 1499 + npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0, 1500 + bcast_mcast_mask, 0, NIX_INTF_RX); 1501 + 1502 + req.intf = NIX_INTF_RX; 1503 + req.set_cntr = true; 1504 + req.cntr = cntr_rsp.cntr; 1505 + req.entry = mcam_idx; 1506 + 1507 + err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp); 1508 + if (err) { 1509 + dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n", 1510 + __func__, mcam_idx); 1511 + return err; 1512 + } 1513 + 1514 + dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n", 1515 + __func__, mcam_idx, req.cntr); 1516 + 1517 + /* disable entry at Bank 0, index 0 */ 1518 + npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false); 1519 + 1520 + return 0; 1464 1521 }
+17
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2022 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef __RVU_NPC_FS_H 9 + #define __RVU_NPC_FS_H 10 + 11 + #define IPV6_WORDS 4 12 + 13 + void npc_update_entry(struct rvu *rvu, enum key_fields type, 14 + struct mcam_entry *entry, u64 val_lo, 15 + u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf); 16 + 17 + #endif /* RVU_NPC_FS_H */
+1984
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2022 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/bitfield.h> 9 + #include <linux/module.h> 10 + #include <linux/pci.h> 11 + #include <linux/firmware.h> 12 + #include <linux/stddef.h> 13 + #include <linux/debugfs.h> 14 + #include <linux/bitfield.h> 15 + 16 + #include "rvu_struct.h" 17 + #include "rvu_reg.h" 18 + #include "rvu.h" 19 + #include "npc.h" 20 + #include "cgx.h" 21 + #include "rvu_npc_hash.h" 22 + #include "rvu_npc_fs.h" 23 + #include "rvu_npc_hash.h" 24 + 25 + static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit, 26 + size_t width_bits) 27 + { 28 + const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits); 29 + const size_t msb = start_bit + width_bits - 1; 30 + const size_t lword = start_bit >> 6; 31 + const size_t uword = msb >> 6; 32 + size_t lbits; 33 + u64 hi, lo; 34 + 35 + if (lword == uword) 36 + return (input[lword] >> (start_bit & 63)) & mask; 37 + 38 + lbits = 64 - (start_bit & 63); 39 + hi = input[uword]; 40 + lo = (input[lword] >> (start_bit & 63)); 41 + return ((hi << lbits) | lo) & mask; 42 + } 43 + 44 + static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len) 45 + { 46 + u64 prev_orig_word = 0; 47 + u64 cur_orig_word = 0; 48 + size_t extra = key_bit_len % 64; 49 + size_t max_idx = key_bit_len / 64; 50 + size_t i; 51 + 52 + if (extra) 53 + max_idx++; 54 + 55 + for (i = 0; i < max_idx; i++) { 56 + cur_orig_word = key[i]; 57 + key[i] = key[i] << 1; 58 + key[i] |= ((prev_orig_word >> 63) & 0x1); 59 + prev_orig_word = cur_orig_word; 60 + } 61 + } 62 + 63 + static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len, 64 + size_t key_bit_len) 65 + { 66 + u32 hash_out = 0; 67 + u64 temp_data = 0; 68 + int i; 69 + 70 + for (i = data_bit_len - 1; i >= 0; i--) { 71 + temp_data = (data[i / 64]); 72 + temp_data = temp_data >> (i % 64); 73 + temp_data &= 0x1; 74 + if (temp_data) 75 + hash_out ^= 
(u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32)); 76 + 77 + rvu_npc_lshift_key(key, key_bit_len); 78 + } 79 + 80 + return hash_out; 81 + } 82 + 83 + u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash, 84 + u64 *secret_key, u8 intf, u8 hash_idx) 85 + { 86 + u64 hash_key[3]; 87 + u64 data_padded[2]; 88 + u32 field_hash; 89 + 90 + hash_key[0] = secret_key[1] << 31; 91 + hash_key[0] |= secret_key[2]; 92 + hash_key[1] = secret_key[1] >> 33; 93 + hash_key[1] |= secret_key[0] << 31; 94 + hash_key[2] = secret_key[0] >> 33; 95 + 96 + data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0]; 97 + data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1]; 98 + field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159); 99 + 100 + field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32; 101 + field_hash |= mkex_hash->hash_ctrl[intf][hash_idx]; 102 + return field_hash; 103 + } 104 + 105 + static u64 npc_update_use_hash(int lt, int ld) 106 + { 107 + u64 cfg = 0; 108 + 109 + switch (lt) { 110 + case NPC_LT_LC_IP6: 111 + /* Update use_hash(bit-20) and bytesm1 (bit-16:19) 112 + * in KEX_LD_CFG 113 + */ 114 + cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03, 115 + ld ? 
0x8 : 0x18, 116 + 0x1, 0x0, 0x10); 117 + break; 118 + } 119 + 120 + return cfg; 121 + } 122 + 123 + static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr, 124 + u8 intf) 125 + { 126 + struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 127 + int lid, lt, ld, hash_cnt = 0; 128 + 129 + if (is_npc_intf_tx(intf)) 130 + return; 131 + 132 + /* Program HASH_CFG */ 133 + for (lid = 0; lid < NPC_MAX_LID; lid++) { 134 + for (lt = 0; lt < NPC_MAX_LT; lt++) { 135 + for (ld = 0; ld < NPC_MAX_LD; ld++) { 136 + if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { 137 + u64 cfg = npc_update_use_hash(lt, ld); 138 + 139 + hash_cnt++; 140 + if (hash_cnt == NPC_MAX_HASH) 141 + return; 142 + 143 + /* Set updated KEX configuration */ 144 + SET_KEX_LD(intf, lid, lt, ld, cfg); 145 + /* Set HASH configuration */ 146 + SET_KEX_LD_HASH(intf, ld, 147 + mkex_hash->hash[intf][ld]); 148 + SET_KEX_LD_HASH_MASK(intf, ld, 0, 149 + mkex_hash->hash_mask[intf][ld][0]); 150 + SET_KEX_LD_HASH_MASK(intf, ld, 1, 151 + mkex_hash->hash_mask[intf][ld][1]); 152 + SET_KEX_LD_HASH_CTRL(intf, ld, 153 + mkex_hash->hash_ctrl[intf][ld]); 154 + } 155 + } 156 + } 157 + } 158 + } 159 + 160 + static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr, 161 + u8 intf) 162 + { 163 + struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 164 + int lid, lt, ld, hash_cnt = 0; 165 + 166 + if (is_npc_intf_rx(intf)) 167 + return; 168 + 169 + /* Program HASH_CFG */ 170 + for (lid = 0; lid < NPC_MAX_LID; lid++) { 171 + for (lt = 0; lt < NPC_MAX_LT; lt++) { 172 + for (ld = 0; ld < NPC_MAX_LD; ld++) 173 + if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { 174 + u64 cfg = npc_update_use_hash(lt, ld); 175 + 176 + hash_cnt++; 177 + if (hash_cnt == NPC_MAX_HASH) 178 + return; 179 + 180 + /* Set updated KEX configuration */ 181 + SET_KEX_LD(intf, lid, lt, ld, cfg); 182 + /* Set HASH configuration */ 183 + SET_KEX_LD_HASH(intf, ld, 184 + mkex_hash->hash[intf][ld]); 185 + SET_KEX_LD_HASH_MASK(intf, ld, 0, 
186 + mkex_hash->hash_mask[intf][ld][0]); 187 + SET_KEX_LD_HASH_MASK(intf, ld, 1, 188 + mkex_hash->hash_mask[intf][ld][1]); 189 + SET_KEX_LD_HASH_CTRL(intf, ld, 190 + mkex_hash->hash_ctrl[intf][ld]); 191 + hash_cnt++; 192 + if (hash_cnt == NPC_MAX_HASH) 193 + return; 194 + } 195 + } 196 + } 197 + } 198 + 199 + void npc_config_secret_key(struct rvu *rvu, int blkaddr) 200 + { 201 + struct hw_cap *hwcap = &rvu->hw->cap; 202 + struct rvu_hwinfo *hw = rvu->hw; 203 + u8 intf; 204 + 205 + if (!hwcap->npc_hash_extract) { 206 + dev_info(rvu->dev, "HW does not support secret key configuration\n"); 207 + return; 208 + } 209 + 210 + for (intf = 0; intf < hw->npc_intfs; intf++) { 211 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf), 212 + RVU_NPC_HASH_SECRET_KEY0); 213 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf), 214 + RVU_NPC_HASH_SECRET_KEY1); 215 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf), 216 + RVU_NPC_HASH_SECRET_KEY2); 217 + } 218 + } 219 + 220 + void npc_program_mkex_hash(struct rvu *rvu, int blkaddr) 221 + { 222 + struct hw_cap *hwcap = &rvu->hw->cap; 223 + struct rvu_hwinfo *hw = rvu->hw; 224 + u8 intf; 225 + 226 + if (!hwcap->npc_hash_extract) { 227 + dev_dbg(rvu->dev, "Field hash extract feature is not supported\n"); 228 + return; 229 + } 230 + 231 + for (intf = 0; intf < hw->npc_intfs; intf++) { 232 + npc_program_mkex_hash_rx(rvu, blkaddr, intf); 233 + npc_program_mkex_hash_tx(rvu, blkaddr, intf); 234 + } 235 + } 236 + 237 + void npc_update_field_hash(struct rvu *rvu, u8 intf, 238 + struct mcam_entry *entry, 239 + int blkaddr, 240 + u64 features, 241 + struct flow_msg *pkt, 242 + struct flow_msg *mask, 243 + struct flow_msg *opkt, 244 + struct flow_msg *omask) 245 + { 246 + struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; 247 + struct npc_get_secret_key_req req; 248 + struct npc_get_secret_key_rsp rsp; 249 + u64 ldata[2], cfg; 250 + u32 field_hash; 251 + u8 hash_idx; 252 + 253 + if (!rvu->hw->cap.npc_hash_extract) { 
254 + dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__); 255 + return; 256 + } 257 + 258 + req.intf = intf; 259 + rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp); 260 + 261 + for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) { 262 + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx)); 263 + if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) { 264 + u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8; 265 + u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4; 266 + u8 ltype_mask = cfg & GENMASK_ULL(3, 0); 267 + 268 + if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) { 269 + switch (ltype & ltype_mask) { 270 + /* If hash extract enabled is supported for IPv6 then 271 + * 128 bit IPv6 source and destination addressed 272 + * is hashed to 32 bit value. 273 + */ 274 + case NPC_LT_LC_IP6: 275 + if (features & BIT_ULL(NPC_SIP_IPV6)) { 276 + u32 src_ip[IPV6_WORDS]; 277 + 278 + be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS); 279 + ldata[0] = (u64)src_ip[0] << 32 | src_ip[1]; 280 + ldata[1] = (u64)src_ip[2] << 32 | src_ip[3]; 281 + field_hash = npc_field_hash_calc(ldata, 282 + mkex_hash, 283 + rsp.secret_key, 284 + intf, 285 + hash_idx); 286 + npc_update_entry(rvu, NPC_SIP_IPV6, entry, 287 + field_hash, 0, 32, 0, intf); 288 + memcpy(&opkt->ip6src, &pkt->ip6src, 289 + sizeof(pkt->ip6src)); 290 + memcpy(&omask->ip6src, &mask->ip6src, 291 + sizeof(mask->ip6src)); 292 + break; 293 + } 294 + 295 + if (features & BIT_ULL(NPC_DIP_IPV6)) { 296 + u32 dst_ip[IPV6_WORDS]; 297 + 298 + be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS); 299 + ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1]; 300 + ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3]; 301 + field_hash = npc_field_hash_calc(ldata, 302 + mkex_hash, 303 + rsp.secret_key, 304 + intf, 305 + hash_idx); 306 + npc_update_entry(rvu, NPC_DIP_IPV6, entry, 307 + field_hash, 0, 32, 0, intf); 308 + memcpy(&opkt->ip6dst, &pkt->ip6dst, 309 + sizeof(pkt->ip6dst)); 310 + memcpy(&omask->ip6dst, 
&mask->ip6dst, 311 + sizeof(mask->ip6dst)); 312 + } 313 + break; 314 + } 315 + } 316 + } 317 + } 318 + } 319 + 320 + int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu, 321 + struct npc_get_secret_key_req *req, 322 + struct npc_get_secret_key_rsp *rsp) 323 + { 324 + u64 *secret_key = rsp->secret_key; 325 + u8 intf = req->intf; 326 + int blkaddr; 327 + 328 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 329 + if (blkaddr < 0) { 330 + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 331 + return -EINVAL; 332 + } 333 + 334 + secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf)); 335 + secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf)); 336 + secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf)); 337 + 338 + return 0; 339 + } 340 + 341 + /** 342 + * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64. 343 + * @mac_addr: MAC address. 344 + * Return: mdata for exact match table. 345 + */ 346 + static u64 rvu_npc_exact_mac2u64(u8 *mac_addr) 347 + { 348 + u64 mac = 0; 349 + int index; 350 + 351 + for (index = ETH_ALEN - 1; index >= 0; index--) 352 + mac |= ((u64)*mac_addr++) << (8 * index); 353 + 354 + return mac; 355 + } 356 + 357 + /** 358 + * rvu_exact_prepare_mdata - Make mdata for mcam entry 359 + * @mac: MAC address 360 + * @chan: Channel number. 361 + * @ctype: Channel Type. 362 + * @mask: LDATA mask. 363 + * Return: Meta data 364 + */ 365 + static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask) 366 + { 367 + u64 ldata = rvu_npc_exact_mac2u64(mac); 368 + 369 + /* Please note that mask is 48bit which excludes chan and ctype. 370 + * Increase mask bits if we need to include them as well. 371 + */ 372 + ldata |= ((u64)chan << 48); 373 + ldata |= ((u64)ctype << 60); 374 + ldata &= mask; 375 + ldata = ldata << 2; 376 + 377 + return ldata; 378 + } 379 + 380 + /** 381 + * rvu_exact_calculate_hash - calculate hash index to mem table. 
382 + * @rvu: resource virtualization unit. 383 + * @chan: Channel number 384 + * @ctype: Channel type. 385 + * @mac: MAC address 386 + * @mask: HASH mask. 387 + * @table_depth: Depth of table. 388 + * Return: Hash value 389 + */ 390 + static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac, 391 + u64 mask, u32 table_depth) 392 + { 393 + struct npc_exact_table *table = rvu->hw->table; 394 + u64 hash_key[2]; 395 + u64 key_in[2]; 396 + u64 ldata; 397 + u32 hash; 398 + 399 + key_in[0] = RVU_NPC_HASH_SECRET_KEY0; 400 + key_in[1] = RVU_NPC_HASH_SECRET_KEY2; 401 + 402 + hash_key[0] = key_in[0] << 31; 403 + hash_key[0] |= key_in[1]; 404 + hash_key[1] = key_in[0] >> 33; 405 + 406 + ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask); 407 + 408 + dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__, 409 + ldata, hash_key[1], hash_key[0]); 410 + hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95); 411 + 412 + hash &= table->mem_table.hash_mask; 413 + hash += table->mem_table.hash_offset; 414 + dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash); 415 + 416 + return hash; 417 + } 418 + 419 + /** 420 + * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table. 421 + * @rvu: resource virtualization unit. 422 + * @way: Indicate way to table. 423 + * @index: Hash index to 4 way table. 424 + * @hash: Hash value. 425 + * 426 + * Searches 4 way table using hash index. Returns 0 on success. 427 + * Return: 0 upon success. 428 + */ 429 + static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way, 430 + u32 *index, unsigned int hash) 431 + { 432 + struct npc_exact_table *table; 433 + int depth, i; 434 + 435 + table = rvu->hw->table; 436 + depth = table->mem_table.depth; 437 + 438 + /* Check all the 4 ways for a free slot. 
*/ 439 + mutex_lock(&table->lock); 440 + for (i = 0; i < table->mem_table.ways; i++) { 441 + if (test_bit(hash + i * depth, table->mem_table.bmap)) 442 + continue; 443 + 444 + set_bit(hash + i * depth, table->mem_table.bmap); 445 + mutex_unlock(&table->lock); 446 + 447 + dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n", 448 + __func__, i, hash); 449 + 450 + *way = i; 451 + *index = hash; 452 + return 0; 453 + } 454 + mutex_unlock(&table->lock); 455 + 456 + dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__, 457 + bitmap_weight(table->mem_table.bmap, table->mem_table.depth)); 458 + return -ENOSPC; 459 + } 460 + 461 + /** 462 + * rvu_npc_exact_free_id - Free seq id from bitmat. 463 + * @rvu: Resource virtualization unit. 464 + * @seq_id: Sequence identifier to be freed. 465 + */ 466 + static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id) 467 + { 468 + struct npc_exact_table *table; 469 + 470 + table = rvu->hw->table; 471 + mutex_lock(&table->lock); 472 + clear_bit(seq_id, table->id_bmap); 473 + mutex_unlock(&table->lock); 474 + dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id); 475 + } 476 + 477 + /** 478 + * rvu_npc_exact_alloc_id - Alloc seq id from bitmap. 479 + * @rvu: Resource virtualization unit. 480 + * @seq_id: Sequence identifier. 481 + * Return: True or false. 
482 + */ 483 + static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id) 484 + { 485 + struct npc_exact_table *table; 486 + u32 idx; 487 + 488 + table = rvu->hw->table; 489 + 490 + mutex_lock(&table->lock); 491 + idx = find_first_zero_bit(table->id_bmap, table->tot_ids); 492 + if (idx == table->tot_ids) { 493 + mutex_unlock(&table->lock); 494 + dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n", 495 + __func__, bitmap_weight(table->id_bmap, table->tot_ids)); 496 + 497 + return false; 498 + } 499 + 500 + /* Mark bit map to indicate that slot is used.*/ 501 + set_bit(idx, table->id_bmap); 502 + mutex_unlock(&table->lock); 503 + 504 + *seq_id = idx; 505 + dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id); 506 + 507 + return true; 508 + } 509 + 510 + /** 511 + * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table. 512 + * @rvu: resource virtualization unit. 513 + * @index: Index to exact CAM table. 514 + * Return: 0 upon success; else error number. 515 + */ 516 + static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index) 517 + { 518 + struct npc_exact_table *table; 519 + u32 idx; 520 + 521 + table = rvu->hw->table; 522 + 523 + mutex_lock(&table->lock); 524 + idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth); 525 + if (idx == table->cam_table.depth) { 526 + mutex_unlock(&table->lock); 527 + dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__, 528 + bitmap_weight(table->cam_table.bmap, table->cam_table.depth)); 529 + return -ENOSPC; 530 + } 531 + 532 + /* Mark bit map to indicate that slot is used.*/ 533 + set_bit(idx, table->cam_table.bmap); 534 + mutex_unlock(&table->lock); 535 + 536 + *index = idx; 537 + dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n", 538 + __func__, idx); 539 + return 0; 540 + } 541 + 542 + /** 543 + * rvu_exact_prepare_table_entry - Data for exact match table entry. 544 + * @rvu: Resource virtualization unit. 
545 + * @enable: Enable/Disable entry 546 + * @ctype: Software defined channel type. Currently set as 0. 547 + * @chan: Channel number. 548 + * @mac_addr: Destination mac address. 549 + * Return: mdata for exact match table. 550 + */ 551 + static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable, 552 + u8 ctype, u16 chan, u8 *mac_addr) 553 + 554 + { 555 + u64 ldata = rvu_npc_exact_mac2u64(mac_addr); 556 + 557 + /* Enable or disable */ 558 + u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0); 559 + 560 + /* Set Ctype */ 561 + mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype); 562 + 563 + /* Set chan */ 564 + mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan); 565 + 566 + /* MAC address */ 567 + mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata); 568 + 569 + return mdata; 570 + } 571 + 572 + /** 573 + * rvu_exact_config_secret_key - Configure secret key. 574 + * @rvu: Resource virtualization unit. 575 + */ 576 + static void rvu_exact_config_secret_key(struct rvu *rvu) 577 + { 578 + int blkaddr; 579 + 580 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 581 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX), 582 + RVU_NPC_HASH_SECRET_KEY0); 583 + 584 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX), 585 + RVU_NPC_HASH_SECRET_KEY1); 586 + 587 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX), 588 + RVU_NPC_HASH_SECRET_KEY2); 589 + } 590 + 591 + /** 592 + * rvu_exact_config_search_key - Configure search key 593 + * @rvu: Resource virtualization unit. 
594 + */ 595 + static void rvu_exact_config_search_key(struct rvu *rvu) 596 + { 597 + int blkaddr; 598 + u64 reg_val; 599 + 600 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 601 + 602 + /* HDR offset */ 603 + reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0); 604 + 605 + /* BYTESM1, number of bytes - 1 */ 606 + reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1); 607 + 608 + /* Enable LID and set LID to NPC_LID_LA */ 609 + reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1); 610 + reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA); 611 + 612 + /* Clear layer type based extraction */ 613 + 614 + /* Disable LT_EN */ 615 + reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0); 616 + 617 + /* Set LTYPE_MATCH to 0 */ 618 + reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0); 619 + 620 + /* Set LTYPE_MASK to 0 */ 621 + reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0); 622 + 623 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val); 624 + } 625 + 626 + /** 627 + * rvu_exact_config_result_ctrl - Set exact table hash control 628 + * @rvu: Resource virtualization unit. 629 + * @depth: Depth of Exact match table. 630 + * 631 + * Sets mask and offset for hash for mem table. 632 + */ 633 + static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth) 634 + { 635 + int blkaddr; 636 + u64 reg = 0; 637 + 638 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 639 + 640 + /* Set mask. Note that depth is a power of 2 */ 641 + rvu->hw->table->mem_table.hash_mask = (depth - 1); 642 + reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1)); 643 + 644 + /* Set offset as 0 */ 645 + rvu->hw->table->mem_table.hash_offset = 0; 646 + reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0); 647 + 648 + /* Set reg for RX */ 649 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg); 650 + /* Store hash mask and offset for s/w algorithm */ 651 + } 652 + 653 + /** 654 + * rvu_exact_config_table_mask - Set exact table mask. 655 + * @rvu: Resource virtualization unit. 
656 + */ 657 + static void rvu_exact_config_table_mask(struct rvu *rvu) 658 + { 659 + int blkaddr; 660 + u64 mask = 0; 661 + 662 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 663 + 664 + /* Don't use Ctype */ 665 + mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0); 666 + 667 + /* Set chan */ 668 + mask |= GENMASK_ULL(59, 48); 669 + 670 + /* Full ldata */ 671 + mask |= GENMASK_ULL(47, 0); 672 + 673 + /* Store mask for s/w hash calcualtion */ 674 + rvu->hw->table->mem_table.mask = mask; 675 + 676 + /* Set mask for RX.*/ 677 + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask); 678 + } 679 + 680 + /** 681 + * rvu_npc_exact_get_max_entries - Get total number of entries in table. 682 + * @rvu: resource virtualization unit. 683 + * Return: Maximum table entries possible. 684 + */ 685 + u32 rvu_npc_exact_get_max_entries(struct rvu *rvu) 686 + { 687 + struct npc_exact_table *table; 688 + 689 + table = rvu->hw->table; 690 + return table->tot_ids; 691 + } 692 + 693 + /** 694 + * rvu_npc_exact_has_match_table - Checks support for exact match. 695 + * @rvu: resource virtualization unit. 696 + * Return: True if exact match table is supported/enabled. 697 + */ 698 + bool rvu_npc_exact_has_match_table(struct rvu *rvu) 699 + { 700 + return rvu->hw->cap.npc_exact_match_enabled; 701 + } 702 + 703 + /** 704 + * __rvu_npc_exact_find_entry_by_seq_id - find entry by id 705 + * @rvu: resource virtualization unit. 706 + * @seq_id: Sequence identifier. 707 + * 708 + * Caller should acquire the lock. 709 + * Return: Pointer to table entry. 
710 + */ 711 + static struct npc_exact_table_entry * 712 + __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id) 713 + { 714 + struct npc_exact_table *table = rvu->hw->table; 715 + struct npc_exact_table_entry *entry = NULL; 716 + struct list_head *lhead; 717 + 718 + lhead = &table->lhead_gbl; 719 + 720 + /* traverse to find the matching entry */ 721 + list_for_each_entry(entry, lhead, glist) { 722 + if (entry->seq_id != seq_id) 723 + continue; 724 + 725 + return entry; 726 + } 727 + 728 + return NULL; 729 + } 730 + 731 + /** 732 + * rvu_npc_exact_add_to_list - Add entry to list 733 + * @rvu: resource virtualization unit. 734 + * @opc_type: OPCODE to select MEM/CAM table. 735 + * @ways: MEM table ways. 736 + * @index: Index in MEM/CAM table. 737 + * @cgx_id: CGX identifier. 738 + * @lmac_id: LMAC identifier. 739 + * @mac_addr: MAC address. 740 + * @chan: Channel number. 741 + * @ctype: Channel Type. 742 + * @seq_id: Sequence identifier 743 + * @cmd: True if function is called by ethtool cmd 744 + * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam. 745 + * @pcifunc: pci function 746 + * Return: 0 upon success. 
747 + */ 748 + static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways, 749 + u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan, 750 + u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc) 751 + { 752 + struct npc_exact_table_entry *entry, *tmp, *iter; 753 + struct npc_exact_table *table = rvu->hw->table; 754 + struct list_head *lhead, *pprev; 755 + 756 + WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS); 757 + 758 + if (!rvu_npc_exact_alloc_id(rvu, seq_id)) { 759 + dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__); 760 + return -EFAULT; 761 + } 762 + 763 + entry = kmalloc(sizeof(*entry), GFP_KERNEL); 764 + if (!entry) { 765 + rvu_npc_exact_free_id(rvu, *seq_id); 766 + dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__); 767 + return -ENOMEM; 768 + } 769 + 770 + mutex_lock(&table->lock); 771 + switch (opc_type) { 772 + case NPC_EXACT_OPC_CAM: 773 + lhead = &table->lhead_cam_tbl_entry; 774 + table->cam_tbl_entry_cnt++; 775 + break; 776 + 777 + case NPC_EXACT_OPC_MEM: 778 + lhead = &table->lhead_mem_tbl_entry[ways]; 779 + table->mem_tbl_entry_cnt++; 780 + break; 781 + 782 + default: 783 + mutex_unlock(&table->lock); 784 + kfree(entry); 785 + rvu_npc_exact_free_id(rvu, *seq_id); 786 + 787 + dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type); 788 + return -EINVAL; 789 + } 790 + 791 + /* Add to global list */ 792 + INIT_LIST_HEAD(&entry->glist); 793 + list_add_tail(&entry->glist, &table->lhead_gbl); 794 + INIT_LIST_HEAD(&entry->list); 795 + entry->index = index; 796 + entry->ways = ways; 797 + entry->opc_type = opc_type; 798 + 799 + entry->pcifunc = pcifunc; 800 + 801 + ether_addr_copy(entry->mac, mac_addr); 802 + entry->chan = chan; 803 + entry->ctype = ctype; 804 + entry->cgx_id = cgx_id; 805 + entry->lmac_id = lmac_id; 806 + 807 + entry->seq_id = *seq_id; 808 + 809 + entry->mcam_idx = mcam_idx; 810 + entry->cmd = cmd; 811 + 812 + pprev = lhead; 813 + 814 + /* Insert entry in ascending order 
of index */ 815 + list_for_each_entry_safe(iter, tmp, lhead, list) { 816 + if (index < iter->index) 817 + break; 818 + 819 + pprev = &iter->list; 820 + } 821 + 822 + /* Add to each table list */ 823 + list_add(&entry->list, pprev); 824 + mutex_unlock(&table->lock); 825 + return 0; 826 + } 827 + 828 + /** 829 + * rvu_npc_exact_mem_table_write - Wrapper for register write 830 + * @rvu: resource virtualization unit. 831 + * @blkaddr: Block address 832 + * @ways: ways for MEM table. 833 + * @index: Index in MEM 834 + * @mdata: Meta data to be written to register. 835 + */ 836 + static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways, 837 + u32 index, u64 mdata) 838 + { 839 + rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata); 840 + } 841 + 842 + /** 843 + * rvu_npc_exact_cam_table_write - Wrapper for register write 844 + * @rvu: resource virtualization unit. 845 + * @blkaddr: Block address 846 + * @index: Index in MEM 847 + * @mdata: Meta data to be written to register. 848 + */ 849 + static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr, 850 + u32 index, u64 mdata) 851 + { 852 + rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata); 853 + } 854 + 855 + /** 856 + * rvu_npc_exact_dealloc_table_entry - dealloc table entry 857 + * @rvu: resource virtualization unit. 858 + * @opc_type: OPCODE for selection of table(MEM or CAM) 859 + * @ways: ways if opc_type is MEM table. 860 + * @index: Index of MEM or CAM table. 861 + * Return: 0 upon success. 
862 + */ 863 + static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type, 864 + u8 ways, u32 index) 865 + { 866 + int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 867 + struct npc_exact_table *table; 868 + u8 null_dmac[6] = { 0 }; 869 + int depth; 870 + 871 + /* Prepare entry with all fields set to zero */ 872 + u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac); 873 + 874 + table = rvu->hw->table; 875 + depth = table->mem_table.depth; 876 + 877 + mutex_lock(&table->lock); 878 + 879 + switch (opc_type) { 880 + case NPC_EXACT_OPC_CAM: 881 + 882 + /* Check whether entry is used already */ 883 + if (!test_bit(index, table->cam_table.bmap)) { 884 + mutex_unlock(&table->lock); 885 + dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n", 886 + __func__, ways, index); 887 + return -EINVAL; 888 + } 889 + 890 + rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata); 891 + clear_bit(index, table->cam_table.bmap); 892 + break; 893 + 894 + case NPC_EXACT_OPC_MEM: 895 + 896 + /* Check whether entry is used already */ 897 + if (!test_bit(index + ways * depth, table->mem_table.bmap)) { 898 + mutex_unlock(&table->lock); 899 + dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n", 900 + __func__, index); 901 + return -EINVAL; 902 + } 903 + 904 + rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata); 905 + clear_bit(index + ways * depth, table->mem_table.bmap); 906 + break; 907 + 908 + default: 909 + mutex_unlock(&table->lock); 910 + dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type); 911 + return -ENOSPC; 912 + } 913 + 914 + mutex_unlock(&table->lock); 915 + 916 + dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n", 917 + __func__, index, ways, opc_type); 918 + 919 + return 0; 920 + } 921 + 922 + /** 923 + * rvu_npc_exact_alloc_table_entry - Allociate an entry 924 + * @rvu: resource virtualization unit. 
925 + * @mac: MAC address. 926 + * @chan: Channel number. 927 + * @ctype: Channel Type. 928 + * @index: Index of MEM table or CAM table. 929 + * @ways: Ways. Only valid for MEM table. 930 + * @opc_type: OPCODE to select table (MEM or CAM) 931 + * 932 + * Try allocating a slot from MEM table. If all 4 ways 933 + * slot are full for a hash index, check availability in 934 + * 32-entry CAM table for allocation. 935 + * Return: 0 upon success. 936 + */ 937 + static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype, 938 + u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type) 939 + { 940 + struct npc_exact_table *table; 941 + unsigned int hash; 942 + int err; 943 + 944 + table = rvu->hw->table; 945 + 946 + /* Check in 4-ways mem entry for free slote */ 947 + hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask, 948 + table->mem_table.depth); 949 + err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash); 950 + if (!err) { 951 + *opc_type = NPC_EXACT_OPC_MEM; 952 + dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n", 953 + __func__, *ways, *index); 954 + return 0; 955 + } 956 + 957 + dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__); 958 + 959 + /* wayss is 0 for cam table */ 960 + *ways = 0; 961 + err = rvu_npc_exact_alloc_cam_table_entry(rvu, index); 962 + if (!err) { 963 + *opc_type = NPC_EXACT_OPC_CAM; 964 + dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n", 965 + __func__, *index); 966 + return 0; 967 + } 968 + 969 + dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__); 970 + return -ENOSPC; 971 + } 972 + 973 + /** 974 + * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base. 975 + * @rvu: resource virtualization unit. 976 + * @drop_mcam_idx: Drop rule index in NPC mcam. 977 + * @chan_val: Channel value. 978 + * @chan_mask: Channel Mask. 
979 + * @pcifunc: pcifunc of interface. 980 + * Return: True upon success. 981 + */ 982 + static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx, 983 + u64 chan_val, u64 chan_mask, u16 pcifunc) 984 + { 985 + struct npc_exact_table *table; 986 + int i; 987 + 988 + table = rvu->hw->table; 989 + 990 + for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { 991 + if (!table->drop_rule_map[i].valid) 992 + break; 993 + 994 + if (table->drop_rule_map[i].chan_val != (u16)chan_val) 995 + continue; 996 + 997 + if (table->drop_rule_map[i].chan_mask != (u16)chan_mask) 998 + continue; 999 + 1000 + return false; 1001 + } 1002 + 1003 + if (i == NPC_MCAM_DROP_RULE_MAX) 1004 + return false; 1005 + 1006 + table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx; 1007 + table->drop_rule_map[i].chan_val = (u16)chan_val; 1008 + table->drop_rule_map[i].chan_mask = (u16)chan_mask; 1009 + table->drop_rule_map[i].pcifunc = pcifunc; 1010 + table->drop_rule_map[i].valid = true; 1011 + return true; 1012 + } 1013 + 1014 + /** 1015 + * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask. 1016 + * @rvu: resource virtualization unit. 1017 + * @intf_type: Interface type (SDK, LBK or CGX) 1018 + * @cgx_id: CGX identifier. 1019 + * @lmac_id: LAMC identifier. 1020 + * @val: Channel number. 1021 + * @mask: Channel mask. 1022 + * Return: True upon success. 
1023 + */ 1024 + static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type, 1025 + u8 cgx_id, u8 lmac_id, 1026 + u64 *val, u64 *mask) 1027 + { 1028 + u16 chan_val, chan_mask; 1029 + 1030 + /* No support for SDP and LBK */ 1031 + if (intf_type != NIX_INTF_TYPE_CGX) 1032 + return false; 1033 + 1034 + chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 1035 + chan_mask = 0xfff; 1036 + 1037 + if (val) 1038 + *val = chan_val; 1039 + 1040 + if (mask) 1041 + *mask = chan_mask; 1042 + 1043 + return true; 1044 + } 1045 + 1046 + /** 1047 + * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc 1048 + * @rvu: resource virtualization unit. 1049 + * @drop_rule_idx: Drop rule index in NPC mcam. 1050 + * 1051 + * Debugfs (exact_drop_cnt) entry displays pcifunc for interface 1052 + * by retrieving the pcifunc value from data base. 1053 + * Return: Drop rule index. 1054 + */ 1055 + u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx) 1056 + { 1057 + struct npc_exact_table *table; 1058 + int i; 1059 + 1060 + table = rvu->hw->table; 1061 + 1062 + for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { 1063 + if (!table->drop_rule_map[i].valid) 1064 + break; 1065 + 1066 + if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx) 1067 + continue; 1068 + 1069 + return table->drop_rule_map[i].pcifunc; 1070 + } 1071 + 1072 + dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n", 1073 + __func__, drop_rule_idx); 1074 + return -1; 1075 + } 1076 + 1077 + /** 1078 + * rvu_npc_exact_get_drop_rule_info - Get drop rule information. 1079 + * @rvu: resource virtualization unit. 1080 + * @intf_type: Interface type (CGX, SDP or LBK) 1081 + * @cgx_id: CGX identifier. 1082 + * @lmac_id: LMAC identifier. 1083 + * @drop_mcam_idx: NPC mcam drop rule index. 1084 + * @val: Channel value. 1085 + * @mask: Channel mask. 1086 + * @pcifunc: pcifunc of interface corresponding to the drop rule. 1087 + * Return: True upon success. 
 */
static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
					     u64 *mask, u16 *pcifunc)
{
	struct npc_exact_table *table;
	u64 chan_val, chan_mask;
	bool rc;
	int i;

	table = rvu->hw->table;

	if (intf_type != NIX_INTF_TYPE_CGX) {
		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
		return false;
	}

	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
							lmac_id, &chan_val, &chan_mask);
	if (!rc)
		return false;

	/* Match only on channel value; outputs are filled on first hit */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
			continue;

		if (val)
			*val = table->drop_rule_map[i].chan_val;
		if (mask)
			*mask = table->drop_rule_map[i].chan_mask;
		if (pcifunc)
			*pcifunc = table->drop_rule_map[i].pcifunc;

		*drop_mcam_idx = i;
		return true;
	}

	if (i == NPC_MCAM_DROP_RULE_MAX) {
		/* NOTE(review): *drop_mcam_idx has not been written on this
		 * path, so the message below prints an uninitialized caller
		 * variable — confirm and log 'i' or the max instead.
		 */
		dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
			__func__, *drop_mcam_idx);
		return false;
	}

	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
		__func__, cgx_id, lmac_id);
	return false;
}

/**
 * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule.
 * @rvu: resource virtualization unit.
 * @drop_mcam_idx: NPC mcam drop rule index.
 * @val: +1 or -1.
 * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
 *
 * when first exact match entry against a drop rule is added, enable_or_disable_cam
 * is set to true. When last exact match entry against a drop rule is deleted,
 * enable_or_disable_cam is set to true.
 * Caller must hold table->lock (callers do; counters are not atomic).
 * Return: Number of rules
 */
static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
						int val, bool *enable_or_disable_cam)
{
	struct npc_exact_table *table;
	u16 *cnt, old_cnt;
	bool promisc;

	table = rvu->hw->table;
	promisc = table->promisc_mode[drop_mcam_idx];

	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
	old_cnt = *cnt;

	*cnt += val;

	/* val == 0 with a NULL pointer is used by callers that only
	 * want to read the current count.
	 */
	if (!enable_or_disable_cam)
		goto done;

	*enable_or_disable_cam = false;

	/* In promisc mode the drop rule state is managed elsewhere */
	if (promisc)
		goto done;

	/* If all rules are deleted and not already in promisc mode; disable cam */
	if (!*cnt && val < 0) {
		*enable_or_disable_cam = true;
		goto done;
	}

	/* If rule got added and not already in promisc mode; enable cam */
	if (!old_cnt && val > 0) {
		*enable_or_disable_cam = true;
		goto done;
	}

done:
	return *cnt;
}

/**
 * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
 * @rvu: resource virtualization unit.
 * @seq_id: Sequence identifier of the entry.
 *
 * Deletes entry from linked lists and free up slot in HW MEM or CAM
 * table.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
{
	struct npc_exact_table_entry *entry = NULL;
	struct npc_exact_table *table;
	bool disable_cam = false;
	u32 drop_mcam_idx;
	int *cnt;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
	if (!entry) {
		dev_dbg(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, seq_id);
		mutex_unlock(&table->lock);
		return -ENODATA;
	}

	cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
	      &table->mem_tbl_entry_cnt;

	/* delete from lists */
	list_del_init(&entry->list);
	list_del_init(&entry->glist);

	(*cnt)--;

	/* NOTE(review): return value ignored; drop_mcam_idx may be left
	 * unset if the lookup fails — confirm this cannot happen here.
	 */
	rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id, entry->lmac_id,
					 &drop_mcam_idx, NULL, NULL, NULL);

	if (entry->cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);

	/* No dmac filter rules; disable drop on hit rule */
	if (disable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
		dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	mutex_unlock(&table->lock);

	/* HW slot is released outside the table lock; dealloc takes it again */
	rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);

	rvu_npc_exact_free_id(rvu, seq_id);

	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
		__func__, seq_id, entry->mac);
	kfree(entry);

	return 0;
}

/**
 * rvu_npc_exact_add_table_entry - Adds a table entry
 * @rvu: resource virtualization unit.
 * @cgx_id: cgx identifier.
 * @lmac_id: lmac identifier.
 * @mac: MAC address.
 * @chan: Channel number.
 * @ctype: Channel Type.
 * @seq_id: Sequence number.
 * @cmd: Whether it is invoked by ethtool cmd.
 * @mcam_idx: NPC mcam index corresponding to MAC
 * @pcifunc: PCI func.
 *
 * Creates a new exact match table entry in either CAM or
 * MEM table.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
					 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
					 u32 mcam_idx, u16 pcifunc)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	enum npc_exact_opc_type opc_type;
	bool enable_cam = false;
	u32 drop_mcam_idx;
	u32 index;
	u64 mdata;
	int err;
	u8 ways;

	/* Caller's ctype is overridden; only channel type 0 is programmed */
	ctype = 0;

	err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
	if (err) {
		dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
		return err;
	}

	/* Write mdata to table */
	mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);

	if (opc_type == NPC_EXACT_OPC_CAM)
		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
	else
		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);

	/* Insert entry to linked list */
	err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
					mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
	if (err) {
		/* Roll back the HW slot on bookkeeping failure */
		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
		dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
		return err;
	}

	rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					 &drop_mcam_idx, NULL, NULL, NULL);
	if (cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);

	/* First command rule; enable drop on hit rule */
	if (enable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
		dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	dev_dbg(rvu->dev,
		"%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
		__func__, index, mac, ways, opc_type);

	return 0;
}

/**
 * rvu_npc_exact_update_table_entry - Update exact match table.
 * @rvu: resource virtualization unit.
 * @cgx_id: CGX identifier.
 * @lmac_id: LMAC identifier.
 * @old_mac: Existing MAC address entry.
 * @new_mac: New MAC address entry.
 * @seq_id: Sequence identifier of the entry.
 *
 * Updates MAC address of an entry. If entry is in MEM table, new
 * hash value may not match with old one.
 * Return: 0 upon success.
 */
static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
					    u8 *old_mac, u8 *new_mac, u32 *seq_id)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	u32 hash_index;
	u64 mdata;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
	if (!entry) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev,
			"%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
			__func__, cgx_id, lmac_id, old_mac);
		return -ENODATA;
	}

	/* If entry is in mem table and new hash index is different than old
	 * hash index, we cannot update the entry. Fail in these scenarios.
1366 + */ 1367 + if (entry->opc_type == NPC_EXACT_OPC_MEM) { 1368 + hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype, 1369 + new_mac, table->mem_table.mask, 1370 + table->mem_table.depth); 1371 + if (hash_index != entry->index) { 1372 + dev_dbg(rvu->dev, 1373 + "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n", 1374 + __func__, hash_index, entry->index); 1375 + mutex_unlock(&table->lock); 1376 + return -EINVAL; 1377 + } 1378 + } 1379 + 1380 + mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac); 1381 + 1382 + if (entry->opc_type == NPC_EXACT_OPC_MEM) 1383 + rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata); 1384 + else 1385 + rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata); 1386 + 1387 + /* Update entry fields */ 1388 + ether_addr_copy(entry->mac, new_mac); 1389 + *seq_id = entry->seq_id; 1390 + 1391 + dev_dbg(rvu->dev, 1392 + "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n", 1393 + __func__, hash_index, entry->mac, entry->ways, entry->opc_type); 1394 + 1395 + dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n", 1396 + __func__, old_mac, new_mac); 1397 + 1398 + mutex_unlock(&table->lock); 1399 + return 0; 1400 + } 1401 + 1402 + /** 1403 + * rvu_npc_exact_promisc_disable - Disable promiscuous mode. 1404 + * @rvu: resource virtualization unit. 1405 + * @pcifunc: pcifunc 1406 + * 1407 + * Drop rule is against each PF. We dont support DMAC filter for 1408 + * VF. 
 * Return: 0 upon success
 */

int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	u32 drop_mcam_idx;
	bool *promisc;
	u32 cnt;

	table = rvu->hw->table;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	/* NOTE(review): return value ignored; drop_mcam_idx may be left
	 * unset if the lookup fails — confirm this cannot happen here.
	 */
	rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					 &drop_mcam_idx, NULL, NULL, NULL);

	mutex_lock(&table->lock);
	promisc = &table->promisc_mode[drop_mcam_idx];

	if (!*promisc) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
			__func__, cgx_id, lmac_id);
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = false;
	/* val == 0: read the current rule count without modifying it */
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* If no dmac filter entries configured, disable drop rule */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		/* promisc is now false, so !*promisc re-enables the drop rule */
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}

/**
 * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
 * @rvu: resource virtualization unit.
 * @pcifunc: pcifunc.
 * Return: 0 upon success
 */
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	u32 drop_mcam_idx;
	bool *promisc;
	u32 cnt;

	table = rvu->hw->table;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					 &drop_mcam_idx, NULL, NULL, NULL);

	mutex_lock(&table->lock);
	promisc = &table->promisc_mode[drop_mcam_idx];

	if (*promisc) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
			__func__, cgx_id, lmac_id);
		return LMAC_AF_ERR_INVALID_PARAM;
	}
	*promisc = true;
	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
	mutex_unlock(&table->lock);

	/* In promisc mode the drop rule is always disabled: both branches
	 * pass 'false' here since *promisc is now true (!*promisc == false).
	 */
	if (!cnt)
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
	else
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);

	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
		__func__, cgx_id, lmac_id, cnt);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
 * @rvu: resource virtualization unit.
 * @req: Reset request
 * @rsp: Reset response.
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
				 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	int rc;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	/* pfvf fetched only for logging the current MAC below */
	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
	if (rc) {
		/* TODO: how to handle this error case ? */
		dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
		return 0;
	}

	dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
		__func__, pfvf->mac_addr, pf, seq_id);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
 * @rvu: resource virtualization unit.
 * @req: Update request.
 * @rsp: Update response.
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
				  struct cgx_mac_addr_update_req *req,
				  struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	struct rvu_pfvf *pfvf;
	u32 seq_id, mcam_idx;
	u8 old_mac[ETH_ALEN];
	u8 cgx_id, lmac_id;
	int rc;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
		__func__, req->index, req->mac_addr);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated.
	 * Copy out the fields needed after the lock is dropped; 'entry'
	 * itself must not be used past the unlock.
	 */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
	if (!entry) {
		dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
		mutex_unlock(&table->lock);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
	}
	ether_addr_copy(old_mac, entry->mac);
	seq_id = entry->seq_id;
	mcam_idx = entry->mcam_idx;
	mutex_unlock(&table->lock);

	/* In-place update works only while the hash slot stays the same */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		/* This could be a new entry */
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
			pfvf->mac_addr, pf);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id, true,
					   mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
			req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	dev_dbg(rvu->dev,
		"%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	return 0;
}

/**
 * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
 * @rvu: resource virtualization unit.
 * @req: Add request.
 * @rsp: Add response.
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
			       struct cgx_mac_addr_add_req *req,
			       struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	int rc = 0;
	u32 seq_id;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* mcam_idx of -1: no NPC mcam entry associated with this MAC */
	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id,
					   true, -1, req->hdr.pcifunc);

	if (!rc) {
		rsp->index = seq_id;
		dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
			__func__, req->mac_addr, pf, seq_id);
		return 0;
	}

	dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
		req->mac_addr, pf);
	return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
}

/**
 * rvu_npc_exact_mac_addr_del - Delete DMAC filter
 * @rvu: resource virtualization unit.
 * @req: Delete request.
 * @rsp: Delete response.
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
			       struct cgx_mac_addr_del_req *req,
			       struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	int rc;

	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (!rc) {
		dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
			__func__, pf, req->index);
		return 0;
	}

	dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
		__func__, pf, req->index);
	return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
}

/**
 * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
 * @rvu: resource virtualization unit.
 * @req: Set request.
 * @rsp: Set response.
 * Return: 0 upon success
 */
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
			       struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u32 mcam_idx = -1;	/* sentinel: no mcam entry known yet */
	int rc, nixlf;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	/* NOTE(review): direct indexing here vs rvu_get_pfvf() used by the
	 * sibling handlers — confirm equivalence for PF pcifuncs.
	 */
	pfvf = &rvu->pf[pf];

	/* If table does not have an entry; both update entry and del table entry API
	 * below fails. Those are not failure conditions.
	 */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		ether_addr_copy(rsp->mac_addr, req->mac_addr);
		dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pf);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
			__func__, pfvf->mac_addr, pf);
	}

	/* find mcam entry if exist */
	rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
	if (!rc) {
		mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
						    nixlf, NIXLF_UCAST_ENTRY);
	}

	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id,
					   true, mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
			__func__, req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	ether_addr_copy(rsp->mac_addr, req->mac_addr);
	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	dev_dbg(rvu->dev,
		"%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pf, seq_id);
	return 0;
}

/**
 * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
 * @rvu: resource virtualization unit.
 * Return: true when the feature is enabled and no exact match entries
 * are installed (i.e. it is safe to disable); false otherwise.
 */
bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
{
	struct npc_exact_table *table = rvu->hw->table;
	bool empty;

	if (!rvu->hw->cap.npc_exact_match_enabled)
		return false;

	mutex_lock(&table->lock);
	empty = list_empty(&table->lhead_gbl);
	mutex_unlock(&table->lock);

	return empty;
}

/**
 * rvu_npc_exact_disable_feature - Disable feature.
 * @rvu: resource virtualization unit.
 */
void rvu_npc_exact_disable_feature(struct rvu *rvu)
{
	rvu->hw->cap.npc_exact_match_enabled = false;
}

/**
 * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
 * @rvu: resource virtualization unit.
 * @pcifunc: PCI func to match.
 */
void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table = rvu->hw->table;
	struct npc_exact_table_entry *tmp, *iter;
	u32 seq_id;

	mutex_lock(&table->lock);
	/* NOTE(review): the lock is dropped inside the safe-iteration so
	 * rvu_npc_exact_del_table_entry_by_id() can retake it, but 'tmp'
	 * may be freed by a concurrent delete while unlocked — confirm a
	 * restart-from-head loop isn't needed here.
	 */
	list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
		if (pcifunc != iter->pcifunc)
			continue;

		seq_id = iter->seq_id;
		dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
			pcifunc, seq_id);

		mutex_unlock(&table->lock);
		rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
		mutex_lock(&table->lock);
	}
	mutex_unlock(&table->lock);
}

/**
 * rvu_npc_exact_init - initialize exact match table
 * @rvu: resource virtualization unit.
 *
 * Initialize HW and SW resources to manage 4way-2K table and fully
 * associative 32-entry mcam table.
 * Return: 0 upon success.
1802 + */ 1803 + int rvu_npc_exact_init(struct rvu *rvu) 1804 + { 1805 + u64 bcast_mcast_val, bcast_mcast_mask; 1806 + struct npc_exact_table *table; 1807 + u64 exact_val, exact_mask; 1808 + u64 chan_val, chan_mask; 1809 + u8 cgx_id, lmac_id; 1810 + u32 *drop_mcam_idx; 1811 + u16 max_lmac_cnt; 1812 + u64 npc_const3; 1813 + int table_size; 1814 + int blkaddr; 1815 + u16 pcifunc; 1816 + int err, i; 1817 + u64 cfg; 1818 + bool rc; 1819 + 1820 + /* Read NPC_AF_CONST3 and check for have exact 1821 + * match functionality is present 1822 + */ 1823 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); 1824 + if (blkaddr < 0) { 1825 + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); 1826 + return -EINVAL; 1827 + } 1828 + 1829 + /* Check exact match feature is supported */ 1830 + npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3); 1831 + if (!(npc_const3 & BIT_ULL(62))) { 1832 + dev_info(rvu->dev, "%s: No support for exact match support\n", 1833 + __func__); 1834 + return 0; 1835 + } 1836 + 1837 + /* Check if kex profile has enabled EXACT match nibble */ 1838 + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); 1839 + if (!(cfg & NPC_EXACT_NIBBLE_HIT)) { 1840 + dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n", 1841 + __func__); 1842 + return 0; 1843 + } 1844 + 1845 + /* Set capability to true */ 1846 + rvu->hw->cap.npc_exact_match_enabled = true; 1847 + 1848 + table = kmalloc(sizeof(*table), GFP_KERNEL); 1849 + if (!table) 1850 + return -ENOMEM; 1851 + 1852 + dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__); 1853 + memset(table, 0, sizeof(*table)); 1854 + rvu->hw->table = table; 1855 + 1856 + /* Read table size, ways and depth */ 1857 + table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3); 1858 + table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3); 1859 + table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3); 1860 + 1861 + dev_dbg(rvu->dev, "%s: NPC 
exact match 4way_2k table(ways=%d, depth=%d)\n", 1862 + __func__, table->mem_table.ways, table->cam_table.depth); 1863 + 1864 + /* Check if depth of table is not a sequre of 2 1865 + * TODO: why _builtin_popcount() is not working ? 1866 + */ 1867 + if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) { 1868 + dev_err(rvu->dev, 1869 + "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n", 1870 + __func__, table->mem_table.depth); 1871 + return -EINVAL; 1872 + } 1873 + 1874 + table_size = table->mem_table.depth * table->mem_table.ways; 1875 + 1876 + /* Allocate bitmap for 4way 2K table */ 1877 + table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size), 1878 + sizeof(long), GFP_KERNEL); 1879 + if (!table->mem_table.bmap) 1880 + return -ENOMEM; 1881 + 1882 + dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__); 1883 + 1884 + /* Allocate bitmap for 32 entry mcam */ 1885 + table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL); 1886 + 1887 + if (!table->cam_table.bmap) 1888 + return -ENOMEM; 1889 + 1890 + dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__); 1891 + 1892 + table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth; 1893 + table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids), 1894 + table->tot_ids, GFP_KERNEL); 1895 + 1896 + if (!table->id_bmap) 1897 + return -ENOMEM; 1898 + 1899 + dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n", 1900 + __func__, table->tot_ids); 1901 + 1902 + /* Initialize list heads for npc_exact_table entries. 1903 + * This entry is used by debugfs to show entries in 1904 + * exact match table. 
1905 + */ 1906 + for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++) 1907 + INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]); 1908 + 1909 + INIT_LIST_HEAD(&table->lhead_cam_tbl_entry); 1910 + INIT_LIST_HEAD(&table->lhead_gbl); 1911 + 1912 + mutex_init(&table->lock); 1913 + 1914 + rvu_exact_config_secret_key(rvu); 1915 + rvu_exact_config_search_key(rvu); 1916 + 1917 + rvu_exact_config_table_mask(rvu); 1918 + rvu_exact_config_result_ctrl(rvu, table->mem_table.depth); 1919 + 1920 + /* - No drop rule for LBK 1921 + * - Drop rules for SDP and each LMAC. 1922 + */ 1923 + exact_val = !NPC_EXACT_RESULT_HIT; 1924 + exact_mask = NPC_EXACT_RESULT_HIT; 1925 + 1926 + /* nibble - 3 2 1 0 1927 + * L3B L3M L2B L2M 1928 + */ 1929 + bcast_mcast_val = 0b0000; 1930 + bcast_mcast_mask = 0b0011; 1931 + 1932 + /* Install SDP drop rule */ 1933 + drop_mcam_idx = &table->num_drop_rules; 1934 + 1935 + max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE; 1936 + for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) { 1937 + if (rvu->pf2cgxlmac_map[i] == 0xFF) 1938 + continue; 1939 + 1940 + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id); 1941 + 1942 + rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id, 1943 + lmac_id, &chan_val, &chan_mask); 1944 + if (!rc) { 1945 + dev_err(rvu->dev, 1946 + "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n", 1947 + __func__, chan_val, chan_mask, *drop_mcam_idx); 1948 + return -EINVAL; 1949 + } 1950 + 1951 + /* Filter rules are only for PF */ 1952 + pcifunc = RVU_PFFUNC(i, 0); 1953 + 1954 + dev_dbg(rvu->dev, 1955 + "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n", 1956 + __func__, cgx_id, lmac_id, chan_val, chan_mask); 1957 + 1958 + rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules, 1959 + chan_val, chan_mask, pcifunc); 1960 + if (!rc) { 1961 + dev_err(rvu->dev, 1962 + "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n", 1963 + __func__, cgx_id, lmac_id, 
chan_val); 1964 + return err; 1965 + } 1966 + 1967 + err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx, 1968 + &table->counter_idx[*drop_mcam_idx], 1969 + chan_val, chan_mask, 1970 + exact_val, exact_mask, 1971 + bcast_mcast_val, bcast_mcast_mask); 1972 + if (err) { 1973 + dev_err(rvu->dev, 1974 + "failed to configure drop rule (cgx=%d lmac=%d)\n", 1975 + cgx_id, lmac_id); 1976 + return err; 1977 + } 1978 + 1979 + (*drop_mcam_idx)++; 1980 + } 1981 + 1982 + dev_info(rvu->dev, "initialized exact match table successfully\n"); 1983 + return 0; 1984 + }
+233
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2022 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef __RVU_NPC_HASH_H 9 + #define __RVU_NPC_HASH_H 10 + 11 + #define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1 12 + #define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4 13 + #define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7 14 + 15 + #define NPC_MAX_HASH 2 16 + #define NPC_MAX_HASH_MASK 2 17 + 18 + #define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \ 19 + ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \ 20 + ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F)) 21 + #define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \ 22 + (((hdr_ofs) << 32) | ((bytesm1) << 16) | \ 23 + ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \ 24 + ((ltype_match) << 4) | ((ltype_mask) & 0xF)) 25 + 26 + #define SET_KEX_LD_HASH(intf, ld, cfg) \ 27 + rvu_write64(rvu, blkaddr, \ 28 + NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg) 29 + 30 + #define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \ 31 + rvu_write64(rvu, blkaddr, \ 32 + NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg) 33 + 34 + #define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \ 35 + rvu_write64(rvu, blkaddr, \ 36 + NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg) 37 + 38 + struct npc_mcam_kex_hash { 39 + /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */ 40 + bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; 41 + /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */ 42 + u64 hash[NPC_MAX_INTF][NPC_MAX_HASH]; 43 + /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */ 44 + u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK]; 45 + /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */ 46 + u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH]; 47 + } __packed; 48 + 49 + void npc_update_field_hash(struct rvu *rvu, u8 intf, 50 + struct mcam_entry *entry, 51 + int blkaddr, 52 + u64 features, 53 + struct flow_msg *pkt, 54 + 
struct flow_msg *mask, 55 + struct flow_msg *opkt, 56 + struct flow_msg *omask); 57 + void npc_config_secret_key(struct rvu *rvu, int blkaddr); 58 + void npc_program_mkex_hash(struct rvu *rvu, int blkaddr); 59 + u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash, 60 + u64 *secret_key, u8 intf, u8 hash_idx); 61 + 62 + static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = { 63 + .lid_lt_ld_hash_en = { 64 + [NIX_INTF_RX] = { 65 + [NPC_LID_LC] = { 66 + [NPC_LT_LC_IP6] = { 67 + true, 68 + true, 69 + }, 70 + }, 71 + }, 72 + 73 + [NIX_INTF_TX] = { 74 + [NPC_LID_LC] = { 75 + [NPC_LT_LC_IP6] = { 76 + true, 77 + true, 78 + }, 79 + }, 80 + }, 81 + }, 82 + 83 + .hash = { 84 + [NIX_INTF_RX] = { 85 + KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 86 + KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 87 + }, 88 + 89 + [NIX_INTF_TX] = { 90 + KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 91 + KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf), 92 + }, 93 + }, 94 + 95 + .hash_mask = { 96 + [NIX_INTF_RX] = { 97 + [0] = { 98 + GENMASK_ULL(63, 0), 99 + GENMASK_ULL(63, 0), 100 + }, 101 + [1] = { 102 + GENMASK_ULL(63, 0), 103 + GENMASK_ULL(63, 0), 104 + }, 105 + }, 106 + 107 + [NIX_INTF_TX] = { 108 + [0] = { 109 + GENMASK_ULL(63, 0), 110 + GENMASK_ULL(63, 0), 111 + }, 112 + [1] = { 113 + GENMASK_ULL(63, 0), 114 + GENMASK_ULL(63, 0), 115 + }, 116 + }, 117 + }, 118 + 119 + .hash_ctrl = { 120 + [NIX_INTF_RX] = { 121 + [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */ 122 + [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */ 123 + }, 124 + 125 + [NIX_INTF_TX] = { 126 + [0] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. */ 127 + [1] = GENMASK_ULL(63, 32), /* MSB 32 bit is mask and LSB 32 bit is offset. 
*/ 128 + }, 129 + }, 130 + }; 131 + 132 + /* If exact match table support is enabled, enable drop rules */ 133 + #define NPC_MCAM_DROP_RULE_MAX 30 134 + #define NPC_MCAM_SDP_DROP_RULE_IDX 0 135 + 136 + #define RVU_PFFUNC(pf, func) \ 137 + ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \ 138 + (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT)) 139 + 140 + enum npc_exact_opc_type { 141 + NPC_EXACT_OPC_MEM, 142 + NPC_EXACT_OPC_CAM, 143 + }; 144 + 145 + struct npc_exact_table_entry { 146 + struct list_head list; 147 + struct list_head glist; 148 + u32 seq_id; /* Sequence number of entry */ 149 + u32 index; /* Mem table or cam table index */ 150 + u32 mcam_idx; 151 + /* Mcam index. This is valid only if "cmd" field is false */ 152 + enum npc_exact_opc_type opc_type; 153 + u16 chan; 154 + u16 pcifunc; 155 + u8 ways; 156 + u8 mac[ETH_ALEN]; 157 + u8 ctype; 158 + u8 cgx_id; 159 + u8 lmac_id; 160 + bool cmd; /* Is added by ethtool command ? */ 161 + }; 162 + 163 + struct npc_exact_table { 164 + struct mutex lock; /* entries update lock */ 165 + unsigned long *id_bmap; 166 + int num_drop_rules; 167 + u32 tot_ids; 168 + u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX]; 169 + u16 counter_idx[NPC_MCAM_DROP_RULE_MAX]; 170 + bool promisc_mode[NPC_MCAM_DROP_RULE_MAX]; 171 + struct { 172 + int ways; 173 + int depth; 174 + unsigned long *bmap; 175 + u64 mask; // Masks before hash calculation. 
176 + u16 hash_mask; // 11 bits for hash mask 177 + u16 hash_offset; // 11 bits offset 178 + } mem_table; 179 + 180 + struct { 181 + int depth; 182 + unsigned long *bmap; 183 + } cam_table; 184 + 185 + struct { 186 + bool valid; 187 + u16 chan_val; 188 + u16 chan_mask; 189 + u16 pcifunc; 190 + u8 drop_rule_idx; 191 + } drop_rule_map[NPC_MCAM_DROP_RULE_MAX]; 192 + 193 + #define NPC_EXACT_TBL_MAX_WAYS 4 194 + 195 + struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS]; 196 + int mem_tbl_entry_cnt; 197 + 198 + struct list_head lhead_cam_tbl_entry; 199 + int cam_tbl_entry_cnt; 200 + 201 + struct list_head lhead_gbl; 202 + }; 203 + 204 + bool rvu_npc_exact_has_match_table(struct rvu *rvu); 205 + u32 rvu_npc_exact_get_max_entries(struct rvu *rvu); 206 + int rvu_npc_exact_init(struct rvu *rvu); 207 + int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, 208 + struct msg_rsp *rsp); 209 + 210 + int rvu_npc_exact_mac_addr_update(struct rvu *rvu, 211 + struct cgx_mac_addr_update_req *req, 212 + struct cgx_mac_addr_update_rsp *rsp); 213 + 214 + int rvu_npc_exact_mac_addr_add(struct rvu *rvu, 215 + struct cgx_mac_addr_add_req *req, 216 + struct cgx_mac_addr_add_rsp *rsp); 217 + 218 + int rvu_npc_exact_mac_addr_del(struct rvu *rvu, 219 + struct cgx_mac_addr_del_req *req, 220 + struct msg_rsp *rsp); 221 + 222 + int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, 223 + struct cgx_mac_addr_set_or_get *rsp); 224 + 225 + void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc); 226 + 227 + bool rvu_npc_exact_can_disable_feature(struct rvu *rvu); 228 + void rvu_npc_exact_disable_feature(struct rvu *rvu); 229 + void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc); 230 + u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx); 231 + int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc); 232 + int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc); 233 + #endif /* RVU_NPC_HASH_H */
+15
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
··· 565 565 #define NPC_AF_PCK_DEF_OIP4 (0x00620) 566 566 #define NPC_AF_PCK_DEF_OIP6 (0x00630) 567 567 #define NPC_AF_PCK_DEF_IIP4 (0x00640) 568 + #define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3) 569 + #define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3) 568 570 #define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3) 571 + #define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4) 572 + #define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3) 573 + #define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3) 574 + #define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3) 569 575 #define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8) 570 576 #define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6) 571 577 #define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6) ··· 604 598 #define NPC_AF_MCAM_DBG (0x3001000) 605 599 #define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4) 606 600 #define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4) 601 + 602 + #define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3) 603 + #define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3) 604 + #define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3) 605 + #define NPC_AF_INTFX_EXACT_RESULT_CTL(a)(0x680 | (a) << 3) 606 + #define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3) 607 + #define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3) 608 + #define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3) 609 + #define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3) 607 610 608 611 #define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \ 609 612 u64 offset; \
+5 -5
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 314 314 #define OTX2_VF_VLAN_TX_INDEX 1 315 315 u16 max_flows; 316 316 u8 dmacflt_max_flows; 317 - u8 *bmap_to_dmacindex; 318 - unsigned long dmacflt_bmap; 317 + u32 *bmap_to_dmacindex; 318 + unsigned long *dmacflt_bmap; 319 319 struct list_head flow_list; 320 320 }; 321 321 ··· 895 895 int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic); 896 896 /* CGX/RPM DMAC filters support */ 897 897 int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); 898 - int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); 899 - int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); 900 - int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos); 898 + int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); 899 + int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); 900 + int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos); 901 901 void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf); 902 902 void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf); 903 903
+35 -13
drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
··· 8 8 #include "otx2_common.h" 9 9 10 10 static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, 11 - u8 *dmac_index) 11 + u32 *dmac_index) 12 12 { 13 13 struct cgx_mac_addr_add_req *req; 14 14 struct cgx_mac_addr_add_rsp *rsp; ··· 35 35 return err; 36 36 } 37 37 38 - static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) 38 + static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index) 39 39 { 40 40 struct cgx_mac_addr_set_or_get *req; 41 + struct cgx_mac_addr_set_or_get *rsp; 41 42 int err; 42 43 43 44 mutex_lock(&pf->mbox.lock); ··· 49 48 return -ENOMEM; 50 49 } 51 50 51 + req->index = *dmac_index; 52 + 52 53 ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); 53 54 err = otx2_sync_mbox_msg(&pf->mbox); 55 + 56 + if (!err) { 57 + rsp = (struct cgx_mac_addr_set_or_get *) 58 + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); 59 + *dmac_index = rsp->index; 60 + } 54 61 55 62 mutex_unlock(&pf->mbox.lock); 56 63 return err; 57 64 } 58 65 59 - int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) 66 + int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos) 60 67 { 61 - u8 *dmacindex; 68 + u32 *dmacindex; 62 69 63 70 /* Store dmacindex returned by CGX/RPM driver which will 64 71 * be used for macaddr update/remove ··· 74 65 dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 75 66 76 67 if (ether_addr_equal(mac, pf->netdev->dev_addr)) 77 - return otx2_dmacflt_add_pfmac(pf); 68 + return otx2_dmacflt_add_pfmac(pf, dmacindex); 78 69 else 79 70 return otx2_dmacflt_do_add(pf, mac, dmacindex); 80 71 } 81 72 82 73 static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, 83 - u8 dmac_index) 74 + u32 dmac_index) 84 75 { 85 76 struct cgx_mac_addr_del_req *req; 86 77 int err; ··· 100 91 return err; 101 92 } 102 93 103 - static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) 94 + static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index) 104 95 { 105 - struct msg_req *req; 96 + struct 
cgx_mac_addr_reset_req *req; 106 97 int err; 107 98 108 99 mutex_lock(&pf->mbox.lock); ··· 111 102 mutex_unlock(&pf->mbox.lock); 112 103 return -ENOMEM; 113 104 } 105 + req->index = dmac_index; 114 106 115 107 err = otx2_sync_mbox_msg(&pf->mbox); 116 108 ··· 120 110 } 121 111 122 112 int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, 123 - u8 bit_pos) 113 + u32 bit_pos) 124 114 { 125 - u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 115 + u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 126 116 127 117 if (ether_addr_equal(mac, pf->netdev->dev_addr)) 128 - return otx2_dmacflt_remove_pfmac(pf); 118 + return otx2_dmacflt_remove_pfmac(pf, dmacindex); 129 119 else 130 120 return otx2_dmacflt_do_remove(pf, mac, dmacindex); 131 121 } ··· 161 151 return err; 162 152 } 163 153 164 - int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) 154 + int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos) 165 155 { 166 156 struct cgx_mac_addr_update_req *req; 157 + struct cgx_mac_addr_update_rsp *rsp; 167 158 int rc; 168 159 169 160 mutex_lock(&pf->mbox.lock); ··· 178 167 179 168 ether_addr_copy(req->mac_addr, mac); 180 169 req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; 181 - rc = otx2_sync_mbox_msg(&pf->mbox); 182 170 171 + /* check the response and change index */ 172 + 173 + rc = otx2_sync_mbox_msg(&pf->mbox); 174 + if (rc) 175 + goto out; 176 + 177 + rsp = (struct cgx_mac_addr_update_rsp *) 178 + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); 179 + 180 + pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index; 181 + 182 + out: 183 183 mutex_unlock(&pf->mbox.lock); 184 184 return rc; 185 185 }
+28 -12
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
··· 18 18 struct ethtool_rx_flow_spec flow_spec; 19 19 struct list_head list; 20 20 u32 location; 21 - u16 entry; 21 + u32 entry; 22 22 bool is_vf; 23 23 u8 rss_ctx_id; 24 24 #define DMAC_FILTER_RULE BIT(0) ··· 232 232 return 0; 233 233 } 234 234 235 + /* TODO : revisit on size */ 236 + #define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32) 237 + 235 238 int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) 236 239 { 237 240 struct otx2_flow_config *flow_cfg; ··· 243 240 sizeof(struct otx2_flow_config), 244 241 GFP_KERNEL); 245 242 if (!pfvf->flow_cfg) 243 + return -ENOMEM; 244 + 245 + pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev, 246 + BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), 247 + sizeof(long), GFP_KERNEL); 248 + if (!pfvf->flow_cfg->dmacflt_bmap) 246 249 return -ENOMEM; 247 250 248 251 flow_cfg = pfvf->flow_cfg; ··· 266 257 pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config), 267 258 GFP_KERNEL); 268 259 if (!pf->flow_cfg) 260 + return -ENOMEM; 261 + 262 + pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev, 263 + BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), 264 + sizeof(long), GFP_KERNEL); 265 + if (!pf->flow_cfg->dmacflt_bmap) 269 266 return -ENOMEM; 270 267 271 268 INIT_LIST_HEAD(&pf->flow_cfg->flow_list); ··· 299 284 return 0; 300 285 301 286 pf->flow_cfg->bmap_to_dmacindex = 302 - devm_kzalloc(pf->dev, sizeof(u8) * 287 + devm_kzalloc(pf->dev, sizeof(u32) * 303 288 pf->flow_cfg->dmacflt_max_flows, 304 289 GFP_KERNEL); 305 290 ··· 370 355 { 371 356 struct otx2_nic *pf = netdev_priv(netdev); 372 357 373 - if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap, 358 + if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap, 374 359 pf->flow_cfg->dmacflt_max_flows)) 375 360 netdev_warn(netdev, 376 361 "Add %pM to CGX/RPM DMAC filters list as well\n", ··· 453 438 return 0; 454 439 455 440 if (flow_cfg->nr_flows == flow_cfg->max_flows || 456 - !bitmap_empty(&flow_cfg->dmacflt_bmap, 441 + !bitmap_empty(flow_cfg->dmacflt_bmap, 457 442 flow_cfg->dmacflt_max_flows)) 458 443 
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows; 459 444 else ··· 1025 1010 1026 1011 otx2_add_flow_to_list(pfvf, pf_mac); 1027 1012 pfvf->flow_cfg->nr_flows++; 1028 - set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); 1013 + set_bit(0, pfvf->flow_cfg->dmacflt_bmap); 1029 1014 1030 1015 return 0; 1031 1016 } ··· 1079 1064 return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 1080 1065 flow->entry); 1081 1066 1082 - if (bitmap_full(&flow_cfg->dmacflt_bmap, 1067 + if (bitmap_full(flow_cfg->dmacflt_bmap, 1083 1068 flow_cfg->dmacflt_max_flows)) { 1084 1069 netdev_warn(pfvf->netdev, 1085 1070 "Can't insert the rule %d as max allowed dmac filters are %d\n", ··· 1093 1078 } 1094 1079 1095 1080 /* Install PF mac address to DMAC filter list */ 1096 - if (!test_bit(0, &flow_cfg->dmacflt_bmap)) 1081 + if (!test_bit(0, flow_cfg->dmacflt_bmap)) 1097 1082 otx2_add_flow_with_pfmac(pfvf, flow); 1098 1083 1099 1084 flow->rule_type |= DMAC_FILTER_RULE; 1100 - flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, 1085 + flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap, 1101 1086 flow_cfg->dmacflt_max_flows); 1102 1087 fsp->location = flow_cfg->max_flows + flow->entry; 1103 1088 flow->flow_spec.location = fsp->location; 1104 1089 flow->location = fsp->location; 1105 1090 1106 - set_bit(flow->entry, &flow_cfg->dmacflt_bmap); 1091 + set_bit(flow->entry, flow_cfg->dmacflt_bmap); 1107 1092 otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); 1108 1093 1109 1094 } else { ··· 1169 1154 if (req == DMAC_ADDR_DEL) { 1170 1155 otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, 1171 1156 0); 1172 - clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); 1157 + clear_bit(0, pfvf->flow_cfg->dmacflt_bmap); 1173 1158 found = true; 1174 1159 } else { 1175 1160 ether_addr_copy(eth_hdr->h_dest, 1176 1161 pfvf->netdev->dev_addr); 1162 + 1177 1163 otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); 1178 1164 } 1179 1165 break; ··· 1210 1194 1211 1195 err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, 1212 1196 
flow->entry); 1213 - clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); 1197 + clear_bit(flow->entry, flow_cfg->dmacflt_bmap); 1214 1198 /* If all dmac filters are removed delete macfilter with 1215 1199 * interface mac address and configure CGX/RPM block in 1216 1200 * promiscuous mode 1217 1201 */ 1218 - if (bitmap_weight(&flow_cfg->dmacflt_bmap, 1202 + if (bitmap_weight(flow_cfg->dmacflt_bmap, 1219 1203 flow_cfg->dmacflt_max_flows) == 1) 1220 1204 otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); 1221 1205 } else {
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1120 1120 struct msg_req *msg; 1121 1121 int err; 1122 1122 1123 - if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap, 1123 + if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap, 1124 1124 pf->flow_cfg->dmacflt_max_flows)) 1125 1125 netdev_warn(pf->netdev, 1126 1126 "CGX/RPM internal loopback might not work as DMAC filters are active\n");