Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'bnxt_en-ntuple-filter-improvements'

Michael Chan says:

====================
bnxt_en: Ntuple filter improvements

The current Ntuple filter implementation has a limitation on 5750X (P5)
and newer chips. The destination ring of the ntuple filter must be
a valid ring in the RSS indirection table. Ntuple filters may not work
if the RSS indirection table is modified by the user to only contain a
subset of the rings. If an ntuple filter is set to a ring destination
that is not in the RSS indirection table, the packet matching that
filter will be placed in a random ring instead of the specified
destination ring.

This series of patches will fix the problem by using a separate VNIC
for ntuple filters. The default VNIC will be dedicated for RSS and
so the indirection table can be setup in any way and will not affect
ntuple filters using the separate VNIC.

Quite a bit of refactoring is needed to do the VNIC and RSS
context accounting in the first few patches. This is technically a
bug fix, but I think the changes are too big for -net.
====================

Link: https://lore.kernel.org/r/20240220230317.96341-1-michael.chan@broadcom.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+311 -200
+289 -198
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4211 4211 int num_vnics = 1; 4212 4212 4213 4213 #ifdef CONFIG_RFS_ACCEL 4214 - if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS) 4215 - num_vnics += bp->rx_nr_rings; 4214 + if (bp->flags & BNXT_FLAG_RFS) { 4215 + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 4216 + num_vnics++; 4217 + else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4218 + num_vnics += bp->rx_nr_rings; 4219 + } 4216 4220 #endif 4217 4221 4218 4222 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) ··· 4233 4229 4234 4230 static void bnxt_init_vnics(struct bnxt *bp) 4235 4231 { 4232 + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 4236 4233 int i; 4237 4234 4238 4235 for (i = 0; i < bp->nr_vnics; i++) { ··· 4247 4242 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4248 4243 4249 4244 if (bp->vnic_info[i].rss_hash_key) { 4250 - if (!i) { 4245 + if (i == BNXT_VNIC_DEFAULT) { 4251 4246 u8 *key = (void *)vnic->rss_hash_key; 4252 4247 int k; 4253 4248 ··· 4273 4268 bp->toeplitz_prefix |= key[k]; 4274 4269 } 4275 4270 } else { 4276 - memcpy(vnic->rss_hash_key, 4277 - bp->vnic_info[0].rss_hash_key, 4271 + memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, 4278 4272 HW_HASH_KEY_SIZE); 4279 4273 } 4280 4274 } ··· 5004 5000 5005 5001 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 5006 5002 { 5003 + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 5007 5004 int i, j, rc, size, arr_size; 5008 5005 void *bnapi; 5009 5006 ··· 5133 5128 if (rc) 5134 5129 goto alloc_mem_err; 5135 5130 5136 - bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 5137 - BNXT_VNIC_UCAST_FLAG; 5131 + vnic0->flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | 5132 + BNXT_VNIC_UCAST_FLAG; 5133 + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) 5134 + bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= 5135 + BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; 5136 + 5138 5137 rc = bnxt_alloc_vnic_attributes(bp); 5139 5138 if (rc) 5140 5139 goto alloc_mem_err; ··· 5785 5776 mask[i] 
= cpu_to_be32(~0); 5786 5777 } 5787 5778 5779 + static void 5780 + bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, 5781 + struct hwrm_cfa_ntuple_filter_alloc_input *req, 5782 + u16 rxq) 5783 + { 5784 + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 5785 + struct bnxt_vnic_info *vnic; 5786 + u32 enables; 5787 + 5788 + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 5789 + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5790 + enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; 5791 + req->enables |= cpu_to_le32(enables); 5792 + req->rfs_ring_tbl_idx = cpu_to_le16(rxq); 5793 + } else { 5794 + u32 flags; 5795 + 5796 + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 5797 + req->flags |= cpu_to_le32(flags); 5798 + req->dst_id = cpu_to_le16(rxq); 5799 + } 5800 + } 5801 + 5788 5802 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 5789 5803 struct bnxt_ntuple_filter *fltr) 5790 5804 { ··· 5817 5785 struct flow_keys *keys = &fltr->fkeys; 5818 5786 struct bnxt_l2_filter *l2_fltr; 5819 5787 struct bnxt_vnic_info *vnic; 5820 - u32 flags = 0; 5821 5788 int rc; 5822 5789 5823 5790 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); ··· 5827 5796 req->l2_filter_id = l2_fltr->base.filter_id; 5828 5797 5829 5798 if (fltr->base.flags & BNXT_ACT_DROP) { 5830 - flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP; 5799 + req->flags = 5800 + cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); 5831 5801 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 5832 - flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 5833 - req->dst_id = cpu_to_le16(fltr->base.rxq); 5802 + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq); 5834 5803 } else { 5835 5804 vnic = &bp->vnic_info[fltr->base.rxq + 1]; 5836 5805 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 5837 5806 } 5838 - req->flags = cpu_to_le32(flags); 5839 - req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 5807 + req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 5840 5808 5841 5809 req->ethertype = htons(ETH_P_IP); 
5842 5810 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; ··· 6115 6085 for (i = 0; i < tbl_size; i++) { 6116 6086 u16 ring_id, j; 6117 6087 6118 - j = bp->rss_indir_tbl[i]; 6088 + if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) 6089 + j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); 6090 + else 6091 + j = bp->rss_indir_tbl[i]; 6119 6092 rxr = &bp->rx_ring[j]; 6120 6093 6121 6094 ring_id = rxr->rx_ring_struct.fw_ring_id; ··· 6211 6178 6212 6179 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 6213 6180 { 6214 - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 6181 + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6215 6182 struct hwrm_vnic_rss_qcfg_output *resp; 6216 6183 struct hwrm_vnic_rss_qcfg_input *req; 6217 6184 ··· 6315 6282 6316 6283 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 6317 6284 { 6285 + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6318 6286 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 6319 6287 struct hwrm_vnic_cfg_input *req; 6320 6288 unsigned int ring = 0, grp_idx; ··· 6345 6311 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6346 6312 VNIC_CFG_REQ_ENABLES_MRU); 6347 6313 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 6348 - req->rss_rule = 6349 - cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); 6314 + req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); 6350 6315 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6351 6316 VNIC_CFG_REQ_ENABLES_MRU); 6352 6317 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); ··· 6442 6409 vnic_no_ring_grps: 6443 6410 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 6444 6411 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 6445 - if (vnic_id == 0) 6412 + if (vnic_id == BNXT_VNIC_DEFAULT) 6446 6413 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 6447 6414 6448 6415 resp = hwrm_req_hold(bp, req); ··· 7076 7043 hw_resc->resv_hw_ring_grps = 7077 7044 
le32_to_cpu(resp->alloc_hw_ring_grps); 7078 7045 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 7046 + hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); 7079 7047 cp = le16_to_cpu(resp->alloc_cmpl_rings); 7080 7048 stats = le16_to_cpu(resp->alloc_stat_ctx); 7081 7049 hw_resc->resv_irqs = cp; ··· 7132 7098 static bool bnxt_rfs_supported(struct bnxt *bp); 7133 7099 7134 7100 static struct hwrm_func_cfg_input * 7135 - __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7136 - int ring_grps, int cp_rings, int stats, int vnics) 7101 + __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7137 7102 { 7138 7103 struct hwrm_func_cfg_input *req; 7139 7104 u32 enables = 0; ··· 7141 7108 return NULL; 7142 7109 7143 7110 req->fid = cpu_to_le16(0xffff); 7144 - enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7145 - req->num_tx_rings = cpu_to_le16(tx_rings); 7111 + enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7112 + req->num_tx_rings = cpu_to_le16(hwr->tx); 7146 7113 if (BNXT_NEW_RM(bp)) { 7147 - enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 7148 - enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7114 + enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 7115 + enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7149 7116 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7150 - enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 7151 - enables |= tx_rings + ring_grps ? 7117 + enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 7118 + enables |= hwr->cp_p5 ? 7152 7119 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7153 - enables |= rx_rings ? 7154 - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7155 7120 } else { 7156 - enables |= cp_rings ? 7121 + enables |= hwr->cp ? 7157 7122 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7158 - enables |= ring_grps ? 
7159 - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 7160 - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7123 + enables |= hwr->grp ? 7124 + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7161 7125 } 7162 - enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7163 - 7164 - req->num_rx_rings = cpu_to_le16(rx_rings); 7126 + enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7127 + enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 7128 + 0; 7129 + req->num_rx_rings = cpu_to_le16(hwr->rx); 7130 + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7165 7131 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7166 - u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); 7167 - 7168 - req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7169 - req->num_msix = cpu_to_le16(cp_rings); 7170 - req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); 7132 + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7133 + req->num_msix = cpu_to_le16(hwr->cp); 7171 7134 } else { 7172 - req->num_cmpl_rings = cpu_to_le16(cp_rings); 7173 - req->num_hw_ring_grps = cpu_to_le16(ring_grps); 7174 - req->num_rsscos_ctxs = cpu_to_le16(1); 7175 - if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 7176 - bnxt_rfs_supported(bp)) 7177 - req->num_rsscos_ctxs = 7178 - cpu_to_le16(ring_grps + 1); 7135 + req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7136 + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7179 7137 } 7180 - req->num_stat_ctxs = cpu_to_le16(stats); 7181 - req->num_vnics = cpu_to_le16(vnics); 7138 + req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7139 + req->num_vnics = cpu_to_le16(hwr->vnic); 7182 7140 } 7183 7141 req->enables = cpu_to_le32(enables); 7184 7142 return req; 7185 7143 } 7186 7144 7187 7145 static struct hwrm_func_vf_cfg_input * 7188 - __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7189 - int ring_grps, int cp_rings, int stats, int vnics) 7146 + __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7190 7147 { 7191 7148 struct hwrm_func_vf_cfg_input *req; 
7192 7149 u32 enables = 0; ··· 7184 7161 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 7185 7162 return NULL; 7186 7163 7187 - enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7188 - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7189 - FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7190 - enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7164 + enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7165 + enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7166 + FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7167 + enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7168 + enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7191 7169 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7192 - enables |= tx_rings + ring_grps ? 7170 + enables |= hwr->cp_p5 ? 7193 7171 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7194 7172 } else { 7195 - enables |= cp_rings ? 7196 - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7197 - enables |= ring_grps ? 7173 + enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7174 + enables |= hwr->grp ? 7198 7175 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7199 7176 } 7200 - enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7177 + enables |= hwr->vnic ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7201 7178 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 7202 7179 7203 7180 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 7204 - req->num_tx_rings = cpu_to_le16(tx_rings); 7205 - req->num_rx_rings = cpu_to_le16(rx_rings); 7181 + req->num_tx_rings = cpu_to_le16(hwr->tx); 7182 + req->num_rx_rings = cpu_to_le16(hwr->rx); 7183 + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7206 7184 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7207 - u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); 7208 - 7209 - req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7210 - req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); 7185 + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7211 7186 } else { 7212 - req->num_cmpl_rings = cpu_to_le16(cp_rings); 7213 - req->num_hw_ring_grps = cpu_to_le16(ring_grps); 7214 - req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); 7187 + req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7188 + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7215 7189 } 7216 - req->num_stat_ctxs = cpu_to_le16(stats); 7217 - req->num_vnics = cpu_to_le16(vnics); 7190 + req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7191 + req->num_vnics = cpu_to_le16(hwr->vnic); 7218 7192 7219 7193 req->enables = cpu_to_le32(enables); 7220 7194 return req; 7221 7195 } 7222 7196 7223 7197 static int 7224 - bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7225 - int ring_grps, int cp_rings, int stats, int vnics) 7198 + bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7226 7199 { 7227 7200 struct hwrm_func_cfg_input *req; 7228 7201 int rc; 7229 7202 7230 - req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 7231 - cp_rings, stats, vnics); 7203 + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7232 7204 if (!req) 7233 7205 return -ENOMEM; 7234 7206 ··· 7237 7219 return rc; 7238 7220 7239 7221 if (bp->hwrm_spec_code < 0x10601) 7240 - bp->hw_resc.resv_tx_rings = tx_rings; 7222 + 
bp->hw_resc.resv_tx_rings = hwr->tx; 7241 7223 7242 7224 return bnxt_hwrm_get_rings(bp); 7243 7225 } 7244 7226 7245 7227 static int 7246 - bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7247 - int ring_grps, int cp_rings, int stats, int vnics) 7228 + bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7248 7229 { 7249 7230 struct hwrm_func_vf_cfg_input *req; 7250 7231 int rc; 7251 7232 7252 7233 if (!BNXT_NEW_RM(bp)) { 7253 - bp->hw_resc.resv_tx_rings = tx_rings; 7234 + bp->hw_resc.resv_tx_rings = hwr->tx; 7254 7235 return 0; 7255 7236 } 7256 7237 7257 - req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 7258 - cp_rings, stats, vnics); 7238 + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7259 7239 if (!req) 7260 7240 return -ENOMEM; 7261 7241 ··· 7264 7248 return bnxt_hwrm_get_rings(bp); 7265 7249 } 7266 7250 7267 - static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, 7268 - int cp, int stat, int vnic) 7251 + static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7269 7252 { 7270 7253 if (BNXT_PF(bp)) 7271 - return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, 7272 - vnic); 7254 + return bnxt_hwrm_reserve_pf_rings(bp, hwr); 7273 7255 else 7274 - return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, 7275 - vnic); 7256 + return bnxt_hwrm_reserve_vf_rings(bp, hwr); 7276 7257 } 7277 7258 7278 7259 int bnxt_nq_rings_in_use(struct bnxt *bp) ··· 7312 7299 return cp + ulp_stat; 7313 7300 } 7314 7301 7302 + static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7303 + { 7304 + if (!hwr->grp) 7305 + return 0; 7306 + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7307 + int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); 7308 + 7309 + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7310 + rss_ctx *= hwr->vnic; 7311 + return rss_ctx; 7312 + } 7313 + if (BNXT_VF(bp)) 7314 + return BNXT_VF_MAX_RSS_CTX; 7315 + if (!(bp->rss_cap & 
BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) 7316 + return hwr->grp + 1; 7317 + return 1; 7318 + } 7319 + 7315 7320 /* Check if a default RSS map needs to be setup. This function is only 7316 7321 * used on older firmware that does not require reserving RX rings. 7317 7322 */ ··· 7345 7314 } 7346 7315 } 7347 7316 7317 + static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) 7318 + { 7319 + if (bp->flags & BNXT_FLAG_RFS) { 7320 + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7321 + return 2; 7322 + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7323 + return rx_rings + 1; 7324 + } 7325 + return 1; 7326 + } 7327 + 7348 7328 static bool bnxt_need_reserve_rings(struct bnxt *bp) 7349 7329 { 7350 7330 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7351 7331 int cp = bnxt_cp_rings_in_use(bp); 7352 7332 int nq = bnxt_nq_rings_in_use(bp); 7353 7333 int rx = bp->rx_nr_rings, stat; 7354 - int vnic = 1, grp = rx; 7334 + int vnic, grp = rx; 7355 7335 7356 7336 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7357 7337 bp->hwrm_spec_code >= 0x10601) ··· 7377 7335 bnxt_check_rss_tbl_no_rmgr(bp); 7378 7336 return false; 7379 7337 } 7380 - if ((bp->flags & BNXT_FLAG_RFS) && 7381 - !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7382 - vnic = rx + 1; 7338 + 7339 + vnic = bnxt_get_total_vnics(bp, rx); 7340 + 7383 7341 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7384 7342 rx <<= 1; 7385 7343 stat = bnxt_get_func_stat_ctxs(bp); ··· 7394 7352 return false; 7395 7353 } 7396 7354 7397 - static int __bnxt_reserve_rings(struct bnxt *bp) 7355 + static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7398 7356 { 7399 7357 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7400 - int cp = bnxt_nq_rings_in_use(bp); 7401 - int tx = bp->tx_nr_rings; 7402 - int rx = bp->rx_nr_rings; 7403 - int grp, rx_rings, rc; 7404 - int vnic = 1, stat; 7358 + 7359 + hwr->tx = hw_resc->resv_tx_rings; 7360 + if (BNXT_NEW_RM(bp)) { 7361 + hwr->rx = hw_resc->resv_rx_rings; 7362 + hwr->cp = hw_resc->resv_irqs; 
7363 + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7364 + hwr->cp_p5 = hw_resc->resv_cp_rings; 7365 + hwr->grp = hw_resc->resv_hw_ring_grps; 7366 + hwr->vnic = hw_resc->resv_vnics; 7367 + hwr->stat = hw_resc->resv_stat_ctxs; 7368 + hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; 7369 + } 7370 + } 7371 + 7372 + static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7373 + { 7374 + return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && 7375 + hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); 7376 + } 7377 + 7378 + static int __bnxt_reserve_rings(struct bnxt *bp) 7379 + { 7380 + struct bnxt_hw_rings hwr = {0}; 7381 + int rx_rings, rc; 7405 7382 bool sh = false; 7406 7383 int tx_cp; 7407 7384 7408 7385 if (!bnxt_need_reserve_rings(bp)) 7409 7386 return 0; 7410 7387 7388 + hwr.cp = bnxt_nq_rings_in_use(bp); 7389 + hwr.tx = bp->tx_nr_rings; 7390 + hwr.rx = bp->rx_nr_rings; 7411 7391 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7412 7392 sh = true; 7413 - if ((bp->flags & BNXT_FLAG_RFS) && 7414 - !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7415 - vnic = rx + 1; 7416 - if (bp->flags & BNXT_FLAG_AGG_RINGS) 7417 - rx <<= 1; 7418 - grp = bp->rx_nr_rings; 7419 - stat = bnxt_get_func_stat_ctxs(bp); 7393 + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7394 + hwr.cp_p5 = hwr.rx + hwr.tx; 7420 7395 7421 - rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); 7396 + hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); 7397 + 7398 + if (bp->flags & BNXT_FLAG_AGG_RINGS) 7399 + hwr.rx <<= 1; 7400 + hwr.grp = bp->rx_nr_rings; 7401 + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 7402 + hwr.stat = bnxt_get_func_stat_ctxs(bp); 7403 + 7404 + rc = bnxt_hwrm_reserve_rings(bp, &hwr); 7422 7405 if (rc) 7423 7406 return rc; 7424 7407 7425 - tx = hw_resc->resv_tx_rings; 7426 - if (BNXT_NEW_RM(bp)) { 7427 - rx = hw_resc->resv_rx_rings; 7428 - cp = hw_resc->resv_irqs; 7429 - grp = hw_resc->resv_hw_ring_grps; 7430 - vnic = hw_resc->resv_vnics; 7431 - stat = 
hw_resc->resv_stat_ctxs; 7432 - } 7408 + bnxt_copy_reserved_rings(bp, &hwr); 7433 7409 7434 - rx_rings = rx; 7410 + rx_rings = hwr.rx; 7435 7411 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7436 - if (rx >= 2) { 7437 - rx_rings = rx >> 1; 7412 + if (hwr.rx >= 2) { 7413 + rx_rings = hwr.rx >> 1; 7438 7414 } else { 7439 7415 if (netif_running(bp->dev)) 7440 7416 return -ENOMEM; ··· 7464 7404 bnxt_set_ring_params(bp); 7465 7405 } 7466 7406 } 7467 - rx_rings = min_t(int, rx_rings, grp); 7468 - cp = min_t(int, cp, bp->cp_nr_rings); 7469 - if (stat > bnxt_get_ulp_stat_ctxs(bp)) 7470 - stat -= bnxt_get_ulp_stat_ctxs(bp); 7471 - cp = min_t(int, cp, stat); 7472 - rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); 7407 + rx_rings = min_t(int, rx_rings, hwr.grp); 7408 + hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); 7409 + if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) 7410 + hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); 7411 + hwr.cp = min_t(int, hwr.cp, hwr.stat); 7412 + rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); 7473 7413 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7474 - rx = rx_rings << 1; 7475 - tx_cp = bnxt_num_tx_to_cp(bp, tx); 7476 - cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 7477 - bp->tx_nr_rings = tx; 7414 + hwr.rx = rx_rings << 1; 7415 + tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); 7416 + hwr.cp = sh ? 
max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 7417 + bp->tx_nr_rings = hwr.tx; 7478 7418 7479 7419 /* If we cannot reserve all the RX rings, reset the RSS map only 7480 7420 * if absolutely necessary ··· 7491 7431 } 7492 7432 } 7493 7433 bp->rx_nr_rings = rx_rings; 7494 - bp->cp_nr_rings = cp; 7434 + bp->cp_nr_rings = hwr.cp; 7495 7435 7496 - if (!tx || !rx || !cp || !grp || !vnic || !stat) 7436 + if (!bnxt_rings_ok(bp, &hwr)) 7497 7437 return -ENOMEM; 7498 7438 7499 7439 if (!netif_is_rxfh_configured(bp->dev)) ··· 7502 7442 return rc; 7503 7443 } 7504 7444 7505 - static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7506 - int ring_grps, int cp_rings, int stats, 7507 - int vnics) 7445 + static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7508 7446 { 7509 7447 struct hwrm_func_vf_cfg_input *req; 7510 7448 u32 flags; ··· 7510 7452 if (!BNXT_NEW_RM(bp)) 7511 7453 return 0; 7512 7454 7513 - req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, 7514 - cp_rings, stats, vnics); 7455 + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7515 7456 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 7516 7457 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 7517 7458 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | ··· 7524 7467 return hwrm_req_send_silent(bp, req); 7525 7468 } 7526 7469 7527 - static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7528 - int ring_grps, int cp_rings, int stats, 7529 - int vnics) 7470 + static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7530 7471 { 7531 7472 struct hwrm_func_cfg_input *req; 7532 7473 u32 flags; 7533 7474 7534 - req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, 7535 - cp_rings, stats, vnics); 7475 + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7536 7476 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 7537 7477 if (BNXT_NEW_RM(bp)) { 7538 7478 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | ··· 7547 7493 return 
hwrm_req_send_silent(bp, req); 7548 7494 } 7549 7495 7550 - static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 7551 - int ring_grps, int cp_rings, int stats, 7552 - int vnics) 7496 + static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7553 7497 { 7554 7498 if (bp->hwrm_spec_code < 0x10801) 7555 7499 return 0; 7556 7500 7557 7501 if (BNXT_PF(bp)) 7558 - return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 7559 - ring_grps, cp_rings, stats, 7560 - vnics); 7502 + return bnxt_hwrm_check_pf_rings(bp, hwr); 7561 7503 7562 - return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 7563 - cp_rings, stats, vnics); 7504 + return bnxt_hwrm_check_vf_rings(bp, hwr); 7564 7505 } 7565 7506 7566 7507 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) ··· 9001 8952 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 9002 8953 9003 8954 if (flags & 8955 + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) 8956 + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; 8957 + 8958 + if (flags & 9004 8959 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) 9005 8960 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; 9006 8961 ··· 9872 9819 return __bnxt_setup_vnic(bp, vnic_id); 9873 9820 } 9874 9821 9822 + static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id, 9823 + u16 start_rx_ring_idx, int rx_rings) 9824 + { 9825 + int rc; 9826 + 9827 + rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings); 9828 + if (rc) { 9829 + netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9830 + vnic_id, rc); 9831 + return rc; 9832 + } 9833 + return bnxt_setup_vnic(bp, vnic_id); 9834 + } 9835 + 9875 9836 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 9876 9837 { 9877 9838 int i, rc = 0; 9839 + 9840 + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 9841 + return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0, 9842 + bp->rx_nr_rings); 9878 9843 9879 9844 if (bp->flags & 
BNXT_FLAG_CHIP_P5_PLUS) 9880 9845 return 0; ··· 9909 9838 vnic->flags |= BNXT_VNIC_RFS_FLAG; 9910 9839 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 9911 9840 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 9912 - rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); 9913 - if (rc) { 9914 - netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 9915 - vnic_id, rc); 9916 - break; 9917 - } 9918 - rc = bnxt_setup_vnic(bp, vnic_id); 9919 - if (rc) 9841 + if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1)) 9920 9842 break; 9921 9843 } 9922 9844 return rc; ··· 9950 9886 9951 9887 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 9952 9888 { 9953 - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 9889 + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 9954 9890 int rc = 0; 9955 9891 unsigned int rx_nr_rings = bp->rx_nr_rings; 9956 9892 ··· 9979 9915 rx_nr_rings--; 9980 9916 9981 9917 /* default vnic 0 */ 9982 - rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); 9918 + rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings); 9983 9919 if (rc) { 9984 9920 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 9985 9921 goto err_out; ··· 9988 9924 if (BNXT_VF(bp)) 9989 9925 bnxt_hwrm_func_qcfg(bp); 9990 9926 9991 - rc = bnxt_setup_vnic(bp, 0); 9927 + rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT); 9992 9928 if (rc) 9993 9929 goto err_out; 9994 9930 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) ··· 11279 11215 hw_resc->resv_rx_rings = 0; 11280 11216 hw_resc->resv_hw_ring_grps = 0; 11281 11217 hw_resc->resv_vnics = 0; 11218 + hw_resc->resv_rsscos_ctxs = 0; 11282 11219 if (!fw_reset) { 11283 11220 bp->tx_nr_rings = 0; 11284 11221 bp->rx_nr_rings = 0; ··· 11648 11583 11649 11584 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { 11650 11585 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); 11651 - l2_fltr = bp->vnic_info[0].l2_filters[0]; 11586 + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 11652 11587 
atomic_inc(&l2_fltr->refcnt); 11653 11588 ntp_fltr->l2_fltr = l2_fltr; 11654 11589 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { ··· 12202 12137 12203 12138 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 12204 12139 { 12140 + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12205 12141 struct net_device *dev = bp->dev; 12206 - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 12207 12142 struct netdev_hw_addr *ha; 12208 12143 u8 *haddr; 12209 12144 int mc_count = 0; ··· 12237 12172 static bool bnxt_uc_list_updated(struct bnxt *bp) 12238 12173 { 12239 12174 struct net_device *dev = bp->dev; 12240 - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 12175 + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12241 12176 struct netdev_hw_addr *ha; 12242 12177 int off = 0; 12243 12178 ··· 12264 12199 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 12265 12200 return; 12266 12201 12267 - vnic = &bp->vnic_info[0]; 12202 + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12268 12203 mask = vnic->rx_mask; 12269 12204 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 12270 12205 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | ··· 12295 12230 static int bnxt_cfg_rx_mode(struct bnxt *bp) 12296 12231 { 12297 12232 struct net_device *dev = bp->dev; 12298 - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 12233 + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 12299 12234 struct netdev_hw_addr *ha; 12300 12235 int i, off = 0, rc; 12301 12236 bool uc_update; ··· 12407 12342 /* If runtime conditions support RFS */ 12408 12343 static bool bnxt_rfs_capable(struct bnxt *bp) 12409 12344 { 12410 - int vnics, max_vnics, max_rss_ctxs; 12345 + struct bnxt_hw_rings hwr = {0}; 12346 + int max_vnics, max_rss_ctxs; 12411 12347 12348 + hwr.rss_ctx = 1; 12349 + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 12350 + /* 2 VNICS: default + Ntuple */ 12351 + hwr.vnic = 2; 12352 + hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * 12353 + hwr.vnic; 12354 
+ goto check_reserve_vnic; 12355 + } 12412 12356 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 12413 12357 return bnxt_rfs_supported(bp); 12414 12358 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 12415 12359 return false; 12416 12360 12417 - vnics = 1 + bp->rx_nr_rings; 12361 + hwr.vnic = 1 + bp->rx_nr_rings; 12362 + check_reserve_vnic: 12418 12363 max_vnics = bnxt_get_max_func_vnics(bp); 12419 12364 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 12420 12365 12421 - /* RSS contexts not a limiting factor */ 12422 - if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 12423 - max_rss_ctxs = max_vnics; 12424 - if (vnics > max_vnics || vnics > max_rss_ctxs) { 12366 + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 12367 + !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)) 12368 + hwr.rss_ctx = hwr.vnic; 12369 + 12370 + if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { 12425 12371 if (bp->rx_nr_rings > 1) 12426 12372 netdev_warn(bp->dev, 12427 12373 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", ··· 12443 12367 if (!BNXT_NEW_RM(bp)) 12444 12368 return true; 12445 12369 12446 - if (vnics == bp->hw_resc.resv_vnics) 12370 + if (hwr.vnic == bp->hw_resc.resv_vnics && 12371 + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 12447 12372 return true; 12448 12373 12449 - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); 12450 - if (vnics <= bp->hw_resc.resv_vnics) 12374 + bnxt_hwrm_reserve_rings(bp, &hwr); 12375 + if (hwr.vnic <= bp->hw_resc.resv_vnics && 12376 + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 12451 12377 return true; 12452 12378 12453 12379 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 12454 - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); 12380 + hwr.vnic = 1; 12381 + hwr.rss_ctx = 0; 12382 + bnxt_hwrm_reserve_rings(bp, &hwr); 12455 12383 return false; 12456 12384 } 12457 12385 ··· 12494 12414 return features; 12495 12415 } 12496 12416 12417 + static int 
bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, 12418 + bool link_re_init, u32 flags, bool update_tpa) 12419 + { 12420 + bnxt_close_nic(bp, irq_re_init, link_re_init); 12421 + bp->flags = flags; 12422 + if (update_tpa) 12423 + bnxt_set_ring_params(bp); 12424 + return bnxt_open_nic(bp, irq_re_init, link_re_init); 12425 + } 12426 + 12497 12427 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 12498 12428 { 12429 + bool update_tpa = false, update_ntuple = false; 12499 12430 struct bnxt *bp = netdev_priv(dev); 12500 12431 u32 flags = bp->flags; 12501 12432 u32 changes; 12502 12433 int rc = 0; 12503 12434 bool re_init = false; 12504 - bool update_tpa = false; 12505 12435 12506 12436 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 12507 12437 if (features & NETIF_F_GRO_HW) ··· 12542 12452 if (changes & ~BNXT_FLAG_TPA) 12543 12453 re_init = true; 12544 12454 12455 + if (changes & BNXT_FLAG_RFS) 12456 + update_ntuple = true; 12457 + 12545 12458 if (flags != bp->flags) { 12546 12459 u32 old_flags = bp->flags; 12547 12460 ··· 12555 12462 return rc; 12556 12463 } 12557 12464 12558 - if (re_init) { 12559 - bnxt_close_nic(bp, false, false); 12560 - bp->flags = flags; 12561 - if (update_tpa) 12562 - bnxt_set_ring_params(bp); 12465 + if (update_ntuple) 12466 + return bnxt_reinit_features(bp, true, false, flags, update_tpa); 12563 12467 12564 - return bnxt_open_nic(bp, false, false); 12565 - } 12468 + if (re_init) 12469 + return bnxt_reinit_features(bp, false, false, flags, update_tpa); 12470 + 12566 12471 if (update_tpa) { 12567 12472 bp->flags = flags; 12568 12473 rc = bnxt_set_tpa(bp, ··· 13390 13299 int tx_xdp) 13391 13300 { 13392 13301 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 13393 - int tx_rings_needed, stats; 13302 + struct bnxt_hw_rings hwr = {0}; 13394 13303 int rx_rings = rx; 13395 - int cp, vnics; 13396 13304 13397 13305 if (tcs) 13398 13306 tx_sets = tcs; ··· 13404 13314 if (bp->flags & BNXT_FLAG_AGG_RINGS) 13405 13315 rx_rings <<= 
1; 13406 13316 13407 - tx_rings_needed = tx * tx_sets + tx_xdp; 13408 - if (max_tx < tx_rings_needed) 13317 + hwr.rx = rx_rings; 13318 + hwr.tx = tx * tx_sets + tx_xdp; 13319 + if (max_tx < hwr.tx) 13409 13320 return -ENOMEM; 13410 13321 13411 - vnics = 1; 13412 - if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == 13413 - BNXT_FLAG_RFS) 13414 - vnics += rx; 13322 + hwr.vnic = bnxt_get_total_vnics(bp, rx); 13415 13323 13416 - tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp); 13417 - cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; 13418 - if (max_cp < cp) 13324 + tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); 13325 + hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; 13326 + if (max_cp < hwr.cp) 13419 13327 return -ENOMEM; 13420 - stats = cp; 13328 + hwr.stat = hwr.cp; 13421 13329 if (BNXT_NEW_RM(bp)) { 13422 - cp += bnxt_get_ulp_msix_num(bp); 13423 - stats += bnxt_get_ulp_stat_ctxs(bp); 13330 + hwr.cp += bnxt_get_ulp_msix_num(bp); 13331 + hwr.stat += bnxt_get_ulp_stat_ctxs(bp); 13332 + hwr.grp = rx; 13333 + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 13424 13334 } 13425 - return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, 13426 - stats, vnics); 13335 + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 13336 + hwr.cp_p5 = hwr.tx + rx; 13337 + return bnxt_hwrm_check_rings(bp, &hwr); 13427 13338 } 13428 13339 13429 13340 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) ··· 14150 14059 if (skb) 14151 14060 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 14152 14061 14153 - vnic = &bp->vnic_info[0]; 14062 + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 14154 14063 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); 14155 14064 } 14156 14065 ··· 14245 14154 u32 flags; 14246 14155 14247 14156 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { 14248 - l2_fltr = bp->vnic_info[0].l2_filters[0]; 14157 + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 14249 14158 
atomic_inc(&l2_fltr->refcnt); 14250 14159 } else { 14251 14160 struct bnxt_l2_key key;
+20
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 1213 1213 u16 cp_fw_ring_id; 1214 1214 }; 1215 1215 1216 + #define BNXT_VNIC_DEFAULT 0 1217 + #define BNXT_VNIC_NTUPLE 1 1218 + 1216 1219 struct bnxt_vnic_info { 1217 1220 u16 fw_vnic_id; /* returned by Chimp during alloc */ 1218 1221 #define BNXT_MAX_CTX_PER_VNIC 8 ··· 1255 1252 #define BNXT_VNIC_MCAST_FLAG 4 1256 1253 #define BNXT_VNIC_UCAST_FLAG 8 1257 1254 #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 1255 + #define BNXT_VNIC_NTUPLE_FLAG 0x20 1256 + }; 1257 + 1258 + struct bnxt_hw_rings { 1259 + int tx; 1260 + int rx; 1261 + int grp; 1262 + int cp; 1263 + int cp_p5; 1264 + int stat; 1265 + int vnic; 1266 + int rss_ctx; 1258 1267 }; 1259 1268 1260 1269 struct bnxt_hw_resc { 1261 1270 u16 min_rsscos_ctxs; 1262 1271 u16 max_rsscos_ctxs; 1272 + u16 resv_rsscos_ctxs; 1263 1273 u16 min_cp_rings; 1264 1274 u16 max_cp_rings; 1265 1275 u16 resv_cp_rings; ··· 2330 2314 #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36) 2331 2315 #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37) 2332 2316 #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38) 2317 + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39) 2333 2318 2334 2319 u32 fw_dbg_cap; 2335 2320 2336 2321 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) 2337 2322 #define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \ 2338 2323 ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC)) 2324 + #define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \ 2325 + (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3)) 2326 + 2339 2327 u32 hwrm_spec_code; 2340 2328 u16 hwrm_cmd_seq; 2341 2329 u16 hwrm_cmd_kong_seq;
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 1314 1314 if (!new_fltr) 1315 1315 return -ENOMEM; 1316 1316 1317 - l2_fltr = bp->vnic_info[0].l2_filters[0]; 1317 + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 1318 1318 atomic_inc(&l2_fltr->refcnt); 1319 1319 new_fltr->l2_fltr = l2_fltr; 1320 1320 fmasks = &new_fltr->fmasks; ··· 1763 1763 if (!bp->vnic_info) 1764 1764 return 0; 1765 1765 1766 - vnic = &bp->vnic_info[0]; 1766 + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 1767 1767 if (rxfh->indir && bp->rss_indir_tbl) { 1768 1768 tbl_size = bnxt_get_rxfh_indir_size(dev); 1769 1769 for (i = 0; i < tbl_size; i++)