Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ice: allow bigger VFs

Unlike the XL710 series, 800-series hardware can allocate more than 4
MSI-X vectors per VF. This patch enables that functionality. We
dynamically allocate vectors and queues depending on how many VFs are
enabled. Allocating the maximum number of VFs replicates XL710
behavior with 4 queues and 4 vectors. But allocating a smaller number
of VFs will give you 16 queues and 16 vectors.

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

Authored by Mitch Williams; committed by Jeff Kirsher
0ca469fb 5520deb1

+146 -158
+0 -1
drivers/net/ethernet/intel/ice/ice.h
··· 70 70 #define ICE_Q_WAIT_RETRY_LIMIT 10 71 71 #define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT) 72 72 #define ICE_MAX_LG_RSS_QS 256 73 - #define ICE_MAX_SMALL_RSS_QS 8 74 73 #define ICE_RES_VALID_BIT 0x8000 75 74 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) 76 75 #define ICE_INVAL_Q_INDEX 0xffff
+4 -5
drivers/net/ethernet/intel/ice/ice_lib.c
··· 571 571 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 572 572 break; 573 573 case ICE_VSI_VF: 574 - /* VF VSI will gets a small RSS table 575 - * For VSI_LUT, LUT size should be set to 64 bytes 574 + /* VF VSI will get a small RSS table. 575 + * For VSI_LUT, LUT size should be set to 64 bytes. 576 576 */ 577 577 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 578 - vsi->rss_size = min_t(int, num_online_cpus(), 579 - BIT(cap->rss_table_entry_width)); 578 + vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; 580 579 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; 581 580 break; 582 581 case ICE_VSI_LB: ··· 683 684 if (vsi->type == ICE_VSI_PF) 684 685 max_rss = ICE_MAX_LG_RSS_QS; 685 686 else 686 - max_rss = ICE_MAX_SMALL_RSS_QS; 687 + max_rss = ICE_MAX_RSS_QS_PER_VF; 687 688 qcount_rx = min_t(int, rx_numq_tc, max_rss); 688 689 if (!vsi->req_rxq) 689 690 qcount_rx = min_t(int, qcount_rx,
+136 -143
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
··· 99 99 */ 100 100 static bool ice_vf_has_no_qs_ena(struct ice_vf *vf) 101 101 { 102 - return (!bitmap_weight(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF) && 103 - !bitmap_weight(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF)); 102 + return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) && 103 + !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF)); 104 104 } 105 105 106 106 /** ··· 232 232 * ice_sriov_free_msix_res - Reset/free any used MSIX resources 233 233 * @pf: pointer to the PF structure 234 234 * 235 - * If MSIX entries from the pf->irq_tracker were needed then we need to 236 - * reset the irq_tracker->end and give back the entries we needed to 237 - * num_avail_sw_msix. 238 - * 239 - * If no MSIX entries were taken from the pf->irq_tracker then just clear 235 + * Since no MSIX entries are taken from the pf->irq_tracker then just clear 240 236 * the pf->sriov_base_vector. 241 237 * 242 238 * Returns 0 on success, and -EINVAL on error. ··· 249 253 return -EINVAL; 250 254 251 255 /* give back irq_tracker resources used */ 252 - if (pf->sriov_base_vector < res->num_entries) { 253 - res->end = res->num_entries; 254 - pf->num_avail_sw_msix += 255 - res->num_entries - pf->sriov_base_vector; 256 - } 256 + WARN_ON(pf->sriov_base_vector < res->num_entries); 257 257 258 258 pf->sriov_base_vector = 0; 259 259 ··· 263 271 void ice_set_vf_state_qs_dis(struct ice_vf *vf) 264 272 { 265 273 /* Clear Rx/Tx enabled queues flag */ 266 - bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF); 267 - bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF); 274 + bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); 275 + bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 268 276 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 269 277 } 270 278 ··· 596 604 */ 597 605 tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf), 598 606 ice_get_avail_rxq_count(pf)); 599 - tx_rx_queue_left += ICE_DFLT_QS_PER_VF; 607 + tx_rx_queue_left += pf->num_vf_qps; 600 608 if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left 
&& 601 609 vf->num_req_qs != vf->num_vf_qs) 602 610 vf->num_vf_qs = vf->num_req_qs; ··· 795 803 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs 796 804 * 797 805 * This function allows SR-IOV resources to be taken from the end of the PF's 798 - * allowed HW MSIX vectors so in many cases the irq_tracker will not 799 - * be needed. In these cases we just set the pf->sriov_base_vector and return 800 - * success. 806 + * allowed HW MSIX vectors so that the irq_tracker will not be affected. We 807 + * just set the pf->sriov_base_vector and return success. 801 808 * 802 - * If SR-IOV needs to use any pf->irq_tracker entries it updates the 803 - * irq_tracker->end based on the first entry needed for SR-IOV. This makes it 804 - * so any calls to ice_get_res() using the irq_tracker will not try to use 805 - * resources at or beyond the newly set value. 809 + * If there are not enough resources available, return an error. This should 810 + * always be caught by ice_set_per_vf_res(). 806 811 * 807 812 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in 808 813 * in the PF's space available for SR-IOV. 
809 814 */ 810 815 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) 811 816 { 812 - int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); 813 - u16 pf_total_msix_vectors = 814 - pf->hw.func_caps.common_cap.num_msix_vectors; 815 - struct ice_res_tracker *res = pf->irq_tracker; 817 + u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 818 + int vectors_used = pf->irq_tracker->num_entries; 816 819 int sriov_base_vector; 817 820 818 - if (max_valid_res_idx < 0) 819 - return max_valid_res_idx; 820 - 821 - sriov_base_vector = pf_total_msix_vectors - num_msix_needed; 821 + sriov_base_vector = total_vectors - num_msix_needed; 822 822 823 823 /* make sure we only grab irq_tracker entries from the list end and 824 824 * that we have enough available MSIX vectors 825 825 */ 826 - if (sriov_base_vector <= max_valid_res_idx) 826 + if (sriov_base_vector < vectors_used) 827 827 return -EINVAL; 828 828 829 829 pf->sriov_base_vector = sriov_base_vector; 830 - 831 - /* dip into irq_tracker entries and update used resources */ 832 - if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) { 833 - pf->num_avail_sw_msix -= 834 - res->num_entries - pf->sriov_base_vector; 835 - res->end = pf->sriov_base_vector; 836 - } 837 830 838 831 return 0; 839 832 } 840 833 841 834 /** 842 - * ice_check_avail_res - check if vectors and queues are available 835 + * ice_set_per_vf_res - check if vectors and queues are available 843 836 * @pf: pointer to the PF structure 844 837 * 845 - * This function is where we calculate actual number of resources for VF VSIs, 846 - * we don't reserve ahead of time during probe. Returns success if vectors and 847 - * queues resources are available, otherwise returns error code 838 + * First, determine HW interrupts from common pool. If we allocate fewer VFs, we 839 + * get more vectors and can enable more queues per VF. Note that this does not 840 + * grab any vectors from the SW pool already allocated. 
Also note, that all 841 + * vector counts include one for each VF's miscellaneous interrupt vector 842 + * (i.e. OICR). 843 + * 844 + * Minimum VFs - 2 vectors, 1 queue pair 845 + * Small VFs - 5 vectors, 4 queue pairs 846 + * Medium VFs - 17 vectors, 16 queue pairs 847 + * 848 + * Second, determine number of queue pairs per VF by starting with a pre-defined 849 + * maximum each VF supports. If this is not possible, then we adjust based on 850 + * queue pairs available on the device. 851 + * 852 + * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used 853 + * by each VF during VF initialization and reset. 848 854 */ 849 - static int ice_check_avail_res(struct ice_pf *pf) 855 + static int ice_set_per_vf_res(struct ice_pf *pf) 850 856 { 851 857 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); 852 - u16 num_msix, num_txq, num_rxq, num_avail_msix; 853 858 struct device *dev = ice_pf_to_dev(pf); 859 + u16 num_msix, num_txq, num_rxq; 860 + int v; 854 861 855 862 if (!pf->num_alloc_vfs || max_valid_res_idx < 0) 856 863 return -EINVAL; 857 864 858 - /* add 1 to max_valid_res_idx to account for it being 0-based */ 859 - num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors - 860 - (max_valid_res_idx + 1); 861 - 862 - /* Grab from HW interrupts common pool 863 - * Note: By the time the user decides it needs more vectors in a VF 864 - * its already too late since one must decide this prior to creating the 865 - * VF interface. So the best we can do is take a guess as to what the 866 - * user might want. 867 - * 868 - * We have two policies for vector allocation: 869 - * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small 870 - * number of NFV VFs used for NFV appliances, since this is a special 871 - * case, we try to assign maximum vectors per VF (65) as much as 872 - * possible, based on determine_resources algorithm. 873 - * 2. 
if num_alloc_vfs is from 17 to 256, then its large number of 874 - * regular VFs which are not used for any special purpose. Hence try to 875 - * grab default interrupt vectors (5 as supported by AVF driver). 876 - */ 877 - if (pf->num_alloc_vfs <= 16) { 878 - num_msix = ice_determine_res(pf, num_avail_msix, 879 - ICE_MAX_INTR_PER_VF, 880 - ICE_MIN_INTR_PER_VF); 881 - } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) { 882 - num_msix = ice_determine_res(pf, num_avail_msix, 883 - ICE_DFLT_INTR_PER_VF, 884 - ICE_MIN_INTR_PER_VF); 865 + /* determine MSI-X resources per VF */ 866 + v = (pf->hw.func_caps.common_cap.num_msix_vectors - 867 + pf->irq_tracker->num_entries) / pf->num_alloc_vfs; 868 + if (v >= ICE_NUM_VF_MSIX_MED) { 869 + num_msix = ICE_NUM_VF_MSIX_MED; 870 + } else if (v >= ICE_NUM_VF_MSIX_SMALL) { 871 + num_msix = ICE_NUM_VF_MSIX_SMALL; 872 + } else if (v >= ICE_MIN_INTR_PER_VF) { 873 + num_msix = ICE_MIN_INTR_PER_VF; 885 874 } else { 886 - dev_err(dev, "Number of VFs %d exceeds max VF count %d\n", 887 - pf->num_alloc_vfs, ICE_MAX_VF_COUNT); 875 + dev_err(dev, "Not enough vectors to support %d VFs\n", 876 + pf->num_alloc_vfs); 888 877 return -EIO; 889 878 } 890 879 891 - if (!num_msix) 892 - return -EIO; 893 - 894 - /* Grab from the common pool 895 - * start by requesting Default queues (4 as supported by AVF driver), 896 - * Note that, the main difference between queues and vectors is, latter 897 - * can only be reserved at init time but queues can be requested by VF 898 - * at runtime through Virtchnl, that is the reason we start by reserving 899 - * few queues. 
900 - */ 880 + /* determine queue resources per VF */ 901 881 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), 902 - ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); 882 + min_t(u16, num_msix - 1, 883 + ICE_MAX_RSS_QS_PER_VF), 884 + ICE_MIN_QS_PER_VF); 903 885 904 886 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), 905 - ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); 887 + min_t(u16, num_msix - 1, 888 + ICE_MAX_RSS_QS_PER_VF), 889 + ICE_MIN_QS_PER_VF); 906 890 907 - if (!num_txq || !num_rxq) 891 + if (!num_txq || !num_rxq) { 892 + dev_err(dev, "Not enough queues to support %d VFs\n", 893 + pf->num_alloc_vfs); 908 894 return -EIO; 895 + } 909 896 910 - if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs)) 897 + if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs)) { 898 + dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", 899 + pf->num_alloc_vfs); 911 900 return -EINVAL; 901 + } 912 902 913 - /* since AVF driver works with only queue pairs which means, it expects 914 - * to have equal number of Rx and Tx queues, so take the minimum of 915 - * available Tx or Rx queues 916 - */ 903 + /* only allow equal Tx/Rx queue count (i.e. 
queue pairs) */ 917 904 pf->num_vf_qps = min_t(int, num_txq, num_rxq); 918 905 pf->num_vf_msix = num_msix; 906 + dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", 907 + pf->num_alloc_vfs, num_msix, pf->num_vf_qps); 919 908 920 909 return 0; 921 910 } ··· 1005 1032 struct ice_hw *hw = &pf->hw; 1006 1033 int v; 1007 1034 1008 - if (ice_check_avail_res(pf)) { 1035 + if (ice_set_per_vf_res(pf)) { 1009 1036 dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n"); 1010 1037 return false; 1011 1038 } ··· 2099 2126 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) 2100 2127 { 2101 2128 if ((!vqs->rx_queues && !vqs->tx_queues) || 2102 - vqs->rx_queues >= BIT(ICE_MAX_BASE_QS_PER_VF) || 2103 - vqs->tx_queues >= BIT(ICE_MAX_BASE_QS_PER_VF)) 2129 + vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) || 2130 + vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF)) 2104 2131 return false; 2105 2132 2106 2133 return true; ··· 2149 2176 * programmed using ice_vsi_cfg_txqs 2150 2177 */ 2151 2178 q_map = vqs->rx_queues; 2152 - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { 2179 + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 2153 2180 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 2154 2181 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2155 2182 goto error_param; ··· 2171 2198 2172 2199 vsi = pf->vsi[vf->lan_vsi_idx]; 2173 2200 q_map = vqs->tx_queues; 2174 - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { 2201 + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 2175 2202 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 2176 2203 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2177 2204 goto error_param; ··· 2228 2255 goto error_param; 2229 2256 } 2230 2257 2231 - if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || 2232 - vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { 2233 - v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2234 - goto error_param; 2235 - } 2236 - 2237 2258 vsi = pf->vsi[vf->lan_vsi_idx]; 2238 2259 if 
(!vsi) { 2239 2260 v_ret = VIRTCHNL_STATUS_ERR_PARAM; ··· 2237 2270 if (vqs->tx_queues) { 2238 2271 q_map = vqs->tx_queues; 2239 2272 2240 - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { 2273 + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 2241 2274 struct ice_ring *ring = vsi->tx_rings[vf_q_id]; 2242 2275 struct ice_txq_meta txq_meta = { 0 }; 2243 2276 ··· 2268 2301 q_map = vqs->rx_queues; 2269 2302 /* speed up Rx queue disable by batching them if possible */ 2270 2303 if (q_map && 2271 - bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF)) { 2304 + bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) { 2272 2305 if (ice_vsi_stop_all_rx_rings(vsi)) { 2273 2306 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n", 2274 2307 vsi->vsi_num); ··· 2276 2309 goto error_param; 2277 2310 } 2278 2311 2279 - bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF); 2312 + bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 2280 2313 } else if (q_map) { 2281 - for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { 2314 + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 2282 2315 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 2283 2316 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2284 2317 goto error_param; ··· 2312 2345 } 2313 2346 2314 2347 /** 2348 + * ice_cfg_interrupt 2349 + * @vf: pointer to the VF info 2350 + * @vsi: the VSI being configured 2351 + * @vector_id: vector ID 2352 + * @map: vector map for mapping vectors to queues 2353 + * @q_vector: structure for interrupt vector 2354 + * configure the IRQ to queue map 2355 + */ 2356 + static int 2357 + ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, 2358 + struct virtchnl_vector_map *map, 2359 + struct ice_q_vector *q_vector) 2360 + { 2361 + u16 vsi_q_id, vsi_q_id_idx; 2362 + unsigned long qmap; 2363 + 2364 + q_vector->num_ring_rx = 0; 2365 + q_vector->num_ring_tx = 0; 2366 + 2367 + qmap = map->rxq_map; 2368 + 
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { 2369 + vsi_q_id = vsi_q_id_idx; 2370 + 2371 + if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) 2372 + return VIRTCHNL_STATUS_ERR_PARAM; 2373 + 2374 + q_vector->num_ring_rx++; 2375 + q_vector->rx.itr_idx = map->rxitr_idx; 2376 + vsi->rx_rings[vsi_q_id]->q_vector = q_vector; 2377 + ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, 2378 + q_vector->rx.itr_idx); 2379 + } 2380 + 2381 + qmap = map->txq_map; 2382 + for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { 2383 + vsi_q_id = vsi_q_id_idx; 2384 + 2385 + if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) 2386 + return VIRTCHNL_STATUS_ERR_PARAM; 2387 + 2388 + q_vector->num_ring_tx++; 2389 + q_vector->tx.itr_idx = map->txitr_idx; 2390 + vsi->tx_rings[vsi_q_id]->q_vector = q_vector; 2391 + ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, 2392 + q_vector->tx.itr_idx); 2393 + } 2394 + 2395 + return VIRTCHNL_STATUS_SUCCESS; 2396 + } 2397 + 2398 + /** 2315 2399 * ice_vc_cfg_irq_map_msg 2316 2400 * @vf: pointer to the VF info 2317 2401 * @msg: pointer to the msg buffer ··· 2372 2354 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) 2373 2355 { 2374 2356 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2357 + u16 num_q_vectors_mapped, vsi_id, vector_id; 2375 2358 struct virtchnl_irq_map_info *irqmap_info; 2376 - u16 vsi_id, vsi_q_id, vector_id; 2377 2359 struct virtchnl_vector_map *map; 2378 2360 struct ice_pf *pf = vf->pf; 2379 - u16 num_q_vectors_mapped; 2380 2361 struct ice_vsi *vsi; 2381 - unsigned long qmap; 2382 2362 int i; 2383 2363 2384 2364 irqmap_info = (struct virtchnl_irq_map_info *)msg; ··· 2388 2372 */ 2389 2373 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 2390 2374 pf->num_vf_msix < num_q_vectors_mapped || 2391 - !irqmap_info->num_vectors) { 2375 + !num_q_vectors_mapped) { 2392 2376 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2393 2377 goto error_param; 2394 2378 } ··· 2409 2393 /* vector_id is always 0-based 
for each VF, and can never be 2410 2394 * larger than or equal to the max allowed interrupts per VF 2411 2395 */ 2412 - if (!(vector_id < ICE_MAX_INTR_PER_VF) || 2396 + if (!(vector_id < pf->num_vf_msix) || 2413 2397 !ice_vc_isvalid_vsi_id(vf, vsi_id) || 2414 2398 (!vector_id && (map->rxq_map || map->txq_map))) { 2415 2399 v_ret = VIRTCHNL_STATUS_ERR_PARAM; ··· 2430 2414 } 2431 2415 2432 2416 /* lookout for the invalid queue index */ 2433 - qmap = map->rxq_map; 2434 - q_vector->num_ring_rx = 0; 2435 - for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { 2436 - if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { 2437 - v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2438 - goto error_param; 2439 - } 2440 - q_vector->num_ring_rx++; 2441 - q_vector->rx.itr_idx = map->rxitr_idx; 2442 - vsi->rx_rings[vsi_q_id]->q_vector = q_vector; 2443 - ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, 2444 - q_vector->rx.itr_idx); 2445 - } 2446 - 2447 - qmap = map->txq_map; 2448 - q_vector->num_ring_tx = 0; 2449 - for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { 2450 - if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { 2451 - v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2452 - goto error_param; 2453 - } 2454 - q_vector->num_ring_tx++; 2455 - q_vector->tx.itr_idx = map->txitr_idx; 2456 - vsi->tx_rings[vsi_q_id]->q_vector = q_vector; 2457 - ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, 2458 - q_vector->tx.itr_idx); 2459 - } 2417 + v_ret = (enum virtchnl_status_code) 2418 + ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector); 2419 + if (v_ret) 2420 + goto error_param; 2460 2421 } 2461 2422 2462 2423 error_param: ··· 2476 2483 goto error_param; 2477 2484 } 2478 2485 2479 - if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF || 2486 + if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || 2480 2487 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { 2481 2488 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", 2482 2489 vf->vf_id, min_t(u16, 
vsi->alloc_txq, vsi->alloc_rxq)); ··· 2783 2790 if (!req_queues) { 2784 2791 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n", 2785 2792 vf->vf_id); 2786 - } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { 2793 + } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) { 2787 2794 dev_err(dev, "VF %d tried to request more than %d queues.\n", 2788 - vf->vf_id, ICE_MAX_BASE_QS_PER_VF); 2789 - vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; 2795 + vf->vf_id, ICE_MAX_RSS_QS_PER_VF); 2796 + vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF; 2790 2797 } else if (req_queues > cur_queues && 2791 2798 req_queues - cur_queues > tx_rx_queue_left) { 2792 2799 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", 2793 2800 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); 2794 2801 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, 2795 - ICE_MAX_BASE_QS_PER_VF); 2802 + ICE_MAX_RSS_QS_PER_VF); 2796 2803 } else { 2797 2804 /* request is successful, then reset VF */ 2798 2805 vf->num_req_qs = req_queues;
+6 -9
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
··· 21 21 #define ICE_PCI_CIAD_WAIT_COUNT 100 22 22 #define ICE_PCI_CIAD_WAIT_DELAY_US 1 23 23 24 - /* VF resources default values and limitation */ 24 + /* VF resource constraints */ 25 25 #define ICE_MAX_VF_COUNT 256 26 - #define ICE_MAX_QS_PER_VF 256 27 26 #define ICE_MIN_QS_PER_VF 1 28 - #define ICE_DFLT_QS_PER_VF 4 29 27 #define ICE_NONQ_VECS_VF 1 30 28 #define ICE_MAX_SCATTER_QS_PER_VF 16 31 - #define ICE_MAX_BASE_QS_PER_VF 16 32 - #define ICE_MAX_INTR_PER_VF 65 33 - #define ICE_MAX_POLICY_INTR_PER_VF 33 29 + #define ICE_MAX_RSS_QS_PER_VF 16 30 + #define ICE_NUM_VF_MSIX_MED 17 31 + #define ICE_NUM_VF_MSIX_SMALL 5 34 32 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) 35 - #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) 36 33 #define ICE_MAX_VF_RESET_TRIES 40 37 34 #define ICE_MAX_VF_RESET_SLEEP_MS 20 38 35 ··· 72 75 struct virtchnl_version_info vf_ver; 73 76 u32 driver_caps; /* reported by VF driver */ 74 77 struct virtchnl_ether_addr dflt_lan_addr; 75 - DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF); 76 - DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF); 78 + DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF); 79 + DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF); 77 80 u16 port_vlan_info; /* Port VLAN ID and QoS */ 78 81 u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ 79 82 u8 trusted:1;