Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ice: Create framework for VSI queue context

This patch introduces a framework to store queue specific information
in VSI queue contexts. Currently VSI queue context (represented by
struct ice_q_ctx) only has q_handle as a member. In future patches,
this structure will be updated to hold queue specific information.

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

Authored by Anirudh Venkataramanan; committed by Jeff Kirsher.
bb87ee0e f76c4b57

+210 -61
+57 -5
drivers/net/ethernet/intel/ice/ice_common.c
··· 2791 2791 } 2792 2792 2793 2793 /** 2794 + * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 2795 + * @hw: pointer to the HW struct 2796 + * @vsi_handle: software VSI handle 2797 + * @tc: TC number 2798 + * @q_handle: software queue handle 2799 + */ 2800 + static struct ice_q_ctx * 2801 + ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 2802 + { 2803 + struct ice_vsi_ctx *vsi; 2804 + struct ice_q_ctx *q_ctx; 2805 + 2806 + vsi = ice_get_vsi_ctx(hw, vsi_handle); 2807 + if (!vsi) 2808 + return NULL; 2809 + if (q_handle >= vsi->num_lan_q_entries[tc]) 2810 + return NULL; 2811 + if (!vsi->lan_q_ctx[tc]) 2812 + return NULL; 2813 + q_ctx = vsi->lan_q_ctx[tc]; 2814 + return &q_ctx[q_handle]; 2815 + } 2816 + 2817 + /** 2794 2818 * ice_ena_vsi_txq 2795 2819 * @pi: port information structure 2796 2820 * @vsi_handle: software VSI handle 2797 2821 * @tc: TC number 2822 + * @q_handle: software queue handle 2798 2823 * @num_qgrps: Number of added queue groups 2799 2824 * @buf: list of queue groups to be added 2800 2825 * @buf_size: size of buffer for indirect command ··· 2828 2803 * This function adds one LAN queue 2829 2804 */ 2830 2805 enum ice_status 2831 - ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, 2832 - struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 2806 + ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 2807 + u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 2833 2808 struct ice_sq_cd *cd) 2834 2809 { 2835 2810 struct ice_aqc_txsched_elem_data node = { 0 }; 2836 2811 struct ice_sched_node *parent; 2812 + struct ice_q_ctx *q_ctx; 2837 2813 enum ice_status status; 2838 2814 struct ice_hw *hw; 2839 2815 ··· 2850 2824 return ICE_ERR_PARAM; 2851 2825 2852 2826 mutex_lock(&pi->sched_lock); 2827 + 2828 + q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 2829 + if (!q_ctx) { 2830 + ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 
2831 + q_handle); 2832 + status = ICE_ERR_PARAM; 2833 + goto ena_txq_exit; 2834 + } 2853 2835 2854 2836 /* find a parent node */ 2855 2837 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ··· 2885 2851 /* add the LAN queue */ 2886 2852 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 2887 2853 if (status) { 2888 - ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n", 2854 + ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 2889 2855 le16_to_cpu(buf->txqs[0].txq_id), 2890 2856 hw->adminq.sq_last_status); 2891 2857 goto ena_txq_exit; ··· 2893 2859 2894 2860 node.node_teid = buf->txqs[0].q_teid; 2895 2861 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 2862 + q_ctx->q_handle = q_handle; 2896 2863 2897 2864 /* add a leaf node into schduler tree queue layer */ 2898 2865 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); ··· 2906 2871 /** 2907 2872 * ice_dis_vsi_txq 2908 2873 * @pi: port information structure 2874 + * @vsi_handle: software VSI handle 2875 + * @tc: TC number 2909 2876 * @num_queues: number of queues 2877 + * @q_handles: pointer to software queue handle array 2910 2878 * @q_ids: pointer to the q_id array 2911 2879 * @q_teids: pointer to queue node teids 2912 2880 * @rst_src: if called due to reset, specifies the reset source ··· 2919 2881 * This function removes queues and their corresponding nodes in SW DB 2920 2882 */ 2921 2883 enum ice_status 2922 - ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, 2923 - u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, 2884 + ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 2885 + u16 *q_handles, u16 *q_ids, u32 *q_teids, 2886 + enum ice_disq_rst_src rst_src, u16 vmvf_num, 2924 2887 struct ice_sq_cd *cd) 2925 2888 { 2926 2889 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 2927 2890 struct ice_aqc_dis_txq_item qg_list; 2891 + struct ice_q_ctx *q_ctx; 2928 2892 u16 i; 2929 2893 2930 2894 if (!pi || 
pi->port_state != ICE_SCHED_PORT_STATE_READY) ··· 2949 2909 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 2950 2910 if (!node) 2951 2911 continue; 2912 + q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]); 2913 + if (!q_ctx) { 2914 + ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 2915 + q_handles[i]); 2916 + continue; 2917 + } 2918 + if (q_ctx->q_handle != q_handles[i]) { 2919 + ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 2920 + q_ctx->q_handle, q_handles[i]); 2921 + continue; 2922 + } 2952 2923 qg_list.parent_teid = node->info.parent_teid; 2953 2924 qg_list.num_qs = 1; 2954 2925 qg_list.q_id[0] = cpu_to_le16(q_ids[i]); ··· 2970 2919 if (status) 2971 2920 break; 2972 2921 ice_free_sched_node(pi, node); 2922 + q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 2973 2923 } 2974 2924 mutex_unlock(&pi->sched_lock); 2975 2925 return status;
+6 -5
drivers/net/ethernet/intel/ice/ice_common.h
··· 99 99 struct ice_sq_cd *cd); 100 100 101 101 enum ice_status 102 - ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, 103 - u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, 104 - struct ice_sq_cd *cmd_details); 102 + ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 103 + u16 *q_handle, u16 *q_ids, u32 *q_teids, 104 + enum ice_disq_rst_src rst_src, u16 vmvf_num, 105 + struct ice_sq_cd *cd); 105 106 enum ice_status 106 107 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 107 108 u16 *max_lanqs); 108 109 enum ice_status 109 - ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, 110 - struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 110 + ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 111 + u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 111 112 struct ice_sq_cd *cd); 112 113 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); 113 114 void ice_replay_post(struct ice_hw *hw);
+65 -44
drivers/net/ethernet/intel/ice/ice_lib.c
··· 1715 1715 rings[q_idx]->tail = 1716 1716 pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); 1717 1717 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, 1718 - num_q_grps, qg_buf, buf_len, 1719 - NULL); 1718 + i, num_q_grps, qg_buf, 1719 + buf_len, NULL); 1720 1720 if (status) { 1721 1721 dev_err(&vsi->back->pdev->dev, 1722 1722 "Failed to set LAN Tx queue context, error: %d\n", ··· 2033 2033 { 2034 2034 struct ice_pf *pf = vsi->back; 2035 2035 struct ice_hw *hw = &pf->hw; 2036 + int tc, q_idx = 0, err = 0; 2037 + u16 *q_ids, *q_handles, i; 2036 2038 enum ice_status status; 2037 2039 u32 *q_teids, val; 2038 - u16 *q_ids, i; 2039 - int err = 0; 2040 2040 2041 2041 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) 2042 2042 return -EINVAL; ··· 2053 2053 goto err_alloc_q_ids; 2054 2054 } 2055 2055 2056 - /* set up the Tx queue list to be disabled */ 2057 - ice_for_each_txq(vsi, i) { 2058 - u16 v_idx; 2059 - 2060 - if (!rings || !rings[i] || !rings[i]->q_vector) { 2061 - err = -EINVAL; 2062 - goto err_out; 2063 - } 2064 - 2065 - q_ids[i] = vsi->txq_map[i + offset]; 2066 - q_teids[i] = rings[i]->txq_teid; 2067 - 2068 - /* clear cause_ena bit for disabled queues */ 2069 - val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); 2070 - val &= ~QINT_TQCTL_CAUSE_ENA_M; 2071 - wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); 2072 - 2073 - /* software is expected to wait for 100 ns */ 2074 - ndelay(100); 2075 - 2076 - /* trigger a software interrupt for the vector associated to 2077 - * the queue to schedule NAPI handler 2078 - */ 2079 - v_idx = rings[i]->q_vector->v_idx; 2080 - wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), 2081 - GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); 2056 + q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, 2057 + sizeof(*q_handles), GFP_KERNEL); 2058 + if (!q_handles) { 2059 + err = -ENOMEM; 2060 + goto err_alloc_q_handles; 2082 2061 } 2083 - status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, 2084 - rst_src, rel_vmvf_num, NULL); 
2085 - /* if the disable queue command was exercised during an active reset 2086 - * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as 2087 - * the reset operation disables queues at the hardware level anyway. 2088 - */ 2089 - if (status == ICE_ERR_RESET_ONGOING) { 2090 - dev_info(&pf->pdev->dev, 2091 - "Reset in progress. LAN Tx queues already disabled\n"); 2092 - } else if (status) { 2093 - dev_err(&pf->pdev->dev, 2094 - "Failed to disable LAN Tx queues, error: %d\n", 2095 - status); 2096 - err = -ENODEV; 2062 + 2063 + /* set up the Tx queue list to be disabled for each enabled TC */ 2064 + ice_for_each_traffic_class(tc) { 2065 + if (!(vsi->tc_cfg.ena_tc & BIT(tc))) 2066 + break; 2067 + 2068 + for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { 2069 + u16 v_idx; 2070 + 2071 + if (!rings || !rings[i] || !rings[i]->q_vector) { 2072 + err = -EINVAL; 2073 + goto err_out; 2074 + } 2075 + 2076 + q_ids[i] = vsi->txq_map[q_idx + offset]; 2077 + q_teids[i] = rings[q_idx]->txq_teid; 2078 + q_handles[i] = i; 2079 + 2080 + /* clear cause_ena bit for disabled queues */ 2081 + val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); 2082 + val &= ~QINT_TQCTL_CAUSE_ENA_M; 2083 + wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); 2084 + 2085 + /* software is expected to wait for 100 ns */ 2086 + ndelay(100); 2087 + 2088 + /* trigger a software interrupt for the vector 2089 + * associated to the queue to schedule NAPI handler 2090 + */ 2091 + v_idx = rings[i]->q_vector->v_idx; 2092 + wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), 2093 + GLINT_DYN_CTL_SWINT_TRIG_M | 2094 + GLINT_DYN_CTL_INTENA_MSK_M); 2095 + q_idx++; 2096 + } 2097 + status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, 2098 + vsi->num_txq, q_handles, q_ids, 2099 + q_teids, rst_src, rel_vmvf_num, NULL); 2100 + 2101 + /* if the disable queue command was exercised during an active 2102 + * reset flow, ICE_ERR_RESET_ONGOING is returned. 
This is not 2103 + * an error as the reset operation disables queues at the 2104 + * hardware level anyway. 2105 + */ 2106 + if (status == ICE_ERR_RESET_ONGOING) { 2107 + dev_dbg(&pf->pdev->dev, 2108 + "Reset in progress. LAN Tx queues already disabled\n"); 2109 + } else if (status) { 2110 + dev_err(&pf->pdev->dev, 2111 + "Failed to disable LAN Tx queues, error: %d\n", 2112 + status); 2113 + err = -ENODEV; 2114 + } 2097 2115 } 2098 2116 2099 2117 err_out: 2118 + devm_kfree(&pf->pdev->dev, q_handles); 2119 + 2120 + err_alloc_q_handles: 2100 2121 devm_kfree(&pf->pdev->dev, q_ids); 2101 2122 2102 2123 err_alloc_q_ids:
+49 -5
drivers/net/ethernet/intel/ice/ice_sched.c
··· 533 533 } 534 534 535 535 /** 536 + * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC 537 + * @hw: pointer to the HW struct 538 + * @vsi_handle: VSI handle 539 + * @tc: TC number 540 + * @new_numqs: number of queues 541 + */ 542 + static enum ice_status 543 + ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) 544 + { 545 + struct ice_vsi_ctx *vsi_ctx; 546 + struct ice_q_ctx *q_ctx; 547 + 548 + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); 549 + if (!vsi_ctx) 550 + return ICE_ERR_PARAM; 551 + /* allocate LAN queue contexts */ 552 + if (!vsi_ctx->lan_q_ctx[tc]) { 553 + vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), 554 + new_numqs, 555 + sizeof(*q_ctx), 556 + GFP_KERNEL); 557 + if (!vsi_ctx->lan_q_ctx[tc]) 558 + return ICE_ERR_NO_MEMORY; 559 + vsi_ctx->num_lan_q_entries[tc] = new_numqs; 560 + return 0; 561 + } 562 + /* num queues are increased, update the queue contexts */ 563 + if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { 564 + u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; 565 + 566 + q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, 567 + sizeof(*q_ctx), GFP_KERNEL); 568 + if (!q_ctx) 569 + return ICE_ERR_NO_MEMORY; 570 + memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], 571 + prev_num * sizeof(*q_ctx)); 572 + devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); 573 + vsi_ctx->lan_q_ctx[tc] = q_ctx; 574 + vsi_ctx->num_lan_q_entries[tc] = new_numqs; 575 + } 576 + return 0; 577 + } 578 + 579 + /** 536 580 * ice_sched_clear_agg - clears the aggregator related information 537 581 * @hw: pointer to the hardware structure 538 582 * ··· 1447 1403 if (!vsi_ctx) 1448 1404 return ICE_ERR_PARAM; 1449 1405 1450 - if (owner == ICE_SCHED_NODE_OWNER_LAN) 1451 - prev_numqs = vsi_ctx->sched.max_lanq[tc]; 1452 - else 1453 - return ICE_ERR_PARAM; 1454 - 1406 + prev_numqs = vsi_ctx->sched.max_lanq[tc]; 1455 1407 /* num queues are not changed or less than the previous number */ 1456 1408 if (new_numqs <= prev_numqs) 1457 1409 
return status; 1410 + status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); 1411 + if (status) 1412 + return status; 1413 + 1458 1414 if (new_numqs) 1459 1415 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); 1460 1416 /* Keep the max number of queue configuration all the time. Update the
+22
drivers/net/ethernet/intel/ice/ice_switch.c
··· 329 329 } 330 330 331 331 /** 332 + * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs 333 + * @hw: pointer to the HW struct 334 + * @vsi_handle: VSI handle 335 + */ 336 + static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) 337 + { 338 + struct ice_vsi_ctx *vsi; 339 + u8 i; 340 + 341 + vsi = ice_get_vsi_ctx(hw, vsi_handle); 342 + if (!vsi) 343 + return; 344 + ice_for_each_traffic_class(i) { 345 + if (vsi->lan_q_ctx[i]) { 346 + devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]); 347 + vsi->lan_q_ctx[i] = NULL; 348 + } 349 + } 350 + } 351 + 352 + /** 332 353 * ice_clear_vsi_ctx - clear the VSI context entry 333 354 * @hw: pointer to the HW struct 334 355 * @vsi_handle: VSI handle ··· 362 341 363 342 vsi = ice_get_vsi_ctx(hw, vsi_handle); 364 343 if (vsi) { 344 + ice_clear_vsi_q_ctx(hw, vsi_handle); 365 345 devm_kfree(ice_hw_to_dev(hw), vsi); 366 346 hw->vsi_ctx[vsi_handle] = NULL; 367 347 }
+9
drivers/net/ethernet/intel/ice/ice_switch.h
··· 9 9 #define ICE_SW_CFG_MAX_BUF_LEN 2048 10 10 #define ICE_DFLT_VSI_INVAL 0xff 11 11 #define ICE_VSI_INVAL_ID 0xffff 12 + #define ICE_INVAL_Q_HANDLE 0xFFFF 13 + #define ICE_INVAL_Q_HANDLE 0xFFFF 14 + 15 + /* VSI queue context structure */ 16 + struct ice_q_ctx { 17 + u16 q_handle; 18 + }; 12 19 13 20 /* VSI context structure for add/get/update/free operations */ 14 21 struct ice_vsi_ctx { ··· 27 20 struct ice_sched_vsi_info sched; 28 21 u8 alloc_from_pool; 29 22 u8 vf_num; 23 + u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS]; 24 + struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS]; 30 25 }; 31 26 32 27 enum ice_sw_fwd_act_type {
+2 -2
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
··· 996 996 /* Call Disable LAN Tx queue AQ call even when queues are not 997 997 * enabled. This is needed for successful completiom of VFR 998 998 */ 999 - ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET, 1000 - vf->vf_id, NULL); 999 + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, 1000 + NULL, ICE_VF_RESET, vf->vf_id, NULL); 1001 1001 } 1002 1002 1003 1003 hw = &pf->hw;