Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-05-30 (ice, idpf)

For ice:
Michal resolves XDP issues related to Tx scheduler configuration with
a large number of Tx queues.

Additional information:
https://lore.kernel.org/intel-wired-lan/20250513105529.241745-1-michal.kubiak@intel.com/

For idpf:
Brian Vazquez updates the netif_subqueue_maybe_stop() condition check to
prevent possible races.

Emil shuts down the virtchnl mailbox during reset to reduce timeout
delays, as it's unavailable during that time.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
idpf: avoid mailbox timeout delays during reset
idpf: fix a race in txq wakeup
ice: fix rebuilding the Tx scheduler tree for large queue counts
ice: create new Tx scheduler nodes for new queues only
ice: fix Tx scheduler error handling in XDP callback
====================

Link: https://patch.msgid.link/20250530211221.2170484-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+218 -93
+33 -14
drivers/net/ethernet/intel/ice/ice_main.c
··· 2741 2741 } 2742 2742 2743 2743 /** 2744 + * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors 2745 + * @vsi: the VSI with XDP rings being unmapped 2746 + */ 2747 + static void ice_unmap_xdp_rings(struct ice_vsi *vsi) 2748 + { 2749 + int v_idx; 2750 + 2751 + ice_for_each_q_vector(vsi, v_idx) { 2752 + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2753 + struct ice_tx_ring *ring; 2754 + 2755 + ice_for_each_tx_ring(ring, q_vector->tx) 2756 + if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2757 + break; 2758 + 2759 + /* restore the value of last node prior to XDP setup */ 2760 + q_vector->tx.tx_ring = ring; 2761 + } 2762 + } 2763 + 2764 + /** 2744 2765 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2745 2766 * @vsi: VSI to bring up Tx rings used by XDP 2746 2767 * @prog: bpf program that will be assigned to VSI ··· 2824 2803 if (status) { 2825 2804 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", 2826 2805 status); 2827 - goto clear_xdp_rings; 2806 + goto unmap_xdp_rings; 2828 2807 } 2829 2808 2830 2809 /* assign the prog only when it's not already present on VSI; ··· 2840 2819 ice_vsi_assign_bpf_prog(vsi, prog); 2841 2820 2842 2821 return 0; 2822 + unmap_xdp_rings: 2823 + ice_unmap_xdp_rings(vsi); 2843 2824 clear_xdp_rings: 2844 2825 ice_for_each_xdp_txq(vsi, i) 2845 2826 if (vsi->xdp_rings[i]) { ··· 2858 2835 mutex_unlock(&pf->avail_q_mutex); 2859 2836 2860 2837 devm_kfree(dev, vsi->xdp_rings); 2838 + vsi->xdp_rings = NULL; 2839 + 2861 2840 return -ENOMEM; 2862 2841 } 2863 2842 ··· 2875 2850 { 2876 2851 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2877 2852 struct ice_pf *pf = vsi->back; 2878 - int i, v_idx; 2853 + int i; 2879 2854 2880 2855 /* q_vectors are freed in reset path so there's no point in detaching 2881 2856 * rings ··· 2883 2858 if (cfg_type == ICE_XDP_CFG_PART) 2884 2859 goto free_qmap; 2885 2860 2886 - ice_for_each_q_vector(vsi, v_idx) { 2887 - struct ice_q_vector *q_vector = 
vsi->q_vectors[v_idx]; 2888 - struct ice_tx_ring *ring; 2889 - 2890 - ice_for_each_tx_ring(ring, q_vector->tx) 2891 - if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2892 - break; 2893 - 2894 - /* restore the value of last node prior to XDP setup */ 2895 - q_vector->tx.tx_ring = ring; 2896 - } 2861 + ice_unmap_xdp_rings(vsi); 2897 2862 2898 2863 free_qmap: 2899 2864 mutex_lock(&pf->avail_q_mutex); ··· 3028 3013 xdp_ring_err = ice_vsi_determine_xdp_res(vsi); 3029 3014 if (xdp_ring_err) { 3030 3015 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); 3016 + goto resume_if; 3031 3017 } else { 3032 3018 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, 3033 3019 ICE_XDP_CFG_FULL); 3034 - if (xdp_ring_err) 3020 + if (xdp_ring_err) { 3035 3021 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); 3022 + goto resume_if; 3023 + } 3036 3024 } 3037 3025 xdp_features_set_redirect_target(vsi->netdev, true); 3038 3026 /* reallocate Rx queues that are used for zero-copy */ ··· 3053 3035 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); 3054 3036 } 3055 3037 3038 + resume_if: 3056 3039 if (if_running) 3057 3040 ret = ice_up(vsi); 3058 3041
+148 -33
drivers/net/ethernet/intel/ice/ice_sched.c
··· 85 85 } 86 86 87 87 /** 88 + * ice_sched_find_next_vsi_node - find the next node for a given VSI 89 + * @vsi_node: VSI support node to start search with 90 + * 91 + * Return: Next VSI support node, or NULL. 92 + * 93 + * The function returns a pointer to the next node from the VSI layer 94 + * assigned to the given VSI, or NULL if there is no such a node. 95 + */ 96 + static struct ice_sched_node * 97 + ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node) 98 + { 99 + unsigned int vsi_handle = vsi_node->vsi_handle; 100 + 101 + while ((vsi_node = vsi_node->sibling) != NULL) 102 + if (vsi_node->vsi_handle == vsi_handle) 103 + break; 104 + 105 + return vsi_node; 106 + } 107 + 108 + /** 88 109 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd 89 110 * @hw: pointer to the HW struct 90 111 * @cmd_opc: cmd opcode ··· 1105 1084 if (parent->num_children < max_child_nodes) { 1106 1085 new_num_nodes = max_child_nodes - parent->num_children; 1107 1086 } else { 1108 - /* This parent is full, try the next sibling */ 1109 - parent = parent->sibling; 1087 + /* This parent is full, 1088 + * try the next available sibling. 1089 + */ 1090 + parent = ice_sched_find_next_vsi_node(parent); 1110 1091 /* Don't modify the first node TEID memory if the 1111 1092 * first node was added already in the above call. 
1112 1093 * Instead send some temp memory for all other ··· 1551 1528 /* get the first queue group node from VSI sub-tree */ 1552 1529 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); 1553 1530 while (qgrp_node) { 1531 + struct ice_sched_node *next_vsi_node; 1532 + 1554 1533 /* make sure the qgroup node is part of the VSI subtree */ 1555 1534 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) 1556 1535 if (qgrp_node->num_children < max_children && 1557 1536 qgrp_node->owner == owner) 1558 1537 break; 1559 1538 qgrp_node = qgrp_node->sibling; 1539 + if (qgrp_node) 1540 + continue; 1541 + 1542 + next_vsi_node = ice_sched_find_next_vsi_node(vsi_node); 1543 + if (!next_vsi_node) 1544 + break; 1545 + 1546 + vsi_node = next_vsi_node; 1547 + qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); 1560 1548 } 1561 1549 1562 1550 /* Select the best queue group */ ··· 1638 1604 /** 1639 1605 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes 1640 1606 * @hw: pointer to the HW struct 1641 - * @num_qs: number of queues 1607 + * @num_new_qs: number of new queues that will be added to the tree 1642 1608 * @num_nodes: num nodes array 1643 1609 * 1644 1610 * This function calculates the number of VSI child nodes based on the 1645 1611 * number of queues. 1646 1612 */ 1647 1613 static void 1648 - ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) 1614 + ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes) 1649 1615 { 1650 - u16 num = num_qs; 1616 + u16 num = num_new_qs; 1651 1617 u8 i, qgl, vsil; 1652 1618 1653 1619 qgl = ice_sched_get_qgrp_layer(hw); ··· 1813 1779 if (!parent) 1814 1780 return -EIO; 1815 1781 1816 - if (i == vsil) 1782 + /* Do not modify the VSI handle for already existing VSI nodes, 1783 + * (if no new VSI node was added to the tree). 1784 + * Assign the VSI handle only to newly added VSI nodes. 
1785 + */ 1786 + if (i == vsil && num_added) 1817 1787 parent->vsi_handle = vsi_handle; 1818 1788 } 1819 1789 ··· 1848 1810 /* add VSI supported nodes to TC subtree */ 1849 1811 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, 1850 1812 num_nodes); 1813 + } 1814 + 1815 + /** 1816 + * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count 1817 + * @hw: pointer to the HW struct 1818 + * @vsi_node: pointer to the leftmost VSI node that needs to be extended 1819 + * @new_numqs: new number of queues that has to be handled by the VSI 1820 + * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry 1821 + * 1822 + * This function recalculates the number of supported nodes that need to 1823 + * be added after adding more Tx queues for a given VSI. 1824 + * The number of new VSI support nodes that shall be added will be saved 1825 + * to the @new_num_nodes table for the VSI layer. 1826 + */ 1827 + static void 1828 + ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw, 1829 + struct ice_sched_node *vsi_node, 1830 + unsigned int new_numqs, u16 *new_num_nodes) 1831 + { 1832 + u32 vsi_nodes_cnt = 1; 1833 + u32 max_queue_cnt = 1; 1834 + u32 qgl, vsil; 1835 + 1836 + qgl = ice_sched_get_qgrp_layer(hw); 1837 + vsil = ice_sched_get_vsi_layer(hw); 1838 + 1839 + for (u32 i = vsil; i <= qgl; i++) 1840 + max_queue_cnt *= hw->max_children[i]; 1841 + 1842 + while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL) 1843 + vsi_nodes_cnt++; 1844 + 1845 + if (new_numqs > (max_queue_cnt * vsi_nodes_cnt)) 1846 + new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) - 1847 + vsi_nodes_cnt; 1851 1848 } 1852 1849 1853 1850 /** ··· 1936 1863 return status; 1937 1864 } 1938 1865 1939 - if (new_numqs) 1940 - ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); 1941 - /* Keep the max number of queue configuration all the time. Update the 1942 - * tree only if number of queues > previous number of queues. 
This may 1866 + ice_sched_recalc_vsi_support_nodes(hw, vsi_node, 1867 + new_numqs, new_num_nodes); 1868 + ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs, 1869 + new_num_nodes); 1870 + 1871 + /* Never decrease the number of queues in the tree. Update the tree 1872 + * only if number of queues > previous number of queues. This may 1943 1873 * leave some extra nodes in the tree if number of queues < previous 1944 1874 * number but that wouldn't harm anything. Removing those extra nodes 1945 1875 * may complicate the code if those nodes are part of SRL or 1946 1876 * individually rate limited. 1877 + * Also, add the required VSI support nodes if the existing ones cannot 1878 + * handle the requested new number of queues. 1947 1879 */ 1880 + status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, 1881 + new_num_nodes); 1882 + if (status) 1883 + return status; 1884 + 1948 1885 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, 1949 1886 new_num_nodes, owner); 1950 1887 if (status) ··· 2096 2013 } 2097 2014 2098 2015 /** 2016 + * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI 2017 + * @pi: port information structure 2018 + * @vsi_node: pointer to the leftmost node of the VSI to be removed 2019 + * @owner: LAN or RDMA 2020 + * @tc: TC number 2021 + * 2022 + * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC. 2023 + * 2024 + * This function removes all the VSI support nodes associated with a given VSI 2025 + * and its LAN or RDMA children nodes from the scheduler tree. 
2026 + */ 2027 + static int 2028 + ice_sched_rm_vsi_subtree(struct ice_port_info *pi, 2029 + struct ice_sched_node *vsi_node, u8 owner, u8 tc) 2030 + { 2031 + u16 vsi_handle = vsi_node->vsi_handle; 2032 + bool all_vsi_nodes_removed = true; 2033 + int j = 0; 2034 + 2035 + while (vsi_node) { 2036 + struct ice_sched_node *next_vsi_node; 2037 + 2038 + if (ice_sched_is_leaf_node_present(vsi_node)) { 2039 + ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc); 2040 + return -EBUSY; 2041 + } 2042 + while (j < vsi_node->num_children) { 2043 + if (vsi_node->children[j]->owner == owner) 2044 + ice_free_sched_node(pi, vsi_node->children[j]); 2045 + else 2046 + j++; 2047 + } 2048 + 2049 + next_vsi_node = ice_sched_find_next_vsi_node(vsi_node); 2050 + 2051 + /* remove the VSI if it has no children */ 2052 + if (!vsi_node->num_children) 2053 + ice_free_sched_node(pi, vsi_node); 2054 + else 2055 + all_vsi_nodes_removed = false; 2056 + 2057 + vsi_node = next_vsi_node; 2058 + } 2059 + 2060 + /* clean up aggregator related VSI info if any */ 2061 + if (all_vsi_nodes_removed) 2062 + ice_sched_rm_agg_vsi_info(pi, vsi_handle); 2063 + 2064 + return 0; 2065 + } 2066 + 2067 + /** 2099 2068 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes 2100 2069 * @pi: port information structure 2101 2070 * @vsi_handle: software VSI handle ··· 2173 2038 2174 2039 ice_for_each_traffic_class(i) { 2175 2040 struct ice_sched_node *vsi_node, *tc_node; 2176 - u8 j = 0; 2177 2041 2178 2042 tc_node = ice_sched_get_tc_node(pi, i); 2179 2043 if (!tc_node) ··· 2182 2048 if (!vsi_node) 2183 2049 continue; 2184 2050 2185 - if (ice_sched_is_leaf_node_present(vsi_node)) { 2186 - ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); 2187 - status = -EBUSY; 2051 + status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i); 2052 + if (status) 2188 2053 goto exit_sched_rm_vsi_cfg; 2189 - } 2190 - while (j < vsi_node->num_children) { 2191 - if (vsi_node->children[j]->owner == 
owner) { 2192 - ice_free_sched_node(pi, vsi_node->children[j]); 2193 2054 2194 - /* reset the counter again since the num 2195 - * children will be updated after node removal 2196 - */ 2197 - j = 0; 2198 - } else { 2199 - j++; 2200 - } 2201 - } 2202 - /* remove the VSI if it has no children */ 2203 - if (!vsi_node->num_children) { 2204 - ice_free_sched_node(pi, vsi_node); 2205 - vsi_ctx->sched.vsi_node[i] = NULL; 2055 + vsi_ctx->sched.vsi_node[i] = NULL; 2206 2056 2207 - /* clean up aggregator related VSI info if any */ 2208 - ice_sched_rm_agg_vsi_info(pi, vsi_handle); 2209 - } 2210 2057 if (owner == ICE_SCHED_NODE_OWNER_LAN) 2211 2058 vsi_ctx->sched.max_lanq[i] = 0; 2212 2059 else
+13 -5
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 1801 1801 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) 1802 1802 return; 1803 1803 1804 - if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || 1805 - test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { 1806 - set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1807 - idpf_init_hard_reset(adapter); 1808 - } 1804 + if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags)) 1805 + goto func_reset; 1806 + 1807 + if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) 1808 + goto drv_load; 1809 + 1810 + return; 1811 + 1812 + func_reset: 1813 + idpf_vc_xn_shutdown(adapter->vcxn_mngr); 1814 + drv_load: 1815 + set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); 1816 + idpf_init_hard_reset(adapter); 1809 1817 } 1810 1818 1811 1819 /**
+5 -4
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
··· 362 362 { 363 363 struct idpf_tx_offload_params offload = { }; 364 364 struct idpf_tx_buf *first; 365 + int csum, tso, needed; 365 366 unsigned int count; 366 367 __be16 protocol; 367 - int csum, tso; 368 368 369 369 count = idpf_tx_desc_count_required(tx_q, skb); 370 370 if (unlikely(!count)) 371 371 return idpf_tx_drop_skb(tx_q, skb); 372 372 373 - if (idpf_tx_maybe_stop_common(tx_q, 374 - count + IDPF_TX_DESCS_PER_CACHE_LINE + 375 - IDPF_TX_DESCS_FOR_CTX)) { 373 + needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX; 374 + if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, 375 + IDPF_DESC_UNUSED(tx_q), 376 + needed, needed)) { 376 377 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); 377 378 378 379 u64_stats_update_begin(&tx_q->stats_sync);
+17 -28
drivers/net/ethernet/intel/idpf/idpf_txrx.c
··· 2184 2184 desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); 2185 2185 } 2186 2186 2187 + /* Global conditions to tell whether the txq (and related resources) 2188 + * has room to allow the use of "size" descriptors. 2189 + */ 2190 + static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size) 2191 + { 2192 + if (IDPF_DESC_UNUSED(tx_q) < size || 2193 + IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > 2194 + IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) || 2195 + IDPF_TX_BUF_RSV_LOW(tx_q)) 2196 + return 0; 2197 + return 1; 2198 + } 2199 + 2187 2200 /** 2188 2201 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions 2189 2202 * @tx_q: the queue to be checked ··· 2207 2194 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, 2208 2195 unsigned int descs_needed) 2209 2196 { 2210 - if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) 2211 - goto out; 2197 + if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, 2198 + idpf_txq_has_room(tx_q, descs_needed), 2199 + 1, 1)) 2200 + return 0; 2212 2201 2213 - /* If there are too many outstanding completions expected on the 2214 - * completion queue, stop the TX queue to give the device some time to 2215 - * catch up 2216 - */ 2217 - if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > 2218 - IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) 2219 - goto splitq_stop; 2220 - 2221 - /* Also check for available book keeping buffers; if we are low, stop 2222 - * the queue to wait for more completions 2223 - */ 2224 - if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) 2225 - goto splitq_stop; 2226 - 2227 - return 0; 2228 - 2229 - splitq_stop: 2230 - netif_stop_subqueue(tx_q->netdev, tx_q->idx); 2231 - 2232 - out: 2233 2202 u64_stats_update_begin(&tx_q->stats_sync); 2234 2203 u64_stats_inc(&tx_q->q_stats.q_busy); 2235 2204 u64_stats_update_end(&tx_q->stats_sync); ··· 2236 2241 2237 2242 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); 2238 2243 tx_q->next_to_use = val; 2239 - 2240 - if 
(idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) { 2241 - u64_stats_update_begin(&tx_q->stats_sync); 2242 - u64_stats_inc(&tx_q->q_stats.q_busy); 2243 - u64_stats_update_end(&tx_q->stats_sync); 2244 - } 2245 2244 2246 2245 /* Force memory writes to complete before letting h/w 2247 2246 * know there are new descriptors to fetch. (Only
-8
drivers/net/ethernet/intel/idpf/idpf_txrx.h
··· 1049 1049 u16 cleaned_count); 1050 1050 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); 1051 1051 1052 - static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, 1053 - u32 needed) 1054 - { 1055 - return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, 1056 - IDPF_DESC_UNUSED(tx_q), 1057 - needed, needed); 1058 - } 1059 - 1060 1052 #endif /* !_IDPF_TXRX_H_ */
+1 -1
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
··· 347 347 * All waiting threads will be woken-up and their transaction aborted. Further 348 348 * operations on that object will fail. 349 349 */ 350 - static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) 350 + void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) 351 351 { 352 352 int i; 353 353
+1
drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
··· 150 150 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); 151 151 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); 152 152 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); 153 + void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr); 153 154 154 155 #endif /* _IDPF_VIRTCHNL_H_ */