Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: move mp dev config validation to __net_mp_open_rxq()

devmem code performs a number of safety checks to avoid having
to reimplement all of them in the drivers. Move those to
__net_mp_open_rxq() and reuse that function for binding to make
sure that io_uring ZC also benefits from them.

While at it, rename the queue ID variable to rxq_idx in
__net_mp_open_rxq(), since we touch most of the relevant lines anyway.

The XArray insertion is reordered after the netdev_rx_queue_restart()
call, otherwise we'd need to duplicate the queue index check
or risk inserting an invalid pointer. The XArray allocation
failures should be extremely rare.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Fixes: 6e18ed929d3b ("net: add helpers for setting a memory provider on an rx queue")
Link: https://patch.msgid.link/20250403013405.2827250-2-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+55 -58
+6
include/net/page_pool/memory_provider.h
··· 6 6 #include <net/page_pool/types.h> 7 7 8 8 struct netdev_rx_queue; 9 + struct netlink_ext_ack; 9 10 struct sk_buff; 10 11 11 12 struct memory_provider_ops { ··· 25 24 26 25 int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, 27 26 struct pp_memory_provider_params *p); 27 + int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx, 28 + const struct pp_memory_provider_params *p, 29 + struct netlink_ext_ack *extack); 28 30 void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx, 29 31 struct pp_memory_provider_params *old_p); 32 + void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx, 33 + const struct pp_memory_provider_params *old_p); 30 34 31 35 /** 32 36 * net_mp_netmem_place_in_cache() - give a netmem to a page pool
+11 -41
net/core/devmem.c
··· 8 8 */ 9 9 10 10 #include <linux/dma-buf.h> 11 - #include <linux/ethtool_netlink.h> 12 11 #include <linux/genalloc.h> 13 12 #include <linux/mm.h> 14 13 #include <linux/netdevice.h> ··· 142 143 struct net_devmem_dmabuf_binding *binding, 143 144 struct netlink_ext_ack *extack) 144 145 { 146 + struct pp_memory_provider_params mp_params = { 147 + .mp_priv = binding, 148 + .mp_ops = &dmabuf_devmem_ops, 149 + }; 145 150 struct netdev_rx_queue *rxq; 146 151 u32 xa_idx; 147 152 int err; 148 153 149 - if (rxq_idx >= dev->real_num_rx_queues) { 150 - NL_SET_ERR_MSG(extack, "rx queue index out of range"); 151 - return -ERANGE; 152 - } 153 - 154 - if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) { 155 - NL_SET_ERR_MSG(extack, "tcp-data-split is disabled"); 156 - return -EINVAL; 157 - } 158 - 159 - if (dev->cfg->hds_thresh) { 160 - NL_SET_ERR_MSG(extack, "hds-thresh is not zero"); 161 - return -EINVAL; 162 - } 163 - 164 - rxq = __netif_get_rx_queue(dev, rxq_idx); 165 - if (rxq->mp_params.mp_ops) { 166 - NL_SET_ERR_MSG(extack, "designated queue already memory provider bound"); 167 - return -EEXIST; 168 - } 169 - 170 - #ifdef CONFIG_XDP_SOCKETS 171 - if (rxq->pool) { 172 - NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP"); 173 - return -EBUSY; 174 - } 175 - #endif 176 - 177 - err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, 178 - GFP_KERNEL); 154 + err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack); 179 155 if (err) 180 156 return err; 181 157 182 - rxq->mp_params.mp_priv = binding; 183 - rxq->mp_params.mp_ops = &dmabuf_devmem_ops; 184 - 185 - err = netdev_rx_queue_restart(dev, rxq_idx); 158 + rxq = __netif_get_rx_queue(dev, rxq_idx); 159 + err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, 160 + GFP_KERNEL); 186 161 if (err) 187 - goto err_xa_erase; 162 + goto err_close_rxq; 188 163 189 164 return 0; 190 165 191 - err_xa_erase: 192 - rxq->mp_params.mp_priv = NULL; 193 - rxq->mp_params.mp_ops = NULL; 194 - 
xa_erase(&binding->bound_rxqs, xa_idx); 195 - 166 + err_close_rxq: 167 + __net_mp_close_rxq(dev, rxq_idx, &mp_params); 196 168 return err; 197 169 } 198 170
-6
net/core/netdev-genl.c
··· 874 874 goto err_unlock; 875 875 } 876 876 877 - if (dev_xdp_prog_count(netdev)) { 878 - NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached"); 879 - err = -EEXIST; 880 - goto err_unlock; 881 - } 882 - 883 877 binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack); 884 878 if (IS_ERR(binding)) { 885 879 err = PTR_ERR(binding);
+38 -11
net/core/netdev_rx_queue.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 + #include <linux/ethtool_netlink.h> 3 4 #include <linux/netdevice.h> 4 5 #include <net/netdev_lock.h> 5 6 #include <net/netdev_queues.h> ··· 87 86 } 88 87 EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL"); 89 88 90 - static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, 91 - struct pp_memory_provider_params *p) 89 + int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx, 90 + const struct pp_memory_provider_params *p, 91 + struct netlink_ext_ack *extack) 92 92 { 93 93 struct netdev_rx_queue *rxq; 94 94 int ret; ··· 97 95 if (!netdev_need_ops_lock(dev)) 98 96 return -EOPNOTSUPP; 99 97 100 - if (ifq_idx >= dev->real_num_rx_queues) 98 + if (rxq_idx >= dev->real_num_rx_queues) 101 99 return -EINVAL; 102 - ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues); 100 + rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues); 103 101 104 - rxq = __netif_get_rx_queue(dev, ifq_idx); 105 - if (rxq->mp_params.mp_ops) 102 + if (rxq_idx >= dev->real_num_rx_queues) { 103 + NL_SET_ERR_MSG(extack, "rx queue index out of range"); 104 + return -ERANGE; 105 + } 106 + if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) { 107 + NL_SET_ERR_MSG(extack, "tcp-data-split is disabled"); 108 + return -EINVAL; 109 + } 110 + if (dev->cfg->hds_thresh) { 111 + NL_SET_ERR_MSG(extack, "hds-thresh is not zero"); 112 + return -EINVAL; 113 + } 114 + if (dev_xdp_prog_count(dev)) { 115 + NL_SET_ERR_MSG(extack, "unable to custom memory provider to device with XDP program attached"); 106 116 return -EEXIST; 117 + } 118 + 119 + rxq = __netif_get_rx_queue(dev, rxq_idx); 120 + if (rxq->mp_params.mp_ops) { 121 + NL_SET_ERR_MSG(extack, "designated queue already memory provider bound"); 122 + return -EEXIST; 123 + } 124 + #ifdef CONFIG_XDP_SOCKETS 125 + if (rxq->pool) { 126 + NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP"); 127 + return -EBUSY; 128 + } 129 + #endif 
107 130 108 131 rxq->mp_params = *p; 109 - ret = netdev_rx_queue_restart(dev, ifq_idx); 132 + ret = netdev_rx_queue_restart(dev, rxq_idx); 110 133 if (ret) { 111 134 rxq->mp_params.mp_ops = NULL; 112 135 rxq->mp_params.mp_priv = NULL; ··· 139 112 return ret; 140 113 } 141 114 142 - int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, 115 + int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx, 143 116 struct pp_memory_provider_params *p) 144 117 { 145 118 int ret; 146 119 147 120 netdev_lock(dev); 148 - ret = __net_mp_open_rxq(dev, ifq_idx, p); 121 + ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL); 149 122 netdev_unlock(dev); 150 123 return ret; 151 124 } 152 125 153 - static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx, 154 - struct pp_memory_provider_params *old_p) 126 + void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx, 127 + const struct pp_memory_provider_params *old_p) 155 128 { 156 129 struct netdev_rx_queue *rxq; 157 130