Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: add helper to pre-check if PP for an Rx queue will be unreadable

mlx5 pokes into the rxq state to check if the queue has a memory
provider, and therefore whether it may produce unreadable mem.
Add a helper for doing this in the page pool API. fbnic will want
a similar thing (though for a slightly different reason).

Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20250901211214.1027927-11-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Jakub Kicinski and committed by
Paolo Abeni
3ceb0883 709da681

+24 -8
+1 -8
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 780 780 bitmap_free(rq->mpwqe.shampo->bitmap); 781 781 } 782 782 783 - static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq) 784 - { 785 - struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix); 786 - 787 - return !!rxq->mp_params.mp_ops; 788 - } 789 - 790 783 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, 791 784 struct mlx5e_params *params, 792 785 struct mlx5e_rq_param *rqp, ··· 818 825 hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) / 819 826 MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; 820 827 821 - if (mlx5_rq_needs_separate_hd_pool(rq)) { 828 + if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) { 822 829 /* Separate page pool for shampo headers */ 823 830 struct page_pool_params pp_params = { }; 824 831
+2
include/net/netdev_queues.h
··· 151 151 int idx); 152 152 }; 153 153 154 + bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx); 155 + 154 156 /** 155 157 * DOC: Lockless queue stopping / waking helpers. 156 158 *
+12
include/net/page_pool/helpers.h
··· 505 505 page_pool_update_nid(pool, new_nid); 506 506 } 507 507 508 + /** 509 + * page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU 510 + * @pool: queried page pool 511 + * 512 + * Check if page pool will return buffers which are unreadable to the CPU / 513 + * kernel. This will only be the case if user space bound a memory provider (mp) 514 + * which returns unreadable memory to the queue served by the page pool. 515 + * If %PP_FLAG_ALLOW_UNREADABLE_NETMEM was set but there is no mp bound 516 + * this helper will return false. See also netif_rxq_has_unreadable_mp(). 517 + * 518 + * Return: true if memory allocated by the page pool may be unreadable 519 + */ 508 520 static inline bool page_pool_is_unreadable(struct page_pool *pool) 509 521 { 510 522 return !!pool->mp_ops;
+9
net/core/netdev_rx_queue.c
··· 9 9 10 10 #include "page_pool_priv.h" 11 11 12 + /* See also page_pool_is_unreadable() */ 13 + bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx) 14 + { 15 + struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx); 16 + 17 + return !!rxq->mp_params.mp_ops; 18 + } 19 + EXPORT_SYMBOL(netif_rxq_has_unreadable_mp); 20 + 12 21 int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) 13 22 { 14 23 struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);