Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: page_pool: don't try to stash the napi id

Page pool tried to cache the NAPI ID in page pool info to avoid
having a dependency on the life cycle of the NAPI instance.
Since commit under Fixes the NAPI ID is not populated until
napi_enable() and there's a good chance that page pool is
created before NAPI gets enabled.

Protect the NAPI pointer with the existing page pool mutex;
the reading path already holds it. napi_id itself we need
to READ_ONCE(), as it's protected by netdev_lock(), which we
are not holding in page pool.

Before this patch napi IDs were missing for mlx5:

# ./cli.py --spec netlink/specs/netdev.yaml --dump page-pool-get

[{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912},
{'id': 143, 'ifindex': 2, 'inflight': 5568, 'inflight-mem': 22806528},
{'id': 142, 'ifindex': 2, 'inflight': 5120, 'inflight-mem': 20971520},
{'id': 141, 'ifindex': 2, 'inflight': 4992, 'inflight-mem': 20447232},
...

After:

[{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912,
'napi-id': 565},
{'id': 143, 'ifindex': 2, 'inflight': 4224, 'inflight-mem': 17301504,
'napi-id': 525},
{'id': 142, 'ifindex': 2, 'inflight': 4288, 'inflight-mem': 17563648,
'napi-id': 524},
...

Fixes: 86e25f40aa1e ("net: napi: Add napi_config")
Reviewed-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20250123231620.1086401-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+14 -8
-1
include/net/page_pool/types.h
··· 237 237 struct { 238 238 struct hlist_node list; 239 239 u64 detach_time; 240 - u32 napi_id; 241 240 u32 id; 242 241 } user; 243 242 };
+1 -1
net/core/dev.c
··· 6708 6708 static void __napi_hash_add_with_id(struct napi_struct *napi, 6709 6709 unsigned int napi_id) 6710 6710 { 6711 - napi->napi_id = napi_id; 6711 + WRITE_ONCE(napi->napi_id, napi_id); 6712 6712 hlist_add_head_rcu(&napi->napi_hash_node, 6713 6713 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 6714 6714 }
+2
net/core/page_pool.c
··· 1147 1147 WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state)); 1148 1148 WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1); 1149 1149 1150 + mutex_lock(&page_pools_lock); 1150 1151 WRITE_ONCE(pool->p.napi, NULL); 1152 + mutex_unlock(&page_pools_lock); 1151 1153 } 1152 1154 EXPORT_SYMBOL(page_pool_disable_direct_recycling); 1153 1155
+2
net/core/page_pool_priv.h
··· 7 7 8 8 #include "netmem_priv.h" 9 9 10 + extern struct mutex page_pools_lock; 11 + 10 12 s32 page_pool_inflight(const struct page_pool *pool, bool strict); 11 13 12 14 int page_pool_list(struct page_pool *pool);
+9 -6
net/core/page_pool_user.c
··· 3 3 #include <linux/mutex.h> 4 4 #include <linux/netdevice.h> 5 5 #include <linux/xarray.h> 6 + #include <net/busy_poll.h> 6 7 #include <net/net_debug.h> 7 8 #include <net/netdev_rx_queue.h> 8 9 #include <net/page_pool/helpers.h> ··· 15 14 #include "netdev-genl-gen.h" 16 15 17 16 static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1); 18 - /* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user. 17 + /* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev, 18 + * pool->user. 19 19 * Ordering: inside rtnl_lock 20 20 */ 21 - static DEFINE_MUTEX(page_pools_lock); 21 + DEFINE_MUTEX(page_pools_lock); 22 22 23 23 /* Page pools are only reachable from user space (via netlink) if they are 24 24 * linked to a netdev at creation time. Following page pool "visibility" ··· 218 216 { 219 217 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; 220 218 size_t inflight, refsz; 219 + unsigned int napi_id; 221 220 void *hdr; 222 221 223 222 hdr = genlmsg_iput(rsp, info); ··· 232 229 nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX, 233 230 pool->slow.netdev->ifindex)) 234 231 goto err_cancel; 235 - if (pool->user.napi_id && 236 - nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id)) 232 + 233 + napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0; 234 + if (napi_id >= MIN_NAPI_ID && 235 + nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id)) 237 236 goto err_cancel; 238 237 239 238 inflight = page_pool_inflight(pool, false); ··· 324 319 if (pool->slow.netdev) { 325 320 hlist_add_head(&pool->user.list, 326 321 &pool->slow.netdev->page_pools); 327 - pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0; 328 - 329 322 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF); 330 323 } 331 324