Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

page_pool: Move pp_magic check into helper functions

Since we are about to stash some more information into the pp_magic
field, let's move the magic signature checks into a pair of helper
functions so it can be changed in one place.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Tested-by: Yonglong Liu <liuyonglong@huawei.com>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20250409-page-pool-track-dma-v9-1-6a9ef2e0cba8@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Toke Høiland-Jørgensen; committed by Jakub Kicinski
cd3c9316 452446f8

+33 -24
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
··· 707 707 xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); 708 708 page = xdpi.page.page; 709 709 710 - /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) 711 - * as we know this is a page_pool page. 710 + /* No need to check page_pool_page_is_pp() as we 711 + * know this is a page_pool page. 712 712 */ 713 713 page_pool_recycle_direct(page->pp, page); 714 714 } while (++n < num);
+20
include/linux/mm.h
··· 4248 4248 #define VM_SEALED_SYSMAP VM_NONE 4249 4249 #endif 4250 4250 4251 + /* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is 4252 + * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for 4253 + * the head page of compound page and bit 1 for pfmemalloc page. 4254 + * page_is_pfmemalloc() is checked in __page_pool_put_page() to avoid recycling 4255 + * the pfmemalloc page. 4256 + */ 4257 + #define PP_MAGIC_MASK ~0x3UL 4258 + 4259 + #ifdef CONFIG_PAGE_POOL 4260 + static inline bool page_pool_page_is_pp(struct page *page) 4261 + { 4262 + return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE; 4263 + } 4264 + #else 4265 + static inline bool page_pool_page_is_pp(struct page *page) 4266 + { 4267 + return false; 4268 + } 4269 + #endif 4270 + 4251 4271 #endif /* _LINUX_MM_H */
+2 -6
mm/page_alloc.c
··· 897 897 #ifdef CONFIG_MEMCG 898 898 page->memcg_data | 899 899 #endif 900 - #ifdef CONFIG_PAGE_POOL 901 - ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) | 902 - #endif 900 + page_pool_page_is_pp(page) | 903 901 (page->flags & check_flags))) 904 902 return false; 905 903 ··· 924 926 if (unlikely(page->memcg_data)) 925 927 bad_reason = "page still charged to cgroup"; 926 928 #endif 927 - #ifdef CONFIG_PAGE_POOL 928 - if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE)) 929 + if (unlikely(page_pool_page_is_pp(page))) 929 930 bad_reason = "page_pool leak"; 930 - #endif 931 931 return bad_reason; 932 932 } 933 933
+5
net/core/netmem_priv.h
··· 18 18 __netmem_clear_lsb(netmem)->pp_magic = 0; 19 19 } 20 20 21 + static inline bool netmem_is_pp(netmem_ref netmem) 22 + { 23 + return (netmem_get_pp_magic(netmem) & PP_MAGIC_MASK) == PP_SIGNATURE; 24 + } 25 + 21 26 static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool) 22 27 { 23 28 __netmem_clear_lsb(netmem)->pp = pool;
+2 -14
net/core/skbuff.c
··· 893 893 skb_get(list); 894 894 } 895 895 896 - static bool is_pp_netmem(netmem_ref netmem) 897 - { 898 - return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE; 899 - } 900 - 901 896 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, 902 897 unsigned int headroom) 903 898 { ··· 990 995 { 991 996 netmem = netmem_compound_head(netmem); 992 997 993 - /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation 994 - * in order to preserve any existing bits, such as bit 0 for the 995 - * head page of compound page and bit 1 for pfmemalloc page, so 996 - * mask those bits for freeing side when doing below checking, 997 - * and page_is_pfmemalloc() is checked in __page_pool_put_page() 998 - * to avoid recycling the pfmemalloc page. 999 - */ 1000 - if (unlikely(!is_pp_netmem(netmem))) 998 + if (unlikely(!netmem_is_pp(netmem))) 1001 999 return false; 1002 1000 1003 1001 page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false); ··· 1030 1042 1031 1043 for (i = 0; i < shinfo->nr_frags; i++) { 1032 1044 head_netmem = netmem_compound_head(shinfo->frags[i].netmem); 1033 - if (likely(is_pp_netmem(head_netmem))) 1045 + if (likely(netmem_is_pp(head_netmem))) 1034 1046 page_pool_ref_netmem(head_netmem); 1035 1047 else 1036 1048 page_ref_inc(netmem_to_page(head_netmem));
+2 -2
net/core/xdp.c
··· 438 438 netmem = netmem_compound_head(netmem); 439 439 if (napi_direct && xdp_return_frame_no_direct()) 440 440 napi_direct = false; 441 - /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) 442 - * as mem->type knows this a page_pool page 441 + /* No need to check netmem_is_pp() as mem->type knows this a 442 + * page_pool page 443 443 */ 444 444 page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, 445 445 napi_direct);