Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-consolidate-page_is_pfmemalloc-usage'

Alexander Lobakin says:

====================
net: consolidate page_is_pfmemalloc() usage

page_is_pfmemalloc() is used mostly by networking drivers to test
if a page can be considered for reusing/recycling.
It doesn't write anything to the struct page itself, so its sole
argument can be constified, as well as the first argument of
skb_propagate_pfmemalloc().
In Page Pool core code, it can be simply inlined instead.
Most of the callers from NIC drivers were just doppelgangers of
the same condition tests. Derive them into a new common function
to deduplicate the code.

Resend of v3 [2]:
- it missed Patchwork and Netdev archives, probably due to server-side
issues.

Since v2 [1]:
- use more intuitive name for the new inline function since there's
nothing "reserved" in remote pages (Jakub Kicinski, John Hubbard);
- fold likely() inside the helper itself to make driver code a bit
fancier (Jakub Kicinski);
- split function introduction and using into two separate commits;
- collect some more tags (Jesse Brandeburg, David Rientjes).

Since v1 [0]:
- new: reduce code duplication by introducing a new common function
to test if a page can be reused/recycled (David Rientjes);
- collect autographs for Page Pool bits (Jesper Dangaard Brouer,
Ilias Apalodimas).

[0] https://lore.kernel.org/netdev/20210125164612.243838-1-alobakin@pm.me
[1] https://lore.kernel.org/netdev/20210127201031.98544-1-alobakin@pm.me
[2] https://lore.kernel.org/lkml/20210131120844.7529-1-alobakin@pm.me
====================

Link: https://lore.kernel.org/r/20210202133030.5760-1-alobakin@pm.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+46 -106
+6 -11
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 2800 2800 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2801 2801 } 2802 2802 2803 - static bool hns3_page_is_reusable(struct page *page) 2804 - { 2805 - return page_to_nid(page) == numa_mem_id() && 2806 - !page_is_pfmemalloc(page); 2807 - } 2808 - 2809 2803 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) 2810 2804 { 2811 2805 return (page_count(cb->priv) - cb->pagecnt_bias) == 1; ··· 2817 2823 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2818 2824 size - pull_len, truesize); 2819 2825 2820 - /* Avoid re-using remote pages, or the stack is still using the page 2821 - * when page_offset rollback to zero, flag default unreuse 2826 + /* Avoid re-using remote and pfmemalloc pages, or the stack is still 2827 + * using the page when page_offset rollback to zero, flag default 2828 + * unreuse 2822 2829 */ 2823 - if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) || 2830 + if (!dev_page_is_reusable(desc_cb->priv) || 2824 2831 (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) { 2825 2832 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); 2826 2833 return; ··· 3078 3083 if (length <= HNS3_RX_HEAD_SIZE) { 3079 3084 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 3080 3085 3081 - /* We can reuse buffer as-is, just make sure it is local */ 3082 - if (likely(hns3_page_is_reusable(desc_cb->priv))) 3086 + /* We can reuse buffer as-is, just make sure it is reusable */ 3087 + if (dev_page_is_reusable(desc_cb->priv)) 3083 3088 desc_cb->reuse_flag = 1; 3084 3089 else /* This page cannot be reused so discard it */ 3085 3090 __page_frag_cache_drain(desc_cb->priv,
+4 -9
drivers/net/ethernet/intel/fm10k/fm10k_main.c
··· 194 194 DMA_FROM_DEVICE); 195 195 } 196 196 197 - static inline bool fm10k_page_is_reserved(struct page *page) 198 - { 199 - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 200 - } 201 - 202 197 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 203 198 struct page *page, 204 199 unsigned int __maybe_unused truesize) 205 200 { 206 - /* avoid re-using remote pages */ 207 - if (unlikely(fm10k_page_is_reserved(page))) 201 + /* avoid re-using remote and pfmemalloc pages */ 202 + if (!dev_page_is_reusable(page)) 208 203 return false; 209 204 210 205 #if (PAGE_SIZE < 8192) ··· 260 265 if (likely(size <= FM10K_RX_HDR_LEN)) { 261 266 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 262 267 263 - /* page is not reserved, we can reuse buffer as-is */ 264 - if (likely(!fm10k_page_is_reserved(page))) 268 + /* page is reusable, we can reuse buffer as-is */ 269 + if (dev_page_is_reusable(page)) 265 270 return true; 266 271 267 272 /* this page cannot be reused so discard it */
+1 -14
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1844 1844 } 1845 1845 1846 1846 /** 1847 - * i40e_page_is_reusable - check if any reuse is possible 1848 - * @page: page struct to check 1849 - * 1850 - * A page is not reusable if it was allocated under low memory 1851 - * conditions, or it's not in the same NUMA node as this CPU. 1852 - */ 1853 - static inline bool i40e_page_is_reusable(struct page *page) 1854 - { 1855 - return (page_to_nid(page) == numa_mem_id()) && 1856 - !page_is_pfmemalloc(page); 1857 - } 1858 - 1859 - /** 1860 1847 * i40e_can_reuse_rx_page - Determine if this page can be reused by 1861 1848 * the adapter for another receive 1862 1849 * ··· 1878 1891 struct page *page = rx_buffer->page; 1879 1892 1880 1893 /* Is any reuse possible? */ 1881 - if (unlikely(!i40e_page_is_reusable(page))) 1894 + if (!dev_page_is_reusable(page)) 1882 1895 return false; 1883 1896 1884 1897 #if (PAGE_SIZE < 8192)
+1 -14
drivers/net/ethernet/intel/iavf/iavf_txrx.c
··· 1142 1142 } 1143 1143 1144 1144 /** 1145 - * iavf_page_is_reusable - check if any reuse is possible 1146 - * @page: page struct to check 1147 - * 1148 - * A page is not reusable if it was allocated under low memory 1149 - * conditions, or it's not in the same NUMA node as this CPU. 1150 - */ 1151 - static inline bool iavf_page_is_reusable(struct page *page) 1152 - { 1153 - return (page_to_nid(page) == numa_mem_id()) && 1154 - !page_is_pfmemalloc(page); 1155 - } 1156 - 1157 - /** 1158 1145 * iavf_can_reuse_rx_page - Determine if this page can be reused by 1159 1146 * the adapter for another receive 1160 1147 * ··· 1174 1187 struct page *page = rx_buffer->page; 1175 1188 1176 1189 /* Is any reuse possible? */ 1177 - if (unlikely(!iavf_page_is_reusable(page))) 1190 + if (!dev_page_is_reusable(page)) 1178 1191 return false; 1179 1192 1180 1193 #if (PAGE_SIZE < 8192)
+2 -11
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 729 729 } 730 730 731 731 /** 732 - * ice_page_is_reserved - check if reuse is possible 733 - * @page: page struct to check 734 - */ 735 - static bool ice_page_is_reserved(struct page *page) 736 - { 737 - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 738 - } 739 - 740 - /** 741 732 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse 742 733 * @rx_buf: Rx buffer to adjust 743 734 * @size: Size of adjustment ··· 766 775 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; 767 776 struct page *page = rx_buf->page; 768 777 769 - /* avoid re-using remote pages */ 770 - if (unlikely(ice_page_is_reserved(page))) 778 + /* avoid re-using remote and pfmemalloc pages */ 779 + if (!dev_page_is_reusable(page)) 771 780 return false; 772 781 773 782 #if (PAGE_SIZE < 8192)
+2 -7
drivers/net/ethernet/intel/igb/igb_main.c
··· 8215 8215 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 8216 8216 } 8217 8217 8218 - static inline bool igb_page_is_reserved(struct page *page) 8219 - { 8220 - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 8221 - } 8222 - 8223 8218 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) 8224 8219 { 8225 8220 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 8226 8221 struct page *page = rx_buffer->page; 8227 8222 8228 - /* avoid re-using remote pages */ 8229 - if (unlikely(igb_page_is_reserved(page))) 8223 + /* avoid re-using remote and pfmemalloc pages */ 8224 + if (!dev_page_is_reusable(page)) 8230 8225 return false; 8231 8226 8232 8227 #if (PAGE_SIZE < 8192)
+2 -7
drivers/net/ethernet/intel/igc/igc_main.c
··· 1648 1648 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1649 1649 } 1650 1650 1651 - static inline bool igc_page_is_reserved(struct page *page) 1652 - { 1653 - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 1654 - } 1655 - 1656 1651 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer) 1657 1652 { 1658 1653 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1659 1654 struct page *page = rx_buffer->page; 1660 1655 1661 - /* avoid re-using remote pages */ 1662 - if (unlikely(igc_page_is_reserved(page))) 1656 + /* avoid re-using remote and pfmemalloc pages */ 1657 + if (!dev_page_is_reusable(page)) 1663 1658 return false; 1664 1659 1665 1660 #if (PAGE_SIZE < 8192)
+2 -7
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 1940 1940 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1941 1941 } 1942 1942 1943 - static inline bool ixgbe_page_is_reserved(struct page *page) 1944 - { 1945 - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 1946 - } 1947 - 1948 1943 static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer, 1949 1944 int rx_buffer_pgcnt) 1950 1945 { 1951 1946 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1952 1947 struct page *page = rx_buffer->page; 1953 1948 1954 - /* avoid re-using remote pages */ 1955 - if (unlikely(ixgbe_page_is_reserved(page))) 1949 + /* avoid re-using remote and pfmemalloc pages */ 1950 + if (!dev_page_is_reusable(page)) 1956 1951 return false; 1957 1952 1958 1953 #if (PAGE_SIZE < 8192)
+2 -7
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 781 781 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 782 782 } 783 783 784 - static inline bool ixgbevf_page_is_reserved(struct page *page) 785 - { 786 - return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 787 - } 788 - 789 784 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer) 790 785 { 791 786 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 792 787 struct page *page = rx_buffer->page; 793 788 794 - /* avoid re-using remote pages */ 795 - if (unlikely(ixgbevf_page_is_reserved(page))) 789 + /* avoid re-using remote and pfmemalloc pages */ 790 + if (!dev_page_is_reusable(page)) 796 791 return false; 797 792 798 793 #if (PAGE_SIZE < 8192)
+1 -6
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 213 213 return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; 214 214 } 215 215 216 - static inline bool mlx5e_page_is_reserved(struct page *page) 217 - { 218 - return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); 219 - } 220 - 221 216 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, 222 217 struct mlx5e_dma_info *dma_info) 223 218 { ··· 225 230 return false; 226 231 } 227 232 228 - if (unlikely(mlx5e_page_is_reserved(dma_info->page))) { 233 + if (!dev_page_is_reusable(dma_info->page)) { 229 234 stats->cache_waive++; 230 235 return false; 231 236 }
+1 -1
include/linux/mm.h
··· 1584 1584 * ALLOC_NO_WATERMARKS and the low watermark was not 1585 1585 * met implying that the system is under some pressure. 1586 1586 */ 1587 - static inline bool page_is_pfmemalloc(struct page *page) 1587 + static inline bool page_is_pfmemalloc(const struct page *page) 1588 1588 { 1589 1589 /* 1590 1590 * Page index cannot be this large so this must be
+18 -2
include/linux/skbuff.h
··· 2939 2939 } 2940 2940 2941 2941 /** 2942 + * dev_page_is_reusable - check whether a page can be reused for network Rx 2943 + * @page: the page to test 2944 + * 2945 + * A page shouldn't be considered for reusing/recycling if it was allocated 2946 + * under memory pressure or at a distant memory node. 2947 + * 2948 + * Returns false if this page should be returned to page allocator, true 2949 + * otherwise. 2950 + */ 2951 + static inline bool dev_page_is_reusable(const struct page *page) 2952 + { 2953 + return likely(page_to_nid(page) == numa_mem_id() && 2954 + !page_is_pfmemalloc(page)); 2955 + } 2956 + 2957 + /** 2942 2958 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page 2943 2959 * @page: The page that was allocated from skb_alloc_page 2944 2960 * @skb: The skb that may need pfmemalloc set 2945 2961 */ 2946 - static inline void skb_propagate_pfmemalloc(struct page *page, 2947 - struct sk_buff *skb) 2962 + static inline void skb_propagate_pfmemalloc(const struct page *page, 2963 + struct sk_buff *skb) 2948 2964 { 2949 2965 if (page_is_pfmemalloc(page)) 2950 2966 skb->pfmemalloc = true;
+4 -10
net/core/page_pool.c
··· 350 350 return true; 351 351 } 352 352 353 - /* page is NOT reusable when: 354 - * 1) allocated when system is under some pressure. (page_is_pfmemalloc) 355 - */ 356 - static bool pool_page_reusable(struct page_pool *pool, struct page *page) 357 - { 358 - return !page_is_pfmemalloc(page); 359 - } 360 - 361 353 /* If the page refcnt == 1, this will try to recycle the page. 362 354 * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for 363 355 * the configured size min(dma_sync_size, pool->max_len). ··· 365 373 * regular page allocator APIs. 366 374 * 367 375 * refcnt == 1 means page_pool owns page, and can recycle it. 376 + * 377 + * page is NOT reusable when allocated when system is under 378 + * some pressure. (page_is_pfmemalloc) 368 379 */ 369 - if (likely(page_ref_count(page) == 1 && 370 - pool_page_reusable(pool, page))) { 380 + if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) { 371 381 /* Read barrier done in page_ref_count / READ_ONCE */ 372 382 373 383 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)