mm/page_alloc: ensure try_alloc_pages() plays well with unaccepted memory

try_alloc_pages() will not attempt to allocate memory if the system has
*any* unaccepted memory. Memory is only accepted as it is needed, so
unaccepted memory can remain in the system indefinitely, causing the
interface to always fail.
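
For context, the failure comes from an unconditional bailout at the top of
try_alloc_pages(), which this patch removes (quoted from the diff below):

	#ifdef CONFIG_UNACCEPTED_MEMORY
		/* Bailout, since try_to_accept_memory_one() needs to take a lock */
		if (has_unaccepted_memory())
			return NULL;
	#endif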

Rather than immediately giving up, attempt to use already accepted memory
on free lists.

Pass 'alloc_flags' to cond_accept_memory() and do not accept new memory
for ALLOC_TRYLOCK requests.
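
Concretely, the trylock case is now handled inside cond_accept_memory()
itself; the added check (excerpted from the diff below):

	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
	if (alloc_flags & ALLOC_TRYLOCK)
		return false;

A try_alloc_pages() caller therefore never triggers memory acceptance, but
the zonelist walk can still hand back pages that were already accepted and
are sitting on the free lists.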

Found via code inspection - only BPF uses this at present and the
runtime effects are unclear.

Link: https://lkml.kernel.org/r/20250506112509.905147-2-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

 mm/page_alloc.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
···
 #endif
 
 static bool page_contains_unaccepted(struct page *page, unsigned int order);
-static bool cond_accept_memory(struct zone *zone, unsigned int order);
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags);
 static bool __free_unaccepted(struct page *page);
 
 int page_group_by_mobility_disabled __read_mostly;
···
 		}
 	}
 
-	cond_accept_memory(zone, order);
+	cond_accept_memory(zone, order, alloc_flags);
 
 	/*
 	 * Detect whether the number of free pages is below high
···
 				       gfp_mask)) {
 			int ret;
 
-			if (cond_accept_memory(zone, order))
+			if (cond_accept_memory(zone, order, alloc_flags))
 				goto try_this_zone;
 
 			/*
···
 
 			return page;
 		} else {
-			if (cond_accept_memory(zone, order))
+			if (cond_accept_memory(zone, order, alloc_flags))
 				goto try_this_zone;
 
 			/* Try again if zone has deferred pages */
···
 			goto failed;
 		}
 
-		cond_accept_memory(zone, 0);
+		cond_accept_memory(zone, 0, alloc_flags);
 retry_this_zone:
 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
 		if (zone_watermark_fast(zone, 0, mark,
···
 			break;
 		}
 
-		if (cond_accept_memory(zone, 0))
+		if (cond_accept_memory(zone, 0, alloc_flags))
 			goto retry_this_zone;
 
 		/* Try again if zone has deferred pages */
···
 	return static_branch_unlikely(&zones_with_unaccepted_pages);
 }
 
-static bool cond_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags)
 {
 	long to_accept, wmark;
 	bool ret = false;
···
 		return false;
 
 	if (list_empty(&zone->unaccepted_pages))
+		return false;
+
+	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
+	if (alloc_flags & ALLOC_TRYLOCK)
 		return false;
 
 	wmark = promo_wmark_pages(zone);
···
 	return false;
 }
 
-static bool cond_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags)
 {
 	return false;
 }
···
 	if (!pcp_allowed_order(order))
 		return NULL;
 
-#ifdef CONFIG_UNACCEPTED_MEMORY
-	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
-	if (has_unaccepted_memory())
-		return NULL;
-#endif
 	/* Bailout, since _deferred_grow_zone() needs to take a lock */
 	if (deferred_pages_enabled())
 		return NULL;