mm/page_alloc: ensure try_alloc_pages() plays well with unaccepted memory

try_alloc_pages() will not attempt to allocate memory if the system has
*any* unaccepted memory. Unaccepted memory is accepted on demand and can
remain in the system indefinitely, causing try_alloc_pages() to always
fail on such systems.

Rather than immediately giving up, attempt to use already accepted memory
on free lists.

Pass 'alloc_flags' to cond_accept_memory() and do not accept new memory
for ALLOC_TRYLOCK requests.

Found via code inspection — only BPF uses this interface at present, so
the runtime effects are unclear.

Link: https://lkml.kernel.org/r/20250506112509.905147-2-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by Kirill A. Shutemov and committed by Andrew Morton 23fa022a d55582d6

+15 -13
+15 -13
mm/page_alloc.c
··· 290 290 #endif 291 291 292 292 static bool page_contains_unaccepted(struct page *page, unsigned int order); 293 - static bool cond_accept_memory(struct zone *zone, unsigned int order); 293 + static bool cond_accept_memory(struct zone *zone, unsigned int order, 294 + int alloc_flags); 294 295 static bool __free_unaccepted(struct page *page); 295 296 296 297 int page_group_by_mobility_disabled __read_mostly; ··· 3612 3611 } 3613 3612 } 3614 3613 3615 - cond_accept_memory(zone, order); 3614 + cond_accept_memory(zone, order, alloc_flags); 3616 3615 3617 3616 /* 3618 3617 * Detect whether the number of free pages is below high ··· 3639 3638 gfp_mask)) { 3640 3639 int ret; 3641 3640 3642 - if (cond_accept_memory(zone, order)) 3641 + if (cond_accept_memory(zone, order, alloc_flags)) 3643 3642 goto try_this_zone; 3644 3643 3645 3644 /* ··· 3692 3691 3693 3692 return page; 3694 3693 } else { 3695 - if (cond_accept_memory(zone, order)) 3694 + if (cond_accept_memory(zone, order, alloc_flags)) 3696 3695 goto try_this_zone; 3697 3696 3698 3697 /* Try again if zone has deferred pages */ ··· 4845 4844 goto failed; 4846 4845 } 4847 4846 4848 - cond_accept_memory(zone, 0); 4847 + cond_accept_memory(zone, 0, alloc_flags); 4849 4848 retry_this_zone: 4850 4849 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4851 4850 if (zone_watermark_fast(zone, 0, mark, ··· 4854 4853 break; 4855 4854 } 4856 4855 4857 - if (cond_accept_memory(zone, 0)) 4856 + if (cond_accept_memory(zone, 0, alloc_flags)) 4858 4857 goto retry_this_zone; 4859 4858 4860 4859 /* Try again if zone has deferred pages */ ··· 7282 7281 return static_branch_unlikely(&zones_with_unaccepted_pages); 7283 7282 } 7284 7283 7285 - static bool cond_accept_memory(struct zone *zone, unsigned int order) 7284 + static bool cond_accept_memory(struct zone *zone, unsigned int order, 7285 + int alloc_flags) 7286 7286 { 7287 7287 long to_accept, wmark; 7288 7288 bool ret = false; ··· 7292 7290 return false; 7293 7291 
7294 7292 if (list_empty(&zone->unaccepted_pages)) 7293 + return false; 7294 + 7295 + /* Bailout, since try_to_accept_memory_one() needs to take a lock */ 7296 + if (alloc_flags & ALLOC_TRYLOCK) 7295 7297 return false; 7296 7298 7297 7299 wmark = promo_wmark_pages(zone); ··· 7354 7348 return false; 7355 7349 } 7356 7350 7357 - static bool cond_accept_memory(struct zone *zone, unsigned int order) 7351 + static bool cond_accept_memory(struct zone *zone, unsigned int order, 7352 + int alloc_flags) 7358 7353 { 7359 7354 return false; 7360 7355 } ··· 7426 7419 if (!pcp_allowed_order(order)) 7427 7420 return NULL; 7428 7421 7429 - #ifdef CONFIG_UNACCEPTED_MEMORY 7430 - /* Bailout, since try_to_accept_memory_one() needs to take a lock */ 7431 - if (has_unaccepted_memory()) 7432 - return NULL; 7433 - #endif 7434 7422 /* Bailout, since _deferred_grow_zone() needs to take a lock */ 7435 7423 if (deferred_pages_enabled()) 7436 7424 return NULL;