mm/codetag: move tag retrieval back upfront in __free_pages()

Commit 51ff4d7486f0 ("mm: avoid extra mem_alloc_profiling_enabled()
checks") introduces a possible use-after-free scenario, when page
is non-compound, page[0] could be released by other thread right
after put_page_testzero failed in current thread, pgalloc_tag_sub_pages
afterwards would manipulate an invalid page for accounting remaining
pages:

[timeline]       [thread1]                    [thread2]
  |         alloc_page non-compound
  V
  |                                      get_page, ref counter inc
  V
  |         in ___free_pages
  |         put_page_testzero fails
  V
  |                                      put_page, page released
  V
  |         in ___free_pages,
  |         pgalloc_tag_sub_pages
  |         manipulates an invalid page
  V

Restore __free_pages() to its previous behavior: retrieve the alloc
tag before dropping the page reference.

Link: https://lkml.kernel.org/r/20250505193034.91682-1-00107082@163.com
Fixes: 51ff4d7486f0 ("mm: avoid extra mem_alloc_profiling_enabled() checks")
Signed-off-by: David Wang <00107082@163.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by David Wang and committed by Andrew Morton 0ae0227f 4b7c0857

+14 -9 (include/linux/pgalloc_tag.h: +8)

--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -188,6 +188,13 @@
 	return tag;
 }

+static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
+{
+	if (mem_alloc_profiling_enabled())
+		return __pgalloc_tag_get(page);
+	return NULL;
+}
+
 void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
 void pgalloc_tag_swap(struct folio *new, struct folio *old);
@@ -199,6 +206,7 @@
 static inline void alloc_tag_sec_init(void) {}
 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
 static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
+static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }

 #endif /* CONFIG_MEM_ALLOC_PROFILING */
+6 -9

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1151,14 +1151,9 @@
 	__pgalloc_tag_sub(page, nr);
 }

-static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr)
+/* When tag is not NULL, assuming mem_alloc_profiling_enabled */
+static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
 {
-	struct alloc_tag *tag;
-
-	if (!mem_alloc_profiling_enabled())
-		return;
-
-	tag = __pgalloc_tag_get(page);
 	if (tag)
 		this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
 }
@@ -1168,7 +1163,7 @@
 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
 				   unsigned int nr) {}
 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
-static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {}
+static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}

 #endif /* CONFIG_MEM_ALLOC_PROFILING */
@@ -5060,12 +5065,14 @@
 {
 	/* get PageHead before we drop reference */
 	int head = PageHead(page);
+	/* get alloc tag in case the page is released by others */
+	struct alloc_tag *tag = pgalloc_tag_get(page);

 	if (put_page_testzero(page))
 		__free_frozen_pages(page, order, fpi_flags);
 	else if (!head) {
-		pgalloc_tag_sub_pages(page, (1 << order) - 1);
+		pgalloc_tag_sub_pages(tag, (1 << order) - 1);
 		while (order-- > 0)
 			__free_frozen_pages(page + (1 << order), order,
 					    fpi_flags);
··· 1151 1151 __pgalloc_tag_sub(page, nr); 1152 1152 } 1153 1153 1154 - static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) 1154 + /* When tag is not NULL, assuming mem_alloc_profiling_enabled */ 1155 + static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) 1155 1156 { 1156 - struct alloc_tag *tag; 1157 - 1158 - if (!mem_alloc_profiling_enabled()) 1159 - return; 1160 - 1161 - tag = __pgalloc_tag_get(page); 1162 1157 if (tag) 1163 1158 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 1164 1159 } ··· 1163 1168 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, 1164 1169 unsigned int nr) {} 1165 1170 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 1166 - static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} 1171 + static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} 1167 1172 1168 1173 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 1169 1174 ··· 5060 5065 { 5061 5066 /* get PageHead before we drop reference */ 5062 5067 int head = PageHead(page); 5068 + /* get alloc tag in case the page is released by others */ 5069 + struct alloc_tag *tag = pgalloc_tag_get(page); 5063 5070 5064 5071 if (put_page_testzero(page)) 5065 5072 __free_frozen_pages(page, order, fpi_flags); 5066 5073 else if (!head) { 5067 - pgalloc_tag_sub_pages(page, (1 << order) - 1); 5074 + pgalloc_tag_sub_pages(tag, (1 << order) - 1); 5068 5075 while (order-- > 0) 5069 5076 __free_frozen_pages(page + (1 << order), order, 5070 5077 fpi_flags);