Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kmsan: remove hard-coded GFP_KERNEL flags

kmsan_vmap_pages_range_noflush() allocates its temporary s_pages/o_pages
arrays with GFP_KERNEL, which may sleep. This is inconsistent with
vmalloc(), which will later support non-blocking requests.

Plumb gfp_mask through kmsan_vmap_pages_range_noflush() so it can use it
for its internal allocations.

Please note, the subsequent __vmap_pages_range_noflush() still uses
GFP_KERNEL and can sleep. If a caller runs under reclaim constraints
where sleeping is forbidden, it must establish the appropriate memalloc
scope using the memalloc scope API.

Link: https://lkml.kernel.org/r/20251007122035.56347-8-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Uladzislau Rezki (Sony) and committed by
Andrew Morton
b186a942 ad435e79

+27 -17
+4 -2
include/linux/kmsan.h
··· 133 133 * @prot: page protection flags used for vmap. 134 134 * @pages: array of pages. 135 135 * @page_shift: page_shift passed to vmap_range_noflush(). 136 + * @gfp_mask: gfp_mask to use internally. 136 137 * 137 138 * KMSAN maps shadow and origin pages of @pages into contiguous ranges in 138 139 * vmalloc metadata address range. Returns 0 on success, callers must check ··· 143 142 unsigned long end, 144 143 pgprot_t prot, 145 144 struct page **pages, 146 - unsigned int page_shift); 145 + unsigned int page_shift, 146 + gfp_t gfp_mask); 147 147 148 148 /** 149 149 * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap. ··· 349 347 350 348 static inline int __must_check kmsan_vmap_pages_range_noflush( 351 349 unsigned long start, unsigned long end, pgprot_t prot, 352 - struct page **pages, unsigned int page_shift) 350 + struct page **pages, unsigned int page_shift, gfp_t gfp_mask) 353 351 { 354 352 return 0; 355 353 }
+2 -2
mm/internal.h
··· 1355 1355 #ifdef CONFIG_MMU 1356 1356 void __init vmalloc_init(void); 1357 1357 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end, 1358 - pgprot_t prot, struct page **pages, unsigned int page_shift); 1358 + pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask); 1359 1359 unsigned int get_vm_area_page_order(struct vm_struct *vm); 1360 1360 #else 1361 1361 static inline void vmalloc_init(void) ··· 1364 1364 1365 1365 static inline 1366 1366 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end, 1367 - pgprot_t prot, struct page **pages, unsigned int page_shift) 1367 + pgprot_t prot, struct page **pages, unsigned int page_shift, gfp_t gfp_mask) 1368 1368 { 1369 1369 return -EINVAL; 1370 1370 }
+3 -3
mm/kmsan/shadow.c
··· 215 215 216 216 int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end, 217 217 pgprot_t prot, struct page **pages, 218 - unsigned int page_shift) 218 + unsigned int page_shift, gfp_t gfp_mask) 219 219 { 220 220 unsigned long shadow_start, origin_start, shadow_end, origin_end; 221 221 struct page **s_pages, **o_pages; ··· 230 230 return 0; 231 231 232 232 nr = (end - start) / PAGE_SIZE; 233 - s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL); 234 - o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL); 233 + s_pages = kcalloc(nr, sizeof(*s_pages), gfp_mask); 234 + o_pages = kcalloc(nr, sizeof(*o_pages), gfp_mask); 235 235 if (!s_pages || !o_pages) { 236 236 err = -ENOMEM; 237 237 goto ret;
+1 -1
mm/percpu-vm.c
··· 194 194 int nr_pages) 195 195 { 196 196 return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), 197 - PAGE_KERNEL, pages, PAGE_SHIFT); 197 + PAGE_KERNEL, pages, PAGE_SHIFT, GFP_KERNEL); 198 198 } 199 199 200 200 /**
+17 -9
mm/vmalloc.c
··· 671 671 } 672 672 673 673 int vmap_pages_range_noflush(unsigned long addr, unsigned long end, 674 - pgprot_t prot, struct page **pages, unsigned int page_shift) 674 + pgprot_t prot, struct page **pages, unsigned int page_shift, 675 + gfp_t gfp_mask) 675 676 { 676 677 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, 677 - page_shift); 678 + page_shift, gfp_mask); 678 679 679 680 if (ret) 680 681 return ret; 681 682 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); 683 + } 684 + 685 + static int __vmap_pages_range(unsigned long addr, unsigned long end, 686 + pgprot_t prot, struct page **pages, unsigned int page_shift, 687 + gfp_t gfp_mask) 688 + { 689 + int err; 690 + 691 + err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift, gfp_mask); 692 + flush_cache_vmap(addr, end); 693 + return err; 682 694 } 683 695 684 696 /** ··· 708 696 int vmap_pages_range(unsigned long addr, unsigned long end, 709 697 pgprot_t prot, struct page **pages, unsigned int page_shift) 710 698 { 711 - int err; 712 - 713 - err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); 714 - flush_cache_vmap(addr, end); 715 - return err; 699 + return __vmap_pages_range(addr, end, prot, pages, page_shift, GFP_KERNEL); 716 700 } 717 701 718 702 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start, ··· 3847 3839 */ 3848 3840 flags = memalloc_apply_gfp_scope(gfp_mask); 3849 3841 do { 3850 - ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3851 - page_shift); 3842 + ret = __vmap_pages_range(addr, addr + size, prot, area->pages, 3843 + page_shift, nested_gfp); 3852 3844 if (nofail && (ret < 0)) 3853 3845 schedule_timeout_uninterruptible(1); 3854 3846 } while (nofail && (ret < 0));