Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: kmsan: maintain KMSAN metadata for page operations

Insert KMSAN hooks that make the necessary bookkeeping changes:
- poison page shadow and origins in alloc_pages()/free_page();
- clear page shadow and origins in clear_page(), copy_user_highpage();
- copy page metadata in copy_highpage(), wp_page_copy();
- handle vmap()/vunmap()/iounmap();

Link: https://lkml.kernel.org/r/20220915150417.722975-15-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Marco Elver <elver@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Alexander Potapenko; committed by Andrew Morton.
Commit: b073d7f8 (parent d596b04f)

+394 -2
+7
arch/x86/include/asm/page_64.h
··· 8 8 #include <asm/cpufeatures.h> 9 9 #include <asm/alternative.h> 10 10 11 + #include <linux/kmsan-checks.h> 12 + 11 13 /* duplicated to the one in bootmem.h */ 12 14 extern unsigned long max_pfn; 13 15 extern unsigned long phys_base; ··· 49 47 50 48 static inline void clear_page(void *page) 51 49 { 50 + /* 51 + * Clean up KMSAN metadata for the page being cleared. The assembly call 52 + * below clobbers @page, so we perform unpoisoning before it. 53 + */ 54 + kmsan_unpoison_memory(page, PAGE_SIZE); 52 55 alternative_call_2(clear_page_orig, 53 56 clear_page_rep, X86_FEATURE_REP_GOOD, 54 57 clear_page_erms, X86_FEATURE_ERMS,
+3
arch/x86/mm/ioremap.c
··· 17 17 #include <linux/cc_platform.h> 18 18 #include <linux/efi.h> 19 19 #include <linux/pgtable.h> 20 + #include <linux/kmsan.h> 20 21 21 22 #include <asm/set_memory.h> 22 23 #include <asm/e820/api.h> ··· 480 479 return; 481 480 } 482 481 482 + kmsan_iounmap_page_range((unsigned long)addr, 483 + (unsigned long)addr + get_vm_area_size(p)); 483 484 memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p)); 484 485 485 486 /* Finally remove it */
+3
include/linux/highmem.h
··· 6 6 #include <linux/kernel.h> 7 7 #include <linux/bug.h> 8 8 #include <linux/cacheflush.h> 9 + #include <linux/kmsan.h> 9 10 #include <linux/mm.h> 10 11 #include <linux/uaccess.h> 11 12 #include <linux/hardirq.h> ··· 312 311 vfrom = kmap_local_page(from); 313 312 vto = kmap_local_page(to); 314 313 copy_user_page(vto, vfrom, vaddr, to); 314 + kmsan_unpoison_memory(page_address(to), PAGE_SIZE); 315 315 kunmap_local(vto); 316 316 kunmap_local(vfrom); 317 317 } ··· 328 326 vfrom = kmap_local_page(from); 329 327 vto = kmap_local_page(to); 330 328 copy_page(vto, vfrom); 329 + kmsan_copy_page_meta(to, from); 331 330 kunmap_local(vto); 332 331 kunmap_local(vfrom); 333 332 }
+145
include/linux/kmsan.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * KMSAN API for subsystems. 4 + * 5 + * Copyright (C) 2017-2022 Google LLC 6 + * Author: Alexander Potapenko <glider@google.com> 7 + * 8 + */ 9 + #ifndef _LINUX_KMSAN_H 10 + #define _LINUX_KMSAN_H 11 + 12 + #include <linux/gfp.h> 13 + #include <linux/kmsan-checks.h> 14 + #include <linux/types.h> 15 + 16 + struct page; 17 + 18 + #ifdef CONFIG_KMSAN 19 + 20 + /** 21 + * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call. 22 + * @page: struct page pointer returned by alloc_pages(). 23 + * @order: order of allocated struct page. 24 + * @flags: GFP flags used by alloc_pages() 25 + * 26 + * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless 27 + * @flags contain __GFP_ZERO. 28 + */ 29 + void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags); 30 + 31 + /** 32 + * kmsan_free_page() - Notify KMSAN about a free_pages() call. 33 + * @page: struct page pointer passed to free_pages(). 34 + * @order: order of deallocated struct page. 35 + * 36 + * KMSAN marks freed memory as uninitialized. 37 + */ 38 + void kmsan_free_page(struct page *page, unsigned int order); 39 + 40 + /** 41 + * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages. 42 + * @dst: destination page. 43 + * @src: source page. 44 + * 45 + * KMSAN copies the contents of metadata pages for @src into the metadata pages 46 + * for @dst. If @dst has no associated metadata pages, nothing happens. 47 + * If @src has no associated metadata pages, @dst metadata pages are unpoisoned. 48 + */ 49 + void kmsan_copy_page_meta(struct page *dst, struct page *src); 50 + 51 + /** 52 + * kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap. 53 + * @start: start of vmapped range. 54 + * @end: end of vmapped range. 55 + * @prot: page protection flags used for vmap. 56 + * @pages: array of pages. 57 + * @page_shift: page_shift passed to vmap_range_noflush(). 
58 + * 59 + * KMSAN maps shadow and origin pages of @pages into contiguous ranges in 60 + * vmalloc metadata address range. 61 + */ 62 + void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end, 63 + pgprot_t prot, struct page **pages, 64 + unsigned int page_shift); 65 + 66 + /** 67 + * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap. 68 + * @start: start of vunmapped range. 69 + * @end: end of vunmapped range. 70 + * 71 + * KMSAN unmaps the contiguous metadata ranges created by 72 + * kmsan_map_kernel_range_noflush(). 73 + */ 74 + void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end); 75 + 76 + /** 77 + * kmsan_ioremap_page_range() - Notify KMSAN about a ioremap_page_range() call. 78 + * @addr: range start. 79 + * @end: range end. 80 + * @phys_addr: physical range start. 81 + * @prot: page protection flags used for ioremap_page_range(). 82 + * @page_shift: page_shift argument passed to vmap_range_noflush(). 83 + * 84 + * KMSAN creates new metadata pages for the physical pages mapped into the 85 + * virtual memory. 86 + */ 87 + void kmsan_ioremap_page_range(unsigned long addr, unsigned long end, 88 + phys_addr_t phys_addr, pgprot_t prot, 89 + unsigned int page_shift); 90 + 91 + /** 92 + * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call. 93 + * @start: range start. 94 + * @end: range end. 95 + * 96 + * KMSAN unmaps the metadata pages for the given range and, unlike for 97 + * vunmap_page_range(), also deallocates them. 
98 + */ 99 + void kmsan_iounmap_page_range(unsigned long start, unsigned long end); 100 + 101 + #else 102 + 103 + static inline int kmsan_alloc_page(struct page *page, unsigned int order, 104 + gfp_t flags) 105 + { 106 + return 0; 107 + } 108 + 109 + static inline void kmsan_free_page(struct page *page, unsigned int order) 110 + { 111 + } 112 + 113 + static inline void kmsan_copy_page_meta(struct page *dst, struct page *src) 114 + { 115 + } 116 + 117 + static inline void kmsan_vmap_pages_range_noflush(unsigned long start, 118 + unsigned long end, 119 + pgprot_t prot, 120 + struct page **pages, 121 + unsigned int page_shift) 122 + { 123 + } 124 + 125 + static inline void kmsan_vunmap_range_noflush(unsigned long start, 126 + unsigned long end) 127 + { 128 + } 129 + 130 + static inline void kmsan_ioremap_page_range(unsigned long start, 131 + unsigned long end, 132 + phys_addr_t phys_addr, 133 + pgprot_t prot, 134 + unsigned int page_shift) 135 + { 136 + } 137 + 138 + static inline void kmsan_iounmap_page_range(unsigned long start, 139 + unsigned long end) 140 + { 141 + } 142 + 143 + #endif 144 + 145 + #endif /* _LINUX_KMSAN_H */
+6
mm/internal.h
··· 818 818 } 819 819 #endif 820 820 821 + int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, 822 + pgprot_t prot, struct page **pages, 823 + unsigned int page_shift); 824 + 821 825 void vunmap_range_noflush(unsigned long start, unsigned long end); 826 + 827 + void __vunmap_range_noflush(unsigned long start, unsigned long end); 822 828 823 829 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 824 830 unsigned long addr, int page_nid, int *flags);
+86
mm/kmsan/hooks.c
··· 11 11 12 12 #include <linux/cacheflush.h> 13 13 #include <linux/gfp.h> 14 + #include <linux/kmsan.h> 14 15 #include <linux/mm.h> 15 16 #include <linux/mm_types.h> 16 17 #include <linux/slab.h> ··· 26 25 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to 27 26 * skipping effects of functions like memset() inside instrumented code. 28 27 */ 28 + 29 + static unsigned long vmalloc_shadow(unsigned long addr) 30 + { 31 + return (unsigned long)kmsan_get_metadata((void *)addr, 32 + KMSAN_META_SHADOW); 33 + } 34 + 35 + static unsigned long vmalloc_origin(unsigned long addr) 36 + { 37 + return (unsigned long)kmsan_get_metadata((void *)addr, 38 + KMSAN_META_ORIGIN); 39 + } 40 + 41 + void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end) 42 + { 43 + __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end)); 44 + __vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end)); 45 + flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end)); 46 + flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end)); 47 + } 48 + 49 + /* 50 + * This function creates new shadow/origin pages for the physical pages mapped 51 + * into the virtual memory. If those physical pages already had shadow/origin, 52 + * those are ignored. 
53 + */ 54 + void kmsan_ioremap_page_range(unsigned long start, unsigned long end, 55 + phys_addr_t phys_addr, pgprot_t prot, 56 + unsigned int page_shift) 57 + { 58 + gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO; 59 + struct page *shadow, *origin; 60 + unsigned long off = 0; 61 + int nr; 62 + 63 + if (!kmsan_enabled || kmsan_in_runtime()) 64 + return; 65 + 66 + nr = (end - start) / PAGE_SIZE; 67 + kmsan_enter_runtime(); 68 + for (int i = 0; i < nr; i++, off += PAGE_SIZE) { 69 + shadow = alloc_pages(gfp_mask, 1); 70 + origin = alloc_pages(gfp_mask, 1); 71 + __vmap_pages_range_noflush( 72 + vmalloc_shadow(start + off), 73 + vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow, 74 + PAGE_SHIFT); 75 + __vmap_pages_range_noflush( 76 + vmalloc_origin(start + off), 77 + vmalloc_origin(start + off + PAGE_SIZE), prot, &origin, 78 + PAGE_SHIFT); 79 + } 80 + flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end)); 81 + flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end)); 82 + kmsan_leave_runtime(); 83 + } 84 + 85 + void kmsan_iounmap_page_range(unsigned long start, unsigned long end) 86 + { 87 + unsigned long v_shadow, v_origin; 88 + struct page *shadow, *origin; 89 + int nr; 90 + 91 + if (!kmsan_enabled || kmsan_in_runtime()) 92 + return; 93 + 94 + nr = (end - start) / PAGE_SIZE; 95 + kmsan_enter_runtime(); 96 + v_shadow = (unsigned long)vmalloc_shadow(start); 97 + v_origin = (unsigned long)vmalloc_origin(start); 98 + for (int i = 0; i < nr; 99 + i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) { 100 + shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow); 101 + origin = kmsan_vmalloc_to_page_or_null((void *)v_origin); 102 + __vunmap_range_noflush(v_shadow, vmalloc_shadow(end)); 103 + __vunmap_range_noflush(v_origin, vmalloc_origin(end)); 104 + if (shadow) 105 + __free_pages(shadow, 1); 106 + if (origin) 107 + __free_pages(origin, 1); 108 + } 109 + flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end)); 110 + flush_cache_vmap(vmalloc_origin(start), 
vmalloc_origin(end)); 111 + kmsan_leave_runtime(); 112 + } 29 113 30 114 /* Functions from kmsan-checks.h follow. */ 31 115 void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
+113
mm/kmsan/shadow.c
··· 145 145 146 146 return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off; 147 147 } 148 + 149 + void kmsan_copy_page_meta(struct page *dst, struct page *src) 150 + { 151 + if (!kmsan_enabled || kmsan_in_runtime()) 152 + return; 153 + if (!dst || !page_has_metadata(dst)) 154 + return; 155 + if (!src || !page_has_metadata(src)) { 156 + kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE, 157 + /*checked*/ false); 158 + return; 159 + } 160 + 161 + kmsan_enter_runtime(); 162 + __memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE); 163 + __memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE); 164 + kmsan_leave_runtime(); 165 + } 166 + 167 + void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags) 168 + { 169 + bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled; 170 + struct page *shadow, *origin; 171 + depot_stack_handle_t handle; 172 + int pages = 1 << order; 173 + 174 + if (!page) 175 + return; 176 + 177 + shadow = shadow_page_for(page); 178 + origin = origin_page_for(page); 179 + 180 + if (initialized) { 181 + __memset(page_address(shadow), 0, PAGE_SIZE * pages); 182 + __memset(page_address(origin), 0, PAGE_SIZE * pages); 183 + return; 184 + } 185 + 186 + /* Zero pages allocated by the runtime should also be initialized. */ 187 + if (kmsan_in_runtime()) 188 + return; 189 + 190 + __memset(page_address(shadow), -1, PAGE_SIZE * pages); 191 + kmsan_enter_runtime(); 192 + handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0); 193 + kmsan_leave_runtime(); 194 + /* 195 + * Addresses are page-aligned, pages are contiguous, so it's ok 196 + * to just fill the origin pages with @handle. 
197 + */ 198 + for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++) 199 + ((depot_stack_handle_t *)page_address(origin))[i] = handle; 200 + } 201 + 202 + void kmsan_free_page(struct page *page, unsigned int order) 203 + { 204 + if (!kmsan_enabled || kmsan_in_runtime()) 205 + return; 206 + kmsan_enter_runtime(); 207 + kmsan_internal_poison_memory(page_address(page), 208 + PAGE_SIZE << compound_order(page), 209 + GFP_KERNEL, 210 + KMSAN_POISON_CHECK | KMSAN_POISON_FREE); 211 + kmsan_leave_runtime(); 212 + } 213 + 214 + void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end, 215 + pgprot_t prot, struct page **pages, 216 + unsigned int page_shift) 217 + { 218 + unsigned long shadow_start, origin_start, shadow_end, origin_end; 219 + struct page **s_pages, **o_pages; 220 + int nr, mapped; 221 + 222 + if (!kmsan_enabled) 223 + return; 224 + 225 + shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW); 226 + shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW); 227 + if (!shadow_start) 228 + return; 229 + 230 + nr = (end - start) / PAGE_SIZE; 231 + s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL); 232 + o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL); 233 + if (!s_pages || !o_pages) 234 + goto ret; 235 + for (int i = 0; i < nr; i++) { 236 + s_pages[i] = shadow_page_for(pages[i]); 237 + o_pages[i] = origin_page_for(pages[i]); 238 + } 239 + prot = __pgprot(pgprot_val(prot) | _PAGE_NX); 240 + prot = PAGE_KERNEL; 241 + 242 + origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN); 243 + origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN); 244 + kmsan_enter_runtime(); 245 + mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot, 246 + s_pages, page_shift); 247 + KMSAN_WARN_ON(mapped); 248 + mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot, 249 + o_pages, page_shift); 250 + KMSAN_WARN_ON(mapped); 251 + kmsan_leave_runtime(); 252 + flush_tlb_kernel_range(shadow_start, shadow_end); 253 + 
flush_tlb_kernel_range(origin_start, origin_end); 254 + flush_cache_vmap(shadow_start, shadow_end); 255 + flush_cache_vmap(origin_start, origin_end); 256 + 257 + ret: 258 + kfree(s_pages); 259 + kfree(o_pages); 260 + }
+2
mm/memory.c
··· 52 52 #include <linux/highmem.h> 53 53 #include <linux/pagemap.h> 54 54 #include <linux/memremap.h> 55 + #include <linux/kmsan.h> 55 56 #include <linux/ksm.h> 56 57 #include <linux/rmap.h> 57 58 #include <linux/export.h> ··· 3137 3136 delayacct_wpcopy_end(); 3138 3137 return 0; 3139 3138 } 3139 + kmsan_copy_page_meta(new_page, old_page); 3140 3140 } 3141 3141 3142 3142 if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
+11
mm/page_alloc.c
··· 27 27 #include <linux/compiler.h> 28 28 #include <linux/kernel.h> 29 29 #include <linux/kasan.h> 30 + #include <linux/kmsan.h> 30 31 #include <linux/module.h> 31 32 #include <linux/suspend.h> 32 33 #include <linux/pagevec.h> ··· 1401 1400 VM_BUG_ON_PAGE(PageTail(page), page); 1402 1401 1403 1402 trace_mm_page_free(page, order); 1403 + kmsan_free_page(page, order); 1404 1404 1405 1405 if (unlikely(PageHWPoison(page)) && !order) { 1406 1406 /* ··· 3810 3808 /* 3811 3809 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 3812 3810 */ 3811 + 3812 + /* 3813 + * Do not instrument rmqueue() with KMSAN. This function may call 3814 + * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 3815 + * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3816 + * may call rmqueue() again, which will result in a deadlock. 3817 + */ 3818 + __no_sanitize_memory 3813 3819 static inline 3814 3820 struct page *rmqueue(struct zone *preferred_zone, 3815 3821 struct zone *zone, unsigned int order, ··· 5570 5560 } 5571 5561 5572 5562 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5563 + kmsan_alloc_page(page, order, alloc_gfp); 5573 5564 5574 5565 return page; 5575 5566 }
+18 -2
mm/vmalloc.c
··· 320 320 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), 321 321 ioremap_max_page_shift); 322 322 flush_cache_vmap(addr, end); 323 + if (!err) 324 + kmsan_ioremap_page_range(addr, end, phys_addr, prot, 325 + ioremap_max_page_shift); 323 326 return err; 324 327 } 325 328 ··· 419 416 * 420 417 * This is an internal function only. Do not use outside mm/. 421 418 */ 422 - void vunmap_range_noflush(unsigned long start, unsigned long end) 419 + void __vunmap_range_noflush(unsigned long start, unsigned long end) 423 420 { 424 421 unsigned long next; 425 422 pgd_t *pgd; ··· 439 436 440 437 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 441 438 arch_sync_kernel_mappings(start, end); 439 + } 440 + 441 + void vunmap_range_noflush(unsigned long start, unsigned long end) 442 + { 443 + kmsan_vunmap_range_noflush(start, end); 444 + __vunmap_range_noflush(start, end); 442 445 } 443 446 444 447 /** ··· 584 575 * 585 576 * This is an internal function only. Do not use outside mm/. 586 577 */ 587 - int vmap_pages_range_noflush(unsigned long addr, unsigned long end, 578 + int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, 588 579 pgprot_t prot, struct page **pages, unsigned int page_shift) 589 580 { 590 581 unsigned int i, nr = (end - addr) >> PAGE_SHIFT; ··· 608 599 } 609 600 610 601 return 0; 602 + } 603 + 604 + int vmap_pages_range_noflush(unsigned long addr, unsigned long end, 605 + pgprot_t prot, struct page **pages, unsigned int page_shift) 606 + { 607 + kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift); 608 + return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); 611 609 } 612 610 613 611 /**