x86/mm/kasan: Don't use vmemmap_populate() to initialize shadow

[ Note, this commit is a cherry-picked version of:

d17a1d97dc20: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow")

... for easier x86 entry code testing and back-porting. ]

The KASAN shadow is currently mapped using vmemmap_populate() since that
provides a semi-convenient way to map pages into init_top_pgt. However,
since that no longer zeroes the mapped pages, it is not suitable for
KASAN, which requires zeroed shadow memory.

Add a kasan_populate_shadow() interface and use it instead of
vmemmap_populate(). In addition, this allows us to take advantage of
gigantic pages when populating the shadow, which saves memory that
would otherwise be wasted on page tables and reduces TLB pressure.

Link: http://lkml.kernel.org/r/20171103185147.2688-2-pasha.tatashin@oracle.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Bob Picco <bob.picco@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by Andrey Ryabinin and committed by Ingo Molnar f68d62a5 548c3050

+137 -8
+1 -1
arch/x86/Kconfig
··· 110 select HAVE_ARCH_AUDITSYSCALL 111 select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE 112 select HAVE_ARCH_JUMP_LABEL 113 - select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP 114 select HAVE_ARCH_KGDB 115 select HAVE_ARCH_KMEMCHECK 116 select HAVE_ARCH_MMAP_RND_BITS if MMU
··· 110 select HAVE_ARCH_AUDITSYSCALL 111 select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE 112 select HAVE_ARCH_JUMP_LABEL 113 + select HAVE_ARCH_KASAN if X86_64 114 select HAVE_ARCH_KGDB 115 select HAVE_ARCH_KMEMCHECK 116 select HAVE_ARCH_MMAP_RND_BITS if MMU
+136 -7
arch/x86/mm/kasan_init_64.c
··· 4 #include <linux/bootmem.h> 5 #include <linux/kasan.h> 6 #include <linux/kdebug.h> 7 #include <linux/mm.h> 8 #include <linux/sched.h> 9 #include <linux/sched/task.h> 10 #include <linux/vmalloc.h> 11 12 #include <asm/e820/types.h> 13 #include <asm/tlbflush.h> 14 #include <asm/sections.h> 15 #include <asm/pgtable.h> ··· 20 21 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); 22 23 - static int __init map_range(struct range *range) 24 { 25 unsigned long start; 26 unsigned long end; ··· 155 start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); 156 end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end)); 157 158 - return vmemmap_populate(start, end, NUMA_NO_NODE); 159 } 160 161 static void __init clear_pgds(unsigned long start, ··· 318 if (pfn_mapped[i].end == 0) 319 break; 320 321 - if (map_range(&pfn_mapped[i])) 322 - panic("kasan: unable to allocate shadow!"); 323 } 324 kasan_populate_zero_shadow( 325 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), 326 kasan_mem_to_shadow((void *)__START_KERNEL_map)); 327 328 - vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext), 329 - (unsigned long)kasan_mem_to_shadow(_end), 330 - NUMA_NO_NODE); 331 332 kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), 333 (void *)KASAN_SHADOW_END);
··· 4 #include <linux/bootmem.h> 5 #include <linux/kasan.h> 6 #include <linux/kdebug.h> 7 + #include <linux/memblock.h> 8 #include <linux/mm.h> 9 #include <linux/sched.h> 10 #include <linux/sched/task.h> 11 #include <linux/vmalloc.h> 12 13 #include <asm/e820/types.h> 14 + #include <asm/pgalloc.h> 15 #include <asm/tlbflush.h> 16 #include <asm/sections.h> 17 #include <asm/pgtable.h> ··· 18 19 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); 20 21 + static __init void *early_alloc(size_t size, int nid) 22 + { 23 + return memblock_virt_alloc_try_nid_nopanic(size, size, 24 + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); 25 + } 26 + 27 + static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, 28 + unsigned long end, int nid) 29 + { 30 + pte_t *pte; 31 + 32 + if (pmd_none(*pmd)) { 33 + void *p; 34 + 35 + if (boot_cpu_has(X86_FEATURE_PSE) && 36 + ((end - addr) == PMD_SIZE) && 37 + IS_ALIGNED(addr, PMD_SIZE)) { 38 + p = early_alloc(PMD_SIZE, nid); 39 + if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) 40 + return; 41 + else if (p) 42 + memblock_free(__pa(p), PMD_SIZE); 43 + } 44 + 45 + p = early_alloc(PAGE_SIZE, nid); 46 + pmd_populate_kernel(&init_mm, pmd, p); 47 + } 48 + 49 + pte = pte_offset_kernel(pmd, addr); 50 + do { 51 + pte_t entry; 52 + void *p; 53 + 54 + if (!pte_none(*pte)) 55 + continue; 56 + 57 + p = early_alloc(PAGE_SIZE, nid); 58 + entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); 59 + set_pte_at(&init_mm, addr, pte, entry); 60 + } while (pte++, addr += PAGE_SIZE, addr != end); 61 + } 62 + 63 + static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, 64 + unsigned long end, int nid) 65 + { 66 + pmd_t *pmd; 67 + unsigned long next; 68 + 69 + if (pud_none(*pud)) { 70 + void *p; 71 + 72 + if (boot_cpu_has(X86_FEATURE_GBPAGES) && 73 + ((end - addr) == PUD_SIZE) && 74 + IS_ALIGNED(addr, PUD_SIZE)) { 75 + p = early_alloc(PUD_SIZE, nid); 76 + if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) 77 + return; 78 
+ else if (p) 79 + memblock_free(__pa(p), PUD_SIZE); 80 + } 81 + 82 + p = early_alloc(PAGE_SIZE, nid); 83 + pud_populate(&init_mm, pud, p); 84 + } 85 + 86 + pmd = pmd_offset(pud, addr); 87 + do { 88 + next = pmd_addr_end(addr, end); 89 + if (!pmd_large(*pmd)) 90 + kasan_populate_pmd(pmd, addr, next, nid); 91 + } while (pmd++, addr = next, addr != end); 92 + } 93 + 94 + static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, 95 + unsigned long end, int nid) 96 + { 97 + pud_t *pud; 98 + unsigned long next; 99 + 100 + if (p4d_none(*p4d)) { 101 + void *p = early_alloc(PAGE_SIZE, nid); 102 + 103 + p4d_populate(&init_mm, p4d, p); 104 + } 105 + 106 + pud = pud_offset(p4d, addr); 107 + do { 108 + next = pud_addr_end(addr, end); 109 + if (!pud_large(*pud)) 110 + kasan_populate_pud(pud, addr, next, nid); 111 + } while (pud++, addr = next, addr != end); 112 + } 113 + 114 + static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, 115 + unsigned long end, int nid) 116 + { 117 + void *p; 118 + p4d_t *p4d; 119 + unsigned long next; 120 + 121 + if (pgd_none(*pgd)) { 122 + p = early_alloc(PAGE_SIZE, nid); 123 + pgd_populate(&init_mm, pgd, p); 124 + } 125 + 126 + p4d = p4d_offset(pgd, addr); 127 + do { 128 + next = p4d_addr_end(addr, end); 129 + kasan_populate_p4d(p4d, addr, next, nid); 130 + } while (p4d++, addr = next, addr != end); 131 + } 132 + 133 + static void __init kasan_populate_shadow(unsigned long addr, unsigned long end, 134 + int nid) 135 + { 136 + pgd_t *pgd; 137 + unsigned long next; 138 + 139 + addr = addr & PAGE_MASK; 140 + end = round_up(end, PAGE_SIZE); 141 + pgd = pgd_offset_k(addr); 142 + do { 143 + next = pgd_addr_end(addr, end); 144 + kasan_populate_pgd(pgd, addr, next, nid); 145 + } while (pgd++, addr = next, addr != end); 146 + } 147 + 148 + static void __init map_range(struct range *range) 149 { 150 unsigned long start; 151 unsigned long end; ··· 26 start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); 27 end = 
(unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end)); 28 29 + kasan_populate_shadow(start, end, early_pfn_to_nid(range->start)); 30 } 31 32 static void __init clear_pgds(unsigned long start, ··· 189 if (pfn_mapped[i].end == 0) 190 break; 191 192 + map_range(&pfn_mapped[i]); 193 } 194 + 195 kasan_populate_zero_shadow( 196 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), 197 kasan_mem_to_shadow((void *)__START_KERNEL_map)); 198 199 + kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), 200 + (unsigned long)kasan_mem_to_shadow(_end), 201 + early_pfn_to_nid(__pa(_stext))); 202 203 kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), 204 (void *)KASAN_SHADOW_END);