x86/mm/kasan: Don't use vmemmap_populate() to initialize shadow

[ Note, this is a Git cherry-pick of the following commit:

d17a1d97dc20: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow")

... for easier x86 PTI code testing and back-porting. ]

The KASAN shadow is currently mapped using vmemmap_populate(), since that
provides a semi-convenient way to map pages into init_top_pgt. However,
vmemmap_populate() no longer zeroes the pages it maps, so it is not
suitable for KASAN, which requires zeroed shadow memory.

Add a kasan_populate_shadow() interface and use it instead of
vmemmap_populate(). This also lets us take advantage of gigantic pages
when populating the shadow, which should save some of the memory
otherwise spent on page tables and reduce TLB pressure.
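
As a rough illustration of that size decision (a standalone userspace sketch,
not part of this patch: shadow_mapping_size() and gb_pages_ok are made-up
names, and the constants are the usual x86-64 page sizes), the populator only
installs a huge entry when the chunk handed down by the page-table walk is
exactly one naturally aligned PMD- or PUD-sized block, and otherwise falls
back to 4 KiB pages:

/* Sketch only: mirrors the IS_ALIGNED()/size checks in the patch below. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000UL	/* 4 KiB */
#define PMD_SIZE  0x200000UL	/* 2 MiB */
#define PUD_SIZE  0x40000000UL	/* 1 GiB */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Pick the largest page size that exactly covers [addr, end). */
static unsigned long shadow_mapping_size(unsigned long addr,
					 unsigned long end, bool gb_pages_ok)
{
	if (gb_pages_ok && end - addr == PUD_SIZE && IS_ALIGNED(addr, PUD_SIZE))
		return PUD_SIZE;	/* gigantic page: no PMD/PTE tables needed */
	if (end - addr == PMD_SIZE && IS_ALIGNED(addr, PMD_SIZE))
		return PMD_SIZE;	/* large page: no PTE table needed */
	return PAGE_SIZE;		/* fall back to ordinary 4 KiB pages */
}

int main(void)
{
	/* A 1 GiB chunk, 1 GiB aligned: one gigantic page does the job. */
	printf("%#lx\n", shadow_mapping_size(0x40000000UL, 0x80000000UL, true));
	return 0;
}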

Link: http://lkml.kernel.org/r/20171103185147.2688-2-pasha.tatashin@oracle.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Bob Picco <bob.picco@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Andrey Ryabinin, committed by Ingo Molnar (2aeb0736, 3382290e)

Changed files: 2 files changed, 137 insertions(+), 8 deletions(-)

arch/x86/Kconfig (+1 -1)

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,7 +108,7 @@
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN			if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU

arch/x86/mm/kasan_init_64.c (+136 -7)

--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -4,12 +4,14 @@
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
 
 #include <asm/e820/types.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -18,7 +20,134 @@
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static int __init map_range(struct range *range)
+static __init void *early_alloc(size_t size, int nid)
+{
+	return memblock_virt_alloc_try_nid_nopanic(size, size,
+		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_PSE) &&
+		    ((end - addr) == PMD_SIZE) &&
+		    IS_ALIGNED(addr, PMD_SIZE)) {
+			p = early_alloc(PMD_SIZE, nid);
+			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PMD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pmd_populate_kernel(&init_mm, pmd, p);
+	}
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t entry;
+		void *p;
+
+		if (!pte_none(*pte))
+			continue;
+
+		p = early_alloc(PAGE_SIZE, nid);
+		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+		set_pte_at(&init_mm, addr, pte, entry);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	if (pud_none(*pud)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+		    ((end - addr) == PUD_SIZE) &&
+		    IS_ALIGNED(addr, PUD_SIZE)) {
+			p = early_alloc(PUD_SIZE, nid);
+			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PUD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pud_populate(&init_mm, pud, p);
+	}
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (!pmd_large(*pmd))
+			kasan_populate_pmd(pmd, addr, next, nid);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	if (p4d_none(*p4d)) {
+		void *p = early_alloc(PAGE_SIZE, nid);
+
+		p4d_populate(&init_mm, p4d, p);
+	}
+
+	pud = pud_offset(p4d, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (!pud_large(*pud))
+			kasan_populate_pud(pud, addr, next, nid);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	void *p;
+	p4d_t *p4d;
+	unsigned long next;
+
+	if (pgd_none(*pgd)) {
+		p = early_alloc(PAGE_SIZE, nid);
+		pgd_populate(&init_mm, pgd, p);
+	}
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		kasan_populate_p4d(p4d, addr, next, nid);
+	} while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
+					 int nid)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	addr = addr & PAGE_MASK;
+	end = round_up(end, PAGE_SIZE);
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		kasan_populate_pgd(pgd, addr, next, nid);
+	} while (pgd++, addr = next, addr != end);
+}
+
+static void __init map_range(struct range *range)
 {
 	unsigned long start;
 	unsigned long end;
@@ -26,7 +155,7 @@
 	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
 	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
 
-	return vmemmap_populate(start, end, NUMA_NO_NODE);
+	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -189,16 +318,16 @@
 		if (pfn_mapped[i].end == 0)
 			break;
 
-		if (map_range(&pfn_mapped[i]))
-			panic("kasan: unable to allocate shadow!");
+		map_range(&pfn_mapped[i]);
 	}
+
 	kasan_populate_zero_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
-	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
-			(unsigned long)kasan_mem_to_shadow(_end),
-			NUMA_NO_NODE);
+	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
+			      (unsigned long)kasan_mem_to_shadow(_end),
+			      early_pfn_to_nid(__pa(_stext)));
 
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);