
ARM: 9016/2: Initialize the mapping of KASan shadow memory

This patch initializes the KASan shadow region's page tables and backing
memory. There are two stages to the KASan initialization:

1. At the early boot stage the whole shadow region is mapped to just
one physical page (kasan_early_shadow_page). This is done by the
function kasan_early_init(), which is called from __mmap_switched()
(arch/arm/kernel/head-common.S).
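
For context: every instrumented memory access first translates the
accessed address to its shadow address and tests the shadow byte, which
is why the whole shadow region must be mapped before any instrumented
code runs. A minimal sketch of that lookup, assuming the generic KASan
scheme (kasan_mem_to_shadow() and kasan_report() are the generic kernel
helpers, not code added by this patch):

	/* One shadow byte covers 8 bytes of memory */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

	/* Roughly what a compiler-emitted check such as __asan_load8() does */
	if (*(u8 *)kasan_mem_to_shadow(ptr))
		kasan_report((unsigned long)ptr, 8, false, _RET_IP_);

Pointing every shadow page at the single scratch page makes these early
checks read harmless zeroes until the real shadow is set up.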

2. After paging_init() has been called, we use kasan_early_shadow_page
as the zero shadow for memory that KASan does not need to track, and
allocate new shadow memory for the memory that KASan does need to
track. This is done by the function kasan_init(), which is called
from setup_arch().
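
Since one shadow byte covers 8 (1 << KASAN_SHADOW_SCALE_SHIFT) bytes,
the shadow for the full 4 GiB address space is 4 GiB / 8 = 512 MiB
(1 << 29), which is the relation kasan_early_init() verifies with its
BUILD_BUG_ON(). A condensed sketch of the stage-2 policy (the full
version, including the highmem checks, is in kasan_init() in the diff
below):

	/* Untracked regions (vmalloc etc.) share the read-only zero shadow */
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
				    kasan_mem_to_shadow((void *)-1UL) + 1);

	/* Each lowmem memblock gets real, writable shadow pages */
	for_each_mem_range(i, &pa_start, &pa_end)
		create_mapping(__va(pa_start), __va(pa_end));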

When using KASan we also need to increase THREAD_SIZE_ORDER from 1 to 2,
as the extra calls for shadow memory use quite a bit of stack; since
THREAD_SIZE is PAGE_SIZE << THREAD_SIZE_ORDER, with 4 KiB pages this
doubles the kernel stack from 8 KiB to 16 KiB.

As we need to make a temporary copy of the PGD when setting up the
shadow memory, we add a PGD_SIZE definition covering both LPAE
(PTRS_PER_PGD * sizeof(pgd_t)) and non-LPAE (PAGE_SIZE << 2, the
classic 16 KiB first-level table) setups.

The KASan core code unconditionally calls pud_populate(), so on
non-LPAE this needs to be changed from BUG() to an empty
do { } while (0) when building with KASan enabled.

After the initial development by Andrey Ryabinin, several modifications
have been made to this code:

Abbott Liu <liuwenliang@huawei.com>
- Add support for ARM LPAE: if LPAE is enabled, the KASan shadow region's
mapping table needs to be copied in the pgd_alloc() function.
- Move kasan_pte_populate(), kasan_pmd_populate(), kasan_pud_populate()
and kasan_pgd_populate() from the .meminit.text section to the
.init.text section. Reported by Florian Fainelli <f.fainelli@gmail.com>

Linus Walleij <linus.walleij@linaro.org>:
- Drop the custom manipulation of TTBR0 and just use
cpu_switch_mm() to switch the pgd table.
- Adapt to handle 4th-level page table folding.
- Rewrite the entire page directory and page entry initialization
sequence to be recursive, based on ARM64's kasan_init.c.

Ard Biesheuvel <ardb@kernel.org>:
- Necessary underlying fixes.
- Crucial bug fixes to the memory set-up code.

Co-developed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Co-developed-by: Abbott Liu <liuwenliang@huawei.com>
Co-developed-by: Ard Biesheuvel <ardb@kernel.org>

Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Ard Biesheuvel <ardb@kernel.org> # QEMU/KVM/mach-virt/LPAE/8G
Tested-by: Florian Fainelli <f.fainelli@gmail.com> # Brahma SoCs
Tested-by: Ahmad Fatoum <a.fatoum@pengutronix.de> # i.MX6Q
Reported-by: Russell King - ARM Linux <rmk+kernel@armlinux.org.uk>
Reported-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>

Authored by Linus Walleij, committed by Russell King
5615f69b c12366ba

+362 -2
+33
arch/arm/include/asm/kasan.h
···
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+  * arch/arm/include/asm/kasan.h
+  *
+  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+  *
+  */
+
+ #ifndef __ASM_KASAN_H
+ #define __ASM_KASAN_H
+
+ #ifdef CONFIG_KASAN
+
+ #include <asm/kasan_def.h>
+
+ #define KASAN_SHADOW_SCALE_SHIFT	3
+
+ /*
+  * The compiler uses a shadow offset assuming that addresses start
+  * from 0. Kernel addresses don't start from 0, so shadow
+  * for kernel really starts from 'compiler's shadow offset' +
+  * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
+  */
+
+ asmlinkage void kasan_early_init(void);
+ extern void kasan_init(void);
+
+ #else
+ static inline void kasan_init(void) { }
+ #endif
+
+ #endif
+7 -1
arch/arm/include/asm/pgalloc.h
···
  #define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
  
  #ifdef CONFIG_ARM_LPAE
+ #define PGD_SIZE		(PTRS_PER_PGD * sizeof(pgd_t))
  
  static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  {
···
  }
  
  #else	/* !CONFIG_ARM_LPAE */
+ #define PGD_SIZE		(PAGE_SIZE << 2)
  
  /*
   * Since we have only two-level page tables, these are trivial
   */
  #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
  #define pmd_free(mm, pmd)		do { } while (0)
+ #ifdef CONFIG_KASAN
+ /* The KASan core unconditionally calls pud_populate() on all architectures */
+ #define pud_populate(mm,pmd,pte)	do { } while (0)
+ #else
  #define pud_populate(mm,pmd,pte)	BUG()
- 
+ #endif
  #endif	/* CONFIG_ARM_LPAE */
  
  extern pgd_t *pgd_alloc(struct mm_struct *mm);
+8
arch/arm/include/asm/thread_info.h
···
  #include <asm/fpstate.h>
  #include <asm/page.h>
  
+ #ifdef CONFIG_KASAN
+ /*
+  * KASan uses a lot of extra stack space so the thread size order needs to
+  * be increased.
+  */
+ #define THREAD_SIZE_ORDER	2
+ #else
  #define THREAD_SIZE_ORDER	1
+ #endif
  #define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
  #define THREAD_START_SP		(THREAD_SIZE - 8)
+3
arch/arm/kernel/head-common.S
···
  	str	r8, [r2]			@ Save atags pointer
  	cmp	r3, #0
  	strne	r10, [r3]			@ Save control register values
+ #ifdef CONFIG_KASAN
+ 	bl	kasan_early_init
+ #endif
  	mov	lr, #0
  	b	start_kernel
  ENDPROC(__mmap_switched)
+2
arch/arm/kernel/setup.c
···
  #include <asm/unwind.h>
  #include <asm/memblock.h>
  #include <asm/virt.h>
+ #include <asm/kasan.h>
  
  #include "atags.h"
···
  	early_ioremap_reset();
  
  	paging_init(mdesc);
+ 	kasan_init();
  	request_standard_resources(mdesc);
  
  	if (mdesc->restart)
+3
arch/arm/mm/Makefile
···
  obj-$(CONFIG_CACHE_XSC3L2)	+= cache-xsc3l2.o
  obj-$(CONFIG_CACHE_TAUROS2)	+= cache-tauros2.o
  obj-$(CONFIG_CACHE_UNIPHIER)	+= cache-uniphier.o
+ 
+ KASAN_SANITIZE_kasan_init.o	:= n
+ obj-$(CONFIG_KASAN)		+= kasan_init.o
+291
arch/arm/mm/kasan_init.c
···
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * This file contains kasan initialization code for ARM.
+  *
+  * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+  * Author: Linus Walleij <linus.walleij@linaro.org>
+  */
+ 
+ #define pr_fmt(fmt) "kasan: " fmt
+ #include <linux/kasan.h>
+ #include <linux/kernel.h>
+ #include <linux/memblock.h>
+ #include <linux/sched/task.h>
+ #include <linux/start_kernel.h>
+ #include <linux/pgtable.h>
+ #include <asm/cputype.h>
+ #include <asm/highmem.h>
+ #include <asm/mach/map.h>
+ #include <asm/memory.h>
+ #include <asm/page.h>
+ #include <asm/pgalloc.h>
+ #include <asm/procinfo.h>
+ #include <asm/proc-fns.h>
+ 
+ #include "mm.h"
+ 
+ static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
+ 
+ pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+ 
+ static __init void *kasan_alloc_block(size_t size)
+ {
+ 	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ 				      MEMBLOCK_ALLOC_KASAN, NUMA_NO_NODE);
+ }
+ 
+ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
+ 				      unsigned long end, bool early)
+ {
+ 	unsigned long next;
+ 	pte_t *ptep = pte_offset_kernel(pmdp, addr);
+ 
+ 	do {
+ 		pte_t entry;
+ 		void *p;
+ 
+ 		next = addr + PAGE_SIZE;
+ 
+ 		if (!early) {
+ 			if (!pte_none(READ_ONCE(*ptep)))
+ 				continue;
+ 
+ 			p = kasan_alloc_block(PAGE_SIZE);
+ 			if (!p) {
+ 				panic("%s failed to allocate shadow page for address 0x%lx\n",
+ 				      __func__, addr);
+ 				return;
+ 			}
+ 			memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
+ 			entry = pfn_pte(virt_to_pfn(p),
+ 					__pgprot(pgprot_val(PAGE_KERNEL)));
+ 		} else if (pte_none(READ_ONCE(*ptep))) {
+ 			/*
+ 			 * The early shadow memory is mapping all KASan
+ 			 * operations to one and the same page in memory,
+ 			 * "kasan_early_shadow_page" so that the instrumentation
+ 			 * will work on a scratch area until we can set up the
+ 			 * proper KASan shadow memory.
+ 			 */
+ 			entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page),
+ 					__pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN));
+ 		} else {
+ 			/*
+ 			 * Early shadow mappings are PMD_SIZE aligned, so if the
+ 			 * first entry is already set, they must all be set.
+ 			 */
+ 			return;
+ 		}
+ 
+ 		set_pte_at(&init_mm, addr, ptep, entry);
+ 	} while (ptep++, addr = next, addr != end);
+ }
+ 
+ /*
+  * The pmd (page middle directory) is only used on LPAE
+  */
+ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
+ 				      unsigned long end, bool early)
+ {
+ 	unsigned long next;
+ 	pmd_t *pmdp = pmd_offset(pudp, addr);
+ 
+ 	do {
+ 		if (pmd_none(*pmdp)) {
+ 			/*
+ 			 * We attempt to allocate a shadow block for the PMDs
+ 			 * used by the PTEs for this address if it isn't already
+ 			 * allocated.
+ 			 */
+ 			void *p = early ? kasan_early_shadow_pte :
+ 				kasan_alloc_block(PAGE_SIZE);
+ 
+ 			if (!p) {
+ 				panic("%s failed to allocate shadow block for address 0x%lx\n",
+ 				      __func__, addr);
+ 				return;
+ 			}
+ 			pmd_populate_kernel(&init_mm, pmdp, p);
+ 			flush_pmd_entry(pmdp);
+ 		}
+ 
+ 		next = pmd_addr_end(addr, end);
+ 		kasan_pte_populate(pmdp, addr, next, early);
+ 	} while (pmdp++, addr = next, addr != end);
+ }
+ 
+ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+ 				      bool early)
+ {
+ 	unsigned long next;
+ 	pgd_t *pgdp;
+ 	p4d_t *p4dp;
+ 	pud_t *pudp;
+ 
+ 	pgdp = pgd_offset_k(addr);
+ 
+ 	do {
+ 		/*
+ 		 * Allocate and populate the shadow block of p4d folded into
+ 		 * pud folded into pmd if it doesn't already exist
+ 		 */
+ 		if (!early && pgd_none(*pgdp)) {
+ 			void *p = kasan_alloc_block(PAGE_SIZE);
+ 
+ 			if (!p) {
+ 				panic("%s failed to allocate shadow block for address 0x%lx\n",
+ 				      __func__, addr);
+ 				return;
+ 			}
+ 			pgd_populate(&init_mm, pgdp, p);
+ 		}
+ 
+ 		next = pgd_addr_end(addr, end);
+ 		/*
+ 		 * We just immediately jump over the p4d and pud page
+ 		 * directories since we believe ARM32 will never gain four
+ 		 * nor five level page tables.
+ 		 */
+ 		p4dp = p4d_offset(pgdp, addr);
+ 		pudp = pud_offset(p4dp, addr);
+ 
+ 		kasan_pmd_populate(pudp, addr, next, early);
+ 	} while (pgdp++, addr = next, addr != end);
+ }
+ 
+ extern struct proc_info_list *lookup_processor_type(unsigned int);
+ 
+ void __init kasan_early_init(void)
+ {
+ 	struct proc_info_list *list;
+ 
+ 	/*
+ 	 * locate processor in the list of supported processor
+ 	 * types. The linker builds this table for us from the
+ 	 * entries in arch/arm/mm/proc-*.S
+ 	 */
+ 	list = lookup_processor_type(read_cpuid_id());
+ 	if (list) {
+ #ifdef MULTI_CPU
+ 		processor = *list->proc;
+ #endif
+ 	}
+ 
+ 	BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET);
+ 	/*
+ 	 * We walk the page table and set all of the shadow memory to point
+ 	 * to the scratch page.
+ 	 */
+ 	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, true);
+ }
+ 
+ static void __init clear_pgds(unsigned long start,
+ 			      unsigned long end)
+ {
+ 	for (; start && start < end; start += PMD_SIZE)
+ 		pmd_clear(pmd_off_k(start));
+ }
+ 
+ static int __init create_mapping(void *start, void *end)
+ {
+ 	void *shadow_start, *shadow_end;
+ 
+ 	shadow_start = kasan_mem_to_shadow(start);
+ 	shadow_end = kasan_mem_to_shadow(end);
+ 
+ 	pr_info("Mapping kernel virtual memory block: %px-%px at shadow: %px-%px\n",
+ 		start, end, shadow_start, shadow_end);
+ 
+ 	kasan_pgd_populate((unsigned long)shadow_start & PAGE_MASK,
+ 			   PAGE_ALIGN((unsigned long)shadow_end), false);
+ 	return 0;
+ }
+ 
+ void __init kasan_init(void)
+ {
+ 	phys_addr_t pa_start, pa_end;
+ 	u64 i;
+ 
+ 	/*
+ 	 * We are going to perform proper setup of shadow memory.
+ 	 *
+ 	 * At first we should unmap early shadow (clear_pgds() call bellow).
+ 	 * However, instrumented code can't execute without shadow memory.
+ 	 *
+ 	 * To keep the early shadow memory MMU tables around while setting up
+ 	 * the proper shadow memory, we copy swapper_pg_dir (the initial page
+ 	 * table) to tmp_pgd_table and use that to keep the early shadow memory
+ 	 * mapped until the full shadow setup is finished. Then we swap back
+ 	 * to the proper swapper_pg_dir.
+ 	 */
+ 
+ 	memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
+ #ifdef CONFIG_ARM_LPAE
+ 	/* We need to be in the same PGD or this won't work */
+ 	BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
+ 		     pgd_index(KASAN_SHADOW_END));
+ 	memcpy(tmp_pmd_table,
+ 	       pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
+ 	       sizeof(tmp_pmd_table));
+ 	set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
+ 		__pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+ #endif
+ 	cpu_switch_mm(tmp_pgd_table, &init_mm);
+ 	local_flush_tlb_all();
+ 
+ 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ 
+ 	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+ 				    kasan_mem_to_shadow((void *)-1UL) + 1);
+ 
+ 	for_each_mem_range(i, &pa_start, &pa_end) {
+ 		void *start = __va(pa_start);
+ 		void *end = __va(pa_end);
+ 
+ 		/* Do not attempt to shadow highmem */
+ 		if (pa_start >= arm_lowmem_limit) {
+ 			pr_info("Skip highmem block at %pa-%pa\n", &pa_start, &pa_end);
+ 			continue;
+ 		}
+ 		if (pa_end > arm_lowmem_limit) {
+ 			pr_info("Truncating shadow for memory block at %pa-%pa to lowmem region at %pa\n",
+ 				&pa_start, &pa_end, &arm_lowmem_limit);
+ 			end = __va(arm_lowmem_limit);
+ 		}
+ 		if (start >= end) {
+ 			pr_info("Skipping invalid memory block %pa-%pa (virtual %p-%p)\n",
+ 				&pa_start, &pa_end, start, end);
+ 			continue;
+ 		}
+ 
+ 		create_mapping(start, end);
+ 	}
+ 
+ 	/*
+ 	 * 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
+ 	 *    so we need to map this area.
+ 	 * 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
+ 	 *    ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't
+ 	 *    use kasan_populate_zero_shadow.
+ 	 */
+ 	create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE));
+ 
+ 	/*
+ 	 * KAsan may reuse the contents of kasan_early_shadow_pte directly, so
+ 	 * we should make sure that it maps the zero page read-only.
+ 	 */
+ 	for (i = 0; i < PTRS_PER_PTE; i++)
+ 		set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
+ 			   &kasan_early_shadow_pte[i],
+ 			   pfn_pte(virt_to_pfn(kasan_early_shadow_page),
+ 				   __pgprot(pgprot_val(PAGE_KERNEL)
+ 					    | L_PTE_RDONLY)));
+ 
+ 	cpu_switch_mm(swapper_pg_dir, &init_mm);
+ 	local_flush_tlb_all();
+ 
+ 	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ 	pr_info("Kernel address sanitizer initialized\n");
+ 	init_task.kasan_depth = 0;
+ }
+15 -1
arch/arm/mm/pgd.c
···
  	new_pmd = pmd_alloc(mm, new_pud, 0);
  	if (!new_pmd)
  		goto no_pmd;
- #endif
+ #ifdef CONFIG_KASAN
+ 	/*
+ 	 * Copy PMD table for KASAN shadow mappings.
+ 	 */
+ 	init_pgd = pgd_offset_k(TASK_SIZE);
+ 	init_p4d = p4d_offset(init_pgd, TASK_SIZE);
+ 	init_pud = pud_offset(init_p4d, TASK_SIZE);
+ 	init_pmd = pmd_offset(init_pud, TASK_SIZE);
+ 	new_pmd = pmd_offset(new_pud, TASK_SIZE);
+ 	memcpy(new_pmd, init_pmd,
+ 	       (pmd_index(MODULES_VADDR) - pmd_index(TASK_SIZE))
+ 	       * sizeof(pmd_t));
+ 	clean_dcache_area(new_pmd, PTRS_PER_PMD * sizeof(pmd_t));
+ #endif /* CONFIG_KASAN */
+ #endif /* CONFIG_LPAE */
  
  	if (!vectors_high()) {
  		/*