Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

UML: add support for KASAN under x86_64

Make KASAN run on User Mode Linux on x86_64.

The UML-specific KASAN initializer uses mmap to map the ~16TB of shadow
memory to the location defined by KASAN_SHADOW_OFFSET. kasan_init()
utilizes constructors to initialize KASAN before main().

The location of the KASAN shadow memory, starting at
KASAN_SHADOW_OFFSET, can be configured using the KASAN_SHADOW_OFFSET
option. The default location of this offset is 0x100000000000, which
keeps it out-of-the-way even on UML setups with more "physical" memory.

For low-memory setups, 0x7fff8000 can be used instead, which fits in an
immediate and is therefore faster, as suggested by Dmitry Vyukov. There
is usually enough free space at this location; however, it is a config
option so that it can be easily changed if needed.

Note that, unlike KASAN on other architectures, vmalloc allocations
still use the shadow memory allocated upfront, rather than allocating
and freeing it per-vmalloc allocation.

If another architecture chooses to go down the same path, we should
replace the checks for CONFIG_UML with something more generic, such
as:
- A CONFIG_KASAN_NO_SHADOW_ALLOC option, which architectures could set
- or, a way of having architecture-specific versions of these vmalloc
and module shadow memory allocation options.

Also note that, while UML supports both KASAN in inline mode
(CONFIG_KASAN_INLINE) and static linking (CONFIG_STATIC_LINK), it does
not support both at the same time.

Signed-off-by: Patricia Alfonso <trishalfonso@google.com>
Co-developed-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Signed-off-by: Richard Weinberger <richard@nod.at>

Authored by Patricia Alfonso; committed by Richard Weinberger.
5b301409 335e52c2

+135 -7
+15
arch/um/Kconfig
··· 12 12 select ARCH_HAS_STRNLEN_USER 13 13 select ARCH_NO_PREEMPT 14 14 select HAVE_ARCH_AUDITSYSCALL 15 + select HAVE_ARCH_KASAN if X86_64 16 + select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN 15 17 select HAVE_ARCH_SECCOMP_FILTER 16 18 select HAVE_ASM_MODVERSIONS 17 19 select HAVE_UID16 ··· 220 218 line help for more details. 221 219 222 220 It is safe to say Y, but you probably don't need this. 221 + 222 + config KASAN_SHADOW_OFFSET 223 + hex 224 + depends on KASAN 225 + default 0x100000000000 226 + help 227 + This is the offset at which the ~16TB of shadow memory is 228 + mapped and used by KASAN for memory debugging. This can be any 229 + address that has at least KASAN_SHADOW_SIZE (total address space divided 230 + by 8) amount of space so that the KASAN shadow memory does not conflict 231 + with anything. The default is 0x100000000000, which works even if mem is 232 + set to a large value. On low-memory systems, try 0x7fff8000, as it fits 233 + into the immediate of most instructions, improving performance. 223 234 224 235 endmenu 225 236
+2
arch/um/include/asm/common.lds.S
··· 83 83 } 84 84 .init_array : { 85 85 __init_array_start = .; 86 + *(.kasan_init) 87 + *(.init_array.*) 86 88 *(.init_array) 87 89 __init_array_end = .; 88 90 }
+37
arch/um/include/asm/kasan.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_UM_KASAN_H 3 + #define __ASM_UM_KASAN_H 4 + 5 + #include <linux/init.h> 6 + #include <linux/const.h> 7 + 8 + #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) 9 + 10 + /* used in kasan_mem_to_shadow to divide by 8 */ 11 + #define KASAN_SHADOW_SCALE_SHIFT 3 12 + 13 + #ifdef CONFIG_X86_64 14 + #define KASAN_HOST_USER_SPACE_END_ADDR 0x00007fffffffffffUL 15 + /* KASAN_SHADOW_SIZE is the size of total address space divided by 8 */ 16 + #define KASAN_SHADOW_SIZE ((KASAN_HOST_USER_SPACE_END_ADDR + 1) >> \ 17 + KASAN_SHADOW_SCALE_SHIFT) 18 + #else 19 + #error "KASAN_SHADOW_SIZE is not defined for this sub-architecture" 20 + #endif /* CONFIG_X86_64 */ 21 + 22 + #define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET) 23 + #define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) 24 + 25 + #ifdef CONFIG_KASAN 26 + void kasan_init(void); 27 + void kasan_map_memory(void *start, unsigned long len); 28 + extern int kasan_um_is_ready; 29 + 30 + #ifdef CONFIG_STATIC_LINK 31 + #define kasan_arch_is_ready() (kasan_um_is_ready) 32 + #endif 33 + #else 34 + static inline void kasan_init(void) { } 35 + #endif /* CONFIG_KASAN */ 36 + 37 + #endif /* __ASM_UM_KASAN_H */
+5 -1
arch/um/kernel/dyn.lds.S
··· 109 109 be empty, which isn't pretty. */ 110 110 . = ALIGN(32 / 8); 111 111 .preinit_array : { *(.preinit_array) } 112 - .init_array : { *(.init_array) } 112 + .init_array : { 113 + *(.kasan_init) 114 + *(.init_array.*) 115 + *(.init_array) 116 + } 113 117 .fini_array : { *(.fini_array) } 114 118 .data : { 115 119 INIT_TASK_DATA(KERNEL_STACK_SIZE)
+19
arch/um/kernel/mem.c
··· 18 18 #include <kern_util.h> 19 19 #include <mem_user.h> 20 20 #include <os.h> 21 + #include <linux/sched/task.h> 22 + 23 + #ifdef CONFIG_KASAN 24 + int kasan_um_is_ready; 25 + void kasan_init(void) 26 + { 27 + /* 28 + * kasan_map_memory will map all of the required address space and 29 + * the host machine will allocate physical memory as necessary. 30 + */ 31 + kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE); 32 + init_task.kasan_depth = 0; 33 + kasan_um_is_ready = true; 34 + } 35 + 36 + static void (*kasan_init_ptr)(void) 37 + __section(".kasan_init") __used 38 + = kasan_init; 39 + #endif 21 40 22 41 /* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */ 23 42 unsigned long *empty_zero_page = NULL;
+1 -1
arch/um/kernel/stacktrace.c
··· 27 27 28 28 frame = (struct stack_frame *)bp; 29 29 while (((long) sp & (THREAD_SIZE-1)) != 0) { 30 - addr = *sp; 30 + addr = READ_ONCE_NOCHECK(*sp); 31 31 if (__kernel_text_address(addr)) { 32 32 reliable = 0; 33 33 if ((unsigned long) sp == bp + sizeof(long)) {
+22
arch/um/os-Linux/mem.c
··· 17 17 #include <init.h> 18 18 #include <os.h> 19 19 20 + /* 21 + * kasan_map_memory - maps memory from @start with a size of @len. 22 + * The allocated memory is filled with zeroes upon success. 23 + * @start: the start address of the memory to be mapped 24 + * @len: the length of the memory to be mapped 25 + * 26 + * This function is used to map shadow memory for KASAN in uml 27 + */ 28 + void kasan_map_memory(void *start, size_t len) 29 + { 30 + if (mmap(start, 31 + len, 32 + PROT_READ|PROT_WRITE, 33 + MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, 34 + -1, 35 + 0) == MAP_FAILED) { 36 + os_info("Couldn't allocate shadow memory: %s\n.", 37 + strerror(errno)); 38 + exit(1); 39 + } 40 + } 41 + 20 42 /* Set by make_tempfile() during early boot. */ 21 43 static char *tempdir = NULL; 22 44
+2 -2
arch/um/os-Linux/user_syms.c
··· 27 27 #ifndef __x86_64__ 28 28 extern void *memcpy(void *, const void *, size_t); 29 29 EXPORT_SYMBOL(memcpy); 30 - #endif 31 - 32 30 EXPORT_SYMBOL(memmove); 33 31 EXPORT_SYMBOL(memset); 32 + #endif 33 + 34 34 EXPORT_SYMBOL(printf); 35 35 36 36 /* Here, instead, I can provide a fake prototype. Yes, someone cares: genksyms.
+2 -1
arch/x86/um/Makefile
··· 28 28 29 29 obj-y += syscalls_64.o vdso/ 30 30 31 - subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o 31 + subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \ 32 + ../lib/memmove_64.o ../lib/memset_64.o 32 33 33 34 endif 34 35
+3
arch/x86/um/vdso/Makefile
··· 3 3 # Building vDSO images for x86. 4 4 # 5 5 6 + # do not instrument on vdso because KASAN is not compatible with user mode 7 + KASAN_SANITIZE := n 8 + 6 9 # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 7 10 KCOV_INSTRUMENT := n 8 11
+27 -2
mm/kasan/shadow.c
··· 295 295 return 0; 296 296 297 297 shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr); 298 - shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE); 299 298 shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size); 300 - shadow_end = ALIGN(shadow_end, PAGE_SIZE); 299 + 300 + /* 301 + * User Mode Linux maps enough shadow memory for all of virtual memory 302 + * at boot, so doesn't need to allocate more on vmalloc, just clear it. 303 + * 304 + * The remaining CONFIG_UML checks in this file exist for the same 305 + * reason. 306 + */ 307 + if (IS_ENABLED(CONFIG_UML)) { 308 + __memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start); 309 + return 0; 310 + } 311 + 312 + shadow_start = PAGE_ALIGN_DOWN(shadow_start); 313 + shadow_end = PAGE_ALIGN(shadow_end); 301 314 302 315 ret = apply_to_page_range(&init_mm, shadow_start, 303 316 shadow_end - shadow_start, ··· 479 466 480 467 if (shadow_end > shadow_start) { 481 468 size = shadow_end - shadow_start; 469 + if (IS_ENABLED(CONFIG_UML)) { 470 + __memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start); 471 + return; 472 + } 482 473 apply_to_existing_page_range(&init_mm, 483 474 (unsigned long)shadow_start, 484 475 size, kasan_depopulate_vmalloc_pte, ··· 548 531 if (WARN_ON(!PAGE_ALIGNED(shadow_start))) 549 532 return -EINVAL; 550 533 534 + if (IS_ENABLED(CONFIG_UML)) { 535 + __memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size); 536 + return 0; 537 + } 538 + 551 539 ret = __vmalloc_node_range(shadow_size, 1, shadow_start, 552 540 shadow_start + shadow_size, 553 541 GFP_KERNEL, ··· 576 554 577 555 void kasan_free_module_shadow(const struct vm_struct *vm) 578 556 { 557 + if (IS_ENABLED(CONFIG_UML)) 558 + return; 559 + 579 560 if (vm->flags & VM_KASAN) 580 561 vfree(kasan_mem_to_shadow(vm->addr)); 581 562 }