Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kasan: enable instrumentation of global variables

This feature lets us detect out-of-bounds accesses to global variables.
It works both for globals in the kernel image and for globals in modules.
Currently this won't work for symbols in user-specified sections (e.g.
__init, __read_mostly, ...).

The idea is simple. The compiler increases the size of each global
variable by the redzone size and adds constructors invoking the
__asan_register_globals() function. Information about each global
variable (address, size, size with redzone, ...) is passed to
__asan_register_globals() so we can poison the variable's redzone.

This patch also forces module_alloc() to return an 8*PAGE_SIZE-aligned
address, making shadow memory handling
(kasan_module_alloc()/kasan_module_free()) simpler. Such alignment
guarantees that each shadow page backing the modules' address space
corresponds to only one module_alloc() allocation.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Andrey Ryabinin and committed by
Linus Torvalds
bebf56a1 6301939d

+132 -4
+1 -1
Documentation/kasan.txt
··· 9 9 bugs. 10 10 11 11 KASan uses compile-time instrumentation for checking every memory access, 12 - therefore you will need a certain version of GCC >= 4.9.2 12 + therefore you will need a certain version of GCC > 4.9.2 13 13 14 14 Currently KASan is supported only for x86_64 architecture and requires that the 15 15 kernel be built with the SLUB allocator.
+11 -1
arch/x86/kernel/module.c
··· 24 24 #include <linux/fs.h> 25 25 #include <linux/string.h> 26 26 #include <linux/kernel.h> 27 + #include <linux/kasan.h> 27 28 #include <linux/bug.h> 28 29 #include <linux/mm.h> 29 30 #include <linux/gfp.h> ··· 84 83 85 84 void *module_alloc(unsigned long size) 86 85 { 86 + void *p; 87 + 87 88 if (PAGE_ALIGN(size) > MODULES_LEN) 88 89 return NULL; 89 - return __vmalloc_node_range(size, 1, 90 + 91 + p = __vmalloc_node_range(size, MODULE_ALIGN, 90 92 MODULES_VADDR + get_module_load_offset(), 91 93 MODULES_END, GFP_KERNEL | __GFP_HIGHMEM, 92 94 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, 93 95 __builtin_return_address(0)); 96 + if (p && (kasan_module_alloc(p, size) < 0)) { 97 + vfree(p); 98 + return NULL; 99 + } 100 + 101 + return p; 94 102 } 95 103 96 104 #ifdef CONFIG_X86_32
+1 -1
arch/x86/mm/kasan_init_64.c
··· 196 196 (unsigned long)kasan_mem_to_shadow(_end), 197 197 NUMA_NO_NODE); 198 198 199 - populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_VADDR), 199 + populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), 200 200 (void *)KASAN_SHADOW_END); 201 201 202 202 memset(kasan_zero_page, 0, PAGE_SIZE);
+4
include/linux/compiler-gcc4.h
··· 85 85 #define __HAVE_BUILTIN_BSWAP16__ 86 86 #endif 87 87 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ 88 + 89 + #if GCC_VERSION >= 40902 90 + #define KASAN_ABI_VERSION 3 91 + #endif
+2
include/linux/compiler-gcc5.h
··· 63 63 #define __HAVE_BUILTIN_BSWAP64__ 64 64 #define __HAVE_BUILTIN_BSWAP16__ 65 65 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ 66 + 67 + #define KASAN_ABI_VERSION 4
+10
include/linux/kasan.h
··· 49 49 void kasan_slab_alloc(struct kmem_cache *s, void *object); 50 50 void kasan_slab_free(struct kmem_cache *s, void *object); 51 51 52 + #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) 53 + 54 + int kasan_module_alloc(void *addr, size_t size); 55 + void kasan_module_free(void *addr); 56 + 52 57 #else /* CONFIG_KASAN */ 58 + 59 + #define MODULE_ALIGN 1 53 60 54 61 static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 55 62 ··· 80 73 81 74 static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} 82 75 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} 76 + 77 + static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } 78 + static inline void kasan_module_free(void *addr) {} 83 79 84 80 #endif /* CONFIG_KASAN */ 85 81
+2
kernel/module.c
··· 56 56 #include <linux/async.h> 57 57 #include <linux/percpu.h> 58 58 #include <linux/kmemleak.h> 59 + #include <linux/kasan.h> 59 60 #include <linux/jump_label.h> 60 61 #include <linux/pfn.h> 61 62 #include <linux/bsearch.h> ··· 1814 1813 void __weak module_memfree(void *module_region) 1815 1814 { 1816 1815 vfree(module_region); 1816 + kasan_module_free(module_region); 1817 1817 } 1818 1818 1819 1819 void __weak module_arch_cleanup(struct module *mod)
+1
lib/Kconfig.kasan
··· 6 6 config KASAN 7 7 bool "KASan: runtime memory debugger" 8 8 depends on SLUB_DEBUG 9 + select CONSTRUCTORS 9 10 help 10 11 Enables kernel address sanitizer - runtime memory debugger, 11 12 designed to find out-of-bounds accesses and use-after-free bugs.
+52
mm/kasan/kasan.c
··· 22 22 #include <linux/memblock.h> 23 23 #include <linux/memory.h> 24 24 #include <linux/mm.h> 25 + #include <linux/module.h> 25 26 #include <linux/printk.h> 26 27 #include <linux/sched.h> 27 28 #include <linux/slab.h> ··· 395 394 kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), 396 395 KASAN_FREE_PAGE); 397 396 } 397 + 398 + int kasan_module_alloc(void *addr, size_t size) 399 + { 400 + void *ret; 401 + size_t shadow_size; 402 + unsigned long shadow_start; 403 + 404 + shadow_start = (unsigned long)kasan_mem_to_shadow(addr); 405 + shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, 406 + PAGE_SIZE); 407 + 408 + if (WARN_ON(!PAGE_ALIGNED(shadow_start))) 409 + return -EINVAL; 410 + 411 + ret = __vmalloc_node_range(shadow_size, 1, shadow_start, 412 + shadow_start + shadow_size, 413 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 414 + PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, 415 + __builtin_return_address(0)); 416 + return ret ? 0 : -ENOMEM; 417 + } 418 + 419 + void kasan_module_free(void *addr) 420 + { 421 + vfree(kasan_mem_to_shadow(addr)); 422 + } 423 + 424 + static void register_global(struct kasan_global *global) 425 + { 426 + size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); 427 + 428 + kasan_unpoison_shadow(global->beg, global->size); 429 + 430 + kasan_poison_shadow(global->beg + aligned_size, 431 + global->size_with_redzone - aligned_size, 432 + KASAN_GLOBAL_REDZONE); 433 + } 434 + 435 + void __asan_register_globals(struct kasan_global *globals, size_t size) 436 + { 437 + int i; 438 + 439 + for (i = 0; i < size; i++) 440 + register_global(&globals[i]); 441 + } 442 + EXPORT_SYMBOL(__asan_register_globals); 443 + 444 + void __asan_unregister_globals(struct kasan_global *globals, size_t size) 445 + { 446 + } 447 + EXPORT_SYMBOL(__asan_unregister_globals); 398 448 399 449 #define DEFINE_ASAN_LOAD_STORE(size) \ 400 450 void __asan_load##size(unsigned long addr) \
+25
mm/kasan/kasan.h
··· 11 11 #define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */ 12 12 #define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */ 13 13 #define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */ 14 + #define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */ 14 15 15 16 /* 16 17 * Stack redzone shadow values ··· 22 21 #define KASAN_STACK_RIGHT 0xF3 23 22 #define KASAN_STACK_PARTIAL 0xF4 24 23 24 + /* Don't break randconfig/all*config builds */ 25 + #ifndef KASAN_ABI_VERSION 26 + #define KASAN_ABI_VERSION 1 27 + #endif 25 28 26 29 struct kasan_access_info { 27 30 const void *access_addr; ··· 33 28 size_t access_size; 34 29 bool is_write; 35 30 unsigned long ip; 31 + }; 32 + 33 + /* The layout of struct dictated by compiler */ 34 + struct kasan_source_location { 35 + const char *filename; 36 + int line_no; 37 + int column_no; 38 + }; 39 + 40 + /* The layout of struct dictated by compiler */ 41 + struct kasan_global { 42 + const void *beg; /* Address of the beginning of the global variable. */ 43 + size_t size; /* Size of the global variable. */ 44 + size_t size_with_redzone; /* Size of the variable + size of the red zone. 32 bytes aligned */ 45 + const void *name; 46 + const void *module_name; /* Name of the module where the global variable is declared. */ 47 + unsigned long has_dynamic_init; /* This needed for C++ */ 48 + #if KASAN_ABI_VERSION >= 4 49 + struct kasan_source_location *location; 50 + #endif 36 51 }; 37 52 38 53 void kasan_report_error(struct kasan_access_info *info);
+22
mm/kasan/report.c
··· 23 23 #include <linux/types.h> 24 24 #include <linux/kasan.h> 25 25 26 + #include <asm/sections.h> 27 + 26 28 #include "kasan.h" 27 29 #include "../slab.h" 28 30 ··· 63 61 break; 64 62 case KASAN_PAGE_REDZONE: 65 63 case KASAN_KMALLOC_REDZONE: 64 + case KASAN_GLOBAL_REDZONE: 66 65 case 0 ... KASAN_SHADOW_SCALE_SIZE - 1: 67 66 bug_type = "out of bounds access"; 68 67 break; ··· 81 78 pr_err("%s of size %zu by task %s/%d\n", 82 79 info->is_write ? "Write" : "Read", 83 80 info->access_size, current->comm, task_pid_nr(current)); 81 + } 82 + 83 + static inline bool kernel_or_module_addr(const void *addr) 84 + { 85 + return (addr >= (void *)_stext && addr < (void *)_end) 86 + || (addr >= (void *)MODULES_VADDR 87 + && addr < (void *)MODULES_END); 88 + } 89 + 90 + static inline bool init_task_stack_addr(const void *addr) 91 + { 92 + return addr >= (void *)&init_thread_union.stack && 93 + (addr <= (void *)&init_thread_union.stack + 94 + sizeof(init_thread_union.stack)); 84 95 } 85 96 86 97 static void print_address_description(struct kasan_access_info *info) ··· 122 105 return; 123 106 } 124 107 dump_page(page, "kasan: bad access detected"); 108 + } 109 + 110 + if (kernel_or_module_addr(addr)) { 111 + if (!init_task_stack_addr(addr)) 112 + pr_err("Address belongs to variable %pS\n", addr); 125 113 } 126 114 127 115 dump_stack();
+1 -1
scripts/Makefile.kasan
··· 9 9 10 10 CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \ 11 11 -fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \ 12 - --param asan-stack=1 \ 12 + --param asan-stack=1 --param asan-globals=1 \ 13 13 --param asan-instrumentation-with-call-threshold=$(call_threshold)) 14 14 15 15 ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)