Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kasan, arm64: expand CONFIG_KASAN checks

Some #ifdef CONFIG_KASAN checks are only relevant for software KASAN modes
(either related to shadow memory or compiler instrumentation). Expand
those into CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS.

Link: https://lkml.kernel.org/r/e6971e432dbd72bb897ff14134ebb7e169bdcf0c.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Marco Elver <elver@google.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Andrey Konovalov and committed by Linus Torvalds
0fea6e9a 8a494023

+36 -25
+1 -1
arch/arm64/Kconfig
··· 334 334 335 335 config KASAN_SHADOW_OFFSET 336 336 hex 337 - depends on KASAN 337 + depends on KASAN_GENERIC || KASAN_SW_TAGS 338 338 default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS 339 339 default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS 340 340 default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
+1 -1
arch/arm64/Makefile
··· 137 137 138 138 ifeq ($(CONFIG_KASAN_SW_TAGS), y) 139 139 KASAN_SHADOW_SCALE_SHIFT := 4 140 - else 140 + else ifeq ($(CONFIG_KASAN_GENERIC), y) 141 141 KASAN_SHADOW_SCALE_SHIFT := 3 142 142 endif 143 143
+1 -1
arch/arm64/include/asm/assembler.h
··· 473 473 #define NOKPROBE(x) 474 474 #endif 475 475 476 - #ifdef CONFIG_KASAN 476 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 477 477 #define EXPORT_SYMBOL_NOKASAN(name) 478 478 #else 479 479 #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
+1 -1
arch/arm64/include/asm/memory.h
··· 72 72 * address space for the shadow region respectively. They can bloat the stack 73 73 * significantly, so double the (minimum) stack size when they are in use. 74 74 */ 75 - #ifdef CONFIG_KASAN 75 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 76 76 #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) 77 77 #define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \ 78 78 + KASAN_SHADOW_OFFSET)
+3 -2
arch/arm64/include/asm/string.h
··· 5 5 #ifndef __ASM_STRING_H 6 6 #define __ASM_STRING_H 7 7 8 - #ifndef CONFIG_KASAN 8 + #if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) 9 9 #define __HAVE_ARCH_STRRCHR 10 10 extern char *strrchr(const char *, int c); 11 11 ··· 48 48 void memcpy_flushcache(void *dst, const void *src, size_t cnt); 49 49 #endif 50 50 51 - #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) 51 + #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ 52 + !defined(__SANITIZE_ADDRESS__) 52 53 53 54 /* 54 55 * For files that are not instrumented (e.g. mm/slub.c) we
+1 -1
arch/arm64/kernel/head.S
··· 433 433 bl __pi_memset 434 434 dsb ishst // Make zero page visible to PTW 435 435 436 - #ifdef CONFIG_KASAN 436 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 437 437 bl kasan_early_init 438 438 #endif 439 439 #ifdef CONFIG_RANDOMIZE_BASE
+1 -1
arch/arm64/kernel/image-vars.h
··· 37 37 __efistub_strrchr = __pi_strrchr; 38 38 __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc; 39 39 40 - #ifdef CONFIG_KASAN 40 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 41 41 __efistub___memcpy = __pi_memcpy; 42 42 __efistub___memmove = __pi_memmove; 43 43 __efistub___memset = __pi_memset;
+2 -1
arch/arm64/kernel/kaslr.c
··· 161 161 /* use the top 16 bits to randomize the linear region */ 162 162 memstart_offset_seed = seed >> 48; 163 163 164 - if (IS_ENABLED(CONFIG_KASAN)) 164 + if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 165 + IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 165 166 /* 166 167 * KASAN does not expect the module region to intersect the 167 168 * vmalloc region, since shadow memory is allocated for each
+4 -2
arch/arm64/kernel/module.c
··· 30 30 if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) 31 31 gfp_mask |= __GFP_NOWARN; 32 32 33 - if (IS_ENABLED(CONFIG_KASAN)) 33 + if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 34 + IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 34 35 /* don't exceed the static module region - see below */ 35 36 module_alloc_end = MODULES_END; 36 37 ··· 40 39 NUMA_NO_NODE, __builtin_return_address(0)); 41 40 42 41 if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && 43 - !IS_ENABLED(CONFIG_KASAN)) 42 + !IS_ENABLED(CONFIG_KASAN_GENERIC) && 43 + !IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 44 44 /* 45 45 * KASAN can only deal with module allocations being served 46 46 * from the reserved module region, since the remainder of
+3 -3
arch/arm64/mm/ptdump.c
··· 29 29 enum address_markers_idx { 30 30 PAGE_OFFSET_NR = 0, 31 31 PAGE_END_NR, 32 - #ifdef CONFIG_KASAN 32 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 33 33 KASAN_START_NR, 34 34 #endif 35 35 }; ··· 37 37 static struct addr_marker address_markers[] = { 38 38 { PAGE_OFFSET, "Linear Mapping start" }, 39 39 { 0 /* PAGE_END */, "Linear Mapping end" }, 40 - #ifdef CONFIG_KASAN 40 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 41 41 { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, 42 42 { KASAN_SHADOW_END, "Kasan shadow end" }, 43 43 #endif ··· 383 383 static int ptdump_init(void) 384 384 { 385 385 address_markers[PAGE_END_NR].start_address = PAGE_END; 386 - #ifdef CONFIG_KASAN 386 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 387 387 address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; 388 388 #endif 389 389 ptdump_initialize();
+1 -1
include/linux/kasan-checks.h
··· 9 9 * even in compilation units that selectively disable KASAN, but must use KASAN 10 10 * to validate access to an address. Never use these in header files! 11 11 */ 12 - #ifdef CONFIG_KASAN 12 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 13 13 bool __kasan_check_read(const volatile void *p, unsigned int size); 14 14 bool __kasan_check_write(const volatile void *p, unsigned int size); 15 15 #else
+4 -3
include/linux/kasan.h
··· 238 238 239 239 #endif /* CONFIG_KASAN_VMALLOC */ 240 240 241 - #if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) 241 + #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ 242 + !defined(CONFIG_KASAN_VMALLOC) 242 243 243 244 /* 244 245 * These functions provide a special case to support backing module ··· 249 248 int kasan_module_alloc(void *addr, size_t size); 250 249 void kasan_free_shadow(const struct vm_struct *vm); 251 250 252 - #else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ 251 + #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ 253 252 254 253 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } 255 254 static inline void kasan_free_shadow(const struct vm_struct *vm) {} 256 255 257 - #endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */ 256 + #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ 258 257 259 258 #ifdef CONFIG_KASAN_INLINE 260 259 void kasan_non_canonical_hook(unsigned long addr);
+2 -1
include/linux/moduleloader.h
··· 96 96 /* Any cleanup before freeing mod->module_init */ 97 97 void module_arch_freeing_init(struct module *mod); 98 98 99 - #if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC) 99 + #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ 100 + !defined(CONFIG_KASAN_VMALLOC) 100 101 #include <linux/kasan.h> 101 102 #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) 102 103 #else
+1 -1
include/linux/string.h
··· 267 267 268 268 #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) 269 269 270 - #ifdef CONFIG_KASAN 270 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 271 271 extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); 272 272 extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); 273 273 extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+8 -5
mm/ptdump.c
··· 4 4 #include <linux/ptdump.h> 5 5 #include <linux/kasan.h> 6 6 7 - #ifdef CONFIG_KASAN 7 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 8 8 /* 9 9 * This is an optimization for KASAN=y case. Since all kasan page tables 10 10 * eventually point to the kasan_early_shadow_page we could call note_page() ··· 31 31 struct ptdump_state *st = walk->private; 32 32 pgd_t val = READ_ONCE(*pgd); 33 33 34 - #if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN) 34 + #if CONFIG_PGTABLE_LEVELS > 4 && \ 35 + (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) 35 36 if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d))) 36 37 return note_kasan_page_table(walk, addr); 37 38 #endif ··· 52 51 struct ptdump_state *st = walk->private; 53 52 p4d_t val = READ_ONCE(*p4d); 54 53 55 - #if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN) 54 + #if CONFIG_PGTABLE_LEVELS > 3 && \ 55 + (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) 56 56 if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud))) 57 57 return note_kasan_page_table(walk, addr); 58 58 #endif ··· 73 71 struct ptdump_state *st = walk->private; 74 72 pud_t val = READ_ONCE(*pud); 75 73 76 - #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN) 74 + #if CONFIG_PGTABLE_LEVELS > 2 && \ 75 + (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) 77 76 if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd))) 78 77 return note_kasan_page_table(walk, addr); 79 78 #endif ··· 94 91 struct ptdump_state *st = walk->private; 95 92 pmd_t val = READ_ONCE(*pmd); 96 93 97 - #if defined(CONFIG_KASAN) 94 + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 98 95 if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte))) 99 96 return note_kasan_page_table(walk, addr); 100 97 #endif
+2
scripts/Makefile.lib
··· 148 148 # we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE) 149 149 # 150 150 ifeq ($(CONFIG_KASAN),y) 151 + ifneq ($(CONFIG_KASAN_HW_TAGS),y) 151 152 _c_flags += $(if $(patsubst n%,, \ 152 153 $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ 153 154 $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)) 155 + endif 154 156 endif 155 157 156 158 ifeq ($(CONFIG_UBSAN),y)