Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

LoongArch: Add KASAN (Kernel Address Sanitizer) support

1/8 of kernel addresses are reserved for shadow memory. But for LoongArch,
there are a lot of holes between different segments, and the valid address
space (256T available) is insufficient to map all these segments to kasan
shadow memory with the common formula provided by the kasan core, namely
(addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET

So LoongArch has an arch-specific mapping formula: different segments are
mapped individually, and only limited lengths of these specific segments
are mapped to shadow.

At the early boot stage, the whole shadow region is populated with just one
physical page (kasan_early_shadow_page). Later, this page is reused as a
read-only zero shadow for some memory that kasan currently doesn't track.
After mapping the physical memory, pages for shadow memory are allocated
and mapped.

Functions like memset()/memcpy()/memmove() do a lot of memory accesses.
If a bad pointer is passed to one of these functions, it is important
that it be caught. The compiler's instrumentation cannot do this since
these functions are written in assembly.

KASan replaces these memory functions with manually instrumented variants.
The original functions are declared as weak symbols so that the strong
definitions in mm/kasan/kasan.c can replace them. The original functions
have aliases with a '__' prefix in their names, so we can call the
non-instrumented variants if needed.

Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>

authored by

Qing Zhang and committed by
Huacai Chen
5aa4ac64 9fbcc076

+455 -13
+2 -2
Documentation/dev-tools/kasan.rst
··· 41 41 Architectures 42 42 ~~~~~~~~~~~~~ 43 43 44 - Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, and 45 - xtensa, and the tag-based KASAN modes are supported only on arm64. 44 + Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, xtensa, 45 + and loongarch, and the tag-based KASAN modes are supported only on arm64. 46 46 47 47 Compilers 48 48 ~~~~~~~~~
+1 -1
Documentation/features/debug/KASAN/arch-support.txt
··· 13 13 | csky: | TODO | 14 14 | hexagon: | TODO | 15 15 | ia64: | TODO | 16 - | loongarch: | TODO | 16 + | loongarch: | ok | 17 17 | m68k: | TODO | 18 18 | microblaze: | TODO | 19 19 | mips: | TODO |
+1 -1
Documentation/translations/zh_CN/dev-tools/kasan.rst
··· 42 42 体系架构 43 43 ~~~~~~~~ 44 44 45 - 在x86_64、arm、arm64、powerpc、riscv、s390和xtensa上支持通用KASAN, 45 + 在x86_64、arm、arm64、powerpc、riscv、s390、xtensa和loongarch上支持通用KASAN, 46 46 而基于标签的KASAN模式只在arm64上支持。 47 47 48 48 编译器
+7
arch/loongarch/Kconfig
··· 8 8 select ACPI_PPTT if ACPI 9 9 select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI 10 10 select ARCH_BINFMT_ELF_STATE 11 + select ARCH_DISABLE_KASAN_INLINE 11 12 select ARCH_ENABLE_MEMORY_HOTPLUG 12 13 select ARCH_ENABLE_MEMORY_HOTREMOVE 13 14 select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI ··· 93 92 select HAVE_ARCH_AUDITSYSCALL 94 93 select HAVE_ARCH_JUMP_LABEL 95 94 select HAVE_ARCH_JUMP_LABEL_RELATIVE 95 + select HAVE_ARCH_KASAN 96 96 select HAVE_ARCH_KFENCE 97 97 select HAVE_ARCH_KGDB if PERF_EVENTS 98 98 select HAVE_ARCH_MMAP_RND_BITS if MMU ··· 670 668 671 669 config ARCH_SUPPORTS_UPROBES 672 670 def_bool y 671 + 672 + config KASAN_SHADOW_OFFSET 673 + hex 674 + default 0x0 675 + depends on KASAN 673 676 674 677 menu "Power management options" 675 678
+3
arch/loongarch/Makefile
··· 84 84 endif 85 85 86 86 cflags-y += $(call cc-option, -mno-check-zero-division) 87 + 88 + ifndef CONFIG_KASAN 87 89 cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset 90 + endif 88 91 89 92 load-y = 0x9000000000200000 90 93 bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
+126
arch/loongarch/include/asm/kasan.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_KASAN_H 3 + #define __ASM_KASAN_H 4 + 5 + #ifndef __ASSEMBLY__ 6 + 7 + #include <linux/linkage.h> 8 + #include <linux/mmzone.h> 9 + #include <asm/addrspace.h> 10 + #include <asm/io.h> 11 + #include <asm/pgtable.h> 12 + 13 + #define __HAVE_ARCH_SHADOW_MAP 14 + 15 + #define KASAN_SHADOW_SCALE_SHIFT 3 16 + #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) 17 + 18 + #define XRANGE_SHIFT (48) 19 + 20 + /* Valid address length */ 21 + #define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) 22 + /* Used for taking out the valid address */ 23 + #define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0) 24 + /* One segment whole address space size */ 25 + #define XRANGE_SIZE (XRANGE_SHADOW_MASK + 1) 26 + 27 + /* 64-bit segment value. */ 28 + #define XKPRANGE_UC_SEG (0x8000) 29 + #define XKPRANGE_CC_SEG (0x9000) 30 + #define XKVRANGE_VC_SEG (0xffff) 31 + 32 + /* Cached */ 33 + #define XKPRANGE_CC_START CACHE_BASE 34 + #define XKPRANGE_CC_SIZE XRANGE_SIZE 35 + #define XKPRANGE_CC_KASAN_OFFSET (0) 36 + #define XKPRANGE_CC_SHADOW_SIZE (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) 37 + #define XKPRANGE_CC_SHADOW_END (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE) 38 + 39 + /* UnCached */ 40 + #define XKPRANGE_UC_START UNCACHE_BASE 41 + #define XKPRANGE_UC_SIZE XRANGE_SIZE 42 + #define XKPRANGE_UC_KASAN_OFFSET XKPRANGE_CC_SHADOW_END 43 + #define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) 44 + #define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE) 45 + 46 + /* VMALLOC (Cached or UnCached) */ 47 + #define XKVRANGE_VC_START MODULES_VADDR 48 + #define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE) 49 + #define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END 50 + #define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) 51 + #define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + 
XKVRANGE_VC_SHADOW_SIZE) 52 + 53 + /* KAsan shadow memory start right after vmalloc. */ 54 + #define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE) 55 + #define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET) 56 + #define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) 57 + 58 + #define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET) 59 + #define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET) 60 + #define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET) 61 + 62 + extern bool kasan_early_stage; 63 + extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; 64 + 65 + #define kasan_arch_is_ready kasan_arch_is_ready 66 + static __always_inline bool kasan_arch_is_ready(void) 67 + { 68 + return !kasan_early_stage; 69 + } 70 + 71 + static inline void *kasan_mem_to_shadow(const void *addr) 72 + { 73 + if (!kasan_arch_is_ready()) { 74 + return (void *)(kasan_early_shadow_page); 75 + } else { 76 + unsigned long maddr = (unsigned long)addr; 77 + unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff; 78 + unsigned long offset = 0; 79 + 80 + maddr &= XRANGE_SHADOW_MASK; 81 + switch (xrange) { 82 + case XKPRANGE_CC_SEG: 83 + offset = XKPRANGE_CC_SHADOW_OFFSET; 84 + break; 85 + case XKPRANGE_UC_SEG: 86 + offset = XKPRANGE_UC_SHADOW_OFFSET; 87 + break; 88 + case XKVRANGE_VC_SEG: 89 + offset = XKVRANGE_VC_SHADOW_OFFSET; 90 + break; 91 + default: 92 + WARN_ON(1); 93 + return NULL; 94 + } 95 + 96 + return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset); 97 + } 98 + } 99 + 100 + static inline const void *kasan_shadow_to_mem(const void *shadow_addr) 101 + { 102 + unsigned long addr = (unsigned long)shadow_addr; 103 + 104 + if (unlikely(addr > KASAN_SHADOW_END) || 105 + unlikely(addr < KASAN_SHADOW_START)) { 106 + WARN_ON(1); 107 + return NULL; 108 + } 109 + 110 + if (addr >= XKVRANGE_VC_SHADOW_OFFSET) 111 + return (void *)(((addr - 
XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START); 112 + else if (addr >= XKPRANGE_UC_SHADOW_OFFSET) 113 + return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START); 114 + else if (addr >= XKPRANGE_CC_SHADOW_OFFSET) 115 + return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START); 116 + else { 117 + WARN_ON(1); 118 + return NULL; 119 + } 120 + } 121 + 122 + void kasan_init(void); 123 + asmlinkage void kasan_early_init(void); 124 + 125 + #endif 126 + #endif
+7
arch/loongarch/include/asm/pgtable.h
··· 89 89 #endif 90 90 91 91 #define VMALLOC_START MODULES_END 92 + 93 + #ifndef CONFIG_KASAN 92 94 #define VMALLOC_END \ 93 95 (vm_map_base + \ 94 96 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE) 97 + #else 98 + #define VMALLOC_END \ 99 + (vm_map_base + \ 100 + min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE) 101 + #endif 95 102 96 103 #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK)) 97 104 #define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
+20
arch/loongarch/include/asm/string.h
··· 7 7 8 8 #define __HAVE_ARCH_MEMSET 9 9 extern void *memset(void *__s, int __c, size_t __count); 10 + extern void *__memset(void *__s, int __c, size_t __count); 10 11 11 12 #define __HAVE_ARCH_MEMCPY 12 13 extern void *memcpy(void *__to, __const__ void *__from, size_t __n); 14 + extern void *__memcpy(void *__to, __const__ void *__from, size_t __n); 13 15 14 16 #define __HAVE_ARCH_MEMMOVE 15 17 extern void *memmove(void *__dest, __const__ void *__src, size_t __n); 18 + extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); 19 + 20 + #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) 21 + 22 + /* 23 + * For files that are not instrumented (e.g. mm/slub.c) we 24 + * should use not instrumented version of mem* functions. 25 + */ 26 + 27 + #define memset(s, c, n) __memset(s, c, n) 28 + #define memcpy(dst, src, len) __memcpy(dst, src, len) 29 + #define memmove(dst, src, len) __memmove(dst, src, len) 30 + 31 + #ifndef __NO_FORTIFY 32 + #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ 33 + #endif 34 + 35 + #endif 16 36 17 37 #endif /* _ASM_STRING_H */
+6
arch/loongarch/kernel/Makefile
··· 34 34 CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE) 35 35 endif 36 36 37 + KASAN_SANITIZE_efi.o := n 38 + KASAN_SANITIZE_cpu-probe.o := n 39 + KASAN_SANITIZE_traps.o := n 40 + KASAN_SANITIZE_smp.o := n 41 + KASAN_SANITIZE_vdso.o := n 42 + 37 43 obj-$(CONFIG_MODULES) += module.o module-sections.o 38 44 obj-$(CONFIG_STACKTRACE) += stacktrace.o 39 45
+4
arch/loongarch/kernel/head.S
··· 104 104 105 105 #endif /* CONFIG_RELOCATABLE */ 106 106 107 + #ifdef CONFIG_KASAN 108 + bl kasan_early_init 109 + #endif 110 + 107 111 bl start_kernel 108 112 ASM_BUG() 109 113
+4
arch/loongarch/kernel/setup.c
··· 626 626 #endif 627 627 628 628 paging_init(); 629 + 630 + #ifdef CONFIG_KASAN 631 + kasan_init(); 632 + #endif 629 633 }
+7 -1
arch/loongarch/lib/memcpy.S
··· 10 10 #include <asm/cpu.h> 11 11 #include <asm/regdef.h> 12 12 13 + .section .noinstr.text, "ax" 14 + 13 15 SYM_FUNC_START(memcpy) 14 16 /* 15 17 * Some CPUs support hardware unaligned access ··· 19 17 ALTERNATIVE "b __memcpy_generic", \ 20 18 "b __memcpy_fast", CPU_FEATURE_UAL 21 19 SYM_FUNC_END(memcpy) 22 - _ASM_NOKPROBE(memcpy) 20 + SYM_FUNC_ALIAS(__memcpy, memcpy) 23 21 24 22 EXPORT_SYMBOL(memcpy) 23 + EXPORT_SYMBOL(__memcpy) 24 + 25 + _ASM_NOKPROBE(memcpy) 26 + _ASM_NOKPROBE(__memcpy) 25 27 26 28 /* 27 29 * void *__memcpy_generic(void *dst, const void *src, size_t n)
+13 -7
arch/loongarch/lib/memmove.S
··· 10 10 #include <asm/cpu.h> 11 11 #include <asm/regdef.h> 12 12 13 + .section .noinstr.text, "ax" 14 + 13 15 SYM_FUNC_START(memmove) 14 - blt a0, a1, memcpy /* dst < src, memcpy */ 15 - blt a1, a0, rmemcpy /* src < dst, rmemcpy */ 16 - jr ra /* dst == src, return */ 16 + blt a0, a1, __memcpy /* dst < src, memcpy */ 17 + blt a1, a0, __rmemcpy /* src < dst, rmemcpy */ 18 + jr ra /* dst == src, return */ 17 19 SYM_FUNC_END(memmove) 18 - _ASM_NOKPROBE(memmove) 20 + SYM_FUNC_ALIAS(__memmove, memmove) 19 21 20 22 EXPORT_SYMBOL(memmove) 23 + EXPORT_SYMBOL(__memmove) 21 24 22 - SYM_FUNC_START(rmemcpy) 25 + _ASM_NOKPROBE(memmove) 26 + _ASM_NOKPROBE(__memmove) 27 + 28 + SYM_FUNC_START(__rmemcpy) 23 29 /* 24 30 * Some CPUs support hardware unaligned access 25 31 */ 26 32 ALTERNATIVE "b __rmemcpy_generic", \ 27 33 "b __rmemcpy_fast", CPU_FEATURE_UAL 28 - SYM_FUNC_END(rmemcpy) 29 - _ASM_NOKPROBE(rmemcpy) 34 + SYM_FUNC_END(__rmemcpy) 35 + _ASM_NOKPROBE(__rmemcpy) 30 36 31 37 /* 32 38 * void *__rmemcpy_generic(void *dst, const void *src, size_t n)
+7 -1
arch/loongarch/lib/memset.S
··· 16 16 bstrins.d \r0, \r0, 63, 32 17 17 .endm 18 18 19 + .section .noinstr.text, "ax" 20 + 19 21 SYM_FUNC_START(memset) 20 22 /* 21 23 * Some CPUs support hardware unaligned access ··· 25 23 ALTERNATIVE "b __memset_generic", \ 26 24 "b __memset_fast", CPU_FEATURE_UAL 27 25 SYM_FUNC_END(memset) 28 - _ASM_NOKPROBE(memset) 26 + SYM_FUNC_ALIAS(__memset, memset) 29 27 30 28 EXPORT_SYMBOL(memset) 29 + EXPORT_SYMBOL(__memset) 30 + 31 + _ASM_NOKPROBE(memset) 32 + _ASM_NOKPROBE(__memset) 31 33 32 34 /* 33 35 * void *__memset_generic(void *s, int c, size_t n)
+3
arch/loongarch/mm/Makefile
··· 7 7 fault.o ioremap.o maccess.o mmap.o pgtable.o page.o 8 8 9 9 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 10 + obj-$(CONFIG_KASAN) += kasan_init.o 11 + 12 + KASAN_SANITIZE_kasan_init.o := n
+243
arch/loongarch/mm/kasan_init.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2023 Loongson Technology Corporation Limited 4 + */ 5 + #define pr_fmt(fmt) "kasan: " fmt 6 + #include <linux/kasan.h> 7 + #include <linux/memblock.h> 8 + #include <linux/sched/task.h> 9 + 10 + #include <asm/tlbflush.h> 11 + #include <asm/pgalloc.h> 12 + #include <asm-generic/sections.h> 13 + 14 + static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); 15 + 16 + #ifdef __PAGETABLE_PUD_FOLDED 17 + #define __p4d_none(early, p4d) (0) 18 + #else 19 + #define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \ 20 + (__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud))) 21 + #endif 22 + 23 + #ifdef __PAGETABLE_PMD_FOLDED 24 + #define __pud_none(early, pud) (0) 25 + #else 26 + #define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \ 27 + (__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd))) 28 + #endif 29 + 30 + #define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \ 31 + (__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte))) 32 + 33 + #define __pte_none(early, pte) (early ? pte_none(pte) : \ 34 + ((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page))) 35 + 36 + bool kasan_early_stage = true; 37 + 38 + /* 39 + * Alloc memory for shadow memory page table. 40 + */ 41 + static phys_addr_t __init kasan_alloc_zeroed_page(int node) 42 + { 43 + void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, 44 + __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node); 45 + if (!p) 46 + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", 47 + __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS)); 48 + 49 + return __pa(p); 50 + } 51 + 52 + static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early) 53 + { 54 + if (__pmd_none(early, READ_ONCE(*pmdp))) { 55 + phys_addr_t pte_phys = early ? 
56 + __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node); 57 + if (!early) 58 + memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte)); 59 + pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys)); 60 + } 61 + 62 + return pte_offset_kernel(pmdp, addr); 63 + } 64 + 65 + static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early) 66 + { 67 + if (__pud_none(early, READ_ONCE(*pudp))) { 68 + phys_addr_t pmd_phys = early ? 69 + __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node); 70 + if (!early) 71 + memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd)); 72 + pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys)); 73 + } 74 + 75 + return pmd_offset(pudp, addr); 76 + } 77 + 78 + static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early) 79 + { 80 + if (__p4d_none(early, READ_ONCE(*p4dp))) { 81 + phys_addr_t pud_phys = early ? 82 + __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node); 83 + if (!early) 84 + memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud)); 85 + p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys)); 86 + } 87 + 88 + return pud_offset(p4dp, addr); 89 + } 90 + 91 + static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, 92 + unsigned long end, int node, bool early) 93 + { 94 + unsigned long next; 95 + pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early); 96 + 97 + do { 98 + phys_addr_t page_phys = early ? 
99 + __pa_symbol(kasan_early_shadow_page) 100 + : kasan_alloc_zeroed_page(node); 101 + next = addr + PAGE_SIZE; 102 + set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); 103 + } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep))); 104 + } 105 + 106 + static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, 107 + unsigned long end, int node, bool early) 108 + { 109 + unsigned long next; 110 + pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early); 111 + 112 + do { 113 + next = pmd_addr_end(addr, end); 114 + kasan_pte_populate(pmdp, addr, next, node, early); 115 + } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp))); 116 + } 117 + 118 + static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr, 119 + unsigned long end, int node, bool early) 120 + { 121 + unsigned long next; 122 + pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early); 123 + 124 + do { 125 + next = pud_addr_end(addr, end); 126 + kasan_pmd_populate(pudp, addr, next, node, early); 127 + } while (pudp++, addr = next, addr != end); 128 + } 129 + 130 + static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr, 131 + unsigned long end, int node, bool early) 132 + { 133 + unsigned long next; 134 + p4d_t *p4dp = p4d_offset(pgdp, addr); 135 + 136 + do { 137 + next = p4d_addr_end(addr, end); 138 + kasan_pud_populate(p4dp, addr, next, node, early); 139 + } while (p4dp++, addr = next, addr != end); 140 + } 141 + 142 + static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, 143 + int node, bool early) 144 + { 145 + unsigned long next; 146 + pgd_t *pgdp; 147 + 148 + pgdp = pgd_offset_k(addr); 149 + 150 + do { 151 + next = pgd_addr_end(addr, end); 152 + kasan_p4d_populate(pgdp, addr, next, node, early); 153 + } while (pgdp++, addr = next, addr != end); 154 + 155 + } 156 + 157 + /* Set up full kasan mappings, ensuring that the mapped pages are zeroed */ 158 + static void __init 
kasan_map_populate(unsigned long start, unsigned long end, 159 + int node) 160 + { 161 + kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false); 162 + } 163 + 164 + asmlinkage void __init kasan_early_init(void) 165 + { 166 + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); 167 + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); 168 + } 169 + 170 + static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval) 171 + { 172 + WRITE_ONCE(*pgdp, pgdval); 173 + } 174 + 175 + static void __init clear_pgds(unsigned long start, unsigned long end) 176 + { 177 + /* 178 + * Remove references to kasan page tables from 179 + * swapper_pg_dir. pgd_clear() can't be used 180 + * here because it's nop on 2,3-level pagetable setups 181 + */ 182 + for (; start < end; start += PGDIR_SIZE) 183 + kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0)); 184 + } 185 + 186 + void __init kasan_init(void) 187 + { 188 + u64 i; 189 + phys_addr_t pa_start, pa_end; 190 + 191 + /* 192 + * PGD was populated as invalid_pmd_table or invalid_pud_table 193 + * in pagetable_init() which depends on how many levels of page 194 + * table you are using, but we had to clean the gpd of kasan 195 + * shadow memory, as the pgd value is none-zero. 196 + * The assertion pgd_none is going to be false and the formal populate 197 + * afterwards is not going to create any new pgd at all. 
198 + */ 199 + memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir)); 200 + csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH); 201 + local_flush_tlb_all(); 202 + 203 + clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 204 + 205 + /* Maps everything to a single page of zeroes */ 206 + kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true); 207 + 208 + kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), 209 + kasan_mem_to_shadow((void *)KFENCE_AREA_END)); 210 + 211 + kasan_early_stage = false; 212 + 213 + /* Populate the linear mapping */ 214 + for_each_mem_range(i, &pa_start, &pa_end) { 215 + void *start = (void *)phys_to_virt(pa_start); 216 + void *end = (void *)phys_to_virt(pa_end); 217 + 218 + if (start >= end) 219 + break; 220 + 221 + kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), 222 + (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE); 223 + } 224 + 225 + /* Populate modules mapping */ 226 + kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR), 227 + (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE); 228 + /* 229 + * KAsan may reuse the contents of kasan_early_shadow_pte directly, so we 230 + * should make sure that it maps the zero page read-only. 231 + */ 232 + for (i = 0; i < PTRS_PER_PTE; i++) 233 + set_pte(&kasan_early_shadow_pte[i], 234 + pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO)); 235 + 236 + memset(kasan_early_shadow_page, 0, PAGE_SIZE); 237 + csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH); 238 + local_flush_tlb_all(); 239 + 240 + /* At this point kasan is fully initialized. Enable error messages */ 241 + init_task.kasan_depth = 0; 242 + pr_info("KernelAddressSanitizer initialized.\n"); 243 + }
+1
arch/loongarch/vdso/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 # Objects to go into the VDSO. 3 3 4 + KASAN_SANITIZE := n 4 5 KCOV_INSTRUMENT := n 5 6 6 7 # Include the generic Makefile to check the built vdso.