Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kasan: introduce ARCH_DEFER_KASAN and unify static key across modes

Patch series "kasan: unify kasan_enabled() and remove arch-specific
implementations", v6.

This patch series addresses the fragmentation in KASAN initialization
across architectures by introducing a unified approach that eliminates
duplicate static keys and arch-specific kasan_arch_is_ready()
implementations.

The core issue is that different architectures have inconsistent approaches
to KASAN readiness tracking:
- PowerPC, LoongArch, and UML each implement their own kasan_arch_is_ready()
- Only HW_TAGS mode had a unified static key (kasan_flag_enabled)
- Generic and SW_TAGS modes relied on arch-specific solutions
or always-on behavior


This patch (of 2):

Introduce CONFIG_ARCH_DEFER_KASAN to identify architectures [1] that need
to defer KASAN initialization until shadow memory is properly set up, and
unify the static key infrastructure across all KASAN modes.

[1] PowerPC, UML, and LoongArch select ARCH_DEFER_KASAN.

The core issue is that different architectures have inconsistent approaches
to KASAN readiness tracking:
- PowerPC, LoongArch, and UML each implement their own
kasan_arch_is_ready()
- Only HW_TAGS mode had a unified static key (kasan_flag_enabled)
- Generic and SW_TAGS modes relied on arch-specific solutions or always-on
behavior

This patch addresses the fragmentation in KASAN initialization across
architectures by introducing a unified approach that eliminates duplicate
static keys and arch-specific kasan_arch_is_ready() implementations.

Let's replace kasan_arch_is_ready() with existing kasan_enabled() check,
which examines the static key being enabled if arch selects
ARCH_DEFER_KASAN or has HW_TAGS mode support. For other architectures,
kasan_enabled() checks enablement at compile time.

Now KASAN users can use a single kasan_enabled() check everywhere.

Link: https://lkml.kernel.org/r/20250810125746.1105476-1-snovitoll@gmail.com
Link: https://lkml.kernel.org/r/20250810125746.1105476-2-snovitoll@gmail.com
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217049
Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> #powerpc
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: David Gow <davidgow@google.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@loongson.cn>
Cc: Marco Elver <elver@google.com>
Cc: Qing Zhang <zhangqing@loongson.cn>
Cc: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Sabyrzhan Tasbolatov and committed by
Andrew Morton
1e338f4d bc9950b5

+106 -70
+1
arch/loongarch/Kconfig
··· 9 9 select ACPI_PPTT if ACPI 10 10 select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI 11 11 select ARCH_BINFMT_ELF_STATE 12 + select ARCH_NEEDS_DEFER_KASAN 12 13 select ARCH_DISABLE_KASAN_INLINE 13 14 select ARCH_ENABLE_MEMORY_HOTPLUG 14 15 select ARCH_ENABLE_MEMORY_HOTREMOVE
-7
arch/loongarch/include/asm/kasan.h
··· 66 66 #define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET) 67 67 #define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET) 68 68 69 - extern bool kasan_early_stage; 70 69 extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; 71 70 72 71 #define kasan_mem_to_shadow kasan_mem_to_shadow ··· 73 74 74 75 #define kasan_shadow_to_mem kasan_shadow_to_mem 75 76 const void *kasan_shadow_to_mem(const void *shadow_addr); 76 - 77 - #define kasan_arch_is_ready kasan_arch_is_ready 78 - static __always_inline bool kasan_arch_is_ready(void) 79 - { 80 - return !kasan_early_stage; 81 - } 82 77 83 78 #define addr_has_metadata addr_has_metadata 84 79 static __always_inline bool addr_has_metadata(const void *addr)
+3 -5
arch/loongarch/mm/kasan_init.c
··· 40 40 #define __pte_none(early, pte) (early ? pte_none(pte) : \ 41 41 ((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page))) 42 42 43 - bool kasan_early_stage = true; 44 - 45 43 void *kasan_mem_to_shadow(const void *addr) 46 44 { 47 - if (!kasan_arch_is_ready()) { 45 + if (!kasan_enabled()) { 48 46 return (void *)(kasan_early_shadow_page); 49 47 } else { 50 48 unsigned long maddr = (unsigned long)addr; ··· 296 298 kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), 297 299 kasan_mem_to_shadow((void *)KFENCE_AREA_END)); 298 300 299 - kasan_early_stage = false; 301 + /* Enable KASAN here before kasan_mem_to_shadow(). */ 302 + kasan_init_generic(); 300 303 301 304 /* Populate the linear mapping */ 302 305 for_each_mem_range(i, &pa_start, &pa_end) { ··· 328 329 329 330 /* At this point kasan is fully initialized. Enable error messages */ 330 331 init_task.kasan_depth = 0; 331 - pr_info("KernelAddressSanitizer initialized.\n"); 332 332 }
+1
arch/powerpc/Kconfig
··· 122 122 # Please keep this list sorted alphabetically. 123 123 # 124 124 select ARCH_32BIT_OFF_T if PPC32 125 + select ARCH_NEEDS_DEFER_KASAN if PPC_RADIX_MMU 125 126 select ARCH_DISABLE_KASAN_INLINE if PPC_RADIX_MMU 126 127 select ARCH_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE 127 128 select ARCH_ENABLE_MEMORY_HOTPLUG
-12
arch/powerpc/include/asm/kasan.h
··· 53 53 #endif 54 54 55 55 #ifdef CONFIG_KASAN 56 - #ifdef CONFIG_PPC_BOOK3S_64 57 - DECLARE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key); 58 - 59 - static __always_inline bool kasan_arch_is_ready(void) 60 - { 61 - if (static_branch_likely(&powerpc_kasan_enabled_key)) 62 - return true; 63 - return false; 64 - } 65 - 66 - #define kasan_arch_is_ready kasan_arch_is_ready 67 - #endif 68 56 69 57 void kasan_early_init(void); 70 58 void kasan_mmu_init(void);
+1 -1
arch/powerpc/mm/kasan/init_32.c
··· 165 165 166 166 /* At this point kasan is fully initialized. Enable error messages */ 167 167 init_task.kasan_depth = 0; 168 - pr_info("KASAN init done\n"); 168 + kasan_init_generic(); 169 169 } 170 170 171 171 void __init kasan_late_init(void)
+1 -1
arch/powerpc/mm/kasan/init_book3e_64.c
··· 127 127 128 128 /* Enable error messages */ 129 129 init_task.kasan_depth = 0; 130 - pr_info("KASAN init done\n"); 130 + kasan_init_generic(); 131 131 } 132 132 133 133 void __init kasan_late_init(void) { }
+1 -5
arch/powerpc/mm/kasan/init_book3s_64.c
··· 19 19 #include <linux/memblock.h> 20 20 #include <asm/pgalloc.h> 21 21 22 - DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key); 23 - 24 22 static void __init kasan_init_phys_region(void *start, void *end) 25 23 { 26 24 unsigned long k_start, k_end, k_cur; ··· 90 92 */ 91 93 memset(kasan_early_shadow_page, 0, PAGE_SIZE); 92 94 93 - static_branch_inc(&powerpc_kasan_enabled_key); 94 - 95 95 /* Enable error messages */ 96 96 init_task.kasan_depth = 0; 97 - pr_info("KASAN init done\n"); 97 + kasan_init_generic(); 98 98 } 99 99 100 100 void __init kasan_early_init(void) { }
+1
arch/um/Kconfig
··· 5 5 config UML 6 6 bool 7 7 default y 8 + select ARCH_NEEDS_DEFER_KASAN if STATIC_LINK 8 9 select ARCH_WANTS_DYNAMIC_TASK_STRUCT 9 10 select ARCH_HAS_CACHE_LINE_SIZE 10 11 select ARCH_HAS_CPU_FINALIZE_INIT
+2 -3
arch/um/include/asm/kasan.h
··· 24 24 25 25 #ifdef CONFIG_KASAN 26 26 void kasan_init(void); 27 - extern int kasan_um_is_ready; 28 27 29 - #ifdef CONFIG_STATIC_LINK 30 - #define kasan_arch_is_ready() (kasan_um_is_ready) 28 + #if defined(CONFIG_STATIC_LINK) && defined(CONFIG_KASAN_INLINE) 29 + #error UML does not work in KASAN_INLINE mode with STATIC_LINK enabled! 31 30 #endif 32 31 #else 33 32 static inline void kasan_init(void) { }
+10 -3
arch/um/kernel/mem.c
··· 21 21 #include <os.h> 22 22 #include <um_malloc.h> 23 23 #include <linux/sched/task.h> 24 + #include <linux/kasan.h> 24 25 25 26 #ifdef CONFIG_KASAN 26 - int kasan_um_is_ready; 27 - void kasan_init(void) 27 + void __init kasan_init(void) 28 28 { 29 29 /* 30 30 * kasan_map_memory will map all of the required address space and ··· 32 32 */ 33 33 kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE); 34 34 init_task.kasan_depth = 0; 35 - kasan_um_is_ready = true; 35 + /* 36 + * Since kasan_init() is called before main(), 37 + * KASAN is initialized but the enablement is deferred after 38 + * jump_label_init(). See arch_mm_preinit(). 39 + */ 36 40 } 37 41 38 42 static void (*kasan_init_ptr)(void) ··· 62 58 63 59 void __init arch_mm_preinit(void) 64 60 { 61 + /* Safe to call after jump_label_init(). Enables KASAN. */ 62 + kasan_init_generic(); 63 + 65 64 /* clear the zero-page */ 66 65 memset(empty_zero_page, 0, PAGE_SIZE); 67 66
+23 -9
include/linux/kasan-enabled.h
··· 4 4 5 5 #include <linux/static_key.h> 6 6 7 - #ifdef CONFIG_KASAN_HW_TAGS 8 - 7 + #if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS) 8 + /* 9 + * Global runtime flag for KASAN modes that need runtime control. 10 + * Used by ARCH_DEFER_KASAN architectures and HW_TAGS mode. 11 + */ 9 12 DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); 10 13 14 + /* 15 + * Runtime control for shadow memory initialization or HW_TAGS mode. 16 + * Uses static key for architectures that need deferred KASAN or HW_TAGS. 17 + */ 11 18 static __always_inline bool kasan_enabled(void) 12 19 { 13 20 return static_branch_likely(&kasan_flag_enabled); 14 21 } 15 22 16 - static inline bool kasan_hw_tags_enabled(void) 23 + static inline void kasan_enable(void) 17 24 { 18 - return kasan_enabled(); 25 + static_branch_enable(&kasan_flag_enabled); 19 26 } 20 - 21 - #else /* CONFIG_KASAN_HW_TAGS */ 22 - 23 - static inline bool kasan_enabled(void) 27 + #else 28 + /* For architectures that can enable KASAN early, use compile-time check. */ 29 + static __always_inline bool kasan_enabled(void) 24 30 { 25 31 return IS_ENABLED(CONFIG_KASAN); 26 32 } 27 33 34 + static inline void kasan_enable(void) {} 35 + #endif /* CONFIG_ARCH_DEFER_KASAN || CONFIG_KASAN_HW_TAGS */ 36 + 37 + #ifdef CONFIG_KASAN_HW_TAGS 38 + static inline bool kasan_hw_tags_enabled(void) 39 + { 40 + return kasan_enabled(); 41 + } 42 + #else 28 43 static inline bool kasan_hw_tags_enabled(void) 29 44 { 30 45 return false; 31 46 } 32 - 33 47 #endif /* CONFIG_KASAN_HW_TAGS */ 34 48 35 49 #endif /* LINUX_KASAN_ENABLED_H */
+6
include/linux/kasan.h
··· 543 543 544 544 #endif /* CONFIG_KASAN_HW_TAGS */ 545 545 546 + #ifdef CONFIG_KASAN_GENERIC 547 + void __init kasan_init_generic(void); 548 + #else 549 + static inline void kasan_init_generic(void) { } 550 + #endif 551 + 546 552 #ifdef CONFIG_KASAN_SW_TAGS 547 553 void __init kasan_init_sw_tags(void); 548 554 #else
+12
lib/Kconfig.kasan
··· 19 19 Disables both inline and stack instrumentation. Selected by 20 20 architectures that do not support these instrumentation types. 21 21 22 + config ARCH_NEEDS_DEFER_KASAN 23 + bool 24 + 25 + config ARCH_DEFER_KASAN 26 + def_bool y 27 + depends on KASAN && ARCH_NEEDS_DEFER_KASAN 28 + help 29 + Architectures should select this if they need to defer KASAN 30 + initialization until shadow memory is properly set up. This 31 + enables runtime control via static keys. Otherwise, KASAN uses 32 + compile-time constants for better performance. 33 + 22 34 config CC_HAS_KASAN_GENERIC 23 35 def_bool $(cc-option, -fsanitize=kernel-address) 24 36
+13 -4
mm/kasan/common.c
··· 32 32 #include "kasan.h" 33 33 #include "../slab.h" 34 34 35 + #if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS) 36 + /* 37 + * Definition of the unified static key declared in kasan-enabled.h. 38 + * This provides consistent runtime enable/disable across KASAN modes. 39 + */ 40 + DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled); 41 + EXPORT_SYMBOL_GPL(kasan_flag_enabled); 42 + #endif 43 + 35 44 struct slab *kasan_addr_to_slab(const void *addr) 36 45 { 37 46 if (virt_addr_valid(addr)) ··· 255 246 bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object, 256 247 unsigned long ip) 257 248 { 258 - if (!kasan_arch_is_ready() || is_kfence_address(object)) 249 + if (is_kfence_address(object)) 259 250 return false; 260 251 return check_slab_allocation(cache, object, ip); 261 252 } ··· 263 254 bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init, 264 255 bool still_accessible) 265 256 { 266 - if (!kasan_arch_is_ready() || is_kfence_address(object)) 257 + if (is_kfence_address(object)) 267 258 return false; 268 259 269 260 /* ··· 302 293 303 294 static inline bool check_page_allocation(void *ptr, unsigned long ip) 304 295 { 305 - if (!kasan_arch_is_ready()) 296 + if (!kasan_enabled()) 306 297 return false; 307 298 308 299 if (ptr != page_address(virt_to_head_page(ptr))) { ··· 531 522 return true; 532 523 } 533 524 534 - if (is_kfence_address(ptr) || !kasan_arch_is_ready()) 525 + if (is_kfence_address(ptr)) 535 526 return true; 536 527 537 528 slab = folio_slab(folio);
+15 -4
mm/kasan/generic.c
··· 37 37 #include "../slab.h" 38 38 39 39 /* 40 + * Initialize Generic KASAN and enable runtime checks. 41 + * This should be called from arch kasan_init() once shadow memory is ready. 42 + */ 43 + void __init kasan_init_generic(void) 44 + { 45 + kasan_enable(); 46 + 47 + pr_info("KernelAddressSanitizer initialized (generic)\n"); 48 + } 49 + 50 + /* 40 51 * All functions below always inlined so compiler could 41 52 * perform better optimizations in each of __asan_loadX/__assn_storeX 42 53 * depending on memory access size X. ··· 176 165 size_t size, bool write, 177 166 unsigned long ret_ip) 178 167 { 179 - if (!kasan_arch_is_ready()) 168 + if (!kasan_enabled()) 180 169 return true; 181 170 182 171 if (unlikely(size == 0)) ··· 204 193 { 205 194 s8 shadow_byte; 206 195 207 - if (!kasan_arch_is_ready()) 196 + if (!kasan_enabled()) 208 197 return true; 209 198 210 199 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr)); ··· 506 495 507 496 static void release_free_meta(const void *object, struct kasan_free_meta *meta) 508 497 { 509 - if (!kasan_arch_is_ready()) 498 + if (!kasan_enabled()) 510 499 return; 511 500 512 501 /* Check if free meta is valid. */ ··· 573 562 kasan_save_track(&alloc_meta->alloc_track, flags); 574 563 } 575 564 576 - void kasan_save_free_info(struct kmem_cache *cache, void *object) 565 + void __kasan_save_free_info(struct kmem_cache *cache, void *object) 577 566 { 578 567 struct kasan_free_meta *free_meta; 579 568
+1 -8
mm/kasan/hw_tags.c
··· 46 46 static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata; 47 47 48 48 /* 49 - * Whether KASAN is enabled at all. 50 - * The value remains false until KASAN is initialized by kasan_init_hw_tags(). 51 - */ 52 - DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled); 53 - EXPORT_SYMBOL(kasan_flag_enabled); 54 - 55 - /* 56 49 * Whether the selected mode is synchronous, asynchronous, or asymmetric. 57 50 * Defaults to KASAN_MODE_SYNC. 58 51 */ ··· 253 260 kasan_init_tags(); 254 261 255 262 /* KASAN is now initialized, enable it. */ 256 - static_branch_enable(&kasan_flag_enabled); 263 + kasan_enable(); 257 264 258 265 pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n", 259 266 kasan_mode_info(),
+7 -1
mm/kasan/kasan.h
··· 398 398 void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack); 399 399 void kasan_save_track(struct kasan_track *track, gfp_t flags); 400 400 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags); 401 - void kasan_save_free_info(struct kmem_cache *cache, void *object); 401 + 402 + void __kasan_save_free_info(struct kmem_cache *cache, void *object); 403 + static inline void kasan_save_free_info(struct kmem_cache *cache, void *object) 404 + { 405 + if (kasan_enabled()) 406 + __kasan_save_free_info(cache, object); 407 + } 402 408 403 409 #ifdef CONFIG_KASAN_GENERIC 404 410 bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
+6 -6
mm/kasan/shadow.c
··· 125 125 { 126 126 void *shadow_start, *shadow_end; 127 127 128 - if (!kasan_arch_is_ready()) 128 + if (!kasan_enabled()) 129 129 return; 130 130 131 131 /* ··· 150 150 #ifdef CONFIG_KASAN_GENERIC 151 151 void kasan_poison_last_granule(const void *addr, size_t size) 152 152 { 153 - if (!kasan_arch_is_ready()) 153 + if (!kasan_enabled()) 154 154 return; 155 155 156 156 if (size & KASAN_GRANULE_MASK) { ··· 408 408 unsigned long shadow_start, shadow_end; 409 409 int ret; 410 410 411 - if (!kasan_arch_is_ready()) 411 + if (!kasan_enabled()) 412 412 return 0; 413 413 414 414 if (!is_vmalloc_or_module_addr((void *)addr)) ··· 583 583 unsigned long region_start, region_end; 584 584 unsigned long size; 585 585 586 - if (!kasan_arch_is_ready()) 586 + if (!kasan_enabled()) 587 587 return; 588 588 589 589 region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE); ··· 634 634 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored. 635 635 */ 636 636 637 - if (!kasan_arch_is_ready()) 637 + if (!kasan_enabled()) 638 638 return (void *)start; 639 639 640 640 if (!is_vmalloc_or_module_addr(start)) ··· 659 659 */ 660 660 void __kasan_poison_vmalloc(const void *start, unsigned long size) 661 661 { 662 - if (!kasan_arch_is_ready()) 662 + if (!kasan_enabled()) 663 663 return; 664 664 665 665 if (!is_vmalloc_or_module_addr(start))
+1
mm/kasan/sw_tags.c
··· 44 44 per_cpu(prng_state, cpu) = (u32)get_cycles(); 45 45 46 46 kasan_init_tags(); 47 + kasan_enable(); 47 48 48 49 pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n", 49 50 str_on_off(kasan_stack_collection_enabled()));
+1 -1
mm/kasan/tags.c
··· 142 142 save_stack_info(cache, object, flags, false); 143 143 } 144 144 145 - void kasan_save_free_info(struct kmem_cache *cache, void *object) 145 + void __kasan_save_free_info(struct kmem_cache *cache, void *object) 146 146 { 147 147 save_stack_info(cache, object, 0, true); 148 148 }