/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					    unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

struct kasan_cache {
#ifdef CONFIG_KASAN_GENERIC
	int alloc_meta_offset;
	int free_meta_offset;
#endif
	bool is_kmalloc;
};

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}
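/*
 * Illustrative sketch (an assumption about the caller, not an API defined
 * here): the page allocator is expected to pair the two page hooks above
 * around a page's lifetime. want_init_on_alloc()/want_init_on_free() are
 * the mm helpers deciding whether memory should be zero-initialized; they
 * appear here only to illustrate the @init argument:
 *
 *	// on allocation of a compound page of @order:
 *	kasan_unpoison_pages(page, order, want_init_on_alloc(gfp_flags));
 *	// ... page is in use ...
 *	// on free:
 *	kasan_poison_pages(page, order, want_init_on_free());
 */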
void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
		       unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}
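/*
 * Illustrative sketch (an assumption about the slab allocator, not an API
 * defined here): an object is expected to pass through kasan_slab_alloc()
 * on allocation and kasan_slab_free() on free, honoring the quarantine
 * decision made by the latter:
 *
 *	object = kasan_slab_alloc(s, object, gfp_flags, init);
 *	// for kmalloc() caches, additionally:
 *	object = kasan_kmalloc(s, object, orig_size, gfp_flags);
 *	// ... object is in use ...
 *	if (kasan_slab_free(s, object, init))
 *		return;	// object was quarantined; defer the actual free
 */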
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

size_t kasan_metadata_size(struct kmem_cache *cache);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
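/*
 * Usage note (an assumption based on callers, not defined here): the aux
 * stack hooks above are meant for code that defers object handling, e.g.
 * RCU callbacks or workqueues, so that a later use-after-free report can
 * show where the deferred work was queued:
 *
 *	kasan_record_aux_stack_noalloc(work);	// before queuing @work
 *
 * The _noalloc variant avoids allocating stack depot storage and is the
 * safe choice in contexts where allocation could recurse or deadlock.
 */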
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		  bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						    unsigned long size,
						    kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */
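/*
 * Illustrative sketch (an assumption about the vmalloc caller): once the
 * backing pages of a vmalloc area are mapped, the area is unpoisoned with
 * flags describing the allocation, e.g.:
 *
 *	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_VM_ALLOC;
 *
 *	if (want_init)			// allocation requested zeroing
 *		kasan_flags |= KASAN_VMALLOC_INIT;
 *	if (prot_is_page_kernel)	// mapping uses normal protections
 *		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
 *	addr = kasan_unpoison_vmalloc(addr, size, kasan_flags);
 *
 * want_init and prot_is_page_kernel are placeholder conditions for this
 * sketch, not real helpers.
 */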
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */