/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* Used by KUnit tests to check for expected KASAN failures. */
struct kunit_kasan_expectation {
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
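
/*
 * Example (illustrative, not used by kernel code itself): in the generic
 * mode KASAN_SHADOW_SCALE_SHIFT is 3, so each aligned 8-byte granule of
 * memory maps to one shadow byte:
 *
 *	u8 *shadow = kasan_mem_to_shadow(ptr);
 *	// shadow == (u8 *)(((unsigned long)ptr >> 3) + KASAN_SHADOW_OFFSET)
 *
 * A shadow byte of 0 marks the whole granule accessible, a value 1..7
 * marks only that many leading bytes accessible, and negative values
 * encode the different kinds of poisoning.
 */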

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
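
/*
 * Sketch of the intended pairing (illustrative; the helper name below is
 * made up): report suppression nests per task, so code that must touch
 * memory KASAN would complain about brackets the access:
 *
 *	kasan_disable_current();
 *	racy_read_object(ptr);		// hypothetical known-benign access
 *	kasan_enable_current();
 */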

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_hw_tags_enabled(void)
{
	return kasan_enabled();
}
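
/*
 * In HW_TAGS mode KASAN is compiled in unconditionally but may be left
 * disabled at runtime (e.g. via early boot parameters), so kasan_enabled()
 * is backed by a static key: when the key is off, the kasan_*() wrappers
 * below compile down to a patched-out branch instead of a function call.
 * The other modes are all-or-nothing at build time, hence the plain
 * IS_ENABLED() fallback in the #else branch.
 */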

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_hw_tags_enabled(void)
{
	return false;
}

static __always_inline void kasan_alloc_pages(struct page *page,
					      unsigned int order, gfp_t flags)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
					     unsigned int order)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}
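
/*
 * "Integrated init" means memory initialization is folded into KASAN's
 * own work: in HW_TAGS mode the same operation that sets memory tags can
 * zero the memory, so the page allocator hands both jobs to
 * kasan_alloc_pages()/kasan_free_pages() instead of doing a separate
 * memset(). That is also why the non-HW_TAGS stubs above are BUILD_BUG():
 * nothing should reach them when init is not integrated.
 */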

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}
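
/*
 * Pattern for the rest of this section (sketch): each kasan_foo() is an
 * __always_inline wrapper that gates the out-of-line __kasan_foo() behind
 * kasan_enabled():
 *
 *	void __kasan_foo(void *p);		// real work, out of line
 *	static __always_inline void kasan_foo(void *p)
 *	{
 *		if (kasan_enabled())		// static branch on HW_TAGS
 *			__kasan_foo(p);
 *	}
 *
 * so call sites pay only a patched branch when KASAN is runtime-disabled.
 */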

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
					       unsigned int *size,
					       slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
		       unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
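
/*
 * Caller contract (illustrative): a true return value means KASAN has
 * taken ownership of the object, e.g. placed it in the generic mode's
 * quarantine to catch later use-after-free, so the slab allocator must
 * not immediately free it:
 *
 *	if (kasan_slab_free(s, object, init))
 *		return;		// object is quarantined, skip the actual free
 */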

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}
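
/*
 * Redzone example (illustrative): when an allocation is smaller than its
 * backing slab object, kasan_kmalloc() unpoisons only the requested size,
 * and the unused tail of the object stays poisoned as a redzone:
 *
 *	char *p = kmalloc(20, GFP_KERNEL);  // backed by, say, a 32-byte object
 *	... p[20] ...                       // out-of-bounds, caught by KASAN
 */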

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
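
/*
 * Typical use (illustrative): check that an address still points into a
 * live object before operating on it, as a ksize()-style helper does with
 * its argument; a false return means a report has already been printed:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;	// invalid object, bail out after the report
 */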

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
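
/*
 * Illustrative effect, assuming arm64-style top-byte tags (the details
 * are per-architecture via arch_kasan_reset_tag()):
 *
 *	void *p = tagged_ptr;		// e.g. top byte 0xf4 holds the tag
 *	void *q = kasan_reset_tag(p);	// same address, canonical top byte
 */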

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */
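
/*
 * Lifecycle sketch (illustrative): with CONFIG_KASAN_VMALLOC, shadow for
 * vmalloc space is managed lazily instead of being reserved up front:
 *
 *	kasan_populate_vmalloc(addr, size);	// map shadow for a new region
 *	kasan_unpoison_vmalloc(p, size);	// mark the region accessible
 *	...
 *	kasan_poison_vmalloc(p, size);		// poison on free
 *	kasan_release_vmalloc(start, end, free_region_start, free_region_end);
 *						// drop shadow pages with no users
 */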

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
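
/*
 * Sketch of the intended use (illustrative): an architecture's
 * module_alloc() backs the freshly allocated module region with real
 * shadow before handing it out:
 *
 *	p = __vmalloc_node_range(...);		// module text/data
 *	if (p && kasan_module_alloc(p, size) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 */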

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
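
/*
 * With inline instrumentation the compiler dereferences shadow addresses
 * directly, so a wild access through a non-canonical pointer faults on the
 * (equally bogus) shadow address. kasan_non_canonical_hook() lets the
 * page-fault path recognize such a faulting address as a shadow address
 * and report the original bad access instead of a cryptic fault.
 */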

#endif /* _LINUX_KASAN_H */