/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* The kunit_kasan_expectation struct is used in KUnit tests for KASAN expected failures. */
struct kunit_kasan_expectation {
        bool report_expected;
        bool report_found;
};
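/*
 * Illustrative use (a sketch, not the exact KUnit harness): a test sets
 * report_expected before performing an access that should trigger a KASAN
 * report, then checks that the report path set report_found:
 *
 *	struct kunit_kasan_expectation fail = { .report_expected = true };
 *	... perform the bad access ...
 *	KUNIT_EXPECT_TRUE(test, fail.report_found);
 */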

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
                                const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
        return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                + KASAN_SHADOW_OFFSET;
}
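/*
 * Worked example: with generic KASAN, KASAN_SHADOW_SCALE_SHIFT is 3, so one
 * shadow byte covers an 8-byte granule (software tag-based KASAN uses a
 * shift of 4, i.e. 16-byte granules). An access to address A is therefore
 * checked through the shadow byte at (A >> 3) + KASAN_SHADOW_OFFSET, with
 * both constants supplied by the architecture in <asm/kasan.h>.
 */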

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
        return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
                                        unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

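/*
 * alloc_meta_offset and free_meta_offset locate KASAN's per-object
 * allocation and free metadata (e.g. recorded stack traces) inside each
 * object of the cache; kasan_cache_create() fills them in.
 */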
struct kasan_cache {
        int alloc_meta_offset;
        int free_meta_offset;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
        return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
        return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

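/*
 * Each kasan_*() helper below is a kasan_enabled()-guarded wrapper around
 * the corresponding out-of-line __kasan_*() implementation. With HW_TAGS
 * the guard is a runtime-patched static branch, so the hooks cost almost
 * nothing when KASAN is disabled at boot; in the other modes the guard is
 * constant-true and the compiler eliminates the dead branch.
 */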
slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
        if (kasan_enabled())
                return __kasan_never_merge();
        return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
        if (kasan_enabled())
                __kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
                                                unsigned int order)
{
        if (kasan_enabled())
                __kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
                                                unsigned int order)
{
        if (kasan_enabled())
                __kasan_free_pages(page, order);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
                                unsigned int *size, slab_flags_t *flags)
{
        if (kasan_enabled())
                __kasan_cache_create(cache, size, flags);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
        if (kasan_enabled())
                return __kasan_metadata_size(cache);
        return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
        if (kasan_enabled())
                __kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
                                                        void *object)
{
        if (kasan_enabled())
                __kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
                                                        void *object)
{
        if (kasan_enabled())
                __kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                        const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
                                struct kmem_cache *cache, const void *object)
{
        if (kasan_enabled())
                return __kasan_init_slab_obj(cache, object);
        return (void *)object;
}

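/*
 * kasan_slab_free() returns true when KASAN intercepted the free (for
 * example, generic KASAN moved the object into its quarantine, or the
 * free was detected to be invalid); the slab allocator must then skip
 * freeing the object itself.
 */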
bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
                                                unsigned long ip)
{
        if (kasan_enabled())
                return __kasan_slab_free(s, object, ip);
        return false;
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
        if (kasan_enabled())
                __kasan_slab_free_mempool(ptr, ip);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
                                        void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
                                struct kmem_cache *s, void *object, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_slab_alloc(s, object, flags);
        return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
                                        size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
                                const void *object, size_t size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_kmalloc(s, object, size, flags);
        return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
                                        size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
                                                size_t size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_kmalloc_large(ptr, size, flags);
        return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
                                        size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
                                        size_t new_size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_krealloc(object, new_size, flags);
        return (void *)object;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (kasan_enabled())
                __kasan_kfree_large(ptr, ip);
}

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
        return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
        return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
                                        unsigned int *size,
                                        slab_flags_t *flags) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
                                                void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
                                                void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
                                        const void *object)
{
        return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
                                        unsigned long ip)
{
        return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
                                        gfp_t flags)
{
        return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
                                        size_t size, gfp_t flags)
{
        return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
                                        gfp_t flags)
{
        return (void *)object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

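/*
 * In the tag-based modes the top byte of a kernel pointer carries a tag;
 * kasan_reset_tag() strips it (via the arch-provided arch_kasan_reset_tag())
 * so the result can be used or compared as an untagged address.
 */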
static inline void *kasan_reset_tag(const void *addr)
{
        return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip);
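/*
 * Illustrative call (a sketch; real callers live in the KASAN runtime and
 * arch fault handlers): report an 8-byte bad write attributed to the
 * caller's return address:
 *
 *	kasan_report((unsigned long)addr, 8, true, _RET_IP_);
 */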

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
        return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                        unsigned long free_region_start,
                        unsigned long free_region_end);
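/*
 * Typical lifecycle (a sketch of how the vmalloc code uses these hooks):
 * kasan_populate_vmalloc() backs a new area's shadow with real pages and
 * kasan_unpoison_vmalloc() marks the mapping accessible; on vfree() the
 * region is poisoned again, and kasan_release_vmalloc() gives shadow pages
 * back once the surrounding free region no longer needs them.
 */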

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
                                        unsigned long size)
{
        return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
                                        unsigned long end,
                                        unsigned long free_region_start,
                                        unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
        !defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */