/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};

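/*
 * Illustrative sketch (not part of this header's API): a KASAN KUnit test
 * case typically flags that a report is expected before performing the bad
 * access and then asserts that the report machinery observed one, roughly:
 *
 *	struct kunit_kasan_expectation fail_data = { .report_expected = true };
 *	... perform the access that should trigger a KASAN report ...
 *	KUNIT_EXPECT_TRUE(test, fail_data.report_found);
 *
 * The actual plumbing between the tests and the report path lives in the
 * KASAN KUnit tests and mm/kasan/report.c.
 */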
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

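/*
 * Worked example (illustrative, with a made-up address): with generic KASAN,
 * KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte describes an 8-byte
 * granule of kernel memory, and the shadow byte for an address such as
 * 0xffff888012345678 lives at (0xffff888012345678 >> 3) + KASAN_SHADOW_OFFSET.
 * KASAN_SHADOW_OFFSET is an arch-specific constant chosen so that the whole
 * shadow region fits into a dedicated part of the address space; software
 * tag-based KASAN uses a coarser granule of 16 bytes per shadow byte.
 */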
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

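/*
 * Minimal usage sketch (illustrative): code that must legitimately touch
 * memory KASAN considers poisoned brackets the access, so reports are
 * suppressed only for the current task and only for that window:
 *
 *	kasan_disable_current();
 *	... access that would otherwise produce a false-positive report ...
 *	kasan_enable_current();
 */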
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

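/*
 * Roughly (the exact layout is an implementation detail of mm/kasan/):
 * alloc_meta_offset and free_meta_offset record where, relative to the start
 * of each object's storage, KASAN keeps its allocation and free tracking
 * metadata (such as stack traces), while is_kmalloc marks caches that back
 * kmalloc() so that the unused tail beyond the requested size can be handled
 * appropriately.
 */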
#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

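/*
 * kasan_never_merge() above and all of the kasan_*() wrappers below follow
 * the same pattern: the inline wrapper checks kasan_enabled() and only then
 * calls the out-of-line __kasan_*() implementation. With CONFIG_KASAN_HW_TAGS
 * that check is a static branch, so the hooks stay cheap when the hardware
 * tag-based mode is left disabled at runtime; for the other modes
 * kasan_enabled() is constant true and the wrapper always calls the hook.
 */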
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						      void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

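/*
 * Rough call-order sketch for the slab hooks above (simplified, not a
 * definitive description of the allocators): on allocation the slab
 * allocator calls kasan_slab_alloc() on the new object and, for kmalloc()
 * requests, additionally kasan_kmalloc() with the requested size so the
 * unused tail of the object can be poisoned; on free it calls
 * kasan_slab_free(), whose return value tells the allocator that KASAN took
 * ownership of the object (for example to quarantine it) and the actual
 * freeing must be skipped for now.
 */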
void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
					size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

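/*
 * Typical usage sketch (illustrative): a caller about to operate on an
 * object can probe it first and bail out if the byte is not accessible,
 * much like a ksize()-style helper:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;
 *
 * A false return value means a KASAN report has already been produced for
 * that address, so the caller should not touch the object further.
 */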
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

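/*
 * With the tag-based modes, pointers handed out by the allocators carry a
 * tag in their otherwise-unused top bits (the top byte on arm64);
 * kasan_reset_tag() strips that tag so the canonical, untagged address can
 * be used for things like address comparisons or conversions. Without a
 * tag-based mode it simply returns the pointer unchanged (see the stub
 * further below).
 */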
/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		  bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */