Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_KASAN_H
3#define _LINUX_KASAN_H
4
5#include <linux/static_key.h>
6#include <linux/types.h>
7
8struct kmem_cache;
9struct page;
10struct vm_struct;
11struct task_struct;
12
13#ifdef CONFIG_KASAN
14
15#include <linux/linkage.h>
16#include <asm/kasan.h>
17
18/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;	/* test case expects a KASAN report to fire */
	bool report_found;	/* presumably set by the report path when one fires — confirm in mm/kasan */
	bool report_found;
};
23
24#endif
25
26#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
27
28#include <linux/pgtable.h>
29
30/* Software KASAN implementations use shadow memory. */
31
32#ifdef CONFIG_KASAN_SW_TAGS
33#define KASAN_SHADOW_INIT 0xFF
34#else
35#define KASAN_SHADOW_INIT 0
36#endif
37
38extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
39extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
40extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
41extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
42extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
43
44int kasan_populate_early_shadow(const void *shadow_start,
45 const void *shadow_end);
46
47static inline void *kasan_mem_to_shadow(const void *addr)
48{
49 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
50 + KASAN_SHADOW_OFFSET;
51}
52
53int kasan_add_zero_shadow(void *start, unsigned long size);
54void kasan_remove_zero_shadow(void *start, unsigned long size);
55
56/* Enable reporting bugs after kasan_disable_current() */
57extern void kasan_enable_current(void);
58
59/* Disable reporting bugs for current task */
60extern void kasan_disable_current(void);
61
62#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
63
/* No-op stubs used when neither software KASAN mode is configured. */
static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
74
75#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
76
77#ifdef CONFIG_KASAN
78
/*
 * Per-cache KASAN layout info embedded in struct kmem_cache.
 * NOTE(review): presumably byte offsets of alloc/free tracking metadata
 * within each object's slab layout — confirm against mm/kasan.
 */
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};
83
#ifdef CONFIG_KASAN_HW_TAGS

/* HW_TAGS mode is switched on/off at runtime through this static key. */
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

/* Software KASAN modes have no runtime switch: always enabled when built in. */
static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */
101
102slab_flags_t __kasan_never_merge(void);
103static __always_inline slab_flags_t kasan_never_merge(void)
104{
105 if (kasan_enabled())
106 return __kasan_never_merge();
107 return 0;
108}
109
110void __kasan_unpoison_range(const void *addr, size_t size);
111static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
112{
113 if (kasan_enabled())
114 __kasan_unpoison_range(addr, size);
115}
116
117void __kasan_alloc_pages(struct page *page, unsigned int order);
118static __always_inline void kasan_alloc_pages(struct page *page,
119 unsigned int order)
120{
121 if (kasan_enabled())
122 __kasan_alloc_pages(page, order);
123}
124
125void __kasan_free_pages(struct page *page, unsigned int order);
126static __always_inline void kasan_free_pages(struct page *page,
127 unsigned int order)
128{
129 if (kasan_enabled())
130 __kasan_free_pages(page, order);
131}
132
133void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
134 slab_flags_t *flags);
135static __always_inline void kasan_cache_create(struct kmem_cache *cache,
136 unsigned int *size, slab_flags_t *flags)
137{
138 if (kasan_enabled())
139 __kasan_cache_create(cache, size, flags);
140}
141
142size_t __kasan_metadata_size(struct kmem_cache *cache);
143static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
144{
145 if (kasan_enabled())
146 return __kasan_metadata_size(cache);
147 return 0;
148}
149
150void __kasan_poison_slab(struct page *page);
151static __always_inline void kasan_poison_slab(struct page *page)
152{
153 if (kasan_enabled())
154 __kasan_poison_slab(page);
155}
156
157void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
158static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
159 void *object)
160{
161 if (kasan_enabled())
162 __kasan_unpoison_object_data(cache, object);
163}
164
165void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
166static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
167 void *object)
168{
169 if (kasan_enabled())
170 __kasan_poison_object_data(cache, object);
171}
172
173void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
174 const void *object);
175static __always_inline void * __must_check kasan_init_slab_obj(
176 struct kmem_cache *cache, const void *object)
177{
178 if (kasan_enabled())
179 return __kasan_init_slab_obj(cache, object);
180 return (void *)object;
181}
182
183bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
184static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
185 unsigned long ip)
186{
187 if (kasan_enabled())
188 return __kasan_slab_free(s, object, ip);
189 return false;
190}
191
192void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
193static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
194{
195 if (kasan_enabled())
196 __kasan_slab_free_mempool(ptr, ip);
197}
198
199void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
200 void *object, gfp_t flags);
201static __always_inline void * __must_check kasan_slab_alloc(
202 struct kmem_cache *s, void *object, gfp_t flags)
203{
204 if (kasan_enabled())
205 return __kasan_slab_alloc(s, object, flags);
206 return object;
207}
208
209void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
210 size_t size, gfp_t flags);
211static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
212 const void *object, size_t size, gfp_t flags)
213{
214 if (kasan_enabled())
215 return __kasan_kmalloc(s, object, size, flags);
216 return (void *)object;
217}
218
219void * __must_check __kasan_kmalloc_large(const void *ptr,
220 size_t size, gfp_t flags);
221static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
222 size_t size, gfp_t flags)
223{
224 if (kasan_enabled())
225 return __kasan_kmalloc_large(ptr, size, flags);
226 return (void *)ptr;
227}
228
229void * __must_check __kasan_krealloc(const void *object,
230 size_t new_size, gfp_t flags);
231static __always_inline void * __must_check kasan_krealloc(const void *object,
232 size_t new_size, gfp_t flags)
233{
234 if (kasan_enabled())
235 return __kasan_krealloc(object, new_size, flags);
236 return (void *)object;
237}
238
239void __kasan_kfree_large(void *ptr, unsigned long ip);
240static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
241{
242 if (kasan_enabled())
243 __kasan_kfree_large(ptr, ip);
244}
245
246bool kasan_save_enable_multi_shot(void);
247void kasan_restore_multi_shot(bool enabled);
248
249#else /* CONFIG_KASAN */
250
/*
 * CONFIG_KASAN=n: every hook becomes a no-op, a constant, or an identity
 * pass-through, so callers never need #ifdefs of their own.
 */
static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
302
303#endif /* CONFIG_KASAN */
304
/*
 * NOTE(review): CONFIG_KASAN_STACK is tested by value, not defined() —
 * presumably it is always defined as 0 or 1; confirm at its definition.
 */
#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif
310
#ifdef CONFIG_KASAN_GENERIC

/* Cache lifecycle and aux-stack hooks available only in generic mode. */
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
324
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/* Strip the KASAN tag bits from a pointer (arch provides the primitive). */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

/* No tags in generic or disabled modes: identity. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
343
/* Boot-time initialization entry point for software tag-based mode. */
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

/* Per-CPU and global boot-time init for hardware tag-based mode. */
#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif
357
#ifdef CONFIG_KASAN_VMALLOC

/* Shadow management for vmalloc regions (populate/poison/unpoison/release). */
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */
385
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

/* Vmalloc-backed (or disabled) configurations need no module shadow. */
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
403
#ifdef CONFIG_KASAN_INLINE
/*
 * Hook for accesses to non-canonical addresses under inline instrumentation.
 * NOTE(review): exact semantics inferred from the name — confirm in
 * mm/kasan/report.c.
 */
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
409
#endif /* _LINUX_KASAN_H */