/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

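/*
 * Illustrative note, not a declaration: the flags above form a bitmask and
 * may be OR-ed together by a caller. For example, a vmalloc-side caller
 * mapping a normally-protected allocation that also wants the memory
 * initialized might (hypothetically) pass:
 *
 *	kasan_unpoison_vmalloc(start, size,
 *			       KASAN_VMALLOC_VM_ALLOC | KASAN_VMALLOC_INIT |
 *			       KASAN_VMALLOC_PROT_NORMAL);
 *
 * Which flags a given path sets depends on the vmalloc caller; see the
 * kasan_unpoison_vmalloc() wrapper below.
 */
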
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

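/*
 * The early shadow tables above all reference the single zeroed
 * kasan_early_shadow_page, so every shadow read sees 0 ("unpoisoned")
 * until the real shadow is populated. kasan_populate_early_shadow()
 * wires a shadow range up to that zero page.
 */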
int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
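
/*
 * Worked example (illustrative; the constants are assumptions): in generic
 * mode KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte tracks an 8-byte
 * granule. With the x86-64 KASAN_SHADOW_OFFSET of 0xdffffc0000000000:
 *
 *	kasan_mem_to_shadow((void *)0xffff888000000000)
 *		== (void *)((0xffff888000000000UL >> 3) + 0xdffffc0000000000UL)
 *		== (void *)0xffffed1000000000;
 *
 * Addresses 8 bytes apart map to adjacent shadow bytes.
 */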

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					    unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
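/*
 * Each annotation below comes in two parts: an out-of-line __kasan_foo()
 * implementation and a kasan_foo() wrapper. The wrapper checks
 * kasan_enabled() before calling out, so when KASAN is compiled in but
 * not enabled at runtime, each call site costs only a patched-out branch.
 */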
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}
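
/*
 * Illustrative pairing (a sketch; the real callers live in the page
 * allocator, and want_init/init_done are hypothetical locals): a 2^order
 * block is unpoisoned when handed out and poisoned again on free. @init
 * asks KASAN to also initialize the memory when the mode has integrated
 * init; a true return means initialization was handled:
 *
 *	bool init_done = kasan_unpoison_pages(page, order, want_init);
 *	if (want_init && !init_done)
 *		...;	// initialize the pages by other means
 *	...
 *	kasan_poison_pages(page, order, want_init);
 */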

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
		       unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}
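
/*
 * Rough object lifecycle as the slab allocator is expected to drive it
 * (an illustrative sketch, not the exact SLUB code; slab, cache and p are
 * hypothetical variables):
 *
 *	kasan_poison_slab(slab);			// new slab: all poisoned
 *	p = kasan_init_slab_obj(cache, p);		// set up object metadata
 *	...
 *	p = kasan_slab_alloc(cache, p, flags, init);	// unpoison (and tag)
 *	...
 *	if (kasan_slab_free(cache, p, init))		// poison on free
 *		return;					// KASAN took ownership
 *
 * A true return from kasan_slab_free() means the object must not be reused
 * yet, e.g. because it was placed in the generic-mode quarantine.
 */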

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
							size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
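
/*
 * Typical use (illustrative; ptr is a hypothetical variable): probe a
 * pointer before touching the memory and bail out gracefully instead of
 * crashing on a stale object, e.g. in a ksize()-style helper:
 *
 *	if (unlikely(!kasan_check_byte(ptr)))
 *		return 0;	// inaccessible; KASAN already printed a report
 *
 * A false return means the byte is not accessible and a report has been
 * generated.
 */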

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

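/*
 * Per-cache offsets of the generic-mode metadata that kasan_cache_create()
 * reserves for each object: alloc_meta_offset locates the allocation
 * stack-trace record and free_meta_offset the free record (which, as this
 * sketch assumes, also backs the quarantine's freelist link).
 */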
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
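
/*
 * Example (illustrative; the tag and address values are made up): in the
 * tag-based modes the tag lives in the top byte of the pointer, so on a
 * hypothetical arm64 kernel address:
 *
 *	void *tagged = (void *)0xf4ffffc012345678;	// tag 0xf4
 *	void *plain  = kasan_reset_tag(tagged);		// 0xffffffc012345678
 *
 * This is how a pointer is canonicalized before address arithmetic that
 * must ignore tags.
 */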

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		  bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						    unsigned long size,
						    kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}
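
/*
 * Illustrative call order on the vmalloc side (a sketch under assumptions,
 * not the actual mm/vmalloc.c code):
 *
 *	kasan_populate_vmalloc(addr, size);	// back the range with shadow
 *	p = kasan_unpoison_vmalloc((void *)addr, size,
 *				   KASAN_VMALLOC_VM_ALLOC);
 *	...					// mapping is in use
 *	kasan_poison_vmalloc(p, size);		// on vfree()
 *	kasan_release_vmalloc(start, end, free_region_start,
 *			      free_region_end);	// shadow can be recycled
 *
 * kasan_unpoison_vmalloc() may return a tagged pointer in the tag-based
 * modes, so callers must use the returned value.
 */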

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */