/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
#define KASAN_VMALLOC_KEEP_TAG		((__force kasan_vmalloc_flags_t)0x08u)

#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
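
/*
 * Illustrative sketch (not part of the API above): with the Generic mode's
 * KASAN_SHADOW_SCALE_SHIFT of 3, each shadow byte covers 8 bytes of memory,
 * and the mapping can be read back like this:
 *
 *	s8 shadow = *(s8 *)kasan_mem_to_shadow(ptr);
 *
 * A value of 0 means all 8 covered bytes are accessible, a value of 1..7
 * means only that many leading bytes are accessible, and a negative value
 * is a poison marker meaning none are. KASAN_SHADOW_OFFSET itself is
 * architecture-specific.
 */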

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
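
/*
 * Illustrative sketch (hypothetical caller, not code from this header): code
 * that must touch memory it knows KASAN would complain about can suppress
 * reports for the current task by pairing the two helpers above:
 *
 *	kasan_disable_current();
 *	do_racy_or_redzone_access();	// hypothetical helper
 *	kasan_enable_current();
 */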

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					    unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}
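
/*
 * Illustrative sketch (an assumption about caller-side usage, simplified):
 * an allocator that wants zeroed memory only needs to clear it itself when
 * KASAN does not already initialize it as part of tagging:
 *
 *	if (want_init && !kasan_has_integrated_init())
 *		memset(object, 0, size);	// hypothetical caller-side zeroing
 *	object = kasan_slab_alloc(cache, object, flags, want_init);
 */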

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
						      void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
						    void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
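
/*
 * Illustrative sketch (slab-internal usage is assumed, not defined here):
 * the two helpers above are meant to bracket a short access to a freshly
 * allocated, still-poisoned object, e.g. to set up free-list metadata:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	setup_object_metadata(cache, object);	// hypothetical slab-internal step
 *	kasan_poison_new_object(cache, object);
 */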

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			   unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @s: Cache the object belongs to.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible, bool no_quarantine);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @s: Cache the object belongs to.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 * @no_quarantine: Whether to bypass the KASAN quarantine.
 *
 * This function informs KASAN that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init,
					    bool still_accessible,
					    bool no_quarantine)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible,
					 no_quarantine);
	return false;
}
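
/*
 * Illustrative sketch (an assumed free path, simplified): a slab free
 * routine would typically consult both hooks above before releasing an
 * object:
 *
 *	if (kasan_slab_pre_free(s, object))
 *		return;		// double-free or invalid-free was reported
 *	if (kasan_slab_free(s, object, init, false, false))
 *		return;		// KASAN quarantined the object; freed later
 *	do_real_free(s, object);	// hypothetical allocator internals
 */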

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
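
/*
 * Illustrative sketch (a hypothetical page cache, not mempool's actual
 * code): a subsystem that keeps freed pages around for reuse would poison
 * them while cached and unpoison them on the way back out:
 *
 *	if (kasan_mempool_poison_pages(page, order))
 *		stash_page(pool, page);		// safe to keep for reuse
 *	...
 *	page = take_page(pool);			// hypothetical helpers
 *	kasan_mempool_unpoison_pages(page, order);
 */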

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
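
/*
 * Illustrative sketch (a hypothetical element cache, not mempool's actual
 * code): slab-backed elements are handled the same way as pages above, with
 * the element size supplied when the allocation is reused:
 *
 *	if (kasan_mempool_poison_object(element))
 *		stash_element(pool, element);	// safe to keep for reuse
 *	...
 *	element = take_element(pool);		// hypothetical helpers
 *	kasan_mempool_unpoison_object(element, element_size);
 */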

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
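
/*
 * Illustrative sketch (an assumed caller, simplified): code about to access
 * memory outside of compiler instrumentation can probe an address first and
 * bail out once KASAN has reported it as bad:
 *
 *	if (!kasan_check_byte(ptr))
 *		return;		// access was reported as invalid
 *	raw_access(ptr);	// hypothetical uninstrumented access
 */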

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					     void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					   void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible,
				   bool no_quarantine)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		  bool is_write, unsigned long ip);
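
/*
 * Illustrative sketch (an assumed check path, simplified): a failed software
 * tag or shadow check typically funnels into kasan_report() with the
 * faulting access details and the caller's IP:
 *
 *	if (!addr_tag_matches_memory_tag(addr))		// hypothetical check
 *		kasan_report(addr, size, is_write, _RET_IP_);
 */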

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC
void __init kasan_init_generic(void);
#else
static inline void kasan_init_generic(void) { }
#endif

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
static inline int kasan_populate_vmalloc(unsigned long addr,
					 unsigned long size, gfp_t gfp_mask)
{
	if (kasan_enabled())
		return __kasan_populate_vmalloc(addr, size, gfp_mask);
	return 0;
}
void __kasan_release_vmalloc(unsigned long start, unsigned long end,
			     unsigned long free_region_start,
			     unsigned long free_region_end,
			     unsigned long flags);
static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags)
{
	if (kasan_enabled())
		return __kasan_release_vmalloc(start, end, free_region_start,
					       free_region_end, flags);
}

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size,
						     kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}
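
/*
 * Illustrative sketch (an assumed vmalloc-side usage, simplified): the
 * vmalloc path is expected to unpoison a fresh mapping with flags describing
 * how it was created, and to use the returned (possibly retagged) pointer:
 *
 *	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_VM_ALLOC;
 *
 *	if (want_initialized_memory)		// hypothetical init condition
 *		kasan_flags |= KASAN_VMALLOC_INIT;
 *	addr = kasan_unpoison_vmalloc(addr, size, kasan_flags);
 */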

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
				 kasan_vmalloc_flags_t flags);
static __always_inline void
kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
			  kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

static __always_inline void
kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
			  kasan_vmalloc_flags_t flags)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */