Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * KMSAN API for subsystems.
4 *
5 * Copyright (C) 2017-2022 Google LLC
6 * Author: Alexander Potapenko <glider@google.com>
7 *
8 */
9#ifndef _LINUX_KMSAN_H
10#define _LINUX_KMSAN_H
11
12#include <linux/dma-direction.h>
13#include <linux/gfp.h>
14#include <linux/kmsan-checks.h>
15#include <linux/types.h>
16
17struct page;
18struct kmem_cache;
19struct task_struct;
20struct scatterlist;
21struct urb;
22
23#ifdef CONFIG_KMSAN
24
25/**
26 * kmsan_task_create() - Initialize KMSAN state for the task.
27 * @task: task to initialize.
28 */
29void kmsan_task_create(struct task_struct *task);
30
31/**
32 * kmsan_task_exit() - Notify KMSAN that a task has exited.
33 * @task: task about to finish.
34 */
35void kmsan_task_exit(struct task_struct *task);
36
37/**
38 * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
39 *
40 * Allocate and initialize KMSAN metadata for early allocations.
41 */
42void __init kmsan_init_shadow(void);
43
44/**
45 * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
46 */
47void __init kmsan_init_runtime(void);
48
49/**
50 * kmsan_memblock_free_pages() - handle freeing of memblock pages.
51 * @page: struct page to free.
52 * @order: order of @page.
53 *
54 * Freed pages are either returned to buddy allocator or held back to be used
55 * as metadata pages.
56 */
57bool __init __must_check kmsan_memblock_free_pages(struct page *page,
58 unsigned int order);
59
60/**
61 * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
62 * @page: struct page pointer returned by alloc_pages().
63 * @order: order of allocated struct page.
64 * @flags: GFP flags used by alloc_pages()
65 *
66 * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
67 * @flags contain __GFP_ZERO.
68 */
69void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
70
71/**
72 * kmsan_free_page() - Notify KMSAN about a free_pages() call.
73 * @page: struct page pointer passed to free_pages().
74 * @order: order of deallocated struct page.
75 *
76 * KMSAN marks freed memory as uninitialized.
77 */
78void kmsan_free_page(struct page *page, unsigned int order);
79
80/**
81 * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
82 * @dst: destination page.
83 * @src: source page.
84 *
85 * KMSAN copies the contents of metadata pages for @src into the metadata pages
86 * for @dst. If @dst has no associated metadata pages, nothing happens.
87 * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
88 */
89void kmsan_copy_page_meta(struct page *dst, struct page *src);
90
91/**
92 * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
93 * @s: slab cache the object belongs to.
94 * @object: object pointer.
95 * @flags: GFP flags passed to the allocator.
96 *
97 * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
98 * newly created object, marking it as initialized or uninitialized.
99 */
100void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
101
102/**
103 * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
104 * @s: slab cache the object belongs to.
105 * @object: object pointer.
106 *
107 * KMSAN marks the freed object as uninitialized.
108 */
109void kmsan_slab_free(struct kmem_cache *s, void *object);
110
111/**
112 * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
113 * @ptr: object pointer.
114 * @size: object size.
115 * @flags: GFP flags passed to the allocator.
116 *
117 * Similar to kmsan_slab_alloc(), but for large allocations.
118 */
119void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
120
121/**
122 * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
123 * @ptr: object pointer.
124 *
125 * Similar to kmsan_slab_free(), but for large allocations.
126 */
127void kmsan_kfree_large(const void *ptr);
128
/**
 * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
 * @start: start of vmapped range.
 * @end: end of vmapped range.
 * @prot: page protection flags used for vmap.
 * @pages: array of pages.
 * @page_shift: page_shift passed to vmap_range_noflush().
 * @gfp_mask: gfp_mask to use internally.
 *
 * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
 * vmalloc metadata address range. Returns 0 on success, callers must check
 * for non-zero return value.
 */
142int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
143 unsigned long end,
144 pgprot_t prot,
145 struct page **pages,
146 unsigned int page_shift,
147 gfp_t gfp_mask);
148
/**
 * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
 * @start: start of vunmapped range.
 * @end: end of vunmapped range.
 *
 * KMSAN unmaps the contiguous metadata ranges created by
 * kmsan_vmap_pages_range_noflush().
 */
157void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
158
159/**
 * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
161 * @addr: range start.
162 * @end: range end.
163 * @phys_addr: physical range start.
164 * @prot: page protection flags used for ioremap_page_range().
165 * @page_shift: page_shift argument passed to vmap_range_noflush().
166 *
167 * KMSAN creates new metadata pages for the physical pages mapped into the
168 * virtual memory. Returns 0 on success, callers must check for non-zero return
169 * value.
170 */
171int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
172 phys_addr_t phys_addr, pgprot_t prot,
173 unsigned int page_shift);
174
175/**
 * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
177 * @start: range start.
178 * @end: range end.
179 *
180 * KMSAN unmaps the metadata pages for the given range and, unlike for
181 * vunmap_page_range(), also deallocates them.
182 */
183void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
184
/**
 * kmsan_handle_dma() - Handle a DMA data transfer.
 * @phys: physical address of the buffer.
 * @size: buffer size.
 * @dir: one of possible dma_data_direction values.
 *
 * Depending on @dir, KMSAN:
 * * checks the buffer, if it is copied to device;
 * * initializes the buffer, if it is copied from device;
 * * does both, if this is a DMA_BIDIRECTIONAL transfer.
 */
196void kmsan_handle_dma(phys_addr_t phys, size_t size,
197 enum dma_data_direction dir);
198
/**
 * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
 * @sg: scatterlist holding DMA buffers.
 * @nents: number of scatterlist entries.
 * @dir: one of possible dma_data_direction values.
 *
 * Depending on @dir, KMSAN:
 * * checks the buffers in the scatterlist, if they are copied to device;
 * * initializes the buffers, if they are copied from device;
 * * does both, if this is a DMA_BIDIRECTIONAL transfer.
 */
210void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
211 enum dma_data_direction dir);
212
213/**
214 * kmsan_handle_urb() - Handle a USB data transfer.
215 * @urb: struct urb pointer.
216 * @is_out: data transfer direction (true means output to hardware).
217 *
218 * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
219 * KMSAN initializes the transfer buffer.
220 */
221void kmsan_handle_urb(const struct urb *urb, bool is_out);
222
223/**
224 * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
225 * @regs: struct pt_regs pointer received from assembly code.
226 *
227 * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
228 * false positive reports. Unlike kmsan_unpoison_memory(),
229 * kmsan_unpoison_entry_regs() can be called from the regions where
230 * kmsan_in_runtime() returns true, which is the case in early entry code.
231 */
232void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
233
234/**
235 * kmsan_get_metadata() - Return a pointer to KMSAN shadow or origins.
236 * @addr: kernel address.
237 * @is_origin: whether to return origins or shadow.
238 *
239 * Return NULL if metadata cannot be found.
240 */
241void *kmsan_get_metadata(void *addr, bool is_origin);
242
/**
 * kmsan_enable_current(): Enable KMSAN for the current task.
 *
 * Each kmsan_enable_current() call must be preceded by a
 * kmsan_disable_current() call. These call pairs may be nested.
 */
249void kmsan_enable_current(void);
250
/**
 * kmsan_disable_current(): Disable KMSAN for the current task.
 *
 * Each kmsan_disable_current() call must be followed by a
 * kmsan_enable_current() call. These call pairs may be nested.
 */
257void kmsan_disable_current(void);
258
259/**
260 * memset_no_sanitize_memory(): Fill memory without KMSAN instrumentation.
261 * @s: address of kernel memory to fill.
262 * @c: constant byte to fill the memory with.
263 * @n: number of bytes to fill.
264 *
265 * This is like memset(), but without KMSAN instrumentation.
266 */
static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
{
	/*
	 * __memset() is the uninstrumented memset() variant (see the kernel-doc
	 * above: "like memset(), but without KMSAN instrumentation"), so these
	 * writes are not tracked by KMSAN.
	 */
	return __memset(s, c, n);
}
271
272extern bool kmsan_enabled;
273extern int panic_on_kmsan;
274
/*
 * KMSAN performs a lot of consistency checks that are currently enabled by
 * default. BUG_ON is normally discouraged in the kernel, unless used for
 * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
 * recover if something goes wrong.
 *
 * On a failed check, KMSAN_WARN_ON() warns, permanently disables KMSAN
 * (kmsan_enabled = false), and BUGs if panic_on_kmsan is set. Like WARN_ON(),
 * the macro evaluates to the boolean value of @cond, so it can be used in
 * conditionals.
 */
#define KMSAN_WARN_ON(cond)                                            \
	({                                                             \
		const bool __cond = WARN_ON(cond);                     \
		if (unlikely(__cond)) {                                \
			WRITE_ONCE(kmsan_enabled, false);              \
			if (panic_on_kmsan) {                          \
				/* Can't call panic() here because */  \
				/* of uaccess checks. */               \
				BUG();                                 \
			}                                              \
		}                                                      \
		__cond;                                                \
	})
294
295#else
296
/*
 * CONFIG_KMSAN=n: boot-time, task and page hooks compile to no-op stubs so
 * callers need no #ifdefs.
 */

static inline void kmsan_init_shadow(void)
{
}

static inline void kmsan_init_runtime(void)
{
}

/*
 * Always returns true: with KMSAN off no pages are held back for metadata,
 * so every memblock page can go to the buddy allocator.
 */
static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
							  unsigned int order)
{
	return true;
}

static inline void kmsan_task_create(struct task_struct *task)
{
}

static inline void kmsan_task_exit(struct task_struct *task)
{
}

static inline void kmsan_alloc_page(struct page *page, unsigned int order,
				    gfp_t flags)
{
}

static inline void kmsan_free_page(struct page *page, unsigned int order)
{
}
327
/* Page-metadata and slab hooks: no-ops when CONFIG_KMSAN is disabled. */

static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
}

static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
				    gfp_t flags)
{
}

static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
{
}

static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
				       gfp_t flags)
{
}

static inline void kmsan_kfree_large(const void *ptr)
{
}
349
/*
 * vmalloc/ioremap hooks: with KMSAN off there is no metadata to map, so the
 * mapping variants always succeed (return 0) and the unmapping variants do
 * nothing.
 */

static inline int __must_check kmsan_vmap_pages_range_noflush(
	unsigned long start, unsigned long end, pgprot_t prot,
	struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
{
	return 0;
}

static inline void kmsan_vunmap_range_noflush(unsigned long start,
					      unsigned long end)
{
}

/*
 * NOTE(review): first parameter is named "start" here but "addr" in the
 * CONFIG_KMSAN declaration — harmless for a stub, but worth unifying.
 */
static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
							unsigned long end,
							phys_addr_t phys_addr,
							pgprot_t prot,
							unsigned int page_shift)
{
	return 0;
}

static inline void kmsan_iounmap_page_range(unsigned long start,
					    unsigned long end)
{
}
375
/* DMA, USB and entry-code hooks: no-ops when CONFIG_KMSAN is disabled. */

static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
				    enum dma_data_direction dir)
{
}

static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
				       enum dma_data_direction dir)
{
}

static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
}

static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
}
393
/* Per-task enable/disable: nothing to toggle when KMSAN is compiled out. */

static inline void kmsan_enable_current(void)
{
}

static inline void kmsan_disable_current(void)
{
}
401
/*
 * Without KMSAN there is no instrumentation to bypass, so a plain memset()
 * is exactly equivalent. Returns @s, like memset().
 */
static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
{
	memset(s, c, n);
	return s;
}
406
407#define KMSAN_WARN_ON WARN_ON
408
409#endif
410
411#endif /* _LINUX_KMSAN_H */