/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */
/* bits [20..31] reserved for arch-specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	/*
	 * The following two fields can share storage, because a
	 * vmap_area object is always in exactly one of two trees:
	 * 1) the "free" tree (root is free_vmap_area_root), or
	 * 2) the "busy" tree (root is vmap_area_root).
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;			/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

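/*
 * Illustrative sketch (not part of this header): an architecture that
 * selects HAVE_ARCH_HUGE_VMAP is expected to override one or more of
 * the hooks above in its own <asm/vmalloc.h>, along these lines:
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return true;	(the arch can use PMD-sized vmalloc mappings)
 *	}
 */
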
/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

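/*
 * Illustrative sketch (not part of this header): transiently mapping a
 * caller-owned page array with vm_map_ram(); 'pages' and 'nr' are
 * hypothetical names:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	... access the virtually contiguous mapping at va ...
 *	vm_unmap_ram(va, nr);
 */
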
#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))

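/*
 * Illustrative sketch (not part of this header): a typical
 * vmalloc()/vfree() pairing; 'len' is a hypothetical size:
 *
 *	void *buf = vmalloc(len);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */
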
extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))

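/*
 * Illustrative sketch (not part of this header): overflow-checked,
 * zeroed array allocation with vcalloc(); 'struct foo' and 'nents'
 * are hypothetical:
 *
 *	struct foo *tbl = vcalloc(nents, sizeof(*tbl));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */
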
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

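/*
 * Illustrative sketch (not part of this header): building a long-lived
 * virtually contiguous view of 'nr' pages with vmap(); 'pages' and
 * 'nr' are hypothetical:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */
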
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

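/*
 * Illustrative sketch (not part of this header): exposing a buffer
 * allocated with vmalloc_user() (which marks the area VM_USERMAP) from
 * a driver's ->mmap() handler; 'foo_mmap' and 'buf' are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 */
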
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					    unsigned long flags,
					    const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This does not reliably indicate whether the area is mapped
	 * with > PAGE_SIZE page table entries: an architecture may
	 * report that larger sizes are available yet decide not to use
	 * them, and nothing prevents that. It only reflects the size of
	 * the physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

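/*
 * Illustrative sketch (not part of this header): asking for huge-page
 * backing and then checking whether the allocator granted it; 'size'
 * is hypothetical:
 *
 *	void *p = vmalloc_huge(size, GFP_KERNEL);
 *	if (p && is_vm_area_hugepages(p))
 *		... the area is backed by > PAGE_SIZE physical pages ...
 */
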
#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

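/*
 * Illustrative sketch (not part of this header): marking an allocation
 * so that vfree() resets the direct map and flushes the TLB after its
 * permissions were changed, e.g. via the set_memory_*() helpers;
 * 'size' and 'npages' are hypothetical:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *	if (p) {
 *		set_vm_flush_reset_perms(p);
 *		set_memory_ro((unsigned long)p, npages);
 *	}
 *	...
 *	vfree(p);	(direct map restored, TLB flushed here)
 */
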
/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 * Internals.  Don't use..
 */
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		  const size_t *sizes, int nr_vms,
		  size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

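/*
 * Illustrative sketch (not part of this header): hooking the vmap purge
 * notifier chain with a standard notifier_block; the callback and
 * structure names are hypothetical:
 *
 *	static int foo_purge_cb(struct notifier_block *nb,
 *				unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_purge_nb = {
 *		.notifier_call = foo_purge_cb,
 *	};
 *
 *	register_vmap_purge_notifier(&foo_purge_nb);
 *	...
 *	unregister_vmap_purge_notifier(&foo_purge_nb);
 */
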
#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif	/* _LINUX_VMALLOC_H */