/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/*
 * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
 * vfree_atomic().
 */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* Reset direct map and flush TLB on unmap */
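
/*
 * Example (a minimal sketch of how this flag is typically used, not part of
 * this header's API contract; set_memory_ro() comes from <asm/set_memory.h>
 * and is not declared here): memory tagged with VM_FLUSH_RESET_PERMS must be
 * freed with vfree() from process context, never with vfree_atomic() or from
 * an interrupt:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *
 *	set_vm_flush_reset_perms(p);
 *	set_memory_ro((unsigned long)p, size >> PAGE_SHIFT);
 *	...
 *	vfree(p);	resets the direct map and flushes the TLB
 */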

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};
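
/*
 * Example (a minimal sketch; find_vm_area() is declared further down in this
 * header): the flags above can be tested on the vm_struct that backs a
 * vmalloc'ed pointer, e.g. to check how an address was mapped:
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *
 *	if (area && (area->flags & VM_ALLOC))
 *		pr_info("%u pages backing %p\n", area->nr_pages, area->addr);
 */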

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following three variables can be packed, because
	 * a vmap_area object is always one of the three states:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) in "busy" tree (root is vmap_area_root)
	 *    3) in purge list  (head is vmap_purge_list)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
		struct llist_node purge_list;	/* in purge list */
	};
};

/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
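
/*
 * Example (a minimal sketch, assuming the caller already owns an array of
 * struct page pointers, e.g. from alloc_pages()): vm_map_ram() provides a
 * transient kernel mapping with lazy unmapping, which is typically cheaper
 * than vmap() for short-lived use:
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	... touch the mapping ...
 *	vm_unmap_ram(va, nr_pages);
 */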

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
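
/*
 * Example (a minimal sketch; "struct foo" and "nr_entries" are placeholders):
 * the common driver pattern is a zeroed, virtually contiguous allocation that
 * is released once no other context can still reference it:
 *
 *	struct foo *tbl = vzalloc(array_size(nr_entries, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);	use vfree_atomic() only where sleeping is not allowed
 */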

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
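
/*
 * Example (a minimal sketch, assuming a caller-provided pages[] array):
 * vmap() builds a long-lived, virtually contiguous mapping over pages that
 * need not be physically contiguous:
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */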

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
					unsigned long uaddr, void *kaddr,
					unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
					unsigned long pgoff);
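
/*
 * Example (a minimal sketch of an ->mmap() handler; "buf" is assumed to have
 * been allocated with vmalloc_user(), which is what makes the area suitable
 * for VM_USERMAP remapping):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */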

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(): the
 * compiler is relied upon to optimize the calls out entirely when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
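
/*
 * Example (a minimal sketch of the arch-side contract; PGTBL_PMD_MODIFIED is
 * one of the PGTBL_P?D_MODIFIED values mentioned above): an architecture that
 * must be told about PMD-level changes in the vmalloc range would do roughly:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 *
 *	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 *	{
 *		... propagate the updated kernel page tables, e.g. to other
 *		    page-table roots that alias the vmalloc area ...
 *	}
 */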

/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
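
/*
 * Example (a minimal sketch): for a normal vmalloc() area the recorded size
 * includes one trailing guard page, so the usable size is obtained with the
 * helper above rather than by reading area->size directly:
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *	size_t usable = area ? get_vm_area_size(area) : 0;
 */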

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);
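
/*
 * Example (a minimal sketch): alloc_vm_area() reserves kernel virtual address
 * space and pre-allocates the page tables covering it, optionally handing the
 * pte pointers back so the caller (e.g. a hypervisor backend) can install
 * mappings itself later:
 *
 *	pte_t *ptes[1];
 *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, ptes);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	...
 *	free_vm_area(area);
 */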

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#endif /* _LINUX_VMALLOC_H */