/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */

/* bits [20..32] reserved for arch specific ioremap internals */
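/*
 * Illustrative sketch (not part of this header): the flag bits above land in
 * vm_struct->flags. Callers normally pass them when establishing a mapping
 * themselves, e.g. VM_MAP for vmap() below. The "pages" array and "nr_pages"
 * count here are hypothetical.
 *
 *	void *addr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	vunmap(addr);	// does not free the pages unless VM_MAP_PUT_PAGES
 */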
/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
	unsigned long		requested_size;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;		/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

/*
 * Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))

extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))
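/*
 * Illustrative sketch (not part of this header): typical driver usage of the
 * high-level allocators above. The "size" and "buf" names are hypothetical.
 * Note that vmalloc() and friends may sleep and must not be called from
 * atomic context.
 *
 *	void *buf = vzalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */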
extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))

void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				unsigned long pgoff);

int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot,
		struct page **pages, unsigned int page_shift);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(); the
 * compiler is relied upon to optimize the calls out if
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
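/*
 * Illustrative sketch (not part of this header): exposing a vmalloc'ed buffer
 * to userspace with remap_vmalloc_range() from a driver's mmap handler. The
 * "foo" names are hypothetical. The buffer must have been allocated with
 * vmalloc_user() (or otherwise carry VM_USERMAP) for this to succeed.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return remap_vmalloc_range(vma, foo->buf, vma->vm_pgoff);
 *	}
 */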
/*
 * Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% accuracy whether the area is mapped
	 * with > PAGE_SIZE page table entries: nothing prevents an
	 * architecture from indicating that larger sizes are available but
	 * then deciding not to use them. This only indicates the size of
	 * the physical page allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

/* for /proc/kcore */
long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 * Internals. Don't use.
 */
__init void vm_area_add_early(struct vm_struct *vm);
__init void vm_area_register_early(struct vm_struct *vm, size_t align);

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)

unsigned long vmalloc_nr_pages(void);

int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);

static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else /* !CONFIG_MMU */
#define VMALLOC_TOTAL 0UL

static inline unsigned long vmalloc_nr_pages(void) { return 0; }
static inline void set_vm_flush_reset_perms(void *addr) {}
#endif /* CONFIG_MMU */

#if defined(CONFIG_MMU) && defined(CONFIG_SMP)
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
#else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) {}
#endif

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */