include/linux/vmalloc.h at v6.12-rc1
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
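/*
 * Editorial worked example (not part of the upstream header): with 4 KiB
 * pages, PAGE_SHIFT is 12, so the default IOREMAP_MAX_ORDER is 7 + 12 = 19,
 * i.e. a maximum ioremap() alignment of 1 << 19 bytes = 512 KiB, which is
 * exactly the "128 pages" noted in the comment above.
 */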

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;		/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))

extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))
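/*
 * Editorial usage sketch (not part of the upstream header): the macros above
 * wrap the *_noprof() variants in alloc_hooks() so that, with memory
 * allocation profiling enabled, each allocation is attributed to its call
 * site via the alloc_tag machinery. A minimal, assumed driver-style use:
 *
 *	void *buf = vzalloc(16 * PAGE_SIZE);	// 16 zeroed, virtually
 *						// contiguous pages
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	vfree(buf);	// sleeps; see vfree_atomic() below for atomic context
 */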

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
			int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))

void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
			__realloc_size(2);
#define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				       unsigned long pgoff);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED
 * values to let the generic vmalloc and ioremap code know when
 * arch_sync_kernel_mappings() needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
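/*
 * Editorial usage sketch (not part of the upstream header): vmap() stitches
 * an existing page array into a virtually contiguous kernel mapping, and
 * vunmap() tears the mapping down without freeing the pages (unless the
 * area was created with VM_MAP_PUT_PAGES). The page count is an assumption
 * for illustration:
 *
 *	struct page *pages[4];
 *	// ... obtain pages, e.g. with alloc_page(GFP_KERNEL) ...
 *	void *va = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	// ... access all four pages contiguously through va ...
 *	vunmap(va);
 */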

/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This does not reliably indicate whether the area is mapped with
	 * page table entries larger than PAGE_SIZE: an architecture may
	 * report that larger sizes are available yet decide not to use
	 * them, and nothing prevents that. It only reflects the size of
	 * the physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);

static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 * Internals. Don't use.
 */
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		  const size_t *sizes, int nr_vms,
		  size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */
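
/*
 * Editorial usage sketch (appended after the header for reference, not part
 * of the upstream file): inspecting an allocation with the low-level helpers
 * declared above. The 2 MiB size is an assumption for illustration:
 *
 *	void *p = vmalloc(2UL << 20);
 *	struct vm_struct *vm = find_vm_area(p);
 *
 *	if (vm)
 *		pr_info("area %px: %zu usable bytes, huge mappings: %d\n",
 *			vm->addr, get_vm_area_size(vm),
 *			is_vm_area_hugepages(p));
 *	vfree(p);
 */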