/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct llist_node purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;
	struct rcu_head rcu_head;
};

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
static inline void vmalloc_init(void)
{
}
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
#ifndef CONFIG_MMU
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
						gfp_t flags, void *caller)
{
	return __vmalloc_node_flags(size, node, flags);
}
#else
extern void *__vmalloc_node_flags_caller(unsigned long size,
					 int node, gfp_t flags, void *caller);
#endif

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff);
void vmalloc_sync_all(void);

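/*
 * Minimal usage sketch of the high-level API above (illustrative only;
 * the allocation size and the pages[]/nr_pages array are assumptions made
 * for the example, not anything declared in this header):
 *
 *	void *buf = vzalloc(16 * PAGE_SIZE);	// virtually contiguous, zeroed
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	vfree(buf);
 *
 *	// Mapping an existing array of struct page pointers instead:
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	// ... use va ...
 *	vunmap(va);
 */
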
/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;

}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page **pages);
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#endif	/* _LINUX_VMALLOC_H */
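/*
 * Guard-page arithmetic sketch (illustrative note, not part of the header):
 * unless VM_NO_GUARD is set, an area backing an N-page request is created
 * with one extra guard page at the end, so area->size is (N + 1) * PAGE_SIZE
 * while get_vm_area_size(area) returns the usable N * PAGE_SIZE.  For
 * example, a 2-page get_vm_area() request yields area->size == 3 * PAGE_SIZE
 * and get_vm_area_size(area) == 2 * PAGE_SIZE.
 */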