/* include/linux/vmalloc.h — as of Linux v3.10-rc4 */
1#ifndef _LINUX_VMALLOC_H 2#define _LINUX_VMALLOC_H 3 4#include <linux/spinlock.h> 5#include <linux/init.h> 6#include <linux/list.h> 7#include <asm/page.h> /* pgprot_t */ 8#include <linux/rbtree.h> 9 10struct vm_area_struct; /* vma defining user mapping in mm_types.h */ 11 12/* bits in flags of vmalloc's vm_struct below */ 13#define VM_IOREMAP 0x00000001 /* ioremap() and friends */ 14#define VM_ALLOC 0x00000002 /* vmalloc() */ 15#define VM_MAP 0x00000004 /* vmap()ed pages */ 16#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ 17#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ 18#define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */ 19/* bits [20..32] reserved for arch specific ioremap internals */ 20 21/* 22 * Maximum alignment for ioremap() regions. 23 * Can be overriden by arch-specific value. 24 */ 25#ifndef IOREMAP_MAX_ORDER 26#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ 27#endif 28 29struct vm_struct { 30 struct vm_struct *next; 31 void *addr; 32 unsigned long size; 33 unsigned long flags; 34 struct page **pages; 35 unsigned int nr_pages; 36 phys_addr_t phys_addr; 37 const void *caller; 38}; 39 40struct vmap_area { 41 unsigned long va_start; 42 unsigned long va_end; 43 unsigned long flags; 44 struct rb_node rb_node; /* address sorted rbtree */ 45 struct list_head list; /* address sorted list */ 46 struct list_head purge_list; /* "lazy purge" list */ 47 struct vm_struct *vm; 48 struct rcu_head rcu_head; 49}; 50 51/* 52 * Highlevel APIs for driver use 53 */ 54extern void vm_unmap_ram(const void *mem, unsigned int count); 55extern void *vm_map_ram(struct page **pages, unsigned int count, 56 int node, pgprot_t prot); 57extern void vm_unmap_aliases(void); 58 59#ifdef CONFIG_MMU 60extern void __init vmalloc_init(void); 61#else 62static inline void vmalloc_init(void) 63{ 64} 65#endif 66 67extern void *vmalloc(unsigned long size); 68extern void *vzalloc(unsigned long size); 69extern void 
*vmalloc_user(unsigned long size); 70extern void *vmalloc_node(unsigned long size, int node); 71extern void *vzalloc_node(unsigned long size, int node); 72extern void *vmalloc_exec(unsigned long size); 73extern void *vmalloc_32(unsigned long size); 74extern void *vmalloc_32_user(unsigned long size); 75extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 76extern void *__vmalloc_node_range(unsigned long size, unsigned long align, 77 unsigned long start, unsigned long end, gfp_t gfp_mask, 78 pgprot_t prot, int node, const void *caller); 79extern void vfree(const void *addr); 80 81extern void *vmap(struct page **pages, unsigned int count, 82 unsigned long flags, pgprot_t prot); 83extern void vunmap(const void *addr); 84 85extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 86 unsigned long pgoff); 87void vmalloc_sync_all(void); 88 89/* 90 * Lowlevel-APIs (not for driver use!) 91 */ 92 93static inline size_t get_vm_area_size(const struct vm_struct *area) 94{ 95 /* return actual size without guard page */ 96 return area->size - PAGE_SIZE; 97} 98 99extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); 100extern struct vm_struct *get_vm_area_caller(unsigned long size, 101 unsigned long flags, const void *caller); 102extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 103 unsigned long start, unsigned long end); 104extern struct vm_struct *__get_vm_area_caller(unsigned long size, 105 unsigned long flags, 106 unsigned long start, unsigned long end, 107 const void *caller); 108extern struct vm_struct *remove_vm_area(const void *addr); 109extern struct vm_struct *find_vm_area(const void *addr); 110 111extern int map_vm_area(struct vm_struct *area, pgprot_t prot, 112 struct page ***pages); 113#ifdef CONFIG_MMU 114extern int map_kernel_range_noflush(unsigned long start, unsigned long size, 115 pgprot_t prot, struct page **pages); 116extern void unmap_kernel_range_noflush(unsigned long 
addr, unsigned long size); 117extern void unmap_kernel_range(unsigned long addr, unsigned long size); 118#else 119static inline int 120map_kernel_range_noflush(unsigned long start, unsigned long size, 121 pgprot_t prot, struct page **pages) 122{ 123 return size >> PAGE_SHIFT; 124} 125static inline void 126unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 127{ 128} 129static inline void 130unmap_kernel_range(unsigned long addr, unsigned long size) 131{ 132} 133#endif 134 135/* Allocate/destroy a 'vmalloc' VM area. */ 136extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); 137extern void free_vm_area(struct vm_struct *area); 138 139/* for /dev/kmem */ 140extern long vread(char *buf, char *addr, unsigned long count); 141extern long vwrite(char *buf, char *addr, unsigned long count); 142 143/* 144 * Internals. Dont't use.. 145 */ 146extern struct list_head vmap_area_list; 147extern __init void vm_area_add_early(struct vm_struct *vm); 148extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); 149 150#ifdef CONFIG_SMP 151# ifdef CONFIG_MMU 152struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 153 const size_t *sizes, int nr_vms, 154 size_t align); 155 156void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); 157# else 158static inline struct vm_struct ** 159pcpu_get_vm_areas(const unsigned long *offsets, 160 const size_t *sizes, int nr_vms, 161 size_t align) 162{ 163 return NULL; 164} 165 166static inline void 167pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 168{ 169} 170# endif 171#endif 172 173struct vmalloc_info { 174 unsigned long used; 175 unsigned long largest_chunk; 176}; 177 178#ifdef CONFIG_MMU 179#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) 180extern void get_vmalloc_info(struct vmalloc_info *vmi); 181#else 182 183#define VMALLOC_TOTAL 0UL 184#define get_vmalloc_info(vmi) \ 185do { \ 186 (vmi)->used = 0; \ 187 (vmi)->largest_chunk = 0; \ 188} while (0) 189#endif 190 
191#endif /* _LINUX_VMALLOC_H */