include/linux/vmalloc.h at v2.6.39-rc1
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/page.h>		/* pgprot_t */

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP	0x00000001	/* ioremap() and friends */
#define VM_ALLOC	0x00000002	/* vmalloc() */
#define VM_MAP		0x00000004	/* vmap()ed pages */
#define VM_USERMAP	0x00000008	/* suitable for remap_vmalloc_range */
#define VM_VPAGES	0x00000010	/* buffer for pages was vmalloc'ed */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	void			*caller;
};

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
static inline void vmalloc_init(void)
{
}
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller);
extern void vfree(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
void vmalloc_sync_all(void);
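/*
 * Illustrative sketch, not part of the original header: a minimal driver-side
 * use of the high-level API declared above.  The names example_buf,
 * example_alloc and example_free are hypothetical, and the block is guarded
 * by "#if 0" so it stays out of any build.
 */
#if 0
#include <linux/errno.h>
#include <linux/vmalloc.h>

static void *example_buf;	/* hypothetical driver buffer */

static int example_alloc(unsigned long nbytes)
{
	/* vzalloc() returns zeroed, virtually contiguous memory, or NULL */
	example_buf = vzalloc(nbytes);
	if (!example_buf)
		return -ENOMEM;
	return 0;
}

static void example_free(void)
{
	/* vfree() tolerates NULL but must not be called from interrupt context */
	vfree(example_buf);
	example_buf = NULL;
}
#endif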
/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	/* return actual size without guard page */
	return area->size - PAGE_SIZE;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size);
extern void free_vm_area(struct vm_struct *area);

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use.
 */
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#endif	/* _LINUX_VMALLOC_H */
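The header declares vmalloc_user() and remap_vmalloc_range() but shows no caller. As a hedged sketch of how the two are commonly combined, here is a hypothetical character-device mmap handler; the names examp_buf, examp_size and examp_mmap, and the assumption that the buffer was allocated elsewhere with vmalloc_user(), are illustrative only, not anything defined by this header.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *examp_buf;			/* assumed allocated with vmalloc_user() */
static unsigned long examp_size;	/* size of examp_buf in bytes */

static int examp_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	/* refuse mappings larger than the buffer */
	if (len > PAGE_ALIGN(examp_size))
		return -EINVAL;

	/*
	 * remap_vmalloc_range() only accepts buffers whose vm_struct has
	 * VM_USERMAP set, i.e. memory from vmalloc_user() or
	 * vmalloc_32_user(); vm_pgoff is the page offset into that buffer.
	 */
	return remap_vmalloc_range(vma, examp_buf, vma->vm_pgoff);
}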