include/linux/highmem.h at v2.6.36
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_DEBUG_HIGHMEM

void debug_kmap_atomic(enum km_type type);

#else

static inline void debug_kmap_atomic(enum km_type type)
{
}

#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
#define kunmap_atomic(addr, idx) do { \
		BUILD_BUG_ON(__same_type((addr), struct page *)); \
		kunmap_atomic_notypecheck((addr), (idx)); \
	} while (0)

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */
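
For reference, a minimal sketch (not part of the header) of how a 2.6.36-era caller pairs these helpers. fill_page_head() is a hypothetical function, not a kernel API; it illustrates why kunmap_atomic() must be handed the address returned by kmap_atomic() rather than the struct page (the BUILD_BUG_ON above enforces exactly that), and how zero_user_segment() clears the unwritten remainder of a page.

/* Hypothetical caller, not part of highmem.h: copy a buffer into the
 * head of a (possibly highmem) page and zero the tail. */
#include <linux/highmem.h>
#include <linux/string.h>

static void fill_page_head(struct page *page, const void *src, unsigned int len)
{
	char *kaddr;

	BUG_ON(len > PAGE_SIZE);

	/* Atomic kmap: pagefaults stay disabled until the matching kunmap. */
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, src, len);
	/* Pass back the mapped address, not the struct page. */
	kunmap_atomic(kaddr, KM_USER0);

	/* Zero [len, PAGE_SIZE) and flush_dcache_page() the result. */
	zero_user_segment(page, len, PAGE_SIZE);
}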
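
Similarly, a hedged sketch of the slot discipline visible in copy_user_highpage() and copy_highpage() above: two pages mapped atomically at the same time need two distinct km_type slots (KM_USER0 and KM_USER1), since each slot names one fixed mapping address per CPU. pages_equal() is an illustrative name, not a kernel function.

/* Hypothetical helper: compare two page-sized buffers while both pages
 * are mapped atomically, each in its own kmap slot. */
#include <linux/highmem.h>
#include <linux/string.h>

static int pages_equal(struct page *a, struct page *b)
{
	char *va, *vb;
	int ret;

	va = kmap_atomic(a, KM_USER0);
	vb = kmap_atomic(b, KM_USER1);	/* second concurrent mapping: different slot */
	ret = !memcmp(va, vb, PAGE_SIZE);
	kunmap_atomic(vb, KM_USER1);
	kunmap_atomic(va, KM_USER0);

	return ret;
}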