/* include/linux/highmem.h — Linux v2.6.22 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

/*
 * Arch override hook: architectures that define ARCH_HAS_FLUSH_ANON_PAGE
 * supply their own implementation; everyone else gets this no-op.
 */
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

/* Same pattern: no-op unless the arch needs kernel dcache page flushing. */
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

/* No highmem zone: there are never any free highpages to report. */
static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

/*
 * Fallback kmap family for !CONFIG_HIGHMEM (unless the arch provides its
 * own via ARCH_HAS_KMAP): every page already has a kernel virtual address,
 * so mapping reduces to page_address() and unmapping is a no-op.
 */
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	/* Keep the real kmap()'s contract visible: callers may sleep here. */
	might_sleep();
	return page_address(page);
}

/* Evaluate the argument (for side effects / type checking), do nothing. */
#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	/*
	 * No mapping needed, but still enter an atomic (no-pagefault)
	 * section so callers see the same semantics as real kmap_atomic().
	 */
	pagefault_disable();
	return page_address(page);
}
/* Without highmem the page protection argument is irrelevant. */
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

/* Matching exit from the atomic section opened by kmap_atomic(). */
#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

/* Nothing is ever left mapped, so there is nothing to flush. */
#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

/*
 * Allocate a page suitable for mapping into userspace at @vaddr in @vma,
 * pre-zeroed.  Archs with a faster combined alloc+zero define
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and provide their own.
 * Returns NULL on allocation failure.
 */
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
static inline struct page *
alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/* Zero a (possibly highmem) page for kernel use — no user-view flushing. */
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Same but also flushes aliased cache contents to RAM.
 *
 * This must be a macro because KM_USER0 and friends aren't defined if
 * !CONFIG_HIGHMEM
 */
#define zero_user_page(page, offset, size, km_type)		\
	do {							\
		void *kaddr;					\
								\
		BUG_ON((offset) + (size) > PAGE_SIZE);		\
								\
		kaddr = kmap_atomic(page, km_type);		\
		memset((char *)kaddr + (offset), 0, (size));	\
		flush_dcache_page(page);			\
		kunmap_atomic(kaddr, (km_type));		\
	} while (0)

/* Old name for zero_user_page(..., KM_USER0); kept for out-of-tree callers. */
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user_page(page, offset, size, KM_USER0);
}

/*
 * Copy @from to @to on behalf of userspace (@vaddr/@vma identify the user
 * mapping so copy_user_page() can handle cache aliasing).  Arch override
 * via __HAVE_ARCH_COPY_USER_HIGHPAGE.
 */
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is copied on other CPU's too before using it */
	smp_wmb();
}

#endif

/* Kernel-internal page copy; no user mapping, so plain copy_page(). */
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */