#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
		   (__force unsigned long)mask;
}

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x) | __GFP_COLD, 0);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				  unsigned long index);
extern struct page *find_lock_page(struct address_space *mapping,
				   unsigned long index);
extern struct page *find_trylock_page(struct address_space *mapping,
				      unsigned long index);
extern struct page *find_or_create_page(struct address_space *mapping,
					unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages,
			    struct page **pages);
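/*
 * Illustrative lookup pattern (a sketch, not part of this header):
 * find_get_page() returns the page with its reference count raised, or
 * NULL if the page is not in the cache, so the caller must balance a
 * successful lookup with page_cache_release():
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the page ...
 *		page_cache_release(page);
 *	}
 */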
/*
 * Returns the locked page at the given index in the given cache, creating it
 * if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   unsigned long index);
extern struct page *read_cache_page(struct address_space *mapping,
				    unsigned long index, filler_t *filler,
				    void *data);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler,
			    void *data);

int add_to_page_cache(struct page *page, struct address_space *mapping,
		      unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy. Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * Current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD ||
			*local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}

#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}
#endif

static inline unsigned long get_page_cache_size(void)
{
	int ret = atomic_read(&nr_pagecache);
	if (unlikely(ret < 0))
		ret = 0;
	return ret;
}

/*
 * Return the byte offset into the filesystem object for this page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}
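/*
 * Illustrative locking pattern (a sketch, not part of this header; assumes
 * the caller already holds a reference on the page): lock_page() sleeps
 * until it owns PG_locked, and unlock_page() clears the bit and wakes any
 * waiters:
 *
 *	lock_page(page);
 *	... operate on the page exclusively ...
 *	unlock_page(page);
 */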
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait...
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into the pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}
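/*
 * Illustrative use of the fault-in helpers (a sketch, not taken from this
 * header): buffered write paths fault the source buffer in *before*
 * locking the destination page, so that a fault taken mid-copy cannot
 * deadlock on a page we already hold locked:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	page = grab_cache_page(mapping, index);
 *	... copy the user data into the locked page ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */

#endif /* _LINUX_PAGEMAP_H */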