Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/pagemap.h at bf2002967775cbb233876d51ff94e8daa7e77858 (427 lines, 13 kB)
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
#define AS_MM_ALL_LOCKS	(__GFP_BITS_SHIFT + 2)	/* under mm_take_all_locks() */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * The remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
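
/*
 * A minimal sketch of the lookup-side pattern (steps 1-3 above), roughly
 * as find_get_page() in mm/filemap.c implements it. The radix_tree_lookup()
 * call and mapping->page_tree field are assumed from kernels of this
 * vintage; the real code works on radix-tree slots rather than re-looking
 * up the index:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, index);	(step 1)
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		(step 2)
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, index)) {
 *			page_cache_release(page);		(step 3 failed)
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */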

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
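
/*
 * A minimal sketch of the remove-side (steps A-C above) built on
 * page_freeze_refs(), roughly as reclaim's __remove_mapping() in
 * mm/vmscan.c uses it. The expected count of 2 assumes one pagecache
 * reference plus the caller's own reference:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2))		(A: cmpxchg _count to 0)
 *		goto cannot_free;
 *	__remove_from_page_cache(page);		(B: drop from pagecache)
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...					(C: free the page)
 */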

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
					struct address_space *mapping,
					pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void set_page_locked(struct page *page)
{
	set_bit(PG_locked, &page->flags);
}

static inline void clear_page_locked(struct page *page)
{
	clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return !test_and_set_bit(PG_locked, &page->flags);
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
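
/*
 * A minimal sketch of typical lock_page_killable() use, with an assumed
 * caller shape (cf. the generic file read path in mm/filemap.c), where
 * the caller already holds a reference on the page:
 *
 *	error = lock_page_killable(page);
 *	if (error) {
 *		page_cache_release(page);
 *		return error;		(-EINTR: a fatal signal arrived)
 *	}
 *	... work on the locked page ...
 *	unlock_page(page);
 */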

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		clear_page_locked(page);
	return error;
}

#endif /* _LINUX_PAGEMAP_H */
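
/*
 * A minimal sketch of why the fault_in_pages_* helpers exist, with an
 * assumed caller shape (cf. generic_perform_write() in mm/filemap.c):
 * the write path must pre-fault the user buffer *before* locking the
 * pagecache page, because taking a fault while holding the page lock
 * could deadlock on that very page; the copy itself then runs with
 * pagefaults disabled:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 *				    &page, &fsdata);
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 */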