#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 * (as of v2.6.23)
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	struct list_head head;	/* List of private "related" vmas */
};

#ifdef CONFIG_MMU

extern struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, struct vm_area_struct *);

#ifdef CONFIG_DEBUG_VM
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
#else
static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	atomic_inc(&page->_mapcount);
}
#endif

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked);
int try_to_unmap(struct page *, int ignore_refs);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
			  unsigned long, spinlock_t **);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
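
/*
 * Illustrative usage sketch -- NOT part of the original header.  It
 * shows the call order a fault path (compare mm/memory.c) is expected
 * to follow when mapping a new anonymous page: prepare the anon_vma
 * first, install the pte, then register the mapping with rmap.
 * "example_map_new_anon" is a made-up name for demonstration only.
 */
#if 0	/* example only, never compiled */
static int example_map_new_anon(struct vm_area_struct *vma,
				unsigned long address, struct page *page)
{
	if (anon_vma_prepare(vma))	/* ensure vma->anon_vma exists */
		return -ENOMEM;
	/* ... set up and install the pte under the page table lock ... */
	page_add_new_anon_rmap(page, vma, address);
	return 0;
}
#endif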

#else /* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

#define page_referenced(page,l) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2

#endif /* _LINUX_RMAP_H */
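
/*
 * Illustrative usage sketch -- NOT part of the original header.  It
 * shows how a reclaim path such as mm/vmscan.c is expected to act on
 * try_to_unmap()'s return value.  "example_try_unmap" is a made-up
 * name, and the surrounding reclaim logic is elided.
 */
#if 0	/* example only, never compiled */
static int example_try_unmap(struct page *page)
{
	switch (try_to_unmap(page, 0)) {
	case SWAP_SUCCESS:	/* all ptes removed; page can be paged out */
		return 1;
	case SWAP_AGAIN:	/* transient failure; keep page, retry later */
		return 0;
	case SWAP_FAIL:		/* cannot be unmapped; reactivate the page */
	default:
		return 0;
	}
}
#endif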