/* include/asm-generic/pgtable.h, as of v2.6.17-rc2 */
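/*
 * Generic page table accessors and helpers.  Every block below is a
 * default that an architecture can replace by defining the matching
 * __HAVE_ARCH_* symbol in its own <asm/pgtable.h> before including
 * this file.  (Descriptive comment added by the editor; not in the
 * original header.)
 */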
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
 * Establish a new mapping:
 * - flush the old one
 * - update the page tables
 * - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading, and the pte lock.
 *
 * Note: the old pte is known to not be writable, so we don't need to
 * worry about dirty bits etc getting lost.
 */
#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
	flush_tlb_page(__vma, __address);				\
} while (0)
#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_atomic(__ptep, __entry);				\
	flush_tlb_page(__vma, __address);				\
} while (0)
#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
#endif

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									  \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	  \
	flush_tlb_page(__vma, __address);				  \
} while (0)
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *__ptep;						\
	int r = 1;							\
	if (!pte_dirty(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
			   pte_mkclean(__pte));				\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty;							\
	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
	if (__dirty)							\
		flush_tlb_page(__vma, __address);			\
	__dirty;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTE_CLEAR_FULL
#define pte_clear_full(__mm, __address, __ptep, __full)			\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif
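/*
 * Illustrative sketch, not part of the original header: the test-and-clear
 * helpers above are meant to run with the pte lock held, and the _flush
 * variants additionally shoot down the stale TLB entry.  A hypothetical
 * caller unmapping a single page (modelled on try_to_unmap_one() in
 * mm/rmap.c) would look roughly like:
 *
 *	pte_t pteval;
 *
 *	... pte lock taken by the caller ...
 *	pteval = ptep_clear_flush(vma, address, pte);
 *	if (pte_dirty(pteval))
 *		set_page_dirty(page);
 *
 * The dirty bit must be harvested from the returned pte value, not by
 * re-reading *pte, because the entry has already been cleared.
 */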
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define page_test_and_clear_dirty(page) (0)
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#else
#define move_pte(pte, prot, old_addr, new_addr)			\
({								\
	pte_t newpte = (pte);					\
	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&	\
	    pte_page(pte) == ZERO_PAGE(old_addr))		\
		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));	\
	newpte;							\
})
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef __ASSEMBLY__
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */
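/*
 * Illustrative sketch, not part of the original header: the *_addr_end()
 * macros and the p?d_none_or_clear_bad() helpers above combine into the
 * canonical range walk used throughout mm/ (e.g. the unmap and copy loops
 * in mm/memory.c).  A hypothetical top-level walker:
 *
 *	unsigned long next;
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		... recurse into the pud level with (addr, next) ...
 *	} while (pgd++, addr = next, addr != end);
 *
 * The "(__boundary - 1 < (end) - 1)" form in the *_addr_end() macros exists
 * because a rounded-up boundary may wrap to 0: after subtracting 1, a
 * wrapped boundary becomes ULONG_MAX and so correctly loses to any
 * non-wrapping end address.
 */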