Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/pgtable-generic.c at v4.5-rc1 (202 lines, 5.4 kB)
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  These are
 * usually (though very seldom) called from the
 * p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
/* Clear the accessed (young) bit, flushing the TLB entry only if it was set. */
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
/* Clear the PTE, flushing the TLB only if the old entry was still accessible. */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/*
 * Architectures with special requirements for evicting THP backing TLB
 * entries can implement this.  It can also help optimize the normal TLB
 * flush in the THP regime: the stock flush_tlb_range() typically has an
 * optimization to nuke the entire TLB if the flush span exceeds a
 * threshold, which will likely be true for a single huge page.  Thus a
 * single THP flush would invalidate the entire TLB, which is not
 * desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif
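/*
 * A minimal sketch (not part of the original file, wrapped in #if 0 so
 * it is never built) of how an architecture override might look: the
 * arch defines __HAVE_ARCH_FLUSH_PMD_TLB_RANGE in its headers so the
 * fallback #define above is skipped, then supplies a targeted flush.
 * arch_flush_one_huge_tlb_entry() is a hypothetical helper invented for
 * illustration, not a real kernel API; see arch/arc for the real-world
 * example the comment above refers to.
 */
#if 0
void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	/* Evict only the huge-page entry instead of nuking the whole
	 * TLB, keeping unrelated translations warm. */
	arch_flush_one_huge_tlb_entry(vma->vm_mm, start);
}
#endif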
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys page coloring on some arches */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
/* Transiently mark the pmd not-present and flush, so hardware cannot use
 * the old translation while the entry is being modified. */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and huge page pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
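For context, a hedged sketch of how the deposit/withdraw pair above is typically used: a spare pte page table is deposited when a huge pmd is installed, so that a later split cannot fail on allocation, and withdrawn again when the huge mapping is torn down. my_install_huge_pmd() and my_withdraw_for_split() below are hypothetical wrappers written for illustration, not kernel functions; only the lock and deposit/withdraw calls are real APIs.

/* Sketch only: install a huge pmd and stash a preallocated pte table. */
static void my_install_huge_pmd(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t entry, pgtable_t spare_pte)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmdp);

	spin_lock(ptl);
	/* Stash the spare pte table on the pmd's FIFO list... */
	pgtable_trans_huge_deposit(mm, pmdp, spare_pte);
	set_pmd_at(mm, addr, pmdp, entry);
	spin_unlock(ptl);
}

/* Sketch only: reclaim the deposited pte table when splitting the pmd. */
static pgtable_t my_withdraw_for_split(struct mm_struct *mm, pmd_t *pmdp)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmdp);
	pgtable_t pgtable;

	spin_lock(ptl);
	/* ...and take it back when the huge mapping is split or freed. */
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
	spin_unlock(ptl);
	return pgtable;
}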