···11+/*22+ * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.33+ *44+ * This program is free software; you can redistribute it and/or55+ * modify it under the terms of the GNU General Public License66+ * as published by the Free Software Foundation; either version77+ * 2 of the License, or (at your option) any later version.88+ */99+1010+#include <linux/sched.h>1111+#include <asm/pgalloc.h>1212+#include <asm/tlb.h>1313+1414+#include "mmu_decl.h"1515+#include <trace/events/thp.h>1616+1717+#ifdef CONFIG_TRANSPARENT_HUGEPAGE1818+/*1919+ * This is called when relaxing access to a hugepage. It's also called in the page2020+ * fault path when we don't hit any of the major fault cases, ie, a minor2121+ * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have2222+ * handled those two for us, we additionally deal with missing execute2323+ * permission here on some processors2424+ */2525+int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,2626+ pmd_t *pmdp, pmd_t entry, int dirty)2727+{2828+ int changed;2929+#ifdef CONFIG_DEBUG_VM3030+ WARN_ON(!pmd_trans_huge(*pmdp));3131+ assert_spin_locked(&vma->vm_mm->page_table_lock);3232+#endif3333+ changed = !pmd_same(*(pmdp), entry);3434+ if (changed) {3535+ __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));3636+ /*3737+ * Since we are not supporting SW TLB systems, we don't3838+ * have any thing similar to flush_tlb_page_nohash()3939+ */4040+ }4141+ return changed;4242+}4343+4444+int pmdp_test_and_clear_young(struct vm_area_struct *vma,4545+ unsigned long address, pmd_t *pmdp)4646+{4747+ return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);4848+}4949+/*5050+ * set a new huge pmd. We should not be called for updating5151+ * an existing pmd entry. 
That should go via pmd_hugepage_update.5252+ */5353+void set_pmd_at(struct mm_struct *mm, unsigned long addr,5454+ pmd_t *pmdp, pmd_t pmd)5555+{5656+#ifdef CONFIG_DEBUG_VM5757+ WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));5858+ assert_spin_locked(&mm->page_table_lock);5959+ WARN_ON(!pmd_trans_huge(pmd));6060+#endif6161+ trace_hugepage_set_pmd(addr, pmd_val(pmd));6262+ return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));6363+}6464+/*6565+ * We use this to invalidate a pmdp entry before switching from a6666+ * hugepte to regular pmd entry.6767+ */6868+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,6969+ pmd_t *pmdp)7070+{7171+ pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);7272+7373+ /*7474+ * This ensures that generic code that rely on IRQ disabling7575+ * to prevent a parallel THP split work as expected.7676+ */7777+ kick_all_cpus_sync();7878+}7979+8080+static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)8181+{8282+ return __pmd(pmd_val(pmd) | pgprot_val(pgprot));8383+}8484+8585+pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)8686+{8787+ unsigned long pmdv;8888+8989+ pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;9090+ return pmd_set_protbits(__pmd(pmdv), pgprot);9191+}9292+9393+pmd_t mk_pmd(struct page *page, pgprot_t pgprot)9494+{9595+ return pfn_pmd(page_to_pfn(page), pgprot);9696+}9797+9898+pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)9999+{100100+ unsigned long pmdv;101101+102102+ pmdv = pmd_val(pmd);103103+ pmdv &= _HPAGE_CHG_MASK;104104+ return pmd_set_protbits(__pmd(pmdv), newprot);105105+}106106+107107+/*108108+ * This is called at the end of handling a user page fault, when the109109+ * fault has been handled by updating a HUGE PMD entry in the linux page tables.110110+ * We use it to preload an HPTE into the hash table corresponding to111111+ * the updated linux HUGE PMD entry.112112+ */113113+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,114114+ pmd_t 
*pmd)115115+{116116+ return;117117+}118118+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
arch/powerpc/mm/pgtable-hash64.c: 13 additions, 124 deletions
···9999100100#ifdef CONFIG_TRANSPARENT_HUGEPAGE101101102102-/*103103- * This is called when relaxing access to a hugepage. It's also called in the page104104- * fault path when we don't hit any of the major fault cases, ie, a minor105105- * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have106106- * handled those two for us, we additionally deal with missing execute107107- * permission here on some processors108108- */109109-int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,110110- pmd_t *pmdp, pmd_t entry, int dirty)111111-{112112- int changed;113113-#ifdef CONFIG_DEBUG_VM114114- WARN_ON(!pmd_trans_huge(*pmdp));115115- assert_spin_locked(&vma->vm_mm->page_table_lock);116116-#endif117117- changed = !pmd_same(*(pmdp), entry);118118- if (changed) {119119- __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));120120- /*121121- * Since we are not supporting SW TLB systems, we don't122122- * have any thing similar to flush_tlb_page_nohash()123123- */124124- }125125- return changed;126126-}127127-128128-unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,129129- pmd_t *pmdp, unsigned long clr,130130- unsigned long set)102102+unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,103103+ pmd_t *pmdp, unsigned long clr,104104+ unsigned long set)131105{132106 __be64 old_be, tmp;133107 unsigned long old;···132158 return old;133159}134160135135-pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,136136- pmd_t *pmdp)161161+pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,162162+ pmd_t *pmdp)137163{138164 pmd_t pmd;139165···172198}173199174200/*175175- * We currently remove entries from the hashtable regardless of whether176176- * the entry was young or dirty.177177- *178178- * We should be more intelligent about this but for the moment we override179179- * these functions and force a tlb flush unconditionally180180- 
*/181181-int pmdp_test_and_clear_young(struct vm_area_struct *vma,182182- unsigned long address, pmd_t *pmdp)183183-{184184- return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);185185-}186186-187187-/*188201 * We want to put the pgtable in pmd and use pgtable for tracking189202 * the base page size hptes190203 */191191-void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,192192- pgtable_t pgtable)204204+void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,205205+ pgtable_t pgtable)193206{194207 pgtable_t *pgtable_slot;195208 assert_spin_locked(&mm->page_table_lock);···194233 smp_wmb();195234}196235197197-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)236236+pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)198237{199238 pgtable_t pgtable;200239 pgtable_t *pgtable_slot;···214253 return pgtable;215254}216255217217-void pmdp_huge_split_prepare(struct vm_area_struct *vma,218218- unsigned long address, pmd_t *pmdp)256256+void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,257257+ unsigned long address, pmd_t *pmdp)219258{220259 VM_BUG_ON(address & ~HPAGE_PMD_MASK);221260 VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);···233272 * pgtable_t after this.234273 */235274 pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);236236-}237237-238238-239239-/*240240- * set a new huge pmd. We should not be called for updating241241- * an existing pmd entry. 
That should go via pmd_hugepage_update.242242- */243243-void set_pmd_at(struct mm_struct *mm, unsigned long addr,244244- pmd_t *pmdp, pmd_t pmd)245245-{246246-#ifdef CONFIG_DEBUG_VM247247- WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));248248- assert_spin_locked(&mm->page_table_lock);249249- WARN_ON(!pmd_trans_huge(pmd));250250-#endif251251- trace_hugepage_set_pmd(addr, pmd_val(pmd));252252- return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));253253-}254254-255255-/*256256- * We use this to invalidate a pmdp entry before switching from a257257- * hugepte to regular pmd entry.258258- */259259-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,260260- pmd_t *pmdp)261261-{262262- pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);263263-264264- /*265265- * This ensures that generic code that rely on IRQ disabling266266- * to prevent a parallel THP split work as expected.267267- */268268- kick_all_cpus_sync();269275}270276271277/*···274346 return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);275347}276348277277-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)278278-{279279- return __pmd(pmd_val(pmd) | pgprot_val(pgprot));280280-}281281-282282-pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)283283-{284284- unsigned long pmdv;285285-286286- pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;287287- return pmd_set_protbits(__pmd(pmdv), pgprot);288288-}289289-290290-pmd_t mk_pmd(struct page *page, pgprot_t pgprot)291291-{292292- return pfn_pmd(page_to_pfn(page), pgprot);293293-}294294-295295-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)296296-{297297- unsigned long pmdv;298298-299299- pmdv = pmd_val(pmd);300300- pmdv &= _HPAGE_CHG_MASK;301301- return pmd_set_protbits(__pmd(pmdv), newprot);302302-}303303-304304-/*305305- * This is called at the end of handling a user page fault, when the306306- * fault has been handled by updating a HUGE PMD entry in the linux page tables.307307- * We use it to 
preload an HPTE into the hash table corresponding to308308- * the updated linux HUGE PMD entry.309309- */310310-void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,311311- pmd_t *pmd)312312-{313313- return;314314-}315315-316316-pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,317317- unsigned long addr, pmd_t *pmdp)349349+pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,350350+ unsigned long addr, pmd_t *pmdp)318351{319352 pmd_t old_pmd;320353 pgtable_t pgtable;···310421 return old_pmd;311422}312423313313-int has_transparent_hugepage(void)424424+int hash__has_transparent_hugepage(void)314425{315426316427 if (!mmu_has_feature(MMU_FTR_16M_PAGE))