Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: mm: Transparent huge page support for LPAE systems.

The patch adds support for THP (transparent huge pages) to LPAE
systems. When this feature is enabled, the kernel tries to map
anonymous pages as 2MB sections where possible.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[steve.capper@linaro.org: symbolic constants used, value of
PMD_SECT_SPLITTING adjusted, tlbflush.h included in pgtable.h,
added PROT_NONE support.]
Signed-off-by: Steve Capper <steve.capper@linaro.org>
Reviewed-by: Will Deacon <will.deacon@arm.com>

Authored by Catalin Marinas and committed by Steve Capper.
8d962507 1355e2a6

+78 -1
+4
arch/arm/Kconfig
··· 1711 1711 def_bool y 1712 1712 depends on ARM_LPAE 1713 1713 1714 + config HAVE_ARCH_TRANSPARENT_HUGEPAGE 1715 + def_bool y 1716 + depends on ARM_LPAE 1717 + 1714 1718 source "mm/Kconfig" 1715 1719 1716 1720 config FORCE_MAX_ZONEORDER
+2
arch/arm/include/asm/pgtable-3level-hwdef.h
··· 42 42 */ 43 43 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) 44 44 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) 45 + #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ 46 + #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ 45 47 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) 46 48 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) 47 49 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
+60
arch/arm/include/asm/pgtable-3level.h
··· 87 87 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ 88 88 #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ 89 89 90 + #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) 91 + #define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) 92 + #define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56) 93 + #define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) 94 + 90 95 /* 91 96 * To be used in assembly code with the upper page attributes. 92 97 */ ··· 200 195 201 196 #define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT)) 202 197 #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) 198 + 199 + #define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) 200 + 201 + #define __HAVE_ARCH_PMD_WRITE 202 + #define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) 203 + 204 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 205 + #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) 206 + #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) 207 + #endif 208 + 209 + #define PMD_BIT_FUNC(fn,op) \ 210 + static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } 211 + 212 + PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); 213 + PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); 214 + PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); 215 + PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); 216 + PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); 217 + PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); 218 + 219 + #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) 220 + 221 + #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) 222 + #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) 223 + #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) 224 + 225 + /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ 226 + #define pmd_mknotpresent(pmd) (__pmd(0)) 227 + 228 + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 229 + { 230 + const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY | 231 + PMD_SECT_VALID | PMD_SECT_NONE; 232 + pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); 233 + return pmd; 234 + } 235 + 236 + static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 237 + pmd_t *pmdp, pmd_t pmd) 238 + { 239 + BUG_ON(addr >= TASK_SIZE); 240 + 241 + /* create a faulting entry if PROT_NONE protected */ 242 + if (pmd_val(pmd) & PMD_SECT_NONE) 243 + pmd_val(pmd) &= ~PMD_SECT_VALID; 244 + 245 + *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); 246 + flush_pmd_entry(pmdp); 247 + } 248 + 249 + static inline int has_transparent_hugepage(void) 250 + { 251 + return 1; 252 + } 203 253 204 254 #endif /* __ASSEMBLY__ */ 205 255
+3
arch/arm/include/asm/pgtable.h
··· 24 24 #include <asm/memory.h> 25 25 #include <asm/pgtable-hwdef.h> 26 26 27 + 28 + #include <asm/tlbflush.h> 29 + 27 30 #ifdef CONFIG_ARM_LPAE 28 31 #include <asm/pgtable-3level.h> 29 32 #else
+6
arch/arm/include/asm/tlb.h
··· 223 223 #endif 224 224 } 225 225 226 + static inline void 227 + tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) 228 + { 229 + tlb_add_flush(tlb, addr); 230 + } 231 + 226 232 #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) 227 233 #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) 228 234 #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
+2
arch/arm/include/asm/tlbflush.h
··· 535 535 } 536 536 #endif 537 537 538 + #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) 539 + 538 540 #endif 539 541 540 542 #endif /* CONFIG_MMU */
+1 -1
arch/arm/mm/fsr-3level.c
··· 9 9 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, 10 10 { do_bad, SIGBUS, 0, "reserved access flag fault" }, 11 11 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, 12 - { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, 12 + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, 13 13 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, 14 14 { do_bad, SIGBUS, 0, "reserved permission fault" }, 15 15 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },