Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

thp: add x86 32bit support

Add support for transparent hugepages to x86 32bit.

Share the same VM_ bitflag for VM_MAPPED_COPY. mm/nommu.c will never
support transparent hugepages.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Johannes Weiner; committed by Linus Torvalds.
f2d6bfe9 5f24ce5f

+156 -115
+9
arch/x86/include/asm/pgtable-2level.h
··· 46 46 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 47 47 #endif 48 48 49 + #ifdef CONFIG_SMP 50 + static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) 51 + { 52 + return __pmd(xchg((pmdval_t *)xp, 0)); 53 + } 54 + #else 55 + #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) 56 + #endif 57 + 49 58 /* 50 59 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, 51 60 * split up the 29 bits of offset into this range:
+23
arch/x86/include/asm/pgtable-3level.h
··· 104 104 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) 105 105 #endif 106 106 107 + #ifdef CONFIG_SMP 108 + union split_pmd { 109 + struct { 110 + u32 pmd_low; 111 + u32 pmd_high; 112 + }; 113 + pmd_t pmd; 114 + }; 115 + static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) 116 + { 117 + union split_pmd res, *orig = (union split_pmd *)pmdp; 118 + 119 + /* xchg acts as a barrier before setting of the high bits */ 120 + res.pmd_low = xchg(&orig->pmd_low, 0); 121 + res.pmd_high = orig->pmd_high; 122 + orig->pmd_high = 0; 123 + 124 + return res.pmd; 125 + } 126 + #else 127 + #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) 128 + #endif 129 + 107 130 /* 108 131 * Bits 0, 6 and 7 are taken in the low part of the pte, 109 132 * put the 32 bits of offset into the high part.
+117
arch/x86/include/asm/pgtable.h
··· 97 97 return pte_flags(pte) & _PAGE_ACCESSED; 98 98 } 99 99 100 + static inline int pmd_young(pmd_t pmd) 101 + { 102 + return pmd_flags(pmd) & _PAGE_ACCESSED; 103 + } 104 + 100 105 static inline int pte_write(pte_t pte) 101 106 { 102 107 return pte_flags(pte) & _PAGE_RW; ··· 149 144 return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == 150 145 (_PAGE_PSE | _PAGE_PRESENT); 151 146 } 147 + 148 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 149 + static inline int pmd_trans_splitting(pmd_t pmd) 150 + { 151 + return pmd_val(pmd) & _PAGE_SPLITTING; 152 + } 153 + 154 + static inline int pmd_trans_huge(pmd_t pmd) 155 + { 156 + return pmd_val(pmd) & _PAGE_PSE; 157 + } 158 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 152 159 153 160 static inline pte_t pte_set_flags(pte_t pte, pteval_t set) 154 161 { ··· 234 217 static inline pte_t pte_mkspecial(pte_t pte) 235 218 { 236 219 return pte_set_flags(pte, _PAGE_SPECIAL); 220 + } 221 + 222 + static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) 223 + { 224 + pmdval_t v = native_pmd_val(pmd); 225 + 226 + return __pmd(v | set); 227 + } 228 + 229 + static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) 230 + { 231 + pmdval_t v = native_pmd_val(pmd); 232 + 233 + return __pmd(v & ~clear); 234 + } 235 + 236 + static inline pmd_t pmd_mkold(pmd_t pmd) 237 + { 238 + return pmd_clear_flags(pmd, _PAGE_ACCESSED); 239 + } 240 + 241 + static inline pmd_t pmd_wrprotect(pmd_t pmd) 242 + { 243 + return pmd_clear_flags(pmd, _PAGE_RW); 244 + } 245 + 246 + static inline pmd_t pmd_mkdirty(pmd_t pmd) 247 + { 248 + return pmd_set_flags(pmd, _PAGE_DIRTY); 249 + } 250 + 251 + static inline pmd_t pmd_mkhuge(pmd_t pmd) 252 + { 253 + return pmd_set_flags(pmd, _PAGE_PSE); 254 + } 255 + 256 + static inline pmd_t pmd_mkyoung(pmd_t pmd) 257 + { 258 + return pmd_set_flags(pmd, _PAGE_ACCESSED); 259 + } 260 + 261 + static inline pmd_t pmd_mkwrite(pmd_t pmd) 262 + { 263 + return pmd_set_flags(pmd, _PAGE_RW); 264 + } 265 + 266 + static inline pmd_t 
pmd_mknotpresent(pmd_t pmd) 267 + { 268 + return pmd_clear_flags(pmd, _PAGE_PRESENT); 237 269 } 238 270 239 271 /* ··· 593 527 return res; 594 528 } 595 529 530 + static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp) 531 + { 532 + pmd_t res = *pmdp; 533 + 534 + native_pmd_clear(pmdp); 535 + return res; 536 + } 537 + 596 538 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, 597 539 pte_t *ptep , pte_t pte) 598 540 { ··· 689 615 } 690 616 691 617 #define flush_tlb_fix_spurious_fault(vma, address) 618 + 619 + #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 620 + 621 + #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 622 + extern int pmdp_set_access_flags(struct vm_area_struct *vma, 623 + unsigned long address, pmd_t *pmdp, 624 + pmd_t entry, int dirty); 625 + 626 + #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 627 + extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, 628 + unsigned long addr, pmd_t *pmdp); 629 + 630 + #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH 631 + extern int pmdp_clear_flush_young(struct vm_area_struct *vma, 632 + unsigned long address, pmd_t *pmdp); 633 + 634 + 635 + #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH 636 + extern void pmdp_splitting_flush(struct vm_area_struct *vma, 637 + unsigned long addr, pmd_t *pmdp); 638 + 639 + #define __HAVE_ARCH_PMD_WRITE 640 + static inline int pmd_write(pmd_t pmd) 641 + { 642 + return pmd_flags(pmd) & _PAGE_RW; 643 + } 644 + 645 + #define __HAVE_ARCH_PMDP_GET_AND_CLEAR 646 + static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr, 647 + pmd_t *pmdp) 648 + { 649 + pmd_t pmd = native_pmdp_get_and_clear(pmdp); 650 + pmd_update(mm, addr, pmdp); 651 + return pmd; 652 + } 653 + 654 + #define __HAVE_ARCH_PMDP_SET_WRPROTECT 655 + static inline void pmdp_set_wrprotect(struct mm_struct *mm, 656 + unsigned long addr, pmd_t *pmdp) 657 + { 658 + clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); 659 + pmd_update(mm, addr, pmdp); 660 + } 692 661 
693 662 /* 694 663 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
-109
arch/x86/include/asm/pgtable_64.h
··· 182 182 183 183 #define __HAVE_ARCH_PTE_SAME 184 184 185 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 186 - static inline int pmd_trans_splitting(pmd_t pmd) 187 - { 188 - return pmd_val(pmd) & _PAGE_SPLITTING; 189 - } 190 - 191 - static inline int pmd_trans_huge(pmd_t pmd) 192 - { 193 - return pmd_val(pmd) & _PAGE_PSE; 194 - } 195 - #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 196 - 197 - #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 198 - 199 - #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 200 - extern int pmdp_set_access_flags(struct vm_area_struct *vma, 201 - unsigned long address, pmd_t *pmdp, 202 - pmd_t entry, int dirty); 203 - 204 - #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 205 - extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, 206 - unsigned long addr, pmd_t *pmdp); 207 - 208 - #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH 209 - extern int pmdp_clear_flush_young(struct vm_area_struct *vma, 210 - unsigned long address, pmd_t *pmdp); 211 - 212 - 213 - #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH 214 - extern void pmdp_splitting_flush(struct vm_area_struct *vma, 215 - unsigned long addr, pmd_t *pmdp); 216 - 217 - #define __HAVE_ARCH_PMD_WRITE 218 - static inline int pmd_write(pmd_t pmd) 219 - { 220 - return pmd_flags(pmd) & _PAGE_RW; 221 - } 222 - 223 - #define __HAVE_ARCH_PMDP_GET_AND_CLEAR 224 - static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr, 225 - pmd_t *pmdp) 226 - { 227 - pmd_t pmd = native_pmdp_get_and_clear(pmdp); 228 - pmd_update(mm, addr, pmdp); 229 - return pmd; 230 - } 231 - 232 - #define __HAVE_ARCH_PMDP_SET_WRPROTECT 233 - static inline void pmdp_set_wrprotect(struct mm_struct *mm, 234 - unsigned long addr, pmd_t *pmdp) 235 - { 236 - clear_bit(_PAGE_BIT_RW, (unsigned long *)&pmdp->pmd); 237 - pmd_update(mm, addr, pmdp); 238 - } 239 - 240 - static inline int pmd_young(pmd_t pmd) 241 - { 242 - return pmd_flags(pmd) & _PAGE_ACCESSED; 243 - } 244 - 245 - static inline pmd_t pmd_set_flags(pmd_t pmd, 
pmdval_t set) 246 - { 247 - pmdval_t v = native_pmd_val(pmd); 248 - 249 - return native_make_pmd(v | set); 250 - } 251 - 252 - static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) 253 - { 254 - pmdval_t v = native_pmd_val(pmd); 255 - 256 - return native_make_pmd(v & ~clear); 257 - } 258 - 259 - static inline pmd_t pmd_mkold(pmd_t pmd) 260 - { 261 - return pmd_clear_flags(pmd, _PAGE_ACCESSED); 262 - } 263 - 264 - static inline pmd_t pmd_wrprotect(pmd_t pmd) 265 - { 266 - return pmd_clear_flags(pmd, _PAGE_RW); 267 - } 268 - 269 - static inline pmd_t pmd_mkdirty(pmd_t pmd) 270 - { 271 - return pmd_set_flags(pmd, _PAGE_DIRTY); 272 - } 273 - 274 - static inline pmd_t pmd_mkhuge(pmd_t pmd) 275 - { 276 - return pmd_set_flags(pmd, _PAGE_PSE); 277 - } 278 - 279 - static inline pmd_t pmd_mkyoung(pmd_t pmd) 280 - { 281 - return pmd_set_flags(pmd, _PAGE_ACCESSED); 282 - } 283 - 284 - static inline pmd_t pmd_mkwrite(pmd_t pmd) 285 - { 286 - return pmd_set_flags(pmd, _PAGE_RW); 287 - } 288 - 289 - static inline pmd_t pmd_mknotpresent(pmd_t pmd) 290 - { 291 - return pmd_clear_flags(pmd, _PAGE_PRESENT); 292 - } 293 - 294 185 #endif /* !__ASSEMBLY__ */ 295 186 296 187 #endif /* _ASM_X86_PGTABLE_64_H */
+2 -2
arch/x86/mm/pgtable.c
··· 362 362 363 363 if (pmd_young(*pmdp)) 364 364 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, 365 - (unsigned long *) &pmdp->pmd); 365 + (unsigned long *)pmdp); 366 366 367 367 if (ret) 368 368 pmd_update(vma->vm_mm, addr, pmdp); ··· 404 404 int set; 405 405 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 406 406 set = !test_and_set_bit(_PAGE_BIT_SPLITTING, 407 - (unsigned long *)&pmdp->pmd); 407 + (unsigned long *)pmdp); 408 408 if (set) { 409 409 pmd_update(vma->vm_mm, address, pmdp); 410 410 /* need tlb flush only to serialize against gup-fast */
+4 -3
include/linux/mm.h
··· 102 102 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ 103 103 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 104 104 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ 105 + #ifndef CONFIG_TRANSPARENT_HUGEPAGE 105 106 #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ 107 + #else 108 + #define VM_HUGEPAGE 0x01000000 /* MADV_HUGEPAGE marked this vma */ 109 + #endif 106 110 #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */ 107 111 #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ 108 112 ··· 115 111 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ 116 112 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ 117 113 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ 118 - #if BITS_PER_LONG > 32 119 - #define VM_HUGEPAGE 0x100000000UL /* MADV_HUGEPAGE marked this vma */ 120 - #endif 121 114 122 115 /* Bits set in the VMA until the stack is in its final location */ 123 116 #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+1 -1
mm/Kconfig
··· 304 304 305 305 config TRANSPARENT_HUGEPAGE 306 306 bool "Transparent Hugepage Support" if EMBEDDED 307 - depends on X86_64 && MMU 307 + depends on X86 && MMU 308 308 default y 309 309 help 310 310 Transparent Hugepages allows the kernel to use huge pages and