Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mm: Fold p4d page table layer at runtime

Change page table helpers to fold p4d at runtime.
The logic is the same as in <asm-generic/pgtable-nop4d.h>.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214182542.69302-8-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Kirill A. Shutemov; committed by Ingo Molnar
98219dda 6f9dd329

+20 -6
+6 -4
arch/x86/include/asm/paravirt.h
@@ -569,14 +569,16 @@
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	pgdval_t val = native_pgd_val(pgd);
-
-	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
+	if (pgtable_l5_enabled)
+		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
+	else
+		set_p4d((p4d_t *)(pgdp), (p4d_t) { pgd.pgd });
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
 {
-	set_pgd(pgdp, __pgd(0));
+	if (pgtable_l5_enabled)
+		set_pgd(pgdp, __pgd(0));
 }
 
 #endif /* CONFIG_PGTABLE_LEVELS == 5 */
+4 -1
arch/x86/include/asm/pgalloc.h
@@ -167,6 +167,8 @@
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
+	if (!pgtable_l5_enabled)
+		return;
 	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
 	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
 }
@@ -193,7 +191,8 @@
 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 				  unsigned long address)
 {
-	___p4d_free_tlb(tlb, p4d);
+	if (pgtable_l5_enabled)
+		___p4d_free_tlb(tlb, p4d);
 }
 
 #endif /* CONFIG_PGTABLE_LEVELS > 4 */
+10 -1
arch/x86/include/asm/pgtable.h
@@ -65,7 +65,7 @@
 
 #ifndef __PAGETABLE_P4D_FOLDED
 #define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd)			native_pgd_clear(pgd)
+#define pgd_clear(pgd)			(pgtable_l5_enabled ? native_pgd_clear(pgd) : 0)
 #endif
 
 #ifndef set_p4d
@@ -859,6 +859,8 @@
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline int pgd_present(pgd_t pgd)
 {
+	if (!pgtable_l5_enabled)
+		return 1;
 	return pgd_flags(pgd) & _PAGE_PRESENT;
 }
 
@@ -878,12 +876,17 @@
 /* to find an entry in a page-table-directory. */
 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
+	if (!pgtable_l5_enabled)
+		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
 }
 
 static inline int pgd_bad(pgd_t pgd)
 {
 	unsigned long ignore_flags = _PAGE_USER;
+
+	if (!pgtable_l5_enabled)
+		return 0;
 
 	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
 		ignore_flags |= _PAGE_NX;
@@ -898,6 +891,8 @@
 
 static inline int pgd_none(pgd_t pgd)
 {
+	if (!pgtable_l5_enabled)
+		return 0;
 	/*
 	 * There is no need to do a workaround for the KNL stray
 	 * A/D bit erratum here. PGDs only point to page tables