Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: expose number of page table levels on Kconfig level

We would want to use the number of page table levels to define mm_struct.
Let's expose it as CONFIG_PGTABLE_LEVELS.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Kirill A. Shutemov and committed by
Linus Torvalds
98233368 6b8ce2a1

+42 -40
+6
arch/x86/Kconfig
··· 277 277 config FIX_EARLYCON_MEM 278 278 def_bool y 279 279 280 + config PGTABLE_LEVELS 281 + int 282 + default 4 if X86_64 283 + default 3 if X86_PAE 284 + default 2 285 + 280 286 source "init/Kconfig" 281 287 source "kernel/Kconfig.freezer" 282 288
+4 -4
arch/x86/include/asm/paravirt.h
··· 545 545 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val); 546 546 } 547 547 548 - #if PAGETABLE_LEVELS >= 3 548 + #if CONFIG_PGTABLE_LEVELS >= 3 549 549 static inline pmd_t __pmd(pmdval_t val) 550 550 { 551 551 pmdval_t ret; ··· 585 585 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, 586 586 val); 587 587 } 588 - #if PAGETABLE_LEVELS == 4 588 + #if CONFIG_PGTABLE_LEVELS == 4 589 589 static inline pud_t __pud(pudval_t val) 590 590 { 591 591 pudval_t ret; ··· 636 636 set_pud(pudp, __pud(0)); 637 637 } 638 638 639 - #endif /* PAGETABLE_LEVELS == 4 */ 639 + #endif /* CONFIG_PGTABLE_LEVELS == 4 */ 640 640 641 - #endif /* PAGETABLE_LEVELS >= 3 */ 641 + #endif /* CONFIG_PGTABLE_LEVELS >= 3 */ 642 642 643 643 #ifdef CONFIG_X86_PAE 644 644 /* Special-case pte-setting operations for PAE, which can't update a
+4 -4
arch/x86/include/asm/paravirt_types.h
··· 294 294 struct paravirt_callee_save pgd_val; 295 295 struct paravirt_callee_save make_pgd; 296 296 297 - #if PAGETABLE_LEVELS >= 3 297 + #if CONFIG_PGTABLE_LEVELS >= 3 298 298 #ifdef CONFIG_X86_PAE 299 299 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); 300 300 void (*pte_clear)(struct mm_struct *mm, unsigned long addr, ··· 308 308 struct paravirt_callee_save pmd_val; 309 309 struct paravirt_callee_save make_pmd; 310 310 311 - #if PAGETABLE_LEVELS == 4 311 + #if CONFIG_PGTABLE_LEVELS == 4 312 312 struct paravirt_callee_save pud_val; 313 313 struct paravirt_callee_save make_pud; 314 314 315 315 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); 316 - #endif /* PAGETABLE_LEVELS == 4 */ 317 - #endif /* PAGETABLE_LEVELS >= 3 */ 316 + #endif /* CONFIG_PGTABLE_LEVELS == 4 */ 317 + #endif /* CONFIG_PGTABLE_LEVELS >= 3 */ 318 318 319 319 struct pv_lazy_ops lazy_mode; 320 320
+4 -4
arch/x86/include/asm/pgalloc.h
··· 77 77 78 78 #define pmd_pgtable(pmd) pmd_page(pmd) 79 79 80 - #if PAGETABLE_LEVELS > 2 80 + #if CONFIG_PGTABLE_LEVELS > 2 81 81 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 82 82 { 83 83 struct page *page; ··· 116 116 } 117 117 #endif /* CONFIG_X86_PAE */ 118 118 119 - #if PAGETABLE_LEVELS > 3 119 + #if CONFIG_PGTABLE_LEVELS > 3 120 120 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) 121 121 { 122 122 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); ··· 142 142 ___pud_free_tlb(tlb, pud); 143 143 } 144 144 145 - #endif /* PAGETABLE_LEVELS > 3 */ 146 - #endif /* PAGETABLE_LEVELS > 2 */ 145 + #endif /* CONFIG_PGTABLE_LEVELS > 3 */ 146 + #endif /* CONFIG_PGTABLE_LEVELS > 2 */ 147 147 148 148 #endif /* _ASM_X86_PGALLOC_H */
-1
arch/x86/include/asm/pgtable-2level_types.h
··· 17 17 #endif /* !__ASSEMBLY__ */ 18 18 19 19 #define SHARED_KERNEL_PMD 0 20 - #define PAGETABLE_LEVELS 2 21 20 22 21 /* 23 22 * traditional i386 two-level paging structure:
-2
arch/x86/include/asm/pgtable-3level_types.h
··· 24 24 #define SHARED_KERNEL_PMD 1 25 25 #endif 26 26 27 - #define PAGETABLE_LEVELS 3 28 - 29 27 /* 30 28 * PGDIR_SHIFT determines what a top-level page table entry can map 31 29 */
+4 -4
arch/x86/include/asm/pgtable.h
··· 551 551 return npg >> (20 - PAGE_SHIFT); 552 552 } 553 553 554 - #if PAGETABLE_LEVELS > 2 554 + #if CONFIG_PGTABLE_LEVELS > 2 555 555 static inline int pud_none(pud_t pud) 556 556 { 557 557 return native_pud_val(pud) == 0; ··· 594 594 { 595 595 return 0; 596 596 } 597 - #endif /* PAGETABLE_LEVELS > 2 */ 597 + #endif /* CONFIG_PGTABLE_LEVELS > 2 */ 598 598 599 - #if PAGETABLE_LEVELS > 3 599 + #if CONFIG_PGTABLE_LEVELS > 3 600 600 static inline int pgd_present(pgd_t pgd) 601 601 { 602 602 return pgd_flags(pgd) & _PAGE_PRESENT; ··· 633 633 { 634 634 return !native_pgd_val(pgd); 635 635 } 636 - #endif /* PAGETABLE_LEVELS > 3 */ 636 + #endif /* CONFIG_PGTABLE_LEVELS > 3 */ 637 637 638 638 #endif /* __ASSEMBLY__ */ 639 639
-1
arch/x86/include/asm/pgtable_64_types.h
··· 20 20 #endif /* !__ASSEMBLY__ */ 21 21 22 22 #define SHARED_KERNEL_PMD 0 23 - #define PAGETABLE_LEVELS 4 24 23 25 24 /* 26 25 * PGDIR_SHIFT determines what a top-level page table entry can map
+2 -2
arch/x86/include/asm/pgtable_types.h
··· 234 234 return native_pgd_val(pgd) & PTE_FLAGS_MASK; 235 235 } 236 236 237 - #if PAGETABLE_LEVELS > 3 237 + #if CONFIG_PGTABLE_LEVELS > 3 238 238 typedef struct { pudval_t pud; } pud_t; 239 239 240 240 static inline pud_t native_make_pud(pmdval_t val) ··· 255 255 } 256 256 #endif 257 257 258 - #if PAGETABLE_LEVELS > 2 258 + #if CONFIG_PGTABLE_LEVELS > 2 259 259 typedef struct { pmdval_t pmd; } pmd_t; 260 260 261 261 static inline pmd_t native_make_pmd(pmdval_t val)
+3 -3
arch/x86/kernel/paravirt.c
··· 443 443 .ptep_modify_prot_start = __ptep_modify_prot_start, 444 444 .ptep_modify_prot_commit = __ptep_modify_prot_commit, 445 445 446 - #if PAGETABLE_LEVELS >= 3 446 + #if CONFIG_PGTABLE_LEVELS >= 3 447 447 #ifdef CONFIG_X86_PAE 448 448 .set_pte_atomic = native_set_pte_atomic, 449 449 .pte_clear = native_pte_clear, ··· 454 454 .pmd_val = PTE_IDENT, 455 455 .make_pmd = PTE_IDENT, 456 456 457 - #if PAGETABLE_LEVELS == 4 457 + #if CONFIG_PGTABLE_LEVELS == 4 458 458 .pud_val = PTE_IDENT, 459 459 .make_pud = PTE_IDENT, 460 460 461 461 .set_pgd = native_set_pgd, 462 462 #endif 463 - #endif /* PAGETABLE_LEVELS >= 3 */ 463 + #endif /* CONFIG_PGTABLE_LEVELS >= 3 */ 464 464 465 465 .pte_val = PTE_IDENT, 466 466 .pgd_val = PTE_IDENT,
+7 -7
arch/x86/mm/pgtable.c
··· 58 58 tlb_remove_page(tlb, pte); 59 59 } 60 60 61 - #if PAGETABLE_LEVELS > 2 61 + #if CONFIG_PGTABLE_LEVELS > 2 62 62 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 63 63 { 64 64 struct page *page = virt_to_page(pmd); ··· 74 74 tlb_remove_page(tlb, page); 75 75 } 76 76 77 - #if PAGETABLE_LEVELS > 3 77 + #if CONFIG_PGTABLE_LEVELS > 3 78 78 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) 79 79 { 80 80 paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); 81 81 tlb_remove_page(tlb, virt_to_page(pud)); 82 82 } 83 - #endif /* PAGETABLE_LEVELS > 3 */ 84 - #endif /* PAGETABLE_LEVELS > 2 */ 83 + #endif /* CONFIG_PGTABLE_LEVELS > 3 */ 84 + #endif /* CONFIG_PGTABLE_LEVELS > 2 */ 85 85 86 86 static inline void pgd_list_add(pgd_t *pgd) 87 87 { ··· 117 117 /* If the pgd points to a shared pagetable level (either the 118 118 ptes in non-PAE, or shared PMD in PAE), then just copy the 119 119 references from swapper_pg_dir. */ 120 - if (PAGETABLE_LEVELS == 2 || 121 - (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) || 122 - PAGETABLE_LEVELS == 4) { 120 + if (CONFIG_PGTABLE_LEVELS == 2 || 121 + (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) || 122 + CONFIG_PGTABLE_LEVELS == 4) { 123 123 clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, 124 124 swapper_pg_dir + KERNEL_PGD_BOUNDARY, 125 125 KERNEL_PGD_PTRS);
+7 -7
arch/x86/xen/mmu.c
··· 502 502 } 503 503 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); 504 504 505 - #if PAGETABLE_LEVELS == 4 505 + #if CONFIG_PGTABLE_LEVELS == 4 506 506 __visible pudval_t xen_pud_val(pud_t pud) 507 507 { 508 508 return pte_mfn_to_pfn(pud.pud); ··· 589 589 590 590 xen_mc_issue(PARAVIRT_LAZY_MMU); 591 591 } 592 - #endif /* PAGETABLE_LEVELS == 4 */ 592 + #endif /* CONFIG_PGTABLE_LEVELS == 4 */ 593 593 594 594 /* 595 595 * (Yet another) pagetable walker. This one is intended for pinning a ··· 1628 1628 xen_release_ptpage(pfn, PT_PMD); 1629 1629 } 1630 1630 1631 - #if PAGETABLE_LEVELS == 4 1631 + #if CONFIG_PGTABLE_LEVELS == 4 1632 1632 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) 1633 1633 { 1634 1634 xen_alloc_ptpage(mm, pfn, PT_PUD); ··· 2046 2046 pv_mmu_ops.set_pte = xen_set_pte; 2047 2047 pv_mmu_ops.set_pmd = xen_set_pmd; 2048 2048 pv_mmu_ops.set_pud = xen_set_pud; 2049 - #if PAGETABLE_LEVELS == 4 2049 + #if CONFIG_PGTABLE_LEVELS == 4 2050 2050 pv_mmu_ops.set_pgd = xen_set_pgd; 2051 2051 #endif 2052 2052 ··· 2056 2056 pv_mmu_ops.alloc_pmd = xen_alloc_pmd; 2057 2057 pv_mmu_ops.release_pte = xen_release_pte; 2058 2058 pv_mmu_ops.release_pmd = xen_release_pmd; 2059 - #if PAGETABLE_LEVELS == 4 2059 + #if CONFIG_PGTABLE_LEVELS == 4 2060 2060 pv_mmu_ops.alloc_pud = xen_alloc_pud; 2061 2061 pv_mmu_ops.release_pud = xen_release_pud; 2062 2062 #endif ··· 2122 2122 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), 2123 2123 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), 2124 2124 2125 - #if PAGETABLE_LEVELS == 4 2125 + #if CONFIG_PGTABLE_LEVELS == 4 2126 2126 .pud_val = PV_CALLEE_SAVE(xen_pud_val), 2127 2127 .make_pud = PV_CALLEE_SAVE(xen_make_pud), 2128 2128 .set_pgd = xen_set_pgd_hyper, 2129 2129 2130 2130 .alloc_pud = xen_alloc_pmd_init, 2131 2131 .release_pud = xen_release_pmd_init, 2132 - #endif /* PAGETABLE_LEVELS == 4 */ 2132 + #endif /* CONFIG_PGTABLE_LEVELS == 4 */ 2133 2133 2134 2134 .activate_mm = xen_activate_mm, 2135 2135 .dup_mmap = xen_dup_mmap,
+1 -1
include/trace/events/xen.h
··· 224 224 TP_printk("pmdp %p", __entry->pmdp) 225 225 ); 226 226 227 - #if PAGETABLE_LEVELS >= 4 227 + #if CONFIG_PGTABLE_LEVELS >= 4 228 228 229 229 TRACE_EVENT(xen_mmu_set_pud, 230 230 TP_PROTO(pud_t *pudp, pud_t pudval),