Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mm: Stop pretending pgtable_l5_enabled is a variable

pgtable_l5_enabled is defined using cpu_feature_enabled() but we refer
to it as a variable. This is misleading.

Make pgtable_l5_enabled() a function.

We cannot literally define it as a function due to circular dependencies
between header files. A function-like macro is close enough.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20180518103528.59260-4-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Kirill A. Shutemov and committed by
Ingo Molnar
ed7588d5 ad3fe525

+47 -42
+1 -1
arch/x86/include/asm/page_64_types.h
··· 53 53 #define __PHYSICAL_MASK_SHIFT 52 54 54 55 55 #ifdef CONFIG_X86_5LEVEL 56 - #define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled ? 56 : 47) 56 + #define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled() ? 56 : 47) 57 57 #else 58 58 #define __VIRTUAL_MASK_SHIFT 47 59 59 #endif
+2 -2
arch/x86/include/asm/paravirt.h
··· 574 574 } 575 575 576 576 #define set_pgd(pgdp, pgdval) do { \ 577 - if (pgtable_l5_enabled) \ 577 + if (pgtable_l5_enabled()) \ 578 578 __set_pgd(pgdp, pgdval); \ 579 579 else \ 580 580 set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd }); \ 581 581 } while (0) 582 582 583 583 #define pgd_clear(pgdp) do { \ 584 - if (pgtable_l5_enabled) \ 584 + if (pgtable_l5_enabled()) \ 585 585 set_pgd(pgdp, __pgd(0)); \ 586 586 } while (0) 587 587
+2 -2
arch/x86/include/asm/pgalloc.h
··· 167 167 #if CONFIG_PGTABLE_LEVELS > 4 168 168 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) 169 169 { 170 - if (!pgtable_l5_enabled) 170 + if (!pgtable_l5_enabled()) 171 171 return; 172 172 paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); 173 173 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); ··· 193 193 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, 194 194 unsigned long address) 195 195 { 196 - if (pgtable_l5_enabled) 196 + if (pgtable_l5_enabled()) 197 197 ___p4d_free_tlb(tlb, p4d); 198 198 } 199 199
+5 -5
arch/x86/include/asm/pgtable.h
··· 65 65 66 66 #ifndef __PAGETABLE_P4D_FOLDED 67 67 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) 68 - #define pgd_clear(pgd) (pgtable_l5_enabled ? native_pgd_clear(pgd) : 0) 68 + #define pgd_clear(pgd) (pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0) 69 69 #endif 70 70 71 71 #ifndef set_p4d ··· 881 881 #if CONFIG_PGTABLE_LEVELS > 4 882 882 static inline int pgd_present(pgd_t pgd) 883 883 { 884 - if (!pgtable_l5_enabled) 884 + if (!pgtable_l5_enabled()) 885 885 return 1; 886 886 return pgd_flags(pgd) & _PAGE_PRESENT; 887 887 } ··· 900 900 /* to find an entry in a page-table-directory. */ 901 901 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) 902 902 { 903 - if (!pgtable_l5_enabled) 903 + if (!pgtable_l5_enabled()) 904 904 return (p4d_t *)pgd; 905 905 return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address); 906 906 } ··· 909 909 { 910 910 unsigned long ignore_flags = _PAGE_USER; 911 911 912 - if (!pgtable_l5_enabled) 912 + if (!pgtable_l5_enabled()) 913 913 return 0; 914 914 915 915 if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) ··· 920 920 921 921 static inline int pgd_none(pgd_t pgd) 922 922 { 923 - if (!pgtable_l5_enabled) 923 + if (!pgtable_l5_enabled()) 924 924 return 0; 925 925 /* 926 926 * There is no need to do a workaround for the KNL stray
+1 -1
arch/x86/include/asm/pgtable_32_types.h
··· 15 15 # include <asm/pgtable-2level_types.h> 16 16 #endif 17 17 18 - #define pgtable_l5_enabled 0 18 + #define pgtable_l5_enabled() 0 19 19 20 20 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 21 21 #define PGDIR_MASK (~(PGDIR_SIZE - 1))
+1 -1
arch/x86/include/asm/pgtable_64.h
··· 220 220 { 221 221 pgd_t pgd; 222 222 223 - if (pgtable_l5_enabled || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { 223 + if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { 224 224 *p4dp = p4d; 225 225 return; 226 226 }
+9 -5
arch/x86/include/asm/pgtable_64_types.h
··· 28 28 * cpu_feature_enabled() is not available in early boot code. 29 29 * Use variable instead. 30 30 */ 31 - #define pgtable_l5_enabled __pgtable_l5_enabled 31 + static inline bool pgtable_l5_enabled(void) 32 + { 33 + return __pgtable_l5_enabled; 34 + } 32 35 #else 33 - #define pgtable_l5_enabled cpu_feature_enabled(X86_FEATURE_LA57) 36 + #define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57) 34 37 #endif /* USE_EARLY_PGTABLE_L5 */ 38 + 35 39 #else 36 - #define pgtable_l5_enabled 0 40 + #define pgtable_l5_enabled() 0 37 41 #endif /* CONFIG_X86_5LEVEL */ 38 42 39 43 extern unsigned int pgdir_shift; ··· 113 109 114 110 #define LDT_PGD_ENTRY_L4 -3UL 115 111 #define LDT_PGD_ENTRY_L5 -112UL 116 - #define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) 112 + #define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) 117 113 #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) 118 114 119 115 #define __VMALLOC_BASE_L4 0xffffc90000000000UL ··· 127 123 128 124 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT 129 125 # define VMALLOC_START vmalloc_base 130 - # define VMALLOC_SIZE_TB (pgtable_l5_enabled ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4) 126 + # define VMALLOC_SIZE_TB (pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4) 131 127 # define VMEMMAP_START vmemmap_base 132 128 #else 133 129 # define VMALLOC_START __VMALLOC_BASE_L4
+2 -2
arch/x86/include/asm/sparsemem.h
··· 27 27 # endif 28 28 #else /* CONFIG_X86_32 */ 29 29 # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ 30 - # define MAX_PHYSADDR_BITS (pgtable_l5_enabled ? 52 : 44) 31 - # define MAX_PHYSMEM_BITS (pgtable_l5_enabled ? 52 : 46) 30 + # define MAX_PHYSADDR_BITS (pgtable_l5_enabled() ? 52 : 44) 31 + # define MAX_PHYSMEM_BITS (pgtable_l5_enabled() ? 52 : 46) 32 32 #endif 33 33 34 34 #endif /* CONFIG_SPARSEMEM */
+1 -1
arch/x86/kernel/head64.c
··· 279 279 * critical -- __PAGE_OFFSET would point us back into the dynamic 280 280 * range and we might end up looping forever... 281 281 */ 282 - if (!pgtable_l5_enabled) 282 + if (!pgtable_l5_enabled()) 283 283 p4d_p = pgd_p; 284 284 else if (pgd) 285 285 p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+2 -1
arch/x86/kernel/machine_kexec_64.c
··· 354 354 { 355 355 VMCOREINFO_NUMBER(phys_base); 356 356 VMCOREINFO_SYMBOL(init_top_pgt); 357 - VMCOREINFO_NUMBER(pgtable_l5_enabled); 357 + vmcoreinfo_append_str("NUMBER(pgtable_l5_enabled)=%d\n", 358 + pgtable_l5_enabled()); 358 359 359 360 #ifdef CONFIG_NUMA 360 361 VMCOREINFO_SYMBOL(node_data);
+3 -3
arch/x86/mm/dump_pagetables.c
··· 360 360 void *pt) 361 361 { 362 362 if (__pa(pt) == __pa(kasan_zero_pmd) || 363 - (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) || 363 + (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) || 364 364 __pa(pt) == __pa(kasan_zero_pud)) { 365 365 pgprotval_t prot = pte_flags(kasan_zero_pte[0]); 366 366 note_page(m, st, __pgprot(prot), 0, 5); ··· 476 476 } 477 477 } 478 478 479 - #define pgd_large(a) (pgtable_l5_enabled ? pgd_large(a) : p4d_large(__p4d(pgd_val(a)))) 480 - #define pgd_none(a) (pgtable_l5_enabled ? pgd_none(a) : p4d_none(__p4d(pgd_val(a)))) 479 + #define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a)))) 480 + #define pgd_none(a) (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a)))) 481 481 482 482 static inline bool is_hypervisor_range(int idx) 483 483 {
+2 -2
arch/x86/mm/fault.c
··· 439 439 if (pgd_none(*pgd_k)) 440 440 return -1; 441 441 442 - if (pgtable_l5_enabled) { 442 + if (pgtable_l5_enabled()) { 443 443 if (pgd_none(*pgd)) { 444 444 set_pgd(pgd, *pgd_k); 445 445 arch_flush_lazy_mmu_mode(); ··· 454 454 if (p4d_none(*p4d_k)) 455 455 return -1; 456 456 457 - if (p4d_none(*p4d) && !pgtable_l5_enabled) { 457 + if (p4d_none(*p4d) && !pgtable_l5_enabled()) { 458 458 set_p4d(p4d, *p4d_k); 459 459 arch_flush_lazy_mmu_mode(); 460 460 } else {
+1 -1
arch/x86/mm/ident_map.c
··· 123 123 result = ident_p4d_init(info, p4d, addr, next); 124 124 if (result) 125 125 return result; 126 - if (pgtable_l5_enabled) { 126 + if (pgtable_l5_enabled()) { 127 127 set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag)); 128 128 } else { 129 129 /*
+4 -4
arch/x86/mm/init_64.c
··· 180 180 */ 181 181 void sync_global_pgds(unsigned long start, unsigned long end) 182 182 { 183 - if (pgtable_l5_enabled) 183 + if (pgtable_l5_enabled()) 184 184 sync_global_pgds_l5(start, end); 185 185 else 186 186 sync_global_pgds_l4(start, end); ··· 643 643 unsigned long vaddr = (unsigned long)__va(paddr); 644 644 int i = p4d_index(vaddr); 645 645 646 - if (!pgtable_l5_enabled) 646 + if (!pgtable_l5_enabled()) 647 647 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask); 648 648 649 649 for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) { ··· 723 723 page_size_mask); 724 724 725 725 spin_lock(&init_mm.page_table_lock); 726 - if (pgtable_l5_enabled) 726 + if (pgtable_l5_enabled()) 727 727 pgd_populate(&init_mm, pgd, p4d); 728 728 else 729 729 p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d); ··· 1100 1100 * 5-level case we should free them. This code will have to change 1101 1101 * to adapt for boot-time switching between 4 and 5 level page tables. 1102 1102 */ 1103 - if (pgtable_l5_enabled) 1103 + if (pgtable_l5_enabled()) 1104 1104 free_pud_table(pud_base, p4d); 1105 1105 } 1106 1106
+4 -4
arch/x86/mm/kasan_init_64.c
··· 180 180 * With folded p4d, pgd_clear() is nop, use p4d_clear() 181 181 * instead. 182 182 */ 183 - if (pgtable_l5_enabled) 183 + if (pgtable_l5_enabled()) 184 184 pgd_clear(pgd); 185 185 else 186 186 p4d_clear(p4d_offset(pgd, start)); ··· 195 195 { 196 196 unsigned long p4d; 197 197 198 - if (!pgtable_l5_enabled) 198 + if (!pgtable_l5_enabled()) 199 199 return (p4d_t *)pgd; 200 200 201 201 p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; ··· 282 282 for (i = 0; i < PTRS_PER_PUD; i++) 283 283 kasan_zero_pud[i] = __pud(pud_val); 284 284 285 - for (i = 0; pgtable_l5_enabled && i < PTRS_PER_P4D; i++) 285 + for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++) 286 286 kasan_zero_p4d[i] = __p4d(p4d_val); 287 287 288 288 kasan_map_early_shadow(early_top_pgt); ··· 313 313 * bunch of things like kernel code, modules, EFI mapping, etc. 314 314 * We need to take extra steps to not overwrite them. 315 315 */ 316 - if (pgtable_l5_enabled) { 316 + if (pgtable_l5_enabled()) { 317 317 void *ptr; 318 318 319 319 ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+4 -4
arch/x86/mm/kaslr.c
··· 78 78 struct rnd_state rand_state; 79 79 unsigned long remain_entropy; 80 80 81 - vaddr_start = pgtable_l5_enabled ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; 81 + vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; 82 82 vaddr = vaddr_start; 83 83 84 84 /* ··· 124 124 */ 125 125 entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i); 126 126 prandom_bytes_state(&rand_state, &rand, sizeof(rand)); 127 - if (pgtable_l5_enabled) 127 + if (pgtable_l5_enabled()) 128 128 entropy = (rand % (entropy + 1)) & P4D_MASK; 129 129 else 130 130 entropy = (rand % (entropy + 1)) & PUD_MASK; ··· 136 136 * randomization alignment. 137 137 */ 138 138 vaddr += get_padding(&kaslr_regions[i]); 139 - if (pgtable_l5_enabled) 139 + if (pgtable_l5_enabled()) 140 140 vaddr = round_up(vaddr + 1, P4D_SIZE); 141 141 else 142 142 vaddr = round_up(vaddr + 1, PUD_SIZE); ··· 212 212 return; 213 213 } 214 214 215 - if (pgtable_l5_enabled) 215 + if (pgtable_l5_enabled()) 216 216 init_trampoline_p4d(); 217 217 else 218 218 init_trampoline_pud();
+1 -1
arch/x86/mm/tlb.c
··· 157 157 unsigned long sp = current_stack_pointer; 158 158 pgd_t *pgd = pgd_offset(mm, sp); 159 159 160 - if (pgtable_l5_enabled) { 160 + if (pgtable_l5_enabled()) { 161 161 if (unlikely(pgd_none(*pgd))) { 162 162 pgd_t *pgd_ref = pgd_offset_k(sp); 163 163
+1 -1
arch/x86/platform/efi/efi_64.c
··· 225 225 226 226 pud = pud_alloc(&init_mm, p4d, EFI_VA_END); 227 227 if (!pud) { 228 - if (pgtable_l5_enabled) 228 + if (pgtable_l5_enabled()) 229 229 free_page((unsigned long) pgd_page_vaddr(*pgd)); 230 230 free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER); 231 231 return -ENOMEM;
+1 -1
arch/x86/power/hibernate_64.c
··· 72 72 * tables used by the image kernel. 73 73 */ 74 74 75 - if (pgtable_l5_enabled) { 75 + if (pgtable_l5_enabled()) { 76 76 p4d = (p4d_t *)get_safe_page(GFP_ATOMIC); 77 77 if (!p4d) 78 78 return -ENOMEM;