Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mm: Make virtual memory layout dynamic for CONFIG_X86_5LEVEL=y

We need to be able to adjust virtual memory layout at runtime to be able
to switch between 4- and 5-level paging at boot-time.

KASLR already has movable __VMALLOC_BASE, __VMEMMAP_BASE and __PAGE_OFFSET.
Let's re-use it.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214111656.88514-4-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Kirill A. Shutemov; committed by Ingo Molnar.
eedb92ab 02390b87

+25 -16
+8
arch/x86/Kconfig
@@ -1430,6 +1430,7 @@
 
 config X86_5LEVEL
 	bool "Enable 5-level page tables support"
+	select DYNAMIC_MEMORY_LAYOUT
 	depends on X86_64
 	---help---
 	  5-level paging enables access to larger address space:
@@ -2144,10 +2143,17 @@
 
 	  Don't change this unless you know what you are doing.
 
+config DYNAMIC_MEMORY_LAYOUT
+	bool
+	---help---
+	  This option makes base addresses of vmalloc and vmemmap as well as
+	  __PAGE_OFFSET movable during boot.
+
 config RANDOMIZE_MEMORY
 	bool "Randomize the kernel memory sections"
 	depends on X86_64
 	depends on RANDOMIZE_BASE
+	select DYNAMIC_MEMORY_LAYOUT
 	default RANDOMIZE_BASE
 	---help---
 	  Randomizes the base virtual address of kernel memory sections
-4
arch/x86/include/asm/kaslr.h
@@ -5,10 +5,6 @@
 unsigned long kaslr_get_random_long(const char *purpose);
 
 #ifdef CONFIG_RANDOMIZE_MEMORY
-extern unsigned long page_offset_base;
-extern unsigned long vmalloc_base;
-extern unsigned long vmemmap_base;
-
 void kernel_randomize_memory(void);
 #else
 static inline void kernel_randomize_memory(void) { }
+4
arch/x86/include/asm/page_64.h
@@ -11,6 +11,10 @@
 extern unsigned long max_pfn;
 extern unsigned long phys_base;
 
+extern unsigned long page_offset_base;
+extern unsigned long vmalloc_base;
+extern unsigned long vmemmap_base;
+
 static inline unsigned long __phys_addr_nodebug(unsigned long x)
 {
 	unsigned long y = x - __START_KERNEL_map;
+2 -2
arch/x86/include/asm/page_64_types.h
@@ -43,11 +43,11 @@
 #define __PAGE_OFFSET_BASE	_AC(0xffff880000000000, UL)
 #endif
 
-#ifdef CONFIG_RANDOMIZE_MEMORY
+#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 #define __PAGE_OFFSET		page_offset_base
 #else
 #define __PAGE_OFFSET		__PAGE_OFFSET_BASE
-#endif /* CONFIG_RANDOMIZE_MEMORY */
+#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
 
 #define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
 
+2 -2
arch/x86/include/asm/pgtable_64_types.h
@@ -100,13 +100,13 @@
 # define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #endif
 
-#ifdef CONFIG_RANDOMIZE_MEMORY
+#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 # define VMALLOC_START		vmalloc_base
 # define VMEMMAP_START		vmemmap_base
 #else
 # define VMALLOC_START		__VMALLOC_BASE
 # define VMEMMAP_START		__VMEMMAP_BASE
-#endif /* CONFIG_RANDOMIZE_MEMORY */
+#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
 
 #define VMALLOC_END		(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
 
+9
arch/x86/kernel/head64.c
@@ -39,6 +39,15 @@
 static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
+#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
+unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE;
+EXPORT_SYMBOL(page_offset_base);
+unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE;
+EXPORT_SYMBOL(vmalloc_base);
+unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE;
+EXPORT_SYMBOL(vmemmap_base);
+#endif
+
 #define __head	__section(.head.text)
 
 static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
-8
arch/x86/mm/kaslr.c
@@ -43,14 +43,6 @@
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
 static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
 
-/* Default values */
-unsigned long page_offset_base = __PAGE_OFFSET_BASE;
-EXPORT_SYMBOL(page_offset_base);
-unsigned long vmalloc_base = __VMALLOC_BASE;
-EXPORT_SYMBOL(vmalloc_base);
-unsigned long vmemmap_base = __VMEMMAP_BASE;
-EXPORT_SYMBOL(vmemmap_base);
-
 /*
  * Memory regions randomized by KASLR (except modules that use a separate logic
  * earlier during boot). The list is ordered based on virtual addresses. This