Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mm: simplify init_trampoline() and surrounding logic

There are three cases for the trampoline initialization:
* 32-bit does nothing
* 64-bit with kaslr disabled simply copies a PGD entry from the direct map
to the trampoline PGD
* 64-bit with kaslr enabled maps the real mode trampoline at PUD level

These cases are currently differentiated by a bunch of ifdefs inside
arch/x86/include/asm/pgtable.h, and the case of 64-bit with kaslr enabled uses the
pgd_index() helper.

Replacing the ifdefs with a static function in arch/x86/mm/init.c gives
clearer code and allows moving pgd_index() to the generic implementation
in include/linux/pgtable.h

[rppt@linux.ibm.com: take CONFIG_RANDOMIZE_MEMORY into account in kaslr_enabled()]
Link: http://lkml.kernel.org/r/20200525104045.GB13212@linux.ibm.com

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-8-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mike Rapoport and committed by Linus Torvalds.
88107d33 1bcdc68d

+37 -47
+2
arch/x86/include/asm/kaslr.h
··· 6 6 7 7 #ifdef CONFIG_RANDOMIZE_MEMORY 8 8 void kernel_randomize_memory(void); 9 + void init_trampoline_kaslr(void); 9 10 #else 10 11 static inline void kernel_randomize_memory(void) { } 12 + static inline void init_trampoline_kaslr(void) {} 11 13 #endif /* CONFIG_RANDOMIZE_MEMORY */ 12 14 13 15 #endif
+1 -14
arch/x86/include/asm/pgtable.h
··· 1071 1071 void early_alloc_pgt_buf(void); 1072 1072 extern void memblock_find_dma_reserve(void); 1073 1073 1074 + 1074 1075 #ifdef CONFIG_X86_64 1075 - /* Realmode trampoline initialization. */ 1076 1076 extern pgd_t trampoline_pgd_entry; 1077 - static inline void __meminit init_trampoline_default(void) 1078 - { 1079 - /* Default trampoline pgd value */ 1080 - trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)]; 1081 - } 1082 1077 1083 1078 void __init poking_init(void); 1084 1079 1085 1080 unsigned long init_memory_mapping(unsigned long start, 1086 1081 unsigned long end, pgprot_t prot); 1087 - 1088 - # ifdef CONFIG_RANDOMIZE_MEMORY 1089 - void __meminit init_trampoline(void); 1090 - # else 1091 - # define init_trampoline init_trampoline_default 1092 - # endif 1093 - #else 1094 - static inline void init_trampoline(void) { } 1095 1082 #endif 1096 1083 1097 1084 /* local pte updates need not use xchg for locking */
+11 -1
arch/x86/include/asm/setup.h
··· 75 75 76 76 static inline bool kaslr_enabled(void) 77 77 { 78 - return !!(boot_params.hdr.loadflags & KASLR_FLAG); 78 + return IS_ENABLED(CONFIG_RANDOMIZE_MEMORY) && 79 + !!(boot_params.hdr.loadflags & KASLR_FLAG); 80 + } 81 + 82 + /* 83 + * Apply no randomization if KASLR was disabled at boot or if KASAN 84 + * is enabled. KASAN shadow mappings rely on regions being PGD aligned. 85 + */ 86 + static inline bool kaslr_memory_enabled(void) 87 + { 88 + return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN); 79 89 } 80 90 81 91 static inline unsigned long kaslr_offset(void)
+22
arch/x86/mm/init.c
··· 680 680 } 681 681 } 682 682 683 + /* 684 + * The real mode trampoline, which is required for bootstrapping CPUs 685 + * occupies only a small area under the low 1MB. See reserve_real_mode() 686 + * for details. 687 + * 688 + * If KASLR is disabled the first PGD entry of the direct mapping is copied 689 + * to map the real mode trampoline. 690 + * 691 + * If KASLR is enabled, copy only the PUD which covers the low 1MB 692 + * area. This limits the randomization granularity to 1GB for both 4-level 693 + * and 5-level paging. 694 + */ 695 + static void __init init_trampoline(void) 696 + { 697 + #ifdef CONFIG_X86_64 698 + if (!kaslr_memory_enabled()) 699 + trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)]; 700 + else 701 + init_trampoline_kaslr(); 702 + #endif 703 + } 704 + 683 705 void __init init_mem_mapping(void) 684 706 { 685 707 unsigned long end;
+1 -32
arch/x86/mm/kaslr.c
··· 61 61 return (region->size_tb << TB_SHIFT); 62 62 } 63 63 64 - /* 65 - * Apply no randomization if KASLR was disabled at boot or if KASAN 66 - * is enabled. KASAN shadow mappings rely on regions being PGD aligned. 67 - */ 68 - static inline bool kaslr_memory_enabled(void) 69 - { 70 - return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN); 71 - } 72 - 73 64 /* Initialize base and padding for each memory region randomized with KASLR */ 74 65 void __init kernel_randomize_memory(void) 75 66 { ··· 139 148 } 140 149 } 141 150 142 - static void __meminit init_trampoline_pud(void) 151 + void __meminit init_trampoline_kaslr(void) 143 152 { 144 153 pud_t *pud_page_tramp, *pud, *pud_tramp; 145 154 p4d_t *p4d_page_tramp, *p4d, *p4d_tramp; ··· 179 188 set_pgd(&trampoline_pgd_entry, 180 189 __pgd(_KERNPG_TABLE | __pa(pud_page_tramp))); 181 190 } 182 - } 183 - 184 - /* 185 - * The real mode trampoline, which is required for bootstrapping CPUs 186 - * occupies only a small area under the low 1MB. See reserve_real_mode() 187 - * for details. 188 - * 189 - * If KASLR is disabled the first PGD entry of the direct mapping is copied 190 - * to map the real mode trampoline. 191 - * 192 - * If KASLR is enabled, copy only the PUD which covers the low 1MB 193 - * area. This limits the randomization granularity to 1GB for both 4-level 194 - * and 5-level paging. 195 - */ 196 - void __meminit init_trampoline(void) 197 - { 198 - if (!kaslr_memory_enabled()) { 199 - init_trampoline_default(); 200 - return; 201 - } 202 - 203 - init_trampoline_pud(); 204 191 }