Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mm/KASLR: Propagate KASLR status to kernel proper

Commit:

e2b32e678513 ("x86, kaslr: randomize module base load address")

made module base address randomization unconditional and didn't take into
account KASLR being disabled due to CONFIG_HIBERNATION or the command line
option "nokaslr". For more info see (now reverted) commit:

f47233c2d34f ("x86/mm/ASLR: Propagate base load address calculation")

In order to propagate KASLR status to kernel proper, we need a single bit
in boot_params.hdr.loadflags and we've chosen bit 1 thus leaving the
top-down allocated bits for bits supposed to be used by the bootloader.

Originally-From: Jiri Kosina <jkosina@suse.cz>
Suggested-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Borislav Petkov and committed by
Ingo Molnar
78cac48c 47091e3c

+35 -17
+6
Documentation/x86/boot.txt
··· 406 406 - If 0, the protected-mode code is loaded at 0x10000. 407 407 - If 1, the protected-mode code is loaded at 0x100000. 408 408 409 + Bit 1 (kernel internal): KASLR_FLAG 410 + - Used internally by the compressed kernel to communicate 411 + KASLR status to kernel proper. 412 + If 1, KASLR enabled. 413 + If 0, KASLR disabled. 414 + 409 415 Bit 5 (write): QUIET_FLAG 410 416 - If 0, print early messages. 411 417 - If 1, suppress early messages.
+4 -1
arch/x86/boot/compressed/aslr.c
··· 295 295 return slots_fetch_random(); 296 296 } 297 297 298 - unsigned char *choose_kernel_location(unsigned char *input, 298 + unsigned char *choose_kernel_location(struct boot_params *boot_params, 299 + unsigned char *input, 299 300 unsigned long input_size, 300 301 unsigned char *output, 301 302 unsigned long output_size) ··· 315 314 goto out; 316 315 } 317 316 #endif 317 + 318 + boot_params->hdr.loadflags |= KASLR_FLAG; 318 319 319 320 /* Record the various known unsafe memory ranges. */ 320 321 mem_avoid_init((unsigned long)input, input_size,
+4 -1
arch/x86/boot/compressed/misc.c
··· 377 377 378 378 real_mode = rmode; 379 379 380 + /* Clear it for solely in-kernel use */ 381 + real_mode->hdr.loadflags &= ~KASLR_FLAG; 382 + 380 383 sanitize_boot_params(real_mode); 381 384 382 385 if (real_mode->screen_info.orig_video_mode == 7) { ··· 404 401 * the entire decompressed kernel plus relocation table, or the 405 402 * entire decompressed kernel plus .bss and .brk sections. 406 403 */ 407 - output = choose_kernel_location(input_data, input_len, output, 404 + output = choose_kernel_location(real_mode, input_data, input_len, output, 408 405 output_len > run_size ? output_len 409 406 : run_size); 410 407
+4 -2
arch/x86/boot/compressed/misc.h
··· 57 57 58 58 #if CONFIG_RANDOMIZE_BASE 59 59 /* aslr.c */ 60 - unsigned char *choose_kernel_location(unsigned char *input, 60 + unsigned char *choose_kernel_location(struct boot_params *boot_params, 61 + unsigned char *input, 61 62 unsigned long input_size, 62 63 unsigned char *output, 63 64 unsigned long output_size); ··· 66 65 bool has_cpuflag(int flag); 67 66 #else 68 67 static inline 69 - unsigned char *choose_kernel_location(unsigned char *input, 68 + unsigned char *choose_kernel_location(struct boot_params *boot_params, 69 + unsigned char *input, 70 70 unsigned long input_size, 71 71 unsigned char *output, 72 72 unsigned long output_size)
+5
arch/x86/include/asm/setup.h
··· 66 66 */ 67 67 extern struct boot_params boot_params; 68 68 69 + static inline bool kaslr_enabled(void) 70 + { 71 + return !!(boot_params.hdr.loadflags & KASLR_FLAG); 72 + } 73 + 69 74 /* 70 75 * Do NOT EVER look at the BIOS memory size location. 71 76 * It does not work on many machines.
+1
arch/x86/include/uapi/asm/bootparam.h
··· 15 15 16 16 /* loadflags */ 17 17 #define LOADED_HIGH (1<<0) 18 + #define KASLR_FLAG (1<<1) 18 19 #define QUIET_FLAG (1<<5) 19 20 #define KEEP_SEGMENTS (1<<6) 20 21 #define CAN_USE_HEAP (1<<7)
+2 -9
arch/x86/kernel/module.c
··· 33 33 34 34 #include <asm/page.h> 35 35 #include <asm/pgtable.h> 36 + #include <asm/setup.h> 36 37 37 38 #if 0 38 39 #define DEBUGP(fmt, ...) \ ··· 48 47 49 48 #ifdef CONFIG_RANDOMIZE_BASE 50 49 static unsigned long module_load_offset; 51 - static int randomize_modules = 1; 52 50 53 51 /* Mutex protects the module_load_offset. */ 54 52 static DEFINE_MUTEX(module_kaslr_mutex); 55 53 56 - static int __init parse_nokaslr(char *p) 57 - { 58 - randomize_modules = 0; 59 - return 0; 60 - } 61 - early_param("nokaslr", parse_nokaslr); 62 - 63 54 static unsigned long int get_module_load_offset(void) 64 55 { 65 - if (randomize_modules) { 56 + if (kaslr_enabled()) { 66 57 mutex_lock(&module_kaslr_mutex); 67 58 /* 68 59 * Calculate the module_load_offset the first time this
+9 -4
arch/x86/kernel/setup.c
··· 832 832 static int 833 833 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) 834 834 { 835 - pr_emerg("Kernel Offset: 0x%lx from 0x%lx " 836 - "(relocation range: 0x%lx-0x%lx)\n", 837 - (unsigned long)&_text - __START_KERNEL, __START_KERNEL, 838 - __START_KERNEL_map, MODULES_VADDR-1); 835 + if (kaslr_enabled()) { 836 + pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", 837 + (unsigned long)&_text - __START_KERNEL, 838 + __START_KERNEL, 839 + __START_KERNEL_map, 840 + MODULES_VADDR-1); 841 + } else { 842 + pr_emerg("Kernel Offset: disabled\n"); 843 + } 839 844 840 845 return 0; 841 846 }