Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 fixes from Ingo Molnar:
"This contains:

- EFI fixes
- a boot printout fix
- ASLR/kASLR fixes
- intel microcode driver fixes
- other misc fixes

Most of the linecount comes from an EFI revert"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm/ASLR: Avoid PAGE_SIZE redefinition for UML subarch
x86/microcode/intel: Handle truncated microcode images more robustly
x86/microcode/intel: Guard against stack overflow in the loader
x86, mm/ASLR: Fix stack randomization on 64-bit systems
x86/mm/init: Fix incorrect page size in init_memory_mapping() printks
x86/mm/ASLR: Propagate base load address calculation
Documentation/x86: Fix path in zero-page.txt
x86/apic: Fix the devicetree build in certain configs
Revert "efi/libstub: Call get_memory_map() to obtain map and desc sizes"
x86/efi: Avoid triple faults during EFI mixed mode calls

+419 -239
+1 -1
Documentation/x86/zero-page.txt
··· 3 3 real-mode setup code of the kernel. References/settings to it mainly 4 4 are in: 5 5 6 - arch/x86/include/asm/bootparam.h 6 + arch/x86/include/uapi/asm/bootparam.h 7 7 8 8 9 9 Offset Proto Name Meaning
+1
arch/x86/boot/compressed/Makefile
··· 51 51 52 52 vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ 53 53 $(objtree)/drivers/firmware/efi/libstub/lib.a 54 + vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o 54 55 55 56 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE 56 57 $(call if_changed,ld)
+33 -1
arch/x86/boot/compressed/aslr.c
··· 14 14 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" 15 15 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; 16 16 17 + struct kaslr_setup_data { 18 + __u64 next; 19 + __u32 type; 20 + __u32 len; 21 + __u8 data[1]; 22 + } kaslr_setup_data; 23 + 17 24 #define I8254_PORT_CONTROL 0x43 18 25 #define I8254_PORT_COUNTER0 0x40 19 26 #define I8254_CMD_READBACK 0xC0 ··· 302 295 return slots_fetch_random(); 303 296 } 304 297 305 - unsigned char *choose_kernel_location(unsigned char *input, 298 + static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) 299 + { 300 + struct setup_data *data; 301 + 302 + kaslr_setup_data.type = SETUP_KASLR; 303 + kaslr_setup_data.len = 1; 304 + kaslr_setup_data.next = 0; 305 + kaslr_setup_data.data[0] = enabled; 306 + 307 + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; 308 + 309 + while (data && data->next) 310 + data = (struct setup_data *)(unsigned long)data->next; 311 + 312 + if (data) 313 + data->next = (unsigned long)&kaslr_setup_data; 314 + else 315 + params->hdr.setup_data = (unsigned long)&kaslr_setup_data; 316 + 317 + } 318 + 319 + unsigned char *choose_kernel_location(struct boot_params *params, 320 + unsigned char *input, 306 321 unsigned long input_size, 307 322 unsigned char *output, 308 323 unsigned long output_size) ··· 335 306 #ifdef CONFIG_HIBERNATION 336 307 if (!cmdline_find_option_bool("kaslr")) { 337 308 debug_putstr("KASLR disabled by default...\n"); 309 + add_kaslr_setup_data(params, 0); 338 310 goto out; 339 311 } 340 312 #else 341 313 if (cmdline_find_option_bool("nokaslr")) { 342 314 debug_putstr("KASLR disabled by cmdline...\n"); 315 + add_kaslr_setup_data(params, 0); 343 316 goto out; 344 317 } 345 318 #endif 319 + add_kaslr_setup_data(params, 1); 346 320 347 321 /* Record the various known unsafe memory ranges. */ 348 322 mem_avoid_init((unsigned long)input, input_size,
-25
arch/x86/boot/compressed/efi_stub_64.S
··· 3 3 #include <asm/processor-flags.h> 4 4 5 5 #include "../../platform/efi/efi_stub_64.S" 6 - 7 - #ifdef CONFIG_EFI_MIXED 8 - .code64 9 - .text 10 - ENTRY(efi64_thunk) 11 - push %rbp 12 - push %rbx 13 - 14 - subq $16, %rsp 15 - leaq efi_exit32(%rip), %rax 16 - movl %eax, 8(%rsp) 17 - leaq efi_gdt64(%rip), %rax 18 - movl %eax, 4(%rsp) 19 - movl %eax, 2(%rax) /* Fixup the gdt base address */ 20 - leaq efi32_boot_gdt(%rip), %rax 21 - movl %eax, (%rsp) 22 - 23 - call __efi64_thunk 24 - 25 - addq $16, %rsp 26 - pop %rbx 27 - pop %rbp 28 - ret 29 - ENDPROC(efi64_thunk) 30 - #endif /* CONFIG_EFI_MIXED */
+196
arch/x86/boot/compressed/efi_thunk_64.S
··· 1 + /* 2 + * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming 3 + * 4 + * Early support for invoking 32-bit EFI services from a 64-bit kernel. 5 + * 6 + * Because this thunking occurs before ExitBootServices() we have to 7 + * restore the firmware's 32-bit GDT before we make EFI service calls, 8 + * since the firmware's 32-bit IDT is still currently installed and it 9 + * needs to be able to service interrupts. 10 + * 11 + * On the plus side, we don't have to worry about mangling 64-bit 12 + * addresses into 32-bits because we're executing with an identity 13 + * mapped pagetable and haven't transitioned to 64-bit virtual addresses 14 + * yet. 15 + */ 16 + 17 + #include <linux/linkage.h> 18 + #include <asm/msr.h> 19 + #include <asm/page_types.h> 20 + #include <asm/processor-flags.h> 21 + #include <asm/segment.h> 22 + 23 + .code64 24 + .text 25 + ENTRY(efi64_thunk) 26 + push %rbp 27 + push %rbx 28 + 29 + subq $8, %rsp 30 + leaq efi_exit32(%rip), %rax 31 + movl %eax, 4(%rsp) 32 + leaq efi_gdt64(%rip), %rax 33 + movl %eax, (%rsp) 34 + movl %eax, 2(%rax) /* Fixup the gdt base address */ 35 + 36 + movl %ds, %eax 37 + push %rax 38 + movl %es, %eax 39 + push %rax 40 + movl %ss, %eax 41 + push %rax 42 + 43 + /* 44 + * Convert x86-64 ABI params to i386 ABI 45 + */ 46 + subq $32, %rsp 47 + movl %esi, 0x0(%rsp) 48 + movl %edx, 0x4(%rsp) 49 + movl %ecx, 0x8(%rsp) 50 + movq %r8, %rsi 51 + movl %esi, 0xc(%rsp) 52 + movq %r9, %rsi 53 + movl %esi, 0x10(%rsp) 54 + 55 + sgdt save_gdt(%rip) 56 + 57 + leaq 1f(%rip), %rbx 58 + movq %rbx, func_rt_ptr(%rip) 59 + 60 + /* 61 + * Switch to gdt with 32-bit segments. This is the firmware GDT 62 + * that was installed when the kernel started executing. This 63 + * pointer was saved at the EFI stub entry point in head_64.S. 
64 + */ 65 + leaq efi32_boot_gdt(%rip), %rax 66 + lgdt (%rax) 67 + 68 + pushq $__KERNEL_CS 69 + leaq efi_enter32(%rip), %rax 70 + pushq %rax 71 + lretq 72 + 73 + 1: addq $32, %rsp 74 + 75 + lgdt save_gdt(%rip) 76 + 77 + pop %rbx 78 + movl %ebx, %ss 79 + pop %rbx 80 + movl %ebx, %es 81 + pop %rbx 82 + movl %ebx, %ds 83 + 84 + /* 85 + * Convert 32-bit status code into 64-bit. 86 + */ 87 + test %rax, %rax 88 + jz 1f 89 + movl %eax, %ecx 90 + andl $0x0fffffff, %ecx 91 + andl $0xf0000000, %eax 92 + shl $32, %rax 93 + or %rcx, %rax 94 + 1: 95 + addq $8, %rsp 96 + pop %rbx 97 + pop %rbp 98 + ret 99 + ENDPROC(efi64_thunk) 100 + 101 + ENTRY(efi_exit32) 102 + movq func_rt_ptr(%rip), %rax 103 + push %rax 104 + mov %rdi, %rax 105 + ret 106 + ENDPROC(efi_exit32) 107 + 108 + .code32 109 + /* 110 + * EFI service pointer must be in %edi. 111 + * 112 + * The stack should represent the 32-bit calling convention. 113 + */ 114 + ENTRY(efi_enter32) 115 + movl $__KERNEL_DS, %eax 116 + movl %eax, %ds 117 + movl %eax, %es 118 + movl %eax, %ss 119 + 120 + /* Reload pgtables */ 121 + movl %cr3, %eax 122 + movl %eax, %cr3 123 + 124 + /* Disable paging */ 125 + movl %cr0, %eax 126 + btrl $X86_CR0_PG_BIT, %eax 127 + movl %eax, %cr0 128 + 129 + /* Disable long mode via EFER */ 130 + movl $MSR_EFER, %ecx 131 + rdmsr 132 + btrl $_EFER_LME, %eax 133 + wrmsr 134 + 135 + call *%edi 136 + 137 + /* We must preserve return value */ 138 + movl %eax, %edi 139 + 140 + /* 141 + * Some firmware will return with interrupts enabled. Be sure to 142 + * disable them before we switch GDTs. 
143 + */ 144 + cli 145 + 146 + movl 56(%esp), %eax 147 + movl %eax, 2(%eax) 148 + lgdtl (%eax) 149 + 150 + movl %cr4, %eax 151 + btsl $(X86_CR4_PAE_BIT), %eax 152 + movl %eax, %cr4 153 + 154 + movl %cr3, %eax 155 + movl %eax, %cr3 156 + 157 + movl $MSR_EFER, %ecx 158 + rdmsr 159 + btsl $_EFER_LME, %eax 160 + wrmsr 161 + 162 + xorl %eax, %eax 163 + lldt %ax 164 + 165 + movl 60(%esp), %eax 166 + pushl $__KERNEL_CS 167 + pushl %eax 168 + 169 + /* Enable paging */ 170 + movl %cr0, %eax 171 + btsl $X86_CR0_PG_BIT, %eax 172 + movl %eax, %cr0 173 + lret 174 + ENDPROC(efi_enter32) 175 + 176 + .data 177 + .balign 8 178 + .global efi32_boot_gdt 179 + efi32_boot_gdt: .word 0 180 + .quad 0 181 + 182 + save_gdt: .word 0 183 + .quad 0 184 + func_rt_ptr: .quad 0 185 + 186 + .global efi_gdt64 187 + efi_gdt64: 188 + .word efi_gdt64_end - efi_gdt64 189 + .long 0 /* Filled out by user */ 190 + .word 0 191 + .quad 0x0000000000000000 /* NULL descriptor */ 192 + .quad 0x00af9a000000ffff /* __KERNEL_CS */ 193 + .quad 0x00cf92000000ffff /* __KERNEL_DS */ 194 + .quad 0x0080890000000000 /* TS descriptor */ 195 + .quad 0x0000000000000000 /* TS continued */ 196 + efi_gdt64_end:
+2 -1
arch/x86/boot/compressed/misc.c
··· 401 401 * the entire decompressed kernel plus relocation table, or the 402 402 * entire decompressed kernel plus .bss and .brk sections. 403 403 */ 404 - output = choose_kernel_location(input_data, input_len, output, 404 + output = choose_kernel_location(real_mode, input_data, input_len, 405 + output, 405 406 output_len > run_size ? output_len 406 407 : run_size); 407 408
+4 -2
arch/x86/boot/compressed/misc.h
··· 57 57 58 58 #if CONFIG_RANDOMIZE_BASE 59 59 /* aslr.c */ 60 - unsigned char *choose_kernel_location(unsigned char *input, 60 + unsigned char *choose_kernel_location(struct boot_params *params, 61 + unsigned char *input, 61 62 unsigned long input_size, 62 63 unsigned char *output, 63 64 unsigned long output_size); ··· 66 65 bool has_cpuflag(int flag); 67 66 #else 68 67 static inline 69 - unsigned char *choose_kernel_location(unsigned char *input, 68 + unsigned char *choose_kernel_location(struct boot_params *params, 69 + unsigned char *input, 70 70 unsigned long input_size, 71 71 unsigned char *output, 72 72 unsigned long output_size)
+8
arch/x86/include/asm/apic.h
··· 213 213 extern void setup_boot_APIC_clock(void); 214 214 extern void setup_secondary_APIC_clock(void); 215 215 extern int APIC_init_uniprocessor(void); 216 + 217 + #ifdef CONFIG_X86_64 218 + static inline int apic_force_enable(unsigned long addr) 219 + { 220 + return -1; 221 + } 222 + #else 216 223 extern int apic_force_enable(unsigned long addr); 224 + #endif 217 225 218 226 extern int apic_bsp_setup(bool upmode); 219 227 extern void apic_ap_setup(void);
+2
arch/x86/include/asm/page_types.h
··· 51 51 extern unsigned long max_low_pfn_mapped; 52 52 extern unsigned long max_pfn_mapped; 53 53 54 + extern bool kaslr_enabled; 55 + 54 56 static inline phys_addr_t get_max_mapped(void) 55 57 { 56 58 return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+1
arch/x86/include/uapi/asm/bootparam.h
··· 7 7 #define SETUP_DTB 2 8 8 #define SETUP_PCI 3 9 9 #define SETUP_EFI 4 10 + #define SETUP_KASLR 5 10 11 11 12 /* ram_size flags */ 12 13 #define RAMDISK_IMAGE_START_MASK 0x07FF
+5
arch/x86/kernel/cpu/microcode/intel.c
··· 196 196 struct microcode_header_intel mc_header; 197 197 unsigned int mc_size; 198 198 199 + if (leftover < sizeof(mc_header)) { 200 + pr_err("error! Truncated header in microcode data file\n"); 201 + break; 202 + } 203 + 199 204 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header))) 200 205 break; 201 206
+5 -1
arch/x86/kernel/cpu/microcode/intel_early.c
··· 321 321 unsigned int mc_saved_count = mc_saved_data->mc_saved_count; 322 322 int i; 323 323 324 - while (leftover) { 324 + while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { 325 + 326 + if (leftover < sizeof(mc_header)) 327 + break; 328 + 325 329 mc_header = (struct microcode_header_intel *)ucode_ptr; 326 330 327 331 mc_size = get_totalsize(mc_header);
+1 -9
arch/x86/kernel/module.c
··· 47 47 48 48 #ifdef CONFIG_RANDOMIZE_BASE 49 49 static unsigned long module_load_offset; 50 - static int randomize_modules = 1; 51 50 52 51 /* Mutex protects the module_load_offset. */ 53 52 static DEFINE_MUTEX(module_kaslr_mutex); 54 53 55 - static int __init parse_nokaslr(char *p) 56 - { 57 - randomize_modules = 0; 58 - return 0; 59 - } 60 - early_param("nokaslr", parse_nokaslr); 61 - 62 54 static unsigned long int get_module_load_offset(void) 63 55 { 64 - if (randomize_modules) { 56 + if (kaslr_enabled) { 65 57 mutex_lock(&module_kaslr_mutex); 66 58 /* 67 59 * Calculate the module_load_offset the first time this
+18 -4
arch/x86/kernel/setup.c
··· 122 122 unsigned long max_low_pfn_mapped; 123 123 unsigned long max_pfn_mapped; 124 124 125 + bool __read_mostly kaslr_enabled = false; 126 + 125 127 #ifdef CONFIG_DMI 126 128 RESERVE_BRK(dmi_alloc, 65536); 127 129 #endif ··· 427 425 } 428 426 #endif /* CONFIG_BLK_DEV_INITRD */ 429 427 428 + static void __init parse_kaslr_setup(u64 pa_data, u32 data_len) 429 + { 430 + kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data)); 431 + } 432 + 430 433 static void __init parse_setup_data(void) 431 434 { 432 435 struct setup_data *data; ··· 456 449 break; 457 450 case SETUP_EFI: 458 451 parse_efi_setup(pa_data, data_len); 452 + break; 453 + case SETUP_KASLR: 454 + parse_kaslr_setup(pa_data, data_len); 459 455 break; 460 456 default: 461 457 break; ··· 842 832 static int 843 833 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) 844 834 { 845 - pr_emerg("Kernel Offset: 0x%lx from 0x%lx " 846 - "(relocation range: 0x%lx-0x%lx)\n", 847 - (unsigned long)&_text - __START_KERNEL, __START_KERNEL, 848 - __START_KERNEL_map, MODULES_VADDR-1); 835 + if (kaslr_enabled) 836 + pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", 837 + (unsigned long)&_text - __START_KERNEL, 838 + __START_KERNEL, 839 + __START_KERNEL_map, 840 + MODULES_VADDR-1); 841 + else 842 + pr_emerg("Kernel Offset: disabled\n"); 849 843 850 844 return 0; 851 845 }
+26 -2
arch/x86/mm/init.c
··· 238 238 } 239 239 } 240 240 241 + static const char *page_size_string(struct map_range *mr) 242 + { 243 + static const char str_1g[] = "1G"; 244 + static const char str_2m[] = "2M"; 245 + static const char str_4m[] = "4M"; 246 + static const char str_4k[] = "4k"; 247 + 248 + if (mr->page_size_mask & (1<<PG_LEVEL_1G)) 249 + return str_1g; 250 + /* 251 + * 32-bit without PAE has a 4M large page size. 252 + * PG_LEVEL_2M is misnamed, but we can at least 253 + * print out the right size in the string. 254 + */ 255 + if (IS_ENABLED(CONFIG_X86_32) && 256 + !IS_ENABLED(CONFIG_X86_PAE) && 257 + mr->page_size_mask & (1<<PG_LEVEL_2M)) 258 + return str_4m; 259 + 260 + if (mr->page_size_mask & (1<<PG_LEVEL_2M)) 261 + return str_2m; 262 + 263 + return str_4k; 264 + } 265 + 241 266 static int __meminit split_mem_range(struct map_range *mr, int nr_range, 242 267 unsigned long start, 243 268 unsigned long end) ··· 358 333 for (i = 0; i < nr_range; i++) 359 334 printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", 360 335 mr[i].start, mr[i].end - 1, 361 - (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":( 362 - (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k")); 336 + page_size_string(&mr[i])); 363 337 364 338 return nr_range; 365 339 }
+3 -3
arch/x86/mm/mmap.c
··· 35 35 .flags = -1, 36 36 }; 37 37 38 - static unsigned int stack_maxrandom_size(void) 38 + static unsigned long stack_maxrandom_size(void) 39 39 { 40 - unsigned int max = 0; 40 + unsigned long max = 0; 41 41 if ((current->flags & PF_RANDOMIZE) && 42 42 !(current->personality & ADDR_NO_RANDOMIZE)) { 43 - max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; 43 + max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; 44 44 } 45 45 46 46 return max;
-161
arch/x86/platform/efi/efi_stub_64.S
··· 91 91 ret 92 92 ENDPROC(efi_call) 93 93 94 - #ifdef CONFIG_EFI_MIXED 95 - 96 - /* 97 - * We run this function from the 1:1 mapping. 98 - * 99 - * This function must be invoked with a 1:1 mapped stack. 100 - */ 101 - ENTRY(__efi64_thunk) 102 - movl %ds, %eax 103 - push %rax 104 - movl %es, %eax 105 - push %rax 106 - movl %ss, %eax 107 - push %rax 108 - 109 - subq $32, %rsp 110 - movl %esi, 0x0(%rsp) 111 - movl %edx, 0x4(%rsp) 112 - movl %ecx, 0x8(%rsp) 113 - movq %r8, %rsi 114 - movl %esi, 0xc(%rsp) 115 - movq %r9, %rsi 116 - movl %esi, 0x10(%rsp) 117 - 118 - sgdt save_gdt(%rip) 119 - 120 - leaq 1f(%rip), %rbx 121 - movq %rbx, func_rt_ptr(%rip) 122 - 123 - /* Switch to gdt with 32-bit segments */ 124 - movl 64(%rsp), %eax 125 - lgdt (%rax) 126 - 127 - leaq efi_enter32(%rip), %rax 128 - pushq $__KERNEL_CS 129 - pushq %rax 130 - lretq 131 - 132 - 1: addq $32, %rsp 133 - 134 - lgdt save_gdt(%rip) 135 - 136 - pop %rbx 137 - movl %ebx, %ss 138 - pop %rbx 139 - movl %ebx, %es 140 - pop %rbx 141 - movl %ebx, %ds 142 - 143 - /* 144 - * Convert 32-bit status code into 64-bit. 145 - */ 146 - test %rax, %rax 147 - jz 1f 148 - movl %eax, %ecx 149 - andl $0x0fffffff, %ecx 150 - andl $0xf0000000, %eax 151 - shl $32, %rax 152 - or %rcx, %rax 153 - 1: 154 - ret 155 - ENDPROC(__efi64_thunk) 156 - 157 - ENTRY(efi_exit32) 158 - movq func_rt_ptr(%rip), %rax 159 - push %rax 160 - mov %rdi, %rax 161 - ret 162 - ENDPROC(efi_exit32) 163 - 164 - .code32 165 - /* 166 - * EFI service pointer must be in %edi. 167 - * 168 - * The stack should represent the 32-bit calling convention. 
169 - */ 170 - ENTRY(efi_enter32) 171 - movl $__KERNEL_DS, %eax 172 - movl %eax, %ds 173 - movl %eax, %es 174 - movl %eax, %ss 175 - 176 - /* Reload pgtables */ 177 - movl %cr3, %eax 178 - movl %eax, %cr3 179 - 180 - /* Disable paging */ 181 - movl %cr0, %eax 182 - btrl $X86_CR0_PG_BIT, %eax 183 - movl %eax, %cr0 184 - 185 - /* Disable long mode via EFER */ 186 - movl $MSR_EFER, %ecx 187 - rdmsr 188 - btrl $_EFER_LME, %eax 189 - wrmsr 190 - 191 - call *%edi 192 - 193 - /* We must preserve return value */ 194 - movl %eax, %edi 195 - 196 - /* 197 - * Some firmware will return with interrupts enabled. Be sure to 198 - * disable them before we switch GDTs. 199 - */ 200 - cli 201 - 202 - movl 68(%esp), %eax 203 - movl %eax, 2(%eax) 204 - lgdtl (%eax) 205 - 206 - movl %cr4, %eax 207 - btsl $(X86_CR4_PAE_BIT), %eax 208 - movl %eax, %cr4 209 - 210 - movl %cr3, %eax 211 - movl %eax, %cr3 212 - 213 - movl $MSR_EFER, %ecx 214 - rdmsr 215 - btsl $_EFER_LME, %eax 216 - wrmsr 217 - 218 - xorl %eax, %eax 219 - lldt %ax 220 - 221 - movl 72(%esp), %eax 222 - pushl $__KERNEL_CS 223 - pushl %eax 224 - 225 - /* Enable paging */ 226 - movl %cr0, %eax 227 - btsl $X86_CR0_PG_BIT, %eax 228 - movl %eax, %cr0 229 - lret 230 - ENDPROC(efi_enter32) 231 - 232 - .data 233 - .balign 8 234 - .global efi32_boot_gdt 235 - efi32_boot_gdt: .word 0 236 - .quad 0 237 - 238 - save_gdt: .word 0 239 - .quad 0 240 - func_rt_ptr: .quad 0 241 - 242 - .global efi_gdt64 243 - efi_gdt64: 244 - .word efi_gdt64_end - efi_gdt64 245 - .long 0 /* Filled out by user */ 246 - .word 0 247 - .quad 0x0000000000000000 /* NULL descriptor */ 248 - .quad 0x00af9a000000ffff /* __KERNEL_CS */ 249 - .quad 0x00cf92000000ffff /* __KERNEL_DS */ 250 - .quad 0x0080890000000000 /* TS descriptor */ 251 - .quad 0x0000000000000000 /* TS continued */ 252 - efi_gdt64_end: 253 - #endif /* CONFIG_EFI_MIXED */ 254 - 255 94 .data 256 95 ENTRY(efi_scratch) 257 96 .fill 3,8,0
+104 -17
arch/x86/platform/efi/efi_thunk_64.S
··· 1 1 /* 2 2 * Copyright (C) 2014 Intel Corporation; author Matt Fleming 3 + * 4 + * Support for invoking 32-bit EFI runtime services from a 64-bit 5 + * kernel. 6 + * 7 + * The below thunking functions are only used after ExitBootServices() 8 + * has been called. This simplifies things considerably as compared with 9 + * the early EFI thunking because we can leave all the kernel state 10 + * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime 11 + * services from __KERNEL32_CS. This means we can continue to service 12 + * interrupts across an EFI mixed mode call. 13 + * 14 + * We do, however, need to handle the fact that we're running in a full 15 + * 64-bit virtual address space. Things like the stack and instruction 16 + * addresses need to be accessible by the 32-bit firmware, so we rely on 17 + * using the identity mappings in the EFI page table to access the stack 18 + * and kernel text (see efi_setup_page_tables()). 3 19 */ 4 20 5 21 #include <linux/linkage.h> 6 22 #include <asm/page_types.h> 23 + #include <asm/segment.h> 7 24 8 25 .text 9 26 .code64 ··· 50 33 leaq efi_exit32(%rip), %rbx 51 34 subq %rax, %rbx 52 35 movl %ebx, 8(%rsp) 53 - leaq efi_gdt64(%rip), %rbx 54 - subq %rax, %rbx 55 - movl %ebx, 2(%ebx) 56 - movl %ebx, 4(%rsp) 57 - leaq efi_gdt32(%rip), %rbx 58 - subq %rax, %rbx 59 - movl %ebx, 2(%ebx) 60 - movl %ebx, (%rsp) 61 36 62 37 leaq __efi64_thunk(%rip), %rbx 63 38 subq %rax, %rbx ··· 61 52 retq 62 53 ENDPROC(efi64_thunk) 63 54 64 - .data 65 - efi_gdt32: 66 - .word efi_gdt32_end - efi_gdt32 67 - .long 0 /* Filled out above */ 68 - .word 0 69 - .quad 0x0000000000000000 /* NULL descriptor */ 70 - .quad 0x00cf9a000000ffff /* __KERNEL_CS */ 71 - .quad 0x00cf93000000ffff /* __KERNEL_DS */ 72 - efi_gdt32_end: 55 + /* 56 + * We run this function from the 1:1 mapping. 57 + * 58 + * This function must be invoked with a 1:1 mapped stack. 
59 + */ 60 + ENTRY(__efi64_thunk) 61 + movl %ds, %eax 62 + push %rax 63 + movl %es, %eax 64 + push %rax 65 + movl %ss, %eax 66 + push %rax 73 67 68 + subq $32, %rsp 69 + movl %esi, 0x0(%rsp) 70 + movl %edx, 0x4(%rsp) 71 + movl %ecx, 0x8(%rsp) 72 + movq %r8, %rsi 73 + movl %esi, 0xc(%rsp) 74 + movq %r9, %rsi 75 + movl %esi, 0x10(%rsp) 76 + 77 + leaq 1f(%rip), %rbx 78 + movq %rbx, func_rt_ptr(%rip) 79 + 80 + /* Switch to 32-bit descriptor */ 81 + pushq $__KERNEL32_CS 82 + leaq efi_enter32(%rip), %rax 83 + pushq %rax 84 + lretq 85 + 86 + 1: addq $32, %rsp 87 + 88 + pop %rbx 89 + movl %ebx, %ss 90 + pop %rbx 91 + movl %ebx, %es 92 + pop %rbx 93 + movl %ebx, %ds 94 + 95 + /* 96 + * Convert 32-bit status code into 64-bit. 97 + */ 98 + test %rax, %rax 99 + jz 1f 100 + movl %eax, %ecx 101 + andl $0x0fffffff, %ecx 102 + andl $0xf0000000, %eax 103 + shl $32, %rax 104 + or %rcx, %rax 105 + 1: 106 + ret 107 + ENDPROC(__efi64_thunk) 108 + 109 + ENTRY(efi_exit32) 110 + movq func_rt_ptr(%rip), %rax 111 + push %rax 112 + mov %rdi, %rax 113 + ret 114 + ENDPROC(efi_exit32) 115 + 116 + .code32 117 + /* 118 + * EFI service pointer must be in %edi. 119 + * 120 + * The stack should represent the 32-bit calling convention. 121 + */ 122 + ENTRY(efi_enter32) 123 + movl $__KERNEL_DS, %eax 124 + movl %eax, %ds 125 + movl %eax, %es 126 + movl %eax, %ss 127 + 128 + call *%edi 129 + 130 + /* We must preserve return value */ 131 + movl %eax, %edi 132 + 133 + movl 72(%esp), %eax 134 + pushl $__KERNEL_CS 135 + pushl %eax 136 + 137 + lret 138 + ENDPROC(efi_enter32) 139 + 140 + .data 141 + .balign 8 142 + func_rt_ptr: .quad 0 74 143 efi_saved_sp: .quad 0
+6 -10
drivers/firmware/efi/libstub/efi-stub-helper.c
··· 75 75 unsigned long key; 76 76 u32 desc_version; 77 77 78 - *map_size = 0; 79 - *desc_size = 0; 80 - key = 0; 81 - status = efi_call_early(get_memory_map, map_size, NULL, 82 - &key, desc_size, &desc_version); 83 - if (status != EFI_BUFFER_TOO_SMALL) 84 - return EFI_LOAD_ERROR; 85 - 78 + *map_size = sizeof(*m) * 32; 79 + again: 86 80 /* 87 81 * Add an additional efi_memory_desc_t because we're doing an 88 82 * allocation which may be in a new descriptor region. 89 83 */ 90 - *map_size += *desc_size; 84 + *map_size += sizeof(*m); 91 85 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 92 86 *map_size, (void **)&m); 93 87 if (status != EFI_SUCCESS) 94 88 goto fail; 95 89 90 + *desc_size = 0; 91 + key = 0; 96 92 status = efi_call_early(get_memory_map, map_size, m, 97 93 &key, desc_size, &desc_version); 98 94 if (status == EFI_BUFFER_TOO_SMALL) { 99 95 efi_call_early(free_pool, m); 100 - return EFI_LOAD_ERROR; 96 + goto again; 101 97 } 102 98 103 99 if (status != EFI_SUCCESS)
+3 -2
fs/binfmt_elf.c
··· 645 645 646 646 static unsigned long randomize_stack_top(unsigned long stack_top) 647 647 { 648 - unsigned int random_variable = 0; 648 + unsigned long random_variable = 0; 649 649 650 650 if ((current->flags & PF_RANDOMIZE) && 651 651 !(current->personality & ADDR_NO_RANDOMIZE)) { 652 - random_variable = get_random_int() & STACK_RND_MASK; 652 + random_variable = (unsigned long) get_random_int(); 653 + random_variable &= STACK_RND_MASK; 653 654 random_variable <<= PAGE_SHIFT; 654 655 } 655 656 #ifdef CONFIG_STACK_GROWSUP