Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86_boot_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 boot updates from Borislav Petkov:
"A of early boot cleanups and fixes.

- Do some spring cleaning to the compressed boot code by moving the
EFI mixed-mode code to a separate compilation unit, the AMD memory
encryption early code where it belongs and fixing up build
dependencies. Make the deprecated EFI handover protocol optional
with the goal of removing it at some point (Ard Biesheuvel)

- Skip realmode init code on Xen PV guests as it is not needed there

- Remove an old 32-bit PIC code compiler workaround"

* tag 'x86_boot_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/boot: Remove x86_32 PIC using %ebx workaround
x86/boot: Skip realmode init code when running as Xen PV guest
x86/efi: Make the deprecated EFI handover protocol optional
x86/boot/compressed: Only build mem_encrypt.S if AMD_MEM_ENCRYPT=y
x86/boot/compressed: Adhere to calling convention in get_sev_encryption_bit()
x86/boot/compressed: Move startup32_check_sev_cbit() out of head_64.S
x86/boot/compressed: Move startup32_check_sev_cbit() into .text
x86/boot/compressed: Move startup32_load_idt() out of head_64.S
x86/boot/compressed: Move startup32_load_idt() into .text section
x86/boot/compressed: Pull global variable reference into startup32_load_idt()
x86/boot/compressed: Avoid touching ECX in startup32_set_idt_entry()
x86/boot/compressed: Simplify IDT/GDT preserve/restore in the EFI thunk
x86/boot/compressed, efi: Merge multiple definitions of image_offset into one
x86/boot/compressed: Move efi32_pe_entry() out of head_64.S
x86/boot/compressed: Move efi32_entry out of head_64.S
x86/boot/compressed: Move efi32_pe_entry into .text section
x86/boot/compressed: Move bootargs parsing out of 32-bit startup code
x86/boot/compressed: Move 32-bit entrypoint code into .text section
x86/boot/compressed: Rename efi_thunk_64.S to efi-mixed.S

+547 -512
+17
arch/x86/Kconfig
··· 1981 1981 1982 1982 See Documentation/admin-guide/efi-stub.rst for more information. 1983 1983 1984 + config EFI_HANDOVER_PROTOCOL 1985 + bool "EFI handover protocol (DEPRECATED)" 1986 + depends on EFI_STUB 1987 + default y 1988 + help 1989 + Select this in order to include support for the deprecated EFI 1990 + handover protocol, which defines alternative entry points into the 1991 + EFI stub. This is a practice that has no basis in the UEFI 1992 + specification, and requires a priori knowledge on the part of the 1993 + bootloader about Linux/x86 specific ways of passing the command line 1994 + and initrd, and where in memory those assets may be loaded. 1995 + 1996 + If in doubt, say Y. Even though the corresponding support is not 1997 + present in upstream GRUB or other bootloaders, most distros build 1998 + GRUB with numerous downstream patches applied, and may rely on the 1999 + handover protocol as a result. 2000 + 1984 2001 config EFI_MIXED 1985 2002 bool "EFI mixed-mode support" 1986 2003 depends on EFI_STUB && X86_64
+4 -4
arch/x86/boot/compressed/Makefile
··· 100 100 ifdef CONFIG_X86_64 101 101 vmlinux-objs-y += $(obj)/ident_map_64.o 102 102 vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o 103 - vmlinux-objs-y += $(obj)/mem_encrypt.o 103 + vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o 104 104 vmlinux-objs-y += $(obj)/pgtable_64.o 105 105 vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o 106 106 endif ··· 108 108 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o 109 109 vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o 110 110 111 - vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o 112 111 vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o 113 - efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a 112 + vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o 113 + vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a 114 114 115 - $(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE 115 + $(obj)/vmlinux: $(vmlinux-objs-y) FORCE 116 116 $(call if_changed,ld) 117 117 118 118 OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+345
arch/x86/boot/compressed/efi_mixed.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming 4 + * 5 + * Early support for invoking 32-bit EFI services from a 64-bit kernel. 6 + * 7 + * Because this thunking occurs before ExitBootServices() we have to 8 + * restore the firmware's 32-bit GDT and IDT before we make EFI service 9 + * calls. 10 + * 11 + * On the plus side, we don't have to worry about mangling 64-bit 12 + * addresses into 32-bits because we're executing with an identity 13 + * mapped pagetable and haven't transitioned to 64-bit virtual addresses 14 + * yet. 15 + */ 16 + 17 + #include <linux/linkage.h> 18 + #include <asm/msr.h> 19 + #include <asm/page_types.h> 20 + #include <asm/processor-flags.h> 21 + #include <asm/segment.h> 22 + 23 + .code64 24 + .text 25 + /* 26 + * When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode() 27 + * is the first thing that runs after switching to long mode. Depending on 28 + * whether the EFI handover protocol or the compat entry point was used to 29 + * enter the kernel, it will either branch to the 64-bit EFI handover 30 + * entrypoint at offset 0x390 in the image, or to the 64-bit EFI PE/COFF 31 + * entrypoint efi_pe_entry(). In the former case, the bootloader must provide a 32 + * struct bootparams pointer as the third argument, so the presence of such a 33 + * pointer is used to disambiguate. 
34 + * 35 + * +--------------+ 36 + * +------------------+ +------------+ +------>| efi_pe_entry | 37 + * | efi32_pe_entry |---->| | | +-----------+--+ 38 + * +------------------+ | | +------+----------------+ | 39 + * | startup_32 |---->| startup_64_mixed_mode | | 40 + * +------------------+ | | +------+----------------+ V 41 + * | efi32_stub_entry |---->| | | +------------------+ 42 + * +------------------+ +------------+ +---->| efi64_stub_entry | 43 + * +-------------+----+ 44 + * +------------+ +----------+ | 45 + * | startup_64 |<----| efi_main |<--------------+ 46 + * +------------+ +----------+ 47 + */ 48 + SYM_FUNC_START(startup_64_mixed_mode) 49 + lea efi32_boot_args(%rip), %rdx 50 + mov 0(%rdx), %edi 51 + mov 4(%rdx), %esi 52 + mov 8(%rdx), %edx // saved bootparams pointer 53 + test %edx, %edx 54 + jnz efi64_stub_entry 55 + /* 56 + * efi_pe_entry uses MS calling convention, which requires 32 bytes of 57 + * shadow space on the stack even if all arguments are passed in 58 + * registers. We also need an additional 8 bytes for the space that 59 + * would be occupied by the return address, and this also results in 60 + * the correct stack alignment for entry. 
61 + */ 62 + sub $40, %rsp 63 + mov %rdi, %rcx // MS calling convention 64 + mov %rsi, %rdx 65 + jmp efi_pe_entry 66 + SYM_FUNC_END(startup_64_mixed_mode) 67 + 68 + SYM_FUNC_START(__efi64_thunk) 69 + push %rbp 70 + push %rbx 71 + 72 + movl %ds, %eax 73 + push %rax 74 + movl %es, %eax 75 + push %rax 76 + movl %ss, %eax 77 + push %rax 78 + 79 + /* Copy args passed on stack */ 80 + movq 0x30(%rsp), %rbp 81 + movq 0x38(%rsp), %rbx 82 + movq 0x40(%rsp), %rax 83 + 84 + /* 85 + * Convert x86-64 ABI params to i386 ABI 86 + */ 87 + subq $64, %rsp 88 + movl %esi, 0x0(%rsp) 89 + movl %edx, 0x4(%rsp) 90 + movl %ecx, 0x8(%rsp) 91 + movl %r8d, 0xc(%rsp) 92 + movl %r9d, 0x10(%rsp) 93 + movl %ebp, 0x14(%rsp) 94 + movl %ebx, 0x18(%rsp) 95 + movl %eax, 0x1c(%rsp) 96 + 97 + leaq 0x20(%rsp), %rbx 98 + sgdt (%rbx) 99 + sidt 16(%rbx) 100 + 101 + leaq 1f(%rip), %rbp 102 + 103 + /* 104 + * Switch to IDT and GDT with 32-bit segments. These are the firmware 105 + * GDT and IDT that were installed when the kernel started executing. 106 + * The pointers were saved by the efi32_entry() routine below. 107 + * 108 + * Pass the saved DS selector to the 32-bit code, and use far return to 109 + * restore the saved CS selector. 110 + */ 111 + lidt efi32_boot_idt(%rip) 112 + lgdt efi32_boot_gdt(%rip) 113 + 114 + movzwl efi32_boot_ds(%rip), %edx 115 + movzwq efi32_boot_cs(%rip), %rax 116 + pushq %rax 117 + leaq efi_enter32(%rip), %rax 118 + pushq %rax 119 + lretq 120 + 121 + 1: addq $64, %rsp 122 + movq %rdi, %rax 123 + 124 + pop %rbx 125 + movl %ebx, %ss 126 + pop %rbx 127 + movl %ebx, %es 128 + pop %rbx 129 + movl %ebx, %ds 130 + /* Clear out 32-bit selector from FS and GS */ 131 + xorl %ebx, %ebx 132 + movl %ebx, %fs 133 + movl %ebx, %gs 134 + 135 + pop %rbx 136 + pop %rbp 137 + RET 138 + SYM_FUNC_END(__efi64_thunk) 139 + 140 + .code32 141 + /* 142 + * EFI service pointer must be in %edi. 143 + * 144 + * The stack should represent the 32-bit calling convention. 
145 + */ 146 + SYM_FUNC_START_LOCAL(efi_enter32) 147 + /* Load firmware selector into data and stack segment registers */ 148 + movl %edx, %ds 149 + movl %edx, %es 150 + movl %edx, %fs 151 + movl %edx, %gs 152 + movl %edx, %ss 153 + 154 + /* Reload pgtables */ 155 + movl %cr3, %eax 156 + movl %eax, %cr3 157 + 158 + /* Disable paging */ 159 + movl %cr0, %eax 160 + btrl $X86_CR0_PG_BIT, %eax 161 + movl %eax, %cr0 162 + 163 + /* Disable long mode via EFER */ 164 + movl $MSR_EFER, %ecx 165 + rdmsr 166 + btrl $_EFER_LME, %eax 167 + wrmsr 168 + 169 + call *%edi 170 + 171 + /* We must preserve return value */ 172 + movl %eax, %edi 173 + 174 + /* 175 + * Some firmware will return with interrupts enabled. Be sure to 176 + * disable them before we switch GDTs and IDTs. 177 + */ 178 + cli 179 + 180 + lidtl 16(%ebx) 181 + lgdtl (%ebx) 182 + 183 + movl %cr4, %eax 184 + btsl $(X86_CR4_PAE_BIT), %eax 185 + movl %eax, %cr4 186 + 187 + movl %cr3, %eax 188 + movl %eax, %cr3 189 + 190 + movl $MSR_EFER, %ecx 191 + rdmsr 192 + btsl $_EFER_LME, %eax 193 + wrmsr 194 + 195 + xorl %eax, %eax 196 + lldt %ax 197 + 198 + pushl $__KERNEL_CS 199 + pushl %ebp 200 + 201 + /* Enable paging */ 202 + movl %cr0, %eax 203 + btsl $X86_CR0_PG_BIT, %eax 204 + movl %eax, %cr0 205 + lret 206 + SYM_FUNC_END(efi_enter32) 207 + 208 + /* 209 + * This is the common EFI stub entry point for mixed mode. 210 + * 211 + * Arguments: %ecx image handle 212 + * %edx EFI system table pointer 213 + * %esi struct bootparams pointer (or NULL when not using 214 + * the EFI handover protocol) 215 + * 216 + * Since this is the point of no return for ordinary execution, no registers 217 + * are considered live except for the function parameters. [Note that the EFI 218 + * stub may still exit and return to the firmware using the Exit() EFI boot 219 + * service.] 
220 + */ 221 + SYM_FUNC_START(efi32_entry) 222 + call 1f 223 + 1: pop %ebx 224 + 225 + /* Save firmware GDTR and code/data selectors */ 226 + sgdtl (efi32_boot_gdt - 1b)(%ebx) 227 + movw %cs, (efi32_boot_cs - 1b)(%ebx) 228 + movw %ds, (efi32_boot_ds - 1b)(%ebx) 229 + 230 + /* Store firmware IDT descriptor */ 231 + sidtl (efi32_boot_idt - 1b)(%ebx) 232 + 233 + /* Store boot arguments */ 234 + leal (efi32_boot_args - 1b)(%ebx), %ebx 235 + movl %ecx, 0(%ebx) 236 + movl %edx, 4(%ebx) 237 + movl %esi, 8(%ebx) 238 + movb $0x0, 12(%ebx) // efi_is64 239 + 240 + /* Disable paging */ 241 + movl %cr0, %eax 242 + btrl $X86_CR0_PG_BIT, %eax 243 + movl %eax, %cr0 244 + 245 + jmp startup_32 246 + SYM_FUNC_END(efi32_entry) 247 + 248 + #define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime) 249 + #define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol) 250 + #define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base) 251 + 252 + /* 253 + * efi_status_t efi32_pe_entry(efi_handle_t image_handle, 254 + * efi_system_table_32_t *sys_table) 255 + */ 256 + SYM_FUNC_START(efi32_pe_entry) 257 + pushl %ebp 258 + movl %esp, %ebp 259 + pushl %eax // dummy push to allocate loaded_image 260 + 261 + pushl %ebx // save callee-save registers 262 + pushl %edi 263 + 264 + call verify_cpu // check for long mode support 265 + testl %eax, %eax 266 + movl $0x80000003, %eax // EFI_UNSUPPORTED 267 + jnz 2f 268 + 269 + call 1f 270 + 1: pop %ebx 271 + 272 + /* Get the loaded image protocol pointer from the image handle */ 273 + leal -4(%ebp), %eax 274 + pushl %eax // &loaded_image 275 + leal (loaded_image_proto - 1b)(%ebx), %eax 276 + pushl %eax // pass the GUID address 277 + pushl 8(%ebp) // pass the image handle 278 + 279 + /* 280 + * Note the alignment of the stack frame. 
281 + * sys_table 282 + * handle <-- 16-byte aligned on entry by ABI 283 + * return address 284 + * frame pointer 285 + * loaded_image <-- local variable 286 + * saved %ebx <-- 16-byte aligned here 287 + * saved %edi 288 + * &loaded_image 289 + * &loaded_image_proto 290 + * handle <-- 16-byte aligned for call to handle_protocol 291 + */ 292 + 293 + movl 12(%ebp), %eax // sys_table 294 + movl ST32_boottime(%eax), %eax // sys_table->boottime 295 + call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol 296 + addl $12, %esp // restore argument space 297 + testl %eax, %eax 298 + jnz 2f 299 + 300 + movl 8(%ebp), %ecx // image_handle 301 + movl 12(%ebp), %edx // sys_table 302 + movl -4(%ebp), %esi // loaded_image 303 + movl LI32_image_base(%esi), %esi // loaded_image->image_base 304 + leal (startup_32 - 1b)(%ebx), %ebp // runtime address of startup_32 305 + /* 306 + * We need to set the image_offset variable here since startup_32() will 307 + * use it before we get to the 64-bit efi_pe_entry() in C code. 
308 + */ 309 + subl %esi, %ebp // calculate image_offset 310 + movl %ebp, (image_offset - 1b)(%ebx) // save image_offset 311 + xorl %esi, %esi 312 + jmp efi32_entry // pass %ecx, %edx, %esi 313 + // no other registers remain live 314 + 315 + 2: popl %edi // restore callee-save registers 316 + popl %ebx 317 + leave 318 + RET 319 + SYM_FUNC_END(efi32_pe_entry) 320 + 321 + .section ".rodata" 322 + /* EFI loaded image protocol GUID */ 323 + .balign 4 324 + SYM_DATA_START_LOCAL(loaded_image_proto) 325 + .long 0x5b1b31a1 326 + .word 0x9562, 0x11d2 327 + .byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b 328 + SYM_DATA_END(loaded_image_proto) 329 + 330 + .data 331 + .balign 8 332 + SYM_DATA_START_LOCAL(efi32_boot_gdt) 333 + .word 0 334 + .quad 0 335 + SYM_DATA_END(efi32_boot_gdt) 336 + 337 + SYM_DATA_START_LOCAL(efi32_boot_idt) 338 + .word 0 339 + .quad 0 340 + SYM_DATA_END(efi32_boot_idt) 341 + 342 + SYM_DATA_LOCAL(efi32_boot_cs, .word 0) 343 + SYM_DATA_LOCAL(efi32_boot_ds, .word 0) 344 + SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0) 345 + SYM_DATA(efi_is64, .byte 1)
-189
arch/x86/boot/compressed/efi_thunk_64.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming 4 - * 5 - * Early support for invoking 32-bit EFI services from a 64-bit kernel. 6 - * 7 - * Because this thunking occurs before ExitBootServices() we have to 8 - * restore the firmware's 32-bit GDT and IDT before we make EFI service 9 - * calls. 10 - * 11 - * On the plus side, we don't have to worry about mangling 64-bit 12 - * addresses into 32-bits because we're executing with an identity 13 - * mapped pagetable and haven't transitioned to 64-bit virtual addresses 14 - * yet. 15 - */ 16 - 17 - #include <linux/linkage.h> 18 - #include <asm/msr.h> 19 - #include <asm/page_types.h> 20 - #include <asm/processor-flags.h> 21 - #include <asm/segment.h> 22 - 23 - .code64 24 - .text 25 - SYM_FUNC_START(__efi64_thunk) 26 - push %rbp 27 - push %rbx 28 - 29 - movl %ds, %eax 30 - push %rax 31 - movl %es, %eax 32 - push %rax 33 - movl %ss, %eax 34 - push %rax 35 - 36 - /* Copy args passed on stack */ 37 - movq 0x30(%rsp), %rbp 38 - movq 0x38(%rsp), %rbx 39 - movq 0x40(%rsp), %rax 40 - 41 - /* 42 - * Convert x86-64 ABI params to i386 ABI 43 - */ 44 - subq $64, %rsp 45 - movl %esi, 0x0(%rsp) 46 - movl %edx, 0x4(%rsp) 47 - movl %ecx, 0x8(%rsp) 48 - movl %r8d, 0xc(%rsp) 49 - movl %r9d, 0x10(%rsp) 50 - movl %ebp, 0x14(%rsp) 51 - movl %ebx, 0x18(%rsp) 52 - movl %eax, 0x1c(%rsp) 53 - 54 - leaq 0x20(%rsp), %rbx 55 - sgdt (%rbx) 56 - 57 - addq $16, %rbx 58 - sidt (%rbx) 59 - 60 - leaq 1f(%rip), %rbp 61 - 62 - /* 63 - * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT 64 - * and IDT that was installed when the kernel started executing. The 65 - * pointers were saved at the EFI stub entry point in head_64.S. 66 - * 67 - * Pass the saved DS selector to the 32-bit code, and use far return to 68 - * restore the saved CS selector. 
69 - */ 70 - leaq efi32_boot_idt(%rip), %rax 71 - lidt (%rax) 72 - leaq efi32_boot_gdt(%rip), %rax 73 - lgdt (%rax) 74 - 75 - movzwl efi32_boot_ds(%rip), %edx 76 - movzwq efi32_boot_cs(%rip), %rax 77 - pushq %rax 78 - leaq efi_enter32(%rip), %rax 79 - pushq %rax 80 - lretq 81 - 82 - 1: addq $64, %rsp 83 - movq %rdi, %rax 84 - 85 - pop %rbx 86 - movl %ebx, %ss 87 - pop %rbx 88 - movl %ebx, %es 89 - pop %rbx 90 - movl %ebx, %ds 91 - /* Clear out 32-bit selector from FS and GS */ 92 - xorl %ebx, %ebx 93 - movl %ebx, %fs 94 - movl %ebx, %gs 95 - 96 - pop %rbx 97 - pop %rbp 98 - RET 99 - SYM_FUNC_END(__efi64_thunk) 100 - 101 - .code32 102 - /* 103 - * EFI service pointer must be in %edi. 104 - * 105 - * The stack should represent the 32-bit calling convention. 106 - */ 107 - SYM_FUNC_START_LOCAL(efi_enter32) 108 - /* Load firmware selector into data and stack segment registers */ 109 - movl %edx, %ds 110 - movl %edx, %es 111 - movl %edx, %fs 112 - movl %edx, %gs 113 - movl %edx, %ss 114 - 115 - /* Reload pgtables */ 116 - movl %cr3, %eax 117 - movl %eax, %cr3 118 - 119 - /* Disable paging */ 120 - movl %cr0, %eax 121 - btrl $X86_CR0_PG_BIT, %eax 122 - movl %eax, %cr0 123 - 124 - /* Disable long mode via EFER */ 125 - movl $MSR_EFER, %ecx 126 - rdmsr 127 - btrl $_EFER_LME, %eax 128 - wrmsr 129 - 130 - call *%edi 131 - 132 - /* We must preserve return value */ 133 - movl %eax, %edi 134 - 135 - /* 136 - * Some firmware will return with interrupts enabled. Be sure to 137 - * disable them before we switch GDTs and IDTs. 
138 - */ 139 - cli 140 - 141 - lidtl (%ebx) 142 - subl $16, %ebx 143 - 144 - lgdtl (%ebx) 145 - 146 - movl %cr4, %eax 147 - btsl $(X86_CR4_PAE_BIT), %eax 148 - movl %eax, %cr4 149 - 150 - movl %cr3, %eax 151 - movl %eax, %cr3 152 - 153 - movl $MSR_EFER, %ecx 154 - rdmsr 155 - btsl $_EFER_LME, %eax 156 - wrmsr 157 - 158 - xorl %eax, %eax 159 - lldt %ax 160 - 161 - pushl $__KERNEL_CS 162 - pushl %ebp 163 - 164 - /* Enable paging */ 165 - movl %cr0, %eax 166 - btsl $X86_CR0_PG_BIT, %eax 167 - movl %eax, %cr0 168 - lret 169 - SYM_FUNC_END(efi_enter32) 170 - 171 - .data 172 - .balign 8 173 - SYM_DATA_START(efi32_boot_gdt) 174 - .word 0 175 - .quad 0 176 - SYM_DATA_END(efi32_boot_gdt) 177 - 178 - SYM_DATA_START(efi32_boot_idt) 179 - .word 0 180 - .quad 0 181 - SYM_DATA_END(efi32_boot_idt) 182 - 183 - SYM_DATA_START(efi32_boot_cs) 184 - .word 0 185 - SYM_DATA_END(efi32_boot_cs) 186 - 187 - SYM_DATA_START(efi32_boot_ds) 188 - .word 0 189 - SYM_DATA_END(efi32_boot_ds)
-4
arch/x86/boot/compressed/head_32.S
··· 208 208 .quad 0x00cf92000000ffff /* __KERNEL_DS */ 209 209 SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) 210 210 211 - #ifdef CONFIG_EFI_STUB 212 - SYM_DATA(image_offset, .long 0) 213 - #endif 214 - 215 211 /* 216 212 * Stack and heap for uncompression 217 213 */
+18 -285
arch/x86/boot/compressed/head_64.S
··· 118 118 1: 119 119 120 120 /* Setup Exception handling for SEV-ES */ 121 + #ifdef CONFIG_AMD_MEM_ENCRYPT 121 122 call startup32_load_idt 123 + #endif 122 124 123 125 /* Make sure cpu supports long mode. */ 124 126 call verify_cpu ··· 180 178 */ 181 179 /* 182 180 * If SEV is active then set the encryption mask in the page tables. 183 - * This will insure that when the kernel is copied and decompressed 181 + * This will ensure that when the kernel is copied and decompressed 184 182 * it will be done so encrypted. 185 183 */ 186 - call get_sev_encryption_bit 187 184 xorl %edx, %edx 188 185 #ifdef CONFIG_AMD_MEM_ENCRYPT 186 + call get_sev_encryption_bit 187 + xorl %edx, %edx 189 188 testl %eax, %eax 190 189 jz 1f 191 190 subl $32, %eax /* Encryption bit is always above bit 31 */ ··· 252 249 movl $__BOOT_TSS, %eax 253 250 ltr %ax 254 251 252 + #ifdef CONFIG_AMD_MEM_ENCRYPT 253 + /* Check if the C-bit position is correct when SEV is active */ 254 + call startup32_check_sev_cbit 255 + #endif 256 + 255 257 /* 256 258 * Setup for the jump to 64bit mode 257 259 * ··· 269 261 */ 270 262 leal rva(startup_64)(%ebp), %eax 271 263 #ifdef CONFIG_EFI_MIXED 272 - movl rva(efi32_boot_args)(%ebp), %edi 273 - testl %edi, %edi 274 - jz 1f 275 - leal rva(efi64_stub_entry)(%ebp), %eax 276 - movl rva(efi32_boot_args+4)(%ebp), %esi 277 - movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer 278 - testl %edx, %edx 279 - jnz 1f 280 - /* 281 - * efi_pe_entry uses MS calling convention, which requires 32 bytes of 282 - * shadow space on the stack even if all arguments are passed in 283 - * registers. We also need an additional 8 bytes for the space that 284 - * would be occupied by the return address, and this also results in 285 - * the correct stack alignment for entry. 
286 - */ 287 - subl $40, %esp 288 - leal rva(efi_pe_entry)(%ebp), %eax 289 - movl %edi, %ecx // MS calling convention 290 - movl %esi, %edx 264 + cmpb $1, rva(efi_is64)(%ebp) 265 + je 1f 266 + leal rva(startup_64_mixed_mode)(%ebp), %eax 291 267 1: 292 268 #endif 293 - /* Check if the C-bit position is correct when SEV is active */ 294 - call startup32_check_sev_cbit 295 269 296 270 pushl $__KERNEL_CS 297 271 pushl %eax ··· 286 296 lret 287 297 SYM_FUNC_END(startup_32) 288 298 289 - #ifdef CONFIG_EFI_MIXED 299 + #if IS_ENABLED(CONFIG_EFI_MIXED) && IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL) 290 300 .org 0x190 291 301 SYM_FUNC_START(efi32_stub_entry) 292 302 add $0x4, %esp /* Discard return address */ 293 303 popl %ecx 294 304 popl %edx 295 305 popl %esi 296 - 297 - call 1f 298 - 1: pop %ebp 299 - subl $ rva(1b), %ebp 300 - 301 - movl %esi, rva(efi32_boot_args+8)(%ebp) 302 - SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL) 303 - movl %ecx, rva(efi32_boot_args)(%ebp) 304 - movl %edx, rva(efi32_boot_args+4)(%ebp) 305 - movb $0, rva(efi_is64)(%ebp) 306 - 307 - /* Save firmware GDTR and code/data selectors */ 308 - sgdtl rva(efi32_boot_gdt)(%ebp) 309 - movw %cs, rva(efi32_boot_cs)(%ebp) 310 - movw %ds, rva(efi32_boot_ds)(%ebp) 311 - 312 - /* Store firmware IDT descriptor */ 313 - sidtl rva(efi32_boot_idt)(%ebp) 314 - 315 - /* Disable paging */ 316 - movl %cr0, %eax 317 - btrl $X86_CR0_PG_BIT, %eax 318 - movl %eax, %cr0 319 - 320 - jmp startup_32 306 + jmp efi32_entry 321 307 SYM_FUNC_END(efi32_stub_entry) 322 308 #endif 323 309 ··· 516 550 SYM_CODE_END(startup_64) 517 551 518 552 #ifdef CONFIG_EFI_STUB 553 + #ifdef CONFIG_EFI_HANDOVER_PROTOCOL 519 554 .org 0x390 555 + #endif 520 556 SYM_FUNC_START(efi64_stub_entry) 521 557 and $~0xf, %rsp /* realign the stack */ 522 558 movq %rdx, %rbx /* save boot_params pointer */ ··· 681 713 jmp 1b 682 714 SYM_FUNC_END(.Lno_longmode) 683 715 716 + .globl verify_cpu 684 717 #include "../../kernel/verify_cpu.S" 685 718 686 719 .data ··· 
712 743 .quad 0 713 744 .endr 714 745 SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end) 715 - 716 - #ifdef CONFIG_AMD_MEM_ENCRYPT 717 - SYM_DATA_START(boot32_idt_desc) 718 - .word boot32_idt_end - boot32_idt - 1 719 - .long 0 720 - SYM_DATA_END(boot32_idt_desc) 721 - .balign 8 722 - SYM_DATA_START(boot32_idt) 723 - .rept 32 724 - .quad 0 725 - .endr 726 - SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end) 727 - #endif 728 - 729 - #ifdef CONFIG_EFI_STUB 730 - SYM_DATA(image_offset, .long 0) 731 - #endif 732 - #ifdef CONFIG_EFI_MIXED 733 - SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0) 734 - SYM_DATA(efi_is64, .byte 1) 735 - 736 - #define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime) 737 - #define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol) 738 - #define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base) 739 - 740 - __HEAD 741 - .code32 742 - SYM_FUNC_START(efi32_pe_entry) 743 - /* 744 - * efi_status_t efi32_pe_entry(efi_handle_t image_handle, 745 - * efi_system_table_32_t *sys_table) 746 - */ 747 - 748 - pushl %ebp 749 - movl %esp, %ebp 750 - pushl %eax // dummy push to allocate loaded_image 751 - 752 - pushl %ebx // save callee-save registers 753 - pushl %edi 754 - 755 - call verify_cpu // check for long mode support 756 - testl %eax, %eax 757 - movl $0x80000003, %eax // EFI_UNSUPPORTED 758 - jnz 2f 759 - 760 - call 1f 761 - 1: pop %ebx 762 - subl $ rva(1b), %ebx 763 - 764 - /* Get the loaded image protocol pointer from the image handle */ 765 - leal -4(%ebp), %eax 766 - pushl %eax // &loaded_image 767 - leal rva(loaded_image_proto)(%ebx), %eax 768 - pushl %eax // pass the GUID address 769 - pushl 8(%ebp) // pass the image handle 770 - 771 - /* 772 - * Note the alignment of the stack frame. 
773 - * sys_table 774 - * handle <-- 16-byte aligned on entry by ABI 775 - * return address 776 - * frame pointer 777 - * loaded_image <-- local variable 778 - * saved %ebx <-- 16-byte aligned here 779 - * saved %edi 780 - * &loaded_image 781 - * &loaded_image_proto 782 - * handle <-- 16-byte aligned for call to handle_protocol 783 - */ 784 - 785 - movl 12(%ebp), %eax // sys_table 786 - movl ST32_boottime(%eax), %eax // sys_table->boottime 787 - call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol 788 - addl $12, %esp // restore argument space 789 - testl %eax, %eax 790 - jnz 2f 791 - 792 - movl 8(%ebp), %ecx // image_handle 793 - movl 12(%ebp), %edx // sys_table 794 - movl -4(%ebp), %esi // loaded_image 795 - movl LI32_image_base(%esi), %esi // loaded_image->image_base 796 - movl %ebx, %ebp // startup_32 for efi32_pe_stub_entry 797 - /* 798 - * We need to set the image_offset variable here since startup_32() will 799 - * use it before we get to the 64-bit efi_pe_entry() in C code. 
800 - */ 801 - subl %esi, %ebx 802 - movl %ebx, rva(image_offset)(%ebp) // save image_offset 803 - jmp efi32_pe_stub_entry 804 - 805 - 2: popl %edi // restore callee-save registers 806 - popl %ebx 807 - leave 808 - RET 809 - SYM_FUNC_END(efi32_pe_entry) 810 - 811 - .section ".rodata" 812 - /* EFI loaded image protocol GUID */ 813 - .balign 4 814 - SYM_DATA_START_LOCAL(loaded_image_proto) 815 - .long 0x5b1b31a1 816 - .word 0x9562, 0x11d2 817 - .byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b 818 - SYM_DATA_END(loaded_image_proto) 819 - #endif 820 - 821 - #ifdef CONFIG_AMD_MEM_ENCRYPT 822 - __HEAD 823 - .code32 824 - /* 825 - * Write an IDT entry into boot32_idt 826 - * 827 - * Parameters: 828 - * 829 - * %eax: Handler address 830 - * %edx: Vector number 831 - * 832 - * Physical offset is expected in %ebp 833 - */ 834 - SYM_FUNC_START(startup32_set_idt_entry) 835 - push %ebx 836 - push %ecx 837 - 838 - /* IDT entry address to %ebx */ 839 - leal rva(boot32_idt)(%ebp), %ebx 840 - shl $3, %edx 841 - addl %edx, %ebx 842 - 843 - /* Build IDT entry, lower 4 bytes */ 844 - movl %eax, %edx 845 - andl $0x0000ffff, %edx # Target code segment offset [15:0] 846 - movl $__KERNEL32_CS, %ecx # Target code segment selector 847 - shl $16, %ecx 848 - orl %ecx, %edx 849 - 850 - /* Store lower 4 bytes to IDT */ 851 - movl %edx, (%ebx) 852 - 853 - /* Build IDT entry, upper 4 bytes */ 854 - movl %eax, %edx 855 - andl $0xffff0000, %edx # Target code segment offset [31:16] 856 - orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate 857 - 858 - /* Store upper 4 bytes to IDT */ 859 - movl %edx, 4(%ebx) 860 - 861 - pop %ecx 862 - pop %ebx 863 - RET 864 - SYM_FUNC_END(startup32_set_idt_entry) 865 - #endif 866 - 867 - SYM_FUNC_START(startup32_load_idt) 868 - #ifdef CONFIG_AMD_MEM_ENCRYPT 869 - /* #VC handler */ 870 - leal rva(startup32_vc_handler)(%ebp), %eax 871 - movl $X86_TRAP_VC, %edx 872 - call startup32_set_idt_entry 873 - 874 - /* Load IDT */ 875 - leal rva(boot32_idt)(%ebp), 
%eax 876 - movl %eax, rva(boot32_idt_desc+2)(%ebp) 877 - lidt rva(boot32_idt_desc)(%ebp) 878 - #endif 879 - RET 880 - SYM_FUNC_END(startup32_load_idt) 881 - 882 - /* 883 - * Check for the correct C-bit position when the startup_32 boot-path is used. 884 - * 885 - * The check makes use of the fact that all memory is encrypted when paging is 886 - * disabled. The function creates 64 bits of random data using the RDRAND 887 - * instruction. RDRAND is mandatory for SEV guests, so always available. If the 888 - * hypervisor violates that the kernel will crash right here. 889 - * 890 - * The 64 bits of random data are stored to a memory location and at the same 891 - * time kept in the %eax and %ebx registers. Since encryption is always active 892 - * when paging is off the random data will be stored encrypted in main memory. 893 - * 894 - * Then paging is enabled. When the C-bit position is correct all memory is 895 - * still mapped encrypted and comparing the register values with memory will 896 - * succeed. An incorrect C-bit position will map all memory unencrypted, so that 897 - * the compare will use the encrypted random data and fail. 898 - */ 899 - SYM_FUNC_START(startup32_check_sev_cbit) 900 - #ifdef CONFIG_AMD_MEM_ENCRYPT 901 - pushl %eax 902 - pushl %ebx 903 - pushl %ecx 904 - pushl %edx 905 - 906 - /* Check for non-zero sev_status */ 907 - movl rva(sev_status)(%ebp), %eax 908 - testl %eax, %eax 909 - jz 4f 910 - 911 - /* 912 - * Get two 32-bit random values - Don't bail out if RDRAND fails 913 - * because it is better to prevent forward progress if no random value 914 - * can be gathered. 
915 - */ 916 - 1: rdrand %eax 917 - jnc 1b 918 - 2: rdrand %ebx 919 - jnc 2b 920 - 921 - /* Store to memory and keep it in the registers */ 922 - movl %eax, rva(sev_check_data)(%ebp) 923 - movl %ebx, rva(sev_check_data+4)(%ebp) 924 - 925 - /* Enable paging to see if encryption is active */ 926 - movl %cr0, %edx /* Backup %cr0 in %edx */ 927 - movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */ 928 - movl %ecx, %cr0 929 - 930 - cmpl %eax, rva(sev_check_data)(%ebp) 931 - jne 3f 932 - cmpl %ebx, rva(sev_check_data+4)(%ebp) 933 - jne 3f 934 - 935 - movl %edx, %cr0 /* Restore previous %cr0 */ 936 - 937 - jmp 4f 938 - 939 - 3: /* Check failed - hlt the machine */ 940 - hlt 941 - jmp 3b 942 - 943 - 4: 944 - popl %edx 945 - popl %ecx 946 - popl %ebx 947 - popl %eax 948 - #endif 949 - RET 950 - SYM_FUNC_END(startup32_check_sev_cbit) 951 746 952 747 /* 953 748 * Stack and heap for uncompression
+139 -13
arch/x86/boot/compressed/mem_encrypt.S
··· 12 12 #include <asm/processor-flags.h> 13 13 #include <asm/msr.h> 14 14 #include <asm/asm-offsets.h> 15 + #include <asm/segment.h> 16 + #include <asm/trapnr.h> 15 17 16 18 .text 17 19 .code32 18 20 SYM_FUNC_START(get_sev_encryption_bit) 19 - xor %eax, %eax 20 - 21 - #ifdef CONFIG_AMD_MEM_ENCRYPT 22 21 push %ebx 23 - push %ecx 24 - push %edx 25 22 26 23 movl $0x80000000, %eax /* CPUID to check the highest leaf */ 27 24 cpuid ··· 49 52 xor %eax, %eax 50 53 51 54 .Lsev_exit: 52 - pop %edx 53 - pop %ecx 54 55 pop %ebx 55 - 56 - #endif /* CONFIG_AMD_MEM_ENCRYPT */ 57 - 58 56 RET 59 57 SYM_FUNC_END(get_sev_encryption_bit) 60 58 ··· 90 98 jmp 1b 91 99 SYM_CODE_END(sev_es_req_cpuid) 92 100 93 - SYM_CODE_START(startup32_vc_handler) 101 + SYM_CODE_START_LOCAL(startup32_vc_handler) 94 102 pushl %eax 95 103 pushl %ebx 96 104 pushl %ecx ··· 176 184 jmp .Lfail 177 185 SYM_CODE_END(startup32_vc_handler) 178 186 187 + /* 188 + * Write an IDT entry into boot32_idt 189 + * 190 + * Parameters: 191 + * 192 + * %eax: Handler address 193 + * %edx: Vector number 194 + * %ecx: IDT address 195 + */ 196 + SYM_FUNC_START_LOCAL(startup32_set_idt_entry) 197 + /* IDT entry address to %ecx */ 198 + leal (%ecx, %edx, 8), %ecx 199 + 200 + /* Build IDT entry, lower 4 bytes */ 201 + movl %eax, %edx 202 + andl $0x0000ffff, %edx # Target code segment offset [15:0] 203 + orl $(__KERNEL32_CS << 16), %edx # Target code segment selector 204 + 205 + /* Store lower 4 bytes to IDT */ 206 + movl %edx, (%ecx) 207 + 208 + /* Build IDT entry, upper 4 bytes */ 209 + movl %eax, %edx 210 + andl $0xffff0000, %edx # Target code segment offset [31:16] 211 + orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate 212 + 213 + /* Store upper 4 bytes to IDT */ 214 + movl %edx, 4(%ecx) 215 + 216 + RET 217 + SYM_FUNC_END(startup32_set_idt_entry) 218 + 219 + SYM_FUNC_START(startup32_load_idt) 220 + push %ebp 221 + push %ebx 222 + 223 + call 1f 224 + 1: pop %ebp 225 + 226 + leal (boot32_idt - 1b)(%ebp), %ebx 227 + 228 
+ /* #VC handler */ 229 + leal (startup32_vc_handler - 1b)(%ebp), %eax 230 + movl $X86_TRAP_VC, %edx 231 + movl %ebx, %ecx 232 + call startup32_set_idt_entry 233 + 234 + /* Load IDT */ 235 + leal (boot32_idt_desc - 1b)(%ebp), %ecx 236 + movl %ebx, 2(%ecx) 237 + lidt (%ecx) 238 + 239 + pop %ebx 240 + pop %ebp 241 + RET 242 + SYM_FUNC_END(startup32_load_idt) 243 + 244 + /* 245 + * Check for the correct C-bit position when the startup_32 boot-path is used. 246 + * 247 + * The check makes use of the fact that all memory is encrypted when paging is 248 + * disabled. The function creates 64 bits of random data using the RDRAND 249 + * instruction. RDRAND is mandatory for SEV guests, so always available. If the 250 + * hypervisor violates that the kernel will crash right here. 251 + * 252 + * The 64 bits of random data are stored to a memory location and at the same 253 + * time kept in the %eax and %ebx registers. Since encryption is always active 254 + * when paging is off the random data will be stored encrypted in main memory. 255 + * 256 + * Then paging is enabled. When the C-bit position is correct all memory is 257 + * still mapped encrypted and comparing the register values with memory will 258 + * succeed. An incorrect C-bit position will map all memory unencrypted, so that 259 + * the compare will use the encrypted random data and fail. 260 + */ 261 + SYM_FUNC_START(startup32_check_sev_cbit) 262 + pushl %ebx 263 + pushl %ebp 264 + 265 + call 0f 266 + 0: popl %ebp 267 + 268 + /* Check for non-zero sev_status */ 269 + movl (sev_status - 0b)(%ebp), %eax 270 + testl %eax, %eax 271 + jz 4f 272 + 273 + /* 274 + * Get two 32-bit random values - Don't bail out if RDRAND fails 275 + * because it is better to prevent forward progress if no random value 276 + * can be gathered. 
277 + */ 278 + 1: rdrand %eax 279 + jnc 1b 280 + 2: rdrand %ebx 281 + jnc 2b 282 + 283 + /* Store to memory and keep it in the registers */ 284 + leal (sev_check_data - 0b)(%ebp), %ebp 285 + movl %eax, 0(%ebp) 286 + movl %ebx, 4(%ebp) 287 + 288 + /* Enable paging to see if encryption is active */ 289 + movl %cr0, %edx /* Backup %cr0 in %edx */ 290 + movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */ 291 + movl %ecx, %cr0 292 + 293 + cmpl %eax, 0(%ebp) 294 + jne 3f 295 + cmpl %ebx, 4(%ebp) 296 + jne 3f 297 + 298 + movl %edx, %cr0 /* Restore previous %cr0 */ 299 + 300 + jmp 4f 301 + 302 + 3: /* Check failed - hlt the machine */ 303 + hlt 304 + jmp 3b 305 + 306 + 4: 307 + popl %ebp 308 + popl %ebx 309 + RET 310 + SYM_FUNC_END(startup32_check_sev_cbit) 311 + 179 312 .code64 180 313 181 314 #include "../../kernel/sev_verify_cbit.S" 182 315 183 316 .data 184 317 185 - #ifdef CONFIG_AMD_MEM_ENCRYPT 186 318 .balign 8 187 319 SYM_DATA(sme_me_mask, .quad 0) 188 320 SYM_DATA(sev_status, .quad 0) 189 321 SYM_DATA(sev_check_data, .quad 0) 190 - #endif 322 + 323 + SYM_DATA_START_LOCAL(boot32_idt) 324 + .rept 32 325 + .quad 0 326 + .endr 327 + SYM_DATA_END(boot32_idt) 328 + 329 + SYM_DATA_START_LOCAL(boot32_idt_desc) 330 + .word . - boot32_idt - 1 331 + .long 0 332 + SYM_DATA_END(boot32_idt_desc)
+3 -12
arch/x86/boot/cpuflags.c
··· 64 64 return !!((f0^f1) & mask); 65 65 } 66 66 67 - /* Handle x86_32 PIC using ebx. */ 68 - #if defined(__i386__) && defined(__PIC__) 69 - # define EBX_REG "=r" 70 - #else 71 - # define EBX_REG "=b" 72 - #endif 73 - 74 67 void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d) 75 68 { 76 - asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t" 77 - "cpuid \n\t" 78 - ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t" 79 - : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b) 80 - : "a" (id), "c" (count) 69 + asm volatile("cpuid" 70 + : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d) 71 + : "0" (id), "2" (count) 81 72 ); 82 73 } 83 74
+1 -1
arch/x86/boot/header.S
··· 407 407 # define XLF1 0 408 408 #endif 409 409 410 - #ifdef CONFIG_EFI_STUB 410 + #ifdef CONFIG_EFI_HANDOVER_PROTOCOL 411 411 # ifdef CONFIG_EFI_MIXED 412 412 # define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64) 413 413 # else
+2
arch/x86/boot/tools/build.c
··· 290 290 { 291 291 unsigned long addr = efi32_stub_entry; 292 292 293 + #ifdef CONFIG_EFI_HANDOVER_PROTOCOL 293 294 #ifdef CONFIG_X86_64 294 295 /* Yes, this is really how we defined it :( */ 295 296 addr = efi64_stub_entry - 0x200; ··· 299 298 #ifdef CONFIG_EFI_MIXED 300 299 if (efi32_stub_entry != addr) 301 300 die("32-bit and 64-bit EFI entry points do not match\n"); 301 + #endif 302 302 #endif 303 303 put_unaligned_le32(addr, &buf[0x264]); 304 304 }
+1
arch/x86/include/asm/realmode.h
··· 91 91 92 92 void reserve_real_mode(void); 93 93 void load_trampoline_pgtable(void); 94 + void init_real_mode(void); 94 95 95 96 #endif /* __ASSEMBLY__ */ 96 97
+4
arch/x86/include/asm/x86_init.h
··· 285 285 * possible in x86_early_init_platform_quirks() by 286 286 * only using the current x86_hardware_subarch 287 287 * semantics. 288 + * @realmode_reserve: reserve memory for realmode trampoline 289 + * @realmode_init: initialize realmode trampoline 288 290 * @hyper: x86 hypervisor specific runtime callbacks 289 291 */ 290 292 struct x86_platform_ops { ··· 303 301 void (*apic_post_init)(void); 304 302 struct x86_legacy_features legacy; 305 303 void (*set_legacy_features)(void); 304 + void (*realmode_reserve)(void); 305 + void (*realmode_init)(void); 306 306 struct x86_hyper_runtime hyper; 307 307 struct x86_guest guest; 308 308 };
+1 -1
arch/x86/kernel/setup.c
··· 1176 1176 * Moreover, on machines with SandyBridge graphics or in setups that use 1177 1177 * crashkernel the entire 1M is reserved anyway. 1178 1178 */ 1179 - reserve_real_mode(); 1179 + x86_platform.realmode_reserve(); 1180 1180 1181 1181 init_mem_mapping(); 1182 1182
+3
arch/x86/kernel/x86_init.c
··· 25 25 #include <asm/iommu.h> 26 26 #include <asm/mach_traps.h> 27 27 #include <asm/irqdomain.h> 28 + #include <asm/realmode.h> 28 29 29 30 void x86_init_noop(void) { } 30 31 void __init x86_init_uint_noop(unsigned int unused) { } ··· 146 145 .get_nmi_reason = default_get_nmi_reason, 147 146 .save_sched_clock_state = tsc_save_sched_clock_state, 148 147 .restore_sched_clock_state = tsc_restore_sched_clock_state, 148 + .realmode_reserve = reserve_real_mode, 149 + .realmode_init = init_real_mode, 149 150 .hyper.pin_vcpu = x86_op_int_noop, 150 151 151 152 .guest = {
+6 -2
arch/x86/realmode/init.c
··· 200 200 set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); 201 201 } 202 202 203 - static int __init init_real_mode(void) 203 + void __init init_real_mode(void) 204 204 { 205 205 if (!real_mode_header) 206 206 panic("Real mode trampoline was not allocated"); 207 207 208 208 setup_real_mode(); 209 209 set_real_mode_permissions(); 210 + } 210 211 212 + static int __init do_init_real_mode(void) 213 + { 214 + x86_platform.realmode_init(); 211 215 return 0; 212 216 } 213 - early_initcall(init_real_mode); 217 + early_initcall(do_init_real_mode);
+2
arch/x86/xen/enlighten_pv.c
··· 1266 1266 xen_vcpu_info_reset(0); 1267 1267 1268 1268 x86_platform.get_nmi_reason = xen_get_nmi_reason; 1269 + x86_platform.realmode_reserve = x86_init_noop; 1270 + x86_platform.realmode_init = x86_init_noop; 1269 1271 1270 1272 x86_init.resources.memory_setup = xen_memory_setup; 1271 1273 x86_init.irqs.intr_mode_select = x86_init_noop;
+1 -1
drivers/firmware/efi/libstub/x86-stub.c
··· 23 23 24 24 const efi_system_table_t *efi_system_table; 25 25 const efi_dxe_services_table_t *efi_dxe_table; 26 - extern u32 image_offset; 26 + u32 image_offset __section(".data"); 27 27 static efi_loaded_image_t *image = NULL; 28 28 29 29 static efi_status_t