Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/boot/compressed/64: Add stage1 #VC handler

Add the first handler for #VC exceptions. At stage 1 there is no GHCB
yet because the kernel might still be running on the EFI page table.

The stage 1 handler is limited to the MSR-based protocol to talk to the
hypervisor and can only support CPUID exit-codes, but that is enough to
get to stage 2.

[ bp: Zap superfluous newlines after rd/wrmsr instruction mnemonics. ]

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-20-joro@8bytes.org

authored by

Joerg Roedel and committed by
Borislav Petkov
29dcc60f 21cf2372

+160
+1
arch/x86/boot/compressed/Makefile
··· 88 88 vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o 89 89 vmlinux-objs-y += $(obj)/mem_encrypt.o 90 90 vmlinux-objs-y += $(obj)/pgtable_64.o 91 + vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o 91 92 endif 92 93 93 94 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
+4
arch/x86/boot/compressed/idt_64.c
··· 32 32 { 33 33 boot_idt_desc.address = (unsigned long)boot_idt; 34 34 35 + 36 + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) 37 + set_idt_entry(X86_TRAP_VC, boot_stage1_vc); 38 + 35 39 load_boot_idt(&boot_idt_desc); 36 40 } 37 41
+4
arch/x86/boot/compressed/idt_handlers_64.S
··· 70 70 .code64 71 71 72 72 EXCEPTION_HANDLER boot_page_fault do_boot_page_fault error_code=1 73 + 74 + #ifdef CONFIG_AMD_MEM_ENCRYPT 75 + EXCEPTION_HANDLER boot_stage1_vc do_vc_no_ghcb error_code=1 76 + #endif
+1
arch/x86/boot/compressed/misc.h
··· 141 141 142 142 /* IDT Entry Points */ 143 143 void boot_page_fault(void); 144 + void boot_stage1_vc(void); 144 145 145 146 #endif /* BOOT_COMPRESSED_MISC_H */
+45
arch/x86/boot/compressed/sev-es.c
// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * Pre-decompression (boot/compressed) side of the SEV-ES support: provides
 * the GHCB MSR accessors used by the shared early #VC handler code that is
 * textually included at the bottom of this file.
 */

/*
 * misc.h needs to be first because it knows how to include the other kernel
 * headers in the pre-decompression code in a way that does not break
 * compilation.
 */
#include "misc.h"

#include <asm/sev-es.h>
#include <asm/msr-index.h>
#include <asm/ptrace.h>
#include <asm/svm.h>

/*
 * Read the 64-bit GHCB MSR (MSR_AMD64_SEV_ES_GHCB).
 *
 * RDMSR returns the value split across EDX:EAX; the two 32-bit halves are
 * recombined into one u64. low/high are unsigned long (64-bit here) so the
 * "high << 32" shift is well-defined.
 */
static inline u64 sev_es_rd_ghcb_msr(void)
{
	unsigned long low, high;

	asm volatile("rdmsr" : "=a" (low), "=d" (high) :
			"c" (MSR_AMD64_SEV_ES_GHCB));

	return ((high << 32) | low);
}

/*
 * Write @val to the 64-bit GHCB MSR, splitting it into the EAX (low) and
 * EDX (high) halves WRMSR expects. The "memory" clobber orders the write
 * against surrounding memory accesses (the MSR write is a request to the
 * hypervisor and must not be reordered past it).
 */
static inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low = val & 0xffffffffUL;
	high = val >> 32;

	asm volatile("wrmsr" : : "c" (MSR_AMD64_SEV_ES_GHCB),
			"a"(low), "d" (high) : "memory");
}

/*
 * The shared code below is also built into the running kernel where its
 * functions carry __init. In the decompression stub there are no init
 * sections, so stub __init out to nothing before including it.
 */
#undef __init
#define __init

/* Include code for early handlers */
#include "../../kernel/sev-es-shared.c"
+1
arch/x86/include/asm/msr-index.h
··· 466 466 #define MSR_AMD64_IBSBRTARGET 0xc001103b 467 467 #define MSR_AMD64_IBSOPDATA4 0xc001103d 468 468 #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ 469 + #define MSR_AMD64_SEV_ES_GHCB 0xc0010130 469 470 #define MSR_AMD64_SEV 0xc0010131 470 471 #define MSR_AMD64_SEV_ENABLED_BIT 0 471 472 #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+37
arch/x86/include/asm/sev-es.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * Definitions for the GHCB MSR-based protocol used to talk to the
 * hypervisor before a GHCB page is available (stage 1 #VC handling).
 */

#ifndef __ASM_ENCRYPTED_STATE_H
#define __ASM_ENCRYPTED_STATE_H

#include <linux/types.h>

/*
 * MSR-protocol request layout as built by GHCB_CPUID_REQ:
 *   bits 11:0  - request code (GHCB_SEV_CPUID_REQ)
 *   bits 31:30 - which CPUID output register is wanted (EAX..EDX below)
 *   bits 63:32 - the CPUID function number
 */
#define GHCB_SEV_CPUID_REQ	0x004UL
#define		GHCB_CPUID_REQ_EAX	0
#define		GHCB_CPUID_REQ_EBX	1
#define		GHCB_CPUID_REQ_ECX	2
#define		GHCB_CPUID_REQ_EDX	3
#define		GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
					(((unsigned long)reg & 3) << 30) | \
					(((unsigned long)fn) << 32))

#define GHCB_SEV_CPUID_RESP	0x005UL
/* Ask the hypervisor to terminate the guest - no response expected */
#define GHCB_SEV_TERMINATE	0x100UL

/* Low 12 bits of an MSR-protocol value hold the request/response code */
#define	GHCB_SEV_GHCB_RESP_CODE(v)	((v) & 0xfff)
/* rep; vmmcall == VMGEXIT - exit to the hypervisor to service a request */
#define	VMGEXIT()			{ asm volatile("rep; vmmcall\n\r"); }

void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);

/*
 * Return the low @bits bits of @val.
 * NOTE(review): the shift is undefined for bits >= 64; callers visible in
 * this commit only pass 32 - confirm no wider use before relying on it.
 */
static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask = (1ULL << bits) - 1;

	return (val & mask);
}

#endif
+1
arch/x86/include/asm/trapnr.h
··· 26 26 #define X86_TRAP_XF 19 /* SIMD Floating-Point Exception */ 27 27 #define X86_TRAP_VE 20 /* Virtualization Exception */ 28 28 #define X86_TRAP_CP 21 /* Control Protection Exception */ 29 + #define X86_TRAP_VC 29 /* VMM Communication Exception */ 29 30 #define X86_TRAP_IRET 32 /* IRET Exception */ 30 31 31 32 #endif
+66
arch/x86/kernel/sev-es-shared.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * AMD Encrypted Register State Support 4 + * 5 + * Author: Joerg Roedel <jroedel@suse.de> 6 + * 7 + * This file is not compiled stand-alone. It contains code shared 8 + * between the pre-decompression boot code and the running Linux kernel 9 + * and is included directly into both code-bases. 10 + */ 11 + 12 + /* 13 + * Boot VC Handler - This is the first VC handler during boot, there is no GHCB 14 + * page yet, so it only supports the MSR based communication with the 15 + * hypervisor and only the CPUID exit-code. 16 + */ 17 + void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) 18 + { 19 + unsigned int fn = lower_bits(regs->ax, 32); 20 + unsigned long val; 21 + 22 + /* Only CPUID is supported via MSR protocol */ 23 + if (exit_code != SVM_EXIT_CPUID) 24 + goto fail; 25 + 26 + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX)); 27 + VMGEXIT(); 28 + val = sev_es_rd_ghcb_msr(); 29 + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) 30 + goto fail; 31 + regs->ax = val >> 32; 32 + 33 + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX)); 34 + VMGEXIT(); 35 + val = sev_es_rd_ghcb_msr(); 36 + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) 37 + goto fail; 38 + regs->bx = val >> 32; 39 + 40 + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX)); 41 + VMGEXIT(); 42 + val = sev_es_rd_ghcb_msr(); 43 + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) 44 + goto fail; 45 + regs->cx = val >> 32; 46 + 47 + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX)); 48 + VMGEXIT(); 49 + val = sev_es_rd_ghcb_msr(); 50 + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) 51 + goto fail; 52 + regs->dx = val >> 32; 53 + 54 + /* Skip over the CPUID two-byte opcode */ 55 + regs->ip += 2; 56 + 57 + return; 58 + 59 + fail: 60 + sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE); 61 + VMGEXIT(); 62 + 63 + /* Shouldn't get here - if we do halt the machine */ 64 + while (true) 65 
+ asm volatile("hlt\n"); 66 + }