Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86_sev_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 SEV updates from Borislav Petkov:

- Largely cleanups along with a change to save XSS to the GHCB
(Guest-Host Communication Block) in SEV-ES guests so that the
hypervisor can determine the guest's XSAVES buffer size properly
and thus support shadow stacks in AMD confidential guests

* tag 'x86_sev_for_v6.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cc: Fix enum spelling to fix kernel-doc warnings
x86/boot: Drop unused sev_enable() fallback
x86/coco/sev: Convert has_cpuflag() to use cpu_feature_enabled()
x86/sev: Include XSS value in GHCB CPUID request
x86/boot: Move boot_*msr helpers to asm/shared/msr.h

+45 -55
-11
arch/x86/boot/compressed/misc.h
@@ -152,17 +152,6 @@
 void sev_insn_decode_init(void);
 bool early_setup_ghcb(void);
 #else
-static inline void sev_enable(struct boot_params *bp)
-{
-	/*
-	 * bp->cc_blob_address should only be set by boot/compressed kernel.
-	 * Initialize it to 0 unconditionally (thus here in this stub too) to
-	 * ensure that uninitialized values from buggy bootloaders aren't
-	 * propagated.
-	 */
-	if (bp)
-		bp->cc_blob_address = 0;
-}
 static inline void snp_check_features(void) { }
 static inline void sev_es_shutdown_ghcb(void) { }
 static inline bool sev_es_check_ghcb_fault(unsigned long address)
+4 -3
arch/x86/boot/compressed/sev.c
@@ -14,6 +14,7 @@

 #include <asm/bootparam.h>
 #include <asm/pgtable_types.h>
+#include <asm/shared/msr.h>
 #include <asm/sev.h>
 #include <asm/trapnr.h>
 #include <asm/trap_pf.h>
@@ -398,7 +397,7 @@
 	}

 	/* Set the SME mask if this is an SEV guest. */
-	boot_rdmsr(MSR_AMD64_SEV, &m);
+	raw_rdmsr(MSR_AMD64_SEV, &m);
 	sev_status = m.q;
 	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
 		return;
@@ -447,7 +446,7 @@
 	if (sev_check_cpu_support() < 0)
 		return 0;

-	boot_rdmsr(MSR_AMD64_SEV, &m);
+	raw_rdmsr(MSR_AMD64_SEV, &m);
 	return m.q;
 }

@@ -497,7 +496,7 @@
 	struct msr m;

 	/* Obtain the address of the calling area to use */
-	boot_rdmsr(MSR_SVSM_CAA, &m);
+	raw_rdmsr(MSR_SVSM_CAA, &m);
 	boot_svsm_caa_pa = m.q;

 	/*
+3 -3
arch/x86/boot/compressed/sev.h
@@ -10,7 +10,7 @@

 #ifdef CONFIG_AMD_MEM_ENCRYPT

-#include "../msr.h"
+#include <asm/shared/msr.h>

 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 sev_get_status(void);
@@ -20,7 +20,7 @@
 {
 	struct msr m;

-	boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+	raw_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);

 	return m.q;
 }
@@ -30,7 +30,7 @@
 	struct msr m;

 	m.q = val;
-	boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+	raw_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
 }

 #else
+8 -8
arch/x86/boot/cpucheck.c
@@ -26,9 +26,9 @@
 #include <asm/intel-family.h>
 #include <asm/processor-flags.h>
 #include <asm/msr-index.h>
+#include <asm/shared/msr.h>

 #include "string.h"
-#include "msr.h"

 static u32 err_flags[NCAPINTS];
@@ -134,9 +134,9 @@

		struct msr m;

-		boot_rdmsr(MSR_K7_HWCR, &m);
+		raw_rdmsr(MSR_K7_HWCR, &m);
		m.l &= ~(1 << 15);
-		boot_wrmsr(MSR_K7_HWCR, &m);
+		raw_wrmsr(MSR_K7_HWCR, &m);

		get_cpuflags();	/* Make sure it really did something */
		err = check_cpuflags();
@@ -148,9 +148,9 @@

		struct msr m;

-		boot_rdmsr(MSR_VIA_FCR, &m);
+		raw_rdmsr(MSR_VIA_FCR, &m);
		m.l |= (1 << 1) | (1 << 7);
-		boot_wrmsr(MSR_VIA_FCR, &m);
+		raw_wrmsr(MSR_VIA_FCR, &m);

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_cpuflags();
@@ -160,14 +160,14 @@
		struct msr m, m_tmp;
		u32 level = 1;

-		boot_rdmsr(0x80860004, &m);
+		raw_rdmsr(0x80860004, &m);
		m_tmp = m;
		m_tmp.l = ~0;
-		boot_wrmsr(0x80860004, &m_tmp);
+		raw_wrmsr(0x80860004, &m_tmp);
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
-		boot_wrmsr(0x80860004, &m);
+		raw_wrmsr(0x80860004, &m);

		err = check_cpuflags();
	} else if (err == 0x01 &&
-26
arch/x86/boot/msr.h
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Helpers/definitions related to MSR access.
- */
-
-#ifndef BOOT_MSR_H
-#define BOOT_MSR_H
-
-#include <asm/shared/msr.h>
-
-/*
- * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
- * boot kernel since they rely on tracepoint/exception handling infrastructure
- * that's not available here.
- */
-static inline void boot_rdmsr(unsigned int reg, struct msr *m)
-{
-	asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
-}
-
-static inline void boot_wrmsr(unsigned int reg, const struct msr *m)
-{
-	asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
-}
-
-#endif /* BOOT_MSR_H */
+1 -1
arch/x86/boot/startup/sev-shared.c
@@ -12,7 +12,7 @@
 #include <asm/setup_data.h>

 #ifndef __BOOT_COMPRESSED
-#define has_cpuflag(f)	boot_cpu_has(f)
+#define has_cpuflag(f)	cpu_feature_enabled(f)
 #else
 #undef WARN
 #define WARN(condition, format...) (!!(condition))
-1
arch/x86/coco/sev/vc-handle.c
@@ -352,7 +352,6 @@

 #define sev_printk(fmt, ...)	printk(fmt, ##__VA_ARGS__)
 #define error(v)
-#define has_cpuflag(f)		boot_cpu_has(f)

 #include "vc-shared.c"

+11
arch/x86/coco/sev/vc-shared.c
@@ -1,5 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0

+#ifndef __BOOT_COMPRESSED
+#define has_cpuflag(f)	cpu_feature_enabled(f)
+#endif
+
 static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
					    unsigned long exit_code)
 {
@@ -549,6 +545,13 @@
	else
		/* xgetbv will cause #GP - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);
+
+	if (has_cpuflag(X86_FEATURE_SHSTK) && regs->ax == 0xd && regs->cx == 1) {
+		struct msr m;
+
+		raw_rdmsr(MSR_IA32_XSS, &m);
+		ghcb_set_xss(ghcb, m.q);
+	}

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
+15
arch/x86/include/asm/shared/msr.h
@@ -12,4 +12,19 @@
	};
 };

+/*
+ * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
+ * boot kernel since they rely on tracepoint/exception handling infrastructure
+ * that's not available here.
+ */
+static inline void raw_rdmsr(unsigned int reg, struct msr *m)
+{
+	asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
+}
+
+static inline void raw_wrmsr(unsigned int reg, const struct msr *m)
+{
+	asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
+}
+
 #endif /* _ASM_X86_SHARED_MSR_H */
+1
arch/x86/include/asm/svm.h
@@ -701,5 +701,6 @@
 DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
 DEFINE_GHCB_ACCESSORS(sw_scratch)
 DEFINE_GHCB_ACCESSORS(xcr0)
+DEFINE_GHCB_ACCESSORS(xss)

 #endif
+1 -1
arch/x86/lib/kaslr.c
@@ -22,7 +22,7 @@
 #include <asm/setup.h>

 #define debug_putstr(v) early_printk("%s", v)
-#define has_cpuflag(f) boot_cpu_has(f)
+#define has_cpuflag(f) cpu_feature_enabled(f)
 #define get_boot_seed() kaslr_offset()
 #endif

+1 -1
include/linux/cc_platform.h
@@ -74,7 +74,7 @@
	CC_ATTR_GUEST_UNROLL_STRING_IO,

	/**
-	 * @CC_ATTR_SEV_SNP: Guest SNP is active.
+	 * @CC_ATTR_GUEST_SEV_SNP: Guest SNP is active.
	 *
	 * The platform/OS is running as a guest/virtual machine and actively
	 * using AMD SEV-SNP features.