Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/sev: Avoid global variable to store virtual address of SVSM area

The boottime SVSM calling area is used both by the startup code running from
a 1:1 mapping, and potentially later on running from the ordinary kernel
mapping.

This SVSM calling area is statically allocated, and so its physical address
doesn't change. However, its virtual address depends on the calling context
(1:1 mapping or kernel virtual mapping), and even though the variable that
holds the virtual address of this calling area gets updated from 1:1 address
to kernel address during the boot, it is hard to reason about why this is
guaranteed to be safe.

So instead, take the RIP-relative address of the boottime SVSM calling area
whenever its virtual address is required, and only use a global variable for
the physical address.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/20250828102202.1849035-30-ardb+git@google.com

Authored by Ard Biesheuvel and committed by Borislav Petkov (AMD).
a5f03880 37dbd78f

+9 -32
+2 -3
arch/x86/boot/compressed/sev.c
···
37  37
38  38	#define __BOOT_COMPRESSED
39  39
40   -	extern struct svsm_ca *boot_svsm_caa;
41  40	extern u64 boot_svsm_caa_pa;
42  41
43  42	struct svsm_ca *svsm_get_caa(void)
44  43	{
45   -		return boot_svsm_caa;
    44 +		/* The decompressor is mapped 1:1 so VA == PA */
    45 +		return (struct svsm_ca *)boot_svsm_caa_pa;
46  46	}
47  47
48  48	u64 svsm_get_caa_pa(void)
···
532  532
533  533		/* Obtain the address of the calling area to use */
534  534		boot_rdmsr(MSR_SVSM_CAA, &m);
535    -		boot_svsm_caa = (void *)m.q;
536  535		boot_svsm_caa_pa = m.q;
537  536
538  537		/*
+1 -6
arch/x86/boot/startup/sev-shared.c
···
13  13
14  14	#ifndef __BOOT_COMPRESSED
15  15	#define error(v) pr_err(v)
    16 +	#define has_cpuflag(f) boot_cpu_has(f)
16  17	#else
17  18	#undef WARN
18  19	#define WARN(condition, format...) (!!(condition))
···
27  26	 * early boot, both with identity mapped virtual addresses and proper kernel
28  27	 * virtual addresses.
29  28	 */
30   -	struct svsm_ca *boot_svsm_caa __ro_after_init;
31  29	u64 boot_svsm_caa_pa __ro_after_init;
32  30
33  31	/*
···
720  720		if (caa & (PAGE_SIZE - 1))
721  721			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CAA);
722  722
723    -		/*
724    -		 * The CA is identity mapped when this routine is called, both by the
725    -		 * decompressor code and the early kernel code.
726    -		 */
727    -		boot_svsm_caa = (struct svsm_ca *)caa;
728  723		boot_svsm_caa_pa = caa;
729  724
730  725		/* Advertise the SVSM presence via CPUID. */
+5 -4
arch/x86/boot/startup/sev-startup.c
···
252  252
253  253	static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
254  254	{
     255 +		struct snp_secrets_page *secrets = (void *)cc_info->secrets_phys;
255  256		struct svsm_call call = {};
256  257		u64 pa;
257  258
···
273  272		pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);
274  273
275  274		/*
276    -		 * Switch over to the boot SVSM CA while the current CA is still
277    -		 * addressable. There is no GHCB at this point so use the MSR protocol.
     275 +		 * Switch over to the boot SVSM CA while the current CA is still 1:1
     276 +		 * mapped and thus addressable with VA == PA. There is no GHCB at this
     277 +		 * point so use the MSR protocol.
278  278		 *
279  279		 * SVSM_CORE_REMAP_CA call:
280  280		 *   RAX = 0 (Protocol=0, CallID=0)
281  281		 *   RCX = New CA GPA
282  282		 */
283    -		call.caa = svsm_get_caa();
     283 +		call.caa = (struct svsm_ca *)secrets->svsm_caa;
284  284		call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
285  285		call.rcx = pa;
286  286
287  287		if (svsm_call_msr_protocol(&call))
288  288			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);
289  289
290    -		boot_svsm_caa = (struct svsm_ca *)pa;
291  290		boot_svsm_caa_pa = pa;
292  291
293  292
-9
arch/x86/coco/sev/core.c
···
1666  1666		pr_cont("\n");
1667  1667	}
1668  1668
1669     -	void __init snp_update_svsm_ca(void)
1670     -	{
1671     -		if (!snp_vmpl)
1672     -			return;
1673     -
1674     -		/* Update the CAA to a proper kernel address */
1675     -		boot_svsm_caa = &boot_svsm_ca_page;
1676     -	}
1677     -
1678  1669	#ifdef CONFIG_SYSFS
1679  1670	static ssize_t vmpl_show(struct kobject *kobj,
1680  1671				 struct kobj_attribute *attr, char *buf)
+1 -2
arch/x86/include/asm/sev-internal.h
···
60  60	DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
61  61	DECLARE_PER_CPU(u64, svsm_caa_pa);
62  62
63   -	extern struct svsm_ca *boot_svsm_caa;
64  63	extern u64 boot_svsm_caa_pa;
65  64
66  65	static __always_inline struct svsm_ca *svsm_get_caa(void)
···
67  68		if (sev_cfg.use_cas)
68  69			return this_cpu_read(svsm_caa);
69  70		else
70   -			return boot_svsm_caa;
    71 +			return rip_rel_ptr(&boot_svsm_ca_page);
71  72	}
72  73
73  74	static __always_inline u64 svsm_get_caa_pa(void)
-2
arch/x86/include/asm/sev.h
···
519  519	u64 snp_get_unsupported_features(u64 status);
520  520	u64 sev_get_status(void);
521  521	void sev_show_status(void);
522    -	void snp_update_svsm_ca(void);
523  522	int prepare_pte_enc(struct pte_enc_desc *d);
524  523	void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
525  524	void snp_kexec_finish(void);
···
600  601	static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
601  602	static inline u64 sev_get_status(void) { return 0; }
602  603	static inline void sev_show_status(void) { }
603    -	static inline void snp_update_svsm_ca(void) { }
604  604	static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
605  605	static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
606  606	static inline void snp_kexec_finish(void) { }
-6
arch/x86/mm/mem_encrypt_amd.c
···
536  536		x86_init.resources.dmi_setup = snp_dmi_setup;
537  537	}
538  538
539    -	/*
540    -	 * Switch the SVSM CA mapping (if active) from identity mapped to
541    -	 * kernel mapped.
542    -	 */
543    -	snp_update_svsm_ca();
544    -
545  539		if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
546  540			setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
547  541	}