Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/sev: Use boot SVSM CA for all startup and init code

To avoid having to reason about whether or not to use the per-CPU SVSM calling
area when running startup and init code on the boot CPU, reuse the boot SVSM
calling area as the per-CPU area for the BSP.

Thus, remove the need to make the per-CPU variables and associated state in
sev_cfg accessible to the startup code once confined.

[ bp: Massage commit message. ]

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/20250828102202.1849035-33-ardb+git@google.com

Authored by Ard Biesheuvel and committed by Borislav Petkov (AMD).
c54604fb 00d25566

+28 -59
-13
arch/x86/boot/compressed/sev.c
··· 37 37 38 38 #define __BOOT_COMPRESSED 39 39 40 - extern u64 boot_svsm_caa_pa; 41 - 42 - struct svsm_ca *svsm_get_caa(void) 43 - { 44 - /* The decompressor is mapped 1:1 so VA == PA */ 45 - return (struct svsm_ca *)boot_svsm_caa_pa; 46 - } 47 - 48 - u64 svsm_get_caa_pa(void) 49 - { 50 - return boot_svsm_caa_pa; 51 - } 52 - 53 40 u8 snp_vmpl; 54 41 55 42 /* Include code for early handlers */
+6 -5
arch/x86/boot/startup/sev-startup.c
··· 50 50 /* For early boot SVSM communication */ 51 51 struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE); 52 52 53 - DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); 54 - DEFINE_PER_CPU(u64, svsm_caa_pa); 55 - 56 53 /* 57 54 * Nothing shall interrupt this code path while holding the per-CPU 58 55 * GHCB. The backup GHCB is only for NMIs interrupting this path. ··· 150 153 unsigned long npages) 151 154 { 152 155 struct psc_desc d = { 153 - SNP_PAGE_STATE_PRIVATE, svsm_get_caa(), svsm_get_caa_pa() 156 + SNP_PAGE_STATE_PRIVATE, 157 + rip_rel_ptr(&boot_svsm_ca_page), 158 + boot_svsm_caa_pa 154 159 }; 155 160 156 161 /* ··· 175 176 unsigned long npages) 176 177 { 177 178 struct psc_desc d = { 178 - SNP_PAGE_STATE_SHARED, svsm_get_caa(), svsm_get_caa_pa() 179 + SNP_PAGE_STATE_SHARED, 180 + rip_rel_ptr(&boot_svsm_ca_page), 181 + boot_svsm_caa_pa 179 182 }; 180 183 181 184 /*
+22 -25
arch/x86/coco/sev/core.c
··· 46 46 #include <asm/cmdline.h> 47 47 #include <asm/msr.h> 48 48 49 + DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); 50 + DEFINE_PER_CPU(u64, svsm_caa_pa); 51 + 52 + static inline struct svsm_ca *svsm_get_caa(void) 53 + { 54 + if (sev_cfg.use_cas) 55 + return this_cpu_read(svsm_caa); 56 + else 57 + return rip_rel_ptr(&boot_svsm_ca_page); 58 + } 59 + 60 + static inline u64 svsm_get_caa_pa(void) 61 + { 62 + if (sev_cfg.use_cas) 63 + return this_cpu_read(svsm_caa_pa); 64 + else 65 + return boot_svsm_caa_pa; 66 + } 67 + 49 68 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */ 50 69 #define AP_INIT_CS_LIMIT 0xffff 51 70 #define AP_INIT_DS_LIMIT 0xffff ··· 1331 1312 struct svsm_ca *caa; 1332 1313 1333 1314 /* Allocate the SVSM CA page if an SVSM is present */ 1334 - caa = memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE); 1315 + caa = cpu ? memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE) 1316 + : &boot_svsm_ca_page; 1335 1317 1336 1318 per_cpu(svsm_caa, cpu) = caa; 1337 1319 per_cpu(svsm_caa_pa, cpu) = __pa(caa); ··· 1386 1366 init_ghcb(cpu); 1387 1367 } 1388 1368 1389 - /* If running under an SVSM, switch to the per-cpu CA */ 1390 - if (snp_vmpl) { 1391 - struct svsm_call call = {}; 1392 - unsigned long flags; 1393 - int ret; 1394 - 1395 - local_irq_save(flags); 1396 - 1397 - /* 1398 - * SVSM_CORE_REMAP_CA call: 1399 - * RAX = 0 (Protocol=0, CallID=0) 1400 - * RCX = New CA GPA 1401 - */ 1402 - call.caa = svsm_get_caa(); 1403 - call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA); 1404 - call.rcx = this_cpu_read(svsm_caa_pa); 1405 - ret = svsm_perform_call_protocol(&call); 1406 - if (ret) 1407 - panic("Can't remap the SVSM CA, ret=%d, rax_out=0x%llx\n", 1408 - ret, call.rax_out); 1409 - 1369 + if (snp_vmpl) 1410 1370 sev_cfg.use_cas = true; 1411 - 1412 - local_irq_restore(flags); 1413 - } 1414 1371 1415 1372 sev_es_setup_play_dead(); 1416 1373
-16
arch/x86/include/asm/sev-internal.h
··· 62 62 63 63 extern u64 boot_svsm_caa_pa; 64 64 65 - static __always_inline struct svsm_ca *svsm_get_caa(void) 66 - { 67 - if (sev_cfg.use_cas) 68 - return this_cpu_read(svsm_caa); 69 - else 70 - return rip_rel_ptr(&boot_svsm_ca_page); 71 - } 72 - 73 - static __always_inline u64 svsm_get_caa_pa(void) 74 - { 75 - if (sev_cfg.use_cas) 76 - return this_cpu_read(svsm_caa_pa); 77 - else 78 - return boot_svsm_caa_pa; 79 - } 80 - 81 65 enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt); 82 66 void vc_forward_exception(struct es_em_ctxt *ctxt); 83 67