Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86_sev_for_v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 SEV updates from Borislav Petkov:

- Do the proper memory conversion of guest memory in order to be able
to kexec kernels in SNP guests along with other adjustments and
cleanups to that effect

- Start converting and moving functionality from the sev-guest driver
into core code with the purpose of supporting the secure TSC SNP
feature where the hypervisor cannot influence the TSC exposed to the
guest anymore

- Add a "nosnp" cmdline option in order to be able to disable SNP
support in the hypervisor and thus free up resources which are not
going to be used

- Cleanups

[ Reminding myself about the endless TLAs again: SEV is the AMD Secure
Encrypted Virtualization - Linus ]

* tag 'x86_sev_for_v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/sev: Cleanup vc_handle_msr()
x86/sev: Convert shared memory back to private on kexec
x86/mm: Refactor __set_clr_pte_enc()
x86/boot: Skip video memory access in the decompressor for SEV-ES/SNP
virt: sev-guest: Carve out SNP message context structure
virt: sev-guest: Reduce the scope of SNP command mutex
virt: sev-guest: Consolidate SNP guest messaging parameters to a struct
x86/sev: Cache the secrets page address
x86/sev: Handle failures from snp_init()
virt: sev-guest: Use AES GCM crypto library
x86/virt: Provide "nosnp" boot option for sev kernel command line
x86/virt: Move SEV-specific parsing into arch/x86/virt/svm

+543 -394
+5
Documentation/arch/x86/x86_64/boot-options.rst
··· 305 305 306 306 debug 307 307 Enable debug messages. 308 + 309 + nosnp 310 + Do not enable SEV-SNP (applies to host/hypervisor only). Setting 311 + 'nosnp' avoids the RMP check overhead in memory accesses when 312 + users do not want to run SEV-SNP guests.
+15
arch/x86/boot/compressed/misc.c
··· 385 385 hdr->xloadflags |= XLF_MEM_ENCRYPTION; 386 386 } 387 387 388 + static void early_sev_detect(void) 389 + { 390 + /* 391 + * Accessing video memory causes guest termination because 392 + * the boot stage2 #VC handler of SEV-ES/SNP guests does not 393 + * support MMIO handling and kexec -c adds screen_info to the 394 + * boot parameters passed to the kexec kernel, which causes 395 + * console output to be dumped to both video and serial. 396 + */ 397 + if (sev_status & MSR_AMD64_SEV_ES_ENABLED) 398 + lines = cols = 0; 399 + } 400 + 388 401 /* 389 402 * The compressed kernel image (ZO), has been moved so that its position 390 403 * is against the end of the buffer used to hold the uncompressed kernel ··· 452 439 * paravirtualized port I/O operations if needed. 453 440 */ 454 441 early_tdx_detect(); 442 + 443 + early_sev_detect(); 455 444 456 445 console_init(); 457 446
+166 -103
arch/x86/coco/sev/core.c
··· 92 92 /* Bitmap of SEV features supported by the hypervisor */ 93 93 static u64 sev_hv_features __ro_after_init; 94 94 95 + /* Secrets page physical address from the CC blob */ 96 + static u64 secrets_pa __ro_after_init; 97 + 95 98 /* #VC handler runtime per-CPU data */ 96 99 struct sev_es_runtime_data { 97 100 struct ghcb ghcb_page; ··· 143 140 static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa); 144 141 static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); 145 142 static DEFINE_PER_CPU(u64, svsm_caa_pa); 146 - 147 - struct sev_config { 148 - __u64 debug : 1, 149 - 150 - /* 151 - * Indicates when the per-CPU GHCB has been created and registered 152 - * and thus can be used by the BSP instead of the early boot GHCB. 153 - * 154 - * For APs, the per-CPU GHCB is created before they are started 155 - * and registered upon startup, so this flag can be used globally 156 - * for the BSP and APs. 157 - */ 158 - ghcbs_initialized : 1, 159 - 160 - /* 161 - * Indicates when the per-CPU SVSM CA is to be used instead of the 162 - * boot SVSM CA. 163 - * 164 - * For APs, the per-CPU SVSM CA is created as part of the AP 165 - * bringup, so this flag can be used globally for the BSP and APs. 166 - */ 167 - use_cas : 1, 168 - 169 - __reserved : 61; 170 - }; 171 - 172 - static struct sev_config sev_cfg __read_mostly; 173 143 174 144 static __always_inline bool on_vc_stack(struct pt_regs *regs) 175 145 { ··· 698 722 __sev_put_ghcb(&state); 699 723 } 700 724 701 - static u64 __init get_secrets_page(void) 702 - { 703 - u64 pa_data = boot_params.cc_blob_address; 704 - struct cc_blob_sev_info info; 705 - void *map; 706 - 707 - /* 708 - * The CC blob contains the address of the secrets page, check if the 709 - * blob is present. 
710 - */ 711 - if (!pa_data) 712 - return 0; 713 - 714 - map = early_memremap(pa_data, sizeof(info)); 715 - if (!map) { 716 - pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n"); 717 - return 0; 718 - } 719 - memcpy(&info, map, sizeof(info)); 720 - early_memunmap(map, sizeof(info)); 721 - 722 - /* smoke-test the secrets page passed */ 723 - if (!info.secrets_phys || info.secrets_len != PAGE_SIZE) 724 - return 0; 725 - 726 - return info.secrets_phys; 727 - } 728 - 729 725 static u64 __init get_snp_jump_table_addr(void) 730 726 { 731 727 struct snp_secrets_page *secrets; 732 728 void __iomem *mem; 733 - u64 pa, addr; 729 + u64 addr; 734 730 735 - pa = get_secrets_page(); 736 - if (!pa) 737 - return 0; 738 - 739 - mem = ioremap_encrypted(pa, PAGE_SIZE); 731 + mem = ioremap_encrypted(secrets_pa, PAGE_SIZE); 740 732 if (!mem) { 741 733 pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n"); 742 734 return 0; ··· 952 1008 npages = (end - start) >> PAGE_SHIFT; 953 1009 954 1010 set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE); 1011 + } 1012 + 1013 + static void set_pte_enc(pte_t *kpte, int level, void *va) 1014 + { 1015 + struct pte_enc_desc d = { 1016 + .kpte = kpte, 1017 + .pte_level = level, 1018 + .va = va, 1019 + .encrypt = true 1020 + }; 1021 + 1022 + prepare_pte_enc(&d); 1023 + set_pte_enc_mask(kpte, d.pfn, d.new_pgprot); 1024 + } 1025 + 1026 + static void unshare_all_memory(void) 1027 + { 1028 + unsigned long addr, end, size, ghcb; 1029 + struct sev_es_runtime_data *data; 1030 + unsigned int npages, level; 1031 + bool skipped_addr; 1032 + pte_t *pte; 1033 + int cpu; 1034 + 1035 + /* Unshare the direct mapping. 
*/ 1036 + addr = PAGE_OFFSET; 1037 + end = PAGE_OFFSET + get_max_mapped(); 1038 + 1039 + while (addr < end) { 1040 + pte = lookup_address(addr, &level); 1041 + size = page_level_size(level); 1042 + npages = size / PAGE_SIZE; 1043 + skipped_addr = false; 1044 + 1045 + if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) { 1046 + addr += size; 1047 + continue; 1048 + } 1049 + 1050 + /* 1051 + * Ensure that all the per-CPU GHCBs are made private at the 1052 + * end of the unsharing loop so that the switch to the slower 1053 + * MSR protocol happens last. 1054 + */ 1055 + for_each_possible_cpu(cpu) { 1056 + data = per_cpu(runtime_data, cpu); 1057 + ghcb = (unsigned long)&data->ghcb_page; 1058 + 1059 + if (addr <= ghcb && ghcb <= addr + size) { 1060 + skipped_addr = true; 1061 + break; 1062 + } 1063 + } 1064 + 1065 + if (!skipped_addr) { 1066 + set_pte_enc(pte, level, (void *)addr); 1067 + snp_set_memory_private(addr, npages); 1068 + } 1069 + addr += size; 1070 + } 1071 + 1072 + /* Unshare all bss decrypted memory. */ 1073 + addr = (unsigned long)__start_bss_decrypted; 1074 + end = (unsigned long)__start_bss_decrypted_unused; 1075 + npages = (end - addr) >> PAGE_SHIFT; 1076 + 1077 + for (; addr < end; addr += PAGE_SIZE) { 1078 + pte = lookup_address(addr, &level); 1079 + if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) 1080 + continue; 1081 + 1082 + set_pte_enc(pte, level, (void *)addr); 1083 + } 1084 + addr = (unsigned long)__start_bss_decrypted; 1085 + snp_set_memory_private(addr, npages); 1086 + 1087 + __flush_tlb_all(); 1088 + } 1089 + 1090 + /* Stop new private<->shared conversions */ 1091 + void snp_kexec_begin(void) 1092 + { 1093 + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) 1094 + return; 1095 + 1096 + if (!IS_ENABLED(CONFIG_KEXEC_CORE)) 1097 + return; 1098 + 1099 + /* 1100 + * Crash kernel ends up here with interrupts disabled: can't wait for 1101 + * conversions to finish. 1102 + * 1103 + * If race happened, just report and proceed. 
1104 + */ 1105 + if (!set_memory_enc_stop_conversion()) 1106 + pr_warn("Failed to stop shared<->private conversions\n"); 1107 + } 1108 + 1109 + void snp_kexec_finish(void) 1110 + { 1111 + struct sev_es_runtime_data *data; 1112 + unsigned int level, cpu; 1113 + unsigned long size; 1114 + struct ghcb *ghcb; 1115 + pte_t *pte; 1116 + 1117 + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) 1118 + return; 1119 + 1120 + if (!IS_ENABLED(CONFIG_KEXEC_CORE)) 1121 + return; 1122 + 1123 + unshare_all_memory(); 1124 + 1125 + /* 1126 + * Switch to using the MSR protocol to change per-CPU GHCBs to 1127 + * private. All the per-CPU GHCBs have been switched back to private, 1128 + * so can't do any more GHCB calls to the hypervisor beyond this point 1129 + * until the kexec'ed kernel starts running. 1130 + */ 1131 + boot_ghcb = NULL; 1132 + sev_cfg.ghcbs_initialized = false; 1133 + 1134 + for_each_possible_cpu(cpu) { 1135 + data = per_cpu(runtime_data, cpu); 1136 + ghcb = &data->ghcb_page; 1137 + pte = lookup_address((unsigned long)ghcb, &level); 1138 + size = page_level_size(level); 1139 + set_pte_enc(pte, level, (void *)ghcb); 1140 + snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE)); 1141 + } 955 1142 } 956 1143 957 1144 static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa) ··· 1406 1331 return 0; 1407 1332 } 1408 1333 1334 + /* Writes to the SVSM CAA MSR are ignored */ 1335 + static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write) 1336 + { 1337 + if (write) 1338 + return ES_OK; 1339 + 1340 + regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa)); 1341 + regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa)); 1342 + 1343 + return ES_OK; 1344 + } 1345 + 1409 1346 static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) 1410 1347 { 1411 1348 struct pt_regs *regs = ctxt->regs; 1412 1349 enum es_result ret; 1413 - u64 exit_info_1; 1350 + bool write; 1414 1351 1415 1352 /* Is it a WRMSR? 
*/ 1416 - exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0; 1353 + write = ctxt->insn.opcode.bytes[1] == 0x30; 1417 1354 1418 - if (regs->cx == MSR_SVSM_CAA) { 1419 - /* Writes to the SVSM CAA msr are ignored */ 1420 - if (exit_info_1) 1421 - return ES_OK; 1422 - 1423 - regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa)); 1424 - regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa)); 1425 - 1426 - return ES_OK; 1427 - } 1355 + if (regs->cx == MSR_SVSM_CAA) 1356 + return __vc_handle_msr_caa(regs, write); 1428 1357 1429 1358 ghcb_set_rcx(ghcb, regs->cx); 1430 - if (exit_info_1) { 1359 + if (write) { 1431 1360 ghcb_set_rax(ghcb, regs->ax); 1432 1361 ghcb_set_rdx(ghcb, regs->dx); 1433 1362 } 1434 1363 1435 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0); 1364 + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0); 1436 1365 1437 - if ((ret == ES_OK) && (!exit_info_1)) { 1366 + if ((ret == ES_OK) && !write) { 1438 1367 regs->ax = ghcb->save.rax; 1439 1368 regs->dx = ghcb->save.rdx; 1440 1369 } ··· 2379 2300 if (!cc_info) 2380 2301 return false; 2381 2302 2303 + if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE) 2304 + secrets_pa = cc_info->secrets_phys; 2305 + else 2306 + return false; 2307 + 2382 2308 setup_cpuid_table(cc_info); 2383 2309 2384 2310 svsm_setup(cc_info); ··· 2458 2374 } 2459 2375 arch_initcall(report_snp_info); 2460 2376 2461 - static int __init init_sev_config(char *str) 2462 - { 2463 - char *s; 2464 - 2465 - while ((s = strsep(&str, ","))) { 2466 - if (!strcmp(s, "debug")) { 2467 - sev_cfg.debug = true; 2468 - continue; 2469 - } 2470 - 2471 - pr_info("SEV command-line option '%s' was not recognized\n", s); 2472 - } 2473 - 2474 - return 1; 2475 - } 2476 - __setup("sev=", init_sev_config); 2477 - 2478 2377 static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input) 2479 2378 { 2480 2379 /* If (new) lengths have been returned, propagate them up */ ··· 2508 2441 } 2509 2442 
EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req); 2510 2443 2511 - int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio) 2444 + int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input, 2445 + struct snp_guest_request_ioctl *rio) 2512 2446 { 2513 2447 struct ghcb_state state; 2514 2448 struct es_em_ctxt ctxt; ··· 2533 2465 2534 2466 vc_ghcb_invalidate(ghcb); 2535 2467 2536 - if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { 2468 + if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { 2537 2469 ghcb_set_rax(ghcb, input->data_gpa); 2538 2470 ghcb_set_rbx(ghcb, input->data_npages); 2539 2471 } 2540 2472 2541 - ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa); 2473 + ret = sev_es_ghcb_hv_call(ghcb, &ctxt, req->exit_code, input->req_gpa, input->resp_gpa); 2542 2474 if (ret) 2543 2475 goto e_put; 2544 2476 ··· 2553 2485 2554 2486 case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN): 2555 2487 /* Number of expected pages are returned in RBX */ 2556 - if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { 2488 + if (req->exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { 2557 2489 input->data_npages = ghcb_get_rbx(ghcb); 2558 2490 ret = -ENOSPC; 2559 2491 break; ··· 2581 2513 static int __init snp_init_platform_device(void) 2582 2514 { 2583 2515 struct sev_guest_platform_data data; 2584 - u64 gpa; 2585 2516 2586 2517 if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) 2587 2518 return -ENODEV; 2588 2519 2589 - gpa = get_secrets_page(); 2590 - if (!gpa) 2591 - return -ENODEV; 2592 - 2593 - data.secrets_gpa = gpa; 2520 + data.secrets_gpa = secrets_pa; 2594 2521 if (platform_device_add_data(&sev_guest_device, &data, sizeof(data))) 2595 2522 return -ENODEV; 2596 2523
+27
arch/x86/include/asm/sev-common.h
··· 220 220 #define GHCB_ERR_INVALID_INPUT 5 221 221 #define GHCB_ERR_INVALID_EVENT 6 222 222 223 + struct sev_config { 224 + __u64 debug : 1, 225 + 226 + /* 227 + * Indicates when the per-CPU GHCB has been created and registered 228 + * and thus can be used by the BSP instead of the early boot GHCB. 229 + * 230 + * For APs, the per-CPU GHCB is created before they are started 231 + * and registered upon startup, so this flag can be used globally 232 + * for the BSP and APs. 233 + */ 234 + ghcbs_initialized : 1, 235 + 236 + /* 237 + * Indicates when the per-CPU SVSM CA is to be used instead of the 238 + * boot SVSM CA. 239 + * 240 + * For APs, the per-CPU SVSM CA is created as part of the AP 241 + * bringup, so this flag can be used globally for the BSP and APs. 242 + */ 243 + use_cas : 1, 244 + 245 + __reserved : 61; 246 + }; 247 + 248 + extern struct sev_config sev_cfg; 249 + 223 250 #endif
+65 -2
arch/x86/include/asm/sev.h
··· 120 120 }; 121 121 122 122 #define MAX_AUTHTAG_LEN 32 123 + #define AUTHTAG_LEN 16 124 + #define AAD_LEN 48 125 + #define MSG_HDR_VER 1 123 126 124 127 /* See SNP spec SNP_GUEST_REQUEST section for the structure */ 125 128 enum msg_type { ··· 174 171 u64 secrets_gpa; 175 172 }; 176 173 174 + struct snp_guest_req { 175 + void *req_buf; 176 + size_t req_sz; 177 + 178 + void *resp_buf; 179 + size_t resp_sz; 180 + 181 + u64 exit_code; 182 + unsigned int vmpck_id; 183 + u8 msg_version; 184 + u8 msg_type; 185 + }; 186 + 177 187 /* 178 188 * The secrets page contains 96-bytes of reserved field that can be used by 179 189 * the guest OS. The guest OS uses the area to save the message sequence ··· 233 217 /* Remainder of page */ 234 218 u8 rsvd4[3744]; 235 219 } __packed; 220 + 221 + struct snp_msg_desc { 222 + /* request and response are in unencrypted memory */ 223 + struct snp_guest_msg *request, *response; 224 + 225 + /* 226 + * Avoid information leakage by double-buffering shared messages 227 + * in fields that are in regular encrypted memory. 228 + */ 229 + struct snp_guest_msg secret_request, secret_response; 230 + 231 + struct snp_secrets_page *secrets; 232 + struct snp_req_data input; 233 + 234 + void *certs_data; 235 + 236 + struct aesgcm_ctx *ctx; 237 + 238 + u32 *os_area_msg_seqno; 239 + u8 *vmpck; 240 + }; 236 241 237 242 /* 238 243 * The SVSM Calling Area (CA) related structures. ··· 320 283 u8 service_guid[16]; 321 284 u32 service_manifest_ver; 322 285 u8 rsvd[4]; 286 + }; 287 + 288 + /* PTE descriptor used for the prepare_pte_enc() operations. 
*/ 289 + struct pte_enc_desc { 290 + pte_t *kpte; 291 + int pte_level; 292 + bool encrypt; 293 + /* pfn of the kpte above */ 294 + unsigned long pfn; 295 + /* physical address of @pfn */ 296 + unsigned long pa; 297 + /* virtual address of @pfn */ 298 + void *va; 299 + /* memory covered by the pte */ 300 + unsigned long size; 301 + pgprot_t new_pgprot; 323 302 }; 324 303 325 304 /* ··· 445 392 bool snp_init(struct boot_params *bp); 446 393 void __noreturn snp_abort(void); 447 394 void snp_dmi_setup(void); 448 - int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio); 395 + int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input, 396 + struct snp_guest_request_ioctl *rio); 449 397 int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, struct svsm_attest_call *input); 450 398 void snp_accept_memory(phys_addr_t start, phys_addr_t end); 451 399 u64 snp_get_unsupported_features(u64 status); 452 400 u64 sev_get_status(void); 453 401 void sev_show_status(void); 454 402 void snp_update_svsm_ca(void); 403 + int prepare_pte_enc(struct pte_enc_desc *d); 404 + void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot); 405 + void snp_kexec_finish(void); 406 + void snp_kexec_begin(void); 455 407 456 408 #else /* !CONFIG_AMD_MEM_ENCRYPT */ 457 409 ··· 480 422 static inline bool snp_init(struct boot_params *bp) { return false; } 481 423 static inline void snp_abort(void) { } 482 424 static inline void snp_dmi_setup(void) { } 483 - static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio) 425 + static inline int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input, 426 + struct snp_guest_request_ioctl *rio) 484 427 { 485 428 return -ENOTTY; 486 429 } ··· 494 435 static inline u64 sev_get_status(void) { return 0; } 495 436 static inline void sev_show_status(void) { } 496 437 static inline void 
snp_update_svsm_ca(void) { } 438 + static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; } 439 + static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { } 440 + static inline void snp_kexec_finish(void) { } 441 + static inline void snp_kexec_begin(void) { } 497 442 498 443 #endif /* CONFIG_AMD_MEM_ENCRYPT */ 499 444
+51 -26
arch/x86/mm/mem_encrypt_amd.c
··· 311 311 return 0; 312 312 } 313 313 314 - static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) 314 + int prepare_pte_enc(struct pte_enc_desc *d) 315 315 { 316 - pgprot_t old_prot, new_prot; 317 - unsigned long pfn, pa, size; 318 - pte_t new_pte; 316 + pgprot_t old_prot; 319 317 320 - pfn = pg_level_to_pfn(level, kpte, &old_prot); 321 - if (!pfn) 322 - return; 318 + d->pfn = pg_level_to_pfn(d->pte_level, d->kpte, &old_prot); 319 + if (!d->pfn) 320 + return 1; 323 321 324 - new_prot = old_prot; 325 - if (enc) 326 - pgprot_val(new_prot) |= _PAGE_ENC; 322 + d->new_pgprot = old_prot; 323 + if (d->encrypt) 324 + pgprot_val(d->new_pgprot) |= _PAGE_ENC; 327 325 else 328 - pgprot_val(new_prot) &= ~_PAGE_ENC; 326 + pgprot_val(d->new_pgprot) &= ~_PAGE_ENC; 329 327 330 328 /* If prot is same then do nothing. */ 331 - if (pgprot_val(old_prot) == pgprot_val(new_prot)) 332 - return; 329 + if (pgprot_val(old_prot) == pgprot_val(d->new_pgprot)) 330 + return 1; 333 331 334 - pa = pfn << PAGE_SHIFT; 335 - size = page_level_size(level); 332 + d->pa = d->pfn << PAGE_SHIFT; 333 + d->size = page_level_size(d->pte_level); 336 334 337 335 /* 338 - * We are going to perform in-place en-/decryption and change the 339 - * physical page attribute from C=1 to C=0 or vice versa. Flush the 340 - * caches to ensure that data gets accessed with the correct C-bit. 336 + * In-place en-/decryption and physical page attribute change 337 + * from C=1 to C=0 or vice versa will be performed. Flush the 338 + * caches to ensure that data gets accessed with the correct 339 + * C-bit. 341 340 */ 342 - clflush_cache_range(__va(pa), size); 341 + if (d->va) 342 + clflush_cache_range(d->va, d->size); 343 + else 344 + clflush_cache_range(__va(d->pa), d->size); 345 + 346 + return 0; 347 + } 348 + 349 + void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) 350 + { 351 + pte_t new_pte; 352 + 353 + /* Change the page encryption mask. 
*/ 354 + new_pte = pfn_pte(pfn, new_prot); 355 + set_pte_atomic(kpte, new_pte); 356 + } 357 + 358 + static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) 359 + { 360 + struct pte_enc_desc d = { 361 + .kpte = kpte, 362 + .pte_level = level, 363 + .encrypt = enc 364 + }; 365 + 366 + if (prepare_pte_enc(&d)) 367 + return; 343 368 344 369 /* Encrypt/decrypt the contents in-place */ 345 370 if (enc) { 346 - sme_early_encrypt(pa, size); 371 + sme_early_encrypt(d.pa, d.size); 347 372 } else { 348 - sme_early_decrypt(pa, size); 373 + sme_early_decrypt(d.pa, d.size); 349 374 350 375 /* 351 376 * ON SNP, the page state in the RMP table must happen 352 377 * before the page table updates. 353 378 */ 354 - early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1); 379 + early_snp_set_memory_shared((unsigned long)__va(d.pa), d.pa, 1); 355 380 } 356 381 357 - /* Change the page encryption mask. */ 358 - new_pte = pfn_pte(pfn, new_prot); 359 - set_pte_atomic(kpte, new_pte); 382 + set_pte_enc_mask(kpte, d.pfn, d.new_pgprot); 360 383 361 384 /* 362 385 * If page is set encrypted in the page table, then update the RMP table to 363 386 * add this page as private. 364 387 */ 365 388 if (enc) 366 - early_snp_set_memory_private((unsigned long)__va(pa), pa, 1); 389 + early_snp_set_memory_private((unsigned long)__va(d.pa), d.pa, 1); 367 390 } 368 391 369 392 static int __init early_set_memory_enc_dec(unsigned long vaddr, ··· 490 467 x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish; 491 468 x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required; 492 469 x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required; 470 + x86_platform.guest.enc_kexec_begin = snp_kexec_begin; 471 + x86_platform.guest.enc_kexec_finish = snp_kexec_finish; 493 472 494 473 /* 495 474 * AMD-SEV-ES intercepts the RDMSR to read the X2APIC ID in the
+7 -4
arch/x86/mm/mem_encrypt_identity.c
··· 495 495 unsigned int eax, ebx, ecx, edx; 496 496 unsigned long feature_mask; 497 497 unsigned long me_mask; 498 - bool snp; 498 + bool snp_en; 499 499 u64 msr; 500 500 501 - snp = snp_init(bp); 501 + snp_en = snp_init(bp); 502 502 503 503 /* Check for the SME/SEV support leaf */ 504 504 eax = 0x80000000; ··· 531 531 RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV); 532 532 feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; 533 533 534 - /* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */ 535 - if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED)) 534 + /* 535 + * Any discrepancies between the presence of a CC blob and SNP 536 + * enablement abort the guest. 537 + */ 538 + if (snp_en ^ !!(msr & MSR_AMD64_SEV_SNP_ENABLED)) 536 539 snp_abort(); 537 540 538 541 /* Check if memory encryption is enabled */
+1
arch/x86/virt/svm/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 3 obj-$(CONFIG_KVM_AMD_SEV) += sev.o 4 + obj-$(CONFIG_CPU_SUP_AMD) += cmdline.o
+45
arch/x86/virt/svm/cmdline.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * AMD SVM-SEV command line parsing support 4 + * 5 + * Copyright (C) 2023 - 2024 Advanced Micro Devices, Inc. 6 + * 7 + * Author: Michael Roth <michael.roth@amd.com> 8 + */ 9 + 10 + #include <linux/string.h> 11 + #include <linux/printk.h> 12 + #include <linux/cache.h> 13 + #include <linux/cpufeature.h> 14 + 15 + #include <asm/sev-common.h> 16 + 17 + struct sev_config sev_cfg __read_mostly; 18 + 19 + static int __init init_sev_config(char *str) 20 + { 21 + char *s; 22 + 23 + while ((s = strsep(&str, ","))) { 24 + if (!strcmp(s, "debug")) { 25 + sev_cfg.debug = true; 26 + continue; 27 + } 28 + 29 + if (!strcmp(s, "nosnp")) { 30 + if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) { 31 + setup_clear_cpu_cap(X86_FEATURE_SEV_SNP); 32 + cc_platform_clear(CC_ATTR_HOST_SEV_SNP); 33 + continue; 34 + } else { 35 + goto warn; 36 + } 37 + } 38 + 39 + warn: 40 + pr_info("SEV command-line option '%s' was not recognized\n", s); 41 + } 42 + 43 + return 1; 44 + } 45 + __setup("sev=", init_sev_config);
+1 -3
drivers/virt/coco/sev-guest/Kconfig
··· 2 2 tristate "AMD SEV Guest driver" 3 3 default m 4 4 depends on AMD_MEM_ENCRYPT 5 - select CRYPTO 6 - select CRYPTO_AEAD2 7 - select CRYPTO_GCM 5 + select CRYPTO_LIB_AESGCM 8 6 select TSM_REPORTS 9 7 help 10 8 SEV-SNP firmware provides the guest a mechanism to communicate with
+160 -256
drivers/virt/coco/sev-guest/sev-guest.c
··· 17 17 #include <linux/set_memory.h> 18 18 #include <linux/fs.h> 19 19 #include <linux/tsm.h> 20 - #include <crypto/aead.h> 21 - #include <linux/scatterlist.h> 20 + #include <crypto/gcm.h> 22 21 #include <linux/psp-sev.h> 23 22 #include <linux/sockptr.h> 24 23 #include <linux/cleanup.h> ··· 30 31 #include <asm/sev.h> 31 32 32 33 #define DEVICE_NAME "sev-guest" 33 - #define AAD_LEN 48 34 - #define MSG_HDR_VER 1 35 34 36 35 #define SNP_REQ_MAX_RETRY_DURATION (60*HZ) 37 36 #define SNP_REQ_RETRY_DELAY (2*HZ) 38 37 39 38 #define SVSM_MAX_RETRIES 3 40 39 41 - struct snp_guest_crypto { 42 - struct crypto_aead *tfm; 43 - u8 *iv, *authtag; 44 - int iv_len, a_len; 45 - }; 46 - 47 40 struct snp_guest_dev { 48 41 struct device *dev; 49 42 struct miscdevice misc; 50 43 51 - void *certs_data; 52 - struct snp_guest_crypto *crypto; 53 - /* request and response are in unencrypted memory */ 54 - struct snp_guest_msg *request, *response; 44 + struct snp_msg_desc *msg_desc; 55 45 56 - /* 57 - * Avoid information leakage by double-buffering shared messages 58 - * in fields that are in regular encrypted memory. 59 - */ 60 - struct snp_guest_msg secret_request, secret_response; 61 - 62 - struct snp_secrets_page *secrets; 63 - struct snp_req_data input; 64 46 union { 65 47 struct snp_report_req report; 66 48 struct snp_derived_key_req derived_key; 67 49 struct snp_ext_report_req ext_report; 68 50 } req; 69 - u32 *os_area_msg_seqno; 70 - u8 *vmpck; 71 51 }; 72 52 73 53 /* ··· 63 85 /* Mutex to serialize the shared buffer access and command handling. 
*/ 64 86 static DEFINE_MUTEX(snp_cmd_mutex); 65 87 66 - static bool is_vmpck_empty(struct snp_guest_dev *snp_dev) 88 + static bool is_vmpck_empty(struct snp_msg_desc *mdesc) 67 89 { 68 90 char zero_key[VMPCK_KEY_LEN] = {0}; 69 91 70 - if (snp_dev->vmpck) 71 - return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN); 92 + if (mdesc->vmpck) 93 + return !memcmp(mdesc->vmpck, zero_key, VMPCK_KEY_LEN); 72 94 73 95 return true; 74 96 } ··· 90 112 * vulnerable. If the sequence number were incremented for a fresh IV the ASP 91 113 * will reject the request. 92 114 */ 93 - static void snp_disable_vmpck(struct snp_guest_dev *snp_dev) 115 + static void snp_disable_vmpck(struct snp_msg_desc *mdesc) 94 116 { 95 - dev_alert(snp_dev->dev, "Disabling VMPCK%d communication key to prevent IV reuse.\n", 117 + pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n", 96 118 vmpck_id); 97 - memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN); 98 - snp_dev->vmpck = NULL; 119 + memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN); 120 + mdesc->vmpck = NULL; 99 121 } 100 122 101 - static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev) 123 + static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc) 102 124 { 103 125 u64 count; 104 126 105 127 lockdep_assert_held(&snp_cmd_mutex); 106 128 107 129 /* Read the current message sequence counter from secrets pages */ 108 - count = *snp_dev->os_area_msg_seqno; 130 + count = *mdesc->os_area_msg_seqno; 109 131 110 132 return count + 1; 111 133 } 112 134 113 135 /* Return a non-zero on success */ 114 - static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev) 136 + static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc) 115 137 { 116 - u64 count = __snp_get_msg_seqno(snp_dev); 138 + u64 count = __snp_get_msg_seqno(mdesc); 117 139 118 140 /* 119 141 * The message sequence counter for the SNP guest request is a 64-bit ··· 124 146 * invalid number and will fail the message request. 
125 147 */ 126 148 if (count >= UINT_MAX) { 127 - dev_err(snp_dev->dev, "request message sequence counter overflow\n"); 149 + pr_err("request message sequence counter overflow\n"); 128 150 return 0; 129 151 } 130 152 131 153 return count; 132 154 } 133 155 134 - static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev) 156 + static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc) 135 157 { 136 158 /* 137 159 * The counter is also incremented by the PSP, so increment it by 2 138 160 * and save in secrets page. 139 161 */ 140 - *snp_dev->os_area_msg_seqno += 2; 162 + *mdesc->os_area_msg_seqno += 2; 141 163 } 142 164 143 165 static inline struct snp_guest_dev *to_snp_dev(struct file *file) ··· 147 169 return container_of(dev, struct snp_guest_dev, misc); 148 170 } 149 171 150 - static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen) 172 + static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen) 151 173 { 152 - struct snp_guest_crypto *crypto; 174 + struct aesgcm_ctx *ctx; 153 175 154 - crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT); 155 - if (!crypto) 176 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT); 177 + if (!ctx) 156 178 return NULL; 157 179 158 - crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 159 - if (IS_ERR(crypto->tfm)) 160 - goto e_free; 161 - 162 - if (crypto_aead_setkey(crypto->tfm, key, keylen)) 163 - goto e_free_crypto; 164 - 165 - crypto->iv_len = crypto_aead_ivsize(crypto->tfm); 166 - crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT); 167 - if (!crypto->iv) 168 - goto e_free_crypto; 169 - 170 - if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) { 171 - if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) { 172 - dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN); 173 - goto e_free_iv; 174 - } 180 + if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) { 181 + pr_err("Crypto context initialization failed\n"); 182 + kfree(ctx); 183 + return 
NULL; 175 184 } 176 185 177 - crypto->a_len = crypto_aead_authsize(crypto->tfm); 178 - crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT); 179 - if (!crypto->authtag) 180 - goto e_free_iv; 181 - 182 - return crypto; 183 - 184 - e_free_iv: 185 - kfree(crypto->iv); 186 - e_free_crypto: 187 - crypto_free_aead(crypto->tfm); 188 - e_free: 189 - kfree(crypto); 190 - 191 - return NULL; 186 + return ctx; 192 187 } 193 188 194 - static void deinit_crypto(struct snp_guest_crypto *crypto) 189 + static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req) 195 190 { 196 - crypto_free_aead(crypto->tfm); 197 - kfree(crypto->iv); 198 - kfree(crypto->authtag); 199 - kfree(crypto); 200 - } 201 - 202 - static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg, 203 - u8 *src_buf, u8 *dst_buf, size_t len, bool enc) 204 - { 205 - struct snp_guest_msg_hdr *hdr = &msg->hdr; 206 - struct scatterlist src[3], dst[3]; 207 - DECLARE_CRYPTO_WAIT(wait); 208 - struct aead_request *req; 209 - int ret; 210 - 211 - req = aead_request_alloc(crypto->tfm, GFP_KERNEL); 212 - if (!req) 213 - return -ENOMEM; 214 - 215 - /* 216 - * AEAD memory operations: 217 - * +------ AAD -------+------- DATA -----+---- AUTHTAG----+ 218 - * | msg header | plaintext | hdr->authtag | 219 - * | bytes 30h - 5Fh | or | | 220 - * | | cipher | | 221 - * +------------------+------------------+----------------+ 222 - */ 223 - sg_init_table(src, 3); 224 - sg_set_buf(&src[0], &hdr->algo, AAD_LEN); 225 - sg_set_buf(&src[1], src_buf, hdr->msg_sz); 226 - sg_set_buf(&src[2], hdr->authtag, crypto->a_len); 227 - 228 - sg_init_table(dst, 3); 229 - sg_set_buf(&dst[0], &hdr->algo, AAD_LEN); 230 - sg_set_buf(&dst[1], dst_buf, hdr->msg_sz); 231 - sg_set_buf(&dst[2], hdr->authtag, crypto->a_len); 232 - 233 - aead_request_set_ad(req, AAD_LEN); 234 - aead_request_set_tfm(req, crypto->tfm); 235 - aead_request_set_callback(req, 0, crypto_req_done, &wait); 236 - 237 - 
aead_request_set_crypt(req, src, dst, len, crypto->iv); 238 - ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait); 239 - 240 - aead_request_free(req); 241 - return ret; 242 - } 243 - 244 - static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, 245 - void *plaintext, size_t len) 246 - { 247 - struct snp_guest_crypto *crypto = snp_dev->crypto; 248 - struct snp_guest_msg_hdr *hdr = &msg->hdr; 249 - 250 - memset(crypto->iv, 0, crypto->iv_len); 251 - memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); 252 - 253 - return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true); 254 - } 255 - 256 - static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, 257 - void *plaintext, size_t len) 258 - { 259 - struct snp_guest_crypto *crypto = snp_dev->crypto; 260 - struct snp_guest_msg_hdr *hdr = &msg->hdr; 261 - 262 - /* Build IV with response buffer sequence number */ 263 - memset(crypto->iv, 0, crypto->iv_len); 264 - memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); 265 - 266 - return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false); 267 - } 268 - 269 - static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz) 270 - { 271 - struct snp_guest_crypto *crypto = snp_dev->crypto; 272 - struct snp_guest_msg *resp_msg = &snp_dev->secret_response; 273 - struct snp_guest_msg *req_msg = &snp_dev->secret_request; 191 + struct snp_guest_msg *resp_msg = &mdesc->secret_response; 192 + struct snp_guest_msg *req_msg = &mdesc->secret_request; 274 193 struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr; 275 194 struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr; 195 + struct aesgcm_ctx *ctx = mdesc->ctx; 196 + u8 iv[GCM_AES_IV_SIZE] = {}; 276 197 277 198 pr_debug("response [seqno %lld type %d version %d sz %d]\n", 278 199 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version, 279 200 resp_msg_hdr->msg_sz); 
280 201 281 202 /* Copy response from shared memory to encrypted memory. */ 282 - memcpy(resp_msg, snp_dev->response, sizeof(*resp_msg)); 203 + memcpy(resp_msg, mdesc->response, sizeof(*resp_msg)); 283 204 284 205 /* Verify that the sequence counter is incremented by 1 */ 285 206 if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1))) ··· 193 316 * If the message size is greater than our buffer length then return 194 317 * an error. 195 318 */ 196 - if (unlikely((resp_msg_hdr->msg_sz + crypto->a_len) > sz)) 319 + if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz)) 197 320 return -EBADMSG; 198 321 199 322 /* Decrypt the payload */ 200 - return dec_payload(snp_dev, resp_msg, payload, resp_msg_hdr->msg_sz + crypto->a_len); 323 + memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno))); 324 + if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz, 325 + &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag)) 326 + return -EBADMSG; 327 + 328 + return 0; 201 329 } 202 330 203 - static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type, 204 - void *payload, size_t sz) 331 + static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req) 205 332 { 206 - struct snp_guest_msg *msg = &snp_dev->secret_request; 333 + struct snp_guest_msg *msg = &mdesc->secret_request; 207 334 struct snp_guest_msg_hdr *hdr = &msg->hdr; 335 + struct aesgcm_ctx *ctx = mdesc->ctx; 336 + u8 iv[GCM_AES_IV_SIZE] = {}; 208 337 209 338 memset(msg, 0, sizeof(*msg)); 210 339 211 340 hdr->algo = SNP_AEAD_AES_256_GCM; 212 341 hdr->hdr_version = MSG_HDR_VER; 213 342 hdr->hdr_sz = sizeof(*hdr); 214 - hdr->msg_type = type; 215 - hdr->msg_version = version; 343 + hdr->msg_type = req->msg_type; 344 + hdr->msg_version = req->msg_version; 216 345 hdr->msg_seqno = seqno; 217 - hdr->msg_vmpck = vmpck_id; 218 - hdr->msg_sz = sz; 346 + hdr->msg_vmpck = req->vmpck_id; 347 + 
hdr->msg_sz = req->req_sz; 219 348 220 349 /* Verify the sequence number is non-zero */ 221 350 if (!hdr->msg_seqno) ··· 230 347 pr_debug("request [seqno %lld type %d version %d sz %d]\n", 231 348 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz); 232 349 233 - return __enc_payload(snp_dev, msg, payload, sz); 350 + if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload))) 351 + return -EBADMSG; 352 + 353 + memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno))); 354 + aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo, 355 + AAD_LEN, iv, hdr->authtag); 356 + 357 + return 0; 234 358 } 235 359 236 - static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, 360 + static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req, 237 361 struct snp_guest_request_ioctl *rio) 238 362 { 239 363 unsigned long req_start = jiffies; ··· 255 365 * sequence number must be incremented or the VMPCK must be deleted to 256 366 * prevent reuse of the IV. 257 367 */ 258 - rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio); 368 + rc = snp_issue_guest_request(req, &mdesc->input, rio); 259 369 switch (rc) { 260 370 case -ENOSPC: 261 371 /* ··· 265 375 * order to increment the sequence number and thus avoid 266 376 * IV reuse. 267 377 */ 268 - override_npages = snp_dev->input.data_npages; 269 - exit_code = SVM_VMGEXIT_GUEST_REQUEST; 378 + override_npages = mdesc->input.data_npages; 379 + req->exit_code = SVM_VMGEXIT_GUEST_REQUEST; 270 380 271 381 /* 272 382 * Override the error to inform callers the given extended ··· 305 415 * structure and any failure will wipe the VMPCK, preventing further 306 416 * use anyway. 
307 417 */ 308 - snp_inc_msg_seqno(snp_dev); 418 + snp_inc_msg_seqno(mdesc); 309 419 310 420 if (override_err) { 311 421 rio->exitinfo2 = override_err; ··· 321 431 } 322 432 323 433 if (override_npages) 324 - snp_dev->input.data_npages = override_npages; 434 + mdesc->input.data_npages = override_npages; 325 435 326 436 return rc; 327 437 } 328 438 329 - static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, 330 - struct snp_guest_request_ioctl *rio, u8 type, 331 - void *req_buf, size_t req_sz, void *resp_buf, 332 - u32 resp_sz) 439 + static int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req, 440 + struct snp_guest_request_ioctl *rio) 333 441 { 334 442 u64 seqno; 335 443 int rc; 336 444 445 + guard(mutex)(&snp_cmd_mutex); 446 + 447 + /* Check if the VMPCK is not empty */ 448 + if (is_vmpck_empty(mdesc)) { 449 + pr_err_ratelimited("VMPCK is disabled\n"); 450 + return -ENOTTY; 451 + } 452 + 337 453 /* Get message sequence and verify that its a non-zero */ 338 - seqno = snp_get_msg_seqno(snp_dev); 454 + seqno = snp_get_msg_seqno(mdesc); 339 455 if (!seqno) 340 456 return -EIO; 341 457 342 458 /* Clear shared memory's response for the host to populate. */ 343 - memset(snp_dev->response, 0, sizeof(struct snp_guest_msg)); 459 + memset(mdesc->response, 0, sizeof(struct snp_guest_msg)); 344 460 345 - /* Encrypt the userspace provided payload in snp_dev->secret_request. */ 346 - rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz); 461 + /* Encrypt the userspace provided payload in mdesc->secret_request. */ 462 + rc = enc_payload(mdesc, seqno, req); 347 463 if (rc) 348 464 return rc; 349 465 ··· 357 461 * Write the fully encrypted request to the shared unencrypted 358 462 * request page. 
359 463 */ 360 - memcpy(snp_dev->request, &snp_dev->secret_request, 361 - sizeof(snp_dev->secret_request)); 464 + memcpy(mdesc->request, &mdesc->secret_request, 465 + sizeof(mdesc->secret_request)); 362 466 363 - rc = __handle_guest_request(snp_dev, exit_code, rio); 467 + rc = __handle_guest_request(mdesc, req, rio); 364 468 if (rc) { 365 469 if (rc == -EIO && 366 470 rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN)) 367 471 return rc; 368 472 369 - dev_alert(snp_dev->dev, 370 - "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n", 371 - rc, rio->exitinfo2); 473 + pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n", 474 + rc, rio->exitinfo2); 372 475 373 - snp_disable_vmpck(snp_dev); 476 + snp_disable_vmpck(mdesc); 374 477 return rc; 375 478 } 376 479 377 - rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz); 480 + rc = verify_and_dec_payload(mdesc, req); 378 481 if (rc) { 379 - dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc); 380 - snp_disable_vmpck(snp_dev); 482 + pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc); 483 + snp_disable_vmpck(mdesc); 381 484 return rc; 382 485 } 383 486 ··· 390 495 391 496 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 392 497 { 393 - struct snp_guest_crypto *crypto = snp_dev->crypto; 394 498 struct snp_report_req *report_req = &snp_dev->req.report; 499 + struct snp_msg_desc *mdesc = snp_dev->msg_desc; 395 500 struct snp_report_resp *report_resp; 501 + struct snp_guest_req req = {}; 396 502 int rc, resp_len; 397 - 398 - lockdep_assert_held(&snp_cmd_mutex); 399 503 400 504 if (!arg->req_data || !arg->resp_data) 401 505 return -EINVAL; ··· 407 513 * response payload. Make sure that it has enough space to cover the 408 514 * authtag. 
409 515 */ 410 - resp_len = sizeof(report_resp->data) + crypto->a_len; 516 + resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize; 411 517 report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 412 518 if (!report_resp) 413 519 return -ENOMEM; 414 520 415 - rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, SNP_MSG_REPORT_REQ, 416 - report_req, sizeof(*report_req), report_resp->data, resp_len); 521 + req.msg_version = arg->msg_version; 522 + req.msg_type = SNP_MSG_REPORT_REQ; 523 + req.vmpck_id = vmpck_id; 524 + req.req_buf = report_req; 525 + req.req_sz = sizeof(*report_req); 526 + req.resp_buf = report_resp->data; 527 + req.resp_sz = resp_len; 528 + req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; 529 + 530 + rc = snp_send_guest_request(mdesc, &req, arg); 417 531 if (rc) 418 532 goto e_free; 419 533 ··· 436 534 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 437 535 { 438 536 struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key; 439 - struct snp_guest_crypto *crypto = snp_dev->crypto; 440 537 struct snp_derived_key_resp derived_key_resp = {0}; 538 + struct snp_msg_desc *mdesc = snp_dev->msg_desc; 539 + struct snp_guest_req req = {}; 441 540 int rc, resp_len; 442 541 /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ 443 542 u8 buf[64 + 16]; 444 - 445 - lockdep_assert_held(&snp_cmd_mutex); 446 543 447 544 if (!arg->req_data || !arg->resp_data) 448 545 return -EINVAL; ··· 451 550 * response payload. Make sure that it has enough space to cover the 452 551 * authtag. 
453 552 */ 454 - resp_len = sizeof(derived_key_resp.data) + crypto->a_len; 553 + resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; 455 554 if (sizeof(buf) < resp_len) 456 555 return -ENOMEM; 457 556 ··· 459 558 sizeof(*derived_key_req))) 460 559 return -EFAULT; 461 560 462 - rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, SNP_MSG_KEY_REQ, 463 - derived_key_req, sizeof(*derived_key_req), buf, resp_len); 561 + req.msg_version = arg->msg_version; 562 + req.msg_type = SNP_MSG_KEY_REQ; 563 + req.vmpck_id = vmpck_id; 564 + req.req_buf = derived_key_req; 565 + req.req_sz = sizeof(*derived_key_req); 566 + req.resp_buf = buf; 567 + req.resp_sz = resp_len; 568 + req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; 569 + 570 + rc = snp_send_guest_request(mdesc, &req, arg); 464 571 if (rc) 465 572 return rc; 466 573 ··· 488 579 489 580 { 490 581 struct snp_ext_report_req *report_req = &snp_dev->req.ext_report; 491 - struct snp_guest_crypto *crypto = snp_dev->crypto; 582 + struct snp_msg_desc *mdesc = snp_dev->msg_desc; 492 583 struct snp_report_resp *report_resp; 584 + struct snp_guest_req req = {}; 493 585 int ret, npages = 0, resp_len; 494 586 sockptr_t certs_address; 495 - 496 - lockdep_assert_held(&snp_cmd_mutex); 497 587 498 588 if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data)) 499 589 return -EINVAL; ··· 522 614 * the host. If host does not supply any certs in it, then copy 523 615 * zeros to indicate that certificate data was not provided. 524 616 */ 525 - memset(snp_dev->certs_data, 0, report_req->certs_len); 617 + memset(mdesc->certs_data, 0, report_req->certs_len); 526 618 npages = report_req->certs_len >> PAGE_SHIFT; 527 619 cmd: 528 620 /* ··· 530 622 * response payload. Make sure that it has enough space to cover the 531 623 * authtag. 
532 624 */ 533 - resp_len = sizeof(report_resp->data) + crypto->a_len; 625 + resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize; 534 626 report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 535 627 if (!report_resp) 536 628 return -ENOMEM; 537 629 538 - snp_dev->input.data_npages = npages; 539 - ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg, SNP_MSG_REPORT_REQ, 540 - &report_req->data, sizeof(report_req->data), 541 - report_resp->data, resp_len); 630 + mdesc->input.data_npages = npages; 631 + 632 + req.msg_version = arg->msg_version; 633 + req.msg_type = SNP_MSG_REPORT_REQ; 634 + req.vmpck_id = vmpck_id; 635 + req.req_buf = &report_req->data; 636 + req.req_sz = sizeof(report_req->data); 637 + req.resp_buf = report_resp->data; 638 + req.resp_sz = resp_len; 639 + req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST; 640 + 641 + ret = snp_send_guest_request(mdesc, &req, arg); 542 642 543 643 /* If certs length is invalid then copy the returned length */ 544 644 if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) { 545 - report_req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT; 645 + report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT; 546 646 547 647 if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req))) 548 648 ret = -EFAULT; ··· 559 643 if (ret) 560 644 goto e_free; 561 645 562 - if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, report_req->certs_len)) { 646 + if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) { 563 647 ret = -EFAULT; 564 648 goto e_free; 565 649 } ··· 589 673 if (!input.msg_version) 590 674 return -EINVAL; 591 675 592 - mutex_lock(&snp_cmd_mutex); 593 - 594 - /* Check if the VMPCK is not empty */ 595 - if (is_vmpck_empty(snp_dev)) { 596 - dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n"); 597 - mutex_unlock(&snp_cmd_mutex); 598 - return -ENOTTY; 599 - } 600 - 601 676 switch (ioctl) { 602 677 case SNP_GET_REPORT: 603 678 ret = 
get_report(snp_dev, &input); ··· 609 702 default: 610 703 break; 611 704 } 612 - 613 - mutex_unlock(&snp_cmd_mutex); 614 705 615 706 if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input))) 616 707 return -EFAULT; ··· 723 818 rep_len = SZ_4K; 724 819 man_len = SZ_4K; 725 820 certs_len = SEV_FW_BLOB_MAX_SIZE; 726 - 727 - guard(mutex)(&snp_cmd_mutex); 728 821 729 822 if (guid_is_null(&desc->service_guid)) { 730 823 call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES); ··· 858 955 if (!buf) 859 956 return -ENOMEM; 860 957 861 - guard(mutex)(&snp_cmd_mutex); 862 - 863 - /* Check if the VMPCK is not empty */ 864 - if (is_vmpck_empty(snp_dev)) { 865 - dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n"); 866 - return -ENOTTY; 867 - } 868 - 869 958 cert_table = buf + report_size; 870 959 struct snp_ext_report_req ext_req = { 871 960 .data = { .vmpl = desc->privlevel }, ··· 983 1088 struct snp_secrets_page *secrets; 984 1089 struct device *dev = &pdev->dev; 985 1090 struct snp_guest_dev *snp_dev; 1091 + struct snp_msg_desc *mdesc; 986 1092 struct miscdevice *misc; 987 1093 void __iomem *mapping; 988 1094 int ret; ··· 1008 1112 if (!snp_dev) 1009 1113 goto e_unmap; 1010 1114 1115 + mdesc = devm_kzalloc(&pdev->dev, sizeof(struct snp_msg_desc), GFP_KERNEL); 1116 + if (!mdesc) 1117 + goto e_unmap; 1118 + 1011 1119 /* Adjust the default VMPCK key based on the executing VMPL level */ 1012 1120 if (vmpck_id == -1) 1013 1121 vmpck_id = snp_vmpl; 1014 1122 1015 1123 ret = -EINVAL; 1016 - snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno); 1017 - if (!snp_dev->vmpck) { 1124 + mdesc->vmpck = get_vmpck(vmpck_id, secrets, &mdesc->os_area_msg_seqno); 1125 + if (!mdesc->vmpck) { 1018 1126 dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id); 1019 1127 goto e_unmap; 1020 1128 } 1021 1129 1022 1130 /* Verify that VMPCK is not zero. 
*/ 1023 - if (is_vmpck_empty(snp_dev)) { 1131 + if (is_vmpck_empty(mdesc)) { 1024 1132 dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id); 1025 1133 goto e_unmap; 1026 1134 } 1027 1135 1028 1136 platform_set_drvdata(pdev, snp_dev); 1029 1137 snp_dev->dev = dev; 1030 - snp_dev->secrets = secrets; 1138 + mdesc->secrets = secrets; 1031 1139 1032 1140 /* Allocate the shared page used for the request and response message. */ 1033 - snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); 1034 - if (!snp_dev->request) 1141 + mdesc->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); 1142 + if (!mdesc->request) 1035 1143 goto e_unmap; 1036 1144 1037 - snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); 1038 - if (!snp_dev->response) 1145 + mdesc->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); 1146 + if (!mdesc->response) 1039 1147 goto e_free_request; 1040 1148 1041 - snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE); 1042 - if (!snp_dev->certs_data) 1149 + mdesc->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE); 1150 + if (!mdesc->certs_data) 1043 1151 goto e_free_response; 1044 1152 1045 1153 ret = -EIO; 1046 - snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN); 1047 - if (!snp_dev->crypto) 1154 + mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN); 1155 + if (!mdesc->ctx) 1048 1156 goto e_free_cert_data; 1049 1157 1050 1158 misc = &snp_dev->misc; ··· 1056 1156 misc->name = DEVICE_NAME; 1057 1157 misc->fops = &snp_guest_fops; 1058 1158 1059 - /* initial the input address for guest request */ 1060 - snp_dev->input.req_gpa = __pa(snp_dev->request); 1061 - snp_dev->input.resp_gpa = __pa(snp_dev->response); 1062 - snp_dev->input.data_gpa = __pa(snp_dev->certs_data); 1159 + /* Initialize the input addresses for guest request */ 1160 + mdesc->input.req_gpa = __pa(mdesc->request); 1161 + mdesc->input.resp_gpa = __pa(mdesc->response); 1162 + 
mdesc->input.data_gpa = __pa(mdesc->certs_data); 1063 1163 1064 1164 /* Set the privlevel_floor attribute based on the vmpck_id */ 1065 1165 sev_tsm_ops.privlevel_floor = vmpck_id; ··· 1074 1174 1075 1175 ret = misc_register(misc); 1076 1176 if (ret) 1077 - goto e_free_cert_data; 1177 + goto e_free_ctx; 1078 1178 1179 + snp_dev->msg_desc = mdesc; 1079 1180 dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id); 1080 1181 return 0; 1081 1182 1183 + e_free_ctx: 1184 + kfree(mdesc->ctx); 1082 1185 e_free_cert_data: 1083 - free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); 1186 + free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE); 1084 1187 e_free_response: 1085 - free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); 1188 + free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); 1086 1189 e_free_request: 1087 - free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); 1190 + free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); 1088 1191 e_unmap: 1089 1192 iounmap(mapping); 1090 1193 return ret; ··· 1096 1193 static void __exit sev_guest_remove(struct platform_device *pdev) 1097 1194 { 1098 1195 struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev); 1196 + struct snp_msg_desc *mdesc = snp_dev->msg_desc; 1099 1197 1100 - free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); 1101 - free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); 1102 - free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); 1103 - deinit_crypto(snp_dev->crypto); 1198 + free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE); 1199 + free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); 1200 + free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); 1201 + kfree(mdesc->ctx); 1104 1202 misc_deregister(&snp_dev->misc); 1105 1203 } 1106 1204