Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86-urgent-2025-09-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fix from Ingo Molnar:
"Fix a SEV-SNP regression when CONFIG_KVM_AMD_SEV is disabled"

* tag 'x86-urgent-2025-09-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/sev: Guard sev_evict_cache() with CONFIG_AMD_MEM_ENCRYPT

+19 -19
arch/x86/include/asm/sev.h
@@ -562,6 +562,24 @@

 extern struct ghcb *boot_ghcb;

+static inline void sev_evict_cache(void *va, int npages)
+{
+	volatile u8 val __always_unused;
+	u8 *bytes = va;
+	int page_idx;
+
+	/*
+	 * For SEV guests, a read from the first/last cache-lines of a 4K page
+	 * using the guest key is sufficient to cause a flush of all cache-lines
+	 * associated with that 4K page without incurring all the overhead of a
+	 * full CLFLUSH sequence.
+	 */
+	for (page_idx = 0; page_idx < npages; page_idx++) {
+		val = bytes[page_idx * PAGE_SIZE];
+		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+	}
+}
+
 #else /* !CONFIG_AMD_MEM_ENCRYPT */

 #define snp_vmpl 0
@@ -623,6 +605,7 @@
 static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
 static inline void __init snp_secure_tsc_prepare(void) { }
 static inline void __init snp_secure_tsc_init(void) { }
+static inline void sev_evict_cache(void *va, int npages) {}

 #endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -638,24 +619,6 @@
 void snp_leak_pages(u64 pfn, unsigned int npages);
 void kdump_sev_callback(void);
 void snp_fixup_e820_tables(void);
-
-static inline void sev_evict_cache(void *va, int npages)
-{
-	volatile u8 val __always_unused;
-	u8 *bytes = va;
-	int page_idx;
-
-	/*
-	 * For SEV guests, a read from the first/last cache-lines of a 4K page
-	 * using the guest key is sufficient to cause a flush of all cache-lines
-	 * associated with that 4K page without incurring all the overhead of a
-	 * full CLFLUSH sequence.
-	 */
-	for (page_idx = 0; page_idx < npages; page_idx++) {
-		val = bytes[page_idx * PAGE_SIZE];
-		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
-	}
-}
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -653,7 +652,6 @@
 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
 static inline void kdump_sev_callback(void) { }
 static inline void snp_fixup_e820_tables(void) {}
-static inline void sev_evict_cache(void *va, int npages) {}
 #endif

 #endif