Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/sme: Replace occurrences of sme_active() with cc_platform_has()

Replace uses of sme_active() with the more generic cc_platform_has()
using CC_ATTR_HOST_MEM_ENCRYPT. If future support is added for other
memory encryption technologies, the use of CC_ATTR_HOST_MEM_ENCRYPT
can be updated, as required.

This also replaces two usages of sev_active() that are really geared
towards detecting if SME is active.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210928191009.32551-6-bp@alien8.de

Authored by Tom Lendacky and committed by Borislav Petkov
32cb4d02 bfebd37e

+36 -34
+1 -1
arch/x86/include/asm/kexec.h
··· 129 129 unsigned long page_list, 130 130 unsigned long start_address, 131 131 unsigned int preserve_context, 132 - unsigned int sme_active); 132 + unsigned int host_mem_enc_active); 133 133 #endif 134 134 135 135 #define ARCH_HAS_KIMAGE_ARCH
-2
arch/x86/include/asm/mem_encrypt.h
··· 51 51 void __init mem_encrypt_init(void); 52 52 53 53 void __init sev_es_init_vc_handling(void); 54 - bool sme_active(void); 55 54 bool sev_active(void); 56 55 bool sev_es_active(void); 57 56 ··· 75 76 static inline void __init sme_enable(struct boot_params *bp) { } 76 77 77 78 static inline void sev_es_init_vc_handling(void) { } 78 - static inline bool sme_active(void) { return false; } 79 79 static inline bool sev_active(void) { return false; } 80 80 static inline bool sev_es_active(void) { return false; } 81 81
+8 -7
arch/x86/kernel/machine_kexec_64.c
··· 17 17 #include <linux/suspend.h> 18 18 #include <linux/vmalloc.h> 19 19 #include <linux/efi.h> 20 + #include <linux/cc_platform.h> 20 21 21 22 #include <asm/init.h> 22 23 #include <asm/tlbflush.h> ··· 359 358 (unsigned long)page_list, 360 359 image->start, 361 360 image->preserve_context, 362 - sme_active()); 361 + cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)); 363 362 364 363 #ifdef CONFIG_KEXEC_JUMP 365 364 if (image->preserve_context) ··· 570 569 */ 571 570 int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) 572 571 { 573 - if (sev_active()) 572 + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 574 573 return 0; 575 574 576 575 /* 577 - * If SME is active we need to be sure that kexec pages are 578 - * not encrypted because when we boot to the new kernel the 576 + * If host memory encryption is active we need to be sure that kexec 577 + * pages are not encrypted because when we boot to the new kernel the 579 578 * pages won't be accessed encrypted (initially). 580 579 */ 581 580 return set_memory_decrypted((unsigned long)vaddr, pages); ··· 583 582 584 583 void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) 585 584 { 586 - if (sev_active()) 585 + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 587 586 return; 588 587 589 588 /* 590 - * If SME is active we need to reset the pages back to being 591 - * an encrypted mapping before freeing them. 589 + * If host memory encryption is active we need to reset the pages back 590 + * to being an encrypted mapping before freeing them. 592 591 */ 593 592 set_memory_encrypted((unsigned long)vaddr, pages); 594 593 }
+4 -5
arch/x86/kernel/pci-swiotlb.c
··· 6 6 #include <linux/swiotlb.h> 7 7 #include <linux/memblock.h> 8 8 #include <linux/dma-direct.h> 9 - #include <linux/mem_encrypt.h> 9 + #include <linux/cc_platform.h> 10 10 11 11 #include <asm/iommu.h> 12 12 #include <asm/swiotlb.h> ··· 45 45 swiotlb = 1; 46 46 47 47 /* 48 - * If SME is active then swiotlb will be set to 1 so that bounce 49 - * buffers are allocated and used for devices that do not support 50 - * the addressing range required for the encryption mask. 48 + * Set swiotlb to 1 so that bounce buffers are allocated and used for 49 + * devices that can't support DMA to encrypted memory. 51 50 */ 52 - if (sme_active()) 51 + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 53 52 swiotlb = 1; 54 53 55 54 return swiotlb;
+1 -1
arch/x86/kernel/relocate_kernel_64.S
··· 47 47 * %rsi page_list 48 48 * %rdx start address 49 49 * %rcx preserve_context 50 - * %r8 sme_active 50 + * %r8 host_mem_enc_active 51 51 */ 52 52 53 53 /* Save the CPU context, used for jumping back */
+3 -3
arch/x86/mm/ioremap.c
··· 14 14 #include <linux/slab.h> 15 15 #include <linux/vmalloc.h> 16 16 #include <linux/mmiotrace.h> 17 - #include <linux/mem_encrypt.h> 17 + #include <linux/cc_platform.h> 18 18 #include <linux/efi.h> 19 19 #include <linux/pgtable.h> 20 20 ··· 703 703 if (flags & MEMREMAP_DEC) 704 704 return false; 705 705 706 - if (sme_active()) { 706 + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { 707 707 if (memremap_is_setup_data(phys_addr, size) || 708 708 memremap_is_efi_data(phys_addr, size)) 709 709 return false; ··· 729 729 730 730 encrypted_prot = true; 731 731 732 - if (sme_active()) { 732 + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { 733 733 if (early_memremap_is_setup_data(phys_addr, size) || 734 734 memremap_is_efi_data(phys_addr, size)) 735 735 encrypted_prot = false;
+4 -9
arch/x86/mm/mem_encrypt.c
··· 144 144 struct boot_params *boot_data; 145 145 unsigned long cmdline_paddr; 146 146 147 - if (!sme_active()) 147 + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 148 148 return; 149 149 150 150 /* Get the command line address before unmapping the real_mode_data */ ··· 164 164 struct boot_params *boot_data; 165 165 unsigned long cmdline_paddr; 166 166 167 - if (!sme_active()) 167 + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 168 168 return; 169 169 170 170 __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true); ··· 377 377 { 378 378 return sev_status & MSR_AMD64_SEV_ENABLED; 379 379 } 380 - 381 - bool sme_active(void) 382 - { 383 - return sme_me_mask && !sev_active(); 384 - } 385 380 EXPORT_SYMBOL_GPL(sev_active); 386 381 387 382 /* Needs to be called from non-instrumentable code */ ··· 399 404 * device does not support DMA to addresses that include the 400 405 * encryption mask. 401 406 */ 402 - if (sme_active()) { 407 + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { 403 408 u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask)); 404 409 u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask, 405 410 dev->bus_dma_limit); ··· 440 445 pr_info("AMD Memory Encryption Features active:"); 441 446 442 447 /* Secure Memory Encryption */ 443 - if (sme_active()) { 448 + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { 444 449 /* 445 450 * SME is mutually exclusive with any of the SEV 446 451 * features below.
+8 -1
arch/x86/mm/mem_encrypt_identity.c
··· 30 30 #include <linux/kernel.h> 31 31 #include <linux/mm.h> 32 32 #include <linux/mem_encrypt.h> 33 + #include <linux/cc_platform.h> 33 34 34 35 #include <asm/setup.h> 35 36 #include <asm/sections.h> ··· 288 287 unsigned long pgtable_area_len; 289 288 unsigned long decrypted_base; 290 289 291 - if (!sme_active()) 290 + /* 291 + * This is early code, use an open coded check for SME instead of 292 + * using cc_platform_has(). This eliminates worries about removing 293 + * instrumentation or checking boot_cpu_data in the cc_platform_has() 294 + * function. 295 + */ 296 + if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED) 292 297 return; 293 298 294 299 /*
+3 -2
arch/x86/realmode/init.c
··· 3 3 #include <linux/slab.h> 4 4 #include <linux/memblock.h> 5 5 #include <linux/mem_encrypt.h> 6 + #include <linux/cc_platform.h> 6 7 #include <linux/pgtable.h> 7 8 8 9 #include <asm/set_memory.h> ··· 45 44 static void sme_sev_setup_real_mode(struct trampoline_header *th) 46 45 { 47 46 #ifdef CONFIG_AMD_MEM_ENCRYPT 48 - if (sme_active()) 47 + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 49 48 th->flags |= TH_FLAGS_SME_ACTIVE; 50 49 51 50 if (sev_es_active()) { ··· 82 81 * decrypted memory in order to bring up other processors 83 82 * successfully. This is not needed for SEV. 84 83 */ 85 - if (sme_active()) 84 + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) 86 85 set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT); 87 86 88 87 memcpy(base, real_mode_blob, size);
+4 -3
drivers/iommu/amd/init.c
··· 20 20 #include <linux/amd-iommu.h> 21 21 #include <linux/export.h> 22 22 #include <linux/kmemleak.h> 23 - #include <linux/mem_encrypt.h> 23 + #include <linux/cc_platform.h> 24 24 #include <asm/pci-direct.h> 25 25 #include <asm/iommu.h> 26 26 #include <asm/apic.h> ··· 964 964 pr_err("The address of old device table is above 4G, not trustworthy!\n"); 965 965 return false; 966 966 } 967 - old_devtb = (sme_active() && is_kdump_kernel()) 967 + old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel()) 968 968 ? (__force void *)ioremap_encrypted(old_devtb_phys, 969 969 dev_table_size) 970 970 : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); ··· 3032 3032 3033 3033 static bool amd_iommu_sme_check(void) 3034 3034 { 3035 - if (!sme_active() || (boot_cpu_data.x86 != 0x17)) 3035 + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) || 3036 + (boot_cpu_data.x86 != 0x17)) 3036 3037 return true; 3037 3038 3038 3039 /* For Fam17h, a specific level of support is required */