Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mm: Add Secure Encrypted Virtualization (SEV) support

Provide support for Secure Encrypted Virtualization (SEV). This initial
support defines a flag that is used by the kernel to determine if it is
running with SEV active.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: kvm@vger.kernel.org
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20171020143059.3291-3-brijesh.singh@amd.com

Authored by Tom Lendacky and committed by Thomas Gleixner (commit d8aa7eea, parent 33e63acc).

+37 -2
+6
arch/x86/include/asm/mem_encrypt.h
···
47  47
48  48      void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
49  49
    50  +   bool sme_active(void);
    51  +   bool sev_active(void);
    52  +
50  53      #else /* !CONFIG_AMD_MEM_ENCRYPT */
51  54
52  55      #define sme_me_mask 0ULL
···
66  63
67  64      static inline void __init sme_encrypt_kernel(void) { }
68  65      static inline void __init sme_enable(struct boot_params *bp) { }
    66  +
    67  +   static inline bool sme_active(void) { return false; }
    68  +   static inline bool sev_active(void) { return false; }
69  69
70  70      #endif /* CONFIG_AMD_MEM_ENCRYPT */
71  71
+26
arch/x86/mm/mem_encrypt.c
···
42  42      u64 sme_me_mask __section(.data) = 0;
43  43      EXPORT_SYMBOL_GPL(sme_me_mask);
44  44
    45  +   static bool sev_enabled __section(.data);
    46  +
45  47      /* Buffer used for early in-place encryption by BSP, no locking needed */
46  48      static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
47  49
···
193 191     for (i = 0; i < ARRAY_SIZE(protection_map); i++)
194 192         protection_map[i] = pgprot_encrypted(protection_map[i]);
195 193     }
    194 +
    195 +   /*
    196 +    * SME and SEV are very similar but they are not the same, so there are
    197 +    * times that the kernel will need to distinguish between SME and SEV. The
    198 +    * sme_active() and sev_active() functions are used for this. When a
    199 +    * distinction isn't needed, the mem_encrypt_active() function can be used.
    200 +    *
    201 +    * The trampoline code is a good example for this requirement. Before
    202 +    * paging is activated, SME will access all memory as decrypted, but SEV
    203 +    * will access all memory as encrypted. So, when APs are being brought
    204 +    * up under SME the trampoline area cannot be encrypted, whereas under SEV
    205 +    * the trampoline area must be encrypted.
    206 +    */
    207 +   bool sme_active(void)
    208 +   {
    209 +       return sme_me_mask && !sev_enabled;
    210 +   }
    211 +   EXPORT_SYMBOL_GPL(sme_active);
    212 +
    213 +   bool sev_active(void)
    214 +   {
    215 +       return sme_me_mask && sev_enabled;
    216 +   }
    217 +   EXPORT_SYMBOL_GPL(sev_active);
196 218
197 219     /* Architecture __weak replacement functions */
198 220     void __init mem_encrypt_init(void)
+5 -2
include/linux/mem_encrypt.h
···
23  23
24  24      #define sme_me_mask 0ULL
25  25
    26  +   static inline bool sme_active(void) { return false; }
    27  +   static inline bool sev_active(void) { return false; }
    28  +
26  29      #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
27  30
28      -   static inline bool sme_active(void)
    31  +   static inline bool mem_encrypt_active(void)
29  32      {
30      -   return !!sme_me_mask;
    33  +   return sme_me_mask;
31  34      }
32  35
33  36      static inline u64 sme_get_me_mask(void)