Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/x86: Add a selftest for SGX

Add a selftest for SGX. It is a trivial test in which a simple enclave
copies one 64-bit word of memory between two memory locations, but it
ensures that all of the SGX hardware and software infrastructure is
functioning.

Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Cc: linux-kselftest@vger.kernel.org
Link: https://lkml.kernel.org/r/20201112220135.165028-21-jarkko@kernel.org

authored by

Jarkko Sakkinen and committed by
Borislav Petkov
2adcba79 84664369

+1222
+1
tools/testing/selftests/Makefile
··· 50 50 TARGETS += rseq 51 51 TARGETS += rtc 52 52 TARGETS += seccomp 53 + TARGETS += sgx 53 54 TARGETS += sigaltstack 54 55 TARGETS += size 55 56 TARGETS += sparc64
+2
tools/testing/selftests/sgx/.gitignore
··· 1 + test_sgx 2 + test_encl.elf
+53
tools/testing/selftests/sgx/Makefile
top_srcdir = ../../../..

include ../lib.mk

.PHONY: all clean

CAN_BUILD_X86_64 := $(shell ../x86/check_cc.sh $(CC) \
			    ../x86/trivial_64bit_program.c)

ifndef OBJCOPY
OBJCOPY := $(CROSS_COMPILE)objcopy
endif

INCLUDES := -I$(top_srcdir)/tools/include
HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC -z noexecstack
# The enclave is freestanding: no libc, no startup files, no stack protector
# (there is no canary storage inside the enclave).
ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
	       -fno-stack-protector -mrdrnd $(INCLUDES)

TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx

# Only build when a working 64-bit x86 toolchain is available.
ifeq ($(CAN_BUILD_X86_64), 1)
all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
endif

$(OUTPUT)/test_sgx: $(OUTPUT)/main.o \
		    $(OUTPUT)/load.o \
		    $(OUTPUT)/sigstruct.o \
		    $(OUTPUT)/call.o
	$(CC) $(HOST_CFLAGS) -o $@ $^ -lcrypto

$(OUTPUT)/main.o: main.c
	$(CC) $(HOST_CFLAGS) -c $< -o $@

$(OUTPUT)/load.o: load.c
	$(CC) $(HOST_CFLAGS) -c $< -o $@

$(OUTPUT)/sigstruct.o: sigstruct.c
	$(CC) $(HOST_CFLAGS) -c $< -o $@

$(OUTPUT)/call.o: call.S
	$(CC) $(HOST_CFLAGS) -c $< -o $@

# The enclave blob is linked with a custom script so the TCS page lands at
# offset 0 of the enclave image.
$(OUTPUT)/test_encl.elf: test_encl.lds test_encl.c test_encl_bootstrap.S
	$(CC) $(ENCL_CFLAGS) -T $^ -o $@

# Fix: the original list ended with a trailing backslash after
# "$(OUTPUT)/test_sgx.o \", a dangling line continuation that would splice
# the next line of the Makefile into EXTRA_CLEAN.
EXTRA_CLEAN := \
	$(OUTPUT)/test_encl.elf \
	$(OUTPUT)/load.o \
	$(OUTPUT)/call.o \
	$(OUTPUT)/main.o \
	$(OUTPUT)/sigstruct.o \
	$(OUTPUT)/test_sgx \
	$(OUTPUT)/test_sgx.o
+44
tools/testing/selftests/sgx/call.S
/* SPDX-License-Identifier: GPL-2.0 */
/**
 * Copyright(c) 2016-20 Intel Corporation.
 */

	.text

	/*
	 * int sgx_call_vdso(void *rdi, void *rsi, long rdx, u32 function,
	 *		     void *r8, void *r9, struct sgx_enclave_run *run);
	 *
	 * Trampoline into __vdso_sgx_enter_enclave() (address stored in the
	 * global "eenter" function pointer).  The first six arguments are
	 * already in the right registers per the SysV x86-64 ABI; the
	 * seventh (*run) lives on the caller's stack and is re-pushed below.
	 */
	.global sgx_call_vdso
sgx_call_vdso:
	.cfi_startproc
	/* Preserve the callee-saved registers the vDSO call may clobber. */
	push	%r15
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%r15, 0
	push	%r14
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%r14, 0
	push	%r13
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%r13, 0
	push	%r12
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%r12, 0
	push	%rbx
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbx, 0
	/* Dummy 8th argument slot to keep the stack 16-byte aligned. */
	push	$0
	.cfi_adjust_cfa_offset	8
	/*
	 * Re-push the 7th argument (struct sgx_enclave_run *).  After the
	 * six pushes above it sits at 0x38(%rsp): 6*8 bytes of saves plus
	 * the 8-byte return address.
	 */
	push	0x38(%rsp)
	.cfi_adjust_cfa_offset	8
	call	*eenter(%rip)
	/* Drop the two argument slots pushed above. */
	add	$0x10, %rsp
	.cfi_adjust_cfa_offset	-0x10
	pop	%rbx
	.cfi_adjust_cfa_offset	-8
	pop	%r12
	.cfi_adjust_cfa_offset	-8
	pop	%r13
	.cfi_adjust_cfa_offset	-8
	pop	%r14
	.cfi_adjust_cfa_offset	-8
	pop	%r15
	.cfi_adjust_cfa_offset	-8
	ret
	.cfi_endproc
+21
tools/testing/selftests/sgx/defines.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2016-20 Intel Corporation.
 */

#ifndef DEFINES_H
#define DEFINES_H

#include <stdint.h>

/* Enclave pages are always 4 KiB, independent of the host page size. */
#define PAGE_SIZE 4096
#define PAGE_MASK (~(PAGE_SIZE - 1))

/*
 * Kernel-style attribute shorthands; the in-tree SGX headers included
 * below expect these to exist.
 */
#define __aligned(x) __attribute__((__aligned__(x)))
#define __packed __attribute__((packed))

/* Reuse the kernel's SGX structure/ioctl definitions directly. */
#include "../../../../arch/x86/kernel/cpu/sgx/arch.h"
#include "../../../../arch/x86/include/asm/enclu.h"
#include "../../../../arch/x86/include/uapi/asm/sgx.h"

#endif /* DEFINES_H */
+277
tools/testing/selftests/sgx/load.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <assert.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include "defines.h"
#include "main.h"

/*
 * Release every resource owned by *encl and zero the struct so it can be
 * reused.  Safe to call on a partially initialized (memset-to-zero) encl.
 * NOTE(review): fd 0 is treated as "not open"; fine here because the test
 * never runs with stdin closed — confirm if reused elsewhere.
 */
void encl_delete(struct encl *encl)
{
	if (encl->encl_base)
		munmap((void *)encl->encl_base, encl->encl_size);

	if (encl->bin)
		munmap(encl->bin, encl->bin_size);

	if (encl->fd)
		close(encl->fd);

	if (encl->segment_tbl)
		free(encl->segment_tbl);

	memset(encl, 0, sizeof(*encl));
}

/*
 * mmap() the enclave ELF binary at `path` read-only into encl->bin /
 * encl->bin_size.  Returns false on any failure; the file descriptor is
 * closed on both paths (the mapping survives the close).
 */
static bool encl_map_bin(const char *path, struct encl *encl)
{
	struct stat sb;
	void *bin;
	int ret;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd == -1)  {
		perror("open()");
		return false;
	}

	ret = stat(path, &sb);
	if (ret) {
		perror("stat()");
		goto err;
	}

	bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (bin == MAP_FAILED) {
		perror("mmap()");
		goto err;
	}

	encl->bin = bin;
	encl->bin_size = sb.st_size;

	close(fd);
	return true;

err:
	close(fd);
	return false;
}

/*
 * Issue SGX_IOC_ENCLAVE_CREATE with a SECS built from the already-computed
 * base/size.  SSA frame size 1 and XFRM=3 (x87+SSE) match the values
 * hashed into MRENCLAVE by sigstruct.c.
 */
static bool encl_ioc_create(struct encl *encl)
{
	struct sgx_secs *secs = &encl->secs;
	struct sgx_enclave_create ioc;
	int rc;

	assert(encl->encl_base != 0);

	memset(secs, 0, sizeof(*secs));
	secs->ssa_frame_size = 1;
	secs->attributes = SGX_ATTR_MODE64BIT;
	secs->xfrm = 3;
	secs->base = encl->encl_base;
	secs->size = encl->encl_size;

	ioc.src = (unsigned long)secs;
	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
	if (rc) {
		fprintf(stderr, "SGX_IOC_ENCLAVE_CREATE failed: errno=%d\n",
			errno);
		/*
		 * NOTE(review): encl_delete() will munmap encl_base again;
		 * the second munmap on an already-unmapped range just fails
		 * harmlessly.
		 */
		munmap((void *)secs->base, encl->encl_size);
		return false;
	}

	return true;
}

/*
 * Add (and measure) one segment's pages to the enclave via
 * SGX_IOC_ENCLAVE_ADD_PAGES.  seg->flags carries the SECINFO page type and
 * permission bits computed in encl_load().
 */
static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
{
	struct sgx_enclave_add_pages ioc;
	struct sgx_secinfo secinfo;
	int rc;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = seg->flags;

	ioc.src = (uint64_t)encl->src + seg->offset;
	ioc.offset = seg->offset;
	ioc.length = seg->size;
	ioc.secinfo = (unsigned long)&secinfo;
	ioc.flags = SGX_PAGE_MEASURE;

	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
	if (rc < 0) {
		fprintf(stderr, "SGX_IOC_ENCLAVE_ADD_PAGES failed: errno=%d.\n",
			errno);
		return false;
	}

	return true;
}

/*
 * Open /dev/sgx_enclave, map the enclave binary and parse its PT_LOAD
 * segments into encl->segment_tbl.  Segment 0 must be the TCS (RW flags in
 * the ELF); its permissions are forced to R|W and its SECINFO type to TCS.
 * Also derives encl_size as the smallest power of two >= src_size, as
 * required by ECREATE.  On failure everything is torn down via
 * encl_delete().
 */
bool encl_load(const char *path, struct encl *encl)
{
	Elf64_Phdr *phdr_tbl;
	off_t src_offset;
	Elf64_Ehdr *ehdr;
	int i, j;
	int ret;

	memset(encl, 0, sizeof(*encl));

	ret = open("/dev/sgx_enclave", O_RDWR);
	if (ret < 0) {
		fprintf(stderr, "Unable to open /dev/sgx_enclave\n");
		goto err;
	}

	encl->fd = ret;

	if (!encl_map_bin(path, encl))
		goto err;

	ehdr = encl->bin;
	phdr_tbl = encl->bin + ehdr->e_phoff;

	/* First pass: count the loadable segments. */
	for (i = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];

		if (phdr->p_type == PT_LOAD)
			encl->nr_segments++;
	}

	encl->segment_tbl = calloc(encl->nr_segments,
				   sizeof(struct encl_segment));
	if (!encl->segment_tbl)
		goto err;

	/* Second pass: fill in offset/size/prot/SECINFO for each segment. */
	for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];
		unsigned int flags = phdr->p_flags;
		struct encl_segment *seg;

		if (phdr->p_type != PT_LOAD)
			continue;

		seg = &encl->segment_tbl[j];

		if (!!(flags & ~(PF_R | PF_W | PF_X))) {
			fprintf(stderr,
				"%d has invalid segment flags 0x%02x.\n", i,
				phdr->p_flags);
			goto err;
		}

		/* The linker script guarantees the TCS is the first segment. */
		if (j == 0 && flags != (PF_R | PF_W)) {
			fprintf(stderr,
				"TCS has invalid segment flags 0x%02x.\n",
				phdr->p_flags);
			goto err;
		}

		if (j == 0) {
			/* All later offsets are relative to the TCS page. */
			src_offset = phdr->p_offset & PAGE_MASK;

			seg->prot = PROT_READ | PROT_WRITE;
			seg->flags = SGX_PAGE_TYPE_TCS << 8;
		} else {
			seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0;
			seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0;
			seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0;
			seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
		}

		seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
		/* Round the file size up to whole enclave pages. */
		seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;

		printf("0x%016lx 0x%016lx 0x%02x\n", seg->offset, seg->size,
		       seg->prot);

		j++;
	}

	assert(j == encl->nr_segments);

	encl->src = encl->bin + src_offset;
	encl->src_size = encl->segment_tbl[j - 1].offset +
			 encl->segment_tbl[j - 1].size;

	/* ECREATE requires a power-of-two enclave size. */
	for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
		encl->encl_size <<= 1;

	return true;

err:
	encl_delete(encl);
	return false;
}

/*
 * Reserve a naturally aligned virtual range of encl_size bytes: map twice
 * the size, pick the aligned address inside it, then trim the unaligned
 * head and tail mappings.  SGX requires the enclave base to be aligned to
 * the enclave size.
 */
static bool encl_map_area(struct encl *encl)
{
	size_t encl_size = encl->encl_size;
	void *area;

	area = mmap(NULL, encl_size * 2, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return false;
	}

	encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1);

	munmap(area, encl->encl_base - (uint64_t)area);
	munmap((void *)(encl->encl_base + encl_size),
	       (uint64_t)area + encl_size - encl->encl_base);

	return true;
}

/*
 * Drive the full build sequence: reserve the address range, ECREATE,
 * EADD+EEXTEND every segment, then EINIT with the sigstruct produced by
 * encl_measure().  Returns false on the first failure; the caller is
 * expected to call encl_delete() for cleanup.
 */
bool encl_build(struct encl *encl)
{
	struct sgx_enclave_init ioc;
	int ret;
	int i;

	if (!encl_map_area(encl))
		return false;

	if (!encl_ioc_create(encl))
		return false;

	/*
	 * Pages must be added before mapping VMAs because their permissions
	 * cap the VMA permissions.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (!encl_ioc_add_pages(encl, seg))
			return false;
	}

	ioc.sigstruct = (uint64_t)&encl->sigstruct;
	ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
	if (ret) {
		fprintf(stderr, "SGX_IOC_ENCLAVE_INIT failed: errno=%d\n",
			errno);
		return false;
	}

	return true;
}
+246
tools/testing/selftests/sgx/main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include "defines.h"
#include "main.h"
#include "../kselftest.h"

/* The word the enclave is asked to copy; also the expected output. */
static const uint64_t MAGIC = 0x1122334455667788ULL;
/* Resolved address of __vdso_sgx_enter_enclave(); used by call.S too. */
vdso_sgx_enter_enclave_t eenter;

/* Handles to the vDSO's dynamic symbol, string and hash tables. */
struct vdso_symtab {
	Elf64_Sym *elf_symtab;
	const char *elf_symstrtab;
	Elf64_Word *elf_hashtab;
};

/*
 * Walk past the environment block to the auxiliary vector and return the
 * vDSO base address (AT_SYSINFO_EHDR), or NULL if not present.
 */
static void *vdso_get_base_addr(char *envp[])
{
	Elf64_auxv_t *auxv;
	int i;

	for (i = 0; envp[i]; i++)
		;

	/* The auxv starts right after envp's terminating NULL. */
	auxv = (Elf64_auxv_t *)&envp[i + 1];

	for (i = 0; auxv[i].a_type != AT_NULL; i++) {
		if (auxv[i].a_type == AT_SYSINFO_EHDR)
			return (void *)auxv[i].a_un.a_val;
	}

	return NULL;
}

/* Return the PT_DYNAMIC table of the (in-memory) vDSO image, or NULL. */
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
	int i;

	for (i = 0; i < ehdr->e_phnum; i++)
		if (phdrtab[i].p_type == PT_DYNAMIC)
			return addr + phdrtab[i].p_offset;

	return NULL;
}

/* Look up one dynamic-section entry by tag and resolve it to an address. */
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	int i;

	for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
		if (dyntab[i].d_tag == tag)
			return addr + dyntab[i].d_un.d_ptr;

	return NULL;
}

/* Populate *symtab from the vDSO's dynamic section; false on any miss. */
static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
	Elf64_Dyn *dyntab = vdso_get_dyntab(addr);

	symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
	if (!symtab->elf_symtab)
		return false;

	symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
	if (!symtab->elf_symstrtab)
		return false;

	symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
	if (!symtab->elf_hashtab)
		return false;

	return true;
}

/* Classic SysV ELF hash function (matches DT_HASH bucket layout). */
static unsigned long elf_sym_hash(const char *name)
{
	unsigned long h = 0, high;

	while (*name) {
		h = (h << 4) + *name++;
		high = h & 0xf0000000;

		if (high)
			h ^= high >> 24;

		h &= ~high;
	}

	return h;
}

/* Hash-table symbol lookup; returns NULL when `name` is not exported. */
static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
	Elf64_Word bucketnum = symtab->elf_hashtab[0];
	Elf64_Word *buckettab = &symtab->elf_hashtab[2];
	Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
	Elf64_Sym *sym;
	Elf64_Word i;

	for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
	     i = chaintab[i]) {
		sym = &symtab->elf_symtab[i];
		if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
			return sym;
	}

	return NULL;
}

/*
 * Validate one enclave round-trip: return code 0, EEXIT as the leaf
 * function, the MAGIC word copied into `result`, and user_data cleared.
 * Prints a FAIL line per violated expectation and returns false if any
 * check failed.
 */
bool report_results(struct sgx_enclave_run *run, int ret, uint64_t result,
		  const char *test)
{
	bool valid = true;

	if (ret) {
		printf("FAIL: %s() returned: %d\n", test, ret);
		valid = false;
	}

	if (run->function != EEXIT) {
		printf("FAIL: %s() function, expected: %u, got: %u\n", test, EEXIT,
		       run->function);
		valid = false;
	}

	if (result != MAGIC) {
		printf("FAIL: %s(), expected: 0x%lx, got: 0x%lx\n", test, MAGIC,
		       result);
		valid = false;
	}

	if (run->user_data) {
		printf("FAIL: %s() user data, expected: 0x0, got: 0x%llx\n",
		       test, run->user_data);
		valid = false;
	}

	return valid;
}

/*
 * Minimal vDSO exit handler: clear user_data (report_results() checks it)
 * and return 0 so the vDSO does not re-enter the enclave.
 */
static int user_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;
	return 0;
}

/*
 * Test driver: load, measure and build the enclave, mmap its segments,
 * resolve __vdso_sgx_enter_enclave, then enter the enclave three ways —
 * via the call.S trampoline, via the vDSO directly, and via the vDSO with
 * a user exit handler.  Skips (not fails) when the enclave cannot load,
 * e.g. on non-SGX hardware.
 */
int main(int argc, char *argv[], char *envp[])
{
	struct sgx_enclave_run run;
	struct vdso_symtab symtab;
	Elf64_Sym *eenter_sym;
	uint64_t result = 0;
	struct encl encl;
	unsigned int i;
	void *addr;
	int ret;

	memset(&run, 0, sizeof(run));

	if (!encl_load("test_encl.elf", &encl)) {
		encl_delete(&encl);
		ksft_exit_skip("cannot load enclaves\n");
	}

	if (!encl_measure(&encl))
		goto err;

	if (!encl_build(&encl))
		goto err;

	/*
	 * An enclave consumer only must do this.
	 */
	for (i = 0; i < encl.nr_segments; i++) {
		struct encl_segment *seg = &encl.segment_tbl[i];

		addr = mmap((void *)encl.encl_base + seg->offset, seg->size,
			    seg->prot, MAP_SHARED | MAP_FIXED, encl.fd, 0);
		if (addr == MAP_FAILED) {
			fprintf(stderr, "mmap() failed, errno=%d.\n", errno);
			exit(KSFT_FAIL);
		}
	}

	memset(&run, 0, sizeof(run));
	/* The TCS is the first page of the enclave (see test_encl.lds). */
	run.tcs = encl.encl_base;

	addr = vdso_get_base_addr(envp);
	if (!addr)
		goto err;

	if (!vdso_get_symtab(addr, &symtab))
		goto err;

	eenter_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
	if (!eenter_sym)
		goto err;

	eenter = addr + eenter_sym->st_value;

	/* Entry #1: through the assembly trampoline in call.S. */
	ret = sgx_call_vdso((void *)&MAGIC, &result, 0, EENTER, NULL, NULL, &run);
	if (!report_results(&run, ret, result, "sgx_call_vdso"))
		goto err;


	/* Invoke the vDSO directly. */
	result = 0;
	ret = eenter((unsigned long)&MAGIC, (unsigned long)&result, 0, EENTER,
		     0, 0, &run);
	if (!report_results(&run, ret, result, "eenter"))
		goto err;

	/* And with an exit handler. */
	run.user_handler = (__u64)user_handler;
	run.user_data = 0xdeadbeef;
	ret = eenter((unsigned long)&MAGIC, (unsigned long)&result, 0, EENTER,
		     0, 0, &run);
	if (!report_results(&run, ret, result, "user_handler"))
		goto err;

	printf("SUCCESS\n");
	encl_delete(&encl);
	exit(KSFT_PASS);

err:
	encl_delete(&encl);
	exit(KSFT_FAIL);
}
+38
tools/testing/selftests/sgx/main.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2016-20 Intel Corporation.
 */

#ifndef MAIN_H
#define MAIN_H

/* One PT_LOAD segment of the enclave binary, in enclave-relative terms. */
struct encl_segment {
	off_t offset;		/* offset from the enclave base */
	size_t size;		/* size, rounded up to whole pages */
	unsigned int prot;	/* PROT_* bits for the segment's VMA */
	unsigned int flags;	/* SECINFO flags (page type << 8 | perms) */
};

/* Everything needed to load, measure, build and run one test enclave. */
struct encl {
	int fd;				/* /dev/sgx_enclave handle */
	void *bin;			/* mmap'ed enclave ELF file */
	off_t bin_size;
	void *src;			/* first loadable byte inside bin */
	size_t src_size;
	size_t encl_size;		/* power-of-two ECREATE size */
	off_t encl_base;		/* size-aligned base address */
	unsigned int nr_segments;
	struct encl_segment *segment_tbl;
	struct sgx_secs secs;
	struct sgx_sigstruct sigstruct;
};

void encl_delete(struct encl *ctx);
bool encl_load(const char *path, struct encl *encl);
bool encl_measure(struct encl *encl);
bool encl_build(struct encl *encl);

/* Assembly trampoline (call.S) into __vdso_sgx_enter_enclave(). */
int sgx_call_vdso(void *rdi, void *rsi, long rdx, u32 function, void *r8, void *r9,
		  struct sgx_enclave_run *run);

#endif /* MAIN_H */
+391
tools/testing/selftests/sgx/sigstruct.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright(c) 2016-20 Intel Corporation. */ 3 + 4 + #define _GNU_SOURCE 5 + #include <assert.h> 6 + #include <getopt.h> 7 + #include <stdbool.h> 8 + #include <stdint.h> 9 + #include <stdio.h> 10 + #include <stdlib.h> 11 + #include <string.h> 12 + #include <sys/stat.h> 13 + #include <sys/types.h> 14 + #include <unistd.h> 15 + #include <openssl/err.h> 16 + #include <openssl/pem.h> 17 + #include "defines.h" 18 + #include "main.h" 19 + 20 + struct q1q2_ctx { 21 + BN_CTX *bn_ctx; 22 + BIGNUM *m; 23 + BIGNUM *s; 24 + BIGNUM *q1; 25 + BIGNUM *qr; 26 + BIGNUM *q2; 27 + }; 28 + 29 + static void free_q1q2_ctx(struct q1q2_ctx *ctx) 30 + { 31 + BN_CTX_free(ctx->bn_ctx); 32 + BN_free(ctx->m); 33 + BN_free(ctx->s); 34 + BN_free(ctx->q1); 35 + BN_free(ctx->qr); 36 + BN_free(ctx->q2); 37 + } 38 + 39 + static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m, 40 + struct q1q2_ctx *ctx) 41 + { 42 + ctx->bn_ctx = BN_CTX_new(); 43 + ctx->s = BN_bin2bn(s, SGX_MODULUS_SIZE, NULL); 44 + ctx->m = BN_bin2bn(m, SGX_MODULUS_SIZE, NULL); 45 + ctx->q1 = BN_new(); 46 + ctx->qr = BN_new(); 47 + ctx->q2 = BN_new(); 48 + 49 + if (!ctx->bn_ctx || !ctx->s || !ctx->m || !ctx->q1 || !ctx->qr || 50 + !ctx->q2) { 51 + free_q1q2_ctx(ctx); 52 + return false; 53 + } 54 + 55 + return true; 56 + } 57 + 58 + static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1, 59 + uint8_t *q2) 60 + { 61 + struct q1q2_ctx ctx; 62 + 63 + if (!alloc_q1q2_ctx(s, m, &ctx)) { 64 + fprintf(stderr, "Not enough memory for Q1Q2 calculation\n"); 65 + return false; 66 + } 67 + 68 + if (!BN_mul(ctx.q1, ctx.s, ctx.s, ctx.bn_ctx)) 69 + goto out; 70 + 71 + if (!BN_div(ctx.q1, ctx.qr, ctx.q1, ctx.m, ctx.bn_ctx)) 72 + goto out; 73 + 74 + if (BN_num_bytes(ctx.q1) > SGX_MODULUS_SIZE) { 75 + fprintf(stderr, "Too large Q1 %d bytes\n", 76 + BN_num_bytes(ctx.q1)); 77 + goto out; 78 + } 79 + 80 + if (!BN_mul(ctx.q2, ctx.s, ctx.qr, ctx.bn_ctx)) 81 + goto out; 82 + 83 + if 
(!BN_div(ctx.q2, NULL, ctx.q2, ctx.m, ctx.bn_ctx)) 84 + goto out; 85 + 86 + if (BN_num_bytes(ctx.q2) > SGX_MODULUS_SIZE) { 87 + fprintf(stderr, "Too large Q2 %d bytes\n", 88 + BN_num_bytes(ctx.q2)); 89 + goto out; 90 + } 91 + 92 + BN_bn2bin(ctx.q1, q1); 93 + BN_bn2bin(ctx.q2, q2); 94 + 95 + free_q1q2_ctx(&ctx); 96 + return true; 97 + out: 98 + free_q1q2_ctx(&ctx); 99 + return false; 100 + } 101 + 102 + struct sgx_sigstruct_payload { 103 + struct sgx_sigstruct_header header; 104 + struct sgx_sigstruct_body body; 105 + }; 106 + 107 + static bool check_crypto_errors(void) 108 + { 109 + int err; 110 + bool had_errors = false; 111 + const char *filename; 112 + int line; 113 + char str[256]; 114 + 115 + for ( ; ; ) { 116 + if (ERR_peek_error() == 0) 117 + break; 118 + 119 + had_errors = true; 120 + err = ERR_get_error_line(&filename, &line); 121 + ERR_error_string_n(err, str, sizeof(str)); 122 + fprintf(stderr, "crypto: %s: %s:%d\n", str, filename, line); 123 + } 124 + 125 + return had_errors; 126 + } 127 + 128 + static inline const BIGNUM *get_modulus(RSA *key) 129 + { 130 + const BIGNUM *n; 131 + 132 + RSA_get0_key(key, &n, NULL, NULL); 133 + return n; 134 + } 135 + 136 + static RSA *gen_sign_key(void) 137 + { 138 + BIGNUM *e; 139 + RSA *key; 140 + int ret; 141 + 142 + e = BN_new(); 143 + key = RSA_new(); 144 + 145 + if (!e || !key) 146 + goto err; 147 + 148 + ret = BN_set_word(e, RSA_3); 149 + if (ret != 1) 150 + goto err; 151 + 152 + ret = RSA_generate_key_ex(key, 3072, e, NULL); 153 + if (ret != 1) 154 + goto err; 155 + 156 + BN_free(e); 157 + 158 + return key; 159 + 160 + err: 161 + RSA_free(key); 162 + BN_free(e); 163 + 164 + return NULL; 165 + } 166 + 167 + static void reverse_bytes(void *data, int length) 168 + { 169 + int i = 0; 170 + int j = length - 1; 171 + uint8_t temp; 172 + uint8_t *ptr = data; 173 + 174 + while (i < j) { 175 + temp = ptr[i]; 176 + ptr[i] = ptr[j]; 177 + ptr[j] = temp; 178 + i++; 179 + j--; 180 + } 181 + } 182 + 183 + enum mrtags { 184 + 
MRECREATE = 0x0045544145524345, 185 + MREADD = 0x0000000044444145, 186 + MREEXTEND = 0x00444E4554584545, 187 + }; 188 + 189 + static bool mrenclave_update(EVP_MD_CTX *ctx, const void *data) 190 + { 191 + if (!EVP_DigestUpdate(ctx, data, 64)) { 192 + fprintf(stderr, "digest update failed\n"); 193 + return false; 194 + } 195 + 196 + return true; 197 + } 198 + 199 + static bool mrenclave_commit(EVP_MD_CTX *ctx, uint8_t *mrenclave) 200 + { 201 + unsigned int size; 202 + 203 + if (!EVP_DigestFinal_ex(ctx, (unsigned char *)mrenclave, &size)) { 204 + fprintf(stderr, "digest commit failed\n"); 205 + return false; 206 + } 207 + 208 + if (size != 32) { 209 + fprintf(stderr, "invalid digest size = %u\n", size); 210 + return false; 211 + } 212 + 213 + return true; 214 + } 215 + 216 + struct mrecreate { 217 + uint64_t tag; 218 + uint32_t ssaframesize; 219 + uint64_t size; 220 + uint8_t reserved[44]; 221 + } __attribute__((__packed__)); 222 + 223 + 224 + static bool mrenclave_ecreate(EVP_MD_CTX *ctx, uint64_t blob_size) 225 + { 226 + struct mrecreate mrecreate; 227 + uint64_t encl_size; 228 + 229 + for (encl_size = 0x1000; encl_size < blob_size; ) 230 + encl_size <<= 1; 231 + 232 + memset(&mrecreate, 0, sizeof(mrecreate)); 233 + mrecreate.tag = MRECREATE; 234 + mrecreate.ssaframesize = 1; 235 + mrecreate.size = encl_size; 236 + 237 + if (!EVP_DigestInit_ex(ctx, EVP_sha256(), NULL)) 238 + return false; 239 + 240 + return mrenclave_update(ctx, &mrecreate); 241 + } 242 + 243 + struct mreadd { 244 + uint64_t tag; 245 + uint64_t offset; 246 + uint64_t flags; /* SECINFO flags */ 247 + uint8_t reserved[40]; 248 + } __attribute__((__packed__)); 249 + 250 + static bool mrenclave_eadd(EVP_MD_CTX *ctx, uint64_t offset, uint64_t flags) 251 + { 252 + struct mreadd mreadd; 253 + 254 + memset(&mreadd, 0, sizeof(mreadd)); 255 + mreadd.tag = MREADD; 256 + mreadd.offset = offset; 257 + mreadd.flags = flags; 258 + 259 + return mrenclave_update(ctx, &mreadd); 260 + } 261 + 262 + struct mreextend { 
263 + uint64_t tag; 264 + uint64_t offset; 265 + uint8_t reserved[48]; 266 + } __attribute__((__packed__)); 267 + 268 + static bool mrenclave_eextend(EVP_MD_CTX *ctx, uint64_t offset, 269 + const uint8_t *data) 270 + { 271 + struct mreextend mreextend; 272 + int i; 273 + 274 + for (i = 0; i < 0x1000; i += 0x100) { 275 + memset(&mreextend, 0, sizeof(mreextend)); 276 + mreextend.tag = MREEXTEND; 277 + mreextend.offset = offset + i; 278 + 279 + if (!mrenclave_update(ctx, &mreextend)) 280 + return false; 281 + 282 + if (!mrenclave_update(ctx, &data[i + 0x00])) 283 + return false; 284 + 285 + if (!mrenclave_update(ctx, &data[i + 0x40])) 286 + return false; 287 + 288 + if (!mrenclave_update(ctx, &data[i + 0x80])) 289 + return false; 290 + 291 + if (!mrenclave_update(ctx, &data[i + 0xC0])) 292 + return false; 293 + } 294 + 295 + return true; 296 + } 297 + 298 + static bool mrenclave_segment(EVP_MD_CTX *ctx, struct encl *encl, 299 + struct encl_segment *seg) 300 + { 301 + uint64_t end = seg->offset + seg->size; 302 + uint64_t offset; 303 + 304 + for (offset = seg->offset; offset < end; offset += PAGE_SIZE) { 305 + if (!mrenclave_eadd(ctx, offset, seg->flags)) 306 + return false; 307 + 308 + if (!mrenclave_eextend(ctx, offset, encl->src + offset)) 309 + return false; 310 + } 311 + 312 + return true; 313 + } 314 + 315 + bool encl_measure(struct encl *encl) 316 + { 317 + uint64_t header1[2] = {0x000000E100000006, 0x0000000000010000}; 318 + uint64_t header2[2] = {0x0000006000000101, 0x0000000100000060}; 319 + struct sgx_sigstruct *sigstruct = &encl->sigstruct; 320 + struct sgx_sigstruct_payload payload; 321 + uint8_t digest[SHA256_DIGEST_LENGTH]; 322 + unsigned int siglen; 323 + RSA *key = NULL; 324 + EVP_MD_CTX *ctx; 325 + int i; 326 + 327 + memset(sigstruct, 0, sizeof(*sigstruct)); 328 + 329 + sigstruct->header.header1[0] = header1[0]; 330 + sigstruct->header.header1[1] = header1[1]; 331 + sigstruct->header.header2[0] = header2[0]; 332 + sigstruct->header.header2[1] = 
header2[1]; 333 + sigstruct->exponent = 3; 334 + sigstruct->body.attributes = SGX_ATTR_MODE64BIT; 335 + sigstruct->body.xfrm = 3; 336 + 337 + /* sanity check */ 338 + if (check_crypto_errors()) 339 + goto err; 340 + 341 + key = gen_sign_key(); 342 + if (!key) 343 + goto err; 344 + 345 + BN_bn2bin(get_modulus(key), sigstruct->modulus); 346 + 347 + ctx = EVP_MD_CTX_create(); 348 + if (!ctx) 349 + goto err; 350 + 351 + if (!mrenclave_ecreate(ctx, encl->src_size)) 352 + goto err; 353 + 354 + for (i = 0; i < encl->nr_segments; i++) { 355 + struct encl_segment *seg = &encl->segment_tbl[i]; 356 + 357 + if (!mrenclave_segment(ctx, encl, seg)) 358 + goto err; 359 + } 360 + 361 + if (!mrenclave_commit(ctx, sigstruct->body.mrenclave)) 362 + goto err; 363 + 364 + memcpy(&payload.header, &sigstruct->header, sizeof(sigstruct->header)); 365 + memcpy(&payload.body, &sigstruct->body, sizeof(sigstruct->body)); 366 + 367 + SHA256((unsigned char *)&payload, sizeof(payload), digest); 368 + 369 + if (!RSA_sign(NID_sha256, digest, SHA256_DIGEST_LENGTH, 370 + sigstruct->signature, &siglen, key)) 371 + goto err; 372 + 373 + if (!calc_q1q2(sigstruct->signature, sigstruct->modulus, sigstruct->q1, 374 + sigstruct->q2)) 375 + goto err; 376 + 377 + /* BE -> LE */ 378 + reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE); 379 + reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE); 380 + reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE); 381 + reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE); 382 + 383 + EVP_MD_CTX_destroy(ctx); 384 + RSA_free(key); 385 + return true; 386 + 387 + err: 388 + EVP_MD_CTX_destroy(ctx); 389 + RSA_free(key); 390 + return false; 391 + }
+20
tools/testing/selftests/sgx/test_encl.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright(c) 2016-20 Intel Corporation. */ 3 + 4 + #include <stddef.h> 5 + #include "defines.h" 6 + 7 + static void *memcpy(void *dest, const void *src, size_t n) 8 + { 9 + size_t i; 10 + 11 + for (i = 0; i < n; i++) 12 + ((char *)dest)[i] = ((char *)src)[i]; 13 + 14 + return dest; 15 + } 16 + 17 + void encl_body(void *rdi, void *rsi) 18 + { 19 + memcpy(rsi, rdi, 8); 20 + }
+40
tools/testing/selftests/sgx/test_encl.lds
OUTPUT_FORMAT(elf64-x86-64)

/* Three loadable segments; encl_load() requires the TCS to come first. */
PHDRS
{
	tcs PT_LOAD;
	text PT_LOAD;
	data PT_LOAD;
}

SECTIONS
{
	/* Enclave-relative addressing: the image starts at offset 0. */
	. = 0;
	.tcs : {
		*(.tcs*)
	} : tcs

	. = ALIGN(4096);
	.text : {
		*(.text*)
		*(.rodata*)
	} : text

	. = ALIGN(4096);
	.data : {
		*(.data*)
	} : data

	/* Metadata sections would end up measured into MRENCLAVE; drop them. */
	/DISCARD/ : {
		*(.comment*)
		*(.note*)
		*(.debug*)
		*(.eh_frame*)
	}
}

ASSERT(!DEFINED(.altinstructions), "ALTERNATIVES are not supported in enclaves")
ASSERT(!DEFINED(.altinstr_replacement), "ALTERNATIVES are not supported in enclaves")
ASSERT(!DEFINED(.discard.retpoline_safe), "RETPOLINE ALTERNATIVES are not supported in enclaves")
ASSERT(!DEFINED(.discard.nospec), "RETPOLINE ALTERNATIVES are not supported in enclaves")
ASSERT(!DEFINED(.got.plt), "Libcalls are not supported in enclaves")
+89
tools/testing/selftests/sgx/test_encl_bootstrap.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2016-20 Intel Corporation.
 */

	/* ENCLU opcode; the leaf function is selected via RAX. */
	.macro ENCLU
	.byte 0x0f, 0x01, 0xd7
	.endm

	.section ".tcs", "aw"
	.balign	4096

	# Hand-built TCS page (one 4 KiB page; layout per the SGX TCS).
	.fill	1, 8, 0			# STATE (set by CPU)
	.fill	1, 8, 0			# FLAGS
	.quad	encl_ssa		# OSSA
	.fill	1, 4, 0			# CSSA (set by CPU)
	.fill	1, 4, 1			# NSSA
	.quad	encl_entry		# OENTRY
	.fill	1, 8, 0			# AEP (set by EENTER and ERESUME)
	.fill	1, 8, 0			# OFSBASE
	.fill	1, 8, 0			# OGSBASE
	.fill	1, 4, 0xFFFFFFFF	# FSLIMIT
	.fill	1, 4, 0xFFFFFFFF	# GSLIMIT
	.fill	4024, 1, 0		# Reserved

	# Identical to the previous TCS.
	.fill	1, 8, 0			# STATE (set by CPU)
	.fill	1, 8, 0			# FLAGS
	.quad	encl_ssa		# OSSA
	.fill	1, 4, 0			# CSSA (set by CPU)
	.fill	1, 4, 1			# NSSA
	.quad	encl_entry		# OENTRY
	.fill	1, 8, 0			# AEP (set by EENTER and ERESUME)
	.fill	1, 8, 0			# OFSBASE
	.fill	1, 8, 0			# OGSBASE
	.fill	1, 4, 0xFFFFFFFF	# FSLIMIT
	.fill	1, 4, 0xFFFFFFFF	# GSLIMIT
	.fill	4024, 1, 0		# Reserved

	.text

encl_entry:
	# RBX contains the base address for TCS, which is also the first
	# address inside the enclave. By adding the value of encl_stack to
	# it, we get the absolute address for the stack.
	lea	(encl_stack)(%rbx), %rax
	xchg	%rsp, %rax
	push	%rax

	push	%rcx # push the address after EENTER
	push	%rbx # push the enclave base address

	call	encl_body

	pop	%rbx # pop the enclave base address

	/* Clear volatile GPRs, except RAX (EEXIT function). */
	xor     %rcx, %rcx
	xor     %rdx, %rdx
	xor     %rdi, %rdi
	xor     %rsi, %rsi
	xor     %r8, %r8
	xor     %r9, %r9
	xor     %r10, %r10
	xor     %r11, %r11

	# Reset status flags.
	add     %rdx, %rdx # OF = SF = AF = CF = 0; ZF = PF = 1

	# Prepare EEXIT target by popping the address of the instruction after
	# EENTER to RBX.
	pop	%rbx

	# Restore the caller stack.
	pop	%rax
	mov	%rax, %rsp

	# EEXIT (ENCLU leaf 4)
	mov	$4, %rax
	enclu

	.section ".data", "aw"

encl_ssa:
	.space 4096

	.balign 4096
	# 8 KiB stack; encl_stack labels its top (stack grows down).
	.space 8192
encl_stack: