Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at master 236 lines 7.2 kB view raw
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021-2022 Intel Corporation */
#ifndef _ASM_X86_TDX_H
#define _ASM_X86_TDX_H

#include <linux/init.h>
#include <linux/bits.h>
#include <linux/mmzone.h>

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/trapnr.h>
#include <asm/shared/tdx.h>

/*
 * SW-defined error codes.
 *
 * Bits 47:40 == 0xFF indicate the Reserved status code class that is never
 * used by the TDX module, so these values cannot collide with real
 * TDX module completion codes.
 */
#define TDX_ERROR			_BITUL(63)
#define TDX_NON_RECOVERABLE		_BITUL(62)
#define TDX_SW_ERROR			(TDX_ERROR | GENMASK_ULL(47, 40))
#define TDX_SEAMCALL_VMFAILINVALID	(TDX_SW_ERROR | _UL(0xFFFF0000))

/* SEAMCALL raised #GP / #UD; the trap number is folded into the SW error: */
#define TDX_SEAMCALL_GP			(TDX_SW_ERROR | X86_TRAP_GP)
#define TDX_SEAMCALL_UD			(TDX_SW_ERROR | X86_TRAP_UD)

/*
 * TDX module SEAMCALL leaf function error codes
 */
#define TDX_SUCCESS		0ULL
#define TDX_RND_NO_ENTROPY	0x8000020300000000ULL

#ifndef __ASSEMBLER__

#include <uapi/asm/mce.h>
#include <asm/tdx_global_metadata.h>
#include <linux/pgtable.h>

/*
 * Used by the #VE exception handler to gather the #VE exception
 * info from the TDX module. This is a software only structure
 * and not part of the TDX module/VMM ABI.
 */
struct ve_info {
	u64 exit_reason;
	u64 exit_qual;
	/* Guest Linear (virtual) Address */
	u64 gla;
	/* Guest Physical Address */
	u64 gpa;
	u32 instr_len;
	u32 instr_info;
};

#ifdef CONFIG_INTEL_TDX_GUEST

void __init tdx_early_init(void);

void tdx_get_ve_info(struct ve_info *ve);

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);

void tdx_halt(void);

bool tdx_early_handle_ve(struct pt_regs *regs);

int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);

int tdx_mcall_extend_rtmr(u8 index, u8 *data);

u64 tdx_hcall_get_quote(u8 *buf, size_t size);

void __init tdx_dump_attributes(u64 td_attr);
void __init tdx_dump_td_ctls(u64 td_ctls);

#else

/* !CONFIG_INTEL_TDX_GUEST: no-op stubs so callers need no #ifdef */
static inline void tdx_early_init(void) { };
static inline void tdx_halt(void) { };

static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }

#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_INTEL_TDX_GUEST)
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4);
#else
static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
				     unsigned long p2, unsigned long p3,
				     unsigned long p4)
{
	return -ENODEV;
}
#endif /* CONFIG_INTEL_TDX_GUEST && CONFIG_KVM_GUEST */

#ifdef CONFIG_INTEL_TDX_HOST
u64 __seamcall(u64 fn, struct tdx_module_args *args);
u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
void tdx_init(void);

#include <linux/preempt.h>
#include <asm/archrandom.h>
#include <asm/processor.h>

/* Signature shared by the three low-level __seamcall*() variants above. */
typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);

static __always_inline u64 __seamcall_dirty_cache(sc_func_t func, u64 fn,
						  struct tdx_module_args *args)
{
	lockdep_assert_preemption_disabled();

	/*
	 * SEAMCALLs are made to the TDX module and can generate dirty
	 * cachelines of TDX private memory. Mark cache state incoherent
	 * so that the cache can be flushed during kexec.
	 *
	 * This needs to be done before actually making the SEAMCALL,
	 * because kexec-ing CPU could send NMI to stop remote CPUs,
	 * in which case even disabling IRQ won't help here.
	 */
	this_cpu_write(cache_state_incoherent, true);

	return func(fn, args);
}

/*
 * Invoke @func with preemption disabled, retrying (up to
 * RDRAND_RETRY_LOOPS times) while the TDX module reports it ran out
 * of entropy (TDX_RND_NO_ENTROPY).  Returns the last completion code.
 */
static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
				    struct tdx_module_args *args)
{
	int retry = RDRAND_RETRY_LOOPS;
	u64 ret;

	do {
		preempt_disable();
		ret = __seamcall_dirty_cache(func, fn, args);
		preempt_enable();
	} while (ret == TDX_RND_NO_ENTROPY && --retry);

	return ret;
}

/* Entropy-retrying front ends for the three __seamcall*() variants: */
#define seamcall(_fn, _args)		sc_retry(__seamcall, (_fn), (_args))
#define seamcall_ret(_fn, _args)	sc_retry(__seamcall_ret, (_fn), (_args))
#define seamcall_saved_ret(_fn, _args)	sc_retry(__seamcall_saved_ret, (_fn), (_args))
const char *tdx_dump_mce_info(struct mce *m);
const struct tdx_sys_info *tdx_get_sysinfo(void);

int tdx_guest_keyid_alloc(void);
u32 tdx_get_nr_guest_keyids(void);
void tdx_guest_keyid_free(unsigned int keyid);

void tdx_quirk_reset_page(struct page *page);

struct tdx_td {
	/* TD root structure: */
	struct page *tdr_page;

	int tdcs_nr_pages;
	/* TD control structure: */
	struct page **tdcs_pages;

	/* Size of `tdcx_pages` in struct tdx_vp */
	int tdcx_nr_pages;
};

struct tdx_vp {
	/* TDVP root page */
	struct page *tdvpr_page;
	/* precalculated page_to_phys(tdvpr_page) for use in noinstr code */
	phys_addr_t tdvpr_pa;

	/* TD vCPU control structure: */
	struct page **tdcx_pages;
};

/* Build a physical address with the given MKTME/TDX KeyID encoded in it. */
static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
{
	u64 ret;

	ret = page_to_phys(page);
	/* KeyID bits are just above the physical address bits: */
	ret |= (u64)hkid << boot_cpu_data.x86_phys_bits;

	return ret;
}

/*
 * Map a kernel page-table level to the TDX Secure-EPT level number
 * (TDX SEPT levels start at 0 for 4K, i.e. PG_LEVEL_4K - 1).
 */
static inline int pg_level_to_tdx_sept_level(enum pg_level level)
{
	WARN_ON_ONCE(level == PG_LEVEL_NONE);
	return level - 1;
}

/*
 * Host-side wrappers around individual TDX module SEAMCALL leaves
 * (TDH.*).  Presumably consumed by KVM's TDX support — callers are
 * outside this header.
 */
u64 tdh_vp_enter(struct tdx_vp *vp, struct tdx_module_args *args);
u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page);
u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page);
u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mng_key_config(struct tdx_td *td);
u64 tdh_mng_create(struct tdx_td *td, u16 hkid);
u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp);
u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data);
u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mr_finalize(struct tdx_td *td);
u64 tdh_vp_flush(struct tdx_vp *vp);
u64 tdh_mng_vpflushdone(struct tdx_td *td);
u64 tdh_mng_key_freeid(struct tdx_td *td);
u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err);
u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid);
u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data);
u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask);
u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size);
u64 tdh_mem_track(struct tdx_td *tdr);
u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
#else
/* !CONFIG_INTEL_TDX_HOST: no-op/NULL stubs so callers need no #ifdef */
static inline void tdx_init(void) { }
static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
#endif /* CONFIG_INTEL_TDX_HOST */

#ifdef CONFIG_KEXEC_CORE
/* Flush caches marked incoherent by __seamcall_dirty_cache() before kexec. */
void tdx_cpu_flush_cache_for_kexec(void);
#else
static inline void tdx_cpu_flush_cache_for_kexec(void) { }
#endif

#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_TDX_H */