Linux kernel mirror — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (at v5.11-rc6, 774 lines, 24 kB)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64 vmid_gen;
	u32 vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm *kvm;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64 vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};

struct kvm_vcpu_fault_info {
	u32 esr_el2;	/* Hyp Syndrome Register */
	u64 far_el2;	/* Hyp Fault Address Register */
	u64 hpfar_el2;	/* Hyp IPA Fault Address Register */
	u64 disr_el1;	/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};
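
/*
 * Editor's illustration (hypothetical helper, not in the original
 * header): because the PMU event counter and event type registers are
 * laid out contiguously in the enum above, counter "idx" (0..30) can
 * be addressed by simple offset arithmetic, as KVM's PMU emulation
 * does internally.
 */
static inline enum vcpu_sysreg vcpu_pmu_event_counter(unsigned int idx)
{
	return (enum vcpu_sysreg)(PMEVCNTR0_EL0 + idx);
}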

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};
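
/*
 * Editor's illustration (hypothetical helper, not in the original
 * header): the per-function "implemented" flags in kvm_host_psci_config
 * above only matter on PSCI v0.1 hosts, where function IDs are not
 * architecturally fixed; a caller could check availability like this.
 */
static inline bool kvm_host_psci_0_1_has_cpu_on(void)
{
	return kvm_host_psci_config.psci_0_1_cpu_on_implemented;
}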

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug values we
	 * want to use while debugging the guest; these are set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};
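
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: the debug_ptr indirection described in kvm_vcpu_arch means
 * that selecting which debug registers reach the hardware is a single
 * pointer assignment, e.g. to run with the guest's own state:
 *
 *	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
 */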

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
			     sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */

/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif
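
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: under the encoding above, queuing (for example) a
 * synchronous exception for the guest's EL1 amounts to OR-ing the
 * target-EL and type bits into vcpu->arch.flags together with the
 * pending bit:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1      |
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */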

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
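
/*
 * Editor's note -- a sketch of the intended usage, not the kernel's
 * exact implementation: on VHE, a sysreg read can try the CPU copy
 * first and fall back to the memory-backed value when the register is
 * not currently loaded:
 *
 *	u64 val;
 *
 *	if (!__vcpu_read_sys_reg_from_cpu(reg, &val))
 *		val = __vcpu_sys_reg(vcpu, reg);
 */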

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
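
/*
 * Editor's note -- an illustrative call site, not part of the original
 * header: a hyp function such as __kvm_flush_vm_context() (declared in
 * asm/kvm_asm.h) is invoked through the wrapper above; on VHE this is a
 * direct function call followed by an isb(), on nVHE an HVC into the
 * hypervisor:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 */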

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented. The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_vcpu_has_pmu(vcpu)				\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

#endif /* __ARM64_KVM_HOST_H__ */