Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Move vcpu PC/Exception flags to the input flag set

The PC update flags (which also deal with exception injection)
are one of the most complicated uses of the flags we have. Make them
more foolproof by:

- moving them over to the new accessors and assigning them to the
input flag set

- turning the combination of generic ELx flags with another flag
indicating the target EL itself into an explicit set of
flags for each EL and vector combination

- adding a new accessor to pend the exception

This is otherwise a pretty straightforward conversion.

Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>

+61 -54
+8 -1
arch/arm64/include/asm/kvm_emulate.h
··· 473 473 474 474 static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu) 475 475 { 476 - vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC; 476 + vcpu_set_flag(vcpu, INCREMENT_PC); 477 477 } 478 + 479 + #define kvm_pend_exception(v, e) \ 480 + do { \ 481 + vcpu_set_flag((v), PENDING_EXCEPTION); \ 482 + vcpu_set_flag((v), e); \ 483 + } while (0) 484 + 478 485 479 486 static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature) 480 487 {
+34 -24
arch/arm64/include/asm/kvm_host.h
··· 474 474 /* PTRAUTH exposed to guest */ 475 475 #define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2)) 476 476 477 + /* Exception pending */ 478 + #define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0)) 479 + /* 480 + * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't 481 + * be set together with an exception... 482 + */ 483 + #define INCREMENT_PC __vcpu_single_flag(iflags, BIT(1)) 484 + /* Target EL/MODE (not a single flag, but let's abuse the macro) */ 485 + #define EXCEPT_MASK __vcpu_single_flag(iflags, GENMASK(3, 1)) 486 + 487 + /* Helpers to encode exceptions with minimum fuss */ 488 + #define __EXCEPT_MASK_VAL unpack_vcpu_flag(EXCEPT_MASK) 489 + #define __EXCEPT_SHIFT __builtin_ctzl(__EXCEPT_MASK_VAL) 490 + #define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL 491 + 492 + /* 493 + * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following 494 + * values: 495 + * 496 + * For AArch32 EL1: 497 + */ 498 + #define EXCEPT_AA32_UND __vcpu_except_flags(0) 499 + #define EXCEPT_AA32_IABT __vcpu_except_flags(1) 500 + #define EXCEPT_AA32_DABT __vcpu_except_flags(2) 501 + /* For AArch64: */ 502 + #define EXCEPT_AA64_EL1_SYNC __vcpu_except_flags(0) 503 + #define EXCEPT_AA64_EL1_IRQ __vcpu_except_flags(1) 504 + #define EXCEPT_AA64_EL1_FIQ __vcpu_except_flags(2) 505 + #define EXCEPT_AA64_EL1_SERR __vcpu_except_flags(3) 506 + /* For AArch64 with NV (one day): */ 507 + #define EXCEPT_AA64_EL2_SYNC __vcpu_except_flags(4) 508 + #define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5) 509 + #define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6) 510 + #define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7) 477 511 478 512 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ 479 513 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \ ··· 532 498 /* vcpu_arch flags field values: */ 533 499 #define KVM_ARM64_DEBUG_DIRTY (1 << 0) 534 500 #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for 
EL0 */ 535 - #define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */ 536 - /* 537 - * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be 538 - * set together with an exception... 539 - */ 540 - #define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */ 541 - #define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */ 542 - /* 543 - * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can 544 - * take the following values: 545 - * 546 - * For AArch32 EL1: 547 - */ 548 - #define KVM_ARM64_EXCEPT_AA32_UND (0 << 9) 549 - #define KVM_ARM64_EXCEPT_AA32_IABT (1 << 9) 550 - #define KVM_ARM64_EXCEPT_AA32_DABT (2 << 9) 551 - /* For AArch64: */ 552 - #define KVM_ARM64_EXCEPT_AA64_ELx_SYNC (0 << 9) 553 - #define KVM_ARM64_EXCEPT_AA64_ELx_IRQ (1 << 9) 554 - #define KVM_ARM64_EXCEPT_AA64_ELx_FIQ (2 << 9) 555 - #define KVM_ARM64_EXCEPT_AA64_ELx_SERR (3 << 9) 556 - #define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11) 557 - #define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11) 558 - 559 501 #define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ 560 502 #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ 561 503 #define KVM_ARM64_ON_UNSUPPORTED_CPU (1 << 15) /* Physical CPU not in supported_cpus */
+2 -2
arch/arm64/kvm/arm.c
··· 1013 1013 * the vcpu state. Note that this relies on __kvm_adjust_pc() 1014 1014 * being preempt-safe on VHE. 1015 1015 */ 1016 - if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION | 1017 - KVM_ARM64_INCREMENT_PC))) 1016 + if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) || 1017 + vcpu_get_flag(vcpu, INCREMENT_PC))) 1018 1018 kvm_call_hyp(__kvm_adjust_pc, vcpu); 1019 1019 1020 1020 vcpu_put(vcpu);
+11 -12
arch/arm64/kvm/hyp/exception.c
··· 303 303 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 304 304 { 305 305 if (vcpu_el1_is_32bit(vcpu)) { 306 - switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) { 307 - case KVM_ARM64_EXCEPT_AA32_UND: 306 + switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) { 307 + case unpack_vcpu_flag(EXCEPT_AA32_UND): 308 308 enter_exception32(vcpu, PSR_AA32_MODE_UND, 4); 309 309 break; 310 - case KVM_ARM64_EXCEPT_AA32_IABT: 310 + case unpack_vcpu_flag(EXCEPT_AA32_IABT): 311 311 enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12); 312 312 break; 313 - case KVM_ARM64_EXCEPT_AA32_DABT: 313 + case unpack_vcpu_flag(EXCEPT_AA32_DABT): 314 314 enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16); 315 315 break; 316 316 default: ··· 318 318 break; 319 319 } 320 320 } else { 321 - switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) { 322 - case (KVM_ARM64_EXCEPT_AA64_ELx_SYNC | 323 - KVM_ARM64_EXCEPT_AA64_EL1): 321 + switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) { 322 + case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC): 324 323 enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync); 325 324 break; 326 325 default: ··· 339 340 */ 340 341 void __kvm_adjust_pc(struct kvm_vcpu *vcpu) 341 342 { 342 - if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) { 343 + if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) { 343 344 kvm_inject_exception(vcpu); 344 - vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION | 345 - KVM_ARM64_EXCEPT_MASK); 346 - } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) { 345 + vcpu_clear_flag(vcpu, PENDING_EXCEPTION); 346 + vcpu_clear_flag(vcpu, EXCEPT_MASK); 347 + } else if (vcpu_get_flag(vcpu, INCREMENT_PC)) { 347 348 kvm_skip_instr(vcpu); 348 - vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC; 349 + vcpu_clear_flag(vcpu, INCREMENT_PC); 349 350 } 350 351 }
+1 -3
arch/arm64/kvm/hyp/nvhe/sys_regs.c
··· 38 38 *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); 39 39 *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR); 40 40 41 - vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | 42 - KVM_ARM64_EXCEPT_AA64_ELx_SYNC | 43 - KVM_ARM64_PENDING_EXCEPTION); 41 + kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); 44 42 45 43 __kvm_adjust_pc(vcpu); 46 44
+5 -12
arch/arm64/kvm/inject_fault.c
··· 20 20 bool is_aarch32 = vcpu_mode_is_32bit(vcpu); 21 21 u64 esr = 0; 22 22 23 - vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | 24 - KVM_ARM64_EXCEPT_AA64_ELx_SYNC | 25 - KVM_ARM64_PENDING_EXCEPTION); 23 + kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); 26 24 27 25 vcpu_write_sys_reg(vcpu, addr, FAR_EL1); 28 26 ··· 50 52 { 51 53 u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); 52 54 53 - vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | 54 - KVM_ARM64_EXCEPT_AA64_ELx_SYNC | 55 - KVM_ARM64_PENDING_EXCEPTION); 55 + kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); 56 56 57 57 /* 58 58 * Build an unknown exception, depending on the instruction ··· 69 73 70 74 static void inject_undef32(struct kvm_vcpu *vcpu) 71 75 { 72 - vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_UND | 73 - KVM_ARM64_PENDING_EXCEPTION); 76 + kvm_pend_exception(vcpu, EXCEPT_AA32_UND); 74 77 } 75 78 76 79 /* ··· 92 97 far = vcpu_read_sys_reg(vcpu, FAR_EL1); 93 98 94 99 if (is_pabt) { 95 - vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT | 96 - KVM_ARM64_PENDING_EXCEPTION); 100 + kvm_pend_exception(vcpu, EXCEPT_AA32_IABT); 97 101 far &= GENMASK(31, 0); 98 102 far |= (u64)addr << 32; 99 103 vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2); 100 104 } else { /* !iabt */ 101 - vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT | 102 - KVM_ARM64_PENDING_EXCEPTION); 105 + kvm_pend_exception(vcpu, EXCEPT_AA32_DABT); 103 106 far &= GENMASK(63, 32); 104 107 far |= addr; 105 108 vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);