Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Migrate _elx sysreg accessors to msr_s/mrs_s

Currently, the {read,write}_sysreg_el*() accessors for accessing
particular ELs' sysregs in the presence of VHE rely on some local
hacks and define their system register encodings in a way that is
inconsistent with the core definitions in <asm/sysreg.h>.

As a result, it is necessary to add duplicate definitions for any
system register that already needs a definition in sysreg.h for
other reasons.

This is a bit of a maintenance headache, and the reasons for the
_el*() accessors working the way they do are a bit historical.

This patch gets rid of the shadow sysreg definitions in
<asm/kvm_hyp.h>, converts the _el*() accessors to use the core
__msr_s/__mrs_s interface, and converts all call sites to use the
standard sysreg #define names (i.e., upper case, with SYS_ prefix).

Since this patch will conflict heavily anyway, the opportunity is
taken to clean up some bad whitespace in the context of the
changes.

The change exposes a few system registers that have no sysreg.h
definition, due to msr_s/mrs_s being used in place of msr/mrs:
additions are made in order to fill in the gaps.

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoffer Dall <christoffer.dall@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Link: https://www.spinics.net/lists/kvm-arm/msg31717.html
[Rebased to v4.21-rc1]
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
[Rebased to v5.2-rc5, changelog updates]
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>

authored by

Dave Martin and committed by
Marc Zyngier
fdec2a9e 49caebe9

+148 -156
+7 -6
arch/arm/include/asm/kvm_hyp.h
··· 82 82 #define VFP_FPEXC __ACCESS_VFP(FPEXC) 83 83 84 84 /* AArch64 compatibility macros, only for the timer so far */ 85 - #define read_sysreg_el0(r) read_sysreg(r##_el0) 86 - #define write_sysreg_el0(v, r) write_sysreg(v, r##_el0) 85 + #define read_sysreg_el0(r) read_sysreg(r##_EL0) 86 + #define write_sysreg_el0(v, r) write_sysreg(v, r##_EL0) 87 87 88 - #define cntp_ctl_el0 CNTP_CTL 89 - #define cntp_cval_el0 CNTP_CVAL 90 - #define cntv_ctl_el0 CNTV_CTL 91 - #define cntv_cval_el0 CNTV_CVAL 88 + #define SYS_CNTP_CTL_EL0 CNTP_CTL 89 + #define SYS_CNTP_CVAL_EL0 CNTP_CVAL 90 + #define SYS_CNTV_CTL_EL0 CNTV_CTL 91 + #define SYS_CNTV_CVAL_EL0 CNTV_CVAL 92 + 92 93 #define cntvoff_el2 CNTVOFF 93 94 #define cnthctl_el2 CNTHCTL 94 95
+8 -8
arch/arm64/include/asm/kvm_emulate.h
··· 126 126 static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu) 127 127 { 128 128 if (vcpu->arch.sysregs_loaded_on_cpu) 129 - return read_sysreg_el1(elr); 129 + return read_sysreg_el1(SYS_ELR); 130 130 else 131 131 return *__vcpu_elr_el1(vcpu); 132 132 } ··· 134 134 static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v) 135 135 { 136 136 if (vcpu->arch.sysregs_loaded_on_cpu) 137 - write_sysreg_el1(v, elr); 137 + write_sysreg_el1(v, SYS_ELR); 138 138 else 139 139 *__vcpu_elr_el1(vcpu) = v; 140 140 } ··· 186 186 return vcpu_read_spsr32(vcpu); 187 187 188 188 if (vcpu->arch.sysregs_loaded_on_cpu) 189 - return read_sysreg_el1(spsr); 189 + return read_sysreg_el1(SYS_SPSR); 190 190 else 191 191 return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1]; 192 192 } ··· 199 199 } 200 200 201 201 if (vcpu->arch.sysregs_loaded_on_cpu) 202 - write_sysreg_el1(v, spsr); 202 + write_sysreg_el1(v, SYS_SPSR); 203 203 else 204 204 vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v; 205 205 } ··· 465 465 */ 466 466 static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) 467 467 { 468 - *vcpu_pc(vcpu) = read_sysreg_el2(elr); 469 - vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr); 468 + *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); 469 + vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); 470 470 471 471 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 472 472 473 - write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr); 474 - write_sysreg_el2(*vcpu_pc(vcpu), elr); 473 + write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR); 474 + write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR); 475 475 } 476 476 477 477 #endif /* __ARM64_KVM_EMULATE_H__ */
+5 -45
arch/arm64/include/asm/kvm_hyp.h
··· 18 18 #define read_sysreg_elx(r,nvh,vh) \ 19 19 ({ \ 20 20 u64 reg; \ 21 - asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ 21 + asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh), \ 22 22 __mrs_s("%0", r##vh), \ 23 23 ARM64_HAS_VIRT_HOST_EXTN) \ 24 24 : "=r" (reg)); \ ··· 28 28 #define write_sysreg_elx(v,r,nvh,vh) \ 29 29 do { \ 30 30 u64 __val = (u64)(v); \ 31 - asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ 31 + asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"), \ 32 32 __msr_s(r##vh, "%x0"), \ 33 33 ARM64_HAS_VIRT_HOST_EXTN) \ 34 34 : : "rZ" (__val)); \ ··· 37 37 /* 38 38 * Unified accessors for registers that have a different encoding 39 39 * between VHE and non-VHE. They must be specified without their "ELx" 40 - * encoding. 40 + * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h. 41 41 */ 42 - #define read_sysreg_el2(r) \ 43 - ({ \ 44 - u64 reg; \ 45 - asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\ 46 - "mrs %0, " __stringify(r##_EL1),\ 47 - ARM64_HAS_VIRT_HOST_EXTN) \ 48 - : "=r" (reg)); \ 49 - reg; \ 50 - }) 51 - 52 - #define write_sysreg_el2(v,r) \ 53 - do { \ 54 - u64 __val = (u64)(v); \ 55 - asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\ 56 - "msr " __stringify(r##_EL1) ", %x0",\ 57 - ARM64_HAS_VIRT_HOST_EXTN) \ 58 - : : "rZ" (__val)); \ 59 - } while (0) 60 42 61 43 #define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02) 62 44 #define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02) 63 45 #define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12) 64 46 #define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12) 65 - 66 - /* The VHE specific system registers and their encoding */ 67 - #define sctlr_EL12 sys_reg(3, 5, 1, 0, 0) 68 - #define cpacr_EL12 sys_reg(3, 5, 1, 0, 2) 69 - #define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0) 70 - #define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1) 71 - #define tcr_EL12 sys_reg(3, 5, 2, 0, 2) 72 - #define afsr0_EL12 sys_reg(3, 5, 5, 1, 0) 73 - #define 
afsr1_EL12 sys_reg(3, 5, 5, 1, 1) 74 - #define esr_EL12 sys_reg(3, 5, 5, 2, 0) 75 - #define far_EL12 sys_reg(3, 5, 6, 0, 0) 76 - #define mair_EL12 sys_reg(3, 5, 10, 2, 0) 77 - #define amair_EL12 sys_reg(3, 5, 10, 3, 0) 78 - #define vbar_EL12 sys_reg(3, 5, 12, 0, 0) 79 - #define contextidr_EL12 sys_reg(3, 5, 13, 0, 1) 80 - #define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0) 81 - #define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0) 82 - #define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1) 83 - #define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2) 84 - #define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0) 85 - #define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1) 86 - #define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2) 87 - #define spsr_EL12 sys_reg(3, 5, 4, 0, 0) 88 - #define elr_EL12 sys_reg(3, 5, 4, 0, 1) 47 + #define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1) 48 + #define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1) 89 49 90 50 /** 91 51 * hyp_alternate_select - Generates patchable code sequences that are
+33 -2
arch/arm64/include/asm/sysreg.h
··· 191 191 #define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0) 192 192 #define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1) 193 193 194 + #define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0) 195 + #define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1) 196 + 194 197 #define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) 195 198 196 199 #define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0) ··· 385 382 #define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) 386 383 #define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) 387 384 385 + #define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1) 386 + #define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2) 387 + 388 388 #define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) 389 389 #define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) 390 390 #define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) ··· 398 392 #define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3)) 399 393 #define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n)) 400 394 401 - #define SYS_PMCCFILTR_EL0 sys_reg (3, 3, 14, 15, 7) 395 + #define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7) 402 396 403 397 #define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) 404 - 405 398 #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) 399 + #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) 400 + #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) 406 401 #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) 402 + #define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0) 407 403 #define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3) 408 404 #define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) 405 + #define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0) 409 406 410 407 #define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) 411 408 #define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) ··· 453 444 #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) 454 445 455 446 /* VHE encodings for architectural EL0/1 system registers */ 447 + #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) 448 + #define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) 456 449 #define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) 450 + #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) 451 + 
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) 452 + #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) 453 + #define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0) 454 + #define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1) 455 + #define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) 456 + #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) 457 + #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) 458 + #define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) 459 + #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) 460 + #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) 461 + #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) 462 + #define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) 463 + #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) 464 + #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) 465 + #define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) 466 + #define SYS_CNTP_CVAL_EL02 sys_reg(3, 5, 14, 2, 2) 467 + #define SYS_CNTV_TVAL_EL02 sys_reg(3, 5, 14, 3, 0) 468 + #define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1) 469 + #define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) 457 470 458 471 /* Common SCTLR_ELx flags. */ 459 472 #define SCTLR_ELx_DSSBS (_BITUL(44))
+7 -7
arch/arm64/kvm/hyp/switch.c
··· 284 284 if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) 285 285 return true; 286 286 287 - far = read_sysreg_el2(far); 287 + far = read_sysreg_el2(SYS_FAR); 288 288 289 289 /* 290 290 * The HPFAR can be invalid if the stage 2 fault did not ··· 401 401 static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) 402 402 { 403 403 if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) 404 - vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr); 404 + vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR); 405 405 406 406 /* 407 407 * We're using the raw exception code in order to only process ··· 697 697 asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va)); 698 698 699 699 __hyp_do_panic(str_va, 700 - spsr, elr, 701 - read_sysreg(esr_el2), read_sysreg_el2(far), 700 + spsr, elr, 701 + read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR), 702 702 read_sysreg(hpfar_el2), par, vcpu); 703 703 } 704 704 ··· 713 713 714 714 panic(__hyp_panic_string, 715 715 spsr, elr, 716 - read_sysreg_el2(esr), read_sysreg_el2(far), 716 + read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR), 717 717 read_sysreg(hpfar_el2), par, vcpu); 718 718 } 719 719 NOKPROBE_SYMBOL(__hyp_call_panic_vhe); 720 720 721 721 void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) 722 722 { 723 - u64 spsr = read_sysreg_el2(spsr); 724 - u64 elr = read_sysreg_el2(elr); 723 + u64 spsr = read_sysreg_el2(SYS_SPSR); 724 + u64 elr = read_sysreg_el2(SYS_ELR); 725 725 u64 par = read_sysreg(par_el1); 726 726 727 727 if (!has_vhe())
+39 -39
arch/arm64/kvm/hyp/sysreg-sr.c
··· 43 43 static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) 44 44 { 45 45 ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); 46 - ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr); 46 + ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR); 47 47 ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); 48 - ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr); 49 - ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0); 50 - ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1); 51 - ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr); 52 - ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr); 53 - ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0); 54 - ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1); 55 - ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far); 56 - ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair); 57 - ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar); 58 - ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr); 59 - ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair); 60 - ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl); 48 + ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR); 49 + ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0); 50 + ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1); 51 + ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR); 52 + ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR); 53 + ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0); 54 + ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1); 55 + ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR); 56 + ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR); 57 + ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR); 58 + ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR); 59 + ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR); 60 + ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL); 61 61 ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); 62 62 ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); 63 63 64 64 
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); 65 - ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr); 66 - ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr); 65 + ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR); 66 + ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR); 67 67 } 68 68 69 69 static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) 70 70 { 71 - ctxt->gp_regs.regs.pc = read_sysreg_el2(elr); 72 - ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr); 71 + ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR); 72 + ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); 73 73 74 74 if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) 75 75 ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2); ··· 109 109 110 110 static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt) 111 111 { 112 - write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); 113 - write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); 112 + write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); 113 + write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); 114 114 } 115 115 116 116 static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) 117 117 { 118 118 write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); 119 119 write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); 120 - write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr); 121 - write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); 122 - write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr); 123 - write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0); 124 - write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1); 125 - write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr); 126 - write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr); 127 - write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0); 128 - write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1); 129 - write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far); 130 - write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair); 131 - write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], 
vbar); 132 - write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr); 133 - write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair); 134 - write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl); 120 + write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR); 121 + write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); 122 + write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR); 123 + write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0); 124 + write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1); 125 + write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR); 126 + write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR); 127 + write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0); 128 + write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1); 129 + write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR); 130 + write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR); 131 + write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR); 132 + write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR); 133 + write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR); 134 + write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL); 135 135 write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); 136 136 write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); 137 137 138 138 write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); 139 - write_sysreg_el1(ctxt->gp_regs.elr_el1, elr); 140 - write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr); 139 + write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR); 140 + write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR); 141 141 } 142 142 143 143 static void __hyp_text ··· 160 160 if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t) 161 161 pstate = PSR_MODE_EL2h | PSR_IL_BIT; 162 162 163 - write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); 164 - write_sysreg_el2(pstate, spsr); 163 + write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR); 164 + write_sysreg_el2(pstate, SYS_SPSR); 165 165 166 166 if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) 167 167 write_sysreg_s(ctxt->sys_regs[DISR_EL1], 
SYS_VDISR_EL2);
+6 -6
arch/arm64/kvm/hyp/tlb.c
··· 33 33 * in the TCR_EL1 register. We also need to prevent it to 34 34 * allocate IPA->PA walks, so we enable the S1 MMU... 35 35 */ 36 - val = cxt->tcr = read_sysreg_el1(tcr); 36 + val = cxt->tcr = read_sysreg_el1(SYS_TCR); 37 37 val |= TCR_EPD1_MASK | TCR_EPD0_MASK; 38 - write_sysreg_el1(val, tcr); 39 - val = cxt->sctlr = read_sysreg_el1(sctlr); 38 + write_sysreg_el1(val, SYS_TCR); 39 + val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR); 40 40 val |= SCTLR_ELx_M; 41 - write_sysreg_el1(val, sctlr); 41 + write_sysreg_el1(val, SYS_SCTLR); 42 42 } 43 43 44 44 /* ··· 85 85 86 86 if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) { 87 87 /* Restore the registers to what they were */ 88 - write_sysreg_el1(cxt->tcr, tcr); 89 - write_sysreg_el1(cxt->sctlr, sctlr); 88 + write_sysreg_el1(cxt->tcr, SYS_TCR); 89 + write_sysreg_el1(cxt->sctlr, SYS_SCTLR); 90 90 } 91 91 92 92 local_irq_restore(cxt->flags);
+1 -1
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
··· 16 16 static bool __hyp_text __is_be(struct kvm_vcpu *vcpu) 17 17 { 18 18 if (vcpu_mode_is_32bit(vcpu)) 19 - return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT); 19 + return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT); 20 20 21 21 return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE); 22 22 }
+2 -2
arch/arm64/kvm/regmap.c
··· 152 152 153 153 switch (spsr_idx) { 154 154 case KVM_SPSR_SVC: 155 - return read_sysreg_el1(spsr); 155 + return read_sysreg_el1(SYS_SPSR); 156 156 case KVM_SPSR_ABT: 157 157 return read_sysreg(spsr_abt); 158 158 case KVM_SPSR_UND: ··· 177 177 178 178 switch (spsr_idx) { 179 179 case KVM_SPSR_SVC: 180 - write_sysreg_el1(v, spsr); 180 + write_sysreg_el1(v, SYS_SPSR); 181 181 case KVM_SPSR_ABT: 182 182 write_sysreg(v, spsr_abt); 183 183 case KVM_SPSR_UND:
+28 -28
arch/arm64/kvm/sys_regs.c
··· 81 81 */ 82 82 switch (reg) { 83 83 case CSSELR_EL1: return read_sysreg_s(SYS_CSSELR_EL1); 84 - case SCTLR_EL1: return read_sysreg_s(sctlr_EL12); 84 + case SCTLR_EL1: return read_sysreg_s(SYS_SCTLR_EL12); 85 85 case ACTLR_EL1: return read_sysreg_s(SYS_ACTLR_EL1); 86 - case CPACR_EL1: return read_sysreg_s(cpacr_EL12); 87 - case TTBR0_EL1: return read_sysreg_s(ttbr0_EL12); 88 - case TTBR1_EL1: return read_sysreg_s(ttbr1_EL12); 89 - case TCR_EL1: return read_sysreg_s(tcr_EL12); 90 - case ESR_EL1: return read_sysreg_s(esr_EL12); 91 - case AFSR0_EL1: return read_sysreg_s(afsr0_EL12); 92 - case AFSR1_EL1: return read_sysreg_s(afsr1_EL12); 93 - case FAR_EL1: return read_sysreg_s(far_EL12); 94 - case MAIR_EL1: return read_sysreg_s(mair_EL12); 95 - case VBAR_EL1: return read_sysreg_s(vbar_EL12); 96 - case CONTEXTIDR_EL1: return read_sysreg_s(contextidr_EL12); 86 + case CPACR_EL1: return read_sysreg_s(SYS_CPACR_EL12); 87 + case TTBR0_EL1: return read_sysreg_s(SYS_TTBR0_EL12); 88 + case TTBR1_EL1: return read_sysreg_s(SYS_TTBR1_EL12); 89 + case TCR_EL1: return read_sysreg_s(SYS_TCR_EL12); 90 + case ESR_EL1: return read_sysreg_s(SYS_ESR_EL12); 91 + case AFSR0_EL1: return read_sysreg_s(SYS_AFSR0_EL12); 92 + case AFSR1_EL1: return read_sysreg_s(SYS_AFSR1_EL12); 93 + case FAR_EL1: return read_sysreg_s(SYS_FAR_EL12); 94 + case MAIR_EL1: return read_sysreg_s(SYS_MAIR_EL12); 95 + case VBAR_EL1: return read_sysreg_s(SYS_VBAR_EL12); 96 + case CONTEXTIDR_EL1: return read_sysreg_s(SYS_CONTEXTIDR_EL12); 97 97 case TPIDR_EL0: return read_sysreg_s(SYS_TPIDR_EL0); 98 98 case TPIDRRO_EL0: return read_sysreg_s(SYS_TPIDRRO_EL0); 99 99 case TPIDR_EL1: return read_sysreg_s(SYS_TPIDR_EL1); 100 - case AMAIR_EL1: return read_sysreg_s(amair_EL12); 101 - case CNTKCTL_EL1: return read_sysreg_s(cntkctl_EL12); 100 + case AMAIR_EL1: return read_sysreg_s(SYS_AMAIR_EL12); 101 + case CNTKCTL_EL1: return read_sysreg_s(SYS_CNTKCTL_EL12); 102 102 case PAR_EL1: return read_sysreg_s(SYS_PAR_EL1); 103 103 
case DACR32_EL2: return read_sysreg_s(SYS_DACR32_EL2); 104 104 case IFSR32_EL2: return read_sysreg_s(SYS_IFSR32_EL2); ··· 124 124 */ 125 125 switch (reg) { 126 126 case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); return; 127 - case SCTLR_EL1: write_sysreg_s(val, sctlr_EL12); return; 127 + case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); return; 128 128 case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); return; 129 - case CPACR_EL1: write_sysreg_s(val, cpacr_EL12); return; 130 - case TTBR0_EL1: write_sysreg_s(val, ttbr0_EL12); return; 131 - case TTBR1_EL1: write_sysreg_s(val, ttbr1_EL12); return; 132 - case TCR_EL1: write_sysreg_s(val, tcr_EL12); return; 133 - case ESR_EL1: write_sysreg_s(val, esr_EL12); return; 134 - case AFSR0_EL1: write_sysreg_s(val, afsr0_EL12); return; 135 - case AFSR1_EL1: write_sysreg_s(val, afsr1_EL12); return; 136 - case FAR_EL1: write_sysreg_s(val, far_EL12); return; 137 - case MAIR_EL1: write_sysreg_s(val, mair_EL12); return; 138 - case VBAR_EL1: write_sysreg_s(val, vbar_EL12); return; 139 - case CONTEXTIDR_EL1: write_sysreg_s(val, contextidr_EL12); return; 129 + case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); return; 130 + case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); return; 131 + case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); return; 132 + case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); return; 133 + case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); return; 134 + case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); return; 135 + case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); return; 136 + case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); return; 137 + case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); return; 138 + case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); return; 139 + case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return; 140 140 case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); return; 141 141 case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); return; 142 142 case 
TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); return; 143 - case AMAIR_EL1: write_sysreg_s(val, amair_EL12); return; 144 - case CNTKCTL_EL1: write_sysreg_s(val, cntkctl_EL12); return; 143 + case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); return; 144 + case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); return; 145 145 case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); return; 146 146 case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); return; 147 147 case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); return;
+12 -12
virt/kvm/arm/arch_timer.c
··· 237 237 238 238 switch (index) { 239 239 case TIMER_VTIMER: 240 - cnt_ctl = read_sysreg_el0(cntv_ctl); 240 + cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL); 241 241 break; 242 242 case TIMER_PTIMER: 243 - cnt_ctl = read_sysreg_el0(cntp_ctl); 243 + cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL); 244 244 break; 245 245 case NR_KVM_TIMERS: 246 246 /* GCC is braindead */ ··· 350 350 351 351 switch (index) { 352 352 case TIMER_VTIMER: 353 - ctx->cnt_ctl = read_sysreg_el0(cntv_ctl); 354 - ctx->cnt_cval = read_sysreg_el0(cntv_cval); 353 + ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL); 354 + ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL); 355 355 356 356 /* Disable the timer */ 357 - write_sysreg_el0(0, cntv_ctl); 357 + write_sysreg_el0(0, SYS_CNTV_CTL); 358 358 isb(); 359 359 360 360 break; 361 361 case TIMER_PTIMER: 362 - ctx->cnt_ctl = read_sysreg_el0(cntp_ctl); 363 - ctx->cnt_cval = read_sysreg_el0(cntp_cval); 362 + ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL); 363 + ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL); 364 364 365 365 /* Disable the timer */ 366 - write_sysreg_el0(0, cntp_ctl); 366 + write_sysreg_el0(0, SYS_CNTP_CTL); 367 367 isb(); 368 368 369 369 break; ··· 429 429 430 430 switch (index) { 431 431 case TIMER_VTIMER: 432 - write_sysreg_el0(ctx->cnt_cval, cntv_cval); 432 + write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL); 433 433 isb(); 434 - write_sysreg_el0(ctx->cnt_ctl, cntv_ctl); 434 + write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL); 435 435 break; 436 436 case TIMER_PTIMER: 437 - write_sysreg_el0(ctx->cnt_cval, cntp_cval); 437 + write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL); 438 438 isb(); 439 - write_sysreg_el0(ctx->cnt_ctl, cntp_ctl); 439 + write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL); 440 440 break; 441 441 case NR_KVM_TIMERS: 442 442 BUG();