
KVM: arm64: Add RMW specific sysreg accessor

In a number of cases, we perform a Read-Modify-Write operation on
a system register, meaning that we would apply the RESx masks twice.

Instead, provide a new accessor that performs this RMW operation,
allowing the masks to be applied exactly once per operation.

Reviewed-by: Miguel Luis <miguel.luis@oracle.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250603070824.1192795-3-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
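
The double application is easiest to see from the two pre-existing accessors: reading a register sanitises the stored value, and writing it back sanitises it again, so composing a read and a write for an RMW pays the RESx masking twice. Below is a minimal userspace sketch of that shape, not kernel code: mock_regs, mock_apply_masks(), mock_read(), mock_write() and mock_rmw() are all invented stand-ins for the vCPU context and its accessors, and the "RES0 bit 0" mask is made up purely for illustration.

	/*
	 * Toy model of the idea, NOT the kernel implementation.
	 * Build with: cc -o rmw-demo rmw-demo.c
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t mock_regs[4];

	/* Stand-in for the RESx sanitisation: pretend bit 0 is RES0 everywhere. */
	static uint64_t mock_apply_masks(int r, uint64_t v)
	{
		(void)r;
		return v & ~(uint64_t)1;
	}

	/* Old shape: separate read and write accessors, each sanitising. */
	#define mock_read(r)		mock_apply_masks((r), mock_regs[(r)])
	#define mock_write(r, v)	(mock_regs[(r)] = mock_apply_masks((r), (v)))

	/* New shape: one RMW accessor, so the mask is applied exactly once. */
	#define mock_rmw(r, op, val)					\
		do {							\
			uint64_t __v = mock_regs[(r)];			\
			__v op (val);					\
			mock_regs[(r)] = mock_apply_masks((r), __v);	\
		} while (0)

	int main(void)
	{
		/* RMW, old style: the mask is applied on the read and again on the write. */
		mock_write(0, mock_read(0) | 0x6);

		/* RMW, new style: the mask is applied once, on the way back to storage. */
		mock_rmw(1, |=, 0x6);

		/* Same result either way; the difference is how often the mask ran. */
		printf("old=%llx new=%llx\n",
		       (unsigned long long)mock_regs[0],
		       (unsigned long long)mock_regs[1]);
		return 0;
	}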

+32 -21
+11
arch/arm64/include/asm/kvm_host.h
···
 		ctxt_sys_reg(ctxt, (r)) = __v;				\
 	} while (0)
 
+#define __vcpu_rmw_sys_reg(v, r, op, val)				\
+	do {								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 __v = ctxt_sys_reg(ctxt, (r));			\
+		__v op (val);						\
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)\
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
+									\
+		ctxt_sys_reg(ctxt, (r)) = __v;				\
+	} while (0)
+
 #define __vcpu_sys_reg(v,r)						\
 	(*({								\
 		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
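
For illustration only, a call such as __vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK) (as used in debug.c below) hand-expands roughly as follows; note how the operator token passed as 'op' is spliced directly into the expression, and how the masks only fire for nested-virt guests on registers at or above __SANITISED_REG_START__. The expansion is lightly tidied for readability and is not compiler output.

	do {
		const struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
		u64 __v = ctxt_sys_reg(ctxt, OSLSR_EL1);
		__v |= (OSLSR_EL1_OSLK);
		if (vcpu_has_nv(vcpu) && OSLSR_EL1 >= __SANITISED_REG_START__)
			__v = kvm_vcpu_apply_reg_masks(vcpu, OSLSR_EL1, __v);

		ctxt_sys_reg(ctxt, OSLSR_EL1) = __v;
	} while (0);
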
+2 -2
arch/arm64/kvm/debug.c
···
 void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
 {
 	if (val & OSLAR_EL1_OSLK)
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
 	else
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
 
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
+2 -2
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
···
 	 */
 	val = read_sysreg_el1(SYS_CNTKCTL);
 	val &= CNTKCTL_VALID_BITS;
-	__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
-	__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
+	__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
+	__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
 	}
 
 	__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
+1 -1
arch/arm64/kvm/nested.c
···
 
 out:
 	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
-		(void)__vcpu_sys_reg(vcpu, sr);
+		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
 
 	return 0;
 }
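
A note on the |=, 0 form used here and again in the sys_regs.c reset path below: OR-ing with zero leaves the value unchanged, so the accessor is invoked purely for its side effect of re-applying the sanitisation masks to the stored value, which the old code obtained from the side effect of the discarded (void) read. Hand-expanded (comments added for illustration, not part of the patch), each loop iteration does roughly:

	const struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
	u64 __v = ctxt_sys_reg(ctxt, sr);	/* current stored value */
	__v |= (0);				/* no-op "modification" */
	if (vcpu_has_nv(vcpu) && sr >= __SANITISED_REG_START__)
		__v = kvm_vcpu_apply_reg_masks(vcpu, sr, __v);
	ctxt_sys_reg(ctxt, sr) = __v;		/* write back the sanitised value */
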
+5 -5
arch/arm64/kvm/pmu-emul.c
···
 			continue;
 
 		/* Mark overflow */
-		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));
 
 		if (kvm_pmu_counter_can_chain(pmc))
 			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
···
 	perf_event->attr.sample_period = period;
 	perf_event->hw.sample_period = period;
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));
 
 	if (kvm_pmu_counter_can_chain(pmc))
 		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
···
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
-	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
-	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);
 
 	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
+11 -11
arch/arm64/kvm/sys_regs.c
···
 		mask |= GENMASK(n - 1, 0);
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 		return 0;
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
···
 		val = p->regval & mask;
 		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
 		else
 			/* accessing PMCNTENCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
 
 		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
···
 
 		if (r->Op2 & 0x1)
 			/* accessing PMINTENSET_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
 		else
 			/* accessing PMINTENCLR_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 	}
···
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
 		else
 			/* accessing PMOVSCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 	}
···
 		r->reset(vcpu, r);
 
 		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
-			(void)__vcpu_sys_reg(vcpu, r->reg);
+			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
 	}
 
 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);