Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: KVM: Correctly handle zero register in system register accesses

System register accesses also use the zero register for Rt == 31, and
therefore using it will also result in getting the SP value instead. This
patch makes them also use the new accessors, introduced by the previous
patch. Since the register value is no longer directly associated with storage
inside the vCPU context structure, we introduce dedicated storage for it in
struct sys_reg_params.

This refactor also gets rid of the "massive hack" in kvm_handle_cp_64().

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>

authored by

Pavel Fedin and committed by
Marc Zyngier
2ec5be3d 3fec037d

+45 -48
+42 -45
arch/arm64/kvm/sys_regs.c
··· 97 97 struct sys_reg_params *p, 98 98 const struct sys_reg_desc *r) 99 99 { 100 - unsigned long val; 101 100 bool was_enabled = vcpu_has_cache_enabled(vcpu); 102 101 103 102 BUG_ON(!p->is_write); 104 103 105 - val = *vcpu_reg(vcpu, p->Rt); 106 104 if (!p->is_aarch32) { 107 - vcpu_sys_reg(vcpu, r->reg) = val; 105 + vcpu_sys_reg(vcpu, r->reg) = p->regval; 108 106 } else { 109 107 if (!p->is_32bit) 110 - vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; 111 - vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; 108 + vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval); 109 + vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval); 112 110 } 113 111 114 112 kvm_toggle_cache(vcpu, was_enabled); ··· 123 125 struct sys_reg_params *p, 124 126 const struct sys_reg_desc *r) 125 127 { 126 - u64 val; 127 - 128 128 if (!p->is_write) 129 129 return read_from_write_only(vcpu, p); 130 130 131 - val = *vcpu_reg(vcpu, p->Rt); 132 - vgic_v3_dispatch_sgi(vcpu, val); 131 + vgic_v3_dispatch_sgi(vcpu, p->regval); 133 132 134 133 return true; 135 134 } ··· 148 153 if (p->is_write) { 149 154 return ignore_write(vcpu, p); 150 155 } else { 151 - *vcpu_reg(vcpu, p->Rt) = (1 << 3); 156 + p->regval = (1 << 3); 152 157 return true; 153 158 } 154 159 } ··· 162 167 } else { 163 168 u32 val; 164 169 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); 165 - *vcpu_reg(vcpu, p->Rt) = val; 170 + p->regval = val; 166 171 return true; 167 172 } 168 173 } ··· 199 204 const struct sys_reg_desc *r) 200 205 { 201 206 if (p->is_write) { 202 - vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); 207 + vcpu_sys_reg(vcpu, r->reg) = p->regval; 203 208 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 204 209 } else { 205 - *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); 210 + p->regval = vcpu_sys_reg(vcpu, r->reg); 206 211 } 207 212 208 - trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt)); 213 + trace_trap_reg(__func__, r->reg, p->is_write, p->regval); 209 214 210 215 return 
true; 211 216 } ··· 223 228 struct sys_reg_params *p, 224 229 u64 *dbg_reg) 225 230 { 226 - u64 val = *vcpu_reg(vcpu, p->Rt); 231 + u64 val = p->regval; 227 232 228 233 if (p->is_32bit) { 229 234 val &= 0xffffffffUL; ··· 238 243 struct sys_reg_params *p, 239 244 u64 *dbg_reg) 240 245 { 241 - u64 val = *dbg_reg; 242 - 246 + p->regval = *dbg_reg; 243 247 if (p->is_32bit) 244 - val &= 0xffffffffUL; 245 - 246 - *vcpu_reg(vcpu, p->Rt) = val; 248 + p->regval &= 0xffffffffUL; 247 249 } 248 250 249 251 static inline bool trap_bvr(struct kvm_vcpu *vcpu, ··· 689 697 u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); 690 698 u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT); 691 699 692 - *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | 693 - (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | 694 - (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | 695 - (6 << 16) | (el3 << 14) | (el3 << 12)); 700 + p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | 701 + (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | 702 + (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) 703 + | (6 << 16) | (el3 << 14) | (el3 << 12)); 696 704 return true; 697 705 } 698 706 } ··· 702 710 const struct sys_reg_desc *r) 703 711 { 704 712 if (p->is_write) { 705 - vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); 713 + vcpu_cp14(vcpu, r->reg) = p->regval; 706 714 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 707 715 } else { 708 - *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); 716 + p->regval = vcpu_cp14(vcpu, r->reg); 709 717 } 710 718 711 719 return true; ··· 732 740 u64 val = *dbg_reg; 733 741 734 742 val &= 0xffffffffUL; 735 - val |= *vcpu_reg(vcpu, p->Rt) << 32; 743 + val |= p->regval << 32; 736 744 *dbg_reg = val; 737 745 738 746 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 739 747 } else { 740 - *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32; 748 + p->regval = *dbg_reg >> 32; 741 749 } 742 750 743 751 trace_trap_reg(__func__, rd->reg, 
p->is_write, *dbg_reg); ··· 1054 1062 { 1055 1063 struct sys_reg_params params; 1056 1064 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1065 + int Rt = (hsr >> 5) & 0xf; 1057 1066 int Rt2 = (hsr >> 10) & 0xf; 1058 1067 1059 1068 params.is_aarch32 = true; 1060 1069 params.is_32bit = false; 1061 1070 params.CRm = (hsr >> 1) & 0xf; 1062 - params.Rt = (hsr >> 5) & 0xf; 1063 1071 params.is_write = ((hsr & 1) == 0); 1064 1072 1065 1073 params.Op0 = 0; ··· 1068 1076 params.CRn = 0; 1069 1077 1070 1078 /* 1071 - * Massive hack here. Store Rt2 in the top 32bits so we only 1072 - * have one register to deal with. As we use the same trap 1079 + * Make a 64-bit value out of Rt and Rt2. As we use the same trap 1073 1080 * backends between AArch32 and AArch64, we get away with it. 1074 1081 */ 1075 1082 if (params.is_write) { 1076 - u64 val = *vcpu_reg(vcpu, params.Rt); 1077 - val &= 0xffffffff; 1078 - val |= *vcpu_reg(vcpu, Rt2) << 32; 1079 - *vcpu_reg(vcpu, params.Rt) = val; 1083 + params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; 1084 + params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; 1080 1085 } 1081 1086 1082 1087 if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) ··· 1084 1095 unhandled_cp_access(vcpu, &params); 1085 1096 1086 1097 out: 1087 - /* Do the opposite hack for the read side */ 1098 + /* Split up the value between registers for the read side */ 1088 1099 if (!params.is_write) { 1089 - u64 val = *vcpu_reg(vcpu, params.Rt); 1090 - val >>= 32; 1091 - *vcpu_reg(vcpu, Rt2) = val; 1100 + vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); 1101 + vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); 1092 1102 } 1093 1103 1094 1104 return 1; ··· 1106 1118 { 1107 1119 struct sys_reg_params params; 1108 1120 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1121 + int Rt = (hsr >> 5) & 0xf; 1109 1122 1110 1123 params.is_aarch32 = true; 1111 1124 params.is_32bit = true; 1112 1125 params.CRm = (hsr >> 1) & 0xf; 1113 - params.Rt = (hsr >> 5) & 0xf; 1126 + params.regval = 
vcpu_get_reg(vcpu, Rt); 1114 1127 params.is_write = ((hsr & 1) == 0); 1115 1128 params.CRn = (hsr >> 10) & 0xf; 1116 1129 params.Op0 = 0; 1117 1130 params.Op1 = (hsr >> 14) & 0x7; 1118 1131 params.Op2 = (hsr >> 17) & 0x7; 1119 1132 1120 - if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 1133 + if (!emulate_cp(vcpu, &params, target_specific, nr_specific) || 1134 + !emulate_cp(vcpu, &params, global, nr_global)) { 1135 + if (!params.is_write) 1136 + vcpu_set_reg(vcpu, Rt, params.regval); 1121 1137 return 1; 1122 - if (!emulate_cp(vcpu, &params, global, nr_global)) 1123 - return 1; 1138 + } 1124 1139 1125 1140 unhandled_cp_access(vcpu, &params); 1126 1141 return 1; ··· 1221 1230 { 1222 1231 struct sys_reg_params params; 1223 1232 unsigned long esr = kvm_vcpu_get_hsr(vcpu); 1233 + int Rt = (esr >> 5) & 0x1f; 1234 + int ret; 1224 1235 1225 1236 trace_kvm_handle_sys_reg(esr); 1226 1237 ··· 1233 1240 params.CRn = (esr >> 10) & 0xf; 1234 1241 params.CRm = (esr >> 1) & 0xf; 1235 1242 params.Op2 = (esr >> 17) & 0x7; 1236 - params.Rt = (esr >> 5) & 0x1f; 1243 + params.regval = vcpu_get_reg(vcpu, Rt); 1237 1244 params.is_write = !(esr & 1); 1238 1245 1239 - return emulate_sys_reg(vcpu, &params); 1246 + ret = emulate_sys_reg(vcpu, &params); 1247 + 1248 + if (!params.is_write) 1249 + vcpu_set_reg(vcpu, Rt, params.regval); 1250 + return ret; 1240 1251 } 1241 1252 1242 1253 /******************************************************************************
+2 -2
arch/arm64/kvm/sys_regs.h
··· 28 28 u8 CRn; 29 29 u8 CRm; 30 30 u8 Op2; 31 - u8 Rt; 31 + u64 regval; 32 32 bool is_write; 33 33 bool is_aarch32; 34 34 bool is_32bit; /* Only valid if is_aarch32 is true */ ··· 79 79 static inline bool read_zero(struct kvm_vcpu *vcpu, 80 80 struct sys_reg_params *p) 81 81 { 82 - *vcpu_reg(vcpu, p->Rt) = 0; 82 + p->regval = 0; 83 83 return true; 84 84 } 85 85
+1 -1
arch/arm64/kvm/sys_regs_generic_v8.c
··· 37 37 if (p->is_write) 38 38 return ignore_write(vcpu, p); 39 39 40 - *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); 40 + p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1); 41 41 return true; 42 42 } 43 43