Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: arm64: vgic-v3: Make the userspace accessors use sysreg API

The vgic-v3 sysreg accessors have been ignored while the rest of the
sysreg internal API evolved, and are stuck with the .access method
(which is normally reserved for the guest's own accesses) for
userspace accesses (which should use the .set_user()/.get_user()
methods).

Catch up with the program and repaint all the accessors so that
they fit into the normal userspace model, and plug the result into
the helpers that have been introduced earlier.

Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>

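For reference, the distinction this patch leans on is the split between the
two callback families in struct sys_reg_desc (arch/arm64/kvm/sys_regs.h):
.access() services trapped guest MSR/MRS accesses through a struct
sys_reg_params, while .set_user()/.get_user() service userspace save/restore
on a plain u64 and return 0 or -errno. A roughly abridged sketch of the
descriptor (unrelated fields omitted, ordering approximate):

/*
 * Abridged sketch of struct sys_reg_desc; see arch/arm64/kvm/sys_regs.h
 * for the full definition. Only the fields relevant to this patch are shown.
 */
struct sys_reg_desc {
	const char *name;

	/* MRS/MSR instruction encoding of the register. */
	u8 Op0, Op1, CRn, CRm, Op2;

	/* Trapped access from the guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r);

	/* Userspace save/restore: plain u64 in/out, 0 or -errno back. */
	int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			u64 *val);
	int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			u64 val);
};

Before this patch the vgic-v3 table only filled in .access and funnelled the
userspace path through it; after it, each register gets a set_*/get_* pair
matching the signatures above.
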
+258 -198
arch/arm64/kvm/vgic-sys-reg-v3.c
···
 #include "vgic/vgic.h"
 #include "sys_regs.h"
 
-static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r)
+static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 val)
 {
 	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
+	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_vmcr vmcr;
+
+	vgic_get_vmcr(vcpu, &vmcr);
+
+	/*
+	 * Disallow restoring VM state if not supported by this
+	 * hardware.
+	 */
+	host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
+			 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
+	if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
+		return -EINVAL;
+
+	vgic_v3_cpu->num_pri_bits = host_pri_bits;
+
+	host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
+		ICC_CTLR_EL1_ID_BITS_SHIFT;
+	if (host_id_bits > vgic_v3_cpu->num_id_bits)
+		return -EINVAL;
+
+	vgic_v3_cpu->num_id_bits = host_id_bits;
+
+	host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
+		      ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
+	seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
+		ICC_CTLR_EL1_SEIS_SHIFT;
+	if (host_seis != seis)
+		return -EINVAL;
+
+	host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
+		     ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
+	a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
+	if (host_a3v != a3v)
+		return -EINVAL;
+
+	/*
+	 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
+	 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
+	 */
+	vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+	vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
+	vgic_set_vmcr(vcpu, &vmcr);
+
+	return 0;
+}
+
+static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 *valp)
+{
 	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_vmcr vmcr;
 	u64 val;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (p->is_write) {
-		val = p->regval;
+	val = 0;
+	val |= (vgic_v3_cpu->num_pri_bits - 1) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
+	val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
+	val |= ((kvm_vgic_global_state.ich_vtr_el2 &
+		 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
+		ICC_CTLR_EL1_SEIS_SHIFT;
+	val |= ((kvm_vgic_global_state.ich_vtr_el2 &
+		 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
+		ICC_CTLR_EL1_A3V_SHIFT;
+	/*
+	 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
+	 * Extract it directly using ICC_CTLR_EL1 reg definitions.
+	 */
+	val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+	val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
-		/*
-		 * Disallow restoring VM state if not supported by this
-		 * hardware.
-		 */
-		host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
-				 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
-		if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
-			return false;
+	*valp = val;
 
-		vgic_v3_cpu->num_pri_bits = host_pri_bits;
-
-		host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
-			ICC_CTLR_EL1_ID_BITS_SHIFT;
-		if (host_id_bits > vgic_v3_cpu->num_id_bits)
-			return false;
-
-		vgic_v3_cpu->num_id_bits = host_id_bits;
-
-		host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
-			      ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
-		seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
-			ICC_CTLR_EL1_SEIS_SHIFT;
-		if (host_seis != seis)
-			return false;
-
-		host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
-			     ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
-		a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
-		if (host_a3v != a3v)
-			return false;
-
-		/*
-		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
-		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
-		 */
-		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
-		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
-		vgic_set_vmcr(vcpu, &vmcr);
-	} else {
-		val = 0;
-		val |= (vgic_v3_cpu->num_pri_bits - 1) <<
-			ICC_CTLR_EL1_PRI_BITS_SHIFT;
-		val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
-		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
-			 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
-			ICC_CTLR_EL1_SEIS_SHIFT;
-		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
-			 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
-			ICC_CTLR_EL1_A3V_SHIFT;
-		/*
-		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
-		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
-		 */
-		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
-		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-
-		p->regval = val;
-	}
-
-	return true;
+	return 0;
 }
 
-static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			   const struct sys_reg_desc *r)
+static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		       u64 val)
 {
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (p->is_write) {
-		vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
-		vgic_set_vmcr(vcpu, &vmcr);
-	} else {
-		p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
-	}
+	vmcr.pmr = (val & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
+	vgic_set_vmcr(vcpu, &vmcr);
 
-	return true;
+	return 0;
 }
 
-static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r)
+static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		       u64 *val)
 {
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (p->is_write) {
-		vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
-			ICC_BPR0_EL1_SHIFT;
-		vgic_set_vmcr(vcpu, &vmcr);
-	} else {
-		p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
-			ICC_BPR0_EL1_MASK;
-	}
+	*val = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
 
-	return true;
+	return 0;
 }
 
-static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r)
+static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 val)
 {
 	struct vgic_vmcr vmcr;
 
-	if (!p->is_write)
-		p->regval = 0;
+	vgic_get_vmcr(vcpu, &vmcr);
+	vmcr.bpr = (val & ICC_BPR0_EL1_MASK) >> ICC_BPR0_EL1_SHIFT;
+	vgic_set_vmcr(vcpu, &vmcr);
+
+	return 0;
+}
+
+static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 *val)
+{
+	struct vgic_vmcr vmcr;
+
+	vgic_get_vmcr(vcpu, &vmcr);
+	*val = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) & ICC_BPR0_EL1_MASK;
+
+	return 0;
+}
+
+static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 val)
+{
+	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
 	if (!vmcr.cbpr) {
-		if (p->is_write) {
-			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
-				ICC_BPR1_EL1_SHIFT;
-			vgic_set_vmcr(vcpu, &vmcr);
-		} else {
-			p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
-				ICC_BPR1_EL1_MASK;
-		}
-	} else {
-		if (!p->is_write)
-			p->regval = min((vmcr.bpr + 1), 7U);
+		vmcr.abpr = (val & ICC_BPR1_EL1_MASK) >> ICC_BPR1_EL1_SHIFT;
+		vgic_set_vmcr(vcpu, &vmcr);
 	}
 
-	return true;
+	return 0;
 }
 
-static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			      const struct sys_reg_desc *r)
+static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 *val)
 {
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (p->is_write) {
-		vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
-			ICC_IGRPEN0_EL1_SHIFT;
-		vgic_set_vmcr(vcpu, &vmcr);
-	} else {
-		p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
-			ICC_IGRPEN0_EL1_MASK;
-	}
+	if (!vmcr.cbpr)
+		*val = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) & ICC_BPR1_EL1_MASK;
+	else
+		*val = min((vmcr.bpr + 1), 7U);
 
-	return true;
+
+	return 0;
 }
 
-static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			      const struct sys_reg_desc *r)
+static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			  u64 val)
 {
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (p->is_write) {
-		vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
-			ICC_IGRPEN1_EL1_SHIFT;
-		vgic_set_vmcr(vcpu, &vmcr);
-	} else {
-		p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
-			ICC_IGRPEN1_EL1_MASK;
-	}
+	vmcr.grpen0 = (val & ICC_IGRPEN0_EL1_MASK) >> ICC_IGRPEN0_EL1_SHIFT;
+	vgic_set_vmcr(vcpu, &vmcr);
 
-	return true;
+	return 0;
 }
 
-static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
-				   struct sys_reg_params *p, u8 apr, u8 idx)
+static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			  u64 *val)
+{
+	struct vgic_vmcr vmcr;
+
+	vgic_get_vmcr(vcpu, &vmcr);
+	*val = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) & ICC_IGRPEN0_EL1_MASK;
+
+	return 0;
+}
+
+static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			  u64 val)
+{
+	struct vgic_vmcr vmcr;
+
+	vgic_get_vmcr(vcpu, &vmcr);
+	vmcr.grpen1 = (val & ICC_IGRPEN1_EL1_MASK) >> ICC_IGRPEN1_EL1_SHIFT;
+	vgic_set_vmcr(vcpu, &vmcr);
+
+	return 0;
+}
+
+static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			  u64 *val)
+{
+	struct vgic_vmcr vmcr;
+
+	vgic_get_vmcr(vcpu, &vmcr);
+	*val = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) & ICC_IGRPEN1_EL1_MASK;
+
+	return 0;
+}
+
+static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
 {
 	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
-	uint32_t *ap_reg;
 
 	if (apr)
-		ap_reg = &vgicv3->vgic_ap1r[idx];
+		vgicv3->vgic_ap1r[idx] = val;
 	else
-		ap_reg = &vgicv3->vgic_ap0r[idx];
-
-	if (p->is_write)
-		*ap_reg = p->regval;
-	else
-		p->regval = *ap_reg;
+		vgicv3->vgic_ap0r[idx] = val;
 }
 
-static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r, u8 apr)
+static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
+{
+	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	if (apr)
+		return vgicv3->vgic_ap1r[idx];
+	else
+		return vgicv3->vgic_ap0r[idx];
+}
+
+static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 val)
+
 {
 	u8 idx = r->Op2 & 3;
 
 	if (idx > vgic_v3_max_apr_idx(vcpu))
-		goto err;
+		return -EINVAL;
 
-	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-	return true;
-err:
-	if (!p->is_write)
-		p->regval = 0;
-
-	return false;
+	set_apr_reg(vcpu, val, 0, idx);
+	return 0;
 }
 
-static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r)
+static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 *val)
+{
+	u8 idx = r->Op2 & 3;
+
+	if (idx > vgic_v3_max_apr_idx(vcpu))
+		return -EINVAL;
+
+	*val = get_apr_reg(vcpu, 0, idx);
+
+	return 0;
+}
+
+static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 val)
 
 {
-	return access_gic_aprn(vcpu, p, r, 0);
+	u8 idx = r->Op2 & 3;
+
+	if (idx > vgic_v3_max_apr_idx(vcpu))
+		return -EINVAL;
+
+	set_apr_reg(vcpu, val, 1, idx);
+	return 0;
 }
 
-static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r)
+static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+			u64 *val)
 {
-	return access_gic_aprn(vcpu, p, r, 1);
+	u8 idx = r->Op2 & 3;
+
+	if (idx > vgic_v3_max_apr_idx(vcpu))
+		return -EINVAL;
+
+	*val = get_apr_reg(vcpu, 1, idx);
+
+	return 0;
 }
 
-static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			   const struct sys_reg_desc *r)
+static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		       u64 val)
+{
+	/* Validate SRE bit */
+	if (!(val & ICC_SRE_EL1_SRE))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		       u64 *val)
 {
 	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	/* Validate SRE bit */
-	if (p->is_write) {
-		if (!(p->regval & ICC_SRE_EL1_SRE))
-			return false;
-	} else {
-		p->regval = vgicv3->vgic_sre;
-	}
+	*val = vgicv3->vgic_sre;
 
-	return true;
+	return 0;
 }
+
 static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
-	{ SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
-	{ SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
-	{ SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
-	{ SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
-	{ SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
-	{ SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
-	{ SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
-	{ SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
-	{ SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
-	{ SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
-	{ SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
-	{ SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
-	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
-	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
-	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
+	{ SYS_DESC(SYS_ICC_PMR_EL1),
+	  .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
+	{ SYS_DESC(SYS_ICC_BPR0_EL1),
+	  .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
+	{ SYS_DESC(SYS_ICC_AP0R0_EL1),
+	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+	{ SYS_DESC(SYS_ICC_AP0R1_EL1),
+	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+	{ SYS_DESC(SYS_ICC_AP0R2_EL1),
+	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+	{ SYS_DESC(SYS_ICC_AP0R3_EL1),
+	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+	{ SYS_DESC(SYS_ICC_AP1R0_EL1),
+	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+	{ SYS_DESC(SYS_ICC_AP1R1_EL1),
+	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+	{ SYS_DESC(SYS_ICC_AP1R2_EL1),
+	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+	{ SYS_DESC(SYS_ICC_AP1R3_EL1),
+	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+	{ SYS_DESC(SYS_ICC_BPR1_EL1),
+	  .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
+	{ SYS_DESC(SYS_ICC_CTLR_EL1),
+	  .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
+	{ SYS_DESC(SYS_ICC_SRE_EL1),
+	  .set_user = set_gic_sre, .get_user = get_gic_sre, },
+	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1),
+	  .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
+	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
+	  .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
 };
 
 static u64 attr_to_id(u64 attr)
···
 				   struct kvm_device_attr *attr,
 				   bool is_write)
 {
-	u64 __user *uaddr = (u64 __user *)(long)attr->addr;
-	struct sys_reg_params params;
-	const struct sys_reg_desc *r;
-	u64 sysreg;
+	struct kvm_one_reg reg = {
+		.id = attr_to_id(attr->attr),
+		.addr = attr->addr,
+	};
 
-	sysreg = attr_to_id(attr->attr);
-
-	if (is_write) {
-		if (get_user(params.regval, uaddr))
-			return -EFAULT;
-	}
-	params.is_write = is_write;
-
-	r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
-			   ARRAY_SIZE(gic_v3_icc_reg_descs));
-	if (!r)
-		return -ENXIO;
-
-	if (!r->access(vcpu, &params, r))
-		return -EINVAL;
-
-	if (!is_write) {
-		if (put_user(params.regval, uaddr))
-			return -EFAULT;
-	}
-
-	return 0;
+	if (is_write)
+		return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
+					    ARRAY_SIZE(gic_v3_icc_reg_descs));
+	else
+		return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
+					    ARRAY_SIZE(gic_v3_icc_reg_descs));
 }