Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.0

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

        cpuif->vgic_hcr |= ICH_HCR_UIE;
}

static bool lr_signals_eoi_mi(u64 lr_val)
{
        return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
               !(lr_val & ICH_LR_HW);
}

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        cpuif->vgic_hcr &= ~ICH_HCR_UIE;

        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                u64 val = cpuif->vgic_lr[lr];
                u32 intid, cpuid;
                struct vgic_irq *irq;
                bool is_v2_sgi = false;

                cpuid = val & GICH_LR_PHYSID_CPUID;
                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                        intid = val & ICH_LR_VIRTUAL_ID_MASK;
                } else {
                        intid = val & GICH_LR_VIRTUALID;
                        is_v2_sgi = vgic_irq_is_sgi(intid);
                }

                /* Notify fds when the guest EOI'ed a level-triggered IRQ */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
                                             intid - VGIC_NR_PRIVATE_IRQS);

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;

                raw_spin_lock(&irq->irq_lock);

                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);

                if (irq->active && is_v2_sgi)
                        irq->active_source = cpuid;

                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & ICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;

                        if (is_v2_sgi)
                                irq->source |= (1 << cpuid);
                }

                /*
                 * Clear soft pending state when level irqs have been acked.
                 */
                if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
                        irq->pending_latch = false;

                /*
                 * Level-triggered mapped IRQs are special because we only
                 * observe rising edges as input to the VGIC.
                 *
                 * If the guest never acked the interrupt we have to sample
                 * the physical line and set the line level, because the
                 * device state could have changed or we simply need to
                 * process the still pending interrupt later.
                 *
                 * If this causes us to lower the level, we have to also clear
                 * the physical active state, since we will otherwise never be
                 * told when the interrupt becomes asserted again.
                 */
                if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
                        irq->line_level = vgic_get_phys_line_level(irq);

                        if (!irq->line_level)
                                vgic_irq_set_phys_active(irq, false);
                }

                raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }

        vgic_cpu->used_lrs = 0;
}
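
The fold loop above is the consumer half of the list-register protocol: it reads back each ICH_LR value the hardware updated and propagates the state into the software model. Below is a minimal sketch of that decoding as a standalone userspace program; the LR_* constants are local copies assumed to match the kernel's ICH_LR_* bit layout (vINTID in bits [31:0], state in bits [63:62], and so on), and the whole file is illustrative rather than part of the kernel build.

/* lr_decode.c - standalone userspace sketch, not part of the kernel source. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local copies of the GICv3 ICH_LR<n>_EL2 field layout used above. */
#define LR_VIRTUAL_ID_MASK      0xffffffffULL   /* vINTID, bits [31:0] */
#define LR_EOI                  (1ULL << 41)    /* maintenance IRQ on EOI */
#define LR_PRIORITY_SHIFT       48
#define LR_GROUP                (1ULL << 60)
#define LR_HW                   (1ULL << 61)
#define LR_PENDING_BIT          (1ULL << 62)
#define LR_ACTIVE_BIT           (1ULL << 63)
#define LR_STATE                (3ULL << 62)    /* pending + active */

/* Same predicate as lr_signals_eoi_mi(): invalid, EOI requested, not HW. */
static bool signals_eoi_mi(uint64_t lr)
{
        return !(lr & LR_STATE) && (lr & LR_EOI) && !(lr & LR_HW);
}

int main(void)
{
        /* A pending, group-1, priority-0xa0 virtual interrupt 27. */
        uint64_t lr = 27 | LR_PENDING_BIT | LR_GROUP |
                      (0xa0ULL << LR_PRIORITY_SHIFT) | LR_EOI;

        printf("intid=%llu pending=%d\n",
               (unsigned long long)(lr & LR_VIRTUAL_ID_MASK),
               !!(lr & LR_PENDING_BIT));

        /* Still pending: no maintenance interrupt expected yet. */
        assert(!signals_eoi_mi(lr));

        /* Guest EOIs: hardware clears the state bits, the EOI bit remains. */
        lr &= ~LR_STATE;
        assert(signals_eoi_mi(lr));

        return 0;
}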

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u64 val = irq->intid;
        bool allow_pending = true, is_v2_sgi;

        is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
                     model == KVM_DEV_TYPE_ARM_VGIC_V2);

        if (irq->active) {
                val |= ICH_LR_ACTIVE_BIT;
                if (is_v2_sgi)
                        val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
                if (vgic_irq_is_multi_sgi(irq)) {
                        allow_pending = false;
                        val |= ICH_LR_EOI;
                }
        }

        if (irq->hw) {
                val |= ICH_LR_HW;
                val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
                /*
                 * Never set pending+active on a HW interrupt, as the
                 * pending state is kept at the physical distributor
                 * level.
                 */
                if (irq->active)
                        allow_pending = false;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        val |= ICH_LR_EOI;

                        /*
                         * Software resampling doesn't work very well
                         * if we allow P+A, so let's not do that.
                         */
                        if (irq->active)
                                allow_pending = false;
                }
        }

        if (allow_pending && irq_is_pending(irq)) {
                val |= ICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending_latch = false;

                if (vgic_irq_is_sgi(irq->intid) &&
                    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                        u32 src = ffs(irq->source);

                        BUG_ON(!src);
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
                                irq->pending_latch = true;
                                val |= ICH_LR_EOI;
                        }
                }
        }

        /*
         * Level-triggered mapped IRQs are special because we only observe
         * rising edges as input to the VGIC. We therefore lower the line
         * level here, so that we can take new virtual IRQs. See
         * vgic_v3_fold_lr_state for more info.
         */
        if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
                irq->line_level = false;

        if (irq->group)
                val |= ICH_LR_GROUP;

        val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
                        ICH_VMCR_ACK_CTL_MASK;
                vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
                        ICH_VMCR_FIQ_EN_MASK;
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE=1, the
                 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
                 */
                vmcr = ICH_VMCR_FIQ_EN_MASK;
        }

        vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
        vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
        vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
        vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
        vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
        vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
        vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

        cpu_if->vgic_vmcr = vmcr;
}
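
For GICv2-style multi-source SGIs, vgic_v3_populate_lr() drains one requesting CPU per list-register fill: ffs() picks the lowest set source bit, that bit is cleared, and the interrupt stays latched pending (with an EOI maintenance interrupt requested) while other sources remain. The standalone sketch below walks through that drain loop; the helper names and the printf reporting are illustrative only.

/* sgi_sources.c - illustrative userspace sketch of the GICv2 SGI drain
 * in vgic_v3_populate_lr(); not kernel code. */
#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        /* Pending SGI with two requesting sources: vcpus 0 and 2. */
        unsigned int source = (1U << 0) | (1U << 2);
        int round = 0;

        while (source) {
                int src = ffs(source);          /* lowest set bit, 1-based */
                int cpuid = src - 1;            /* CPUID encoded in the LR */

                source &= ~(1U << cpuid);       /* consume this source */
                /* If sources remain, the IRQ stays latched pending and
                 * the LR requests an EOI maintenance interrupt, exactly
                 * as the kernel code above does. */
                printf("round %d: LR carries cpuid %d, still pending: %s\n",
                       ++round, cpuid, source ? "yes" : "no");
        }
        return 0;
}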

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        vmcr = cpu_if->vgic_vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
                                ICH_VMCR_ACK_CTL_SHIFT;
                vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
                                ICH_VMCR_FIQ_EN_SHIFT;
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE=1, the
                 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
                 */
                vmcrp->fiqen = 1;
                vmcrp->ackctl = 0;
        }

        vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
        vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
        vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
        vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
        vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE                                           \
        (GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)           | \
         GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)    | \
         GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vgic_v3->vgic_vmcr = 0;

        /*
         * If we are emulating a GICv3, we do it in a non-GICv2-compatible
         * way, so we force SRE to 1 to demonstrate this to the guest.
         * Also, we don't support any form of IRQ/FIQ bypass.
         * This goes with the spec allowing the value to be RAO/WI.
         */
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
                                     ICC_SRE_EL1_DFB |
                                     ICC_SRE_EL1_SRE);
                vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
        } else {
                vgic_v3->vgic_sre = 0;
        }

        vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
                                           ICH_VTR_ID_BITS_MASK) >>
                                           ICH_VTR_ID_BITS_SHIFT;
        vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
                                            ICH_VTR_PRI_BITS_MASK) >>
                                            ICH_VTR_PRI_BITS_SHIFT) + 1;

        /* Get the show on the road... */
        vgic_v3->vgic_hcr = ICH_HCR_EN;
        if (group0_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
        if (group1_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
        if (common_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TC;
}
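
vgic_v3_enable() (and vgic_v3_probe() further down) derive the number of list registers, ID bits, and priority bits from ICH_VTR_EL2 by shift-and-mask, with the PRIbits field encoded as "bits minus one". A worked decode under those assumptions, with local VTR_* constants standing in for the kernel's ICH_VTR_* macros:

/* vtr_decode.c - userspace sketch of the ICH_VTR_EL2 field extraction. */
#include <stdint.h>
#include <stdio.h>

#define VTR_PRI_BITS_SHIFT      29
#define VTR_PRI_BITS_MASK       (7U << VTR_PRI_BITS_SHIFT)
#define VTR_ID_BITS_SHIFT       23
#define VTR_ID_BITS_MASK        (7U << VTR_ID_BITS_SHIFT)

int main(void)
{
        /* Example value: 4 list registers, PRIbits field 4, IDbits field 0. */
        uint32_t vtr = 3 | (4U << VTR_PRI_BITS_SHIFT);

        /* ListRegs is "number of LRs minus one"; bit 4 ignored as above. */
        unsigned int nr_lr = (vtr & 0xf) + 1;
        unsigned int id_bits = (vtr & VTR_ID_BITS_MASK) >> VTR_ID_BITS_SHIFT;
        unsigned int pri_bits =
                ((vtr & VTR_PRI_BITS_MASK) >> VTR_PRI_BITS_SHIFT) + 1;

        printf("nr_lr=%u id_bits=%u pri_bits=%u\n", nr_lr, id_bits, pri_bits);
        return 0;       /* prints: nr_lr=4 id_bits=0 pri_bits=5 */
}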

int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
        struct kvm_vcpu *vcpu;
        int byte_offset, bit_nr;
        gpa_t pendbase, ptr;
        bool status;
        u8 val;
        int ret;
        unsigned long flags;

retry:
        vcpu = irq->target_vcpu;
        if (!vcpu)
                return 0;

        pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

        byte_offset = irq->intid / BITS_PER_BYTE;
        bit_nr = irq->intid % BITS_PER_BYTE;
        ptr = pendbase + byte_offset;

        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
        if (ret)
                return ret;

        status = val & (1 << bit_nr);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->target_vcpu != vcpu) {
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                goto retry;
        }
        irq->pending_latch = status;
        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

        if (status) {
                /* clear consumed data */
                val &= ~(1 << bit_nr);
                ret = kvm_write_guest(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int last_byte_offset = -1;
        struct vgic_irq *irq;
        int ret;
        u8 val;

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                int byte_offset, bit_nr;
                struct kvm_vcpu *vcpu;
                gpa_t pendbase, ptr;
                bool stored;

                vcpu = irq->target_vcpu;
                if (!vcpu)
                        continue;

                pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

                byte_offset = irq->intid / BITS_PER_BYTE;
                bit_nr = irq->intid % BITS_PER_BYTE;
                ptr = pendbase + byte_offset;

                if (byte_offset != last_byte_offset) {
                        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
                        if (ret)
                                return ret;
                        last_byte_offset = byte_offset;
                }

                stored = val & (1U << bit_nr);
                if (stored == irq->pending_latch)
                        continue;

                if (irq->pending_latch)
                        val |= 1 << bit_nr;
                else
                        val &= ~(1 << bit_nr);

                ret = kvm_write_guest(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of the region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                if ((base + size > rdreg->base) &&
                    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
                        return true;
        }
        return false;
}
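
Both LPI functions above address the pending table as a plain bitmap: one bit per INTID, so the byte offset is intid / 8 and the bit index is intid % 8, relative to the address programmed in GICR_PENDBASER. A small worked example of that arithmetic (the base address is hypothetical):

/* pendtable_addr.c - worked example of the pending-table addressing
 * used by vgic_v3_lpi_sync_pending_status() and
 * vgic_v3_save_pending_tables(); userspace sketch. */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
        uint64_t pendbase = 0x80000000ULL;      /* hypothetical PENDBASER address */
        uint32_t intids[] = { 8192, 8195, 8200 };       /* LPIs start at 8192 */

        for (unsigned int i = 0; i < sizeof(intids) / sizeof(intids[0]); i++) {
                uint32_t intid = intids[i];
                uint32_t byte_offset = intid / BITS_PER_BYTE;
                uint32_t bit_nr = intid % BITS_PER_BYTE;

                printf("intid %u -> byte 0x%llx, bit %u\n", intid,
                       (unsigned long long)(pendbase + byte_offset), bit_nr);
        }
        /* intid 8192 -> byte pendbase + 1024, bit 0
         * intid 8195 -> byte pendbase + 1024, bit 3
         * intid 8200 -> byte pendbase + 1025, bit 0 */
        return 0;
}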

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
            d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
                return false;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
                        rdreg->base)
                        return false;
        }

        if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
                return true;

        return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
                                      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (!vgic_v3_redist_region_full(rdreg))
                        return rdreg;
        }
        return NULL;
}

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
                                                           u32 index)
{
        struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (rdreg->index == index)
                        return rdreg;
        }
        return NULL;
}
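
The two sanity checks above reduce to a pair of one-line predicates: a region crosses the end of the address space exactly when base + size overflows in unsigned arithmetic, and two half-open regions overlap exactly when each one starts before the other ends. A self-contained sketch of both tests:

/* range_checks.c - userspace sketch of the address sanity checks in
 * vgic_v3_check_base() and vgic_v3_rdist_overlap(). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* The region wraps past the end of the address space iff base + size
 * overflows; for unsigned arithmetic that means the sum is smaller
 * than base, which is the same test vgic_v3_check_base() uses. */
static bool region_wraps(uint64_t base, uint64_t size)
{
        return base + size < base;
}

/* [a, a+as) and [b, b+bs) overlap iff each starts before the other
 * ends; this is the predicate in vgic_v3_rdist_overlap(). */
static bool regions_overlap(uint64_t a, uint64_t as, uint64_t b, uint64_t bs)
{
        return (a + as > b) && (a < b + bs);
}

int main(void)
{
        assert(!region_wraps(0x8000000, 0x10000));
        assert(region_wraps(UINT64_MAX - 0xfff, 0x10000));

        /* Adjacent regions do not overlap; nested ones do. */
        assert(!regions_overlap(0x0, 0x10000, 0x10000, 0x10000));
        assert(regions_overlap(0x0, 0x30000, 0x10000, 0x10000));
        return 0;
}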

int vgic_v3_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int ret = 0;
        int c;

        if (vgic_ready(kvm))
                goto out;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

                if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
                        kvm_debug("vcpu %d redistributor base not set\n", c);
                        ret = -ENXIO;
                        goto out;
                }
        }

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        if (!vgic_v3_check_base(kvm)) {
                kvm_err("VGIC redist and dist frames overlap\n");
                ret = -EINVAL;
                goto out;
        }

        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm)) {
                ret = -EBUSY;
                goto out;
        }

        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
        if (ret) {
                kvm_err("Unable to register VGICv3 dist MMIO regions\n");
                goto out;
        }

        dist->ready = true;

out:
        return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
        return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
        return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
        return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
        return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
        u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
        int ret;

        /*
         * The ListRegs field is 5 bits, but there is an architectural
         * maximum of 16 list registers. Just ignore bit 4...
         */
        kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
        kvm_vgic_global_state.can_emulate_gicv2 = false;
        kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

        /* GICv4 support? */
        if (info->has_v4) {
                kvm_vgic_global_state.has_gicv4 = gicv4_enable;
                kvm_info("GICv4 support %sabled\n",
                         gicv4_enable ? "en" : "dis");
        }

        if (!info->vcpu.start) {
                kvm_info("GICv3: no GICV resource entry\n");
                kvm_vgic_global_state.vcpu_base = 0;
        } else if (!PAGE_ALIGNED(info->vcpu.start)) {
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
                kvm_vgic_global_state.vcpu_base = 0;
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
                ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
                if (ret) {
                        kvm_err("Cannot register GICv2 KVM device.\n");
                        return ret;
                }
                kvm_info("vgic-v2@%llx\n", info->vcpu.start);
        }
        ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
        if (ret) {
                kvm_err("Cannot register GICv3 KVM device.\n");
                kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
                return ret;
        }

        if (kvm_vgic_global_state.vcpu_base == 0)
                kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
                group0_trap = true;
                group1_trap = true;
        }
#endif

        if (group0_trap || group1_trap || common_trap) {
                kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
                         group0_trap ? "G0" : "",
                         group1_trap ? "G1" : "",
                         common_trap ? "C" : "");
                static_branch_enable(&vgic_v3_cpuif_trap);
        }

        kvm_vgic_global_state.vctrl_base = NULL;
        kvm_vgic_global_state.type = VGIC_V3;
        kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

        return 0;
}
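
Each early_param() handler above just hands its argument to strtobool(), so booting with "kvm-arm.vgic_v3_group0_trap=1" on the kernel command line flips the corresponding flag before the probe runs. The sketch below implements a simplified parser that accepts only the y/Y/1 and n/N/0 spellings; the kernel helper accepts a few more forms.

/* early_bool.c - minimal userspace sketch of the strtobool()-style
 * boot-parameter parsing pattern used above. */
#include <stdbool.h>
#include <stdio.h>

static int parse_bool(const char *s, bool *res)
{
        if (!s)
                return -1;
        switch (s[0]) {
        case 'y': case 'Y': case '1':
                *res = true;
                return 0;
        case 'n': case 'N': case '0':
                *res = false;
                return 0;
        default:
                return -1;      /* the kernel returns -EINVAL here */
        }
}

int main(void)
{
        bool group0_trap = false;

        /* e.g. booting with kvm-arm.vgic_v3_group0_trap=1 */
        if (!parse_bool("1", &group0_trap))
                printf("group0_trap = %d\n", group0_trap);      /* prints 1 */
        return 0;
}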
"C" : ""); 649 static_branch_enable(&vgic_v3_cpuif_trap); 650 } 651 652 kvm_vgic_global_state.vctrl_base = NULL; 653 kvm_vgic_global_state.type = VGIC_V3; 654 kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS; 655 656 return 0; 657} 658 659void vgic_v3_load(struct kvm_vcpu *vcpu) 660{ 661 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 662 663 /* 664 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen 665 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the 666 * VMCR_EL2 save/restore in the world switch. 667 */ 668 if (likely(cpu_if->vgic_sre)) 669 kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr); 670 671 kvm_call_hyp(__vgic_v3_restore_aprs, vcpu); 672 673 if (has_vhe()) 674 __vgic_v3_activate_traps(vcpu); 675} 676 677void vgic_v3_put(struct kvm_vcpu *vcpu) 678{ 679 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 680 681 if (likely(cpu_if->vgic_sre)) 682 cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr); 683 684 kvm_call_hyp(__vgic_v3_save_aprs, vcpu); 685 686 if (has_vhe()) 687 __vgic_v3_deactivate_traps(vcpu); 688}