Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 KVM fixes from Catalin Marinas:

- Don't drop references on LPIs that weren't visited by the vgic-debug
  iterator

- Cure lock ordering issue when unregistering vgic redistributors

- Fix for misaligned stage-2 mappings when VMs are backed by hugetlb
  pages

- Treat SGI registers as UNDEFINED if a VM hasn't been configured for
  GICv3

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
KVM: arm64: Make ICC_*SGI*_EL1 undef in the absence of a vGICv3
KVM: arm64: Ensure canonical IPA is hugepage-aligned when handling fault
KVM: arm64: vgic: Don't hold config_lock while unregistering redistributors
KVM: arm64: vgic-debug: Don't put unmarked LPIs

6 files changed, 33 insertions(+), 5 deletions(-)
arch/arm64/kvm/mmu.c | +8 -1

···
 		vma_pagesize = min(vma_pagesize, (long)max_map_size);
 	}
 
-	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
+	/*
+	 * Both the canonical IPA and fault IPA must be hugepage-aligned to
+	 * ensure we find the right PFN and lay down the mapping in the right
+	 * place.
+	 */
+	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
 		fault_ipa &= ~(vma_pagesize - 1);
+		ipa &= ~(vma_pagesize - 1);
+	}
 
 	gfn = ipa >> PAGE_SHIFT;
 	mte_allowed = kvm_vma_mte_allowed(vma);
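The masking itself is plain power-of-two arithmetic. A minimal userspace
sketch (the addresses and the 2MiB PMD size are hypothetical, chosen only
to show that the canonical IPA and the fault IPA each get rounded down to
their own hugepage boundary):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PMD_SIZE (2UL << 20)	/* 2MiB block mapping with 4K pages */

int main(void)
{
	/* Hypothetical values; the two IPAs are tracked separately. */
	uint64_t fault_ipa = 0x8800365000ULL;
	uint64_t ipa       = 0x0040365000ULL;

	/* Clear the low bits so both sit on a hugepage boundary. */
	fault_ipa &= ~(uint64_t)(DEMO_PMD_SIZE - 1);
	ipa       &= ~(uint64_t)(DEMO_PMD_SIZE - 1);

	/* Prints fault_ipa=0x8800200000 ipa=0x40200000 */
	printf("fault_ipa=%#llx ipa=%#llx\n",
	       (unsigned long long)fault_ipa, (unsigned long long)ipa);
	return 0;
}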
arch/arm64/kvm/sys_regs.c | +6

···
 #include <trace/events/kvm.h>
 
 #include "sys_regs.h"
+#include "vgic/vgic.h"
 
 #include "trace.h"
···
 			   const struct sys_reg_desc *r)
 {
 	bool g1;
+
+	if (!kvm_has_gicv3(vcpu->kvm)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
 
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p, r);
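With this check in place, a trapped ICC_*SGI*_EL1 access on a VM without a
GICv3 is no longer emulated: the handler injects an UNDEFINED exception and
returns false, so the guest's PC is not advanced past the faulting
instruction.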
arch/arm64/kvm/vgic/vgic-debug.c | +1 -1

···
 	struct vgic_irq *irq;
 	unsigned long intid;
 
-	xa_for_each(&dist->lpi_xa, intid, irq) {
+	xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
 		xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
 		vgic_put_irq(kvm, irq);
 	}
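The one-liner works because xarray marks act as a per-entry filter:
xa_for_each_marked() visits only entries carrying the given mark, so
references are dropped only for LPIs the debug iterator actually marked
(and took a reference on). A kernel-style sketch of the pattern, with
XA_MARK_0 standing in for LPI_XA_MARK_DEBUG_ITER:

#include <linux/xarray.h>

static DEFINE_XARRAY(demo_xa);

/*
 * Tear down state only for entries we previously marked; entries that
 * were never visited (and thus never marked) are skipped entirely,
 * instead of having a reference dropped that was never taken.
 */
static void demo_release_marked(void)
{
	unsigned long index;
	void *entry;

	xa_for_each_marked(&demo_xa, index, entry, XA_MARK_0)
		xa_clear_mark(&demo_xa, index, XA_MARK_0);
}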
arch/arm64/kvm/vgic/vgic-init.c | +6 -3

···
 	kfree(vgic_cpu->private_irqs);
 	vgic_cpu->private_irqs = NULL;
 
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		vgic_unregister_redist_iodev(vcpu);
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
 		vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
-	}
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
···
 	kvm_vgic_dist_destroy(kvm);
 
 	mutex_unlock(&kvm->arch.config_lock);
+
+	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			vgic_unregister_redist_iodev(vcpu);
+
 	mutex_unlock(&kvm->slots_lock);
 }
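Moving the vgic_unregister_redist_iodev() walk out from under config_lock
(but still under slots_lock) avoids the inversion: unregistering an iodev
synchronizes kvm->srcu, which must be ordered above config_lock, per the
lock-ordering comment added to vgic.c below (see the sketch after that
hunk).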
arch/arm64/kvm/vgic/vgic.c | +5

···
  * we have to disable IRQs before taking this lock and everything lower
  * than it.
  *
+ * The config_lock has additional ordering requirements:
+ * kvm->slots_lock
+ *   kvm->srcu
+ *     kvm->arch.config_lock
+ *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
  * If you are already holding a lock and need to take a higher one, you
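Expressed as code, the documented ordering looks like the following
nesting (a hypothetical demo function, not part of the patch; it only
illustrates acquisition order):

#include <linux/kvm_host.h>

static void demo_lock_order(struct kvm *kvm)
{
	int idx;

	mutex_lock(&kvm->slots_lock);		/* outermost */
	idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.config_lock);	/* innermost */

	/* ... work that may need all three ... */

	mutex_unlock(&kvm->arch.config_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->slots_lock);
}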
arch/arm64/kvm/vgic/vgic.h | +7

···
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
 
+static inline bool kvm_has_gicv3(struct kvm *kvm)
+{
+	return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
+		irqchip_in_kernel(kvm) &&
+		kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
+}
+
 #endif
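Note the static_branch_unlikely() first: kvm_vgic_global_state.gicv3_cpuif
is a static key, so on hosts without a GICv3 CPU interface the helper folds
down to a patched-out branch and costs the hot trap path essentially
nothing; the remaining terms confirm an in-kernel irqchip of the GICv3
model.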