Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 kvm fixes from Catalin Marinas:

- Don't drop references on LPIs that weren't visited by the vgic-debug
  iterator

- Cure a lock ordering issue when unregistering vgic redistributors

- Fix misaligned stage-2 mappings when VMs are backed by hugetlb pages

- Treat SGI registers as UNDEFINED if a VM hasn't been configured for
  GICv3

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  KVM: arm64: Make ICC_*SGI*_EL1 undef in the absence of a vGICv3
  KVM: arm64: Ensure canonical IPA is hugepage-aligned when handling fault
  KVM: arm64: vgic: Don't hold config_lock while unregistering redistributors
  KVM: arm64: vgic-debug: Don't put unmarked LPIs

+33 -5
+8 -1
arch/arm64/kvm/mmu.c
···
 		vma_pagesize = min(vma_pagesize, (long)max_map_size);
 	}
 
-	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
+	/*
+	 * Both the canonical IPA and fault IPA must be hugepage-aligned to
+	 * ensure we find the right PFN and lay down the mapping in the right
+	 * place.
+	 */
+	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
 		fault_ipa &= ~(vma_pagesize - 1);
+		ipa &= ~(vma_pagesize - 1);
+	}
 
 	gfn = ipa >> PAGE_SHIFT;
 	mte_allowed = kvm_vma_mte_allowed(vma);
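As an aside, a minimal userspace sketch (not kernel code) of the problem the hunk above addresses: if only fault_ipa is rounded down to the hugepage boundary, the canonical IPA used to derive the gfn still points into the middle of the huge page, so the backing PFN is looked up at the wrong offset. The PAGE_SHIFT/PMD_SIZE values assume a 4K-page arm64 configuration, and the addresses are invented.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4K pages */
#define PMD_SIZE	(1UL << 21)		/* 2M huge page */

int main(void)
{
	uint64_t ipa = 0x40123456;		/* canonical IPA of the page */
	uint64_t fault_ipa = 0x80123456;	/* same page faulted via an alias */

	/* Old behaviour: only the fault IPA was hugepage-aligned */
	fault_ipa &= ~(PMD_SIZE - 1);
	uint64_t bad_gfn = ipa >> PAGE_SHIFT;	/* mid-hugepage gfn */

	/* The fix: align the canonical IPA as well before computing the gfn */
	ipa &= ~(PMD_SIZE - 1);
	uint64_t good_gfn = ipa >> PAGE_SHIFT;

	printf("fault_ipa=%#llx unaligned gfn=%#llx aligned gfn=%#llx\n",
	       (unsigned long long)fault_ipa,
	       (unsigned long long)bad_gfn, (unsigned long long)good_gfn);
	return 0;
}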
+6
arch/arm64/kvm/sys_regs.c
···
 #include <trace/events/kvm.h>
 
 #include "sys_regs.h"
+#include "vgic/vgic.h"
 
 #include "trace.h"
 
···
 			    const struct sys_reg_desc *r)
 {
 	bool g1;
+
+	if (!kvm_has_gicv3(vcpu->kvm)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
 
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p, r);
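The guard relies on the kvm_has_gicv3() helper added in vgic.h further down. Here is a self-contained userspace sketch of the pattern, with stand-in types rather than the kernel's (the kvm/vcpu structs and inject_undefined() below are invented for illustration): the handler refuses to emulate the SGI register and injects UNDEF when the VM has no vGICv3.

#include <stdbool.h>
#include <stdio.h>

struct kvm { bool has_gicv3; };		/* stand-in for struct kvm */
struct vcpu { struct kvm *kvm; bool undef_pending; };

static void inject_undefined(struct vcpu *vcpu)
{
	vcpu->undef_pending = true;	/* models kvm_inject_undefined() */
}

static bool access_sgi(struct vcpu *vcpu)
{
	/* The added guard: no vGICv3, no SGI emulation */
	if (!vcpu->kvm->has_gicv3) {
		inject_undefined(vcpu);
		return false;
	}
	/* ... SGI emulation would go here ... */
	return true;
}

int main(void)
{
	struct kvm kvm = { .has_gicv3 = false };
	struct vcpu vcpu = { .kvm = &kvm, .undef_pending = false };

	printf("handled=%d undef_pending=%d\n",
	       access_sgi(&vcpu), vcpu.undef_pending);
	return 0;
}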
+1 -1
arch/arm64/kvm/vgic/vgic-debug.c
···
 	struct vgic_irq *irq;
 	unsigned long intid;
 
-	xa_for_each(&dist->lpi_xa, intid, irq) {
+	xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
 		xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
 		vgic_put_irq(kvm, irq);
 	}
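To see why iterating only marked entries matters, here is a hedged userspace sketch, with a plain array and boolean flag standing in for the xarray and LPI_XA_MARK_DEBUG_ITER (the refcount values are invented): the debug iterator only took a reference on the LPIs it marked, so putting unmarked entries would drop a reference the iterator never owned.

#include <stdbool.h>
#include <stdio.h>

#define NR_LPIS 4

struct lpi { int refcount; bool debug_marked; };

static void put_lpi(struct lpi *lpi)
{
	lpi->refcount--;		/* models vgic_put_irq() */
}

int main(void)
{
	struct lpi lpis[NR_LPIS] = {
		{ .refcount = 2, .debug_marked = true  },	/* visited */
		{ .refcount = 1, .debug_marked = false },	/* never visited */
		{ .refcount = 2, .debug_marked = true  },	/* visited */
		{ .refcount = 1, .debug_marked = false },	/* never visited */
	};

	/* xa_for_each_marked() analogue: skip entries we never marked */
	for (int i = 0; i < NR_LPIS; i++) {
		if (!lpis[i].debug_marked)
			continue;
		lpis[i].debug_marked = false;	/* xa_clear_mark() */
		put_lpi(&lpis[i]);
	}

	/* Unmarked LPIs keep their original refcount of 1 */
	for (int i = 0; i < NR_LPIS; i++)
		printf("lpi %d: refcount=%d\n", i, lpis[i].refcount);
	return 0;
}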
+6 -3
arch/arm64/kvm/vgic/vgic-init.c
···
 	kfree(vgic_cpu->private_irqs);
 	vgic_cpu->private_irqs = NULL;
 
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		vgic_unregister_redist_iodev(vcpu);
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
 		vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
-	}
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
···
 	kvm_vgic_dist_destroy(kvm);
 
 	mutex_unlock(&kvm->arch.config_lock);
+
+	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			vgic_unregister_redist_iodev(vcpu);
+
 	mutex_unlock(&kvm->slots_lock);
 }
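A hedged pthread sketch of the reordering, with plain mutexes standing in for slots_lock, SRCU, and config_lock (the kernel's SRCU is not a mutex; this only models the acquisition order): per the ordering documented in the vgic.c hunk below (slots_lock -> srcu -> config_lock), unregistering the redistributors acquires srcu and therefore must not happen under config_lock, so it is deferred until config_lock is dropped while slots_lock is still held.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t srcu = PTHREAD_MUTEX_INITIALIZER;	/* stand-in */
static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;

static void unregister_redist_iodevs(void)
{
	/* Would invert the documented order if called under config_lock */
	pthread_mutex_lock(&srcu);
	puts("redistributor iodevs unregistered");
	pthread_mutex_unlock(&srcu);
}

int main(void)
{
	pthread_mutex_lock(&slots_lock);

	pthread_mutex_lock(&config_lock);
	puts("vgic state torn down");	/* work that needs config_lock */
	pthread_mutex_unlock(&config_lock);

	/* The fix: only slots_lock is held here, so taking srcu is safe */
	unregister_redist_iodevs();

	pthread_mutex_unlock(&slots_lock);
	return 0;
}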
+5
arch/arm64/kvm/vgic/vgic.c
···
  * we have to disable IRQs before taking this lock and everything lower
  * than it.
  *
+ * The config_lock has additional ordering requirements:
+ * kvm->slots_lock
+ *   kvm->srcu
+ *     kvm->arch.config_lock
+ *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
  * If you are already holding a lock and need to take a higher one, you
+7
arch/arm64/kvm/vgic/vgic.h
···
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
 
+static inline bool kvm_has_gicv3(struct kvm *kvm)
+{
+	return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
+		irqchip_in_kernel(kvm) &&
+		kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
+}
+
 #endif