Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
"PPC:
- Fix a bug where we try to do an ultracall on a system without an
ultravisor

ARM:
- Fix uninitialised sysreg accessor
- Fix handling of demand-paged device mappings
- Stop spamming the console on IMPDEF sysregs
- Relax mappings of writable memslots
- Assorted cleanups

MIPS:
- Now orphaned; James Hogan is stepping down

x86:
- MAINTAINERS change, so long Radim and thanks for all the fish
- fixes to the supported-CPUID bits reported for AMD machines without SPEC_CTRL"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
MAINTAINERS: remove Radim from KVM maintainers
MAINTAINERS: Orphan KVM for MIPS
kvm: x86: Host feature SSBD doesn't imply guest feature AMD_SSBD
kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
KVM: PPC: Book3S HV: Don't do ultravisor calls on systems without ultravisor
KVM: arm/arm64: Properly handle faulting of device mappings
KVM: arm64: Ensure 'params' is initialised when looking up sys register
KVM: arm/arm64: Remove excessive permission check in kvm_arch_prepare_memory_region
KVM: arm64: Don't log IMP DEF sysreg traps
KVM: arm64: Sanely ratelimit sysreg messages
KVM: arm/arm64: vgic: Use wrapper function to lock/unlock all vcpus in kvm_vgic_create()
KVM: arm/arm64: vgic: Fix potential double free dist->spis in __kvm_vgic_destroy()
KVM: arm/arm64: Get rid of unused arg in cpu_init_hyp_mode()

Changed files: +66 -47

 MAINTAINERS                   |  +2  -4
 arch/arm64/kvm/sys_regs.c     | +18  -7
 arch/arm64/kvm/sys_regs.h     | +16  -3
 arch/powerpc/kvm/book3s_hv.c  |  +2  -1
 arch/x86/kvm/cpuid.c          |  +4  -2
 virt/kvm/arm/arm.c            |  +2  -2
 virt/kvm/arm/mmu.c            | +17 -13
 virt/kvm/arm/vgic/vgic-init.c |  +5 -15
MAINTAINERS (+2 -4)

@@ -9041,7 +9041,6 @@
 
 KERNEL VIRTUAL MACHINE (KVM)
 M: Paolo Bonzini <pbonzini@redhat.com>
-M: Radim Krčmář <rkrcmar@redhat.com>
 L: kvm@vger.kernel.org
 W: http://www.linux-kvm.org
 T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -9075,9 +9076,9 @@
 F: include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
-M: James Hogan <jhogan@kernel.org>
 L: linux-mips@vger.kernel.org
-S: Supported
+L: kvm@vger.kernel.org
+S: Orphan
 F: arch/mips/include/uapi/asm/kvm*
 F: arch/mips/include/asm/kvm*
 F: arch/mips/kvm/
@@ -9112,7 +9113,6 @@
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M: Paolo Bonzini <pbonzini@redhat.com>
-M: Radim Krčmář <rkrcmar@redhat.com>
 R: Sean Christopherson <sean.j.christopherson@intel.com>
 R: Vitaly Kuznetsov <vkuznets@redhat.com>
 R: Wanpeng Li <wanpengli@tencent.com>
arch/arm64/kvm/sys_regs.c (+18 -7)

@@ -2098,9 +2098,9 @@
                 WARN_ON(1);
         }
 
-        kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
-                cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
-        print_sys_reg_instr(params);
+        print_sys_reg_msg(params,
+                          "Unsupported guest CP%d access at: %08lx [%08lx]\n",
+                          cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
         kvm_inject_undefined(vcpu);
 }
 
@@ -2233,6 +2233,12 @@
                          NULL, 0);
 }
 
+static bool is_imp_def_sys_reg(struct sys_reg_params *params)
+{
+        // See ARM DDI 0487E.a, section D12.3.2
+        return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
+}
+
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *params)
 {
@@ -2254,10 +2248,12 @@
 
         if (likely(r)) {
                 perform_access(vcpu, params, r);
+        } else if (is_imp_def_sys_reg(params)) {
+                kvm_inject_undefined(vcpu);
         } else {
-                kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
-                        *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
-                print_sys_reg_instr(params);
+                print_sys_reg_msg(params,
+                                  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
+                                  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
                 kvm_inject_undefined(vcpu);
         }
         return 1;
@@ -2368,8 +2360,11 @@
         if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                 return NULL;
 
+        if (!index_to_params(id, &params))
+                return NULL;
+
         table = get_target_table(vcpu->arch.target, true, &num);
-        r = find_reg_by_id(id, &params, table, num);
+        r = find_reg(&params, table, num);
         if (!r)
                 r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
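The predicate in is_imp_def_sys_reg() is terser than it looks: for Op0 == 3, the IMPLEMENTATION DEFINED sysreg space is exactly CRn == 11 or CRn == 15 (per the ARM DDI 0487E.a D12.3.2 reference in the diff), and those two values differ only in bit 2, which the 0b1011 mask throws away. A standalone user-space sketch, with hypothetical names, that exhaustively checks the predicate:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Re-statement of the kernel predicate above, for testing outside
 * the kernel. 11 is 0b1011 and 15 is 0b1111; masking CRn with
 * 0b1011 clears bit 2, the only bit in which they differ, so the
 * comparison accepts exactly those two CRn values.
 */
static bool is_imp_def_sys_reg(uint8_t op0, uint8_t crn)
{
        return op0 == 3 && (crn & 0b1011) == 0b1011;
}

int main(void)
{
        for (uint8_t crn = 0; crn < 16; crn++)
                assert(is_imp_def_sys_reg(3, crn) == (crn == 11 || crn == 15));
        assert(!is_imp_def_sys_reg(2, 11));     /* wrong Op0 never matches */
        return 0;
}

With that helper in place, guest pokes at IMP DEF registers get an UNDEF injected without anything being written to the host log, which is what the "Don't log IMP DEF sysreg traps" commit is after.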
arch/arm64/kvm/sys_regs.h (+16 -3)

@@ -62,11 +62,24 @@
 #define REG_HIDDEN_USER  (1 << 0) /* hidden from userspace ioctls */
 #define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */
 
+static __printf(2, 3)
+inline void print_sys_reg_msg(const struct sys_reg_params *p,
+                              char *fmt, ...)
+{
+        va_list va;
+
+        va_start(va, fmt);
+        /* Look, we even formatted it for you to paste into the table! */
+        kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
+                      &(struct va_format){ fmt, &va },
+                      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
+        va_end(va);
+}
+
 static inline void print_sys_reg_instr(const struct sys_reg_params *p)
 {
-        /* Look, we even formatted it for you to paste into the table! */
-        kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
-                      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
+        /* GCC warns on an empty format string */
+        print_sys_reg_msg(p, "%s", "");
 }
 
 static inline bool ignore_write(struct kvm_vcpu *vcpu,
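What this refactoring buys is hinted at by the "Sanely ratelimit sysreg messages" commit title: kvm_pr_unimpl() is ratelimited, so emitting the caller's message and the decoded encoding through one call makes them pass or fail the ratelimit together instead of interleaving as two independently limited lines; the %pV / struct va_format printk extension is what lets one format string embed another plus its va_list. A rough user-space approximation (names invented for this sketch; vsnprintf() stands in for %pV):

#include <stdarg.h>
#include <stdio.h>

/*
 * Sketch of the print_sys_reg_msg() shape: render the variadic
 * message first, then emit message + fixed suffix through a single
 * output call, so one ratelimit decision would cover the whole line.
 */
__attribute__((format(printf, 1, 2)))
static void print_msg(const char *fmt, ...)
{
        char buf[256];
        va_list va;

        va_start(va, fmt);
        vsnprintf(buf, sizeof(buf), fmt, va);
        va_end(va);

        /* Dummy Op0/Op1 values; the kernel prints the real encoding. */
        printf("%s { Op0(%2u), Op1(%2u), ... },\n", buf, 3u, 0u);
}

int main(void)
{
        print_msg("Unsupported access at: %08lx", 0xffff0000UL);
        print_msg("%s", "");    /* the empty-message trick from the diff */
        return 0;
}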
arch/powerpc/kvm/book3s_hv.c (+2 -1)

@@ -4983,7 +4983,8 @@
         if (nesting_enabled(kvm))
                 kvmhv_release_all_nested(kvm);
         kvm->arch.process_table = 0;
-        uv_svm_terminate(kvm->arch.lpid);
+        if (kvm->arch.secure_guest)
+                uv_svm_terminate(kvm->arch.lpid);
         kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
 }
 
arch/x86/kvm/cpuid.c (+4 -2)

@@ -402,7 +402,8 @@
                         entry->edx |= F(SPEC_CTRL);
                 if (boot_cpu_has(X86_FEATURE_STIBP))
                         entry->edx |= F(INTEL_STIBP);
-                if (boot_cpu_has(X86_FEATURE_SSBD))
+                if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                    boot_cpu_has(X86_FEATURE_AMD_SSBD))
                         entry->edx |= F(SPEC_CTRL_SSBD);
                 /*
                  * We emulate ARCH_CAPABILITIES in software even
@@ -760,7 +759,8 @@
                         entry->ebx |= F(AMD_IBRS);
                 if (boot_cpu_has(X86_FEATURE_STIBP))
                         entry->ebx |= F(AMD_STIBP);
-                if (boot_cpu_has(X86_FEATURE_SSBD))
+                if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                    boot_cpu_has(X86_FEATURE_AMD_SSBD))
                         entry->ebx |= F(AMD_SSBD);
                 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                         entry->ebx |= F(AMD_SSB_NO);
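Some background makes these two hunks clearer: X86_FEATURE_SSBD is the host's synthetic "SSBD is available somehow" flag, and it is also set when the mitigation is backed by interfaces a guest cannot use directly (AMD's paravirtual VIRT_SSBD, or the non-architectural LS_CFG MSR). Only the CPUID-enumerated forms imply the SPEC_CTRL MSR plumbing a guest needs, so only those may drive the advertised bits. A toy model of the corrected logic (struct and function names invented for this sketch):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the fix; all names are local to this sketch. The
 * host's synthetic SSBD capability may be backed by mechanisms a
 * guest can't use, so only the CPUID-enumerated forms that come
 * with the SPEC_CTRL MSR interface may be advertised.
 */
struct host_caps {
        bool spec_ctrl_ssbd;    /* CPUID.(7,0):EDX.SSBD - usable by guests */
        bool amd_ssbd;          /* CPUID.80000008H:EBX.AMD_SSBD - usable by guests */
        bool virt_ssbd;         /* paravirtual interface - no SPEC_CTRL MSR implied */
        bool ls_cfg_ssbd;       /* non-architectural LS_CFG MSR - host-only */
};

/* What testing the synthetic flag effectively asked. */
static bool host_has_ssbd(const struct host_caps *h)
{
        return h->spec_ctrl_ssbd || h->amd_ssbd || h->virt_ssbd || h->ls_cfg_ssbd;
}

/* What the fix tests before setting the guest's SSBD CPUID bits. */
static bool guest_may_see_ssbd(const struct host_caps *h)
{
        return h->spec_ctrl_ssbd || h->amd_ssbd;
}

int main(void)
{
        /* SSBD only via LS_CFG: mitigated host, but nothing to advertise. */
        struct host_caps h = { .ls_cfg_ssbd = true };

        printf("host mitigated:     %d\n", host_has_ssbd(&h));          /* 1 */
        printf("advertise to guest: %d\n", guest_may_see_ssbd(&h));     /* 0 */
        return 0;
}

On a host mitigated purely via LS_CFG this prints 1 then 0: the host is fine, but advertising AMD_SSBD or SPEC_CTRL_SSBD to the guest would promise an MSR interface that is not there.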
virt/kvm/arm/arm.c (+2 -2)

@@ -1352,7 +1352,7 @@
         }
 }
 
-static void cpu_init_hyp_mode(void *dummy)
+static void cpu_init_hyp_mode(void)
 {
         phys_addr_t pgd_ptr;
         unsigned long hyp_stack_ptr;
@@ -1386,7 +1386,7 @@
         if (is_kernel_in_hyp_mode())
                 kvm_timer_init_vhe();
         else
-                cpu_init_hyp_mode(NULL);
+                cpu_init_hyp_mode();
 
         kvm_arm_init_debug();
 
virt/kvm/arm/mmu.c (+17 -13)

@@ -38,6 +38,11 @@
 #define KVM_S2PTE_FLAG_IS_IOMAP    (1UL << 0)
 #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
 
+static bool is_iomap(unsigned long flags)
+{
+        return flags & KVM_S2PTE_FLAG_IS_IOMAP;
+}
+
 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 {
         return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -1703,6 +1698,7 @@
 
         vma_pagesize = vma_kernel_pagesize(vma);
         if (logging_active ||
+            (vma->vm_flags & VM_PFNMAP) ||
             !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
                 force_pte = true;
                 vma_pagesize = PAGE_SIZE;
@@ -1766,6 +1760,9 @@
                 writable = false;
         }
 
+        if (exec_fault && is_iomap(flags))
+                return -ENOEXEC;
+
         spin_lock(&kvm->mmu_lock);
         if (mmu_notifier_retry(kvm, mmu_seq))
                 goto out_unlock;
@@ -1790,7 +1781,7 @@
         if (writable)
                 kvm_set_pfn_dirty(pfn);
 
-        if (fault_status != FSC_PERM)
+        if (fault_status != FSC_PERM && !is_iomap(flags))
                 clean_dcache_guest_page(pfn, vma_pagesize);
 
         if (exec_fault)
@@ -1957,9 +1948,8 @@
         if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                 if (is_iabt) {
                         /* Prefetch Abort on I/O address */
-                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                        ret = 1;
-                        goto out_unlock;
+                        ret = -ENOEXEC;
+                        goto out;
                 }
 
                 /*
@@ -2000,6 +1992,11 @@
         ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
         if (ret == 0)
                 ret = 1;
+out:
+        if (ret == -ENOEXEC) {
+                kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+                ret = 1;
+        }
 out_unlock:
         srcu_read_unlock(&vcpu->kvm->srcu, idx);
         return ret;
@@ -2313,15 +2300,6 @@
 
                 if (!vma || vma->vm_start >= reg_end)
                         break;
-
-                /*
-                 * Mapping a read-only VMA is only allowed if the
-                 * memory region is configured as read-only.
-                 */
-                if (writable && !(vma->vm_flags & VM_WRITE)) {
-                        ret = -EPERM;
-                        break;
-                }
 
                 /*
                  * Take the intersection of this VMA with the memory region
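The mmu.c hunks share one idea: device (iomap) memory must never be mapped executable for the guest, and instead of injecting the prefetch abort at each failure site, the fault paths now return the sentinel -ENOEXEC, which a single out: label converts into an abort plus the "fault handled" return value of 1. The shape of that control flow, reduced to a user-space sketch with invented names:

#include <errno.h>
#include <stdio.h>

/*
 * Sketch of the control flow the diff introduces: inner helpers
 * return -ENOEXEC for "executing this memory is not allowed", and
 * one place at the end of the fault handler converts that into a
 * guest prefetch abort plus the "handled" return value of 1.
 */
static int map_fault(int exec_fault, int is_device)
{
        if (exec_fault && is_device)
                return -ENOEXEC;        /* never make device memory executable */
        return 0;
}

static int handle_guest_abort(int exec_fault, int is_device)
{
        int ret = map_fault(exec_fault, is_device);

        if (ret == 0)
                ret = 1;
        if (ret == -ENOEXEC) {
                printf("inject prefetch abort into guest\n");
                ret = 1;        /* abort delivered; fault is handled */
        }
        return ret;
}

int main(void)
{
        printf("data fault on device: %d\n", handle_guest_abort(0, 1));
        printf("exec fault on device: %d\n", handle_guest_abort(1, 1));
        return 0;
}

Funneling through one label keeps the injection logic in one place, which matters because user_mem_abort() gains a second -ENOEXEC source (exec faults on iomap pages) in the same patch.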
virt/kvm/arm/vgic/vgic-init.c (+5 -15)

@@ -70,7 +70,7 @@
  */
 int kvm_vgic_create(struct kvm *kvm, u32 type)
 {
-        int i, vcpu_lock_idx = -1, ret;
+        int i, ret;
         struct kvm_vcpu *vcpu;
 
         if (irqchip_in_kernel(kvm))
@@ -86,17 +86,9 @@
             !kvm_vgic_global_state.can_emulate_gicv2)
                 return -ENODEV;
 
-        /*
-         * Any time a vcpu is run, vcpu_load is called which tries to grab the
-         * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
-         * that no other VCPUs are run while we create the vgic.
-         */
         ret = -EBUSY;
-        kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (!mutex_trylock(&vcpu->mutex))
-                        goto out_unlock;
-                vcpu_lock_idx = i;
-        }
+        if (!lock_all_vcpus(kvm))
+                return ret;
 
         kvm_for_each_vcpu(i, vcpu, kvm) {
                 if (vcpu->arch.has_run_once)
@@ -117,9 +125,6 @@
         INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
-        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-                vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-                mutex_unlock(&vcpu->mutex);
-        }
+        unlock_all_vcpus(kvm);
         return ret;
 }
@@ -166,5 +177,6 @@
                 break;
         default:
                 kfree(dist->spis);
+                dist->spis = NULL;
                 return -EINVAL;
         }
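The final hunk is the standard double-free guard: kfree(), like free(), is a no-op when passed NULL, so clearing dist->spis immediately after freeing it makes a later run of the destroy path over the same structure harmless. The same pattern in miniature, as a user-space sketch:

#include <stdlib.h>

/*
 * The double-free fix in miniature: free() (like the kernel's
 * kfree()) is a no-op on NULL, so clearing the pointer right after
 * freeing makes a second cleanup pass over the same structure safe.
 */
struct dist {
        int *spis;
};

static void dist_destroy(struct dist *d)
{
        free(d->spis);
        d->spis = NULL; /* a second destroy call is now harmless */
}

int main(void)
{
        struct dist d = { .spis = malloc(32 * sizeof(int)) };

        dist_destroy(&d);       /* error path frees ... */
        dist_destroy(&d);       /* ... and normal teardown frees again: OK */
        return 0;
}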