Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM:

- Avoid pKVM finalization if KVM initialization fails

- Add missing BTI instructions in the hypervisor, fixing an early
boot failure on BTI systems

- Handle MMU notifiers correctly for non-hugepage-aligned memslots

- Work around a bug in the architecture where hypervisor timer
controls have UNKNOWN behavior under nested virt

- Disable preemption in kvm_arch_hardware_enable(), fixing a kernel
BUG in CPU hotplug resulting from per-CPU accessor sanity checking

- Make WFI emulation on GICv4 systems robust w.r.t. preemption,
consistently requesting a doorbell interrupt on vcpu_put()

- Uphold RES0 sysreg behavior when emulating older PMU versions

- Avoid macro expansion when initializing PMU register names,
ensuring the tracepoints pretty-print the sysreg name

s390:

- Two fixes for asynchronous destroy

x86 fixes will come early next week"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: s390: pv: fix index value of replaced ASCE
KVM: s390: pv: simplify shutdown and fix race
KVM: arm64: Fix the name of sys_reg_desc related to PMU
KVM: arm64: Correctly handle RES0 bits PMEVTYPER<n>_EL0.evtCount
KVM: arm64: vgic-v4: Make the doorbell request robust w.r.t preemption
KVM: arm64: Add missing BTI instructions
KVM: arm64: Correctly handle page aging notifiers for unaligned memslot
KVM: arm64: Disable preemption in kvm_arch_hardware_enable()
KVM: arm64: Handle kvm_arm_init failure correctly in finalize_pkvm
KVM: arm64: timers: Use CNTHCTL_EL2 when setting non-CNTKCTL_EL1 bits

Changed files (+140 -72):

arch/arm64/include/asm/kvm_host.h (+2)
···
  #define DBG_SS_ACTIVE_PENDING   __vcpu_single_flag(sflags, BIT(5))
  /* PMUSERENR for the guest EL0 is on physical CPU */
  #define PMUSERENR_ON_CPU        __vcpu_single_flag(sflags, BIT(6))
+ /* WFI instruction trapped */
+ #define IN_WFI                  __vcpu_single_flag(sflags, BIT(7))


  /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */

arch/arm64/include/asm/kvm_pgtable.h (+9 -17)
···
  kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

  /**
-  * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
+  * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
+  *                                         flag in a page-table entry.
   * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
   * @addr:	Intermediate physical address to identify the page-table entry.
+  * @size:	Size of the address range to visit.
+  * @mkold:	True if the access flag should be cleared.
   *
   * The offset of @addr within a page is ignored.
   *
-  * If there is a valid, leaf page-table entry used to translate @addr, then
-  * clear the access flag in that entry.
+  * Tests and conditionally clears the access flag for every valid, leaf
+  * page-table entry used to translate the range [@addr, @addr + @size).
   *
   * Note that it is the caller's responsibility to invalidate the TLB after
   * calling this function to ensure that the updated permissions are visible
   * to the CPUs.
   *
-  * Return: The old page-table entry prior to clearing the flag, 0 on failure.
+  * Return: True if any of the visited PTEs had the access flag set.
   */
- kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
+ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+                                          u64 size, bool mkold);

  /**
   * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
···
   */
  int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
                                     enum kvm_pgtable_prot prot);
-
- /**
-  * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
-  *                                 access flag set.
-  * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
-  * @addr:	Intermediate physical address to identify the page-table entry.
-  *
-  * The offset of @addr within a page is ignored.
-  *
-  * Return: True if the page-table entry has the access flag set, false otherwise.
-  */
- bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);

  /**
   * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point

arch/arm64/include/asm/virt.h (+1)
···

  void __hyp_set_vectors(phys_addr_t phys_vector_base);
  void __hyp_reset_vectors(void);
+ bool is_kvm_arm_initialised(void);

  DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);


arch/arm64/kvm/arch_timer.c (+3 -3)
···
          assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
          assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);

-         /* This only happens on VHE, so use the CNTKCTL_EL1 accessor */
-         sysreg_clear_set(cntkctl_el1, clr, set);
+         /* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
+         sysreg_clear_set(cnthctl_el2, clr, set);
  }

  void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
···
  void kvm_timer_init_vhe(void)
  {
          if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
-                 sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV);
+                 sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
  }

  int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)

arch/arm64/kvm/arm.c (+24 -4)
···

  DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

- static bool vgic_present;
+ static bool vgic_present, kvm_arm_initialised;

  static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
  DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+
+ bool is_kvm_arm_initialised(void)
+ {
+         return kvm_arm_initialised;
+ }

  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  {
···
           */
          preempt_disable();
          kvm_vgic_vmcr_sync(vcpu);
-         vgic_v4_put(vcpu, true);
+         vcpu_set_flag(vcpu, IN_WFI);
+         vgic_v4_put(vcpu);
          preempt_enable();

          kvm_vcpu_halt(vcpu);
          vcpu_clear_flag(vcpu, IN_WFIT);

          preempt_disable();
+         vcpu_clear_flag(vcpu, IN_WFI);
          vgic_v4_load(vcpu);
          preempt_enable();
  }
···
          if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
                  /* The distributor enable bits were changed */
                  preempt_disable();
-                 vgic_v4_put(vcpu, false);
+                 vgic_v4_put(vcpu);
                  vgic_v4_load(vcpu);
                  preempt_enable();
          }
···

  int kvm_arch_hardware_enable(void)
  {
-         int was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
+         int was_enabled;

+         /*
+          * Most calls to this function are made with migration
+          * disabled, but not with preemption disabled. The former is
+          * enough to ensure correctness, but most of the helpers
+          * expect the latter and will throw a tantrum otherwise.
+          */
+         preempt_disable();
+
+         was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
          _kvm_arch_hardware_enable(NULL);

          if (!was_enabled) {
                  kvm_vgic_cpu_up();
                  kvm_timer_cpu_up();
          }
+
+         preempt_enable();

          return 0;
  }
···
          err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
          if (err)
                  goto out_subs;
+
+         kvm_arm_initialised = true;

          return 0;


arch/arm64/kvm/hyp/hyp-entry.S (+8)
···
          esb
          stp     x0, x1, [sp, #-16]!
  662:
+         /*
+          * spectre vectors __bp_harden_hyp_vecs generate br instructions at runtime
+          * that jump at offset 8 at __kvm_hyp_vector.
+          * As hyp .text is guarded section, it needs bti j.
+          */
+         bti j
          b       \target

  check_preamble_length 661b, 662b
···
          nop
          stp     x0, x1, [sp, #-16]!
  662:
+         /* Check valid_vect */
+         bti j
          b       \target

  check_preamble_length 661b, 662b

arch/arm64/kvm/hyp/nvhe/host.S (+10)
···

          ret
  SYM_CODE_END(__kvm_hyp_host_forward_smc)
+
+ /*
+  * kvm_host_psci_cpu_entry is called through br instruction, which requires
+  * bti j instruction as compilers (gcc and llvm) doesn't insert bti j for external
+  * functions, but bti c instead.
+  */
+ SYM_CODE_START(kvm_host_psci_cpu_entry)
+         bti j
+         b __kvm_host_psci_cpu_entry
+ SYM_CODE_END(kvm_host_psci_cpu_entry)

arch/arm64/kvm/hyp/nvhe/psci-relay.c (+1 -1)
···
                               __hyp_pa(init_params), 0);
  }

- asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
+ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
  {
          struct psci_boot_args *boot_args;
          struct kvm_cpu_context *host_ctxt;

arch/arm64/kvm/hyp/pgtable.c (+38 -9)
···
          return pte;
  }

- kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
+ struct stage2_age_data {
+         bool    mkold;
+         bool    young;
+ };
+
+ static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
+                              enum kvm_pgtable_walk_flags visit)
  {
-         kvm_pte_t pte = 0;
-         stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
-                                  &pte, NULL, 0);
+         kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
+         struct stage2_age_data *data = ctx->arg;
+
+         if (!kvm_pte_valid(ctx->old) || new == ctx->old)
+                 return 0;
+
+         data->young = true;
+
+         /*
+          * stage2_age_walker() is always called while holding the MMU lock for
+          * write, so this will always succeed. Nonetheless, this deliberately
+          * follows the race detection pattern of the other stage-2 walkers in
+          * case the locking mechanics of the MMU notifiers is ever changed.
+          */
+         if (data->mkold && !stage2_try_set_pte(ctx, new))
+                 return -EAGAIN;
+
          /*
           * "But where's the TLBI?!", you scream.
           * "Over in the core code", I sigh.
           *
           * See the '->clear_flush_young()' callback on the KVM mmu notifier.
           */
-         return pte;
+         return 0;
  }

- bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
+ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+                                          u64 size, bool mkold)
  {
-         kvm_pte_t pte = 0;
-         stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
-         return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
+         struct stage2_age_data data = {
+                 .mkold = mkold,
+         };
+         struct kvm_pgtable_walker walker = {
+                 .cb = stage2_age_walker,
+                 .arg = &data,
+                 .flags = KVM_PGTABLE_WALK_LEAF,
+         };
+
+         WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+         return data.young;
  }

  int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,

arch/arm64/kvm/mmu.c (+8 -10)
···
  bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
  {
          u64 size = (range->end - range->start) << PAGE_SHIFT;
-         kvm_pte_t kpte;
-         pte_t pte;

          if (!kvm->arch.mmu.pgt)
                  return false;

-         WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-
-         kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
-                                         range->start << PAGE_SHIFT);
-         pte = __pte(kpte);
-         return pte_valid(pte) && pte_young(pte);
+         return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+                                                    range->start << PAGE_SHIFT,
+                                                    size, true);
  }

  bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
  {
+         u64 size = (range->end - range->start) << PAGE_SHIFT;
+
          if (!kvm->arch.mmu.pgt)
                  return false;

-         return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
-                                            range->start << PAGE_SHIFT);
+         return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+                                                    range->start << PAGE_SHIFT,
+                                                    size, false);
  }

  phys_addr_t kvm_mmu_get_httbr(void)

arch/arm64/kvm/pkvm.c (+1 -1)
···
  {
          int ret;

-         if (!is_protected_kvm_enabled())
+         if (!is_protected_kvm_enabled() || !is_kvm_arm_initialised())
                  return 0;

          /*

arch/arm64/kvm/sys_regs.c (+21 -21)
···

          if (p->is_write) {
                  kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-                 __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
                  kvm_vcpu_pmu_restore_guest(vcpu);
          } else {
                  p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
···
          { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
          trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

- #define PMU_SYS_REG(r)                                                  \
-         SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
+ #define PMU_SYS_REG(name)                                               \
+         SYS_DESC(SYS_##name), .reset = reset_pmu_reg,                   \
+         .visibility = pmu_visibility

  /* Macro to expand the PMEVCNTRn_EL0 register */
  #define PMU_PMEVCNTR_EL0(n)                                             \
-         { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
+         { PMU_SYS_REG(PMEVCNTRn_EL0(n)),                                \
          .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,            \
          .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

  /* Macro to expand the PMEVTYPERn_EL0 register */
  #define PMU_PMEVTYPER_EL0(n)                                            \
-         { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                           \
+         { PMU_SYS_REG(PMEVTYPERn_EL0(n)),                               \
          .reset = reset_pmevtyper,                                       \
          .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
···
          { SYS_DESC(SYS_PMBSR_EL1), undef_access },
          /* PMBIDR_EL1 is not trapped */

-         { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+         { PMU_SYS_REG(PMINTENSET_EL1),
            .access = access_pminten, .reg = PMINTENSET_EL1 },
-         { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+         { PMU_SYS_REG(PMINTENCLR_EL1),
            .access = access_pminten, .reg = PMINTENSET_EL1 },
          { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

···
          { SYS_DESC(SYS_CTR_EL0), access_ctr },
          { SYS_DESC(SYS_SVCR), undef_access },

-         { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+         { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
            .reset = reset_pmcr, .reg = PMCR_EL0 },
-         { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+         { PMU_SYS_REG(PMCNTENSET_EL0),
            .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-         { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+         { PMU_SYS_REG(PMCNTENCLR_EL0),
            .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-         { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+         { PMU_SYS_REG(PMOVSCLR_EL0),
            .access = access_pmovs, .reg = PMOVSSET_EL0 },
          /*
           * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
           * previously (and pointlessly) advertised in the past...
           */
-         { PMU_SYS_REG(SYS_PMSWINC_EL0),
+         { PMU_SYS_REG(PMSWINC_EL0),
            .get_user = get_raz_reg, .set_user = set_wi_reg,
            .access = access_pmswinc, .reset = NULL },
-         { PMU_SYS_REG(SYS_PMSELR_EL0),
+         { PMU_SYS_REG(PMSELR_EL0),
            .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
-         { PMU_SYS_REG(SYS_PMCEID0_EL0),
+         { PMU_SYS_REG(PMCEID0_EL0),
            .access = access_pmceid, .reset = NULL },
-         { PMU_SYS_REG(SYS_PMCEID1_EL0),
+         { PMU_SYS_REG(PMCEID1_EL0),
            .access = access_pmceid, .reset = NULL },
-         { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+         { PMU_SYS_REG(PMCCNTR_EL0),
            .access = access_pmu_evcntr, .reset = reset_unknown,
            .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
-         { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+         { PMU_SYS_REG(PMXEVTYPER_EL0),
            .access = access_pmu_evtyper, .reset = NULL },
-         { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+         { PMU_SYS_REG(PMXEVCNTR_EL0),
            .access = access_pmu_evcntr, .reset = NULL },
          /*
           * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
           * in 32bit mode. Here we choose to reset it as zero for consistency.
           */
-         { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+         { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
            .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
-         { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+         { PMU_SYS_REG(PMOVSSET_EL0),
            .access = access_pmovs, .reg = PMOVSSET_EL0 },

          { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
···
           * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
           * in 32bit mode. Here we choose to reset it as zero for consistency.
           */
-         { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+         { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
            .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

          EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
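
The PMU_SYS_REG() change above works because a macro argument is fully expanded before substitution unless it is an operand of # or ##; pasting SYS_ onto the bare name inside the macro keeps the symbolic token intact for the stringification that produces the descriptor name used by the tracepoints. A minimal standalone sketch of that preprocessor behaviour (hypothetical macro names, not kernel code):

#include <stdio.h>

#define SYS_PMCR_EL0  0xdf00              /* stand-in for the real sys_reg() encoding */

#define DESC(r)       #r                  /* stringifies its argument, as the .name field does */
#define OLD(r)        DESC(r)             /* r expands to 0xdf00 before DESC ever sees it      */
#define NEW(name)     DESC(SYS_##name)    /* ## pastes first, so the token stays symbolic      */

int main(void)
{
        puts(OLD(SYS_PMCR_EL0));   /* prints "0xdf00"       */
        puts(NEW(PMCR_EL0));       /* prints "SYS_PMCR_EL0" */
        return 0;
}

With the old form, the tracepoints ended up printing the expanded encoding; with the new form they print the register name.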

arch/arm64/kvm/vgic/vgic-v3.c (+1 -1)
···
  {
          struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

-         WARN_ON(vgic_v4_put(vcpu, false));
+         WARN_ON(vgic_v4_put(vcpu));

          vgic_v3_vmcr_sync(vcpu);


arch/arm64/kvm/vgic/vgic-v4.c (+5 -2)
···
          its_vm->vpes = NULL;
  }

- int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
+ int vgic_v4_put(struct kvm_vcpu *vcpu)
  {
          struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

          if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
                  return 0;

-         return its_make_vpe_non_resident(vpe, need_db);
+         return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
  }

  int vgic_v4_load(struct kvm_vcpu *vcpu)
···
          int err;

          if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
+                 return 0;
+
+         if (vcpu_get_flag(vcpu, IN_WFI))
                  return 0;

          /*

arch/s390/kvm/pv.c (+6 -2)
···
          u16 _rc, _rrc;
          int cc = 0;

-         /* Make sure the counter does not reach 0 before calling s390_uv_destroy_range */
-         atomic_inc(&kvm->mm->context.protected_count);
+         /*
+          * Nothing to do if the counter was already 0. Otherwise make sure
+          * the counter does not reach 0 before calling s390_uv_destroy_range.
+          */
+         if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
+                 return 0;

          *rc = 1;
          /* If the current VM is protected, destroy it */
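
The s390 fix relies on atomic_inc_not_zero(): the count is only pinned if teardown has not already dropped it to zero, which closes the race with the asynchronous destroy path. A minimal user-space sketch of that inc-if-not-zero pattern (C11 atomics, hypothetical names; the kernel helper is built on the same compare-and-swap loop):

#include <stdatomic.h>
#include <stdbool.h>

/* Pin a reference only if the counter is still non-zero; a counter that has
 * already dropped to zero means teardown finished and there is nothing to do. */
static bool get_if_live(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                /* On failure, 'old' is reloaded with the current value and the loop retries. */
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;
        }
        return false;
}

In the diff above, the failed increment maps to the early "return 0": there is no protected VM left to destroy.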

arch/s390/mm/gmap.c (+1)
···
          page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
          if (!page)
                  return -ENOMEM;
+         page->index = 0;
          table = page_to_virt(page);
          memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));


include/kvm/arm_vgic.h (+1 -1)
···

  int vgic_v4_load(struct kvm_vcpu *vcpu);
  void vgic_v4_commit(struct kvm_vcpu *vcpu);
- int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
+ int vgic_v4_put(struct kvm_vcpu *vcpu);

  /* CPU HP callbacks */
  void kvm_vgic_cpu_up(void);