Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM:

- Plug a race in the stage-2 mapping code where the IPA and the PA
would end up being out of sync

- Make better use of the bitmap API (bitmap_zero, bitmap_zalloc...)

- FP/SVE/SME documentation update, in the hope that this field
becomes clearer...

- Add workaround for Apple SEIS brokenness to a new SoC

- Random comment fixes

x86:

- add MSR_IA32_TSX_CTRL into msrs_to_save

- fixes for XCR0 handling in SGX enclaves

Generic:

- Fix vcpu_array[0] races

- Fix race between starting a VM and 'reboot -f'"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: VMX: add MSR_IA32_TSX_CTRL into msrs_to_save
KVM: x86: Don't adjust guest's CPUID.0x12.1 (allowed SGX enclave XFRM)
KVM: VMX: Don't rely _only_ on CPUID to enforce XCR0 restrictions for ECREATE
KVM: Fix vcpu_array[0] races
KVM: VMX: Fix header file dependency of asm/vmx.h
KVM: Don't enable hardware after a restart/shutdown is initiated
KVM: Use syscore_ops instead of reboot_notifier to hook restart/shutdown
KVM: arm64: vgic: Add Apple M2 PRO/MAX cpus to the list of broken SEIS implementations
KVM: arm64: Clarify host SME state management
KVM: arm64: Restructure check for SVE support in FP trap handler
KVM: arm64: Document check for TIF_FOREIGN_FPSTATE
KVM: arm64: Fix repeated words in comments
KVM: arm64: Constify start/end/phys fields of the pgtable walker data
KVM: arm64: Infer PA offset from VA in hyp map walker
KVM: arm64: Infer the PA offset from IPA in stage-2 map walker
KVM: arm64: Use the bitmap API to allocate bitmaps
KVM: arm64: Slightly optimize flush_context()

Changed files: +129 -66

arch/arm64/include/asm/cputype.h (+8)
···
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029
 #define APPLE_CPU_PART_M2_BLIZZARD 0x032
 #define APPLE_CPU_PART_M2_AVALANCHE 0x033
+#define APPLE_CPU_PART_M2_BLIZZARD_PRO 0x034
+#define APPLE_CPU_PART_M2_AVALANCHE_PRO 0x035
+#define APPLE_CPU_PART_M2_BLIZZARD_MAX 0x038
+#define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
 
 #define AMPERE_CPU_PART_AMPERE1 0xAC3
 
···
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
 #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
 #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
+#define MIDR_APPLE_M2_BLIZZARD_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_PRO)
+#define MIDR_APPLE_M2_AVALANCHE_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_PRO)
+#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
+#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */

arch/arm64/include/asm/kvm_pgtable.h (+1)
···
     kvm_pte_t old;
     void *arg;
     struct kvm_pgtable_mm_ops *mm_ops;
+    u64 start;
     u64 addr;
     u64 end;
     u32 level;

arch/arm64/kvm/fpsimd.c (+17 -9)
···
 
     fpsimd_kvm_prepare();
 
+    /*
+     * We will check TIF_FOREIGN_FPSTATE just before entering the
+     * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
+     * FP_STATE_FREE if the flag set.
+     */
     vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
 
     vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
     if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
         vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
 
-    /*
-     * We don't currently support SME guests but if we leave
-     * things in streaming mode then when the guest starts running
-     * FPSIMD or SVE code it may generate SME traps so as a
-     * special case if we are in streaming mode we force the host
-     * state to be saved now and exit streaming mode so that we
-     * don't have to handle any SME traps for valid guest
-     * operations. Do this for ZA as well for now for simplicity.
-     */
     if (system_supports_sme()) {
         vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
         if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
             vcpu_set_flag(vcpu, HOST_SME_ENABLED);
 
+        /*
+         * If PSTATE.SM is enabled then save any pending FP
+         * state and disable PSTATE.SM. If we leave PSTATE.SM
+         * enabled and the guest does not enable SME via
+         * CPACR_EL1.SMEN then operations that should be valid
+         * may generate SME traps from EL1 to EL1 which we
+         * can't intercept and which would confuse the guest.
+         *
+         * Do the same for PSTATE.ZA in the case where there
+         * is state in the registers which has not already
+         * been saved, this is very unlikely to happen.
+         */
         if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
             vcpu->arch.fp_state = FP_STATE_FREE;
             fpsimd_save_and_flush_cpu_state();

arch/arm64/kvm/hyp/include/hyp/switch.h (+10 -2)
···
     sve_guest = vcpu_has_sve(vcpu);
     esr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-    /* Don't handle SVE traps for non-SVE vcpus here: */
-    if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+    /* Only handle traps the vCPU can support here: */
+    switch (esr_ec) {
+    case ESR_ELx_EC_FP_ASIMD:
+        break;
+    case ESR_ELx_EC_SVE:
+        if (!sve_guest)
+            return false;
+        break;
+    default:
         return false;
+    }
 
     /* Valid trap. Switch the context: */

arch/arm64/kvm/hyp/pgtable.c (+32 -9)
···
 struct kvm_pgtable_walk_data {
     struct kvm_pgtable_walker *walker;
 
+    const u64 start;
     u64 addr;
-    u64 end;
+    const u64 end;
 };
 
 static bool kvm_phys_is_valid(u64 phys)
···
         .old = READ_ONCE(*ptep),
         .arg = data->walker->arg,
         .mm_ops = mm_ops,
+        .start = data->start,
         .addr = data->addr,
         .end = data->end,
         .level = level,
···
                              struct kvm_pgtable_walker *walker)
 {
     struct kvm_pgtable_walk_data walk_data = {
+        .start = ALIGN_DOWN(addr, PAGE_SIZE),
         .addr = ALIGN_DOWN(addr, PAGE_SIZE),
         .end = PAGE_ALIGN(walk_data.addr + size),
         .walker = walker,
···
 }
 
 struct hyp_map_data {
-    u64 phys;
+    const u64 phys;
     kvm_pte_t attr;
 };
···
 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                     struct hyp_map_data *data)
 {
+    u64 phys = data->phys + (ctx->addr - ctx->start);
     kvm_pte_t new;
-    u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
 
     if (!kvm_block_mapping_supported(ctx, phys))
         return false;
 
-    data->phys += granule;
     new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
     if (ctx->old == new)
         return true;
···
 }
 
 struct stage2_map_data {
-    u64 phys;
+    const u64 phys;
     kvm_pte_t attr;
     u8 owner_id;
···
     return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
 }
 
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+                                       const struct stage2_map_data *data)
+{
+    u64 phys = data->phys;
+
+    /*
+     * Stage-2 walks to update ownership data are communicated to the map
+     * walker using an invalid PA. Avoid offsetting an already invalid PA,
+     * which could overflow and make the address valid again.
+     */
+    if (!kvm_phys_is_valid(phys))
+        return phys;
+
+    /*
+     * Otherwise, work out the correct PA based on how far the walk has
+     * gotten.
+     */
+    return phys + (ctx->addr - ctx->start);
+}
+
 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
                                         struct stage2_map_data *data)
 {
+    u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
     if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
         return false;
 
-    return kvm_block_mapping_supported(ctx, data->phys);
+    return kvm_block_mapping_supported(ctx, phys);
 }
 
 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                       struct stage2_map_data *data)
 {
     kvm_pte_t new;
-    u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+    u64 phys = stage2_map_walker_phys_addr(ctx, data);
+    u64 granule = kvm_granule_size(ctx->level);
     struct kvm_pgtable *pgt = data->mmu->pgt;
     struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
···
 
     stage2_make_pte(ctx, new);
 
-    if (kvm_phys_is_valid(phys))
-        data->phys += granule;
     return 0;
 }
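
The essence of the IPA/PA race fix in the two map walkers above is that a visit no longer advances a shared data->phys cursor; it derives the output address from how far the walk has progressed, which is safe to recompute if a visit is retried. A minimal standalone sketch of that idea (hypothetical names, not the kernel's actual walker types):

#include <stdint.h>

/* Hypothetical, trimmed-down view of a map-walker visit context. */
struct visit_ctx {
    uint64_t start; /* input address at which the walk began */
    uint64_t addr;  /* input address currently being visited */
};

/*
 * Deriving the output address from the walk position is idempotent:
 * retrying a visit (e.g. after losing a race and re-reading the PTE)
 * recomputes the same value, whereas an incrementing cursor such as
 * "data->phys += granule" advances on every attempt and lets the two
 * address spaces drift out of sync.
 */
static uint64_t visit_phys(const struct visit_ctx *ctx, uint64_t base_phys)
{
    return base_phys + (ctx->addr - ctx->start);
}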

arch/arm64/kvm/inject_fault.c (+1 -1)
···
      * Size Fault at level 0, as if exceeding PARange.
      *
      * Non-LPAE guests will only get the external abort, as there
-     * is no way to to describe the ASF.
+     * is no way to describe the ASF.
      */
     if (vcpu_el1_is_32bit(vcpu) &&
         !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))

arch/arm64/kvm/vgic/vgic-v3.c (+4)
···
     MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
     MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
     MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+    MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+    MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+    MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+    MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
     {},
 };

arch/arm64/kvm/vmid.c (+3 -4)
···
     int cpu;
     u64 vmid;
 
-    bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);
+    bitmap_zero(vmid_map, NUM_USER_VMIDS);
 
     for_each_possible_cpu(cpu) {
         vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
···
      */
     WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
     atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
-    vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
-                       sizeof(*vmid_map), GFP_KERNEL);
+    vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
     if (!vmid_map)
         return -ENOMEM;
···
 
 void __init kvm_arm_vmid_alloc_free(void)
 {
-    kfree(vmid_map);
+    bitmap_free(vmid_map);
 }
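
For reference, the helpers adopted above are the generic ones from <linux/bitmap.h>: bitmap_zalloc() replaces the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(long), ...), bitmap_zero() clears by bit count rather than by longs, and bitmap_free() pairs with bitmap_zalloc(). A small illustrative pattern follows; the ID-allocator names and size are made up, only the bitmap calls are real kernel API:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define NUM_IDS 256             /* illustrative size */

static unsigned long *id_map;   /* one bit per ID */

static int id_map_init(void)
{
    /* Allocate and zero NUM_IDS bits; rounds up to whole longs internally. */
    id_map = bitmap_zalloc(NUM_IDS, GFP_KERNEL);
    return id_map ? 0 : -ENOMEM;
}

static void id_map_reset(void)
{
    bitmap_zero(id_map, NUM_IDS);   /* clear all NUM_IDS bits */
}

static void id_map_exit(void)
{
    bitmap_free(id_map);            /* always pair with bitmap_zalloc() */
}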

arch/x86/include/asm/vmx.h (+2)
···
 
 
 #include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/types.h>
+
 #include <uapi/asm/vmx.h>
 #include <asm/vmxfeatures.h>

arch/x86/kvm/cpuid.c (-16)
···
                                        int nent)
 {
     struct kvm_cpuid_entry2 *best;
-    u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
 
     best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
     if (best) {
···
         cpuid_entry_change(best, X86_FEATURE_MWAIT,
                            vcpu->arch.ia32_misc_enable_msr &
                            MSR_IA32_MISC_ENABLE_MWAIT);
-    }
-
-    /*
-     * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
-     * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
-     * requested XCR0 value. The enclave's XFRM must be a subset of XCRO
-     * at the time of EENTER, thus adjust the allowed XFRM by the guest's
-     * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
-     * '1' even on CPUs that don't support XSAVE.
-     */
-    best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
-    if (best) {
-        best->ecx &= guest_supported_xcr0 & 0xffffffff;
-        best->edx &= guest_supported_xcr0 >> 32;
-        best->ecx |= XFEATURE_MASK_FPSSE;
     }
 }

arch/x86/kvm/vmx/sgx.c (+9 -2)
···
         return 1;
     }
 
-    /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */
+    /*
+     * Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. Note
+     * that the allowed XFRM (XFeature Request Mask) isn't strictly bound
+     * by the supported XCR0. FP+SSE *must* be set in XFRM, even if XSAVE
+     * is unsupported, i.e. even if XCR0 itself is completely unsupported.
+     */
     if ((u32)miscselect & ~sgx_12_0->ebx ||
         (u32)attributes & ~sgx_12_1->eax ||
         (u32)(attributes >> 32) & ~sgx_12_1->ebx ||
         (u32)xfrm & ~sgx_12_1->ecx ||
-        (u32)(xfrm >> 32) & ~sgx_12_1->edx) {
+        (u32)(xfrm >> 32) & ~sgx_12_1->edx ||
+        xfrm & ~(vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE) ||
+        (xfrm & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
         kvm_inject_gp(vcpu, 0);
         return 1;
     }

arch/x86/kvm/x86.c (+5 -1)
···
 #endif
     MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
     MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
-    MSR_IA32_SPEC_CTRL,
+    MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
     MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
     MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
     MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
···
     case MSR_IA32_XFD:
     case MSR_IA32_XFD_ERR:
         if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
+            return;
+        break;
+    case MSR_IA32_TSX_CTRL:
+        if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
             return;
         break;
     default:

virt/kvm/kvm_main.c (+37 -22)
···
     }
 
     vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
-    r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
-    BUG_ON(r == -EBUSY);
+    r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
     if (r)
         goto unlock_vcpu_destroy;
 
     /* Now it's all set up, let userspace reach it */
     kvm_get_kvm(kvm);
     r = create_vcpu_fd(vcpu);
-    if (r < 0) {
-        xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
-        kvm_put_kvm_no_destroy(kvm);
-        goto unlock_vcpu_destroy;
+    if (r < 0)
+        goto kvm_put_xa_release;
+
+    if (KVM_BUG_ON(!!xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
+        r = -EINVAL;
+        goto kvm_put_xa_release;
     }
 
     /*
···
     kvm_create_vcpu_debugfs(vcpu);
     return r;
 
+kvm_put_xa_release:
+    kvm_put_kvm_no_destroy(kvm);
+    xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
 unlock_vcpu_destroy:
     mutex_unlock(&kvm->lock);
     kvm_dirty_ring_free(&vcpu->dirty_ring);
···
 static int hardware_enable_all(void)
 {
     atomic_t failed = ATOMIC_INIT(0);
-    int r = 0;
+    int r;
+
+    /*
+     * Do not enable hardware virtualization if the system is going down.
+     * If userspace initiated a forced reboot, e.g. reboot -f, then it's
+     * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
+     * after kvm_reboot() is called. Note, this relies on system_state
+     * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
+     * hook instead of registering a dedicated reboot notifier (the latter
+     * runs before system_state is updated).
+     */
+    if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
+        system_state == SYSTEM_RESTART)
+        return -EBUSY;
 
     /*
      * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
···
      */
     cpus_read_lock();
     mutex_lock(&kvm_lock);
+
+    r = 0;
 
     kvm_usage_count++;
     if (kvm_usage_count == 1) {
···
     return r;
 }
 
-static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
-                      void *v)
+static void kvm_shutdown(void)
 {
     /*
-     * Some (well, at least mine) BIOSes hang on reboot if
-     * in vmx root mode.
-     *
-     * And Intel TXT required VMX off for all cpu when system shutdown.
+     * Disable hardware virtualization and set kvm_rebooting to indicate
+     * that KVM has asynchronously disabled hardware virtualization, i.e.
+     * that relevant errors and exceptions aren't entirely unexpected.
+     * Some flavors of hardware virtualization need to be disabled before
+     * transferring control to firmware (to perform shutdown/reboot), e.g.
+     * on x86, virtualization can block INIT interrupts, which are used by
+     * firmware to pull APs back under firmware control. Note, this path
+     * is used for both shutdown and reboot scenarios, i.e. neither name is
+     * 100% comprehensive.
      */
     pr_info("kvm: exiting hardware virtualization\n");
     kvm_rebooting = true;
     on_each_cpu(hardware_disable_nolock, NULL, 1);
-    return NOTIFY_OK;
 }
-
-static struct notifier_block kvm_reboot_notifier = {
-    .notifier_call = kvm_reboot,
-    .priority = 0,
-};
 
 static int kvm_suspend(void)
 {
···
 static struct syscore_ops kvm_syscore_ops = {
     .suspend = kvm_suspend,
     .resume = kvm_resume,
+    .shutdown = kvm_shutdown,
 };
 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
 static int hardware_enable_all(void)
···
     if (r)
         return r;
 
-    register_reboot_notifier(&kvm_reboot_notifier);
     register_syscore_ops(&kvm_syscore_ops);
 #endif
···
 err_vcpu_cache:
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
     unregister_syscore_ops(&kvm_syscore_ops);
-    unregister_reboot_notifier(&kvm_reboot_notifier);
     cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
 #endif
     return r;
···
     kvm_async_pf_deinit();
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
     unregister_syscore_ops(&kvm_syscore_ops);
-    unregister_reboot_notifier(&kvm_reboot_notifier);
     cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
 #endif
     kvm_irqfd_exit();
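
The vcpu_array change above follows a reserve-then-publish pattern: the slot is reserved while the vCPU is still private, the vCPU is handed to userspace via its fd, and only then is the pointer stored, so lockless readers of vcpu_array never see a vCPU whose creation could still fail. A rough sketch of the pattern under those assumptions (publish_fd() is hypothetical; the xarray calls are real API):

#include <linux/xarray.h>

/* Sketch only: publish @obj at @idx such that readers never see it early. */
static int publish_object(struct xarray *xa, unsigned long idx, void *obj)
{
    int r;

    r = xa_reserve(xa, idx, GFP_KERNEL);    /* claim the slot; reads as NULL */
    if (r)
        return r;

    r = publish_fd(obj);                    /* hypothetical "make visible" step */
    if (r < 0) {
        xa_release(xa, idx);                /* drop the unused reservation */
        return r;
    }

    /* Storing into a reserved slot with gfp 0 does not allocate and so
     * cannot fail with -ENOMEM; any error here indicates a logic bug. */
    return xa_err(xa_store(xa, idx, obj, 0));
}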