Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:
"PPC:

- Close a hole which could possibly lead to the host timebase getting
out of sync.

- Three fixes relating to PTEs and TLB entries for radix guests.

- Fix a bug which could lead to an interrupt never getting delivered
to the guest, if it is pending for a guest vCPU when the vCPU gets
offlined.

s390:

- Fix false negatives in VSIE validity check (Cc stable)

x86:

- Fix time drift of VMX preemption timer when a guest uses LAPIC
timer in periodic mode (Cc stable)

- Unconditionally expose CPUID.IA32_ARCH_CAPABILITIES to allow
migration from hosts that don't need retpoline mitigation (Cc
stable)

- Fix guest crashes on reboot by properly coupling CR4.OSXSAVE and
CPUID.OSXSAVE (Cc stable)

- Report correct RIP after Hyper-V hypercall #UD (introduced in
-rc6)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86: fix #UD address of failed Hyper-V hypercalls
kvm: x86: IA32_ARCH_CAPABILITIES is always supported
KVM: x86: Update cpuid properly when CR4.OSXAVE or CR4.PKE is changed
x86/kvm: fix LAPIC timer drift when guest uses periodic mode
KVM: s390: vsie: fix < 8k check for the itdba
KVM: PPC: Book 3S HV: Do ptesync in radix guest exit path
KVM: PPC: Book3S HV: XIVE: Resend re-routed interrupts on CPU priority change
KVM: PPC: Book3S HV: Make radix clear pte when unmapping
KVM: PPC: Book3S HV: Make radix use correct tlbie sequence in kvmppc_radix_tlbie_page
KVM: PPC: Book3S HV: Snapshot timebase offset on guest entry

+198 -75
+1
arch/powerpc/include/asm/kvm_book3s.h
··· 96 96 struct kvm_vcpu *runner; 97 97 struct kvm *kvm; 98 98 u64 tb_offset; /* guest timebase - host timebase */ 99 + u64 tb_offset_applied; /* timebase offset currently in force */ 99 100 ulong lpcr; 100 101 u32 arch_compat; 101 102 ulong pcr;
+1
arch/powerpc/kernel/asm-offsets.c
··· 562 562 OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); 563 563 OFFSET(VCORE_KVM, kvmppc_vcore, kvm); 564 564 OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); 565 + OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied); 565 566 OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); 566 567 OFFSET(VCORE_PCR, kvmppc_vcore, pcr); 567 568 OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
+3 -3
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 162 162 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) 163 163 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) 164 164 : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); 165 - asm volatile("ptesync": : :"memory"); 165 + asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); 166 166 } 167 167 168 168 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) ··· 173 173 /* RIC=1 PRS=0 R=1 IS=2 */ 174 174 asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) 175 175 : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); 176 - asm volatile("ptesync": : :"memory"); 176 + asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); 177 177 } 178 178 179 179 unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, ··· 584 584 585 585 ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); 586 586 if (ptep && pte_present(*ptep)) { 587 - old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0, 587 + old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0, 588 588 gpa, shift); 589 589 kvmppc_radix_tlbie_page(kvm, gpa, shift); 590 590 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
+1
arch/powerpc/kvm/book3s_hv.c
··· 2441 2441 vc->in_guest = 0; 2442 2442 vc->napping_threads = 0; 2443 2443 vc->conferring_threads = 0; 2444 + vc->tb_offset_applied = 0; 2444 2445 } 2445 2446 2446 2447 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
+52 -45
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 692 692 22: ld r8,VCORE_TB_OFFSET(r5) 693 693 cmpdi r8,0 694 694 beq 37f 695 + std r8, VCORE_TB_OFFSET_APPL(r5) 695 696 mftb r6 /* current host timebase */ 696 697 add r8,r8,r6 697 698 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ ··· 941 940 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 942 941 8: 943 942 944 - /* 945 - * Set the decrementer to the guest decrementer. 946 - */ 947 - ld r8,VCPU_DEC_EXPIRES(r4) 948 - /* r8 is a host timebase value here, convert to guest TB */ 949 - ld r5,HSTATE_KVM_VCORE(r13) 950 - ld r6,VCORE_TB_OFFSET(r5) 951 - add r8,r8,r6 952 - mftb r7 953 - subf r3,r7,r8 954 - mtspr SPRN_DEC,r3 955 - 956 943 ld r5, VCPU_SPRG0(r4) 957 944 ld r6, VCPU_SPRG1(r4) 958 945 ld r7, VCPU_SPRG2(r4) ··· 993 1004 ld r8,VCORE_LPCR(r5) 994 1005 mtspr SPRN_LPCR,r8 995 1006 isync 1007 + 1008 + /* 1009 + * Set the decrementer to the guest decrementer. 1010 + */ 1011 + ld r8,VCPU_DEC_EXPIRES(r4) 1012 + /* r8 is a host timebase value here, convert to guest TB */ 1013 + ld r5,HSTATE_KVM_VCORE(r13) 1014 + ld r6,VCORE_TB_OFFSET_APPL(r5) 1015 + add r8,r8,r6 1016 + mftb r7 1017 + subf r3,r7,r8 1018 + mtspr SPRN_DEC,r3 996 1019 997 1020 /* Check if HDEC expires soon */ 998 1021 mfspr r3, SPRN_HDEC ··· 1598 1597 1599 1598 guest_bypass: 1600 1599 stw r12, STACK_SLOT_TRAP(r1) 1601 - mr r3, r12 1600 + 1601 + /* Save DEC */ 1602 + /* Do this before kvmhv_commence_exit so we know TB is guest TB */ 1603 + ld r3, HSTATE_KVM_VCORE(r13) 1604 + mfspr r5,SPRN_DEC 1605 + mftb r6 1606 + /* On P9, if the guest has large decr enabled, don't sign extend */ 1607 + BEGIN_FTR_SECTION 1608 + ld r4, VCORE_LPCR(r3) 1609 + andis. r4, r4, LPCR_LD@h 1610 + bne 16f 1611 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1612 + extsw r5,r5 1613 + 16: add r5,r5,r6 1614 + /* r5 is a guest timebase value here, convert to host TB */ 1615 + ld r4,VCORE_TB_OFFSET_APPL(r3) 1616 + subf r5,r4,r5 1617 + std r5,VCPU_DEC_EXPIRES(r9) 1618 + 1602 1619 /* Increment exit count, poke other threads to exit */ 1620 + mr r3, r12 1603 1621 bl kvmhv_commence_exit 1604 1622 nop 1605 1623 ld r9, HSTATE_KVM_VCPU(r13) ··· 1658 1638 add r4,r4,r6 1659 1639 mtspr SPRN_PURR,r3 1660 1640 mtspr SPRN_SPURR,r4 1661 - 1662 - /* Save DEC */ 1663 - ld r3, HSTATE_KVM_VCORE(r13) 1664 - mfspr r5,SPRN_DEC 1665 - mftb r6 1666 - /* On P9, if the guest has large decr enabled, don't sign extend */ 1667 - BEGIN_FTR_SECTION 1668 - ld r4, VCORE_LPCR(r3) 1669 - andis. r4, r4, LPCR_LD@h 1670 - bne 16f 1671 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1672 - extsw r5,r5 1673 - 16: add r5,r5,r6 1674 - /* r5 is a guest timebase value here, convert to host TB */ 1675 - ld r4,VCORE_TB_OFFSET(r3) 1676 - subf r5,r4,r5 1677 - std r5,VCPU_DEC_EXPIRES(r9) 1678 1641 1679 1642 BEGIN_FTR_SECTION 1680 1643 b 8f ··· 1908 1905 cmpwi cr2, r0, 0 1909 1906 beq cr2, 4f 1908 + /* 1909 + * Radix: do eieio; tlbsync; ptesync sequence in case we 1910 + * interrupted the guest between a tlbie and a ptesync. 1911 + */ 1912 + eieio 1913 + tlbsync 1914 + ptesync 1915 + 1911 1916 /* Radix: Handle the case where the guest used an illegal PID */ 1912 1917 LOAD_REG_ADDR(r4, mmu_base_pid) 1913 1918 lwz r3, VCPU_GUEST_PID(r9) ··· 2028 2017 2029 2018 27: 2030 2019 /* Subtract timebase offset from timebase */ 2031 - ld r8,VCORE_TB_OFFSET(r5) 2020 + ld r8, VCORE_TB_OFFSET_APPL(r5) 2032 2021 cmpdi r8,0 2033 2022 beq 17f 2023 + li r0, 0 2024 + std r0, VCORE_TB_OFFSET_APPL(r5) 2034 2025 mftb r6 /* current guest timebase */ 2035 2026 subf r8,r8,r6 2036 2027 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ ··· 2713 2700 add r3, r3, r5 2714 2701 ld r4, HSTATE_KVM_VCPU(r13) 2715 2702 ld r5, HSTATE_KVM_VCORE(r13) 2716 - ld r6, VCORE_TB_OFFSET(r5) 2703 + ld r6, VCORE_TB_OFFSET_APPL(r5) 2717 2704 subf r3, r6, r3 /* convert to host TB value */ 2718 2705 std r3, VCPU_DEC_EXPIRES(r4) ··· 2812 2799 /* Restore guest decrementer */ 2813 2800 ld r3, VCPU_DEC_EXPIRES(r4) 2814 2801 ld r5, HSTATE_KVM_VCORE(r13) 2815 - ld r6, VCORE_TB_OFFSET(r5) 2802 + ld r6, VCORE_TB_OFFSET_APPL(r5) 2816 2803 add r3, r3, r6 /* convert host TB to guest TB value */ 2817 2804 mftb r7 2818 2805 subf r3, r7, r3 ··· 3619 3606 */ 3620 3607 kvmhv_start_timing: 3621 3608 ld r5, HSTATE_KVM_VCORE(r13) 3622 - lbz r6, VCORE_IN_GUEST(r5) 3623 - cmpwi r6, 0 3624 - beq 5f /* if in guest, need to */ 3625 - ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ 3626 - 5: mftb r5 3627 - subf r5, r6, r5 3609 + ld r6, VCORE_TB_OFFSET_APPL(r5) 3610 + mftb r5 3611 + subf r5, r6, r5 /* subtract current timebase offset */ 3628 3612 std r3, VCPU_CUR_ACTIVITY(r4) 3629 3613 std r5, VCPU_ACTIVITY_START(r4) 3630 3614 blr ··· 3632 3622 */ 3633 3623 kvmhv_accumulate_time: 3634 3624 ld r5, HSTATE_KVM_VCORE(r13) 3635 - lbz r8, VCORE_IN_GUEST(r5) 3636 - cmpwi r8, 0 3637 - beq 4f /* if in guest, need to */ 3638 - ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ 3639 - 4: ld r5, VCPU_CUR_ACTIVITY(r4) 3625 + ld r8, VCORE_TB_OFFSET_APPL(r5) 3626 + ld r5, VCPU_CUR_ACTIVITY(r4) 3640 3627 ld r6, VCPU_ACTIVITY_START(r4) 3641 3628 std r3, VCPU_CUR_ACTIVITY(r4) 3642 3629 mftb r7 3643 - subf r7, r8, r7 3630 + subf r7, r8, r7 /* subtract current timebase offset */ 3644 3631 std r7, VCPU_ACTIVITY_START(r4) 3645 3632 cmpdi r5, 0 3646 3633 beqlr
+101 -7
arch/powerpc/kvm/book3s_xive_template.c
··· 11 11 #define XGLUE(a,b) a##b 12 12 #define GLUE(a,b) XGLUE(a,b) 13 13 14 + /* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */ 15 + #define XICS_DUMMY 1 16 + 14 17 static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) 15 18 { 16 19 u8 cppr; ··· 208 205 goto skip_ipi; 209 206 } 210 207 208 + /* If it's the dummy interrupt, continue searching */ 209 + if (hirq == XICS_DUMMY) 210 + goto skip_ipi; 211 + 211 212 /* If fetching, update queue pointers */ 212 213 if (scan_type == scan_fetch) { 213 214 q->idx = idx; ··· 392 385 __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); 393 386 } 394 387 388 + static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive, 389 + struct kvmppc_xive_vcpu *xc) 390 + { 391 + unsigned int prio; 392 + 393 + /* For each priority that is now masked */ 394 + for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { 395 + struct xive_q *q = &xc->queues[prio]; 396 + struct kvmppc_xive_irq_state *state; 397 + struct kvmppc_xive_src_block *sb; 398 + u32 idx, toggle, entry, irq, hw_num; 399 + struct xive_irq_data *xd; 400 + __be32 *qpage; 401 + u16 src; 402 + 403 + idx = q->idx; 404 + toggle = q->toggle; 405 + qpage = READ_ONCE(q->qpage); 406 + if (!qpage) 407 + continue; 408 + 409 + /* For each interrupt in the queue */ 410 + for (;;) { 411 + entry = be32_to_cpup(qpage + idx); 412 + 413 + /* No more ? */ 414 + if ((entry >> 31) == toggle) 415 + break; 416 + irq = entry & 0x7fffffff; 417 + 418 + /* Skip dummies and IPIs */ 419 + if (irq == XICS_DUMMY || irq == XICS_IPI) 420 + goto next; 421 + sb = kvmppc_xive_find_source(xive, irq, &src); 422 + if (!sb) 423 + goto next; 424 + state = &sb->irq_state[src]; 425 + 426 + /* Has it been rerouted ? */ 427 + if (xc->server_num == state->act_server) 428 + goto next; 429 + 430 + /* 431 + * Allright, it *has* been re-routed, kill it from 432 + * the queue. 433 + */ 434 + qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); 435 + 436 + /* Find the HW interrupt */ 437 + kvmppc_xive_select_irq(state, &hw_num, &xd); 438 + 439 + /* If it's not an LSI, set PQ to 11 the EOI will force a resend */ 440 + if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) 441 + GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11); 442 + 443 + /* EOI the source */ 444 + GLUE(X_PFX,source_eoi)(hw_num, xd); 445 + 446 + next: 447 + idx = (idx + 1) & q->msk; 448 + if (idx == 0) 449 + toggle ^= 1; 450 + } 451 + } 452 + } 453 + 395 454 X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) 396 455 { 397 456 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 457 + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; 398 458 u8 old_cppr; 399 459 400 460 pr_devel("H_CPPR(cppr=%ld)\n", cppr); ··· 481 407 */ 482 408 smp_mb(); 483 409 484 - /* 485 - * We are masking less, we need to look for pending things 486 - * to deliver and set VP pending bits accordingly to trigger 487 - * a new interrupt otherwise we might miss MFRR changes for 488 - * which we have optimized out sending an IPI signal. 489 - */ 490 - if (cppr > old_cppr) 410 + if (cppr > old_cppr) { 411 + /* 412 + * We are masking less, we need to look for pending things 413 + * to deliver and set VP pending bits accordingly to trigger 414 + * a new interrupt otherwise we might miss MFRR changes for 415 + * which we have optimized out sending an IPI signal. 416 + */ 491 417 GLUE(X_PFX,push_pending_to_hw)(xc); 418 + } else { 419 + /* 420 + * We are masking more, we need to check the queue for any 421 + * interrupt that has been routed to another CPU, take 422 + * it out (replace it with the dummy) and retrigger it. 423 + * 424 + * This is necessary since those interrupts may otherwise 425 + * never be processed, at least not until this CPU restores 426 + * its CPPR. 427 + * 428 + * This is in theory racy vs. HW adding new interrupts to 429 + * the queue. In practice this works because the interesting 430 + * cases are when the guest has done a set_xive() to move the 431 + * interrupt away, which flushes the xive, followed by the 432 + * target CPU doing a H_CPPR. So any new interrupt coming into 433 + * the queue must still be routed to us and isn't a source 434 + * of concern. 435 + */ 436 + GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc); 437 + } 492 438 493 439 /* Apply new CPPR */ 494 440 xc->hw_cppr = cppr;
+1 -1
arch/s390/kvm/vsie.c
··· 578 578 579 579 gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; 580 580 if (gpa && (scb_s->ecb & ECB_TE)) { 581 - if (!(gpa & ~0x1fffU)) { 581 + if (!(gpa & ~0x1fffUL)) { 582 582 rc = set_validity_icpt(scb_s, 0x0080U); 583 583 goto unpin; 584 584 }
+5
arch/x86/kvm/cpuid.c
··· 495 495 entry->ecx &= ~F(PKU); 496 496 entry->edx &= kvm_cpuid_7_0_edx_x86_features; 497 497 cpuid_mask(&entry->edx, CPUID_7_EDX); 498 + /* 499 + * We emulate ARCH_CAPABILITIES in software even 500 + * if the host doesn't support it. 501 + */ 502 + entry->edx |= F(ARCH_CAPABILITIES); 498 503 } else { 499 504 entry->ebx = 0; 500 505 entry->ecx = 0;
+11 -8
arch/x86/kvm/hyperv.c
··· 1260 1260 } 1261 1261 } 1262 1262 1263 + static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) 1264 + { 1265 + kvm_hv_hypercall_set_result(vcpu, result); 1266 + ++vcpu->stat.hypercalls; 1267 + return kvm_skip_emulated_instruction(vcpu); 1268 + } 1269 + 1263 1270 static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) 1264 1271 { 1265 - struct kvm_run *run = vcpu->run; 1266 - 1267 - kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); 1268 - return kvm_skip_emulated_instruction(vcpu); 1272 + return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); 1269 1273 } 1270 1274 1271 1275 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) ··· 1354 1350 /* Hypercall continuation is not supported yet */ 1355 1351 if (rep_cnt || rep_idx) { 1356 1352 ret = HV_STATUS_INVALID_HYPERCALL_CODE; 1357 - goto set_result; 1353 + goto out; 1358 1354 } 1359 1355 1360 1356 switch (code) { ··· 1385 1381 break; 1386 1382 } 1387 1383 1388 - set_result: 1389 - kvm_hv_hypercall_set_result(vcpu, ret); 1390 - return 1; 1384 + out: 1385 + return kvm_hv_hypercall_complete(vcpu, ret); 1391 1386 } 1392 1387 1393 1388 void kvm_hv_init_vm(struct kvm *kvm)
+14 -2
arch/x86/kvm/lapic.c
··· 1522 1522 1523 1523 static void advance_periodic_target_expiration(struct kvm_lapic *apic) 1524 1524 { 1525 - apic->lapic_timer.tscdeadline += 1526 - nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); 1525 + ktime_t now = ktime_get(); 1526 + u64 tscl = rdtsc(); 1527 + ktime_t delta; 1528 + 1529 + /* 1530 + * Synchronize both deadlines to the same time source or 1531 + * differences in the periods (caused by differences in the 1532 + * underlying clocks or numerical approximation errors) will 1533 + * cause the two to drift apart over time as the errors 1534 + * accumulate. 1535 + */ 1527 1536 apic->lapic_timer.target_expiration = 1528 1537 ktime_add_ns(apic->lapic_timer.target_expiration, 1529 1538 apic->lapic_timer.period); 1539 + delta = ktime_sub(apic->lapic_timer.target_expiration, now); 1540 + apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + 1541 + nsec_to_cycles(apic->vcpu, delta); 1530 1542 } 1531 1543 1532 1544 static void start_sw_period(struct kvm_lapic *apic)
+8 -9
arch/x86/kvm/x86.c
··· 6671 6671 unsigned long nr, a0, a1, a2, a3, ret; 6672 6672 int op_64_bit; 6673 6673 6674 - if (kvm_hv_hypercall_enabled(vcpu->kvm)) { 6675 - if (!kvm_hv_hypercall(vcpu)) 6676 - return 0; 6677 - goto out; 6678 - } 6674 + if (kvm_hv_hypercall_enabled(vcpu->kvm)) 6675 + return kvm_hv_hypercall(vcpu); 6679 6676 6680 6677 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 6681 6678 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); ··· 6693 6696 6694 6697 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 6695 6698 ret = -KVM_EPERM; 6696 - goto out_error; 6699 + goto out; 6697 6700 } 6698 6701 6699 6702 switch (nr) { ··· 6713 6716 ret = -KVM_ENOSYS; 6714 6717 break; 6715 6718 } 6716 - out_error: 6719 + out: 6717 6720 if (!op_64_bit) 6718 6721 ret = (u32)ret; 6719 6722 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 6720 6723 6721 - out: 6722 6724 ++vcpu->stat.hypercalls; 6723 6725 return kvm_skip_emulated_instruction(vcpu); 6724 6726 } ··· 7976 7980 { 7977 7981 struct msr_data apic_base_msr; 7978 7982 int mmu_reset_needed = 0; 7983 + int cpuid_update_needed = 0; 7979 7984 int pending_vec, max_bits, idx; 7980 7985 struct desc_ptr dt; 7981 7986 int ret = -EINVAL; ··· 8015 8018 vcpu->arch.cr0 = sregs->cr0; 8016 8019 8017 8020 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 8021 + cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & 8022 + (X86_CR4_OSXSAVE | X86_CR4_PKE)); 8018 8023 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 8019 - if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 8024 + if (cpuid_update_needed) 8020 8025 kvm_update_cpuid(vcpu); 8021 8026 8022 8027 idx = srcu_read_lock(&vcpu->kvm->srcu);