KVM: ia64: Fix halt emulation logic

The common halt logic was changed on the x86 side without a matching update for
ia64. This patch brings the ia64 halt emulation back in line.

Fixes a regression causing guests to hang with more than 2 vcpus.
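
In outline, the reworked halt path ties three pieces together: PAL_HALT_LIGHT
emulation arms the halt (prepare_for_halt), the hlt timer records its expiry in
the new timer_fired flag, and kvm_cpu_has_pending_timer() reports that flag
instead of hard-coding 0. The following condensed model illustrates the flag
protocol (a standalone mock-up, not kernel code: the struct is simplified and
the hrtimer/waitqueue machinery is reduced to plain flags):

#include <stdio.h>

struct vcpu {
        unsigned int timer_pending;     /* guest executed PAL_HALT_LIGHT */
        unsigned int timer_fired;       /* hlt timer expired while halted */
        int halted;
};

/* kvm_fw.c: called from kvm_pal_emul() before halting the vcpu */
static void prepare_for_halt(struct vcpu *v)
{
        v->timer_pending = 1;
        v->timer_fired = 0;
}

/* kvm-ia64.c: hlt_timer_fn() runs when the guest's next timer tick
 * (vpd->itm) is due; it records the expiry and wakes the vcpu. */
static void hlt_timer_fired(struct vcpu *v)
{
        v->timer_fired = 1;
        v->halted = 0;          /* stands in for wake_up_interruptible() */
}

/* kvm-ia64.c: generic code polls this to decide whether a timer
 * interrupt must be injected; before the patch it always returned 0. */
static int kvm_cpu_has_pending_timer(struct vcpu *v)
{
        return v->timer_fired;
}

int main(void)
{
        struct vcpu v = { 0 };

        prepare_for_halt(&v);
        v.halted = 1;
        hlt_timer_fired(&v);    /* timer expires while the vcpu is blocked */
        printf("pending timer: %d\n", kvm_cpu_has_pending_timer(&v));
        return 0;
}

Because timer_fired survives the wakeup, kvm_cpu_has_pending_timer() can still
report an expired halt timer after the vcpu has been unblocked, which the old
always-return-0 version could not.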

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

Authored by Xiantao Zhang and committed by Avi Kivity (decc9016, 5550af4d)

+48 -42
+2 -1
arch/ia64/include/asm/kvm_host.h
···
        long itc_offset;
        unsigned long itc_check;
        unsigned long timer_check;
-       unsigned long timer_pending;
+       unsigned int timer_pending;
+       unsigned int timer_fired;

        unsigned long vrr[8];
        unsigned long ibr[8];
+38 -38
arch/ia64/kvm/kvm-ia64.c
···
        struct kvm *kvm = vcpu->kvm;
        struct call_data call_data;
        int i;
+
        call_data.ptc_g_data = p->u.ptc_g_data;

        for (i = 0; i < KVM_MAX_VCPUS; i++) {
···
        ktime_t kt;
        long itc_diff;
        unsigned long vcpu_now_itc;
-
        unsigned long expires;
        struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
        unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

-       vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
-
-       if (time_after(vcpu_now_itc, vpd->itm)) {
-               vcpu->arch.timer_check = 1;
-               return 1;
-       }
-       itc_diff = vpd->itm - vcpu_now_itc;
-       if (itc_diff < 0)
-               itc_diff = -itc_diff;
-
-       expires = div64_u64(itc_diff, cyc_per_usec);
-       kt = ktime_set(0, 1000 * expires);
-       vcpu->arch.ht_active = 1;
-       hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
-
        if (irqchip_in_kernel(vcpu->kvm)) {
+
+               vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+
+               if (time_after(vcpu_now_itc, vpd->itm)) {
+                       vcpu->arch.timer_check = 1;
+                       return 1;
+               }
+               itc_diff = vpd->itm - vcpu_now_itc;
+               if (itc_diff < 0)
+                       itc_diff = -itc_diff;
+
+               expires = div64_u64(itc_diff, cyc_per_usec);
+               kt = ktime_set(0, 1000 * expires);
+
+               down_read(&vcpu->kvm->slots_lock);
+               vcpu->arch.ht_active = 1;
+               hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+
                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                kvm_vcpu_block(vcpu);
                hrtimer_cancel(p_ht);
                vcpu->arch.ht_active = 0;
+
+               if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+                       if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+                               vcpu->arch.mp_state =
+                                       KVM_MP_STATE_RUNNABLE;
+               up_read(&vcpu->kvm->slots_lock);

                if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                        return -EINTR;
···

 static const int kvm_vti_max_exit_handlers =
                sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}

 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
···

 again:
        preempt_disable();
-
-       kvm_prepare_guest_switch(vcpu);
        local_irq_disable();

        if (signal_pending(current)) {
···

        vcpu->guest_mode = 1;
        kvm_guest_enter();
-
+       down_read(&vcpu->kvm->slots_lock);
        r = vti_vcpu_run(vcpu, kvm_run);
        if (r < 0) {
                local_irq_enable();
···
         * But we need to prevent reordering, hence this barrier():
         */
        barrier();
-
        kvm_guest_exit();
-
+       up_read(&vcpu->kvm->slots_lock);
        preempt_enable();

        r = kvm_handle_exit(kvm_run, vcpu);
···

        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
+               clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                vcpu_put(vcpu);
                return -EAGAIN;
        }
···
        wait_queue_head_t *q;

        vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+       q = &vcpu->wq;
+
        if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
                goto out;

-       q = &vcpu->wq;
-       if (waitqueue_active(q)) {
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+       if (waitqueue_active(q))
                wake_up_interruptible(q);
-       }
+
 out:
+       vcpu->arch.timer_fired = 1;
        vcpu->arch.timer_check = 1;
        return HRTIMER_NORESTART;
 }
···
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
        int ipi_pcpu = vcpu->cpu;
+       int cpu = get_cpu();

        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);

-       if (vcpu->guest_mode)
+       if (vcpu->guest_mode && cpu != ipi_pcpu)
                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+       put_cpu();
 }

 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
···

        if (!test_and_set_bit(vec, &vpd->irr[0])) {
                vcpu->arch.irq_new_pending = 1;
-               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-                       kvm_vcpu_kick(vcpu);
-               else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-                       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-                       if (waitqueue_active(&vcpu->wq))
-                               wake_up_interruptible(&vcpu->wq);
-               }
+               kvm_vcpu_kick(vcpu);
                return 1;
        }
        return 0;
···

 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-       return 0;
+       return vcpu->arch.timer_fired;
 }

 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
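
The kvm_vcpu_kick() hunk above also adds a get_cpu()/put_cpu() pair so the IPI
target is compared against a stable current-CPU number, and it skips the
cross-processor call when the vcpu is already running locally. A userspace
illustration of that guard (hypothetical mock-up: send_ipi() stands in for
smp_call_function_single(), and sched_getcpu() for get_cpu(), which in the
kernel additionally disables preemption):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static void send_ipi(int target_cpu)
{
        printf("IPI -> cpu %d\n", target_cpu);
}

static void kick(int vcpu_cpu, int vcpu_in_guest_mode)
{
        int cpu = sched_getcpu();       /* get_cpu() analogue */

        /* Only interrupt a remote processor if the vcpu is actually
         * executing guest code somewhere other than right here. */
        if (vcpu_in_guest_mode && cpu != vcpu_cpu)
                send_ipi(vcpu_cpu);
}

int main(void)
{
        kick(sched_getcpu(), 1);        /* same CPU: no IPI needed */
        kick(sched_getcpu() + 1, 1);    /* different CPU: IPI sent */
        return 0;
}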
+7 -2
arch/ia64/kvm/kvm_fw.c
···
        return index;
 }

+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.timer_pending = 1;
+       vcpu->arch.timer_fired = 0;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
···
                break;
        case PAL_HALT_LIGHT:
        {
-               vcpu->arch.timer_pending = 1;
                INIT_PAL_STATUS_SUCCESS(result);
+               prepare_for_halt(vcpu);
                if (kvm_highest_pending_irq(vcpu) == -1)
                        ret = kvm_emulate_halt(vcpu);
-
        }
                break;

+1 -1
arch/ia64/kvm/process.c
···
                if (!(VCPU(v, itv) & (1 << 16))) {
                        vcpu_pend_interrupt(v, VCPU(v, itv)
                                & 0xff);
-               VMX(v, itc_check) = 0;
+                       VMX(v, itc_check) = 0;
                } else {
                        v->arch.timer_pending = 1;
                }