Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
KVM: ia64: Makefile fix for forcing to re-generate asm-offsets.h
KVM: Future-proof device assignment ABI
KVM: ia64: Fix halt emulation logic
KVM: Fix guest shared interrupt with in-kernel irqchip
KVM: MMU: sync root on paravirt TLB flush

14 files changed, 140 insertions(+), 58 deletions(-)
arch/ia64/include/asm/kvm_host.h (+5 -1)
···
         long itc_offset;
         unsigned long itc_check;
         unsigned long timer_check;
-        unsigned long timer_pending;
+        unsigned int timer_pending;
+        unsigned int timer_fired;
 
         unsigned long vrr[8];
         unsigned long ibr[8];
···
         struct list_head assigned_dev_head;
         struct dmar_domain *intel_iommu_domain;
         struct hlist_head irq_ack_notifier_list;
+
+        unsigned long irq_sources_bitmap;
+        unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };
 
 union cpuid3_t {
arch/ia64/kvm/Makefile (+6 -2)
···
                 echo ""; \
                 echo "#endif" ) > $@
 endef
+
 # We use internal rules to avoid the "is up to date" message from make
-arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
+arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
+                        $(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
+                        $(wildcard $(srctree)/include/linux/*.h)
         $(call if_changed_dep,cc_s_c)
 
 $(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
         $(call cmd,offsets)
+
+FORCE : $(obj)/$(offsets-file)
 
 #
 # Makefile for Kernel-based Virtual Machine module
···
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
 
-FORCE : $(obj)/$(offsets-file)
 EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
         vtlb.o process.o
arch/ia64/kvm/kvm-ia64.c (+43 -41)
···
         struct kvm *kvm = vcpu->kvm;
         struct call_data call_data;
         int i;
+
         call_data.ptc_g_data = p->u.ptc_g_data;
 
         for (i = 0; i < KVM_MAX_VCPUS; i++) {
···
         ktime_t kt;
         long itc_diff;
         unsigned long vcpu_now_itc;
-
         unsigned long expires;
         struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
         unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
         struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
 
-        vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
-
-        if (time_after(vcpu_now_itc, vpd->itm)) {
-                vcpu->arch.timer_check = 1;
-                return 1;
-        }
-        itc_diff = vpd->itm - vcpu_now_itc;
-        if (itc_diff < 0)
-                itc_diff = -itc_diff;
-
-        expires = div64_u64(itc_diff, cyc_per_usec);
-        kt = ktime_set(0, 1000 * expires);
-        vcpu->arch.ht_active = 1;
-        hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
-
         if (irqchip_in_kernel(vcpu->kvm)) {
+
+                vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+
+                if (time_after(vcpu_now_itc, vpd->itm)) {
+                        vcpu->arch.timer_check = 1;
+                        return 1;
+                }
+                itc_diff = vpd->itm - vcpu_now_itc;
+                if (itc_diff < 0)
+                        itc_diff = -itc_diff;
+
+                expires = div64_u64(itc_diff, cyc_per_usec);
+                kt = ktime_set(0, 1000 * expires);
+
+                down_read(&vcpu->kvm->slots_lock);
+                vcpu->arch.ht_active = 1;
+                hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+
                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                 kvm_vcpu_block(vcpu);
                 hrtimer_cancel(p_ht);
                 vcpu->arch.ht_active = 0;
+
+                if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+                        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+                                vcpu->arch.mp_state =
+                                        KVM_MP_STATE_RUNNABLE;
+                up_read(&vcpu->kvm->slots_lock);
 
                 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                         return -EINTR;
···
 
 static const int kvm_vti_max_exit_handlers =
                 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
 
 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
···
 again:
         preempt_disable();
-
-        kvm_prepare_guest_switch(vcpu);
         local_irq_disable();
 
         if (signal_pending(current)) {
···
         vcpu->guest_mode = 1;
         kvm_guest_enter();
-
+        down_read(&vcpu->kvm->slots_lock);
         r = vti_vcpu_run(vcpu, kvm_run);
         if (r < 0) {
                 local_irq_enable();
···
          * But we need to prevent reordering, hence this barrier():
          */
         barrier();
-
         kvm_guest_exit();
-
+        up_read(&vcpu->kvm->slots_lock);
         preempt_enable();
 
         r = kvm_handle_exit(kvm_run, vcpu);
···
         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                 kvm_vcpu_block(vcpu);
+                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                 vcpu_put(vcpu);
                 return -EAGAIN;
         }
···
         kvm_build_io_pmt(kvm);
 
         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+
+        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 }
 
 struct kvm *kvm_arch_create_vm(void)
···
                 goto out;
         if (irqchip_in_kernel(kvm)) {
                 mutex_lock(&kvm->lock);
-                kvm_ioapic_set_irq(kvm->arch.vioapic,
-                                irq_event.irq,
-                                irq_event.level);
+                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+                            irq_event.irq, irq_event.level);
                 mutex_unlock(&kvm->lock);
                 r = 0;
         }
···
         wait_queue_head_t *q;
 
         vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+        q = &vcpu->wq;
+
         if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
                 goto out;
 
-        q = &vcpu->wq;
-        if (waitqueue_active(q)) {
-                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+        if (waitqueue_active(q))
                 wake_up_interruptible(q);
-        }
+
 out:
+        vcpu->arch.timer_fired = 1;
         vcpu->arch.timer_check = 1;
         return HRTIMER_NORESTART;
 }
···
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
         int ipi_pcpu = vcpu->cpu;
+        int cpu = get_cpu();
 
         if (waitqueue_active(&vcpu->wq))
                 wake_up_interruptible(&vcpu->wq);
 
-        if (vcpu->guest_mode)
+        if (vcpu->guest_mode && cpu != ipi_pcpu)
                 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+        put_cpu();
 }
···
         if (!test_and_set_bit(vec, &vpd->irr[0])) {
                 vcpu->arch.irq_new_pending = 1;
-                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-                        kvm_vcpu_kick(vcpu);
-                else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-                        if (waitqueue_active(&vcpu->wq))
-                                wake_up_interruptible(&vcpu->wq);
-                }
+                kvm_vcpu_kick(vcpu);
                 return 1;
         }
         return 0;
···
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-        return 0;
+        return vcpu->arch.timer_fired;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
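For context on the kvm_vcpu_kick() hunk above: the fix follows the usual kernel pattern of pinning the caller with get_cpu() so the comparison against the target vcpu's CPU is stable, and skipping the IPI when the vcpu is already running on the current CPU. A paraphrased, commented sketch (not the literal patch text):

    void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
    {
            int ipi_pcpu = vcpu->cpu;
            int cpu = get_cpu();    /* disables preemption, returns this CPU */

            if (waitqueue_active(&vcpu->wq))
                    wake_up_interruptible(&vcpu->wq);

            /* Only send a cross-CPU interrupt if the vcpu is in guest
             * mode on some *other* CPU; a self-IPI would be pointless. */
            if (vcpu->guest_mode && cpu != ipi_pcpu)
                    smp_call_function_single(ipi_pcpu, vcpu_kick_intr,
                                             vcpu, 0);
            put_cpu();              /* re-enables preemption */
    }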
arch/ia64/kvm/kvm_fw.c (+7 -2)
···
         return index;
 }
 
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.timer_pending = 1;
+        vcpu->arch.timer_fired = 0;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 
···
                 break;
         case PAL_HALT_LIGHT:
         {
-                vcpu->arch.timer_pending = 1;
                 INIT_PAL_STATUS_SUCCESS(result);
+                prepare_for_halt(vcpu);
                 if (kvm_highest_pending_irq(vcpu) == -1)
                         ret = kvm_emulate_halt(vcpu);
-
         }
         break;
 
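Together with the kvm-ia64.c hunks above, prepare_for_halt() forms a small handshake around the new timer_pending/timer_fired pair. A simplified sketch of the sequence, with comments naming where each step lives (paraphrased, not literal patch text):

    prepare_for_halt(vcpu);                 /* PAL_HALT_LIGHT: arm the handshake;
                                               timer_pending = 1, timer_fired = 0 */
    if (kvm_highest_pending_irq(vcpu) == -1)
            ret = kvm_emulate_halt(vcpu);   /* vcpu blocks */

    /* hlt_timer_fn(), on expiry:    vcpu->arch.timer_fired = 1;          */
    /* kvm_cpu_has_pending_timer():  return vcpu->arch.timer_fired;
     * so a fired timer now unblocks the halted vcpu instead of being
     * reported as "no pending timer".                                    */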
arch/ia64/kvm/process.c (+1 -1)
···
                         if (!(VCPU(v, itv) & (1 << 16))) {
                                 vcpu_pend_interrupt(v, VCPU(v, itv)
                                                 & 0xff);
-                        VMX(v, itc_check) = 0;
+                                VMX(v, itc_check) = 0;
                         } else {
                                 v->arch.timer_pending = 1;
                         }
arch/x86/include/asm/kvm_host.h (+3)
···
 
         struct page *ept_identity_pagetable;
         bool ept_identity_pagetable_done;
+
+        unsigned long irq_sources_bitmap;
+        unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };
 
 struct kvm_vm_stat {
arch/x86/kvm/i8254.c (+9 -2)
···
         if (!pit)
                 return NULL;
 
+        mutex_lock(&kvm->lock);
+        pit->irq_source_id = kvm_request_irq_source_id(kvm);
+        mutex_unlock(&kvm->lock);
+        if (pit->irq_source_id < 0)
+                return NULL;
+
         mutex_init(&pit->pit_state.lock);
         mutex_lock(&pit->pit_state.lock);
         spin_lock_init(&pit->pit_state.inject_lock);
···
         mutex_lock(&kvm->arch.vpit->pit_state.lock);
         timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
         hrtimer_cancel(timer);
+        kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
         kfree(kvm->arch.vpit);
 }
···
 static void __inject_pit_timer_intr(struct kvm *kvm)
 {
         mutex_lock(&kvm->lock);
-        kvm_set_irq(kvm, 0, 1);
-        kvm_set_irq(kvm, 0, 0);
+        kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
+        kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
         mutex_unlock(&kvm->lock);
 }
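The reason the PIT now owns a dedicated irq_source_id is visible in __inject_pit_timer_intr() above: with per-source state, the PIT's 1-then-0 pulse only toggles the PIT's own bit, so it can no longer pull down a line that another source is still driving. An illustration of the resulting behaviour (the concurrent userspace assertion on GSI 0 is hypothetical):

    kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 0, 1); /* line -> 1 */
    kvm_set_irq(kvm, pit->irq_source_id, 0, 1);          /* line stays 1 */
    kvm_set_irq(kvm, pit->irq_source_id, 0, 0);          /* line STAYS 1:
                                                            userspace's bit
                                                            is still set */
    kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 0, 0); /* line -> 0 */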
arch/x86/kvm/i8254.h (+1)
···
         struct kvm_io_device speaker_dev;
         struct kvm *kvm;
         struct kvm_kpit_state pit_state;
+        int irq_source_id;
 };
 
 #define KVM_PIT_BASE_ADDRESS 0x40
arch/x86/kvm/mmu.c (+1)
···
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
         kvm_x86_ops->tlb_flush(vcpu);
+        set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
         return 1;
 }
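Setting KVM_REQ_MMU_SYNC here defers the shadow-root sync to the next guest entry instead of doing it in the hypercall itself. The consuming side is not part of this diff; in the x86 vcpu entry path of this era it looks roughly like the following (shown for context only):

    if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
            kvm_mmu_sync_roots(vcpu);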
arch/x86/kvm/x86.c (+5 -1)
···
                 goto out;
         if (irqchip_in_kernel(kvm)) {
                 mutex_lock(&kvm->lock);
-                kvm_set_irq(kvm, irq_event.irq, irq_event.level);
+                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+                            irq_event.irq, irq_event.level);
                 mutex_unlock(&kvm->lock);
                 r = 0;
         }
···
 
         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+
+        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 
         return kvm;
 }
include/linux/kvm.h (+6)
···
         __u32 busnr;
         __u32 devfn;
         __u32 flags;
+        union {
+                __u32 reserved[12];
+        };
 };
 
 struct kvm_assigned_irq {
···
         __u32 host_irq;
         __u32 guest_irq;
         __u32 flags;
+        union {
+                __u32 reserved[12];
+        };
 };
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
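The anonymous unions of __u32 reserved[12] pad both ioctl structs to a fixed size, so later kernels can add fields inside the union without changing sizeof() or breaking userspace built against the old layout. A hypothetical future extension (not part of this patch) could look like:

    struct kvm_assigned_irq {
            __u32 assigned_dev_id;
            __u32 host_irq;
            __u32 guest_irq;
            __u32 flags;
            union {
                    struct {                /* hypothetical new fields;  */
                            __u32 addr_lo;  /* the union keeps sizeof()  */
                            __u32 addr_hi;  /* unchanged, so the binary  */
                            __u32 data;     /* ioctl ABI stays stable    */
                    } guest_msi;
                    __u32 reserved[12];
            };
    };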
include/linux/kvm_host.h (+6 -1)
···
 #define KVM_REQ_UNHALT 6
 #define KVM_REQ_MMU_SYNC 7
 
+#define KVM_USERSPACE_IRQ_SOURCE_ID 0
+
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
···
         int host_irq;
         int guest_irq;
         int irq_requested;
+        int irq_source_id;
         struct pci_dev *dev;
         struct kvm *kvm;
 };
-void kvm_set_irq(struct kvm *kvm, int irq, int level);
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                    struct kvm_irq_ack_notifier *kian);
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                      struct kvm_irq_ack_notifier *kian);
+int kvm_request_irq_source_id(struct kvm *kvm);
+void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
virt/kvm/irq_comm.c (+39 -3)
···
 #include "ioapic.h"
 
 /* This should be called with the kvm->lock mutex held */
-void kvm_set_irq(struct kvm *kvm, int irq, int level)
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 {
+        unsigned long *irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
+
+        /* Logical OR for level trig interrupt */
+        if (level)
+                set_bit(irq_source_id, irq_state);
+        else
+                clear_bit(irq_source_id, irq_state);
+
         /* Not possible to detect if the guest uses the PIC or the
          * IOAPIC. So set the bit in both. The guest will ignore
          * writes to the unused one.
          */
-        kvm_ioapic_set_irq(kvm->arch.vioapic, irq, level);
+        kvm_ioapic_set_irq(kvm->arch.vioapic, irq, !!(*irq_state));
 #ifdef CONFIG_X86
-        kvm_pic_set_irq(pic_irqchip(kvm), irq, level);
+        kvm_pic_set_irq(pic_irqchip(kvm), irq, !!(*irq_state));
 #endif
 }
···
                                     struct kvm_irq_ack_notifier *kian)
 {
         hlist_del(&kian->link);
+}
+
+/* The caller must hold kvm->lock mutex */
+int kvm_request_irq_source_id(struct kvm *kvm)
+{
+        unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
+        int irq_source_id = find_first_zero_bit(bitmap,
+                        sizeof(kvm->arch.irq_sources_bitmap));
+        if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
+                printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
+                irq_source_id = -EFAULT;
+        } else
+                set_bit(irq_source_id, bitmap);
+        return irq_source_id;
+}
+
+void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
+{
+        int i;
+
+        if (irq_source_id <= 0 ||
+            irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
+                printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
+                return;
+        }
+        for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
+                clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
+        clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
 }
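The OR logic above keeps one word per GSI in kvm->arch.irq_states: bit N holds the level last asserted by source N, and the level handed to the PIC/IOAPIC is simply "any bit set". A minimal userspace model of the same bookkeeping (illustrative only, not kernel code):

    #include <stdio.h>

    /* Toy model of one kvm->arch.irq_states[] entry: bit N is the level
     * last asserted by irq source N; the wire level is the OR of all bits. */
    static unsigned long line_state;

    static void model_set_irq(int source_id, int level)
    {
            if (level)
                    line_state |= 1UL << source_id;
            else
                    line_state &= ~(1UL << source_id);
            printf("wire level = %d\n", line_state != 0);
    }

    int main(void)
    {
            model_set_irq(0, 1);    /* userspace asserts   -> wire 1 */
            model_set_irq(1, 1);    /* device asserts      -> wire 1 */
            model_set_irq(0, 0);    /* userspace deasserts -> wire 1 */
            model_set_irq(1, 0);    /* device deasserts    -> wire 0 */
            return 0;
    }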
virt/kvm/kvm_main.c (+8 -4)
···
          */
         mutex_lock(&assigned_dev->kvm->lock);
         kvm_set_irq(assigned_dev->kvm,
+                    assigned_dev->irq_source_id,
                     assigned_dev->guest_irq, 1);
         mutex_unlock(&assigned_dev->kvm->lock);
         kvm_put_kvm(assigned_dev->kvm);
 }
 
-/* FIXME: Implement the OR logic needed to make shared interrupts on
- * this line behave properly
- */
 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
 {
         struct kvm_assigned_dev_kernel *assigned_dev =
···
 
         dev = container_of(kian, struct kvm_assigned_dev_kernel,
                            ack_notifier);
-        kvm_set_irq(dev->kvm, dev->guest_irq, 0);
+        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
         enable_irq(dev->host_irq);
 }
···
         free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 
         kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+        kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
 
         if (cancel_work_sync(&assigned_dev->interrupt_work))
                 /* We had pending work. That means we will have to take
···
         match->ack_notifier.gsi = assigned_irq->guest_irq;
         match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
         kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
+        r = kvm_request_irq_source_id(kvm);
+        if (r < 0)
+                goto out_release;
+        else
+                match->irq_source_id = r;
 
         /* Even though this is PCI, we don't want to use shared
          * interrupts. Sharing host devices with guest-assigned devices