Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/xen: event channels delivery on HVM.

Set the callback to receive evtchns from Xen, using the
callback vector delivery mechanism.

The traditional way for receiving event channel notifications from Xen
is via the interrupts from the platform PCI device.
The callback vector is a newer alternative that allows us to receive
notifications on any vcpu and doesn't need any PCI support: we allocate
a vector exclusively to receive events; in the vector handler we don't
need to interact with the vlapic, therefore we avoid a VMEXIT.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

authored by

Sheng Yang and committed by
Jeremy Fitzhardinge
38e20b07 bee6ab53

+119 -8
+3
arch/x86/include/asm/irq_vectors.h
··· 125 125 */ 126 126 #define MCE_SELF_VECTOR 0xeb 127 127 128 + /* Xen vector callback to receive events in a HVM domain */ 129 + #define XEN_HVM_EVTCHN_CALLBACK 0xe9 130 + 128 131 #define NR_VECTORS 256 129 132 130 133 #define FPU_IRQ 13
+3
arch/x86/kernel/entry_32.S
··· 1166 1166 .previous 1167 1167 ENDPROC(xen_failsafe_callback) 1168 1168 1169 + BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, 1170 + xen_evtchn_do_upcall) 1171 + 1169 1172 #endif /* CONFIG_XEN */ 1170 1173 1171 1174 #ifdef CONFIG_FUNCTION_TRACER
+3
arch/x86/kernel/entry_64.S
··· 1329 1329 CFI_ENDPROC 1330 1330 END(xen_failsafe_callback) 1331 1331 1332 + apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ 1333 + xen_hvm_callback_vector xen_evtchn_do_upcall 1334 + 1332 1335 #endif /* CONFIG_XEN */ 1333 1336 1334 1337 /*
+28
arch/x86/xen/enlighten.c
··· 11 11 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 12 12 */ 13 13 14 + #include <linux/cpu.h> 14 15 #include <linux/kernel.h> 15 16 #include <linux/init.h> 16 17 #include <linux/smp.h> ··· 39 38 #include <xen/interface/memory.h> 40 39 #include <xen/features.h> 41 40 #include <xen/page.h> 41 + #include <xen/hvm.h> 42 42 #include <xen/hvc-console.h> 43 43 44 44 #include <asm/paravirt.h> ··· 82 80 void *xen_initial_gdt; 83 81 84 82 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); 83 + __read_mostly int xen_have_vector_callback; 84 + EXPORT_SYMBOL_GPL(xen_have_vector_callback); 85 85 86 86 /* 87 87 * Point at some empty memory to start with. We map the real shared_info ··· 1281 1277 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 1282 1278 } 1283 1279 1280 + static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, 1281 + unsigned long action, void *hcpu) 1282 + { 1283 + int cpu = (long)hcpu; 1284 + switch (action) { 1285 + case CPU_UP_PREPARE: 1286 + per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1287 + break; 1288 + default: 1289 + break; 1290 + } 1291 + return NOTIFY_OK; 1292 + } 1293 + 1294 + static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = { 1295 + .notifier_call = xen_hvm_cpu_notify, 1296 + }; 1297 + 1284 1298 static void __init xen_hvm_guest_init(void) 1285 1299 { 1286 1300 int r; ··· 1309 1287 return; 1310 1288 1311 1289 init_shared_info(); 1290 + 1291 + if (xen_feature(XENFEAT_hvm_callback_vector)) 1292 + xen_have_vector_callback = 1; 1293 + register_cpu_notifier(&xen_hvm_cpu_notifier); 1294 + have_vcpu_info_placement = 0; 1295 + x86_init.irqs.intr_init = xen_init_IRQ; 1312 1296 } 1313 1297 1314 1298 static bool __init xen_hvm_platform(void)
+2
arch/x86/xen/xen-ops.h
··· 38 38 void xen_enable_syscall(void); 39 39 void xen_vcpu_restore(void); 40 40 41 + void xen_callback_vector(void); 42 + 41 43 void __init xen_build_dynamic_phys_to_machine(void); 42 44 43 45 void xen_init_irq_ops(void);
+64 -8
drivers/xen/events.c
··· 29 29 #include <linux/bootmem.h> 30 30 #include <linux/slab.h> 31 31 32 + #include <asm/desc.h> 32 33 #include <asm/ptrace.h> 33 34 #include <asm/irq.h> 34 35 #include <asm/idle.h> ··· 37 36 #include <asm/xen/hypercall.h> 38 37 #include <asm/xen/hypervisor.h> 39 38 39 + #include <xen/xen.h> 40 + #include <xen/hvm.h> 40 41 #include <xen/xen-ops.h> 41 42 #include <xen/events.h> 42 43 #include <xen/interface/xen.h> 43 44 #include <xen/interface/event_channel.h> 45 + #include <xen/interface/hvm/hvm_op.h> 46 + #include <xen/interface/hvm/params.h> 44 47 45 48 /* 46 49 * This lock protects updates to the following mapping and reference-count ··· 622 617 * a bitset of words which contain pending event bits. The second 623 618 * level is a bitset of pending events themselves. 624 619 */ 625 - void xen_evtchn_do_upcall(struct pt_regs *regs) 620 + static void __xen_evtchn_do_upcall(void) 626 621 { 627 622 int cpu = get_cpu(); 628 - struct pt_regs *old_regs = set_irq_regs(regs); 629 623 struct shared_info *s = HYPERVISOR_shared_info; 630 624 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); 631 625 unsigned count; 632 - 633 - exit_idle(); 634 - irq_enter(); 635 626 636 627 do { 637 628 unsigned long pending_words; ··· 668 667 } while(count != 1); 669 668 670 669 out: 671 - irq_exit(); 672 - set_irq_regs(old_regs); 673 670 674 671 put_cpu(); 672 + } 673 + 674 + void xen_evtchn_do_upcall(struct pt_regs *regs) 675 + { 676 + struct pt_regs *old_regs = set_irq_regs(regs); 677 + 678 + exit_idle(); 679 + irq_enter(); 680 + 681 + __xen_evtchn_do_upcall(); 682 + 683 + irq_exit(); 684 + set_irq_regs(old_regs); 685 + } 686 + 687 + void xen_hvm_evtchn_do_upcall(void) 688 + { 689 + __xen_evtchn_do_upcall(); 675 690 } 676 691 677 692 /* Rebind a new event channel to an existing irq. 
*/ ··· 950 933 .retrigger = retrigger_dynirq, 951 934 }; 952 935 936 + int xen_set_callback_via(uint64_t via) 937 + { 938 + struct xen_hvm_param a; 939 + a.domid = DOMID_SELF; 940 + a.index = HVM_PARAM_CALLBACK_IRQ; 941 + a.value = via; 942 + return HYPERVISOR_hvm_op(HVMOP_set_param, &a); 943 + } 944 + EXPORT_SYMBOL_GPL(xen_set_callback_via); 945 + 946 + /* Vector callbacks are better than PCI interrupts to receive event 947 + * channel notifications because we can receive vector callbacks on any 948 + * vcpu and we don't need PCI support or APIC interactions. */ 949 + void xen_callback_vector(void) 950 + { 951 + int rc; 952 + uint64_t callback_via; 953 + if (xen_have_vector_callback) { 954 + callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK); 955 + rc = xen_set_callback_via(callback_via); 956 + if (rc) { 957 + printk(KERN_ERR "Request for Xen HVM callback vector" 958 + " failed.\n"); 959 + xen_have_vector_callback = 0; 960 + return; 961 + } 962 + printk(KERN_INFO "Xen HVM callback vector for event delivery is " 963 + "enabled\n"); 964 + /* in the restore case the vector has already been allocated */ 965 + if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors)) 966 + alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector); 967 + } 968 + } 969 + 953 970 void __init xen_init_IRQ(void) 954 971 { 955 972 int i; ··· 998 947 for (i = 0; i < NR_EVENT_CHANNELS; i++) 999 948 mask_evtchn(i); 1000 949 1001 - irq_ctx_init(smp_processor_id()); 950 + if (xen_hvm_domain()) { 951 + xen_callback_vector(); 952 + native_init_IRQ(); 953 + } else { 954 + irq_ctx_init(smp_processor_id()); 955 + } 1002 956 }
+7
include/xen/events.h
··· 56 56 /* Determine the IRQ which is bound to an event channel */ 57 57 unsigned irq_from_evtchn(unsigned int evtchn); 58 58 59 + /* Xen HVM evtchn vector callback */ 60 + extern void xen_hvm_callback_vector(void); 61 + extern int xen_have_vector_callback; 62 + int xen_set_callback_via(uint64_t via); 63 + void xen_evtchn_do_upcall(struct pt_regs *regs); 64 + void xen_hvm_evtchn_do_upcall(void); 65 + 59 66 #endif /* _XEN_EVENTS_H */
+6
include/xen/hvm.h
··· 3 3 #define XEN_HVM_H__ 4 4 5 5 #include <xen/interface/hvm/params.h> 6 + #include <asm/xen/hypercall.h> 6 7 7 8 static inline int hvm_get_parameter(int idx, uint64_t *value) 8 9 { ··· 21 20 *value = xhv.value; 22 21 return r; 23 22 } 23 + 24 + #define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2 25 + #define HVM_CALLBACK_VIA_TYPE_SHIFT 56 26 + #define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\ 27 + HVM_CALLBACK_VIA_TYPE_SHIFT | (x)) 24 28 25 29 #endif /* XEN_HVM_H__ */
+3
include/xen/interface/features.h
··· 41 41 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ 42 42 #define XENFEAT_mmu_pt_update_preserve_ad 5 43 43 44 + /* x86: Does this Xen host support the HVM callback vector type? */ 45 + #define XENFEAT_hvm_callback_vector 8 46 + 44 47 #define XENFEAT_NR_SUBMAPS 1 45 48 46 49 #endif /* __XEN_PUBLIC_FEATURES_H__ */