Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.3-rc4, 1796 lines, 42 kB
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	int refcnt;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;

static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
		      cpu_evtchn_mask);

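/*
 * Taken together these tables give a lock-free reverse map:
 * evtchn_to_irq[] maps a port back to its Linux irq, while
 * cpu_evtchn_mask marks, per cpu, which ports are bound to that cpu.
 * A port is only handled on a cpu whose bit is set in that cpu's mask
 * (see active_evtchns() below).
 */
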
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   unsigned short vector,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.vector = vector;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;
}

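/*
 * The constructors above are reached from the bind_*_to_irq() paths
 * with irq_mapping_update_lock held, and again (without the lock) from
 * the suspend/resume path below, where the Xen ports are re-negotiated.
 */
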
/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

	info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_info *info;

	/* By default all event channels notify CPU#0. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		struct irq_desc *desc = irq_to_desc(info->irq);
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i),
		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

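/*
 * The sync_*_bit() helpers used above are the locked (atomic) bitops:
 * the pending and mask words live in the shared_info page and are
 * written concurrently by Xen and by other vcpus, so plain non-atomic
 * bitops would race.
 */
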
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

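/*
 * Typical producer-side usage (a sketch, not code from this file): a
 * ring-based backend that has queued responses kicks the other end with
 *
 *	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 *
 * where irq came from bind_interdomain_evtchn_to_irqhandler().
 */
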
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

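/*
 * PIRQ lifecycle, in short: the backing event channel is only bound
 * when the irq is started up (EVTCHNOP_bind_pirq above) and closed
 * again on shutdown, so a request_irq()/free_irq() pair on a pirq
 * allocates and releases the underlying Xen port as a side effect.
 */
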
static int find_irq_by_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	mutex_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

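/*
 * Illustrative caller (a sketch; the real ones live in the x86 Xen PCI
 * code, and the name string here is only an example): dom0 maps a
 * legacy GSI roughly as
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, "ioapic-level");
 *
 * Note that the Xen pirq and the returned Linux irq are independent
 * namespaces, per the comment above.
 */
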
#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name,
			     domid_t domid)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq < 0)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	mutex_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			printk(KERN_INFO "domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

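/*
 * bind_evtchn_to_irq() is idempotent: binding a port that already has
 * an irq simply returns the existing mapping, so callers don't need to
 * track whether a port has been seen before.
 */
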
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

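/*
 * The -EEXIST leg above covers a VIRQ that is already bound, e.g. when
 * a kexec'd kernel inherits live ports: find_virq() then recovers the
 * existing port by scanning EVTCHNOP_status instead of failing.
 */
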
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	mutex_lock(&irq_mapping_update_lock);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			goto done;
	}

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

 done:
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

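/*
 * A typical driver-side sequence (a sketch; my_handler and dev are
 * hypothetical names, not code from this file):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-device", dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */
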
int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= NR_EVENT_CHANNELS)
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
 done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

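/*
 * Refcount convention: info->refcnt starts at -1, meaning "not
 * refcounted"; evtchn_make_refcounted() flips it to 1, after which
 * evtchn_get() and evtchn_put() nest and the final put tears the
 * binding down via unbind_from_irq().
 */
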
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
					     ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
					     ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
					     ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~0UL) << i))

/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

		start_word_idx = __this_cpu_read(current_word_idx);
		start_bit_idx = __this_cpu_read(current_bit_idx);

		word_idx = start_word_idx;

		for (i = 0; pending_words != 0; i++) {
			unsigned long pending_bits;
			unsigned long words;

			words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
			if (words == 0) {
				word_idx = 0;
				bit_idx = 0;
				continue;
			}
			word_idx = __ffs(words);

			pending_bits = active_evtchns(cpu, s, word_idx);
			bit_idx = 0; /* usually scan entire word from start */
			if (word_idx == start_word_idx) {
				/* We scan the starting word in two parts */
				if (i == 0)
					/* 1st time: start in the middle */
					bit_idx = start_bit_idx;
				else
					/* 2nd time: mask bits done already */
					bit_idx &= (1UL << start_bit_idx) - 1;
			}

			do {
				unsigned long bits;
				int port, irq;
				struct irq_desc *desc;

				bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
				if (bits == 0)
					break;

				bit_idx = __ffs(bits);

				/* Process port. */
				port = (word_idx * BITS_PER_LONG) + bit_idx;
				irq = evtchn_to_irq[port];

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}

				bit_idx = (bit_idx + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				__this_cpu_write(current_word_idx,
						 bit_idx ? word_idx :
						 (word_idx+1) % BITS_PER_LONG);
				__this_cpu_write(current_bit_idx, bit_idx);
			} while (bit_idx != 0);

			/* Scan start_l1i twice; all others once. */
			if ((word_idx != start_word_idx) || (i != 0))
				pending_words &= ~(1UL << word_idx);

			word_idx = (word_idx + 1) % BITS_PER_LONG;
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

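/*
 * Worked example of the scan above, assuming BITS_PER_LONG == 64:
 * pending port 1029 has word_idx = 1029 / 64 = 16 and bit_idx =
 * 1029 % 64 = 5, i.e. it is flagged by bit 16 of evtchn_pending_sel
 * and bit 5 of s->evtchn_pending[16].  MASK_LSBS() is what lets the
 * scan resume mid-word: MASK_LSBS(0x36, 2) == 0x34 (bits 0 and 1
 * cleared), so ports below the resume point are skipped this pass.
 */
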
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	xen_irq_info_evtchn_init(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

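/*
 * Note that only the first cpu of the requested affinity mask is used:
 * a Xen event channel is delivered to exactly one vcpu at a time, so
 * multi-cpu masks degrade to "first cpu in the mask".
 */
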
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		   only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

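/*
 * The restore_* helpers above rebuild hypervisor state from the
 * Linux-side bookkeeping after save/restore: the irq numbers and
 * irq_info records survive the suspend, and only the Xen ports are
 * re-negotiated (see xen_irq_resume() below).
 */
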
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

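/*
 * Sketch of the intended poll pattern (this mirrors how the pv spinlock
 * code uses these helpers; it is not code from this file):
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_we_wait_for)
 *		xen_poll_irq(irq);	/. blocks in Xen until irq pends ./
 */
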
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu, evtchn;
	struct irq_info *info;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				    GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
}