Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.29-rc4 837 lines 20 kB view raw
1/* 2 * Xen event channels 3 * 4 * Xen models interrupts with abstract event channels. Because each 5 * domain gets 1024 event channels, but NR_IRQ is not that large, we 6 * must dynamically map irqs<->event channels. The event channels 7 * interface with the rest of the kernel by defining a xen interrupt 8 * chip. When an event is recieved, it is mapped to an irq and sent 9 * through the normal interrupt processing path. 10 * 11 * There are four kinds of events which can be mapped to an event 12 * channel: 13 * 14 * 1. Inter-domain notifications. This includes all the virtual 15 * device events, since they're driven by front-ends in another domain 16 * (typically dom0). 17 * 2. VIRQs, typically used for timers. These are per-cpu events. 18 * 3. IPIs. 19 * 4. Hardware interrupts. Not supported at present. 20 * 21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 22 */ 23 24#include <linux/linkage.h> 25#include <linux/interrupt.h> 26#include <linux/irq.h> 27#include <linux/module.h> 28#include <linux/string.h> 29 30#include <asm/ptrace.h> 31#include <asm/irq.h> 32#include <asm/sync_bitops.h> 33#include <asm/xen/hypercall.h> 34#include <asm/xen/hypervisor.h> 35 36#include <xen/xen-ops.h> 37#include <xen/events.h> 38#include <xen/interface/xen.h> 39#include <xen/interface/event_channel.h> 40 41/* 42 * This lock protects updates to the following mapping and reference-count 43 * arrays. The lock does not need to be acquired to read the mapping tables. 44 */ 45static DEFINE_SPINLOCK(irq_mapping_update_lock); 46 47/* IRQ <-> VIRQ mapping. */ 48static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; 49 50/* IRQ <-> IPI mapping */ 51static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; 52 53/* Packed IRQ information: binding type, sub-type index, and event channel. */ 54struct packed_irq 55{ 56 unsigned short evtchn; 57 unsigned char index; 58 unsigned char type; 59}; 60 61static struct packed_irq irq_info[NR_IRQS]; 62 63/* Binding types. */ 64enum { 65 IRQT_UNBOUND, 66 IRQT_PIRQ, 67 IRQT_VIRQ, 68 IRQT_IPI, 69 IRQT_EVTCHN 70}; 71 72/* Convenient shorthand for packed representation of an unbound IRQ. */ 73#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0) 74 75static int evtchn_to_irq[NR_EVENT_CHANNELS] = { 76 [0 ... NR_EVENT_CHANNELS-1] = -1 77}; 78static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG]; 79static u8 cpu_evtchn[NR_EVENT_CHANNELS]; 80 81/* Reference counts for bindings to IRQs. */ 82static int irq_bindcount[NR_IRQS]; 83 84/* Xen will never allocate port zero for any purpose. */ 85#define VALID_EVTCHN(chn) ((chn) != 0) 86 87static struct irq_chip xen_dynamic_chip; 88 89/* Constructor for packed IRQ information. */ 90static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn) 91{ 92 return (struct packed_irq) { evtchn, index, type }; 93} 94 95/* 96 * Accessors for packed IRQ information. 97 */ 98static inline unsigned int evtchn_from_irq(int irq) 99{ 100 return irq_info[irq].evtchn; 101} 102 103static inline unsigned int index_from_irq(int irq) 104{ 105 return irq_info[irq].index; 106} 107 108static inline unsigned int type_from_irq(int irq) 109{ 110 return irq_info[irq].type; 111} 112 113static inline unsigned long active_evtchns(unsigned int cpu, 114 struct shared_info *sh, 115 unsigned int idx) 116{ 117 return (sh->evtchn_pending[idx] & 118 cpu_evtchn_mask[cpu][idx] & 119 ~sh->evtchn_mask[idx]); 120} 121 122static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) 123{ 124 int irq = evtchn_to_irq[chn]; 125 126 BUG_ON(irq == -1); 127#ifdef CONFIG_SMP 128 irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu); 129#endif 130 131 __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]); 132 __set_bit(chn, cpu_evtchn_mask[cpu]); 133 134 cpu_evtchn[chn] = cpu; 135} 136 137static void init_evtchn_cpu_bindings(void) 138{ 139#ifdef CONFIG_SMP 140 struct irq_desc *desc; 141 int i; 142 143 /* By default all event channels notify CPU#0. */ 144 for_each_irq_desc(i, desc) { 145 desc->affinity = cpumask_of_cpu(0); 146 } 147#endif 148 149 memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); 150 memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0])); 151} 152 153static inline unsigned int cpu_from_evtchn(unsigned int evtchn) 154{ 155 return cpu_evtchn[evtchn]; 156} 157 158static inline void clear_evtchn(int port) 159{ 160 struct shared_info *s = HYPERVISOR_shared_info; 161 sync_clear_bit(port, &s->evtchn_pending[0]); 162} 163 164static inline void set_evtchn(int port) 165{ 166 struct shared_info *s = HYPERVISOR_shared_info; 167 sync_set_bit(port, &s->evtchn_pending[0]); 168} 169 170static inline int test_evtchn(int port) 171{ 172 struct shared_info *s = HYPERVISOR_shared_info; 173 return sync_test_bit(port, &s->evtchn_pending[0]); 174} 175 176 177/** 178 * notify_remote_via_irq - send event to remote end of event channel via irq 179 * @irq: irq of event channel to send event to 180 * 181 * Unlike notify_remote_via_evtchn(), this is safe to use across 182 * save/restore. Notifications on a broken connection are silently 183 * dropped. 184 */ 185void notify_remote_via_irq(int irq) 186{ 187 int evtchn = evtchn_from_irq(irq); 188 189 if (VALID_EVTCHN(evtchn)) 190 notify_remote_via_evtchn(evtchn); 191} 192EXPORT_SYMBOL_GPL(notify_remote_via_irq); 193 194static void mask_evtchn(int port) 195{ 196 struct shared_info *s = HYPERVISOR_shared_info; 197 sync_set_bit(port, &s->evtchn_mask[0]); 198} 199 200static void unmask_evtchn(int port) 201{ 202 struct shared_info *s = HYPERVISOR_shared_info; 203 unsigned int cpu = get_cpu(); 204 205 BUG_ON(!irqs_disabled()); 206 207 /* Slow path (hypercall) if this is a non-local port. */ 208 if (unlikely(cpu != cpu_from_evtchn(port))) { 209 struct evtchn_unmask unmask = { .port = port }; 210 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); 211 } else { 212 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); 213 214 sync_clear_bit(port, &s->evtchn_mask[0]); 215 216 /* 217 * The following is basically the equivalent of 218 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose 219 * the interrupt edge' if the channel is masked. 220 */ 221 if (sync_test_bit(port, &s->evtchn_pending[0]) && 222 !sync_test_and_set_bit(port / BITS_PER_LONG, 223 &vcpu_info->evtchn_pending_sel)) 224 vcpu_info->evtchn_upcall_pending = 1; 225 } 226 227 put_cpu(); 228} 229 230static int find_unbound_irq(void) 231{ 232 int irq; 233 struct irq_desc *desc; 234 235 /* Only allocate from dynirq range */ 236 for (irq = 0; irq < nr_irqs; irq++) 237 if (irq_bindcount[irq] == 0) 238 break; 239 240 if (irq == nr_irqs) 241 panic("No available IRQ to bind to: increase nr_irqs!\n"); 242 243 desc = irq_to_desc_alloc_cpu(irq, 0); 244 if (WARN_ON(desc == NULL)) 245 return -1; 246 247 return irq; 248} 249 250int bind_evtchn_to_irq(unsigned int evtchn) 251{ 252 int irq; 253 254 spin_lock(&irq_mapping_update_lock); 255 256 irq = evtchn_to_irq[evtchn]; 257 258 if (irq == -1) { 259 irq = find_unbound_irq(); 260 261 dynamic_irq_init(irq); 262 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 263 handle_level_irq, "event"); 264 265 evtchn_to_irq[evtchn] = irq; 266 irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); 267 } 268 269 irq_bindcount[irq]++; 270 271 spin_unlock(&irq_mapping_update_lock); 272 273 return irq; 274} 275EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); 276 277static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) 278{ 279 struct evtchn_bind_ipi bind_ipi; 280 int evtchn, irq; 281 282 spin_lock(&irq_mapping_update_lock); 283 284 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 285 if (irq == -1) { 286 irq = find_unbound_irq(); 287 if (irq < 0) 288 goto out; 289 290 dynamic_irq_init(irq); 291 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 292 handle_level_irq, "ipi"); 293 294 bind_ipi.vcpu = cpu; 295 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 296 &bind_ipi) != 0) 297 BUG(); 298 evtchn = bind_ipi.port; 299 300 evtchn_to_irq[evtchn] = irq; 301 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); 302 303 per_cpu(ipi_to_irq, cpu)[ipi] = irq; 304 305 bind_evtchn_to_cpu(evtchn, cpu); 306 } 307 308 irq_bindcount[irq]++; 309 310 out: 311 spin_unlock(&irq_mapping_update_lock); 312 return irq; 313} 314 315 316static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) 317{ 318 struct evtchn_bind_virq bind_virq; 319 int evtchn, irq; 320 321 spin_lock(&irq_mapping_update_lock); 322 323 irq = per_cpu(virq_to_irq, cpu)[virq]; 324 325 if (irq == -1) { 326 bind_virq.virq = virq; 327 bind_virq.vcpu = cpu; 328 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 329 &bind_virq) != 0) 330 BUG(); 331 evtchn = bind_virq.port; 332 333 irq = find_unbound_irq(); 334 335 dynamic_irq_init(irq); 336 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 337 handle_level_irq, "virq"); 338 339 evtchn_to_irq[evtchn] = irq; 340 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); 341 342 per_cpu(virq_to_irq, cpu)[virq] = irq; 343 344 bind_evtchn_to_cpu(evtchn, cpu); 345 } 346 347 irq_bindcount[irq]++; 348 349 spin_unlock(&irq_mapping_update_lock); 350 351 return irq; 352} 353 354static void unbind_from_irq(unsigned int irq) 355{ 356 struct evtchn_close close; 357 int evtchn = evtchn_from_irq(irq); 358 359 spin_lock(&irq_mapping_update_lock); 360 361 if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) { 362 close.port = evtchn; 363 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 364 BUG(); 365 366 switch (type_from_irq(irq)) { 367 case IRQT_VIRQ: 368 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) 369 [index_from_irq(irq)] = -1; 370 break; 371 case IRQT_IPI: 372 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) 373 [index_from_irq(irq)] = -1; 374 break; 375 default: 376 break; 377 } 378 379 /* Closed ports are implicitly re-bound to VCPU0. */ 380 bind_evtchn_to_cpu(evtchn, 0); 381 382 evtchn_to_irq[evtchn] = -1; 383 irq_info[irq] = IRQ_UNBOUND; 384 385 dynamic_irq_cleanup(irq); 386 } 387 388 spin_unlock(&irq_mapping_update_lock); 389} 390 391int bind_evtchn_to_irqhandler(unsigned int evtchn, 392 irq_handler_t handler, 393 unsigned long irqflags, 394 const char *devname, void *dev_id) 395{ 396 unsigned int irq; 397 int retval; 398 399 irq = bind_evtchn_to_irq(evtchn); 400 retval = request_irq(irq, handler, irqflags, devname, dev_id); 401 if (retval != 0) { 402 unbind_from_irq(irq); 403 return retval; 404 } 405 406 return irq; 407} 408EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); 409 410int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, 411 irq_handler_t handler, 412 unsigned long irqflags, const char *devname, void *dev_id) 413{ 414 unsigned int irq; 415 int retval; 416 417 irq = bind_virq_to_irq(virq, cpu); 418 retval = request_irq(irq, handler, irqflags, devname, dev_id); 419 if (retval != 0) { 420 unbind_from_irq(irq); 421 return retval; 422 } 423 424 return irq; 425} 426EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); 427 428int bind_ipi_to_irqhandler(enum ipi_vector ipi, 429 unsigned int cpu, 430 irq_handler_t handler, 431 unsigned long irqflags, 432 const char *devname, 433 void *dev_id) 434{ 435 int irq, retval; 436 437 irq = bind_ipi_to_irq(ipi, cpu); 438 if (irq < 0) 439 return irq; 440 441 retval = request_irq(irq, handler, irqflags, devname, dev_id); 442 if (retval != 0) { 443 unbind_from_irq(irq); 444 return retval; 445 } 446 447 return irq; 448} 449 450void unbind_from_irqhandler(unsigned int irq, void *dev_id) 451{ 452 free_irq(irq, dev_id); 453 unbind_from_irq(irq); 454} 455EXPORT_SYMBOL_GPL(unbind_from_irqhandler); 456 457void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) 458{ 459 int irq = per_cpu(ipi_to_irq, cpu)[vector]; 460 BUG_ON(irq < 0); 461 notify_remote_via_irq(irq); 462} 463 464irqreturn_t xen_debug_interrupt(int irq, void *dev_id) 465{ 466 struct shared_info *sh = HYPERVISOR_shared_info; 467 int cpu = smp_processor_id(); 468 int i; 469 unsigned long flags; 470 static DEFINE_SPINLOCK(debug_lock); 471 472 spin_lock_irqsave(&debug_lock, flags); 473 474 printk("vcpu %d\n ", cpu); 475 476 for_each_online_cpu(i) { 477 struct vcpu_info *v = per_cpu(xen_vcpu, i); 478 printk("%d: masked=%d pending=%d event_sel %08lx\n ", i, 479 (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask, 480 v->evtchn_upcall_pending, 481 v->evtchn_pending_sel); 482 } 483 printk("pending:\n "); 484 for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) 485 printk("%08lx%s", sh->evtchn_pending[i], 486 i % 8 == 0 ? "\n " : " "); 487 printk("\nmasks:\n "); 488 for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 489 printk("%08lx%s", sh->evtchn_mask[i], 490 i % 8 == 0 ? "\n " : " "); 491 492 printk("\nunmasked:\n "); 493 for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 494 printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i], 495 i % 8 == 0 ? "\n " : " "); 496 497 printk("\npending list:\n"); 498 for(i = 0; i < NR_EVENT_CHANNELS; i++) { 499 if (sync_test_bit(i, sh->evtchn_pending)) { 500 printk(" %d: event %d -> irq %d\n", 501 cpu_evtchn[i], i, 502 evtchn_to_irq[i]); 503 } 504 } 505 506 spin_unlock_irqrestore(&debug_lock, flags); 507 508 return IRQ_HANDLED; 509} 510 511 512/* 513 * Search the CPUs pending events bitmasks. For each one found, map 514 * the event number to an irq, and feed it into do_IRQ() for 515 * handling. 516 * 517 * Xen uses a two-level bitmap to speed searching. The first level is 518 * a bitset of words which contain pending event bits. The second 519 * level is a bitset of pending events themselves. 520 */ 521void xen_evtchn_do_upcall(struct pt_regs *regs) 522{ 523 int cpu = get_cpu(); 524 struct shared_info *s = HYPERVISOR_shared_info; 525 struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); 526 static DEFINE_PER_CPU(unsigned, nesting_count); 527 unsigned count; 528 529 do { 530 unsigned long pending_words; 531 532 vcpu_info->evtchn_upcall_pending = 0; 533 534 if (__get_cpu_var(nesting_count)++) 535 goto out; 536 537#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ 538 /* Clear master flag /before/ clearing selector flag. */ 539 wmb(); 540#endif 541 pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); 542 while (pending_words != 0) { 543 unsigned long pending_bits; 544 int word_idx = __ffs(pending_words); 545 pending_words &= ~(1UL << word_idx); 546 547 while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) { 548 int bit_idx = __ffs(pending_bits); 549 int port = (word_idx * BITS_PER_LONG) + bit_idx; 550 int irq = evtchn_to_irq[port]; 551 552 if (irq != -1) 553 xen_do_IRQ(irq, regs); 554 } 555 } 556 557 BUG_ON(!irqs_disabled()); 558 559 count = __get_cpu_var(nesting_count); 560 __get_cpu_var(nesting_count) = 0; 561 } while(count != 1); 562 563out: 564 put_cpu(); 565} 566 567/* Rebind a new event channel to an existing irq. */ 568void rebind_evtchn_irq(int evtchn, int irq) 569{ 570 /* Make sure the irq is masked, since the new event channel 571 will also be masked. */ 572 disable_irq(irq); 573 574 spin_lock(&irq_mapping_update_lock); 575 576 /* After resume the irq<->evtchn mappings are all cleared out */ 577 BUG_ON(evtchn_to_irq[evtchn] != -1); 578 /* Expect irq to have been bound before, 579 so the bindcount should be non-0 */ 580 BUG_ON(irq_bindcount[irq] == 0); 581 582 evtchn_to_irq[evtchn] = irq; 583 irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn); 584 585 spin_unlock(&irq_mapping_update_lock); 586 587 /* new event channels are always bound to cpu 0 */ 588 irq_set_affinity(irq, cpumask_of(0)); 589 590 /* Unmask the event channel. */ 591 enable_irq(irq); 592} 593 594/* Rebind an evtchn so that it gets delivered to a specific cpu */ 595static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 596{ 597 struct evtchn_bind_vcpu bind_vcpu; 598 int evtchn = evtchn_from_irq(irq); 599 600 if (!VALID_EVTCHN(evtchn)) 601 return; 602 603 /* Send future instances of this interrupt to other vcpu. */ 604 bind_vcpu.port = evtchn; 605 bind_vcpu.vcpu = tcpu; 606 607 /* 608 * If this fails, it usually just indicates that we're dealing with a 609 * virq or IPI channel, which don't actually need to be rebound. Ignore 610 * it, but don't do the xenlinux-level rebind in that case. 611 */ 612 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 613 bind_evtchn_to_cpu(evtchn, tcpu); 614} 615 616 617static void set_affinity_irq(unsigned irq, const struct cpumask *dest) 618{ 619 unsigned tcpu = cpumask_first(dest); 620 rebind_irq_to_cpu(irq, tcpu); 621} 622 623int resend_irq_on_evtchn(unsigned int irq) 624{ 625 int masked, evtchn = evtchn_from_irq(irq); 626 struct shared_info *s = HYPERVISOR_shared_info; 627 628 if (!VALID_EVTCHN(evtchn)) 629 return 1; 630 631 masked = sync_test_and_set_bit(evtchn, s->evtchn_mask); 632 sync_set_bit(evtchn, s->evtchn_pending); 633 if (!masked) 634 unmask_evtchn(evtchn); 635 636 return 1; 637} 638 639static void enable_dynirq(unsigned int irq) 640{ 641 int evtchn = evtchn_from_irq(irq); 642 643 if (VALID_EVTCHN(evtchn)) 644 unmask_evtchn(evtchn); 645} 646 647static void disable_dynirq(unsigned int irq) 648{ 649 int evtchn = evtchn_from_irq(irq); 650 651 if (VALID_EVTCHN(evtchn)) 652 mask_evtchn(evtchn); 653} 654 655static void ack_dynirq(unsigned int irq) 656{ 657 int evtchn = evtchn_from_irq(irq); 658 659 move_native_irq(irq); 660 661 if (VALID_EVTCHN(evtchn)) 662 clear_evtchn(evtchn); 663} 664 665static int retrigger_dynirq(unsigned int irq) 666{ 667 int evtchn = evtchn_from_irq(irq); 668 struct shared_info *sh = HYPERVISOR_shared_info; 669 int ret = 0; 670 671 if (VALID_EVTCHN(evtchn)) { 672 int masked; 673 674 masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask); 675 sync_set_bit(evtchn, sh->evtchn_pending); 676 if (!masked) 677 unmask_evtchn(evtchn); 678 ret = 1; 679 } 680 681 return ret; 682} 683 684static void restore_cpu_virqs(unsigned int cpu) 685{ 686 struct evtchn_bind_virq bind_virq; 687 int virq, irq, evtchn; 688 689 for (virq = 0; virq < NR_VIRQS; virq++) { 690 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) 691 continue; 692 693 BUG_ON(irq_info[irq].type != IRQT_VIRQ); 694 BUG_ON(irq_info[irq].index != virq); 695 696 /* Get a new binding from Xen. */ 697 bind_virq.virq = virq; 698 bind_virq.vcpu = cpu; 699 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 700 &bind_virq) != 0) 701 BUG(); 702 evtchn = bind_virq.port; 703 704 /* Record the new mapping. */ 705 evtchn_to_irq[evtchn] = irq; 706 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); 707 bind_evtchn_to_cpu(evtchn, cpu); 708 709 /* Ready for use. */ 710 unmask_evtchn(evtchn); 711 } 712} 713 714static void restore_cpu_ipis(unsigned int cpu) 715{ 716 struct evtchn_bind_ipi bind_ipi; 717 int ipi, irq, evtchn; 718 719 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { 720 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) 721 continue; 722 723 BUG_ON(irq_info[irq].type != IRQT_IPI); 724 BUG_ON(irq_info[irq].index != ipi); 725 726 /* Get a new binding from Xen. */ 727 bind_ipi.vcpu = cpu; 728 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 729 &bind_ipi) != 0) 730 BUG(); 731 evtchn = bind_ipi.port; 732 733 /* Record the new mapping. */ 734 evtchn_to_irq[evtchn] = irq; 735 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); 736 bind_evtchn_to_cpu(evtchn, cpu); 737 738 /* Ready for use. */ 739 unmask_evtchn(evtchn); 740 741 } 742} 743 744/* Clear an irq's pending state, in preparation for polling on it */ 745void xen_clear_irq_pending(int irq) 746{ 747 int evtchn = evtchn_from_irq(irq); 748 749 if (VALID_EVTCHN(evtchn)) 750 clear_evtchn(evtchn); 751} 752 753void xen_set_irq_pending(int irq) 754{ 755 int evtchn = evtchn_from_irq(irq); 756 757 if (VALID_EVTCHN(evtchn)) 758 set_evtchn(evtchn); 759} 760 761bool xen_test_irq_pending(int irq) 762{ 763 int evtchn = evtchn_from_irq(irq); 764 bool ret = false; 765 766 if (VALID_EVTCHN(evtchn)) 767 ret = test_evtchn(evtchn); 768 769 return ret; 770} 771 772/* Poll waiting for an irq to become pending. In the usual case, the 773 irq will be disabled so it won't deliver an interrupt. */ 774void xen_poll_irq(int irq) 775{ 776 evtchn_port_t evtchn = evtchn_from_irq(irq); 777 778 if (VALID_EVTCHN(evtchn)) { 779 struct sched_poll poll; 780 781 poll.nr_ports = 1; 782 poll.timeout = 0; 783 set_xen_guest_handle(poll.ports, &evtchn); 784 785 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) 786 BUG(); 787 } 788} 789 790void xen_irq_resume(void) 791{ 792 unsigned int cpu, irq, evtchn; 793 794 init_evtchn_cpu_bindings(); 795 796 /* New event-channel space is not 'live' yet. */ 797 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 798 mask_evtchn(evtchn); 799 800 /* No IRQ <-> event-channel mappings. */ 801 for (irq = 0; irq < nr_irqs; irq++) 802 irq_info[irq].evtchn = 0; /* zap event-channel binding */ 803 804 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 805 evtchn_to_irq[evtchn] = -1; 806 807 for_each_possible_cpu(cpu) { 808 restore_cpu_virqs(cpu); 809 restore_cpu_ipis(cpu); 810 } 811} 812 813static struct irq_chip xen_dynamic_chip __read_mostly = { 814 .name = "xen-dyn", 815 .mask = disable_dynirq, 816 .unmask = enable_dynirq, 817 .ack = ack_dynirq, 818 .set_affinity = set_affinity_irq, 819 .retrigger = retrigger_dynirq, 820}; 821 822void __init xen_init_IRQ(void) 823{ 824 int i; 825 826 init_evtchn_cpu_bindings(); 827 828 /* No event channels are 'live' right now. */ 829 for (i = 0; i < NR_EVENT_CHANNELS; i++) 830 mask_evtchn(i); 831 832 /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ 833 for (i = 0; i < nr_irqs; i++) 834 irq_bindcount[i] = 0; 835 836 irq_ctx_init(smp_processor_id()); 837}