Merge branches 'stable/irq.fairness' and 'stable/irq.ween_of_nr_irqs' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/irq.fairness' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen: events: Remove redundant clear of l2i at end of round-robin loop
xen: events: Make round-robin scan fairer by snapshotting each l2 word once only
xen: events: Clean up round-robin evtchn scan.
xen: events: Make last processed event channel a per-cpu variable.
xen: events: Process event channels notifications in round-robin order.
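
The series above replaces the strict low-to-high __ffs() scan of the
pending-event bitmap, which let a busy low-numbered port starve higher
ones, with a scan that resumes one bit past the last event handled
(kept in the per-cpu current_word_idx/current_bit_idx). Below is a
stand-alone sketch of the idea, not the kernel code itself: it borrows
the patch's MASK_LSBS macro, keeps a single resume index for one word,
and uses GCC's __builtin_ctzl() in place of the kernel's __ffs().

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    /* Mask out the i least significant bits of w (as in the patch). */
    #define MASK_LSBS(w, i) ((w) & ((~0UL) << (i)))

    static unsigned int current_bit_idx;    /* per-cpu in the real driver */

    /*
     * Handle at most one pending port per call, resuming the search
     * just past the port handled last time.  Repeated calls therefore
     * service ports in round-robin order even when low-numbered ports
     * are pending again by the next upcall.
     */
    static int scan_one(unsigned long pending)
    {
            unsigned int bit_idx = current_bit_idx;
            unsigned long bits = MASK_LSBS(pending, bit_idx);

            if (bits == 0)          /* nothing at/above the resume point, */
                    bits = pending; /* so wrap to the start of the word   */
            if (bits == 0)
                    return -1;      /* nothing pending at all */

            bit_idx = __builtin_ctzl(bits); /* first set bit, like __ffs() */
            current_bit_idx = (bit_idx + 1) % BITS_PER_LONG;
            return bit_idx;
    }

    int main(void)
    {
            int i;

            /* Ports 0 and 3 permanently pending: handled 0, 3, 0, 3, ...
             * where the old scan would pick port 0 every time. */
            for (i = 0; i < 4; i++)
                    printf("handled port %d\n", scan_one(0x9));
            return 0;
    }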

* 'stable/irq.ween_of_nr_irqs' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen: events: Fix compile error if CONFIG_SMP is not defined.
xen: events: correct locking in xen_irq_from_pirq
xen: events: propagate irq allocation failure instead of panicking
xen: events: do not workaround too-small nr_irqs
xen: events: remove use of nr_irqs as upper bound on number of pirqs
xen: events: dynamically allocate irq info structures
xen: events: maintain a list of Xen interrupts
xen: events: push setup of irq<->{evtchn,ipi,virq,pirq} maps into irq_info init functions
xen: events: turn irq_info constructors into initialiser functions
xen: events: use per-cpu variable for cpu_evtchn_mask
xen: events: refactor GSI pirq bindings functions
xen: events: rename restore_cpu_pirqs -> restore_pirqs
xen: events: remove unused public functions
xen: events: fix xen_map_pirq_gsi error return
xen: events: simplify comment
xen: events: separate two unrelated halves of if condition
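
The common thread of this series is that events.c stops sizing its
lookup tables by nr_irqs (irq_info[], pirq_to_irq[]), a limit chosen
by the kernel rather than by Xen: each Xen IRQ instead carries a
kzalloc'd struct irq_info hung off its irq descriptor and chained on
xen_irq_list_head, and lookups walk that list. A minimal user-space
sketch of the resulting pattern (field names follow the patch; the
kernel's list and irq-descriptor machinery is reduced to a hand-rolled
singly-linked list):

    #include <stdio.h>
    #include <stdlib.h>

    enum xen_irq_type { IRQT_UNBOUND, IRQT_PIRQ }; /* other types elided */

    struct irq_info {
            struct irq_info *next;  /* stands in for struct list_head */
            enum xen_irq_type type;
            unsigned irq;
            struct {
                    unsigned short pirq;
                    unsigned short gsi;
            } u;
    };

    static struct irq_info *xen_irq_list_head;

    /* One info struct per IRQ, allocated on demand (the kernel version
     * panics on allocation failure) and chained onto the global list. */
    static struct irq_info *xen_irq_init(unsigned irq)
    {
            struct irq_info *info = calloc(1, sizeof(*info));

            if (info == NULL)
                    abort();
            info->type = IRQT_UNBOUND;
            info->irq = irq;
            info->next = xen_irq_list_head;
            xen_irq_list_head = info;
            return info;
    }

    /* Walk the list, as the reworked xen_irq_from_pirq() does, instead
     * of indexing a pirq_to_irq[nr_irqs] array. */
    static int xen_irq_from_pirq(unsigned pirq)
    {
            struct irq_info *info;

            for (info = xen_irq_list_head; info != NULL; info = info->next) {
                    if (info->type != IRQT_PIRQ)
                            continue;
                    if (info->u.pirq == pirq)
                            return info->irq;
            }
            return -1;
    }

    int main(void)
    {
            struct irq_info *info = xen_irq_init(42);

            info->type = IRQT_PIRQ;
            info->u.pirq = 7;
            printf("pirq 7 -> irq %d\n", xen_irq_from_pirq(7)); /* 42 */
            return 0;
    }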

Fix up trivial conflicts in drivers/xen/events.c

3 files changed, 302 insertions(+), 206 deletions(-)

arch/x86/pci/xen.c | +29 -12
···
                name = "ioapic-level";
        }

-       irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);
+       irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);

        printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

···
 {
        int rc;
        int share = 1;
+       int pirq;
        u8 gsi;

        rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
···
                return rc;
        }

+       rc = xen_allocate_pirq_gsi(gsi);
+       if (rc < 0) {
+               dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
+                        gsi, rc);
+               return rc;
+       }
+       pirq = rc;
+
        if (gsi < NR_IRQS_LEGACY)
                share = 0;

-       rc = xen_allocate_pirq(gsi, share, "pcifront");
+       rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
        if (rc < 0) {
-               dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
-                        gsi, rc);
+               dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
+                        gsi, pirq, rc);
                return rc;
        }

···
 #ifdef CONFIG_XEN_DOM0
 static int xen_register_pirq(u32 gsi, int triggering)
 {
-       int rc, irq;
+       int rc, pirq, irq = -1;
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;
···
                name = "ioapic-level";
        }

-       irq = xen_allocate_pirq(gsi, shareable, name);
+       pirq = xen_allocate_pirq_gsi(gsi);
+       if (pirq < 0)
+               goto out;

-       printk(KERN_DEBUG "xen: --> irq=%d\n", irq);
-
+       irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
        if (irq < 0)
                goto out;
+
+       printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);

        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
        map_irq.index = gsi;
-       map_irq.pirq = irq;
+       map_irq.pirq = pirq;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
        if (rc) {
···

 void __init xen_setup_pirqs(void)
 {
-       int irq;
+       int pirq, irq;

        pci_xen_initial_domain();

        if (0 == nr_ioapics) {
-               for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
-                       xen_allocate_pirq(irq, 0, "xt-pic");
+               for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+                       pirq = xen_allocate_pirq_gsi(irq);
+                       if (WARN(pirq < 0,
+                                "Could not allocate PIRQ for legacy interrupt\n"))
+                               break;
+                       irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
+               }
                return;
        }
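
The xen.c hunks above all follow the same two-step calling convention
introduced by this merge: first reserve a pirq for the GSI, then bind
it to an IRQ, with failures propagated to the caller instead of
panicking. Schematically (an illustrative fragment, not a function
added by this merge; both calls are defined in drivers/xen/events.c
below):

    static int example_register_gsi(unsigned gsi, int shareable, char *name)
    {
            int pirq, irq;

            pirq = xen_allocate_pirq_gsi(gsi);  /* step 1: reserve a pirq */
            if (pirq < 0)
                    return pirq;                /* propagated, not panicked */

            irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
            return irq;                         /* step 2: bind pirq -> irq */
    }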

drivers/xen/events.c | +263 -180
···
  */
 static DEFINE_SPINLOCK(irq_mapping_update_lock);

+static LIST_HEAD(xen_irq_list_head);
+
 /* IRQ <-> VIRQ mapping. */
 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

···
  */
 struct irq_info
 {
+       struct list_head list;
        enum xen_irq_type type; /* type */
+       unsigned irq;
        unsigned short evtchn;  /* event channel */
        unsigned short cpu;     /* cpu bound */

···
 #define PIRQ_NEEDS_EOI  (1 << 0)
 #define PIRQ_SHAREABLE  (1 << 1)

-static struct irq_info *irq_info;
-static int *pirq_to_irq;
-
 static int *evtchn_to_irq;
-struct cpu_evtchn_s {
-       unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
-};

-static __initdata struct cpu_evtchn_s init_evtchn_mask = {
-       .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
-};
-static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
-
-static inline unsigned long *cpu_evtchn_mask(int cpu)
-{
-       return cpu_evtchn_mask_p[cpu].bits;
-}
+static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+                     cpu_evtchn_mask);

 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)      ((chn) != 0)
···
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;

-/* Constructor for packed IRQ information. */
-static struct irq_info mk_unbound_info(void)
+/* Get info for IRQ */
+static struct irq_info *info_for_irq(unsigned irq)
 {
-       return (struct irq_info) { .type = IRQT_UNBOUND };
+       return get_irq_data(irq);
 }

-static struct irq_info mk_evtchn_info(unsigned short evtchn)
+/* Constructors for packed IRQ information. */
+static void xen_irq_info_common_init(struct irq_info *info,
+                                    unsigned irq,
+                                    enum xen_irq_type type,
+                                    unsigned short evtchn,
+                                    unsigned short cpu)
 {
-       return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
-                       .cpu = 0 };
+
+       BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+       info->type = type;
+       info->irq = irq;
+       info->evtchn = evtchn;
+       info->cpu = cpu;
+
+       evtchn_to_irq[evtchn] = irq;
 }

-static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+static void xen_irq_info_evtchn_init(unsigned irq,
+                                    unsigned short evtchn)
 {
-       return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
-               .cpu = 0, .u.ipi = ipi };
+       struct irq_info *info = info_for_irq(irq);
+
+       xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
 }

-static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+static void xen_irq_info_ipi_init(unsigned cpu,
+                                 unsigned irq,
+                                 unsigned short evtchn,
+                                 enum ipi_vector ipi)
 {
-       return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
-               .cpu = 0, .u.virq = virq };
+       struct irq_info *info = info_for_irq(irq);
+
+       xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
+
+       info->u.ipi = ipi;
+
+       per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 }

-static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
-                                   unsigned short gsi, unsigned short vector)
+static void xen_irq_info_virq_init(unsigned cpu,
+                                  unsigned irq,
+                                  unsigned short evtchn,
+                                  unsigned short virq)
 {
-       return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
-               .cpu = 0,
-               .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
+       struct irq_info *info = info_for_irq(irq);
+
+       xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
+
+       info->u.virq = virq;
+
+       per_cpu(virq_to_irq, cpu)[virq] = irq;
+}
+
+static void xen_irq_info_pirq_init(unsigned irq,
+                                  unsigned short evtchn,
+                                  unsigned short pirq,
+                                  unsigned short gsi,
+                                  unsigned short vector,
+                                  unsigned char flags)
+{
+       struct irq_info *info = info_for_irq(irq);
+
+       xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
+
+       info->u.pirq.pirq = pirq;
+       info->u.pirq.gsi = gsi;
+       info->u.pirq.vector = vector;
+       info->u.pirq.flags = flags;
 }

 /*
  * Accessors for packed IRQ information.
  */
-static struct irq_info *info_for_irq(unsigned irq)
-{
-       return &irq_info[irq];
-}
-
 static unsigned int evtchn_from_irq(unsigned irq)
 {
        if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
···
        return info->u.pirq.pirq;
 }

-static unsigned gsi_from_irq(unsigned irq)
-{
-       struct irq_info *info = info_for_irq(irq);
-
-       BUG_ON(info == NULL);
-       BUG_ON(info->type != IRQT_PIRQ);
-
-       return info->u.pirq.gsi;
-}
-
-static unsigned vector_from_irq(unsigned irq)
-{
-       struct irq_info *info = info_for_irq(irq);
-
-       BUG_ON(info == NULL);
-       BUG_ON(info->type != IRQT_PIRQ);
-
-       return info->u.pirq.vector;
-}
-
 static enum xen_irq_type type_from_irq(unsigned irq)
 {
        return info_for_irq(irq)->type;
···
                                           unsigned int idx)
 {
        return (sh->evtchn_pending[idx] &
-               cpu_evtchn_mask(cpu)[idx] &
+               per_cpu(cpu_evtchn_mask, cpu)[idx] &
                ~sh->evtchn_mask[idx]);
 }

···
        cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif

-       clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
-       set_bit(chn, cpu_evtchn_mask(cpu));
+       clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
+       set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

-       irq_info[irq].cpu = cpu;
+       info_for_irq(irq)->cpu = cpu;
 }

 static void init_evtchn_cpu_bindings(void)
 {
        int i;
 #ifdef CONFIG_SMP
-       struct irq_desc *desc;
+       struct irq_info *info;

        /* By default all event channels notify CPU#0. */
-       for_each_irq_desc(i, desc) {
+       list_for_each_entry(info, &xen_irq_list_head, list) {
+               struct irq_desc *desc = irq_to_desc(info->irq);
                cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
        }
 #endif

        for_each_possible_cpu(i)
-               memset(cpu_evtchn_mask(i),
-                      (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
-
+               memset(per_cpu(cpu_evtchn_mask, i),
+                      (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
 }

 static inline void clear_evtchn(int port)
···
        put_cpu();
 }

-static int xen_allocate_irq_dynamic(void)
+static void xen_irq_init(unsigned irq)
+{
+       struct irq_info *info;
+       struct irq_desc *desc = irq_to_desc(irq);
+
+#ifdef CONFIG_SMP
+       /* By default all event channels notify CPU#0. */
+       cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+#endif
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (info == NULL)
+               panic("Unable to allocate metadata for IRQ%d\n", irq);
+
+       info->type = IRQT_UNBOUND;
+
+       set_irq_data(irq, info);
+
+       list_add_tail(&info->list, &xen_irq_list_head);
+}
+
+static int __must_check xen_allocate_irq_dynamic(void)
 {
        int first = 0;
        int irq;
···
        first = get_nr_irqs_gsi();
 #endif

-retry:
        irq = irq_alloc_desc_from(first, -1);

-       if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
-               printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
-               first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
-               goto retry;
-       }
-
-       if (irq < 0)
-               panic("No available IRQ to bind to: increase nr_irqs!\n");
+       xen_irq_init(irq);

        return irq;
 }

-static int xen_allocate_irq_gsi(unsigned gsi)
+static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 {
        int irq;

···

        /* Legacy IRQ descriptors are already allocated by the arch. */
        if (gsi < NR_IRQS_LEGACY)
-               return gsi;
+               irq = gsi;
+       else
+               irq = irq_alloc_desc_at(gsi, -1);

-       irq = irq_alloc_desc_at(gsi, -1);
-       if (irq < 0)
-               panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
+       xen_irq_init(irq);

        return irq;
 }

 static void xen_free_irq(unsigned irq)
 {
+       struct irq_info *info = get_irq_data(irq);
+
+       list_del(&info->list);
+
+       set_irq_data(irq, NULL);
+
+       kfree(info);
+
        /* Legacy IRQ descriptors are managed by the arch. */
        if (irq < NR_IRQS_LEGACY)
                return;
···

 static int find_irq_by_gsi(unsigned gsi)
 {
-       int irq;
+       struct irq_info *info;

-       for (irq = 0; irq < nr_irqs; irq++) {
-               struct irq_info *info = info_for_irq(irq);
-
-               if (info == NULL || info->type != IRQT_PIRQ)
+       list_for_each_entry(info, &xen_irq_list_head, list) {
+               if (info->type != IRQT_PIRQ)
                        continue;

-               if (gsi_from_irq(irq) == gsi)
-                       return irq;
+               if (info->u.pirq.gsi == gsi)
+                       return info->irq;
        }

        return -1;
 }

-int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
+int xen_allocate_pirq_gsi(unsigned gsi)
 {
-       return xen_map_pirq_gsi(gsi, gsi, shareable, name);
+       return gsi;
 }

-/* xen_map_pirq_gsi might allocate irqs from the top down, as a
- * consequence don't assume that the irq number returned has a low value
- * or can be used as a pirq number unless you know otherwise.
- *
- * One notable exception is when xen_map_pirq_gsi is called passing an
- * hardware gsi as argument, in that case the irq number returned
- * matches the gsi number passed as second argument.
+/*
+ * Do not make any assumptions regarding the relationship between the
+ * IRQ number returned here and the Xen pirq argument.
  *
  * Note: We don't assign an event channel until the irq actually started
  * up. Return an existing irq if we've already got one for the gsi.
  */
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+                            unsigned pirq, int shareable, char *name)
 {
-       int irq = 0;
+       int irq = -1;
        struct physdev_irq irq_op;

        spin_lock(&irq_mapping_update_lock);
-
-       if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
-               printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
-                       pirq > nr_irqs ? "pirq" :"",
-                       gsi > nr_irqs ? "gsi" : "");
-               goto out;
-       }

        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
···
        }

        irq = xen_allocate_irq_gsi(gsi);
+       if (irq < 0)
+               goto out;

        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                      handle_level_irq, name);
···
                goto out;
        }

-       irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
-       irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
-       pirq_to_irq[pirq] = irq;
+       xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+                              shareable ? PIRQ_SHAREABLE : 0);

 out:
        spin_unlock(&irq_mapping_update_lock);
···
        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                      handle_level_irq, name);

-       irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
-       pirq_to_irq[pirq] = irq;
+       xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
        ret = irq_set_msi_desc(irq, msidesc);
        if (ret < 0)
                goto error_irq;
···
                        goto out;
                }
        }
-       pirq_to_irq[info->u.pirq.pirq] = -1;
-
-       irq_info[irq] = mk_unbound_info();

        xen_free_irq(irq);

···
        return rc;
 }

-int xen_vector_from_irq(unsigned irq)
-{
-       return vector_from_irq(irq);
-}
-
-int xen_gsi_from_irq(unsigned irq)
-{
-       return gsi_from_irq(irq);
-}
-
 int xen_irq_from_pirq(unsigned pirq)
 {
-       return pirq_to_irq[pirq];
+       int irq;
+
+       struct irq_info *info;
+
+       spin_lock(&irq_mapping_update_lock);
+
+       list_for_each_entry(info, &xen_irq_list_head, list) {
+               if (info == NULL || info->type != IRQT_PIRQ)
+                       continue;
+               irq = info->irq;
+               if (info->u.pirq.pirq == pirq)
+                       goto out;
+       }
+       irq = -1;
+out:
+       spin_unlock(&irq_mapping_update_lock);
+
+       return irq;
 }

 int bind_evtchn_to_irq(unsigned int evtchn)
···

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
+               if (irq == -1)
+                       goto out;

                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_fasteoi_irq, "event");

-               evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_evtchn_info(evtchn);
+               xen_irq_info_evtchn_init(irq, evtchn);
        }

+out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
···
                        BUG();
                evtchn = bind_ipi.port;

-               evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_ipi_info(evtchn, ipi);
-               per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+               xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

                bind_evtchn_to_cpu(evtchn, cpu);
        }
···

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
+               if (irq == -1)
+                       goto out;

                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");
···
                        BUG();
                evtchn = bind_virq.port;

-               evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_virq_info(evtchn, virq);
-
-               per_cpu(virq_to_irq, cpu)[virq] = irq;
+               xen_irq_info_virq_init(cpu, irq, evtchn, virq);

                bind_evtchn_to_cpu(evtchn, cpu);
        }

+out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
···
                evtchn_to_irq[evtchn] = -1;
        }

-       if (irq_info[irq].type != IRQT_UNBOUND) {
-               irq_info[irq] = mk_unbound_info();
+       BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

-               xen_free_irq(irq);
-       }
+       xen_free_irq(irq);

        spin_unlock(&irq_mapping_update_lock);
 }
···
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
+       if (irq < 0)
+               return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
···
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
+       if (irq < 0)
+               return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
···
 {
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
-       unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
+       unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
···
 }

 static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~0UL) << i))

 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
···
  */
 static void __xen_evtchn_do_upcall(void)
 {
+       int start_word_idx, start_bit_idx;
+       int word_idx, bit_idx;
+       int i;
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
···
                wmb();
 #endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
-               while (pending_words != 0) {
-                       unsigned long pending_bits;
-                       int word_idx = __ffs(pending_words);
-                       pending_words &= ~(1UL << word_idx);

-                       while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
-                               int bit_idx = __ffs(pending_bits);
-                               int port = (word_idx * BITS_PER_LONG) + bit_idx;
-                               int irq = evtchn_to_irq[port];
+               start_word_idx = __this_cpu_read(current_word_idx);
+               start_bit_idx = __this_cpu_read(current_bit_idx);
+
+               word_idx = start_word_idx;
+
+               for (i = 0; pending_words != 0; i++) {
+                       unsigned long pending_bits;
+                       unsigned long words;
+
+                       words = MASK_LSBS(pending_words, word_idx);
+
+                       /*
+                        * If we masked out all events, wrap to beginning.
+                        */
+                       if (words == 0) {
+                               word_idx = 0;
+                               bit_idx = 0;
+                               continue;
+                       }
+                       word_idx = __ffs(words);
+
+                       pending_bits = active_evtchns(cpu, s, word_idx);
+                       bit_idx = 0; /* usually scan entire word from start */
+                       if (word_idx == start_word_idx) {
+                               /* We scan the starting word in two parts */
+                               if (i == 0)
+                                       /* 1st time: start in the middle */
+                                       bit_idx = start_bit_idx;
+                               else
+                                       /* 2nd time: mask bits done already */
+                                       bit_idx &= (1UL << start_bit_idx) - 1;
+                       }
+
+                       do {
+                               unsigned long bits;
+                               int port, irq;
                                struct irq_desc *desc;
+
+                               bits = MASK_LSBS(pending_bits, bit_idx);
+
+                               /* If we masked out all events, move on. */
+                               if (bits == 0)
+                                       break;
+
+                               bit_idx = __ffs(bits);
+
+                               /* Process port. */
+                               port = (word_idx * BITS_PER_LONG) + bit_idx;
+                               irq = evtchn_to_irq[port];

                                mask_evtchn(port);
                                clear_evtchn(port);
···
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }
-                       }
+
+                               bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+
+                               /* Next caller starts at last processed + 1 */
+                               __this_cpu_write(current_word_idx,
+                                                bit_idx ? word_idx :
+                                                (word_idx+1) % BITS_PER_LONG);
+                               __this_cpu_write(current_bit_idx, bit_idx);
+                       } while (bit_idx != 0);
+
+                       /* Scan start_l1i twice; all others once. */
+                       if ((word_idx != start_word_idx) || (i != 0))
+                               pending_words &= ~(1UL << word_idx);
+
+                       word_idx = (word_idx + 1) % BITS_PER_LONG;
                }

                BUG_ON(!irqs_disabled());
···
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

-       evtchn_to_irq[evtchn] = irq;
-       irq_info[irq] = mk_evtchn_info(evtchn);
+       xen_irq_info_evtchn_init(irq, evtchn);

        spin_unlock(&irq_mapping_update_lock);
···
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

-       /* events delivered via platform PCI interrupts are always
-        * routed to vcpu 0 */
-       if (!VALID_EVTCHN(evtchn) ||
-               (xen_hvm_domain() && !xen_have_vector_callback))
+       if (!VALID_EVTCHN(evtchn))
+               return -1;
+
+       /*
+        * Events delivered via platform PCI interrupts are always
+        * routed to vcpu 0 and hence cannot be rebound.
+        */
+       if (xen_hvm_domain() && !xen_have_vector_callback)
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
···
        return ret;
 }

-static void restore_cpu_pirqs(void)
+static void restore_pirqs(void)
 {
        int pirq, rc, irq, gsi;
        struct physdev_map_pirq map_irq;
+       struct irq_info *info;

-       for (pirq = 0; pirq < nr_irqs; pirq++) {
-               irq = pirq_to_irq[pirq];
-               if (irq == -1)
+       list_for_each_entry(info, &xen_irq_list_head, list) {
+               if (info->type != IRQT_PIRQ)
                        continue;
+
+               pirq = info->u.pirq.pirq;
+               gsi = info->u.pirq.gsi;
+               irq = info->irq;

                /* save/restore of PT devices doesn't work, so at this point the
                 * only devices present are GSI based emulated devices */
-               gsi = gsi_from_irq(irq);
                if (!gsi)
                        continue;
···
                if (rc) {
                        printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                                        gsi, irq, pirq, rc);
-                       irq_info[irq] = mk_unbound_info();
-                       pirq_to_irq[pirq] = -1;
+                       xen_free_irq(irq);
                        continue;
                }
···
                evtchn = bind_virq.port;

                /* Record the new mapping. */
-               evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_virq_info(evtchn, virq);
+               xen_irq_info_virq_init(cpu, irq, evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
 }
···
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
-               evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_ipi_info(evtchn, ipi);
+               xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
 }
···

 void xen_irq_resume(void)
 {
-       unsigned int cpu, irq, evtchn;
+       unsigned int cpu, evtchn;
+       struct irq_info *info;

        init_evtchn_cpu_bindings();
···
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
-       for (irq = 0; irq < nr_irqs; irq++)
-               irq_info[irq].evtchn = 0; /* zap event-channel binding */
+       list_for_each_entry(info, &xen_irq_list_head, list)
+               info->evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;
···
                restore_cpu_ipis(cpu);
        }

-       restore_cpu_pirqs();
+       restore_pirqs();
 }

 static struct irq_chip xen_dynamic_chip __read_mostly = {
···
 void __init xen_init_IRQ(void)
 {
        int i;
-
-       cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
-                                   GFP_KERNEL);
-       irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
-
-       /* We are using nr_irqs as the maximum number of pirq available but
-        * that number is actually chosen by Xen and we don't know exactly
-        * what it is. Be careful choosing high pirq numbers. */
-       pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
-       for (i = 0; i < nr_irqs; i++)
-               pirq_to_irq[i] = -1;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);

include/xen/events.h | +10 -14
···
        (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
 }

-extern void notify_remote_via_irq(int irq);
+void notify_remote_via_irq(int irq);

-extern void xen_irq_resume(void);
+void xen_irq_resume(void);

 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq);
···
 unsigned irq_from_evtchn(unsigned int evtchn);

 /* Xen HVM evtchn vector callback */
-extern void xen_hvm_callback_vector(void);
+void xen_hvm_callback_vector(void);
 extern int xen_have_vector_callback;
 int xen_set_callback_via(uint64_t via);
 void xen_evtchn_do_upcall(struct pt_regs *regs);
 void xen_hvm_evtchn_do_upcall(void);

-/* Allocate an irq for a physical interrupt, given a gsi. "Legacy"
- * GSIs are identity mapped; others are dynamically allocated as
- * usual. */
-int xen_allocate_pirq(unsigned gsi, int shareable, char *name);
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name);
+/* Allocate a pirq for a physical interrupt, given a gsi. */
+int xen_allocate_pirq_gsi(unsigned gsi);
+/* Bind a pirq for a physical interrupt to an irq. */
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+                            unsigned pirq, int shareable, char *name);

 #ifdef CONFIG_PCI_MSI
+/* Allocate a pirq for an MSI style physical interrupt. */
 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
+/* Bind an MSI pirq to an irq. */
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                             int pirq, int vector, const char *name);
 #endif

 /* De-allocates the above mentioned physical interrupt. */
 int xen_destroy_irq(int irq);
-
-/* Return vector allocated to pirq */
-int xen_vector_from_irq(unsigned pirq);
-
-/* Return gsi allocated to pirq */
-int xen_gsi_from_irq(unsigned pirq);

 /* Return irq from pirq */
 int xen_irq_from_pirq(unsigned pirq);