Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Add new interrupt mapping core and change platforms to use it

This adds the new irq remapper core and removes the old one. Because
there are some fundamental conflicts with the old code, like the value
of NO_IRQ which I'm now setting to 0 (as per discussions with Linus),
etc..., this commit also changes the relevant platform and driver code
over to use the new remapper (so as not to cause difficulties later
in bisecting).

This patch removes the old pre-parsing of the open firmware interrupt
tree along with all the bogus assumptions it made to try to renumber
interrupts according to the platform. This is all to be handled by the
new code now.

For the pSeries XICS interrupt controller, a single remapper host is
created for the whole machine regardless of how many interrupt
presentation and source controllers are found, and it's set to match
any device node that isn't an 8259. That works fine on pSeries and
avoids having to deal with some of the complexities of split source
controllers vs. presentation controllers in the pSeries device trees.

The powerpc i8259 PIC driver now always requests the legacy interrupt
range. It also has the feature of being able to match any device node
(including NULL) if passed no device node as an input. That will help
porting over platforms with broken device-trees like Pegasos, which don't
have a proper interrupt tree.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Benjamin Herrenschmidt and committed by
Paul Mackerras
0ebfff14 f63e115f

+2841 -2138
+2 -7
arch/powerpc/kernel/ibmebus.c
··· 323 323 unsigned long irq_flags, const char * devname, 324 324 void *dev_id) 325 325 { 326 - unsigned int irq = virt_irq_create_mapping(ist); 326 + unsigned int irq = irq_create_mapping(NULL, ist, 0); 327 327 328 328 if (irq == NO_IRQ) 329 329 return -EINVAL; 330 - 331 - irq = irq_offset_up(irq); 332 330 333 331 return request_irq(irq, handler, 334 332 irq_flags, devname, dev_id); ··· 335 337 336 338 void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id) 337 339 { 338 - unsigned int irq = virt_irq_create_mapping(ist); 340 + unsigned int irq = irq_find_mapping(NULL, ist); 339 341 340 - irq = irq_offset_up(irq); 341 342 free_irq(irq, dev_id); 342 - 343 - return; 344 343 } 345 344 EXPORT_SYMBOL(ibmebus_free_irq); 346 345
+507 -122
arch/powerpc/kernel/irq.c
··· 29 29 * to reduce code space and undefined function references. 30 30 */ 31 31 32 + #undef DEBUG 33 + 32 34 #include <linux/module.h> 33 35 #include <linux/threads.h> 34 36 #include <linux/kernel_stat.h> ··· 48 46 #include <linux/cpumask.h> 49 47 #include <linux/profile.h> 50 48 #include <linux/bitops.h> 51 - #include <linux/pci.h> 49 + #include <linux/list.h> 50 + #include <linux/radix-tree.h> 51 + #include <linux/mutex.h> 52 + #include <linux/bootmem.h> 52 53 53 54 #include <asm/uaccess.h> 54 55 #include <asm/system.h> ··· 62 57 #include <asm/prom.h> 63 58 #include <asm/ptrace.h> 64 59 #include <asm/machdep.h> 60 + #include <asm/udbg.h> 65 61 #ifdef CONFIG_PPC_ISERIES 66 62 #include <asm/paca.h> 67 63 #endif ··· 94 88 EXPORT_SYMBOL(irq_desc); 95 89 96 90 int distribute_irqs = 1; 97 - u64 ppc64_interrupt_controller; 98 91 #endif /* CONFIG_PPC64 */ 99 92 100 93 int show_interrupts(struct seq_file *p, void *v) ··· 186 181 187 182 void do_IRQ(struct pt_regs *regs) 188 183 { 189 - int irq; 184 + unsigned int irq; 190 185 #ifdef CONFIG_IRQSTACKS 191 186 struct thread_info *curtp, *irqtp; 192 187 #endif ··· 217 212 */ 218 213 irq = ppc_md.get_irq(regs); 219 214 220 - if (irq >= 0) { 215 + if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { 221 216 #ifdef CONFIG_IRQSTACKS 222 217 /* Switch to the irq stack to handle this */ 223 218 curtp = current_thread_info(); ··· 236 231 } else 237 232 #endif 238 233 generic_handle_irq(irq, regs); 239 - } else if (irq != -2) 234 + } else if (irq != NO_IRQ_IGNORE) 240 235 /* That's not SMP safe ... but who cares ? */ 241 236 ppc_spurious_interrupts++; 242 237 ··· 259 254 #endif 260 255 } 261 256 262 - #ifdef CONFIG_PPC64 263 - /* 264 - * Virtual IRQ mapping code, used on systems with XICS interrupt controllers. 265 - */ 266 - 267 - #define UNDEFINED_IRQ 0xffffffff 268 - unsigned int virt_irq_to_real_map[NR_IRQS]; 269 - 270 - /* 271 - * Don't use virtual irqs 0, 1, 2 for devices. 
272 - * The pcnet32 driver considers interrupt numbers < 2 to be invalid, 273 - * and 2 is the XICS IPI interrupt. 274 - * We limit virtual irqs to __irq_offet_value less than virt_irq_max so 275 - * that when we offset them we don't end up with an interrupt 276 - * number >= virt_irq_max. 277 - */ 278 - #define MIN_VIRT_IRQ 3 279 - 280 - unsigned int virt_irq_max; 281 - static unsigned int max_virt_irq; 282 - static unsigned int nr_virt_irqs; 283 - 284 - void 285 - virt_irq_init(void) 286 - { 287 - int i; 288 - 289 - if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1))) 290 - virt_irq_max = NR_IRQS - 1; 291 - max_virt_irq = virt_irq_max - __irq_offset_value; 292 - nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1; 293 - 294 - for (i = 0; i < NR_IRQS; i++) 295 - virt_irq_to_real_map[i] = UNDEFINED_IRQ; 296 - } 297 - 298 - /* Create a mapping for a real_irq if it doesn't already exist. 299 - * Return the virtual irq as a convenience. 300 - */ 301 - int virt_irq_create_mapping(unsigned int real_irq) 302 - { 303 - unsigned int virq, first_virq; 304 - static int warned; 305 - 306 - if (ppc64_interrupt_controller == IC_OPEN_PIC) 307 - return real_irq; /* no mapping for openpic (for now) */ 308 - 309 - if (ppc64_interrupt_controller == IC_CELL_PIC) 310 - return real_irq; /* no mapping for iic either */ 311 - 312 - /* don't map interrupts < MIN_VIRT_IRQ */ 313 - if (real_irq < MIN_VIRT_IRQ) { 314 - virt_irq_to_real_map[real_irq] = real_irq; 315 - return real_irq; 316 - } 317 - 318 - /* map to a number between MIN_VIRT_IRQ and max_virt_irq */ 319 - virq = real_irq; 320 - if (virq > max_virt_irq) 321 - virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; 322 - 323 - /* search for this number or a free slot */ 324 - first_virq = virq; 325 - while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { 326 - if (virt_irq_to_real_map[virq] == real_irq) 327 - return virq; 328 - if (++virq > max_virt_irq) 329 - virq = MIN_VIRT_IRQ; 330 - if (virq == first_virq) 331 - goto nospace; /* oops, no 
free slots */ 332 - } 333 - 334 - virt_irq_to_real_map[virq] = real_irq; 335 - return virq; 336 - 337 - nospace: 338 - if (!warned) { 339 - printk(KERN_CRIT "Interrupt table is full\n"); 340 - printk(KERN_CRIT "Increase virt_irq_max (currently %d) " 341 - "in your kernel sources and rebuild.\n", virt_irq_max); 342 - warned = 1; 343 - } 344 - return NO_IRQ; 345 - } 346 - 347 - /* 348 - * In most cases will get a hit on the very first slot checked in the 349 - * virt_irq_to_real_map. Only when there are a large number of 350 - * IRQs will this be expensive. 351 - */ 352 - unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) 353 - { 354 - unsigned int virq; 355 - unsigned int first_virq; 356 - 357 - virq = real_irq; 358 - 359 - if (virq > max_virt_irq) 360 - virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; 361 - 362 - first_virq = virq; 363 - 364 - do { 365 - if (virt_irq_to_real_map[virq] == real_irq) 366 - return virq; 367 - 368 - virq++; 369 - 370 - if (virq >= max_virt_irq) 371 - virq = 0; 372 - 373 - } while (first_virq != virq); 374 - 375 - return NO_IRQ; 376 - 377 - } 378 - #endif /* CONFIG_PPC64 */ 379 257 380 258 #ifdef CONFIG_IRQSTACKS 381 259 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; ··· 317 429 local_irq_restore(flags); 318 430 } 319 431 EXPORT_SYMBOL(do_softirq); 432 + 433 + 434 + /* 435 + * IRQ controller and virtual interrupts 436 + */ 437 + 438 + #ifdef CONFIG_PPC_MERGE 439 + 440 + static LIST_HEAD(irq_hosts); 441 + static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED; 442 + 443 + struct irq_map_entry irq_map[NR_IRQS]; 444 + static unsigned int irq_virq_count = NR_IRQS; 445 + static struct irq_host *irq_default_host; 446 + 447 + struct irq_host *irq_alloc_host(unsigned int revmap_type, 448 + unsigned int revmap_arg, 449 + struct irq_host_ops *ops, 450 + irq_hw_number_t inval_irq) 451 + { 452 + struct irq_host *host; 453 + unsigned int size = sizeof(struct irq_host); 454 + unsigned int i; 455 + unsigned int *rmap; 456 + unsigned long 
flags; 457 + 458 + /* Allocate structure and revmap table if using linear mapping */ 459 + if (revmap_type == IRQ_HOST_MAP_LINEAR) 460 + size += revmap_arg * sizeof(unsigned int); 461 + if (mem_init_done) 462 + host = kzalloc(size, GFP_KERNEL); 463 + else { 464 + host = alloc_bootmem(size); 465 + if (host) 466 + memset(host, 0, size); 467 + } 468 + if (host == NULL) 469 + return NULL; 470 + 471 + /* Fill structure */ 472 + host->revmap_type = revmap_type; 473 + host->inval_irq = inval_irq; 474 + host->ops = ops; 475 + 476 + spin_lock_irqsave(&irq_big_lock, flags); 477 + 478 + /* If it's a legacy controller, check for duplicates and 479 + * mark it as allocated (we use irq 0 host pointer for that 480 + */ 481 + if (revmap_type == IRQ_HOST_MAP_LEGACY) { 482 + if (irq_map[0].host != NULL) { 483 + spin_unlock_irqrestore(&irq_big_lock, flags); 484 + /* If we are early boot, we can't free the structure, 485 + * too bad... 486 + * this will be fixed once slab is made available early 487 + * instead of the current cruft 488 + */ 489 + if (mem_init_done) 490 + kfree(host); 491 + return NULL; 492 + } 493 + irq_map[0].host = host; 494 + } 495 + 496 + list_add(&host->link, &irq_hosts); 497 + spin_unlock_irqrestore(&irq_big_lock, flags); 498 + 499 + /* Additional setups per revmap type */ 500 + switch(revmap_type) { 501 + case IRQ_HOST_MAP_LEGACY: 502 + /* 0 is always the invalid number for legacy */ 503 + host->inval_irq = 0; 504 + /* setup us as the host for all legacy interrupts */ 505 + for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 506 + irq_map[i].hwirq = 0; 507 + smp_wmb(); 508 + irq_map[i].host = host; 509 + smp_wmb(); 510 + 511 + /* Clear some flags */ 512 + get_irq_desc(i)->status 513 + &= ~(IRQ_NOREQUEST | IRQ_LEVEL); 514 + 515 + /* Legacy flags are left to default at this point, 516 + * one can then use irq_create_mapping() to 517 + * explicitely change them 518 + */ 519 + ops->map(host, i, i, 0); 520 + } 521 + break; 522 + case IRQ_HOST_MAP_LINEAR: 523 + rmap = 
(unsigned int *)(host + 1); 524 + for (i = 0; i < revmap_arg; i++) 525 + rmap[i] = IRQ_NONE; 526 + host->revmap_data.linear.size = revmap_arg; 527 + smp_wmb(); 528 + host->revmap_data.linear.revmap = rmap; 529 + break; 530 + default: 531 + break; 532 + } 533 + 534 + pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); 535 + 536 + return host; 537 + } 538 + 539 + struct irq_host *irq_find_host(struct device_node *node) 540 + { 541 + struct irq_host *h, *found = NULL; 542 + unsigned long flags; 543 + 544 + /* We might want to match the legacy controller last since 545 + * it might potentially be set to match all interrupts in 546 + * the absence of a device node. This isn't a problem so far 547 + * yet though... 548 + */ 549 + spin_lock_irqsave(&irq_big_lock, flags); 550 + list_for_each_entry(h, &irq_hosts, link) 551 + if (h->ops->match == NULL || h->ops->match(h, node)) { 552 + found = h; 553 + break; 554 + } 555 + spin_unlock_irqrestore(&irq_big_lock, flags); 556 + return found; 557 + } 558 + EXPORT_SYMBOL_GPL(irq_find_host); 559 + 560 + void irq_set_default_host(struct irq_host *host) 561 + { 562 + pr_debug("irq: Default host set to @0x%p\n", host); 563 + 564 + irq_default_host = host; 565 + } 566 + 567 + void irq_set_virq_count(unsigned int count) 568 + { 569 + pr_debug("irq: Trying to set virq count to %d\n", count); 570 + 571 + BUG_ON(count < NUM_ISA_INTERRUPTS); 572 + if (count < NR_IRQS) 573 + irq_virq_count = count; 574 + } 575 + 576 + unsigned int irq_create_mapping(struct irq_host *host, 577 + irq_hw_number_t hwirq, 578 + unsigned int flags) 579 + { 580 + unsigned int virq, hint; 581 + 582 + pr_debug("irq: irq_create_mapping(0x%p, 0x%lx, 0x%x)\n", 583 + host, hwirq, flags); 584 + 585 + /* Look for default host if nececssary */ 586 + if (host == NULL) 587 + host = irq_default_host; 588 + if (host == NULL) { 589 + printk(KERN_WARNING "irq_create_mapping called for" 590 + " NULL host, hwirq=%lx\n", hwirq); 591 + WARN_ON(1); 592 + return 
NO_IRQ; 593 + } 594 + pr_debug("irq: -> using host @%p\n", host); 595 + 596 + /* Check if mapping already exist, if it does, call 597 + * host->ops->map() to update the flags 598 + */ 599 + virq = irq_find_mapping(host, hwirq); 600 + if (virq != IRQ_NONE) { 601 + pr_debug("irq: -> existing mapping on virq %d\n", virq); 602 + host->ops->map(host, virq, hwirq, flags); 603 + return virq; 604 + } 605 + 606 + /* Get a virtual interrupt number */ 607 + if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { 608 + /* Handle legacy */ 609 + virq = (unsigned int)hwirq; 610 + if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) 611 + return NO_IRQ; 612 + return virq; 613 + } else { 614 + /* Allocate a virtual interrupt number */ 615 + hint = hwirq % irq_virq_count; 616 + virq = irq_alloc_virt(host, 1, hint); 617 + if (virq == NO_IRQ) { 618 + pr_debug("irq: -> virq allocation failed\n"); 619 + return NO_IRQ; 620 + } 621 + } 622 + pr_debug("irq: -> obtained virq %d\n", virq); 623 + 624 + /* Clear some flags */ 625 + get_irq_desc(virq)->status &= ~(IRQ_NOREQUEST | IRQ_LEVEL); 626 + 627 + /* map it */ 628 + if (host->ops->map(host, virq, hwirq, flags)) { 629 + pr_debug("irq: -> mapping failed, freeing\n"); 630 + irq_free_virt(virq, 1); 631 + return NO_IRQ; 632 + } 633 + smp_wmb(); 634 + irq_map[virq].hwirq = hwirq; 635 + smp_mb(); 636 + return virq; 637 + } 638 + EXPORT_SYMBOL_GPL(irq_create_mapping); 639 + 640 + extern unsigned int irq_create_of_mapping(struct device_node *controller, 641 + u32 *intspec, unsigned int intsize) 642 + { 643 + struct irq_host *host; 644 + irq_hw_number_t hwirq; 645 + unsigned int flags = IRQ_TYPE_NONE; 646 + 647 + if (controller == NULL) 648 + host = irq_default_host; 649 + else 650 + host = irq_find_host(controller); 651 + if (host == NULL) 652 + return NO_IRQ; 653 + 654 + /* If host has no translation, then we assume interrupt line */ 655 + if (host->ops->xlate == NULL) 656 + hwirq = intspec[0]; 657 + else { 658 + if (host->ops->xlate(host, controller, intspec, 
intsize, 659 + &hwirq, &flags)) 660 + return NO_IRQ; 661 + } 662 + 663 + return irq_create_mapping(host, hwirq, flags); 664 + } 665 + EXPORT_SYMBOL_GPL(irq_create_of_mapping); 666 + 667 + unsigned int irq_of_parse_and_map(struct device_node *dev, int index) 668 + { 669 + struct of_irq oirq; 670 + 671 + if (of_irq_map_one(dev, index, &oirq)) 672 + return NO_IRQ; 673 + 674 + return irq_create_of_mapping(oirq.controller, oirq.specifier, 675 + oirq.size); 676 + } 677 + EXPORT_SYMBOL_GPL(irq_of_parse_and_map); 678 + 679 + void irq_dispose_mapping(unsigned int virq) 680 + { 681 + struct irq_host *host = irq_map[virq].host; 682 + irq_hw_number_t hwirq; 683 + unsigned long flags; 684 + 685 + WARN_ON (host == NULL); 686 + if (host == NULL) 687 + return; 688 + 689 + /* Never unmap legacy interrupts */ 690 + if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 691 + return; 692 + 693 + /* remove chip and handler */ 694 + set_irq_chip_and_handler(virq, NULL, NULL); 695 + 696 + /* Make sure it's completed */ 697 + synchronize_irq(virq); 698 + 699 + /* Tell the PIC about it */ 700 + if (host->ops->unmap) 701 + host->ops->unmap(host, virq); 702 + smp_mb(); 703 + 704 + /* Clear reverse map */ 705 + hwirq = irq_map[virq].hwirq; 706 + switch(host->revmap_type) { 707 + case IRQ_HOST_MAP_LINEAR: 708 + if (hwirq < host->revmap_data.linear.size) 709 + host->revmap_data.linear.revmap[hwirq] = IRQ_NONE; 710 + break; 711 + case IRQ_HOST_MAP_TREE: 712 + /* Check if radix tree allocated yet */ 713 + if (host->revmap_data.tree.gfp_mask == 0) 714 + break; 715 + /* XXX radix tree not safe ! 
remove lock whem it becomes safe 716 + * and use some RCU sync to make sure everything is ok before we 717 + * can re-use that map entry 718 + */ 719 + spin_lock_irqsave(&irq_big_lock, flags); 720 + radix_tree_delete(&host->revmap_data.tree, hwirq); 721 + spin_unlock_irqrestore(&irq_big_lock, flags); 722 + break; 723 + } 724 + 725 + /* Destroy map */ 726 + smp_mb(); 727 + irq_map[virq].hwirq = host->inval_irq; 728 + 729 + /* Set some flags */ 730 + get_irq_desc(virq)->status |= IRQ_NOREQUEST; 731 + 732 + /* Free it */ 733 + irq_free_virt(virq, 1); 734 + } 735 + EXPORT_SYMBOL_GPL(irq_dispose_mapping); 736 + 737 + unsigned int irq_find_mapping(struct irq_host *host, 738 + irq_hw_number_t hwirq) 739 + { 740 + unsigned int i; 741 + unsigned int hint = hwirq % irq_virq_count; 742 + 743 + /* Look for default host if nececssary */ 744 + if (host == NULL) 745 + host = irq_default_host; 746 + if (host == NULL) 747 + return NO_IRQ; 748 + 749 + /* legacy -> bail early */ 750 + if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 751 + return hwirq; 752 + 753 + /* Slow path does a linear search of the map */ 754 + if (hint < NUM_ISA_INTERRUPTS) 755 + hint = NUM_ISA_INTERRUPTS; 756 + i = hint; 757 + do { 758 + if (irq_map[i].host == host && 759 + irq_map[i].hwirq == hwirq) 760 + return i; 761 + i++; 762 + if (i >= irq_virq_count) 763 + i = NUM_ISA_INTERRUPTS; 764 + } while(i != hint); 765 + return NO_IRQ; 766 + } 767 + EXPORT_SYMBOL_GPL(irq_find_mapping); 768 + 769 + 770 + unsigned int irq_radix_revmap(struct irq_host *host, 771 + irq_hw_number_t hwirq) 772 + { 773 + struct radix_tree_root *tree; 774 + struct irq_map_entry *ptr; 775 + unsigned int virq; 776 + unsigned long flags; 777 + 778 + WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 779 + 780 + /* Check if the radix tree exist yet. We test the value of 781 + * the gfp_mask for that. Sneaky but saves another int in the 782 + * structure. 
If not, we fallback to slow mode 783 + */ 784 + tree = &host->revmap_data.tree; 785 + if (tree->gfp_mask == 0) 786 + return irq_find_mapping(host, hwirq); 787 + 788 + /* XXX Current radix trees are NOT SMP safe !!! Remove that lock 789 + * when that is fixed (when Nick's patch gets in 790 + */ 791 + spin_lock_irqsave(&irq_big_lock, flags); 792 + 793 + /* Now try to resolve */ 794 + ptr = radix_tree_lookup(tree, hwirq); 795 + /* Found it, return */ 796 + if (ptr) { 797 + virq = ptr - irq_map; 798 + goto bail; 799 + } 800 + 801 + /* If not there, try to insert it */ 802 + virq = irq_find_mapping(host, hwirq); 803 + if (virq != NO_IRQ) 804 + radix_tree_insert(tree, virq, &irq_map[virq]); 805 + bail: 806 + spin_unlock_irqrestore(&irq_big_lock, flags); 807 + return virq; 808 + } 809 + 810 + unsigned int irq_linear_revmap(struct irq_host *host, 811 + irq_hw_number_t hwirq) 812 + { 813 + unsigned int *revmap; 814 + 815 + WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); 816 + 817 + /* Check revmap bounds */ 818 + if (unlikely(hwirq >= host->revmap_data.linear.size)) 819 + return irq_find_mapping(host, hwirq); 820 + 821 + /* Check if revmap was allocated */ 822 + revmap = host->revmap_data.linear.revmap; 823 + if (unlikely(revmap == NULL)) 824 + return irq_find_mapping(host, hwirq); 825 + 826 + /* Fill up revmap with slow path if no mapping found */ 827 + if (unlikely(revmap[hwirq] == NO_IRQ)) 828 + revmap[hwirq] = irq_find_mapping(host, hwirq); 829 + 830 + return revmap[hwirq]; 831 + } 832 + 833 + unsigned int irq_alloc_virt(struct irq_host *host, 834 + unsigned int count, 835 + unsigned int hint) 836 + { 837 + unsigned long flags; 838 + unsigned int i, j, found = NO_IRQ; 839 + unsigned int limit = irq_virq_count - count; 840 + 841 + if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) 842 + return NO_IRQ; 843 + 844 + spin_lock_irqsave(&irq_big_lock, flags); 845 + 846 + /* Use hint for 1 interrupt if any */ 847 + if (count == 1 && hint >= NUM_ISA_INTERRUPTS 
&& 848 + hint < irq_virq_count && irq_map[hint].host == NULL) { 849 + found = hint; 850 + goto hint_found; 851 + } 852 + 853 + /* Look for count consecutive numbers in the allocatable 854 + * (non-legacy) space 855 + */ 856 + for (i = NUM_ISA_INTERRUPTS; i <= limit; ) { 857 + for (j = i; j < (i + count); j++) 858 + if (irq_map[j].host != NULL) { 859 + i = j + 1; 860 + continue; 861 + } 862 + found = i; 863 + break; 864 + } 865 + if (found == NO_IRQ) { 866 + spin_unlock_irqrestore(&irq_big_lock, flags); 867 + return NO_IRQ; 868 + } 869 + hint_found: 870 + for (i = found; i < (found + count); i++) { 871 + irq_map[i].hwirq = host->inval_irq; 872 + smp_wmb(); 873 + irq_map[i].host = host; 874 + } 875 + spin_unlock_irqrestore(&irq_big_lock, flags); 876 + return found; 877 + } 878 + 879 + void irq_free_virt(unsigned int virq, unsigned int count) 880 + { 881 + unsigned long flags; 882 + unsigned int i; 883 + 884 + WARN_ON (virq < NUM_ISA_INTERRUPTS); 885 + WARN_ON (count == 0 || (virq + count) > irq_virq_count); 886 + 887 + spin_lock_irqsave(&irq_big_lock, flags); 888 + for (i = virq; i < (virq + count); i++) { 889 + struct irq_host *host; 890 + 891 + if (i < NUM_ISA_INTERRUPTS || 892 + (virq + count) > irq_virq_count) 893 + continue; 894 + 895 + host = irq_map[i].host; 896 + irq_map[i].hwirq = host->inval_irq; 897 + smp_wmb(); 898 + irq_map[i].host = NULL; 899 + } 900 + spin_unlock_irqrestore(&irq_big_lock, flags); 901 + } 902 + 903 + void irq_early_init(void) 904 + { 905 + unsigned int i; 906 + 907 + for (i = 0; i < NR_IRQS; i++) 908 + get_irq_desc(i)->status |= IRQ_NOREQUEST; 909 + } 910 + 911 + /* We need to create the radix trees late */ 912 + static int irq_late_init(void) 913 + { 914 + struct irq_host *h; 915 + unsigned long flags; 916 + 917 + spin_lock_irqsave(&irq_big_lock, flags); 918 + list_for_each_entry(h, &irq_hosts, link) { 919 + if (h->revmap_type == IRQ_HOST_MAP_TREE) 920 + INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC); 921 + } 922 + 
spin_unlock_irqrestore(&irq_big_lock, flags); 923 + 924 + return 0; 925 + } 926 + arch_initcall(irq_late_init); 927 + 928 + #endif /* CONFIG_PPC_MERGE */ 320 929 321 930 #ifdef CONFIG_PCI_MSI 322 931 int pci_enable_msi(struct pci_dev * pdev)
+22 -24
arch/powerpc/kernel/legacy_serial.c
··· 28 28 struct device_node *np; 29 29 unsigned int speed; 30 30 unsigned int clock; 31 + int irq_check_parent; 31 32 phys_addr_t taddr; 32 33 } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; 33 34 static unsigned int legacy_serial_count; ··· 37 36 static int __init add_legacy_port(struct device_node *np, int want_index, 38 37 int iotype, phys_addr_t base, 39 38 phys_addr_t taddr, unsigned long irq, 40 - upf_t flags) 39 + upf_t flags, int irq_check_parent) 41 40 { 42 41 u32 *clk, *spd, clock = BASE_BAUD * 16; 43 42 int index; ··· 69 68 if (legacy_serial_infos[index].np != 0) { 70 69 /* if we still have some room, move it, else override */ 71 70 if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) { 72 - printk(KERN_INFO "Moved legacy port %d -> %d\n", 71 + printk(KERN_DEBUG "Moved legacy port %d -> %d\n", 73 72 index, legacy_serial_count); 74 73 legacy_serial_ports[legacy_serial_count] = 75 74 legacy_serial_ports[index]; ··· 77 76 legacy_serial_infos[index]; 78 77 legacy_serial_count++; 79 78 } else { 80 - printk(KERN_INFO "Replacing legacy port %d\n", index); 79 + printk(KERN_DEBUG "Replacing legacy port %d\n", index); 81 80 } 82 81 } 83 82 ··· 96 95 legacy_serial_infos[index].np = of_node_get(np); 97 96 legacy_serial_infos[index].clock = clock; 98 97 legacy_serial_infos[index].speed = spd ? *spd : 0; 98 + legacy_serial_infos[index].irq_check_parent = irq_check_parent; 99 99 100 - printk(KERN_INFO "Found legacy serial port %d for %s\n", 100 + printk(KERN_DEBUG "Found legacy serial port %d for %s\n", 101 101 index, np->full_name); 102 - printk(KERN_INFO " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n", 102 + printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n", 103 103 (iotype == UPIO_PORT) ? "port" : "mem", 104 104 (unsigned long long)base, (unsigned long long)taddr, irq, 105 105 legacy_serial_ports[index].uartclk, ··· 134 132 /* Add port, irq will be dealt with later. We passed a translated 135 133 * IO port value. 
It will be fixed up later along with the irq 136 134 */ 137 - return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags); 135 + return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); 138 136 } 139 137 140 138 static int __init add_legacy_isa_port(struct device_node *np, ··· 172 170 173 171 /* Add port, irq will be dealt with later */ 174 172 return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, 175 - NO_IRQ, UPF_BOOT_AUTOCONF); 173 + NO_IRQ, UPF_BOOT_AUTOCONF, 0); 176 174 177 175 } 178 176 ··· 244 242 /* Add port, irq will be dealt with later. We passed a translated 245 243 * IO port value. It will be fixed up later along with the irq 246 244 */ 247 - return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, UPF_BOOT_AUTOCONF); 245 + return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, 246 + UPF_BOOT_AUTOCONF, np != pci_dev); 248 247 } 249 248 #endif 250 249 ··· 376 373 struct device_node *np, 377 374 struct plat_serial8250_port *port) 378 375 { 376 + unsigned int virq; 377 + 379 378 DBG("fixup_port_irq(%d)\n", index); 380 379 381 - /* Check for interrupts in that node */ 382 - if (np->n_intrs > 0) { 383 - port->irq = np->intrs[0].line; 384 - DBG(" port %d (%s), irq=%d\n", 385 - index, np->full_name, port->irq); 386 - return; 380 + virq = irq_of_parse_and_map(np, 0); 381 + if (virq == NO_IRQ && legacy_serial_infos[index].irq_check_parent) { 382 + np = of_get_parent(np); 383 + if (np == NULL) 384 + return; 385 + virq = irq_of_parse_and_map(np, 0); 386 + of_node_put(np); 387 387 } 388 - 389 - /* Check for interrupts in the parent */ 390 - np = of_get_parent(np); 391 - if (np == NULL) 388 + if (virq == NO_IRQ) 392 389 return; 393 390 394 - if (np->n_intrs > 0) { 395 - port->irq = np->intrs[0].line; 396 - DBG(" port %d (%s), irq=%d\n", 397 - index, np->full_name, port->irq); 398 - } 399 - of_node_put(np); 391 + port->irq = virq; 400 392 } 401 393 402 394 static void __init fixup_port_pio(int index,
+37
arch/powerpc/kernel/pci_32.c
··· 1404 1404 /* XXX FIXME - update OF device tree node interrupt property */ 1405 1405 } 1406 1406 1407 + #ifdef CONFIG_PPC_MERGE 1408 + /* XXX This is a copy of the ppc64 version. This is temporary until we start 1409 + * merging the 2 PCI layers 1410 + */ 1411 + /* 1412 + * Reads the interrupt pin to determine if interrupt is use by card. 1413 + * If the interrupt is used, then gets the interrupt line from the 1414 + * openfirmware and sets it in the pci_dev and pci_config line. 1415 + */ 1416 + int pci_read_irq_line(struct pci_dev *pci_dev) 1417 + { 1418 + struct of_irq oirq; 1419 + unsigned int virq; 1420 + 1421 + DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 1422 + 1423 + if (of_irq_map_pci(pci_dev, &oirq)) { 1424 + DBG(" -> failed !\n"); 1425 + return -1; 1426 + } 1427 + 1428 + DBG(" -> got one, spec %d cells (0x%08x...) on %s\n", 1429 + oirq.size, oirq.specifier[0], oirq.controller->full_name); 1430 + 1431 + virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); 1432 + if(virq == NO_IRQ) { 1433 + DBG(" -> failed to map !\n"); 1434 + return -1; 1435 + } 1436 + pci_dev->irq = virq; 1437 + pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq); 1438 + 1439 + return 0; 1440 + } 1441 + EXPORT_SYMBOL(pci_read_irq_line); 1442 + #endif /* CONFIG_PPC_MERGE */ 1443 + 1407 1444 int pcibios_enable_device(struct pci_dev *dev, int mask) 1408 1445 { 1409 1446 u16 cmd, old_cmd;
+16 -17
arch/powerpc/kernel/pci_64.c
··· 398 398 } else { 399 399 dev->hdr_type = PCI_HEADER_TYPE_NORMAL; 400 400 dev->rom_base_reg = PCI_ROM_ADDRESS; 401 + /* Maybe do a default OF mapping here */ 401 402 dev->irq = NO_IRQ; 402 - if (node->n_intrs > 0) { 403 - dev->irq = node->intrs[0].line; 404 - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 405 - dev->irq); 406 - } 407 403 } 408 404 409 405 pci_parse_of_addrs(node, dev); ··· 1284 1288 */ 1285 1289 int pci_read_irq_line(struct pci_dev *pci_dev) 1286 1290 { 1287 - u8 intpin; 1288 - struct device_node *node; 1291 + struct of_irq oirq; 1292 + unsigned int virq; 1289 1293 1290 - pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin); 1291 - if (intpin == 0) 1292 - return 0; 1294 + DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 1293 1295 1294 - node = pci_device_to_OF_node(pci_dev); 1295 - if (node == NULL) 1296 + if (of_irq_map_pci(pci_dev, &oirq)) { 1297 + DBG(" -> failed !\n"); 1296 1298 return -1; 1299 + } 1297 1300 1298 - if (node->n_intrs == 0) 1301 + DBG(" -> got one, spec %d cells (0x%08x...) on %s\n", 1302 + oirq.size, oirq.specifier[0], oirq.controller->full_name); 1303 + 1304 + virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); 1305 + if(virq == NO_IRQ) { 1306 + DBG(" -> failed to map !\n"); 1299 1307 return -1; 1300 - 1301 - pci_dev->irq = node->intrs[0].line; 1302 - 1303 - pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq); 1308 + } 1309 + pci_dev->irq = virq; 1310 + pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq); 1304 1311 1305 1312 return 0; 1306 1313 }
+2 -449
arch/powerpc/kernel/prom.c
··· 30 30 #include <linux/module.h> 31 31 #include <linux/kexec.h> 32 32 #include <linux/debugfs.h> 33 + #include <linux/irq.h> 33 34 34 35 #include <asm/prom.h> 35 36 #include <asm/rtas.h> ··· 86 85 87 86 /* export that to outside world */ 88 87 struct device_node *of_chosen; 89 - 90 - struct device_node *dflt_interrupt_controller; 91 - int num_interrupt_controllers; 92 - 93 - /* 94 - * Wrapper for allocating memory for various data that needs to be 95 - * attached to device nodes as they are processed at boot or when 96 - * added to the device tree later (e.g. DLPAR). At boot there is 97 - * already a region reserved so we just increment *mem_start by size; 98 - * otherwise we call kmalloc. 99 - */ 100 - static void * prom_alloc(unsigned long size, unsigned long *mem_start) 101 - { 102 - unsigned long tmp; 103 - 104 - if (!mem_start) 105 - return kmalloc(size, GFP_KERNEL); 106 - 107 - tmp = *mem_start; 108 - *mem_start += size; 109 - return (void *)tmp; 110 - } 111 - 112 - /* 113 - * Find the device_node with a given phandle. 114 - */ 115 - static struct device_node * find_phandle(phandle ph) 116 - { 117 - struct device_node *np; 118 - 119 - for (np = allnodes; np != 0; np = np->allnext) 120 - if (np->linux_phandle == ph) 121 - return np; 122 - return NULL; 123 - } 124 - 125 - /* 126 - * Find the interrupt parent of a node. 127 - */ 128 - static struct device_node * __devinit intr_parent(struct device_node *p) 129 - { 130 - phandle *parp; 131 - 132 - parp = (phandle *) get_property(p, "interrupt-parent", NULL); 133 - if (parp == NULL) 134 - return p->parent; 135 - p = find_phandle(*parp); 136 - if (p != NULL) 137 - return p; 138 - /* 139 - * On a powermac booted with BootX, we don't get to know the 140 - * phandles for any nodes, so find_phandle will return NULL. 141 - * Fortunately these machines only have one interrupt controller 142 - * so there isn't in fact any ambiguity. 
-- paulus 143 - */ 144 - if (num_interrupt_controllers == 1) 145 - p = dflt_interrupt_controller; 146 - return p; 147 - } 148 - 149 - /* 150 - * Find out the size of each entry of the interrupts property 151 - * for a node. 152 - */ 153 - int __devinit prom_n_intr_cells(struct device_node *np) 154 - { 155 - struct device_node *p; 156 - unsigned int *icp; 157 - 158 - for (p = np; (p = intr_parent(p)) != NULL; ) { 159 - icp = (unsigned int *) 160 - get_property(p, "#interrupt-cells", NULL); 161 - if (icp != NULL) 162 - return *icp; 163 - if (get_property(p, "interrupt-controller", NULL) != NULL 164 - || get_property(p, "interrupt-map", NULL) != NULL) { 165 - printk("oops, node %s doesn't have #interrupt-cells\n", 166 - p->full_name); 167 - return 1; 168 - } 169 - } 170 - #ifdef DEBUG_IRQ 171 - printk("prom_n_intr_cells failed for %s\n", np->full_name); 172 - #endif 173 - return 1; 174 - } 175 - 176 - /* 177 - * Map an interrupt from a device up to the platform interrupt 178 - * descriptor. 
179 - */ 180 - static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler, 181 - struct device_node *np, unsigned int *ints, 182 - int nintrc) 183 - { 184 - struct device_node *p, *ipar; 185 - unsigned int *imap, *imask, *ip; 186 - int i, imaplen, match; 187 - int newintrc = 0, newaddrc = 0; 188 - unsigned int *reg; 189 - int naddrc; 190 - 191 - reg = (unsigned int *) get_property(np, "reg", NULL); 192 - naddrc = prom_n_addr_cells(np); 193 - p = intr_parent(np); 194 - while (p != NULL) { 195 - if (get_property(p, "interrupt-controller", NULL) != NULL) 196 - /* this node is an interrupt controller, stop here */ 197 - break; 198 - imap = (unsigned int *) 199 - get_property(p, "interrupt-map", &imaplen); 200 - if (imap == NULL) { 201 - p = intr_parent(p); 202 - continue; 203 - } 204 - imask = (unsigned int *) 205 - get_property(p, "interrupt-map-mask", NULL); 206 - if (imask == NULL) { 207 - printk("oops, %s has interrupt-map but no mask\n", 208 - p->full_name); 209 - return 0; 210 - } 211 - imaplen /= sizeof(unsigned int); 212 - match = 0; 213 - ipar = NULL; 214 - while (imaplen > 0 && !match) { 215 - /* check the child-interrupt field */ 216 - match = 1; 217 - for (i = 0; i < naddrc && match; ++i) 218 - match = ((reg[i] ^ imap[i]) & imask[i]) == 0; 219 - for (; i < naddrc + nintrc && match; ++i) 220 - match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0; 221 - imap += naddrc + nintrc; 222 - imaplen -= naddrc + nintrc; 223 - /* grab the interrupt parent */ 224 - ipar = find_phandle((phandle) *imap++); 225 - --imaplen; 226 - if (ipar == NULL && num_interrupt_controllers == 1) 227 - /* cope with BootX not giving us phandles */ 228 - ipar = dflt_interrupt_controller; 229 - if (ipar == NULL) { 230 - printk("oops, no int parent %x in map of %s\n", 231 - imap[-1], p->full_name); 232 - return 0; 233 - } 234 - /* find the parent's # addr and intr cells */ 235 - ip = (unsigned int *) 236 - get_property(ipar, "#interrupt-cells", NULL); 237 - if (ip == 
NULL) { 238 - printk("oops, no #interrupt-cells on %s\n", 239 - ipar->full_name); 240 - return 0; 241 - } 242 - newintrc = *ip; 243 - ip = (unsigned int *) 244 - get_property(ipar, "#address-cells", NULL); 245 - newaddrc = (ip == NULL)? 0: *ip; 246 - imap += newaddrc + newintrc; 247 - imaplen -= newaddrc + newintrc; 248 - } 249 - if (imaplen < 0) { 250 - printk("oops, error decoding int-map on %s, len=%d\n", 251 - p->full_name, imaplen); 252 - return 0; 253 - } 254 - if (!match) { 255 - #ifdef DEBUG_IRQ 256 - printk("oops, no match in %s int-map for %s\n", 257 - p->full_name, np->full_name); 258 - #endif 259 - return 0; 260 - } 261 - p = ipar; 262 - naddrc = newaddrc; 263 - nintrc = newintrc; 264 - ints = imap - nintrc; 265 - reg = ints - naddrc; 266 - } 267 - if (p == NULL) { 268 - #ifdef DEBUG_IRQ 269 - printk("hmmm, int tree for %s doesn't have ctrler\n", 270 - np->full_name); 271 - #endif 272 - return 0; 273 - } 274 - *irq = ints; 275 - *ictrler = p; 276 - return nintrc; 277 - } 278 - 279 - static unsigned char map_isa_senses[4] = { 280 - IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, 281 - IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, 282 - IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, 283 - IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE 284 - }; 285 - 286 - static unsigned char map_mpic_senses[4] = { 287 - IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE, 288 - IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, 289 - /* 2 seems to be used for the 8259 cascade... */ 290 - IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, 291 - IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, 292 - }; 293 - 294 - static int __devinit finish_node_interrupts(struct device_node *np, 295 - unsigned long *mem_start, 296 - int measure_only) 297 - { 298 - unsigned int *ints; 299 - int intlen, intrcells, intrcount; 300 - int i, j, n, sense; 301 - unsigned int *irq, virq; 302 - struct device_node *ic; 303 - int trace = 0; 304 - 305 - //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0) 306 - #define TRACE(fmt...) 
307 - 308 - if (!strcmp(np->name, "smu-doorbell")) 309 - trace = 1; 310 - 311 - TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n", 312 - num_interrupt_controllers); 313 - 314 - if (num_interrupt_controllers == 0) { 315 - /* 316 - * Old machines just have a list of interrupt numbers 317 - * and no interrupt-controller nodes. 318 - */ 319 - ints = (unsigned int *) get_property(np, "AAPL,interrupts", 320 - &intlen); 321 - /* XXX old interpret_pci_props looked in parent too */ 322 - /* XXX old interpret_macio_props looked for interrupts 323 - before AAPL,interrupts */ 324 - if (ints == NULL) 325 - ints = (unsigned int *) get_property(np, "interrupts", 326 - &intlen); 327 - if (ints == NULL) 328 - return 0; 329 - 330 - np->n_intrs = intlen / sizeof(unsigned int); 331 - np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]), 332 - mem_start); 333 - if (!np->intrs) 334 - return -ENOMEM; 335 - if (measure_only) 336 - return 0; 337 - 338 - for (i = 0; i < np->n_intrs; ++i) { 339 - np->intrs[i].line = *ints++; 340 - np->intrs[i].sense = IRQ_SENSE_LEVEL 341 - | IRQ_POLARITY_NEGATIVE; 342 - } 343 - return 0; 344 - } 345 - 346 - ints = (unsigned int *) get_property(np, "interrupts", &intlen); 347 - TRACE("ints=%p, intlen=%d\n", ints, intlen); 348 - if (ints == NULL) 349 - return 0; 350 - intrcells = prom_n_intr_cells(np); 351 - intlen /= intrcells * sizeof(unsigned int); 352 - TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen); 353 - np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start); 354 - if (!np->intrs) 355 - return -ENOMEM; 356 - 357 - if (measure_only) 358 - return 0; 359 - 360 - intrcount = 0; 361 - for (i = 0; i < intlen; ++i, ints += intrcells) { 362 - n = map_interrupt(&irq, &ic, np, ints, intrcells); 363 - TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n); 364 - if (n <= 0) 365 - continue; 366 - 367 - /* don't map IRQ numbers under a cascaded 8259 controller */ 368 - if (ic && device_is_compatible(ic, "chrp,iic")) { 369 - 
np->intrs[intrcount].line = irq[0]; 370 - sense = (n > 1)? (irq[1] & 3): 3; 371 - np->intrs[intrcount].sense = map_isa_senses[sense]; 372 - } else { 373 - virq = virt_irq_create_mapping(irq[0]); 374 - TRACE("virq=%d\n", virq); 375 - #ifdef CONFIG_PPC64 376 - if (virq == NO_IRQ) { 377 - printk(KERN_CRIT "Could not allocate interrupt" 378 - " number for %s\n", np->full_name); 379 - continue; 380 - } 381 - #endif 382 - np->intrs[intrcount].line = irq_offset_up(virq); 383 - sense = (n > 1)? (irq[1] & 3): 1; 384 - 385 - /* Apple uses bits in there in a different way, let's 386 - * only keep the real sense bit on macs 387 - */ 388 - if (machine_is(powermac)) 389 - sense &= 0x1; 390 - np->intrs[intrcount].sense = map_mpic_senses[sense]; 391 - } 392 - 393 - #ifdef CONFIG_PPC64 394 - /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */ 395 - if (machine_is(powermac) && ic && ic->parent) { 396 - char *name = get_property(ic->parent, "name", NULL); 397 - if (name && !strcmp(name, "u3")) 398 - np->intrs[intrcount].line += 128; 399 - else if (!(name && (!strcmp(name, "mac-io") || 400 - !strcmp(name, "u4")))) 401 - /* ignore other cascaded controllers, such as 402 - the k2-sata-root */ 403 - break; 404 - } 405 - #endif /* CONFIG_PPC64 */ 406 - if (n > 2) { 407 - printk("hmmm, got %d intr cells for %s:", n, 408 - np->full_name); 409 - for (j = 0; j < n; ++j) 410 - printk(" %d", irq[j]); 411 - printk("\n"); 412 - } 413 - ++intrcount; 414 - } 415 - np->n_intrs = intrcount; 416 - 417 - return 0; 418 - } 419 - 420 - static int __devinit finish_node(struct device_node *np, 421 - unsigned long *mem_start, 422 - int measure_only) 423 - { 424 - struct device_node *child; 425 - int rc = 0; 426 - 427 - rc = finish_node_interrupts(np, mem_start, measure_only); 428 - if (rc) 429 - goto out; 430 - 431 - for (child = np->child; child != NULL; child = child->sibling) { 432 - rc = finish_node(child, mem_start, measure_only); 433 - if (rc) 434 - goto out; 435 - } 436 - out: 437 - return 
rc; 438 - } 439 - 440 - static void __init scan_interrupt_controllers(void) 441 - { 442 - struct device_node *np; 443 - int n = 0; 444 - char *name, *ic; 445 - int iclen; 446 - 447 - for (np = allnodes; np != NULL; np = np->allnext) { 448 - ic = get_property(np, "interrupt-controller", &iclen); 449 - name = get_property(np, "name", NULL); 450 - /* checking iclen makes sure we don't get a false 451 - match on /chosen.interrupt_controller */ 452 - if ((name != NULL 453 - && strcmp(name, "interrupt-controller") == 0) 454 - || (ic != NULL && iclen == 0 455 - && strcmp(name, "AppleKiwi"))) { 456 - if (n == 0) 457 - dflt_interrupt_controller = np; 458 - ++n; 459 - } 460 - } 461 - num_interrupt_controllers = n; 462 - } 463 - 464 - /** 465 - * finish_device_tree is called once things are running normally 466 - * (i.e. with text and data mapped to the address they were linked at). 467 - * It traverses the device tree and fills in some of the additional, 468 - * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt 469 - * mapping is also initialized at this point. 470 - */ 471 - void __init finish_device_tree(void) 472 - { 473 - unsigned long start, end, size = 0; 474 - 475 - DBG(" -> finish_device_tree\n"); 476 - 477 - #ifdef CONFIG_PPC64 478 - /* Initialize virtual IRQ map */ 479 - virt_irq_init(); 480 - #endif 481 - scan_interrupt_controllers(); 482 - 483 - /* 484 - * Finish device-tree (pre-parsing some properties etc...) 485 - * We do this in 2 passes. One with "measure_only" set, which 486 - * will only measure the amount of memory needed, then we can 487 - * allocate that memory, and call finish_node again. However, 488 - * we must be careful as most routines will fail nowadays when 489 - * prom_alloc() returns 0, so we must make sure our first pass 490 - * doesn't start at 0. 
We pre-initialize size to 16 for that 491 - * reason and then remove those additional 16 bytes 492 - */ 493 - size = 16; 494 - finish_node(allnodes, &size, 1); 495 - size -= 16; 496 - 497 - if (0 == size) 498 - end = start = 0; 499 - else 500 - end = start = (unsigned long)__va(lmb_alloc(size, 128)); 501 - 502 - finish_node(allnodes, &end, 0); 503 - BUG_ON(end != start + size); 504 - 505 - DBG(" <- finish_device_tree\n"); 506 - } 507 88 508 89 static inline char *find_flat_dt_string(u32 offset) 509 90 { ··· 972 1389 EXPORT_SYMBOL(prom_n_size_cells); 973 1390 974 1391 /** 975 - * Work out the sense (active-low level / active-high edge) 976 - * of each interrupt from the device tree. 977 - */ 978 - void __init prom_get_irq_senses(unsigned char *senses, int off, int max) 979 - { 980 - struct device_node *np; 981 - int i, j; 982 - 983 - /* default to level-triggered */ 984 - memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off); 985 - 986 - for (np = allnodes; np != 0; np = np->allnext) { 987 - for (j = 0; j < np->n_intrs; j++) { 988 - i = np->intrs[j].line; 989 - if (i >= off && i < max) 990 - senses[i-off] = np->intrs[j].sense; 991 - } 992 - } 993 - } 994 - 995 - /** 996 1392 * Construct and return a list of the device_nodes with a given name. 997 1393 */ 998 1394 struct device_node *find_devices(const char *name) ··· 1370 1808 node->deadprops = NULL; 1371 1809 } 1372 1810 } 1373 - kfree(node->intrs); 1374 1811 kfree(node->full_name); 1375 1812 kfree(node->data); 1376 1813 kfree(node); ··· 1442 1881 #ifdef CONFIG_PPC_PSERIES 1443 1882 /* 1444 1883 * Fix up the uninitialized fields in a new device node: 1445 - * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields 1446 - * 1447 - * A lot of boot-time code is duplicated here, because functions such 1448 - * as finish_node_interrupts, interpret_pci_props, etc. cannot use the 1449 - * slab allocator. 1450 - * 1451 - * This should probably be split up into smaller chunks. 
1884 + * name, type and pci-specific fields 1452 1885 */ 1453 1886 1454 1887 static int of_finish_dynamic_node(struct device_node *node) ··· 1483 1928 switch (action) { 1484 1929 case PSERIES_RECONFIG_ADD: 1485 1930 err = of_finish_dynamic_node(node); 1486 - if (!err) 1487 - finish_node(node, NULL, 0); 1488 1931 if (err < 0) { 1489 1932 printk(KERN_ERR "finish_node returned %d\n", err); 1490 1933 err = NOTIFY_BAD;
-17
arch/powerpc/kernel/rtas_pci.c
··· 297 297 struct device_node *node; 298 298 struct pci_controller *phb; 299 299 unsigned int index; 300 - unsigned int root_size_cells = 0; 301 - unsigned int *opprop = NULL; 302 300 struct device_node *root = of_find_node_by_path("/"); 303 301 304 - if (ppc64_interrupt_controller == IC_OPEN_PIC) { 305 - opprop = (unsigned int *)get_property(root, 306 - "platform-open-pic", NULL); 307 - } 308 - 309 - root_size_cells = prom_n_size_cells(root); 310 - 311 302 index = 0; 312 - 313 303 for (node = of_get_next_child(root, NULL); 314 304 node != NULL; 315 305 node = of_get_next_child(root, node)) { ··· 314 324 setup_phb(node, phb); 315 325 pci_process_bridge_OF_ranges(phb, node, 0); 316 326 pci_setup_phb_io(phb, index == 0); 317 - #ifdef CONFIG_PPC_PSERIES 318 - /* XXX This code need serious fixing ... --BenH */ 319 - if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { 320 - int addr = root_size_cells * (index + 2) - 1; 321 - mpic_assign_isu(pSeries_mpic, index, opprop[addr]); 322 - } 323 - #endif 324 327 index++; 325 328 } 326 329
-1
arch/powerpc/kernel/setup_32.c
··· 239 239 ppc_md.init_early(); 240 240 241 241 find_legacy_serial_ports(); 242 - finish_device_tree(); 243 242 244 243 smp_setup_cpu_maps(); 245 244
+6 -11
arch/powerpc/kernel/setup_64.c
··· 361 361 362 362 /* 363 363 * Fill the ppc64_caches & systemcfg structures with informations 364 - * retrieved from the device-tree. Need to be called before 365 - * finish_device_tree() since the later requires some of the 366 - * informations filled up here to properly parse the interrupt tree. 364 + * retrieved from the device-tree. 367 365 */ 368 366 initialize_cache_info(); 367 + 368 + /* 369 + * Initialize irq remapping subsystem 370 + */ 371 + irq_early_init(); 369 372 370 373 #ifdef CONFIG_PPC_RTAS 371 374 /* ··· 395 392 * so that further code can be debugged 396 393 */ 397 394 find_legacy_serial_ports(); 398 - 399 - /* 400 - * "Finish" the device-tree, that is do the actual parsing of 401 - * some of the properties like the interrupt map 402 - */ 403 - finish_device_tree(); 404 395 405 396 /* 406 397 * Initialize xmon ··· 424 427 425 428 printk("-----------------------------------------------------\n"); 426 429 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 427 - printk("ppc64_interrupt_controller = 0x%ld\n", 428 - ppc64_interrupt_controller); 429 430 printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); 430 431 printk("ppc64_caches.dcache_line_size = 0x%x\n", 431 432 ppc64_caches.dline_size);
+1 -11
arch/powerpc/kernel/vio.c
··· 218 218 { 219 219 struct vio_dev *viodev; 220 220 unsigned int *unit_address; 221 - unsigned int *irq_p; 222 221 223 222 /* we need the 'device_type' property, in order to match with drivers */ 224 223 if (of_node->type == NULL) { ··· 242 243 243 244 viodev->dev.platform_data = of_node_get(of_node); 244 245 245 - viodev->irq = NO_IRQ; 246 - irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL); 247 - if (irq_p) { 248 - int virq = virt_irq_create_mapping(*irq_p); 249 - if (virq == NO_IRQ) { 250 - printk(KERN_ERR "Unable to allocate interrupt " 251 - "number for %s\n", of_node->full_name); 252 - } else 253 - viodev->irq = irq_offset_up(virq); 254 - } 246 + viodev->irq = irq_of_parse_and_map(of_node, 0); 255 247 256 248 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 257 249 viodev->name = of_node->name;
+173 -221
arch/powerpc/platforms/cell/interrupt.c
··· 1 1 /* 2 2 * Cell Internal Interrupt Controller 3 3 * 4 + * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) 5 + * IBM, Corp. 6 + * 4 7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 5 8 * 6 9 * Author: Arnd Bergmann <arndb@de.ibm.com> ··· 28 25 #include <linux/module.h> 29 26 #include <linux/percpu.h> 30 27 #include <linux/types.h> 28 + #include <linux/ioport.h> 31 29 32 30 #include <asm/io.h> 33 31 #include <asm/pgtable.h> 34 32 #include <asm/prom.h> 35 33 #include <asm/ptrace.h> 34 + #include <asm/machdep.h> 36 35 37 36 #include "interrupt.h" 38 37 #include "cbe_regs.h" ··· 44 39 u8 target_id; 45 40 u8 eoi_stack[16]; 46 41 int eoi_ptr; 42 + struct irq_host *host; 47 43 }; 48 44 49 45 static DEFINE_PER_CPU(struct iic, iic); 46 + #define IIC_NODE_COUNT 2 47 + static struct irq_host *iic_hosts[IIC_NODE_COUNT]; 48 + 49 + /* Convert between "pending" bits and hw irq number */ 50 + static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) 51 + { 52 + unsigned char unit = bits.source & 0xf; 53 + 54 + if (bits.flags & CBE_IIC_IRQ_IPI) 55 + return IIC_IRQ_IPI0 | (bits.prio >> 4); 56 + else if (bits.class <= 3) 57 + return (bits.class << 4) | unit; 58 + else 59 + return IIC_IRQ_INVALID; 60 + } 50 61 51 62 static void iic_mask(unsigned int irq) 52 63 { ··· 86 65 .eoi = iic_eoi, 87 66 }; 88 67 89 - /* XXX All of this has to be reworked completely. We need to assign a real 90 - * interrupt numbers to the external interrupts and remove all the hard coded 91 - * interrupt maps (rely on the device-tree whenever possible). 92 - * 93 - * Basically, my scheme is to define the "pendings" bits to be the HW interrupt 94 - * number (ignoring the data and flags here). That means we can sort-of split 95 - * external sources based on priority, and we can use request_irq() on pretty 96 - * much anything. 97 - * 98 - * For spider or axon, they have their own interrupt space. 
spider will just have 99 - * local "hardward" interrupts 0...xx * node stride. The node stride is not 100 - * necessary (separate interrupt chips will have separate HW number space), but 101 - * will allow to be compatible with existing device-trees. 102 - * 103 - * All of thise little world will get a standard remapping scheme to map those HW 104 - * numbers into the linux flat irq number space. 105 - */ 106 - static int iic_external_get_irq(struct cbe_iic_pending_bits pending) 107 - { 108 - int irq; 109 - unsigned char node, unit; 110 - 111 - node = pending.source >> 4; 112 - unit = pending.source & 0xf; 113 - irq = -1; 114 - 115 - /* 116 - * This mapping is specific to the Cell Broadband 117 - * Engine. We might need to get the numbers 118 - * from the device tree to support future CPUs. 119 - */ 120 - switch (unit) { 121 - case 0x00: 122 - case 0x0b: 123 - /* 124 - * One of these units can be connected 125 - * to an external interrupt controller. 126 - */ 127 - if (pending.class != 2) 128 - break; 129 - /* TODO: We might want to silently ignore cascade interrupts 130 - * when no cascade handler exist yet 131 - */ 132 - irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE; 133 - break; 134 - case 0x01 ... 0x04: 135 - case 0x07 ... 
0x0a: 136 - /* 137 - * These units are connected to the SPEs 138 - */ 139 - if (pending.class > 2) 140 - break; 141 - irq = IIC_SPE_OFFSET 142 - + pending.class * IIC_CLASS_STRIDE 143 - + node * IIC_NODE_STRIDE 144 - + unit; 145 - break; 146 - } 147 - if (irq == -1) 148 - printk(KERN_WARNING "Unexpected interrupt class %02x, " 149 - "source %02x, prio %02x, cpu %02x\n", pending.class, 150 - pending.source, pending.prio, smp_processor_id()); 151 - return irq; 152 - } 153 - 154 68 /* Get an IRQ number from the pending state register of the IIC */ 155 - int iic_get_irq(struct pt_regs *regs) 69 + static unsigned int iic_get_irq(struct pt_regs *regs) 156 70 { 157 - struct iic *iic; 158 - int irq; 159 - struct cbe_iic_pending_bits pending; 71 + struct cbe_iic_pending_bits pending; 72 + struct iic *iic; 160 73 161 - iic = &__get_cpu_var(iic); 162 - *(unsigned long *) &pending = 163 - in_be64((unsigned long __iomem *) &iic->regs->pending_destr); 164 - iic->eoi_stack[++iic->eoi_ptr] = pending.prio; 165 - BUG_ON(iic->eoi_ptr > 15); 166 - 167 - irq = -1; 168 - if (pending.flags & CBE_IIC_IRQ_VALID) { 169 - if (pending.flags & CBE_IIC_IRQ_IPI) { 170 - irq = IIC_IPI_OFFSET + (pending.prio >> 4); 171 - /* 172 - if (irq > 0x80) 173 - printk(KERN_WARNING "Unexpected IPI prio %02x" 174 - "on CPU %02x\n", pending.prio, 175 - smp_processor_id()); 176 - */ 177 - } else { 178 - irq = iic_external_get_irq(pending); 179 - } 180 - } 181 - return irq; 182 - } 183 - 184 - /* hardcoded part to be compatible with older firmware */ 185 - 186 - static int __init setup_iic_hardcoded(void) 187 - { 188 - struct device_node *np; 189 - int nodeid, cpu; 190 - unsigned long regs; 191 - struct iic *iic; 192 - 193 - for_each_possible_cpu(cpu) { 194 - iic = &per_cpu(iic, cpu); 195 - nodeid = cpu/2; 196 - 197 - for (np = of_find_node_by_type(NULL, "cpu"); 198 - np; 199 - np = of_find_node_by_type(np, "cpu")) { 200 - if (nodeid == *(int *)get_property(np, "node-id", NULL)) 201 - break; 202 - } 203 - 204 - 
if (!np) { 205 - printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); 206 - iic->regs = NULL; 207 - iic->target_id = 0xff; 208 - return -ENODEV; 209 - } 210 - 211 - regs = *(long *)get_property(np, "iic", NULL); 212 - 213 - /* hack until we have decided on the devtree info */ 214 - regs += 0x400; 215 - if (cpu & 1) 216 - regs += 0x20; 217 - 218 - printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs); 219 - iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs)); 220 - iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe); 221 - iic->eoi_stack[0] = 0xff; 222 - } 223 - 224 - return 0; 225 - } 226 - 227 - static int __init setup_iic(void) 228 - { 229 - struct device_node *dn; 230 - unsigned long *regs; 231 - char *compatible; 232 - unsigned *np, found = 0; 233 - struct iic *iic = NULL; 234 - 235 - for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { 236 - compatible = (char *)get_property(dn, "compatible", NULL); 237 - 238 - if (!compatible) { 239 - printk(KERN_WARNING "no compatible property found !\n"); 240 - continue; 241 - } 242 - 243 - if (strstr(compatible, "IBM,CBEA-Internal-Interrupt-Controller")) 244 - regs = (unsigned long *)get_property(dn,"reg", NULL); 245 - else 246 - continue; 247 - 248 - if (!regs) 249 - printk(KERN_WARNING "IIC: no reg property\n"); 250 - 251 - np = (unsigned int *)get_property(dn, "ibm,interrupt-server-ranges", NULL); 252 - 253 - if (!np) { 254 - printk(KERN_WARNING "IIC: CPU association not found\n"); 255 - iic->regs = NULL; 256 - iic->target_id = 0xff; 257 - return -ENODEV; 258 - } 259 - 260 - iic = &per_cpu(iic, np[0]); 261 - iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs)); 262 - iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 
0xf : 0xe); 263 - iic->eoi_stack[0] = 0xff; 264 - printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs); 265 - 266 - iic = &per_cpu(iic, np[1]); 267 - iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs)); 268 - iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe); 269 - iic->eoi_stack[0] = 0xff; 270 - 271 - printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs); 272 - 273 - found++; 274 - } 275 - 276 - if (found) 277 - return 0; 278 - else 279 - return -ENODEV; 74 + iic = &__get_cpu_var(iic); 75 + *(unsigned long *) &pending = 76 + in_be64((unsigned long __iomem *) &iic->regs->pending_destr); 77 + iic->eoi_stack[++iic->eoi_ptr] = pending.prio; 78 + BUG_ON(iic->eoi_ptr > 15); 79 + if (pending.flags & CBE_IIC_IRQ_VALID) 80 + return irq_linear_revmap(iic->host, 81 + iic_pending_to_hwnum(pending)); 82 + return NO_IRQ; 280 83 } 281 84 282 85 #ifdef CONFIG_SMP ··· 108 263 /* Use the highest interrupt priorities for IPI */ 109 264 static inline int iic_ipi_to_irq(int ipi) 110 265 { 111 - return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; 266 + return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi; 112 267 } 113 268 114 269 static inline int iic_irq_to_ipi(int irq) 115 270 { 116 - return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); 271 + return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0); 117 272 } 118 273 119 274 void iic_setup_cpu(void) ··· 132 287 } 133 288 EXPORT_SYMBOL_GPL(iic_get_target_id); 134 289 290 + struct irq_host *iic_get_irq_host(int node) 291 + { 292 + if (node < 0 || node >= IIC_NODE_COUNT) 293 + return NULL; 294 + return iic_hosts[node]; 295 + } 296 + EXPORT_SYMBOL_GPL(iic_get_irq_host); 297 + 298 + 135 299 static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 136 300 { 137 - smp_message_recv(iic_irq_to_ipi(irq), regs); 301 + int ipi = (int)(long)dev_id; 302 + 303 + smp_message_recv(ipi, regs); 304 + 138 305 return IRQ_HANDLED; 139 306 } 140 307 141 308 static void iic_request_ipi(int 
ipi, const char *name) 142 309 { 143 - int irq; 310 + int node, virq; 144 311 145 - irq = iic_ipi_to_irq(ipi); 146 - 147 - /* IPIs are marked IRQF_DISABLED as they must run with irqs 148 - * disabled */ 149 - set_irq_chip_and_handler(irq, &iic_chip, handle_percpu_irq); 150 - request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL); 312 + for (node = 0; node < IIC_NODE_COUNT; node++) { 313 + char *rname; 314 + if (iic_hosts[node] == NULL) 315 + continue; 316 + virq = irq_create_mapping(iic_hosts[node], 317 + iic_ipi_to_irq(ipi), 0); 318 + if (virq == NO_IRQ) { 319 + printk(KERN_ERR 320 + "iic: failed to map IPI %s on node %d\n", 321 + name, node); 322 + continue; 323 + } 324 + rname = kzalloc(strlen(name) + 16, GFP_KERNEL); 325 + if (rname) 326 + sprintf(rname, "%s node %d", name, node); 327 + else 328 + rname = (char *)name; 329 + if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, 330 + rname, (void *)(long)ipi)) 331 + printk(KERN_ERR 332 + "iic: failed to request IPI %s on node %d\n", 333 + name, node); 334 + } 151 335 } 152 336 153 337 void iic_request_IPIs(void) ··· 187 313 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); 188 314 #endif /* CONFIG_DEBUGGER */ 189 315 } 316 + 190 317 #endif /* CONFIG_SMP */ 191 318 192 - static void __init iic_setup_builtin_handlers(void) 319 + 320 + static int iic_host_match(struct irq_host *h, struct device_node *node) 193 321 { 194 - int be, isrc; 322 + return h->host_data != NULL && node == h->host_data; 323 + } 195 324 196 - /* XXX FIXME: Assume two threads per BE are present */ 197 - for (be=0; be < num_present_cpus() / 2; be++) { 198 - int irq; 325 + static int iic_host_map(struct irq_host *h, unsigned int virq, 326 + irq_hw_number_t hw, unsigned int flags) 327 + { 328 + if (hw < IIC_IRQ_IPI0) 329 + set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); 330 + else 331 + set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); 332 + return 0; 333 + } 199 334 200 - /* setup SPE chip and handlers */ 201 
- for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { 202 - irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; 203 - set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq); 335 + static int iic_host_xlate(struct irq_host *h, struct device_node *ct, 336 + u32 *intspec, unsigned int intsize, 337 + irq_hw_number_t *out_hwirq, unsigned int *out_flags) 338 + 339 + { 340 + /* Currently, we don't translate anything. That needs to be fixed as 341 + * we get better defined device-trees. iic interrupts have to be 342 + * explicitely mapped by whoever needs them 343 + */ 344 + return -ENODEV; 345 + } 346 + 347 + static struct irq_host_ops iic_host_ops = { 348 + .match = iic_host_match, 349 + .map = iic_host_map, 350 + .xlate = iic_host_xlate, 351 + }; 352 + 353 + static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, 354 + struct irq_host *host) 355 + { 356 + /* XXX FIXME: should locate the linux CPU number from the HW cpu 357 + * number properly. We are lucky for now 358 + */ 359 + struct iic *iic = &per_cpu(iic, hw_cpu); 360 + 361 + iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); 362 + BUG_ON(iic->regs == NULL); 363 + 364 + iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 
0xf : 0xe); 365 + iic->eoi_stack[0] = 0xff; 366 + iic->host = host; 367 + out_be64(&iic->regs->prio, 0); 368 + 369 + printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n", 370 + hw_cpu, addr, iic->regs, iic->target_id); 371 + } 372 + 373 + static int __init setup_iic(void) 374 + { 375 + struct device_node *dn; 376 + struct resource r0, r1; 377 + struct irq_host *host; 378 + int found = 0; 379 + u32 *np; 380 + 381 + for (dn = NULL; 382 + (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) { 383 + if (!device_is_compatible(dn, 384 + "IBM,CBEA-Internal-Interrupt-Controller")) 385 + continue; 386 + np = (u32 *)get_property(dn, "ibm,interrupt-server-ranges", 387 + NULL); 388 + if (np == NULL) { 389 + printk(KERN_WARNING "IIC: CPU association not found\n"); 390 + of_node_put(dn); 391 + return -ENODEV; 204 392 } 205 - /* setup cascade chip */ 206 - irq = IIC_EXT_CASCADE + be * IIC_NODE_STRIDE; 207 - set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq); 393 + if (of_address_to_resource(dn, 0, &r0) || 394 + of_address_to_resource(dn, 1, &r1)) { 395 + printk(KERN_WARNING "IIC: Can't resolve addresses\n"); 396 + of_node_put(dn); 397 + return -ENODEV; 398 + } 399 + host = NULL; 400 + if (found < IIC_NODE_COUNT) { 401 + host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 402 + IIC_SOURCE_COUNT, 403 + &iic_host_ops, 404 + IIC_IRQ_INVALID); 405 + iic_hosts[found] = host; 406 + BUG_ON(iic_hosts[found] == NULL); 407 + iic_hosts[found]->host_data = of_node_get(dn); 408 + found++; 409 + } 410 + init_one_iic(np[0], r0.start, host); 411 + init_one_iic(np[1], r1.start, host); 208 412 } 413 + 414 + if (found) 415 + return 0; 416 + else 417 + return -ENODEV; 209 418 } 210 419 211 420 void __init iic_init_IRQ(void) 212 421 { 213 - int cpu, irq_offset; 214 - struct iic *iic; 215 - 422 + /* Discover and initialize iics */ 216 423 if (setup_iic() < 0) 217 - setup_iic_hardcoded(); 424 + panic("IIC: Failed to initialize !\n"); 218 425 219 - irq_offset = 0; 220 - 
for_each_possible_cpu(cpu) { 221 - iic = &per_cpu(iic, cpu); 222 - if (iic->regs) 223 - out_be64(&iic->regs->prio, 0xff); 224 - } 225 - iic_setup_builtin_handlers(); 426 + /* Set master interrupt handling function */ 427 + ppc_md.get_irq = iic_get_irq; 226 428 429 + /* Enable on current CPU */ 430 + iic_setup_cpu(); 227 431 }
+8 -9
arch/powerpc/platforms/cell/interrupt.h
··· 37 37 */ 38 38 39 39 enum { 40 - IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ 41 - IIC_EXT_CASCADE = 0x20, /* There is no interrupt 32 on spider */ 42 - IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ 43 - IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ 44 - IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ 45 - IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ 46 - IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ 47 - IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ 40 + IIC_IRQ_INVALID = 0xff, 41 + IIC_IRQ_MAX = 0x3f, 42 + IIC_IRQ_EXT_IOIF0 = 0x20, 43 + IIC_IRQ_EXT_IOIF1 = 0x2b, 44 + IIC_IRQ_IPI0 = 0x40, 45 + IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ 46 + IIC_SOURCE_COUNT = 0x50, 48 47 }; 49 48 50 49 extern void iic_init_IRQ(void); 51 - extern int iic_get_irq(struct pt_regs *regs); 52 50 extern void iic_cause_IPI(int cpu, int mesg); 53 51 extern void iic_request_IPIs(void); 54 52 extern void iic_setup_cpu(void); 55 53 56 54 extern u8 iic_get_target_id(int cpu); 55 + extern struct irq_host *iic_get_irq_host(int node); 57 56 58 57 extern void spider_init_IRQ(void); 59 58
+9 -4
arch/powerpc/platforms/cell/setup.c
··· 80 80 printk("*** %04x : %s\n", hex, s ? s : ""); 81 81 } 82 82 83 + static void __init cell_pcibios_fixup(void) 84 + { 85 + struct pci_dev *dev = NULL; 86 + 87 + for_each_pci_dev(dev) 88 + pci_read_irq_line(dev); 89 + } 90 + 83 91 static void __init cell_init_irq(void) 84 92 { 85 93 iic_init_IRQ(); ··· 138 130 139 131 cell_init_iommu(); 140 132 141 - ppc64_interrupt_controller = IC_CELL_PIC; 142 - 143 133 DBG(" <- cell_init_early()\n"); 144 134 } 145 135 ··· 184 178 .check_legacy_ioport = cell_check_legacy_ioport, 185 179 .progress = cell_progress, 186 180 .init_IRQ = cell_init_irq, 187 - .get_irq = iic_get_irq, 188 - 181 + .pcibios_fixup = cell_pcibios_fixup, 189 182 #ifdef CONFIG_KEXEC 190 183 .machine_kexec = default_machine_kexec, 191 184 .machine_kexec_prepare = default_machine_kexec_prepare,
+249 -98
arch/powerpc/platforms/cell/spider-pic.c
··· 22 22 23 23 #include <linux/interrupt.h> 24 24 #include <linux/irq.h> 25 + #include <linux/ioport.h> 25 26 26 27 #include <asm/pgtable.h> 27 28 #include <asm/prom.h> ··· 57 56 REISWAITEN = 0x508, /* Reissue Wait Control*/ 58 57 }; 59 58 60 - static void __iomem *spider_pics[4]; 59 + #define SPIDER_CHIP_COUNT 4 60 + #define SPIDER_SRC_COUNT 64 61 + #define SPIDER_IRQ_INVALID 63 61 62 62 - static void __iomem *spider_get_pic(int irq) 63 + struct spider_pic { 64 + struct irq_host *host; 65 + struct device_node *of_node; 66 + void __iomem *regs; 67 + unsigned int node_id; 68 + }; 69 + static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; 70 + 71 + static struct spider_pic *spider_virq_to_pic(unsigned int virq) 63 72 { 64 - int node = irq / IIC_NODE_STRIDE; 65 - irq %= IIC_NODE_STRIDE; 66 - 67 - if (irq >= IIC_EXT_OFFSET && 68 - irq < IIC_EXT_OFFSET + IIC_NUM_EXT && 69 - spider_pics) 70 - return spider_pics[node]; 71 - return NULL; 73 + return irq_map[virq].host->host_data; 72 74 } 73 75 74 - static int spider_get_nr(unsigned int irq) 76 + static void __iomem *spider_get_irq_config(struct spider_pic *pic, 77 + unsigned int src) 75 78 { 76 - return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; 79 + return pic->regs + TIR_CFGA + 8 * src; 77 80 } 78 81 79 - static void __iomem *spider_get_irq_config(int irq) 82 + static void spider_unmask_irq(unsigned int virq) 80 83 { 81 - void __iomem *pic; 82 - pic = spider_get_pic(irq); 83 - return pic + TIR_CFGA + 8 * spider_get_nr(irq); 84 - } 84 + struct spider_pic *pic = spider_virq_to_pic(virq); 85 + void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); 85 86 86 - static void spider_unmask_irq(unsigned int irq) 87 - { 88 - int nodeid = (irq / IIC_NODE_STRIDE) * 0x10; 89 - void __iomem *cfg = spider_get_irq_config(irq); 90 - irq = spider_get_nr(irq); 91 - 92 - /* FIXME: Most of that is configuration and has nothing to do with enabling/disable, 93 - * besides, it's also partially bogus. 
87 + /* We use no locking as we should be covered by the descriptor lock 88 + * for access to invidual source configuration registers 94 89 */ 95 - out_be32(cfg, (in_be32(cfg) & ~0xf0)| 0x3107000eu | nodeid); 96 - out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); 90 + out_be32(cfg, in_be32(cfg) | 0x30000000u); 97 91 } 98 92 99 - static void spider_mask_irq(unsigned int irq) 93 + static void spider_mask_irq(unsigned int virq) 100 94 { 101 - void __iomem *cfg = spider_get_irq_config(irq); 102 - irq = spider_get_nr(irq); 95 + struct spider_pic *pic = spider_virq_to_pic(virq); 96 + void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); 103 97 98 + /* We use no locking as we should be covered by the descriptor lock 99 + * for access to invidual source configuration registers 100 + */ 104 101 out_be32(cfg, in_be32(cfg) & ~0x30000000u); 105 102 } 106 103 107 - static void spider_ack_irq(unsigned int irq) 104 + static void spider_ack_irq(unsigned int virq) 108 105 { 109 - /* Should reset edge detection logic but we don't configure any edge interrupt 110 - * at the moment. 
106 + struct spider_pic *pic = spider_virq_to_pic(virq); 107 + unsigned int src = irq_map[virq].hwirq; 108 + 109 + /* Reset edge detection logic if necessary 111 110 */ 111 + if (get_irq_desc(virq)->status & IRQ_LEVEL) 112 + return; 113 + 114 + /* Only interrupts 47 to 50 can be set to edge */ 115 + if (src < 47 || src > 50) 116 + return; 117 + 118 + /* Perform the clear of the edge logic */ 119 + out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); 112 120 } 113 121 114 122 static struct irq_chip spider_pic = { ··· 127 117 .ack = spider_ack_irq, 128 118 }; 129 119 130 - static int spider_get_irq(int node) 120 + static int spider_host_match(struct irq_host *h, struct device_node *node) 131 121 { 132 - unsigned long cs; 133 - void __iomem *regs = spider_pics[node]; 134 - 135 - cs = in_be32(regs + TIR_CS) >> 24; 136 - 137 - if (cs == 63) 138 - return -1; 139 - else 140 - return cs; 122 + struct spider_pic *pic = h->host_data; 123 + return node == pic->of_node; 141 124 } 125 + 126 + static int spider_host_map(struct irq_host *h, unsigned int virq, 127 + irq_hw_number_t hw, unsigned int flags) 128 + { 129 + unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; 130 + struct spider_pic *pic = h->host_data; 131 + void __iomem *cfg = spider_get_irq_config(pic, hw); 132 + int level = 0; 133 + u32 ic; 134 + 135 + /* Note that only level high is supported for most interrupts */ 136 + if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && 137 + (hw < 47 || hw > 50)) 138 + return -EINVAL; 139 + 140 + /* Decode sense type */ 141 + switch(sense) { 142 + case IRQ_TYPE_EDGE_RISING: 143 + ic = 0x3; 144 + break; 145 + case IRQ_TYPE_EDGE_FALLING: 146 + ic = 0x2; 147 + break; 148 + case IRQ_TYPE_LEVEL_LOW: 149 + ic = 0x0; 150 + level = 1; 151 + break; 152 + case IRQ_TYPE_LEVEL_HIGH: 153 + case IRQ_TYPE_NONE: 154 + ic = 0x1; 155 + level = 1; 156 + break; 157 + default: 158 + return -EINVAL; 159 + } 160 + 161 + /* Configure the source. 
One gross hack that was there before and 162 + * that I've kept around is the priority to the BE which I set to 163 + * be the same as the interrupt source number. I don't know wether 164 + * that's supposed to make any kind of sense however, we'll have to 165 + * decide that, but for now, I'm not changing the behaviour. 166 + */ 167 + out_be32(cfg, (ic << 24) | (0x7 << 16) | (pic->node_id << 4) | 0xe); 168 + out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); 169 + 170 + if (level) 171 + get_irq_desc(virq)->status |= IRQ_LEVEL; 172 + set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); 173 + return 0; 174 + } 175 + 176 + static int spider_host_xlate(struct irq_host *h, struct device_node *ct, 177 + u32 *intspec, unsigned int intsize, 178 + irq_hw_number_t *out_hwirq, unsigned int *out_flags) 179 + 180 + { 181 + /* Spider interrupts have 2 cells, first is the interrupt source, 182 + * second, well, I don't know for sure yet ... We mask the top bits 183 + * because old device-trees encode a node number in there 184 + */ 185 + *out_hwirq = intspec[0] & 0x3f; 186 + *out_flags = IRQ_TYPE_LEVEL_HIGH; 187 + return 0; 188 + } 189 + 190 + static struct irq_host_ops spider_host_ops = { 191 + .match = spider_host_match, 192 + .map = spider_host_map, 193 + .xlate = spider_host_xlate, 194 + }; 142 195 143 196 static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc, 144 197 struct pt_regs *regs) 145 198 { 146 - int node = (int)(long)desc->handler_data; 147 - int cascade_irq; 199 + struct spider_pic *pic = desc->handler_data; 200 + unsigned int cs, virq; 148 201 149 - cascade_irq = spider_get_irq(node); 150 - generic_handle_irq(cascade_irq, regs); 202 + cs = in_be32(pic->regs + TIR_CS) >> 24; 203 + if (cs == SPIDER_IRQ_INVALID) 204 + virq = NO_IRQ; 205 + else 206 + virq = irq_linear_revmap(pic->host, cs); 207 + if (virq != NO_IRQ) 208 + generic_handle_irq(virq, regs); 151 209 desc->chip->eoi(irq); 152 210 } 153 211 154 - /* hardcoded part to be compatible with 
older firmware */ 155 - 156 - static void __init spider_init_one(int node, unsigned long addr) 212 + /* For hooking up the cascace we have a problem. Our device-tree is 213 + * crap and we don't know on which BE iic interrupt we are hooked on at 214 + * least not the "standard" way. We can reconstitute it based on two 215 + * informations though: which BE node we are connected to and wether 216 + * we are connected to IOIF0 or IOIF1. Right now, we really only care 217 + * about the IBM cell blade and we know that its firmware gives us an 218 + * interrupt-map property which is pretty strange. 219 + */ 220 + static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) 157 221 { 158 - int n, irq; 222 + unsigned int virq; 223 + u32 *imap, *tmp; 224 + int imaplen, intsize, unit; 225 + struct device_node *iic; 226 + struct irq_host *iic_host; 159 227 160 - spider_pics[node] = ioremap(addr, 0x800); 161 - if (spider_pics[node] == NULL) 228 + #if 0 /* Enable that when we have a way to retreive the node as well */ 229 + /* First, we check wether we have a real "interrupts" in the device 230 + * tree in case the device-tree is ever fixed 231 + */ 232 + struct of_irq oirq; 233 + if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) { 234 + virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 235 + oirq.size); 236 + goto bail; 237 + } 238 + #endif 239 + 240 + /* Now do the horrible hacks */ 241 + tmp = (u32 *)get_property(pic->of_node, "#interrupt-cells", NULL); 242 + if (tmp == NULL) 243 + return NO_IRQ; 244 + intsize = *tmp; 245 + imap = (u32 *)get_property(pic->of_node, "interrupt-map", &imaplen); 246 + if (imap == NULL || imaplen < (intsize + 1)) 247 + return NO_IRQ; 248 + iic = of_find_node_by_phandle(imap[intsize]); 249 + if (iic == NULL) 250 + return NO_IRQ; 251 + imap += intsize + 1; 252 + tmp = (u32 *)get_property(iic, "#interrupt-cells", NULL); 253 + if (tmp == NULL) 254 + return NO_IRQ; 255 + intsize = *tmp; 256 + /* Assume unit is last entry 
of interrupt specifier */ 257 + unit = imap[intsize - 1]; 258 + /* Ok, we have a unit, now let's try to get the node */ 259 + tmp = (u32 *)get_property(iic, "ibm,interrupt-server-ranges", NULL); 260 + if (tmp == NULL) { 261 + of_node_put(iic); 262 + return NO_IRQ; 263 + } 264 + /* ugly as hell but works for now */ 265 + pic->node_id = (*tmp) >> 1; 266 + of_node_put(iic); 267 + 268 + /* Ok, now let's get cracking. You may ask me why I just didn't match 269 + * the iic host from the iic OF node, but that way I'm still compatible 270 + * with really really old old firmwares for which we don't have a node 271 + */ 272 + iic_host = iic_get_irq_host(pic->node_id); 273 + if (iic_host == NULL) 274 + return NO_IRQ; 275 + /* Manufacture an IIC interrupt number of class 2 */ 276 + virq = irq_create_mapping(iic_host, 0x20 | unit, 0); 277 + if (virq == NO_IRQ) 278 + printk(KERN_ERR "spider_pic: failed to map cascade !"); 279 + return virq; 280 + } 281 + 282 + 283 + static void __init spider_init_one(struct device_node *of_node, int chip, 284 + unsigned long addr) 285 + { 286 + struct spider_pic *pic = &spider_pics[chip]; 287 + int i, virq; 288 + 289 + /* Map registers */ 290 + pic->regs = ioremap(addr, 0x1000); 291 + if (pic->regs == NULL) 162 292 panic("spider_pic: can't map registers !"); 163 293 164 - printk(KERN_INFO "spider_pic: mapped for node %d, addr: 0x%lx mapped to %p\n", 165 - node, addr, spider_pics[node]); 294 + /* Allocate a host */ 295 + pic->host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, SPIDER_SRC_COUNT, 296 + &spider_host_ops, SPIDER_IRQ_INVALID); 297 + if (pic->host == NULL) 298 + panic("spider_pic: can't allocate irq host !"); 299 + pic->host->host_data = pic; 166 300 167 - for (n = 0; n < IIC_NUM_EXT; n++) { 168 - if (n == IIC_EXT_CASCADE) 169 - continue; 170 - irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; 171 - set_irq_chip_and_handler(irq, &spider_pic, handle_level_irq); 172 - get_irq_desc(irq)->status |= IRQ_LEVEL; 301 + /* Fill out other bits */ 302 + 
pic->of_node = of_node_get(of_node); 303 + 304 + /* Go through all sources and disable them */ 305 + for (i = 0; i < SPIDER_SRC_COUNT; i++) { 306 + void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; 307 + out_be32(cfg, in_be32(cfg) & ~0x30000000u); 173 308 } 174 309 175 310 /* do not mask any interrupts because of level */ 176 - out_be32(spider_pics[node] + TIR_MSK, 0x0); 177 - 178 - /* disable edge detection clear */ 179 - /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ 311 + out_be32(pic->regs + TIR_MSK, 0x0); 180 312 181 313 /* enable interrupt packets to be output */ 182 - out_be32(spider_pics[node] + TIR_PIEN, 183 - in_be32(spider_pics[node] + TIR_PIEN) | 0x1); 314 + out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); 184 315 185 - /* Hook up cascade */ 186 - irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE; 187 - set_irq_data(irq, (void *)(long)node); 188 - set_irq_chained_handler(irq, spider_irq_cascade); 316 + /* Hook up the cascade interrupt to the iic and nodeid */ 317 + virq = spider_find_cascade_and_node(pic); 318 + if (virq == NO_IRQ) 319 + return; 320 + set_irq_data(virq, pic); 321 + set_irq_chained_handler(virq, spider_irq_cascade); 322 + 323 + printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", 324 + pic->node_id, addr, of_node->full_name); 189 325 190 326 /* Enable the interrupt detection enable bit. Do this last! */ 191 - out_be32(spider_pics[node] + TIR_DEN, 192 - in_be32(spider_pics[node] + TIR_DEN) | 0x1); 327 + out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); 193 328 } 194 329 195 330 void __init spider_init_IRQ(void) 196 331 { 197 - unsigned long *spider_reg; 332 + struct resource r; 198 333 struct device_node *dn; 199 - char *compatible; 200 - int node = 0; 334 + int chip = 0; 201 335 202 - /* XXX node numbers are totally bogus. We _hope_ we get the device nodes in the right 203 - * order here but that's definitely not guaranteed, we need to get the node from the 204 - * device tree instead. 
There is currently no proper property for it (but our whole 205 - * device-tree is bogus anyway) so all we can do is pray or maybe test the address 206 - * and deduce the node-id 336 + /* XXX node numbers are totally bogus. We _hope_ we get the device 337 + * nodes in the right order here but that's definitely not guaranteed, 338 + * we need to get the node from the device tree instead. 339 + * There is currently no proper property for it (but our whole 340 + * device-tree is bogus anyway) so all we can do is pray or maybe test 341 + * the address and deduce the node-id 207 342 */ 208 - for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { 209 - compatible = (char *)get_property(dn, "compatible", NULL); 210 - 211 - if (!compatible) 212 - continue; 213 - 214 - if (strstr(compatible, "CBEA,platform-spider-pic")) 215 - spider_reg = (unsigned long *)get_property(dn, "reg", NULL); 216 - else if (strstr(compatible, "sti,platform-spider-pic") && (node < 2)) { 217 - static long hard_coded_pics[] = { 0x24000008000, 0x34000008000 }; 218 - spider_reg = &hard_coded_pics[node]; 343 + for (dn = NULL; 344 + (dn = of_find_node_by_name(dn, "interrupt-controller"));) { 345 + if (device_is_compatible(dn, "CBEA,platform-spider-pic")) { 346 + if (of_address_to_resource(dn, 0, &r)) { 347 + printk(KERN_WARNING "spider-pic: Failed\n"); 348 + continue; 349 + } 350 + } else if (device_is_compatible(dn, "sti,platform-spider-pic") 351 + && (chip < 2)) { 352 + static long hard_coded_pics[] = 353 + { 0x24000008000, 0x34000008000 }; 354 + r.start = hard_coded_pics[chip]; 219 355 } else 220 356 continue; 221 - 222 - if (spider_reg == NULL) 223 - printk(KERN_ERR "spider_pic: No address for node %d\n", node); 224 - 225 - spider_init_one(node, *spider_reg); 226 - node++; 357 + spider_init_one(dn, chip++, r.start); 227 358 } 228 359 }
+75 -44
arch/powerpc/platforms/cell/spu_base.c
··· 264 264 return stat ? IRQ_HANDLED : IRQ_NONE; 265 265 } 266 266 267 - static int 268 - spu_request_irqs(struct spu *spu) 267 + static int spu_request_irqs(struct spu *spu) 269 268 { 270 - int ret; 271 - int irq_base; 269 + int ret = 0; 272 270 273 - irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; 271 + if (spu->irqs[0] != NO_IRQ) { 272 + snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", 273 + spu->number); 274 + ret = request_irq(spu->irqs[0], spu_irq_class_0, 275 + IRQF_DISABLED, 276 + spu->irq_c0, spu); 277 + if (ret) 278 + goto bail0; 279 + } 280 + if (spu->irqs[1] != NO_IRQ) { 281 + snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", 282 + spu->number); 283 + ret = request_irq(spu->irqs[1], spu_irq_class_1, 284 + IRQF_DISABLED, 285 + spu->irq_c1, spu); 286 + if (ret) 287 + goto bail1; 288 + } 289 + if (spu->irqs[2] != NO_IRQ) { 290 + snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", 291 + spu->number); 292 + ret = request_irq(spu->irqs[2], spu_irq_class_2, 293 + IRQF_DISABLED, 294 + spu->irq_c2, spu); 295 + if (ret) 296 + goto bail2; 297 + } 298 + return 0; 274 299 275 - snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number); 276 - ret = request_irq(irq_base + spu->isrc, 277 - spu_irq_class_0, IRQF_DISABLED, spu->irq_c0, spu); 278 - if (ret) 279 - goto out; 280 - 281 - snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); 282 - ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, 283 - spu_irq_class_1, IRQF_DISABLED, spu->irq_c1, spu); 284 - if (ret) 285 - goto out1; 286 - 287 - snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); 288 - ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, 289 - spu_irq_class_2, IRQF_DISABLED, spu->irq_c2, spu); 290 - if (ret) 291 - goto out2; 292 - goto out; 293 - 294 - out2: 295 - free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); 296 - out1: 297 - free_irq(irq_base + spu->isrc, spu); 298 - out: 300 + bail2: 301 + if 
(spu->irqs[1] != NO_IRQ) 302 + free_irq(spu->irqs[1], spu); 303 + bail1: 304 + if (spu->irqs[0] != NO_IRQ) 305 + free_irq(spu->irqs[0], spu); 306 + bail0: 299 307 return ret; 300 308 } 301 309 302 - static void 303 - spu_free_irqs(struct spu *spu) 310 + static void spu_free_irqs(struct spu *spu) 304 311 { 305 - int irq_base; 306 - 307 - irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; 308 - 309 - free_irq(irq_base + spu->isrc, spu); 310 - free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); 311 - free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu); 312 + if (spu->irqs[0] != NO_IRQ) 313 + free_irq(spu->irqs[0], spu); 314 + if (spu->irqs[1] != NO_IRQ) 315 + free_irq(spu->irqs[1], spu); 316 + if (spu->irqs[2] != NO_IRQ) 317 + free_irq(spu->irqs[2], spu); 312 318 } 313 319 314 320 static LIST_HEAD(spu_list); ··· 565 559 iounmap((u8 __iomem *)spu->local_store); 566 560 } 567 561 562 + /* This function shall be abstracted for HV platforms */ 563 + static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) 564 + { 565 + struct irq_host *host; 566 + unsigned int isrc; 567 + u32 *tmp; 568 + 569 + host = iic_get_irq_host(spu->node); 570 + if (host == NULL) 571 + return -ENODEV; 572 + 573 + /* Get the interrupt source from the device-tree */ 574 + tmp = (u32 *)get_property(np, "isrc", NULL); 575 + if (!tmp) 576 + return -ENODEV; 577 + spu->isrc = isrc = tmp[0]; 578 + 579 + /* Now map interrupts of all 3 classes */ 580 + spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc, 0); 581 + spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc, 0); 582 + spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc, 0); 583 + 584 + /* Right now, we only fail if class 2 failed */ 585 + return spu->irqs[2] == NO_IRQ ? 
-EINVAL : 0; 586 + } 587 + 568 588 static int __init spu_map_device(struct spu *spu, struct device_node *node) 569 589 { 570 590 char *prop; 571 591 int ret; 572 592 573 593 ret = -ENODEV; 574 - prop = get_property(node, "isrc", NULL); 575 - if (!prop) 576 - goto out; 577 - spu->isrc = *(unsigned int *)prop; 578 - 579 594 spu->name = get_property(node, "name", NULL); 580 595 if (!spu->name) 581 596 goto out; ··· 663 636 return ret; 664 637 } 665 638 666 - sysdev_create_file(&spu->sysdev, &attr_isrc); 639 + if (spu->isrc != 0) 640 + sysdev_create_file(&spu->sysdev, &attr_isrc); 667 641 sysfs_add_device_to_node(&spu->sysdev, spu->nid); 668 642 669 643 return 0; ··· 696 668 spu->nid = of_node_to_nid(spe); 697 669 if (spu->nid == -1) 698 670 spu->nid = 0; 671 + ret = spu_map_interrupts(spu, spe); 672 + if (ret) 673 + goto out_unmap; 699 674 spin_lock_init(&spu->register_lock); 700 675 spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); 701 676 spu_mfc_sr1_set(spu, 0x33);
+2 -9
arch/powerpc/platforms/chrp/pci.c
··· 18 18 #include <asm/machdep.h> 19 19 #include <asm/sections.h> 20 20 #include <asm/pci-bridge.h> 21 - #include <asm/open_pic.h> 22 21 #include <asm/grackle.h> 23 22 #include <asm/rtas.h> 24 23 ··· 160 161 chrp_pcibios_fixup(void) 161 162 { 162 163 struct pci_dev *dev = NULL; 163 - struct device_node *np; 164 164 165 - /* PCI interrupts are controlled by the OpenPIC */ 166 - for_each_pci_dev(dev) { 167 - np = pci_device_to_OF_node(dev); 168 - if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0)) 169 - dev->irq = np->intrs[0].line; 170 - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); 171 - } 165 + for_each_pci_dev(dev) 166 + pci_read_irq_line(dev); 172 167 } 173 168 174 169 #define PRG_CL_RESET_VALID 0x00010000
+66 -44
arch/powerpc/platforms/chrp/setup.c
··· 59 59 int _chrp_type; 60 60 EXPORT_SYMBOL(_chrp_type); 61 61 62 - struct mpic *chrp_mpic; 62 + static struct mpic *chrp_mpic; 63 63 64 64 /* Used for doing CHRP event-scans */ 65 65 DEFINE_PER_CPU(struct timer_list, heartbeat_timer); ··· 315 315 jiffies + event_scan_interval); 316 316 } 317 317 318 - void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, 319 - struct pt_regs *regs) 318 + static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, 319 + struct pt_regs *regs) 320 320 { 321 - unsigned int max = 100; 322 - 323 - while(max--) { 324 - int irq = i8259_irq(regs); 325 - if (max == 99) 326 - desc->chip->eoi(irq); 327 - if (irq < 0) 328 - break; 329 - generic_handle_irq(irq, regs); 330 - }; 321 + unsigned int cascade_irq = i8259_irq(regs); 322 + if (cascade_irq != NO_IRQ) 323 + generic_handle_irq(cascade_irq, regs); 324 + desc->chip->eoi(irq); 331 325 } 332 326 333 327 /* ··· 330 336 static void __init chrp_find_openpic(void) 331 337 { 332 338 struct device_node *np, *root; 333 - int len, i, j, irq_count; 339 + int len, i, j; 334 340 int isu_size, idu_size; 335 341 unsigned int *iranges, *opprop = NULL; 336 342 int oplen = 0; 337 343 unsigned long opaddr; 338 344 int na = 1; 339 - unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS]; 340 345 341 - np = find_type_devices("open-pic"); 346 + np = of_find_node_by_type(NULL, "open-pic"); 342 347 if (np == NULL) 343 348 return; 344 - root = find_path_device("/"); 349 + root = of_find_node_by_path("/"); 345 350 if (root) { 346 351 opprop = (unsigned int *) get_property 347 352 (root, "platform-open-pic", &oplen); ··· 351 358 oplen /= na * sizeof(unsigned int); 352 359 } else { 353 360 struct resource r; 354 - if (of_address_to_resource(np, 0, &r)) 355 - return; 361 + if (of_address_to_resource(np, 0, &r)) { 362 + goto bail; 363 + } 356 364 opaddr = r.start; 357 365 oplen = 0; 358 366 } 359 367 360 368 printk(KERN_INFO "OpenPIC at %lx\n", opaddr); 361 - 362 - irq_count = NR_IRQS - 
NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ 363 - prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4); 364 - /* i8259 cascade is always positive level */ 365 - init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE; 366 369 367 370 iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); 368 371 if (iranges == NULL) ··· 386 397 if (len > 1) 387 398 isu_size = iranges[3]; 388 399 389 - chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY, 390 - isu_size, NUM_ISA_INTERRUPTS, irq_count, 391 - NR_IRQS - 4, init_senses, irq_count, 392 - " MPIC "); 400 + chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY, 401 + isu_size, 0, " MPIC "); 393 402 if (chrp_mpic == NULL) { 394 403 printk(KERN_ERR "Failed to allocate MPIC structure\n"); 395 - return; 404 + goto bail; 396 405 } 397 - 398 406 j = na - 1; 399 407 for (i = 1; i < len; ++i) { 400 408 iranges += 2; ··· 403 417 } 404 418 405 419 mpic_init(chrp_mpic); 406 - set_irq_chained_handler(NUM_ISA_INTERRUPTS, chrp_8259_cascade); 420 + ppc_md.get_irq = mpic_get_irq; 421 + bail: 422 + of_node_put(root); 423 + of_node_put(np); 407 424 } 408 425 409 426 #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) ··· 417 428 }; 418 429 #endif 419 430 420 - void __init chrp_init_IRQ(void) 431 + static void __init chrp_find_8259(void) 421 432 { 422 - struct device_node *np; 433 + struct device_node *np, *pic = NULL; 423 434 unsigned long chrp_int_ack = 0; 424 - #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) 425 - struct device_node *kbd; 426 - #endif 435 + unsigned int cascade_irq; 427 436 437 + /* Look for cascade */ 438 + for_each_node_by_type(np, "interrupt-controller") 439 + if (device_is_compatible(np, "chrp,iic")) { 440 + pic = np; 441 + break; 442 + } 443 + /* Ok, 8259 wasn't found. 
We need to handle the case where 444 + * we have a pegasos that claims to be chrp but doesn't have 445 + * a proper interrupt tree 446 + */ 447 + if (pic == NULL && chrp_mpic != NULL) { 448 + printk(KERN_ERR "i8259: Not found in device-tree" 449 + " assuming no legacy interrupts\n"); 450 + return; 451 + } 452 + 453 + /* Look for intack. In a perfect world, we would look for it on 454 + * the ISA bus that holds the 8259 but heh... Works that way. If 455 + * we ever see a problem, we can try to re-use the pSeries code here. 456 + * Also, Pegasos-type platforms don't have a proper node to start 457 + * from anyway 458 + */ 428 459 for (np = find_devices("pci"); np != NULL; np = np->next) { 429 460 unsigned int *addrp = (unsigned int *) 430 461 get_property(np, "8259-interrupt-acknowledge", NULL); ··· 455 446 break; 456 447 } 457 448 if (np == NULL) 458 - printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n"); 449 + printk(KERN_WARNING "Cannot find PCI interrupt acknowledge" 450 + " address, polling\n"); 459 451 452 + i8259_init(pic, chrp_int_ack); 453 + if (ppc_md.get_irq == NULL) 454 + ppc_md.get_irq = i8259_irq; 455 + if (chrp_mpic != NULL) { 456 + cascade_irq = irq_of_parse_and_map(pic, 0); 457 + if (cascade_irq == NO_IRQ) 458 + printk(KERN_ERR "i8259: failed to map cascade irq\n"); 459 + else 460 + set_irq_chained_handler(cascade_irq, 461 + chrp_8259_cascade); 462 + } 463 + } 464 + 465 + void __init chrp_init_IRQ(void) 466 + { 467 + #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) 468 + struct device_node *kbd; 469 + #endif 460 470 chrp_find_openpic(); 461 - 462 - i8259_init(chrp_int_ack, 0); 471 + chrp_find_8259(); 463 472 464 473 if (_chrp_type == _CHRP_Pegasos) 465 474 ppc_md.get_irq = i8259_irq; ··· 562 535 DMA_MODE_READ = 0x44; 563 536 DMA_MODE_WRITE = 0x48; 564 537 isa_io_base = CHRP_ISA_IO_BASE; /* default value */ 565 - ppc_do_canonicalize_irqs = 1; 566 - 567 - /* Assume we have an 8259... 
*/ 568 - __irq_offset_value = NUM_ISA_INTERRUPTS; 569 538 570 539 return 1; 571 540 } ··· 573 550 .init = chrp_init2, 574 551 .show_cpuinfo = chrp_show_cpuinfo, 575 552 .init_IRQ = chrp_init_IRQ, 576 - .get_irq = mpic_get_irq, 577 553 .pcibios_fixup = chrp_pcibios_fixup, 578 554 .restart = rtas_restart, 579 555 .power_off = rtas_power_off,
-1
arch/powerpc/platforms/chrp/smp.c
··· 29 29 #include <asm/smp.h> 30 30 #include <asm/residual.h> 31 31 #include <asm/time.h> 32 - #include <asm/open_pic.h> 33 32 #include <asm/machdep.h> 34 33 #include <asm/smp.h> 35 34 #include <asm/mpic.h>
+58 -34
arch/powerpc/platforms/iseries/irq.c
··· 162 162 printk(KERN_ERR "pci_event_handler: NULL event received\n"); 163 163 } 164 164 165 - /* 166 - * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c 167 - * It must be called before the bus walk. 168 - */ 169 - void __init iSeries_init_IRQ(void) 170 - { 171 - /* Register PCI event handler and open an event path */ 172 - int ret; 173 - 174 - ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, 175 - &pci_event_handler); 176 - if (ret == 0) { 177 - ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); 178 - if (ret != 0) 179 - printk(KERN_ERR "iseries_init_IRQ: open event path " 180 - "failed with rc 0x%x\n", ret); 181 - } else 182 - printk(KERN_ERR "iseries_init_IRQ: register handler " 183 - "failed with rc 0x%x\n", ret); 184 - } 185 - 186 165 #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) 187 166 #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) 188 167 #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) ··· 175 196 { 176 197 u32 bus, dev_id, function, mask; 177 198 const u32 sub_bus = 0; 178 - unsigned int rirq = virt_irq_to_real_map[irq]; 199 + unsigned int rirq = (unsigned int)irq_map[irq].hwirq; 179 200 180 201 /* The IRQ has already been locked by the caller */ 181 202 bus = REAL_IRQ_TO_BUS(rirq); ··· 192 213 { 193 214 u32 bus, dev_id, function, mask; 194 215 const u32 sub_bus = 0; 195 - unsigned int rirq = virt_irq_to_real_map[irq]; 216 + unsigned int rirq = (unsigned int)irq_map[irq].hwirq; 196 217 197 218 bus = REAL_IRQ_TO_BUS(rirq); 198 219 function = REAL_IRQ_TO_FUNC(rirq); ··· 233 254 { 234 255 u32 bus, dev_id, function, mask; 235 256 const u32 sub_bus = 0; 236 - unsigned int rirq = virt_irq_to_real_map[irq]; 257 + unsigned int rirq = (unsigned int)irq_map[irq].hwirq; 237 258 238 259 /* irq should be locked by the caller */ 239 260 bus = REAL_IRQ_TO_BUS(rirq); ··· 256 277 { 257 278 u32 bus, dev_id, function, mask; 258 279 const u32 sub_bus = 0; 259 - unsigned int rirq = virt_irq_to_real_map[irq]; 280 + 
unsigned int rirq = (unsigned int)irq_map[irq].hwirq; 260 281 261 282 /* The IRQ has already been locked by the caller */ 262 283 bus = REAL_IRQ_TO_BUS(rirq); ··· 270 291 271 292 static void iseries_end_IRQ(unsigned int irq) 272 293 { 273 - unsigned int rirq = virt_irq_to_real_map[irq]; 294 + unsigned int rirq = (unsigned int)irq_map[irq].hwirq; 274 295 275 296 HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), 276 297 (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); ··· 293 314 int __init iSeries_allocate_IRQ(HvBusNumber bus, 294 315 HvSubBusNumber sub_bus, u32 bsubbus) 295 316 { 296 - int virtirq; 297 317 unsigned int realirq; 298 318 u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); 299 319 u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); 300 320 301 321 realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) 302 322 + function; 303 - virtirq = virt_irq_create_mapping(realirq); 304 - set_irq_chip_and_handler(virtirq, &iseries_pic, handle_fasteoi_irq); 305 - return virtirq; 323 + 324 + return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE); 306 325 } 307 326 308 327 #endif /* CONFIG_PCI */ ··· 308 331 /* 309 332 * Get the next pending IRQ. 
310 333 */ 311 - int iSeries_get_irq(struct pt_regs *regs) 334 + unsigned int iSeries_get_irq(struct pt_regs *regs) 312 335 { 313 - /* -2 means ignore this interrupt */ 314 - int irq = -2; 336 + int irq = NO_IRQ_IGNORE; 315 337 316 338 #ifdef CONFIG_SMP 317 339 if (get_lppaca()->int_dword.fields.ipi_cnt) { ··· 333 357 } 334 358 spin_unlock(&pending_irqs_lock); 335 359 if (irq >= NR_IRQS) 336 - irq = -2; 360 + irq = NO_IRQ_IGNORE; 337 361 } 338 362 #endif 339 363 340 364 return irq; 341 365 } 366 + 367 + static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, 368 + irq_hw_number_t hw, unsigned int flags) 369 + { 370 + set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); 371 + 372 + return 0; 373 + } 374 + 375 + static struct irq_host_ops iseries_irq_host_ops = { 376 + .map = iseries_irq_host_map, 377 + }; 378 + 379 + /* 380 + * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c 381 + * It must be called before the bus walk. 382 + */ 383 + void __init iSeries_init_IRQ(void) 384 + { 385 + /* Register PCI event handler and open an event path */ 386 + struct irq_host *host; 387 + int ret; 388 + 389 + /* 390 + * The Hypervisor only allows us up to 256 interrupt 391 + * sources (the irq number is passed in a u8). 392 + */ 393 + irq_set_virq_count(256); 394 + 395 + /* Create irq host. No need for a revmap since HV will give us 396 + * back our virtual irq number 397 + */ 398 + host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0); 399 + BUG_ON(host == NULL); 400 + irq_set_default_host(host); 401 + 402 + ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, 403 + &pci_event_handler); 404 + if (ret == 0) { 405 + ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); 406 + if (ret != 0) 407 + printk(KERN_ERR "iseries_init_IRQ: open event path " 408 + "failed with rc 0x%x\n", ret); 409 + } else 410 + printk(KERN_ERR "iseries_init_IRQ: register handler " 411 + "failed with rc 0x%x\n", ret); 412 + } 413 +
+1 -1
arch/powerpc/platforms/iseries/irq.h
··· 4 4 extern void iSeries_init_IRQ(void); 5 5 extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32); 6 6 extern void iSeries_activate_IRQs(void); 7 - extern int iSeries_get_irq(struct pt_regs *); 7 + extern unsigned int iSeries_get_irq(struct pt_regs *); 8 8 9 9 #endif /* _ISERIES_IRQ_H */
-8
arch/powerpc/platforms/iseries/setup.c
··· 294 294 { 295 295 DBG(" -> iSeries_init_early()\n"); 296 296 297 - ppc64_interrupt_controller = IC_ISERIES; 298 - 299 297 #if defined(CONFIG_BLK_DEV_INITRD) 300 298 /* 301 299 * If the init RAM disk has been configured and there is ··· 656 658 657 659 powerpc_firmware_features |= FW_FEATURE_ISERIES; 658 660 powerpc_firmware_features |= FW_FEATURE_LPAR; 659 - 660 - /* 661 - * The Hypervisor only allows us up to 256 interrupt 662 - * sources (the irq number is passed in a u8). 663 - */ 664 - virt_irq_max = 255; 665 661 666 662 hpte_init_iSeries(); 667 663
+11 -6
arch/powerpc/platforms/maple/pci.c
··· 443 443 int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) 444 444 { 445 445 struct device_node *np; 446 - int irq = channel ? 15 : 14; 446 + unsigned int defirq = channel ? 15 : 14; 447 + unsigned int irq; 447 448 448 449 if (pdev->vendor != PCI_VENDOR_ID_AMD || 449 450 pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) 450 - return irq; 451 + return defirq; 451 452 452 453 np = pci_device_to_OF_node(pdev); 453 454 if (np == NULL) 454 - return irq; 455 - if (np->n_intrs < 2) 456 - return irq; 457 - return np->intrs[channel & 0x1].line; 455 + return defirq; 456 + irq = irq_of_parse_and_map(np, channel & 0x1); 457 + if (irq == NO_IRQ) { 458 + printk("Failed to map onboard IDE interrupt for channel %d\n", 459 + channel); 460 + return defirq; 461 + } 462 + return irq; 458 463 } 459 464 460 465 /* XXX: To remove once all firmwares are ok */
+59 -29
arch/powerpc/platforms/maple/setup.c
··· 198 198 { 199 199 DBG(" -> maple_init_early\n"); 200 200 201 - /* Setup interrupt mapping options */ 202 - ppc64_interrupt_controller = IC_OPEN_PIC; 203 - 204 201 iommu_init_early_dart(); 205 202 206 203 DBG(" <- maple_init_early\n"); 207 204 } 208 205 209 - 210 - static __init void maple_init_IRQ(void) 206 + /* 207 + * This is almost identical to pSeries and CHRP. We need to make that 208 + * code generic at one point, with appropriate bits in the device-tree to 209 + * identify the presence of an HT APIC 210 + */ 211 + static void __init maple_init_IRQ(void) 211 212 { 212 - struct device_node *root; 213 + struct device_node *root, *np, *mpic_node = NULL; 213 214 unsigned int *opprop; 214 - unsigned long opic_addr; 215 + unsigned long openpic_addr = 0; 216 + int naddr, n, i, opplen, has_isus = 0; 215 217 struct mpic *mpic; 216 - unsigned char senses[128]; 217 - int n; 218 + unsigned int flags = MPIC_PRIMARY; 218 219 219 - DBG(" -> maple_init_IRQ\n"); 220 + /* Locate MPIC in the device-tree. Note that there is a bug 221 + * in Maple device-tree where the type of the controller is 222 + * open-pic and not interrupt-controller 223 + */ 224 + for_each_node_by_type(np, "open-pic") { 225 + mpic_node = np; 226 + break; 227 + } 228 + if (mpic_node == NULL) { 229 + printk(KERN_ERR 230 + "Failed to locate the MPIC interrupt controller\n"); 231 + return; 232 + } 220 233 221 - /* XXX: Non standard, replace that with a proper openpic/mpic node 222 - * in the device-tree. 
Find the Open PIC if present */ 234 + /* Find address list in /platform-open-pic */ 223 235 root = of_find_node_by_path("/"); 224 - opprop = (unsigned int *) get_property(root, 225 - "platform-open-pic", NULL); 226 - if (opprop == 0) 227 - panic("OpenPIC not found !\n"); 228 - 229 - n = prom_n_addr_cells(root); 230 - for (opic_addr = 0; n > 0; --n) 231 - opic_addr = (opic_addr << 32) + *opprop++; 236 + naddr = prom_n_addr_cells(root); 237 + opprop = (unsigned int *) get_property(root, "platform-open-pic", 238 + &opplen); 239 + if (opprop != 0) { 240 + openpic_addr = of_read_number(opprop, naddr); 241 + has_isus = (opplen > naddr); 242 + printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", 243 + openpic_addr, has_isus); 244 + } 232 245 of_node_put(root); 233 246 234 - /* Obtain sense values from device-tree */ 235 - prom_get_irq_senses(senses, 0, 128); 247 + BUG_ON(openpic_addr == 0); 236 248 237 - mpic = mpic_alloc(opic_addr, 238 - MPIC_PRIMARY | MPIC_BIG_ENDIAN | 239 - MPIC_BROKEN_U3 | MPIC_WANTS_RESET, 240 - 0, 0, 128, 128, senses, 128, "U3-MPIC"); 249 + /* Check for a big endian MPIC */ 250 + if (get_property(np, "big-endian", NULL) != NULL) 251 + flags |= MPIC_BIG_ENDIAN; 252 + 253 + /* XXX Maple specific bits */ 254 + flags |= MPIC_BROKEN_U3 | MPIC_WANTS_RESET; 255 + 256 + /* Setup the openpic driver. More device-tree junks, we hard code no 257 + * ISUs for now. I'll have to revisit some stuffs with the folks doing 258 + * the firmware for those 259 + */ 260 + mpic = mpic_alloc(mpic_node, openpic_addr, flags, 261 + /*has_isus ? 
16 :*/ 0, 0, " MPIC "); 241 262 BUG_ON(mpic == NULL); 242 - mpic_init(mpic); 243 263 244 - DBG(" <- maple_init_IRQ\n"); 264 + /* Add ISUs */ 265 + opplen /= sizeof(u32); 266 + for (n = 0, i = naddr; i < opplen; i += naddr, n++) { 267 + unsigned long isuaddr = of_read_number(opprop + i, naddr); 268 + mpic_assign_isu(mpic, n, isuaddr); 269 + } 270 + 271 + /* All ISUs are setup, complete initialization */ 272 + mpic_init(mpic); 273 + ppc_md.get_irq = mpic_get_irq; 274 + of_node_put(mpic_node); 275 + of_node_put(root); 245 276 } 246 277 247 278 static void __init maple_progress(char *s, unsigned short hex) ··· 310 279 .setup_arch = maple_setup_arch, 311 280 .init_early = maple_init_early, 312 281 .init_IRQ = maple_init_IRQ, 313 - .get_irq = mpic_get_irq, 314 282 .pcibios_fixup = maple_pcibios_fixup, 315 283 .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, 316 284 .restart = maple_restart,
+3 -1
arch/powerpc/platforms/powermac/bootx_init.c
··· 162 162 { 163 163 u32 val; 164 164 165 + bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end); 166 + 165 167 if (bootx_info->kernelParamsOffset) { 166 168 char *args = (char *)((unsigned long)bootx_info) + 167 169 bootx_info->kernelParamsOffset; ··· 230 228 231 229 if (!strcmp(namep, "/chosen")) { 232 230 DBG(" detected /chosen ! adding properties names !\n"); 233 - bootx_dt_add_string("linux,platform", mem_end); 231 + bootx_dt_add_string("linux,bootx", mem_end); 234 232 bootx_dt_add_string("linux,stdout-path", mem_end); 235 233 bootx_dt_add_string("linux,initrd-start", mem_end); 236 234 bootx_dt_add_string("linux,initrd-end", mem_end);
+5 -4
arch/powerpc/platforms/powermac/low_i2c.c
··· 522 522 host->speed = KW_I2C_MODE_25KHZ; 523 523 break; 524 524 } 525 - if (np->n_intrs > 0) 526 - host->irq = np->intrs[0].line; 527 - else 528 - host->irq = NO_IRQ; 525 + host->irq = irq_of_parse_and_map(np, 0); 526 + if (host->irq == NO_IRQ) 527 + printk(KERN_WARNING 528 + "low_i2c: Failed to map interrupt for %s\n", 529 + np->full_name); 529 530 530 531 host->base = ioremap((*addrp), 0x1000); 531 532 if (host->base == NULL) {
+2 -3
arch/powerpc/platforms/powermac/nvram.c
··· 29 29 #include <asm/machdep.h> 30 30 #include <asm/nvram.h> 31 31 32 + #include "pmac.h" 33 + 32 34 #define DEBUG 33 35 34 36 #ifdef DEBUG ··· 81 79 static int nvram_partitions[3]; 82 80 // XXX Turn that into a sem 83 81 static DEFINE_SPINLOCK(nv_lock); 84 - 85 - extern int pmac_newworld; 86 - extern int system_running; 87 82 88 83 static int (*core99_write_bank)(int bank, u8* datas); 89 84 static int (*core99_erase_bank)(int bank);
+46 -22
arch/powerpc/platforms/powermac/pci.c
··· 46 46 static struct pci_controller *u3_agp; 47 47 static struct pci_controller *u4_pcie; 48 48 static struct pci_controller *u3_ht; 49 + #define has_second_ohare 0 50 + #else 51 + static int has_second_ohare; 49 52 #endif /* CONFIG_PPC64 */ 50 53 51 54 extern u8 pci_cache_line_size; ··· 650 647 early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); 651 648 } 652 649 650 + static void __init init_second_ohare(void) 651 + { 652 + struct device_node *np = of_find_node_by_name(NULL, "pci106b,7"); 653 + unsigned char bus, devfn; 654 + unsigned short cmd; 655 + 656 + if (np == NULL) 657 + return; 658 + 659 + /* This must run before we initialize the PICs since the second 660 + * ohare hosts a PIC that will be accessed there. 661 + */ 662 + if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { 663 + struct pci_controller* hose = 664 + pci_find_hose_for_OF_device(np); 665 + if (!hose) { 666 + printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); 667 + return; 668 + } 669 + early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); 670 + cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; 671 + cmd &= ~PCI_COMMAND_IO; 672 + early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); 673 + } 674 + has_second_ohare = 1; 675 + } 676 + 653 677 /* 654 678 * Some Apple desktop machines have a NEC PD720100A USB2 controller 655 679 * on the motherboard. 
Open Firmware, on these, will disable the ··· 718 688 " EHCI, fixing up...\n"); 719 689 data &= ~1UL; 720 690 early_write_config_dword(hose, bus, devfn, 0xe4, data); 721 - early_write_config_byte(hose, bus, 722 - devfn | 2, PCI_INTERRUPT_LINE, 723 - nec->intrs[0].line); 724 691 } 725 692 } 726 693 } ··· 985 958 return 0; 986 959 } 987 960 988 - static void __init pcibios_fixup_OF_interrupts(void) 961 + void __init pmac_pcibios_fixup(void) 989 962 { 990 963 struct pci_dev* dev = NULL; 991 964 992 - /* 993 - * Open Firmware often doesn't initialize the 994 - * PCI_INTERRUPT_LINE config register properly, so we 995 - * should find the device node and apply the interrupt 996 - * obtained from the OF device-tree 997 - */ 998 965 for_each_pci_dev(dev) { 999 - struct device_node *node; 1000 - node = pci_device_to_OF_node(dev); 1001 - /* this is the node, see if it has interrupts */ 1002 - if (node && node->n_intrs > 0) 1003 - dev->irq = node->intrs[0].line; 1004 - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); 1005 - } 1006 - } 966 + /* Read interrupt from the device-tree */ 967 + pci_read_irq_line(dev); 1007 968 1008 - void __init pmac_pcibios_fixup(void) 1009 - { 1010 - /* Fixup interrupts according to OF tree */ 1011 - pcibios_fixup_OF_interrupts(); 969 + /* Fixup interrupt for the modem/ethernet combo controller. 970 + * on machines with a second ohare chip. 971 + * The number in the device tree (27) is bogus (correct for 972 + * the ethernet-only board but not the combo ethernet/modem 973 + * board). The real interrupt is 28 on the second controller 974 + * -> 28+32 = 60. 
975 + */ 976 + if (has_second_ohare && 977 + dev->vendor == PCI_VENDOR_ID_DEC && 978 + dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) 979 + dev->irq = irq_create_mapping(NULL, 60, 0); 980 + } 1012 981 } 1013 982 1014 983 #ifdef CONFIG_PPC64 ··· 1094 1071 1095 1072 #else /* CONFIG_PPC64 */ 1096 1073 init_p2pbridge(); 1074 + init_second_ohare(); 1097 1075 fixup_nec_usb2(); 1098 1076 1099 1077 /* We are still having some issues with the Xserve G4, enabling
+6 -7
arch/powerpc/platforms/powermac/pfunc_base.c
··· 24 24 25 25 static int macio_do_gpio_irq_enable(struct pmf_function *func) 26 26 { 27 - if (func->node->n_intrs < 1) 27 + unsigned int irq = irq_of_parse_and_map(func->node, 0); 28 + if (irq == NO_IRQ) 28 29 return -EINVAL; 29 - 30 - return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0, 31 - func->node->name, func); 30 + return request_irq(irq, macio_gpio_irq, 0, func->node->name, func); 32 31 } 33 32 34 33 static int macio_do_gpio_irq_disable(struct pmf_function *func) 35 34 { 36 - if (func->node->n_intrs < 1) 35 + unsigned int irq = irq_of_parse_and_map(func->node, 0); 36 + if (irq == NO_IRQ) 37 37 return -EINVAL; 38 - 39 - free_irq(func->node->intrs[0].line, func); 38 + free_irq(irq, func); 40 39 return 0; 41 40 } 42 41
+127 -201
arch/powerpc/platforms/powermac/pic.c
··· 65 65 66 66 static DEFINE_SPINLOCK(pmac_pic_lock); 67 67 68 - #define GATWICK_IRQ_POOL_SIZE 10 69 - static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE]; 70 - 71 68 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 72 69 static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; 73 70 static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 74 71 static int pmac_irq_cascade = -1; 72 + static struct irq_host *pmac_pic_host; 75 73 76 74 static void __pmac_retrigger(unsigned int irq_nr) 77 75 { ··· 84 86 } 85 87 } 86 88 87 - static void pmac_mask_and_ack_irq(unsigned int irq_nr) 89 + static void pmac_mask_and_ack_irq(unsigned int virq) 88 90 { 89 - unsigned long bit = 1UL << (irq_nr & 0x1f); 90 - int i = irq_nr >> 5; 91 + unsigned int src = irq_map[virq].hwirq; 92 + unsigned long bit = 1UL << (virq & 0x1f); 93 + int i = virq >> 5; 91 94 unsigned long flags; 92 95 93 - if ((unsigned)irq_nr >= max_irqs) 94 - return; 95 - 96 96 spin_lock_irqsave(&pmac_pic_lock, flags); 97 - __clear_bit(irq_nr, ppc_cached_irq_mask); 98 - if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts)) 97 + __clear_bit(src, ppc_cached_irq_mask); 98 + if (__test_and_clear_bit(src, ppc_lost_interrupts)) 99 99 atomic_dec(&ppc_n_lost_interrupts); 100 100 out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); 101 101 out_le32(&pmac_irq_hw[i]->ack, bit); ··· 106 110 spin_unlock_irqrestore(&pmac_pic_lock, flags); 107 111 } 108 112 109 - static void pmac_ack_irq(unsigned int irq_nr) 113 + static void pmac_ack_irq(unsigned int virq) 110 114 { 111 - unsigned long bit = 1UL << (irq_nr & 0x1f); 112 - int i = irq_nr >> 5; 115 + unsigned int src = irq_map[virq].hwirq; 116 + unsigned long bit = 1UL << (src & 0x1f); 117 + int i = src >> 5; 113 118 unsigned long flags; 114 119 115 - if ((unsigned)irq_nr >= max_irqs) 116 - return; 117 - 118 120 spin_lock_irqsave(&pmac_pic_lock, flags); 119 - if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts)) 121 + if (__test_and_clear_bit(src, 
ppc_lost_interrupts)) 120 122 atomic_dec(&ppc_n_lost_interrupts); 121 123 out_le32(&pmac_irq_hw[i]->ack, bit); 122 124 (void)in_le32(&pmac_irq_hw[i]->ack); ··· 151 157 /* When an irq gets requested for the first client, if it's an 152 158 * edge interrupt, we clear any previous one on the controller 153 159 */ 154 - static unsigned int pmac_startup_irq(unsigned int irq_nr) 160 + static unsigned int pmac_startup_irq(unsigned int virq) 155 161 { 156 162 unsigned long flags; 157 - unsigned long bit = 1UL << (irq_nr & 0x1f); 158 - int i = irq_nr >> 5; 163 + unsigned int src = irq_map[virq].hwirq; 164 + unsigned long bit = 1UL << (src & 0x1f); 165 + int i = src >> 5; 159 166 160 167 spin_lock_irqsave(&pmac_pic_lock, flags); 161 - if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0) 168 + if ((irq_desc[virq].status & IRQ_LEVEL) == 0) 162 169 out_le32(&pmac_irq_hw[i]->ack, bit); 163 - __set_bit(irq_nr, ppc_cached_irq_mask); 164 - __pmac_set_irq_mask(irq_nr, 0); 170 + __set_bit(src, ppc_cached_irq_mask); 171 + __pmac_set_irq_mask(src, 0); 165 172 spin_unlock_irqrestore(&pmac_pic_lock, flags); 166 173 167 174 return 0; 168 175 } 169 176 170 - static void pmac_mask_irq(unsigned int irq_nr) 177 + static void pmac_mask_irq(unsigned int virq) 171 178 { 172 179 unsigned long flags; 180 + unsigned int src = irq_map[virq].hwirq; 173 181 174 182 spin_lock_irqsave(&pmac_pic_lock, flags); 175 - __clear_bit(irq_nr, ppc_cached_irq_mask); 176 - __pmac_set_irq_mask(irq_nr, 0); 183 + __clear_bit(src, ppc_cached_irq_mask); 184 + __pmac_set_irq_mask(src, 0); 177 185 spin_unlock_irqrestore(&pmac_pic_lock, flags); 178 186 } 179 187 180 - static void pmac_unmask_irq(unsigned int irq_nr) 188 + static void pmac_unmask_irq(unsigned int virq) 181 189 { 182 190 unsigned long flags; 191 + unsigned int src = irq_map[virq].hwirq; 183 192 184 193 spin_lock_irqsave(&pmac_pic_lock, flags); 185 - __set_bit(irq_nr, ppc_cached_irq_mask); 186 - __pmac_set_irq_mask(irq_nr, 0); 194 + __set_bit(src, 
ppc_cached_irq_mask); 195 + __pmac_set_irq_mask(src, 0); 187 196 spin_unlock_irqrestore(&pmac_pic_lock, flags); 188 197 } 189 198 190 - static int pmac_retrigger(unsigned int irq_nr) 199 + static int pmac_retrigger(unsigned int virq) 191 200 { 192 201 unsigned long flags; 193 202 194 203 spin_lock_irqsave(&pmac_pic_lock, flags); 195 - __pmac_retrigger(irq_nr); 204 + __pmac_retrigger(irq_map[virq].hwirq); 196 205 spin_unlock_irqrestore(&pmac_pic_lock, flags); 197 206 return 1; 198 207 } ··· 235 238 return rc; 236 239 } 237 240 238 - static int pmac_get_irq(struct pt_regs *regs) 241 + static unsigned int pmac_pic_get_irq(struct pt_regs *regs) 239 242 { 240 243 int irq; 241 244 unsigned long bits = 0; ··· 247 250 /* IPI's are a hack on the powersurge -- Cort */ 248 251 if ( smp_processor_id() != 0 ) { 249 252 psurge_smp_message_recv(regs); 250 - return -2; /* ignore, already handled */ 253 + return NO_IRQ_IGNORE; /* ignore, already handled */ 251 254 } 252 255 #endif /* CONFIG_SMP */ 253 256 spin_lock_irqsave(&pmac_pic_lock, flags); ··· 263 266 break; 264 267 } 265 268 spin_unlock_irqrestore(&pmac_pic_lock, flags); 266 - 267 - return irq; 268 - } 269 - 270 - /* This routine will fix some missing interrupt values in the device tree 271 - * on the gatwick mac-io controller used by some PowerBooks 272 - * 273 - * Walking of OF nodes could use a bit more fixing up here, but it's not 274 - * very important as this is all boot time code on static portions of the 275 - * device-tree. 276 - * 277 - * However, the modifications done to "intrs" will have to be removed and 278 - * replaced with proper updates of the "interrupts" properties or 279 - * AAPL,interrupts, yet to be decided, once the dynamic parsing is there. 
280 - */ 281 - static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, 282 - int irq_base) 283 - { 284 - struct device_node *node; 285 - int count; 286 - 287 - memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); 288 - count = 0; 289 - for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) { 290 - /* Fix SCC */ 291 - if ((strcasecmp(node->name, "escc") == 0) && node->child) { 292 - if (node->child->n_intrs < 3) { 293 - node->child->intrs = &gatwick_int_pool[count]; 294 - count += 3; 295 - } 296 - node->child->n_intrs = 3; 297 - node->child->intrs[0].line = 15+irq_base; 298 - node->child->intrs[1].line = 4+irq_base; 299 - node->child->intrs[2].line = 5+irq_base; 300 - printk(KERN_INFO "irq: fixed SCC on gatwick" 301 - " (%d,%d,%d)\n", 302 - node->child->intrs[0].line, 303 - node->child->intrs[1].line, 304 - node->child->intrs[2].line); 305 - } 306 - /* Fix media-bay & left SWIM */ 307 - if (strcasecmp(node->name, "media-bay") == 0) { 308 - struct device_node* ya_node; 309 - 310 - if (node->n_intrs == 0) 311 - node->intrs = &gatwick_int_pool[count++]; 312 - node->n_intrs = 1; 313 - node->intrs[0].line = 29+irq_base; 314 - printk(KERN_INFO "irq: fixed media-bay on gatwick" 315 - " (%d)\n", node->intrs[0].line); 316 - 317 - ya_node = node->child; 318 - while(ya_node) { 319 - if (strcasecmp(ya_node->name, "floppy") == 0) { 320 - if (ya_node->n_intrs < 2) { 321 - ya_node->intrs = &gatwick_int_pool[count]; 322 - count += 2; 323 - } 324 - ya_node->n_intrs = 2; 325 - ya_node->intrs[0].line = 19+irq_base; 326 - ya_node->intrs[1].line = 1+irq_base; 327 - printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n", 328 - ya_node->intrs[0].line, ya_node->intrs[1].line); 329 - } 330 - if (strcasecmp(ya_node->name, "ata4") == 0) { 331 - if (ya_node->n_intrs < 2) { 332 - ya_node->intrs = &gatwick_int_pool[count]; 333 - count += 2; 334 - } 335 - ya_node->n_intrs = 2; 336 - ya_node->intrs[0].line = 14+irq_base; 337 - ya_node->intrs[1].line = 
3+irq_base; 338 - printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n", 339 - ya_node->intrs[0].line, ya_node->intrs[1].line); 340 - } 341 - ya_node = ya_node->sibling; 342 - } 343 - } 344 - } 345 - if (count > 10) { 346 - printk("WARNING !! Gatwick interrupt pool overflow\n"); 347 - printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE); 348 - printk(" requested = %d\n", count); 349 - } 350 - } 351 - 352 - /* 353 - * The PowerBook 3400/2400/3500 can have a combo ethernet/modem 354 - * card which includes an ohare chip that acts as a second interrupt 355 - * controller. If we find this second ohare, set it up and fix the 356 - * interrupt value in the device tree for the ethernet chip. 357 - */ 358 - static void __init enable_second_ohare(struct device_node *np) 359 - { 360 - unsigned char bus, devfn; 361 - unsigned short cmd; 362 - struct device_node *ether; 363 - 364 - /* This code doesn't strictly belong here, it could be part of 365 - * either the PCI initialisation or the feature code. It's kept 366 - * here for historical reasons. 367 - */ 368 - if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { 369 - struct pci_controller* hose = 370 - pci_find_hose_for_OF_device(np); 371 - if (!hose) { 372 - printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); 373 - return; 374 - } 375 - early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); 376 - cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; 377 - cmd &= ~PCI_COMMAND_IO; 378 - early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); 379 - } 380 - 381 - /* Fix interrupt for the modem/ethernet combo controller. The number 382 - * in the device tree (27) is bogus (correct for the ethernet-only 383 - * board but not the combo ethernet/modem board). 384 - * The real interrupt is 28 on the second controller -> 28+32 = 60. 
385 - */ 386 - ether = of_find_node_by_name(NULL, "pci1011,14"); 387 - if (ether && ether->n_intrs > 0) { 388 - ether->intrs[0].line = 60; 389 - printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", 390 - ether->intrs[0].line); 391 - } 392 - of_node_put(ether); 269 + if (unlikely(irq < 0)) 270 + return NO_IRQ; 271 + return irq_linear_revmap(pmac_pic_host, irq); 393 272 } 394 273 395 274 #ifdef CONFIG_XMON ··· 284 411 .name = "cascade", 285 412 }; 286 413 414 + static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) 415 + { 416 + /* We match all, we don't always have a node anyway */ 417 + return 1; 418 + } 419 + 420 + static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, 421 + irq_hw_number_t hw, unsigned int flags) 422 + { 423 + struct irq_desc *desc = get_irq_desc(virq); 424 + int level; 425 + 426 + if (hw >= max_irqs) 427 + return -EINVAL; 428 + 429 + /* Mark level interrupts, set delayed disable for edge ones and set 430 + * handlers 431 + */ 432 + level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); 433 + if (level) 434 + desc->status |= IRQ_LEVEL; 435 + else 436 + desc->status |= IRQ_DELAYED_DISABLE; 437 + set_irq_chip_and_handler(virq, &pmac_pic, level ? 
438 + handle_level_irq : handle_edge_irq); 439 + return 0; 440 + } 441 + 442 + static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, 443 + u32 *intspec, unsigned int intsize, 444 + irq_hw_number_t *out_hwirq, 445 + unsigned int *out_flags) 446 + 447 + { 448 + *out_hwirq = *intspec; 449 + return 0; 450 + } 451 + 452 + static struct irq_host_ops pmac_pic_host_ops = { 453 + .match = pmac_pic_host_match, 454 + .map = pmac_pic_host_map, 455 + .xlate = pmac_pic_host_xlate, 456 + }; 457 + 287 458 static void __init pmac_pic_probe_oldstyle(void) 288 459 { 289 460 int i; ··· 337 420 struct resource r; 338 421 339 422 /* Set our get_irq function */ 340 - ppc_md.get_irq = pmac_get_irq; 423 + ppc_md.get_irq = pmac_pic_get_irq; 341 424 342 425 /* 343 426 * Find the interrupt controller type & node ··· 355 438 if (slave) { 356 439 max_irqs = 64; 357 440 level_mask[1] = OHARE_LEVEL_MASK; 358 - enable_second_ohare(slave); 359 441 } 360 442 } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { 361 443 max_irqs = max_real_irqs = 64; ··· 378 462 max_irqs = 128; 379 463 level_mask[2] = HEATHROW_LEVEL_MASK; 380 464 level_mask[3] = 0; 381 - pmac_fix_gatwick_interrupts(slave, max_real_irqs); 382 465 } 383 466 } 384 467 BUG_ON(master == NULL); 385 468 386 - /* Mark level interrupts and set handlers */ 387 - for (i = 0; i < max_irqs; i++) { 388 - int level = !!(level_mask[i >> 5] & (1UL << (i & 0x1f))); 389 - if (level) 390 - irq_desc[i].status |= IRQ_LEVEL; 391 - else 392 - irq_desc[i].status |= IRQ_DELAYED_DISABLE; 393 - set_irq_chip_and_handler(i, &pmac_pic, level ? 
394 - handle_level_irq : handle_edge_irq); 395 - } 469 + /* 470 + * Allocate an irq host 471 + */ 472 + pmac_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, max_irqs, 473 + &pmac_pic_host_ops, 474 + max_irqs); 475 + BUG_ON(pmac_pic_host == NULL); 476 + irq_set_default_host(pmac_pic_host); 396 477 397 478 /* Get addresses of first controller if we have a node for it */ 398 479 BUG_ON(of_address_to_resource(master, 0, &r)); ··· 416 503 pmac_irq_hw[i++] = 417 504 (volatile struct pmac_irq_hw __iomem *) 418 505 (addr + 0x10); 419 - pmac_irq_cascade = slave->intrs[0].line; 506 + pmac_irq_cascade = irq_of_parse_and_map(slave, 0); 420 507 421 508 printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" 422 509 " cascade: %d\n", slave->full_name, ··· 429 516 out_le32(&pmac_irq_hw[i]->enable, 0); 430 517 431 518 /* Hookup cascade irq */ 432 - if (slave) 519 + if (slave && pmac_irq_cascade != NO_IRQ) 433 520 setup_irq(pmac_irq_cascade, &gatwick_cascade_action); 434 521 435 522 printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); 436 523 #ifdef CONFIG_XMON 437 - setup_irq(20, &xmon_action); 524 + setup_irq(irq_create_mapping(NULL, 20, 0), &xmon_action); 438 525 #endif 439 526 } 440 527 #endif /* CONFIG_PPC32 */ ··· 443 530 struct pt_regs *regs) 444 531 { 445 532 struct mpic *mpic = desc->handler_data; 446 - unsigned int max = 100; 447 533 448 - while(max--) { 449 - int cascade_irq = mpic_get_one_irq(mpic, regs); 450 - if (max == 99) 451 - desc->chip->eoi(irq); 452 - if (irq < 0) 453 - break; 534 + unsigned int cascade_irq = mpic_get_one_irq(mpic, regs); 535 + if (cascade_irq != NO_IRQ) 454 536 generic_handle_irq(cascade_irq, regs); 455 - }; 537 + desc->chip->eoi(irq); 456 538 } 457 539 458 540 static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) ··· 457 549 int nmi_irq; 458 550 459 551 pswitch = of_find_node_by_name(NULL, "programmer-switch"); 460 - if (pswitch && pswitch->n_intrs) { 461 - nmi_irq = pswitch->intrs[0].line; 462 - 
mpic_irq_set_priority(nmi_irq, 9); 463 - setup_irq(nmi_irq, &xmon_action); 552 + if (pswitch) { 553 + nmi_irq = irq_of_parse_and_map(pswitch, 0); 554 + if (nmi_irq != NO_IRQ) { 555 + mpic_irq_set_priority(nmi_irq, 9); 556 + setup_irq(nmi_irq, &xmon_action); 557 + } 558 + of_node_put(pswitch); 464 559 } 465 - of_node_put(pswitch); 466 560 #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ 467 561 } 468 562 469 563 static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, 470 564 int master) 471 565 { 472 - unsigned char senses[128]; 473 - int offset = master ? 0 : 128; 474 - int count = master ? 128 : 124; 475 566 const char *name = master ? " MPIC 1 " : " MPIC 2 "; 476 567 struct resource r; 477 568 struct mpic *mpic; ··· 483 576 484 577 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); 485 578 486 - prom_get_irq_senses(senses, offset, offset + count); 487 - 488 579 flags |= MPIC_WANTS_RESET; 489 580 if (get_property(np, "big-endian", NULL)) 490 581 flags |= MPIC_BIG_ENDIAN; ··· 493 588 if (master && (flags & MPIC_BIG_ENDIAN)) 494 589 flags |= MPIC_BROKEN_U3; 495 590 496 - mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 
252 : 0, 497 - senses, count, name); 591 + mpic = mpic_alloc(np, r.start, flags, 0, 0, name); 498 592 if (mpic == NULL) 499 593 return NULL; 500 594 ··· 506 602 { 507 603 struct mpic *mpic1, *mpic2; 508 604 struct device_node *np, *master = NULL, *slave = NULL; 605 + unsigned int cascade; 509 606 510 607 /* We can have up to 2 MPICs cascaded */ 511 608 for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) ··· 543 638 of_node_put(master); 544 639 545 640 /* No slave, let's go out */ 546 - if (slave == NULL || slave->n_intrs < 1) 641 + if (slave == NULL) 547 642 return 0; 643 + 644 + /* Get/Map slave interrupt */ 645 + cascade = irq_of_parse_and_map(slave, 0); 646 + if (cascade == NO_IRQ) { 647 + printk(KERN_ERR "Failed to map cascade IRQ\n"); 648 + return 0; 649 + } 548 650 549 651 mpic2 = pmac_setup_one_mpic(slave, 0); 550 652 if (mpic2 == NULL) { ··· 559 647 of_node_put(slave); 560 648 return 0; 561 649 } 562 - set_irq_data(slave->intrs[0].line, mpic2); 563 - set_irq_chained_handler(slave->intrs[0].line, pmac_u3_cascade); 650 + set_irq_data(cascade, mpic2); 651 + set_irq_chained_handler(cascade, pmac_u3_cascade); 564 652 565 653 of_node_put(slave); 566 654 return 0; ··· 569 657 570 658 void __init pmac_pic_init(void) 571 659 { 660 + unsigned int flags = 0; 661 + 662 + /* We configure the OF parsing based on our oldworld vs. newworld 663 + * platform type and wether we were booted by BootX. 664 + */ 665 + #ifdef CONFIG_PPC32 666 + if (!pmac_newworld) 667 + flags |= OF_IMAP_OLDWORLD_MAC; 668 + if (get_property(of_chosen, "linux,bootx", NULL) != NULL) 669 + flags |= OF_IMAP_NO_PHANDLE; 670 + of_irq_map_init(flags); 671 + #endif /* CONFIG_PPC_32 */ 672 + 572 673 /* We first try to detect Apple's new Core99 chipset, since mac-io 573 674 * is quite different on those machines and contains an IBM MPIC2. 574 675 */ ··· 604 679 605 680 /* This used to be passed by the PMU driver but that link got 606 681 * broken with the new driver model. 
We use this tweak for now... 682 + * We really want to do things differently though... 607 683 */ 608 684 static int pmacpic_find_viaint(void) 609 685 { ··· 618 692 np = of_find_node_by_name(NULL, "via-pmu"); 619 693 if (np == NULL) 620 694 goto not_found; 621 - viaint = np->intrs[0].line; 695 + viaint = irq_of_parse_and_map(np, 0);; 622 696 #endif /* CONFIG_ADB_PMU */ 623 697 624 698 not_found:
+2
arch/powerpc/platforms/powermac/pmac.h
··· 12 12 13 13 struct rtc_time; 14 14 15 + extern int pmac_newworld; 16 + 15 17 extern long pmac_time_init(void); 16 18 extern unsigned long pmac_get_boot_time(void); 17 19 extern void pmac_get_rtc_time(struct rtc_time *);
-3
arch/powerpc/platforms/powermac/setup.c
··· 613 613 udbg_adb_init(!!strstr(cmd_line, "btextdbg")); 614 614 615 615 #ifdef CONFIG_PPC64 616 - /* Setup interrupt mapping options */ 617 - ppc64_interrupt_controller = IC_OPEN_PIC; 618 - 619 616 iommu_init_early_dart(); 620 617 #endif 621 618 }
+51 -27
arch/powerpc/platforms/pseries/ras.c
··· 72 72 73 73 /* #define DEBUG */ 74 74 75 - static void request_ras_irqs(struct device_node *np, char *propname, 75 + 76 + static void request_ras_irqs(struct device_node *np, 76 77 irqreturn_t (*handler)(int, void *, struct pt_regs *), 77 78 const char *name) 78 79 { 79 - unsigned int *ireg, len, i; 80 - int virq, n_intr; 80 + int i, index, count = 0; 81 + struct of_irq oirq; 82 + u32 *opicprop; 83 + unsigned int opicplen; 84 + unsigned int virqs[16]; 81 85 82 - ireg = (unsigned int *)get_property(np, propname, &len); 83 - if (ireg == NULL) 84 - return; 85 - n_intr = prom_n_intr_cells(np); 86 - len /= n_intr * sizeof(*ireg); 86 + /* Check for obsolete "open-pic-interrupt" property. If present, then 87 + * map those interrupts using the default interrupt host and default 88 + * trigger 89 + */ 90 + opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen); 91 + if (opicprop) { 92 + opicplen /= sizeof(u32); 93 + for (i = 0; i < opicplen; i++) { 94 + if (count > 15) 95 + break; 96 + virqs[count] = irq_create_mapping(NULL, *(opicprop++), 97 + IRQ_TYPE_NONE); 98 + if (virqs[count] == NO_IRQ) 99 + printk(KERN_ERR "Unable to allocate interrupt " 100 + "number for %s\n", np->full_name); 101 + else 102 + count++; 87 103 88 - for (i = 0; i < len; i++) { 89 - virq = virt_irq_create_mapping(*ireg); 90 - if (virq == NO_IRQ) { 91 - printk(KERN_ERR "Unable to allocate interrupt " 92 - "number for %s\n", np->full_name); 93 - return; 94 104 } 95 - if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) { 105 + } 106 + /* Else use normal interrupt tree parsing */ 107 + else { 108 + /* First try to do a proper OF tree parsing */ 109 + for (index = 0; of_irq_map_one(np, index, &oirq) == 0; 110 + index++) { 111 + if (count > 15) 112 + break; 113 + virqs[count] = irq_create_of_mapping(oirq.controller, 114 + oirq.specifier, 115 + oirq.size); 116 + if (virqs[count] == NO_IRQ) 117 + printk(KERN_ERR "Unable to allocate interrupt " 118 + "number for %s\n", np->full_name); 
119 + else 120 + count++; 121 + } 122 + } 123 + 124 + /* Now request them */ 125 + for (i = 0; i < count; i++) { 126 + if (request_irq(virqs[i], handler, 0, name, NULL)) { 96 127 printk(KERN_ERR "Unable to request interrupt %d for " 97 - "%s\n", irq_offset_up(virq), np->full_name); 128 + "%s\n", virqs[i], np->full_name); 98 129 return; 99 130 } 100 - ireg += n_intr; 101 131 } 102 132 } 103 133 ··· 145 115 /* Internal Errors */ 146 116 np = of_find_node_by_path("/event-sources/internal-errors"); 147 117 if (np != NULL) { 148 - request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt, 149 - "RAS_ERROR"); 150 - request_ras_irqs(np, "interrupts", ras_error_interrupt, 151 - "RAS_ERROR"); 118 + request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR"); 152 119 of_node_put(np); 153 120 } 154 121 155 122 /* EPOW Events */ 156 123 np = of_find_node_by_path("/event-sources/epow-events"); 157 124 if (np != NULL) { 158 - request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt, 159 - "RAS_EPOW"); 160 - request_ras_irqs(np, "interrupts", ras_epow_interrupt, 161 - "RAS_EPOW"); 125 + request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW"); 162 126 of_node_put(np); 163 127 } 164 128 ··· 186 162 187 163 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 188 164 RAS_VECTOR_OFFSET, 189 - virt_irq_to_real(irq_offset_down(irq)), 165 + irq_map[irq].hwirq, 190 166 RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, 191 167 critical, __pa(&ras_log_buf), 192 168 rtas_get_error_log_max()); ··· 222 198 223 199 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 224 200 RAS_VECTOR_OFFSET, 225 - virt_irq_to_real(irq_offset_down(irq)), 201 + irq_map[irq].hwirq, 226 202 RTAS_INTERNAL_ERROR, 1 /*Time Critical */, 227 203 __pa(&ras_log_buf), 228 204 rtas_get_error_log_max());
+130 -118
arch/powerpc/platforms/pseries/setup.c
··· 76 76 #define DBG(fmt...) 77 77 #endif 78 78 79 + /* move those away to a .h */ 80 + extern void smp_init_pseries_mpic(void); 81 + extern void smp_init_pseries_xics(void); 79 82 extern void find_udbg_vterm(void); 80 83 81 84 int fwnmi_active; /* TRUE if an FWNMI handler is present */ ··· 86 83 static void pseries_shared_idle_sleep(void); 87 84 static void pseries_dedicated_idle_sleep(void); 88 85 89 - struct mpic *pSeries_mpic; 86 + static struct device_node *pSeries_mpic_node; 90 87 91 88 static void pSeries_show_cpuinfo(struct seq_file *m) 92 89 { ··· 121 118 fwnmi_active = 1; 122 119 } 123 120 124 - void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc, 121 + void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, 125 122 struct pt_regs *regs) 126 123 { 127 - unsigned int max = 100; 128 - 129 - while(max--) { 130 - int cascade_irq = i8259_irq(regs); 131 - if (max == 99) 132 - desc->chip->eoi(irq); 133 - if (cascade_irq < 0) 134 - break; 124 + unsigned int cascade_irq = i8259_irq(regs); 125 + if (cascade_irq != NO_IRQ) 135 126 generic_handle_irq(cascade_irq, regs); 136 - }; 127 + desc->chip->eoi(irq); 137 128 } 138 129 139 - static void __init pSeries_init_mpic(void) 130 + static void __init pseries_mpic_init_IRQ(void) 140 131 { 132 + struct device_node *np, *old, *cascade = NULL; 141 133 unsigned int *addrp; 142 - struct device_node *np; 143 134 unsigned long intack = 0; 144 - 145 - /* All ISUs are setup, complete initialization */ 146 - mpic_init(pSeries_mpic); 147 - 148 - /* Check what kind of cascade ACK we have */ 149 - if (!(np = of_find_node_by_name(NULL, "pci")) 150 - || !(addrp = (unsigned int *) 151 - get_property(np, "8259-interrupt-acknowledge", NULL))) 152 - printk(KERN_ERR "Cannot find pci to get ack address\n"); 153 - else 154 - intack = addrp[prom_n_addr_cells(np)-1]; 155 - of_node_put(np); 156 - 157 - /* Setup the legacy interrupts & controller */ 158 - i8259_init(intack, 0); 159 - 160 - /* Hook cascade to mpic */ 161 - 
set_irq_chained_handler(NUM_ISA_INTERRUPTS, pSeries_8259_cascade); 162 - } 163 - 164 - static void __init pSeries_setup_mpic(void) 165 - { 166 135 unsigned int *opprop; 167 136 unsigned long openpic_addr = 0; 168 - unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS]; 169 - struct device_node *root; 170 - int irq_count; 137 + unsigned int cascade_irq; 138 + int naddr, n, i, opplen; 139 + struct mpic *mpic; 171 140 172 - /* Find the Open PIC if present */ 173 - root = of_find_node_by_path("/"); 174 - opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL); 141 + np = of_find_node_by_path("/"); 142 + naddr = prom_n_addr_cells(np); 143 + opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen); 175 144 if (opprop != 0) { 176 - int n = prom_n_addr_cells(root); 177 - 178 - for (openpic_addr = 0; n > 0; --n) 179 - openpic_addr = (openpic_addr << 32) + *opprop++; 145 + openpic_addr = of_read_number(opprop, naddr); 180 146 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); 181 147 } 182 - of_node_put(root); 148 + of_node_put(np); 183 149 184 150 BUG_ON(openpic_addr == 0); 185 151 186 - /* Get the sense values from OF */ 187 - prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS); 188 - 189 152 /* Setup the openpic driver */ 190 - irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ 191 - pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY, 192 - 16, 16, irq_count, /* isu size, irq offset, irq count */ 193 - NR_IRQS - 4, /* ipi offset */ 194 - senses, irq_count, /* sense & sense size */ 195 - " MPIC "); 153 + mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, 154 + MPIC_PRIMARY, 155 + 16, 250, /* isu size, irq count */ 156 + " MPIC "); 157 + BUG_ON(mpic == NULL); 158 + 159 + /* Add ISUs */ 160 + opplen /= sizeof(u32); 161 + for (n = 0, i = naddr; i < opplen; i += naddr, n++) { 162 + unsigned long isuaddr = of_read_number(opprop + i, naddr); 163 + mpic_assign_isu(mpic, n, isuaddr); 164 + } 165 + 166 + /* All ISUs are 
setup, complete initialization */ 167 + mpic_init(mpic); 168 + 169 + /* Look for cascade */ 170 + for_each_node_by_type(np, "interrupt-controller") 171 + if (device_is_compatible(np, "chrp,iic")) { 172 + cascade = np; 173 + break; 174 + } 175 + if (cascade == NULL) 176 + return; 177 + 178 + cascade_irq = irq_of_parse_and_map(cascade, 0); 179 + if (cascade == NO_IRQ) { 180 + printk(KERN_ERR "xics: failed to map cascade interrupt"); 181 + return; 182 + } 183 + 184 + /* Check ACK type */ 185 + for (old = of_node_get(cascade); old != NULL ; old = np) { 186 + np = of_get_parent(old); 187 + of_node_put(old); 188 + if (np == NULL) 189 + break; 190 + if (strcmp(np->name, "pci") != 0) 191 + continue; 192 + addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", 193 + NULL); 194 + if (addrp == NULL) 195 + continue; 196 + naddr = prom_n_addr_cells(np); 197 + intack = addrp[naddr-1]; 198 + if (naddr > 1) 199 + intack |= ((unsigned long)addrp[naddr-2]) << 32; 200 + } 201 + if (intack) 202 + printk(KERN_DEBUG "mpic: PCI 8259 intack at 0x%016lx\n", 203 + intack); 204 + i8259_init(cascade, intack); 205 + of_node_put(cascade); 206 + set_irq_chained_handler(cascade_irq, pseries_8259_cascade); 196 207 } 197 208 198 209 static void pseries_lpar_enable_pmcs(void) ··· 224 207 get_lppaca()->pmcregs_in_use = 1; 225 208 } 226 209 210 + #ifdef CONFIG_KEXEC 211 + static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary) 212 + { 213 + mpic_teardown_this_cpu(secondary); 214 + } 215 + 216 + static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) 217 + { 218 + /* Don't risk a hypervisor call if we're crashing */ 219 + if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { 220 + unsigned long vpa = __pa(get_lppaca()); 221 + 222 + if (unregister_vpa(hard_smp_processor_id(), vpa)) { 223 + printk("VPA deregistration of cpu %u (hw_cpu_id %d) " 224 + "failed\n", smp_processor_id(), 225 + hard_smp_processor_id()); 226 + } 227 + } 228 + 
xics_teardown_cpu(secondary); 229 + } 230 + #endif /* CONFIG_KEXEC */ 231 + 232 + static void __init pseries_discover_pic(void) 233 + { 234 + struct device_node *np; 235 + char *typep; 236 + 237 + for (np = NULL; (np = of_find_node_by_name(np, 238 + "interrupt-controller"));) { 239 + typep = (char *)get_property(np, "compatible", NULL); 240 + if (strstr(typep, "open-pic")) { 241 + pSeries_mpic_node = of_node_get(np); 242 + ppc_md.init_IRQ = pseries_mpic_init_IRQ; 243 + ppc_md.get_irq = mpic_get_irq; 244 + #ifdef CONFIG_KEXEC 245 + ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic; 246 + #endif 247 + #ifdef CONFIG_SMP 248 + smp_init_pseries_mpic(); 249 + #endif 250 + return; 251 + } else if (strstr(typep, "ppc-xicp")) { 252 + ppc_md.init_IRQ = xics_init_IRQ; 253 + #ifdef CONFIG_KEXEC 254 + ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; 255 + #endif 256 + #ifdef CONFIG_SMP 257 + smp_init_pseries_xics(); 258 + #endif 259 + return; 260 + } 261 + } 262 + printk(KERN_ERR "pSeries_discover_pic: failed to recognize" 263 + " interrupt-controller\n"); 264 + } 265 + 227 266 static void __init pSeries_setup_arch(void) 228 267 { 229 - /* Fixup ppc_md depending on the type of interrupt controller */ 230 - if (ppc64_interrupt_controller == IC_OPEN_PIC) { 231 - ppc_md.init_IRQ = pSeries_init_mpic; 232 - ppc_md.get_irq = mpic_get_irq; 233 - /* Allocate the mpic now, so that find_and_init_phbs() can 234 - * fill the ISUs */ 235 - pSeries_setup_mpic(); 236 - } else 237 - ppc_md.init_IRQ = xics_init_IRQ; 268 + /* Discover PIC type and setup ppc_md accordingly */ 269 + pseries_discover_pic(); 238 270 239 - #ifdef CONFIG_SMP 240 - smp_init_pSeries(); 241 - #endif 242 271 /* openpic global configuration register (64-bit format). */ 243 272 /* openpic Interrupt Source Unit pointer (64-bit format). */ 244 273 /* python0 facility area (mmio) (64-bit format) REAL address. 
*/ ··· 336 273 } 337 274 arch_initcall(pSeries_init_panel); 338 275 339 - static void __init pSeries_discover_pic(void) 340 - { 341 - struct device_node *np; 342 - char *typep; 343 - 344 - /* 345 - * Setup interrupt mapping options that are needed for finish_device_tree 346 - * to properly parse the OF interrupt tree & do the virtual irq mapping 347 - */ 348 - __irq_offset_value = NUM_ISA_INTERRUPTS; 349 - ppc64_interrupt_controller = IC_INVALID; 350 - for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) { 351 - typep = (char *)get_property(np, "compatible", NULL); 352 - if (strstr(typep, "open-pic")) { 353 - ppc64_interrupt_controller = IC_OPEN_PIC; 354 - break; 355 - } else if (strstr(typep, "ppc-xicp")) { 356 - ppc64_interrupt_controller = IC_PPC_XIC; 357 - break; 358 - } 359 - } 360 - if (ppc64_interrupt_controller == IC_INVALID) 361 - printk("pSeries_discover_pic: failed to recognize" 362 - " interrupt-controller\n"); 363 - 364 - } 365 - 366 276 static void pSeries_mach_cpu_die(void) 367 277 { 368 278 local_irq_disable(); ··· 377 341 ppc_md.set_dabr = pseries_set_xdabr; 378 342 379 343 iommu_init_early_pSeries(); 380 - 381 - pSeries_discover_pic(); 382 344 383 345 DBG(" <- pSeries_init_early()\n"); 384 346 } ··· 549 515 return PCI_PROBE_NORMAL; 550 516 } 551 517 552 - #ifdef CONFIG_KEXEC 553 - static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) 554 - { 555 - /* Don't risk a hypervisor call if we're crashing */ 556 - if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { 557 - unsigned long vpa = __pa(get_lppaca()); 558 - 559 - if (unregister_vpa(hard_smp_processor_id(), vpa)) { 560 - printk("VPA deregistration of cpu %u (hw_cpu_id %d) " 561 - "failed\n", smp_processor_id(), 562 - hard_smp_processor_id()); 563 - } 564 - } 565 - 566 - if (ppc64_interrupt_controller == IC_OPEN_PIC) 567 - mpic_teardown_this_cpu(secondary); 568 - else 569 - xics_teardown_cpu(secondary); 570 - } 571 - #endif 572 - 573 518 
define_machine(pseries) { 574 519 .name = "pSeries", 575 520 .probe = pSeries_probe, ··· 573 560 .system_reset_exception = pSeries_system_reset_exception, 574 561 .machine_check_exception = pSeries_machine_check_exception, 575 562 #ifdef CONFIG_KEXEC 576 - .kexec_cpu_down = pseries_kexec_cpu_down, 577 563 .machine_kexec = default_machine_kexec, 578 564 .machine_kexec_prepare = default_machine_kexec_prepare, 579 565 .machine_crash_shutdown = default_machine_crash_shutdown,
+16 -16
arch/powerpc/platforms/pseries/smp.c
··· 416 416 #endif 417 417 418 418 /* This is called very early */ 419 - void __init smp_init_pSeries(void) 419 + static void __init smp_init_pseries(void) 420 420 { 421 421 int i; 422 422 423 423 DBG(" -> smp_init_pSeries()\n"); 424 - 425 - switch (ppc64_interrupt_controller) { 426 - #ifdef CONFIG_MPIC 427 - case IC_OPEN_PIC: 428 - smp_ops = &pSeries_mpic_smp_ops; 429 - break; 430 - #endif 431 - #ifdef CONFIG_XICS 432 - case IC_PPC_XIC: 433 - smp_ops = &pSeries_xics_smp_ops; 434 - break; 435 - #endif 436 - default: 437 - panic("Invalid interrupt controller"); 438 - } 439 424 440 425 #ifdef CONFIG_HOTPLUG_CPU 441 426 smp_ops->cpu_disable = pSeries_cpu_disable; ··· 456 471 DBG(" <- smp_init_pSeries()\n"); 457 472 } 458 473 474 + #ifdef CONFIG_MPIC 475 + void __init smp_init_pseries_mpic(void) 476 + { 477 + smp_ops = &pSeries_mpic_smp_ops; 478 + 479 + smp_init_pseries(); 480 + } 481 + #endif 482 + 483 + void __init smp_init_pseries_xics(void) 484 + { 485 + smp_ops = &pSeries_xics_smp_ops; 486 + 487 + smp_init_pseries(); 488 + }
+303 -194
arch/powerpc/platforms/pseries/xics.c
··· 8 8 * as published by the Free Software Foundation; either version 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 + 12 + #undef DEBUG 13 + 11 14 #include <linux/types.h> 12 15 #include <linux/threads.h> 13 16 #include <linux/kernel.h> ··· 22 19 #include <linux/gfp.h> 23 20 #include <linux/radix-tree.h> 24 21 #include <linux/cpu.h> 22 + 25 23 #include <asm/firmware.h> 26 24 #include <asm/prom.h> 27 25 #include <asm/io.h> ··· 34 30 #include <asm/i8259.h> 35 31 36 32 #include "xics.h" 37 - 38 - /* This is used to map real irq numbers to virtual */ 39 - static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC); 40 33 41 34 #define XICS_IPI 2 42 35 #define XICS_IRQ_SPURIOUS 0 ··· 65 64 66 65 static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; 67 66 68 - static int xics_irq_8259_cascade = 0; 69 - static int xics_irq_8259_cascade_real = 0; 70 67 static unsigned int default_server = 0xFF; 71 68 static unsigned int default_distrib_server = 0; 72 69 static unsigned int interrupt_server_size = 8; 70 + 71 + static struct irq_host *xics_host; 73 72 74 73 /* 75 74 * XICS only has a single IPI, so encode the messages per CPU ··· 86 85 /* Direct HW low level accessors */ 87 86 88 87 89 - static inline int direct_xirr_info_get(int n_cpu) 88 + static inline unsigned int direct_xirr_info_get(int n_cpu) 90 89 { 91 90 return in_be32(&xics_per_cpu[n_cpu]->xirr.word); 92 91 } ··· 131 130 return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); 132 131 } 133 132 134 - static inline int lpar_xirr_info_get(int n_cpu) 133 + static inline unsigned int lpar_xirr_info_get(int n_cpu) 135 134 { 136 135 unsigned long lpar_rc; 137 136 unsigned long return_value; ··· 139 138 lpar_rc = plpar_xirr(&return_value); 140 139 if (lpar_rc != H_SUCCESS) 141 140 panic(" bad return code xirr - rc = %lx \n", lpar_rc); 142 - return (int)return_value; 141 + return (unsigned int)return_value; 143 142 } 144 143 145 144 static inline void lpar_xirr_info_set(int 
n_cpu, int value) ··· 176 175 177 176 178 177 #ifdef CONFIG_SMP 179 - static int get_irq_server(unsigned int irq) 178 + static int get_irq_server(unsigned int virq) 180 179 { 181 180 unsigned int server; 182 181 /* For the moment only implement delivery to all cpus or one cpu */ 183 - cpumask_t cpumask = irq_desc[irq].affinity; 182 + cpumask_t cpumask = irq_desc[virq].affinity; 184 183 cpumask_t tmp = CPU_MASK_NONE; 185 184 186 185 if (!distribute_irqs) ··· 201 200 202 201 } 203 202 #else 204 - static int get_irq_server(unsigned int irq) 203 + static int get_irq_server(unsigned int virq) 205 204 { 206 205 return default_server; 207 206 } ··· 214 213 int call_status; 215 214 unsigned int server; 216 215 217 - irq = virt_irq_to_real(irq_offset_down(virq)); 218 - WARN_ON(irq == NO_IRQ); 219 - if (irq == XICS_IPI || irq == NO_IRQ) 216 + pr_debug("xics: unmask virq %d\n", virq); 217 + 218 + irq = (unsigned int)irq_map[virq].hwirq; 219 + pr_debug(" -> map to hwirq 0x%x\n", irq); 220 + if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) 220 221 return; 221 222 222 223 server = get_irq_server(virq); ··· 270 267 { 271 268 unsigned int irq; 272 269 273 - irq = virt_irq_to_real(irq_offset_down(virq)); 274 - WARN_ON(irq == NO_IRQ); 275 - if (irq != NO_IRQ) 276 - xics_mask_real_irq(irq); 277 - } 270 + pr_debug("xics: mask virq %d\n", virq); 278 271 279 - static void xics_set_irq_revmap(unsigned int virq) 280 - { 281 - unsigned int irq; 282 - 283 - irq = irq_offset_down(virq); 284 - if (radix_tree_insert(&irq_map, virt_irq_to_real(irq), 285 - &virt_irq_to_real_map[irq]) == -ENOMEM) 286 - printk(KERN_CRIT "Out of memory creating real -> virtual" 287 - " IRQ mapping for irq %u (real 0x%x)\n", 288 - virq, virt_irq_to_real(irq)); 272 + irq = (unsigned int)irq_map[virq].hwirq; 273 + if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) 274 + return; 275 + xics_mask_real_irq(irq); 289 276 } 290 277 291 278 static unsigned int xics_startup(unsigned int virq) 292 279 { 293 - 
xics_set_irq_revmap(virq); 280 + unsigned int irq; 281 + 282 + /* force a reverse mapping of the interrupt so it gets in the cache */ 283 + irq = (unsigned int)irq_map[virq].hwirq; 284 + irq_radix_revmap(xics_host, irq); 285 + 286 + /* unmask it */ 294 287 xics_unmask_irq(virq); 295 288 return 0; 296 289 } 297 290 298 - static unsigned int real_irq_to_virt(unsigned int real_irq) 299 - { 300 - unsigned int *ptr; 301 - 302 - ptr = radix_tree_lookup(&irq_map, real_irq); 303 - if (ptr == NULL) 304 - return NO_IRQ; 305 - return ptr - virt_irq_to_real_map; 306 - } 307 - 308 - static void xics_eoi_direct(unsigned int irq) 291 + static void xics_eoi_direct(unsigned int virq) 309 292 { 310 293 int cpu = smp_processor_id(); 294 + unsigned int irq = (unsigned int)irq_map[virq].hwirq; 311 295 312 296 iosync(); 313 - direct_xirr_info_set(cpu, ((0xff << 24) | 314 - (virt_irq_to_real(irq_offset_down(irq))))); 297 + direct_xirr_info_set(cpu, (0xff << 24) | irq); 315 298 } 316 299 317 300 318 - static void xics_eoi_lpar(unsigned int irq) 301 + static void xics_eoi_lpar(unsigned int virq) 319 302 { 320 303 int cpu = smp_processor_id(); 304 + unsigned int irq = (unsigned int)irq_map[virq].hwirq; 321 305 322 306 iosync(); 323 - lpar_xirr_info_set(cpu, ((0xff << 24) | 324 - (virt_irq_to_real(irq_offset_down(irq))))); 325 - 307 + lpar_xirr_info_set(cpu, (0xff << 24) | irq); 326 308 } 327 309 328 - static inline int xics_remap_irq(int vec) 310 + static inline unsigned int xics_remap_irq(unsigned int vec) 329 311 { 330 - int irq; 312 + unsigned int irq; 331 313 332 314 vec &= 0x00ffffff; 333 315 334 316 if (vec == XICS_IRQ_SPURIOUS) 335 317 return NO_IRQ; 336 - 337 - irq = real_irq_to_virt(vec); 338 - if (irq == NO_IRQ) 339 - irq = real_irq_to_virt_slowpath(vec); 318 + irq = irq_radix_revmap(xics_host, vec); 340 319 if (likely(irq != NO_IRQ)) 341 - return irq_offset_up(irq); 320 + return irq; 342 321 343 322 printk(KERN_ERR "Interrupt %u (real) is invalid," 344 323 " disabling it.\n", 
vec); ··· 328 343 return NO_IRQ; 329 344 } 330 345 331 - static int xics_get_irq_direct(struct pt_regs *regs) 346 + static unsigned int xics_get_irq_direct(struct pt_regs *regs) 332 347 { 333 348 unsigned int cpu = smp_processor_id(); 334 349 335 350 return xics_remap_irq(direct_xirr_info_get(cpu)); 336 351 } 337 352 338 - static int xics_get_irq_lpar(struct pt_regs *regs) 353 + static unsigned int xics_get_irq_lpar(struct pt_regs *regs) 339 354 { 340 355 unsigned int cpu = smp_processor_id(); 341 356 ··· 422 437 unsigned long newmask; 423 438 cpumask_t tmp = CPU_MASK_NONE; 424 439 425 - irq = virt_irq_to_real(irq_offset_down(virq)); 426 - if (irq == XICS_IPI || irq == NO_IRQ) 440 + irq = (unsigned int)irq_map[virq].hwirq; 441 + if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) 427 442 return; 428 443 429 444 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); ··· 454 469 } 455 470 } 456 471 472 + void xics_setup_cpu(void) 473 + { 474 + int cpu = smp_processor_id(); 475 + 476 + xics_set_cpu_priority(cpu, 0xff); 477 + 478 + /* 479 + * Put the calling processor into the GIQ. This is really only 480 + * necessary from a secondary thread as the OF start-cpu interface 481 + * performs this function for us on primary threads. 482 + * 483 + * XXX: undo of teardown on kexec needs this too, as may hotplug 484 + */ 485 + rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 486 + (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); 487 + } 488 + 489 + 457 490 static struct irq_chip xics_pic_direct = { 458 491 .typename = " XICS ", 459 492 .startup = xics_startup, ··· 492 489 }; 493 490 494 491 495 - void xics_setup_cpu(void) 492 + static int xics_host_match(struct irq_host *h, struct device_node *node) 496 493 { 497 - int cpu = smp_processor_id(); 498 - 499 - xics_set_cpu_priority(cpu, 0xff); 500 - 501 - /* 502 - * Put the calling processor into the GIQ. 
This is really only 503 - * necessary from a secondary thread as the OF start-cpu interface 504 - * performs this function for us on primary threads. 505 - * 506 - * XXX: undo of teardown on kexec needs this too, as may hotplug 494 + /* IBM machines have interrupt parents of various funky types for things 495 + * like vdevices, events, etc... The trick we use here is to match 496 + * everything here except the legacy 8259 which is compatible "chrp,iic" 507 497 */ 508 - rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 509 - (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); 498 + return !device_is_compatible(node, "chrp,iic"); 510 499 } 511 500 512 - void xics_init_IRQ(void) 501 + static int xics_host_map_direct(struct irq_host *h, unsigned int virq, 502 + irq_hw_number_t hw, unsigned int flags) 503 + { 504 + unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; 505 + 506 + pr_debug("xics: map_direct virq %d, hwirq 0x%lx, flags: 0x%x\n", 507 + virq, hw, flags); 508 + 509 + if (sense && sense != IRQ_TYPE_LEVEL_LOW) 510 + printk(KERN_WARNING "xics: using unsupported sense 0x%x" 511 + " for irq %d (h: 0x%lx)\n", flags, virq, hw); 512 + 513 + get_irq_desc(virq)->status |= IRQ_LEVEL; 514 + set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq); 515 + return 0; 516 + } 517 + 518 + static int xics_host_map_lpar(struct irq_host *h, unsigned int virq, 519 + irq_hw_number_t hw, unsigned int flags) 520 + { 521 + unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; 522 + 523 + pr_debug("xics: map_lpar virq %d, hwirq 0x%lx, flags: 0x%x\n", 524 + virq, hw, flags); 525 + 526 + if (sense && sense != IRQ_TYPE_LEVEL_LOW) 527 + printk(KERN_WARNING "xics: using unsupported sense 0x%x" 528 + " for irq %d (h: 0x%lx)\n", flags, virq, hw); 529 + 530 + get_irq_desc(virq)->status |= IRQ_LEVEL; 531 + set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq); 532 + return 0; 533 + } 534 + 535 + static int xics_host_xlate(struct irq_host *h, struct device_node *ct, 536 + 
u32 *intspec, unsigned int intsize, 537 + irq_hw_number_t *out_hwirq, unsigned int *out_flags) 538 + 539 + { 540 + /* Current xics implementation translates everything 541 + * to level. It is not technically right for MSIs but this 542 + * is irrelevant at this point. We might get smarter in the future 543 + */ 544 + *out_hwirq = intspec[0]; 545 + *out_flags = IRQ_TYPE_LEVEL_LOW; 546 + 547 + return 0; 548 + } 549 + 550 + static struct irq_host_ops xics_host_direct_ops = { 551 + .match = xics_host_match, 552 + .map = xics_host_map_direct, 553 + .xlate = xics_host_xlate, 554 + }; 555 + 556 + static struct irq_host_ops xics_host_lpar_ops = { 557 + .match = xics_host_match, 558 + .map = xics_host_map_lpar, 559 + .xlate = xics_host_xlate, 560 + }; 561 + 562 + static void __init xics_init_host(void) 563 + { 564 + struct irq_host_ops *ops; 565 + 566 + if (firmware_has_feature(FW_FEATURE_LPAR)) 567 + ops = &xics_host_lpar_ops; 568 + else 569 + ops = &xics_host_direct_ops; 570 + xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops, 571 + XICS_IRQ_SPURIOUS); 572 + BUG_ON(xics_host == NULL); 573 + irq_set_default_host(xics_host); 574 + } 575 + 576 + static void __init xics_map_one_cpu(int hw_id, unsigned long addr, 577 + unsigned long size) 578 + { 579 + #ifdef CONFIG_SMP 580 + int i; 581 + 582 + /* This may look gross but it's good enough for now, we don't quite 583 + * have a hard -> linux processor id matching. 
584 + */ 585 + for_each_possible_cpu(i) { 586 + if (!cpu_present(i)) 587 + continue; 588 + if (hw_id == get_hard_smp_processor_id(i)) { 589 + xics_per_cpu[i] = ioremap(addr, size); 590 + return; 591 + } 592 + } 593 + #else 594 + if (hw_id != 0) 595 + return; 596 + xics_per_cpu[0] = ioremap(addr, size); 597 + #endif /* CONFIG_SMP */ 598 + } 599 + 600 + static void __init xics_init_one_node(struct device_node *np, 601 + unsigned int *indx) 602 + { 603 + unsigned int ilen; 604 + u32 *ireg; 605 + 606 + /* This code does the theorically broken assumption that the interrupt 607 + * server numbers are the same as the hard CPU numbers. 608 + * This happens to be the case so far but we are playing with fire... 609 + * should be fixed one of these days. -BenH. 610 + */ 611 + ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL); 612 + 613 + /* Do that ever happen ? we'll know soon enough... but even good'old 614 + * f80 does have that property .. 615 + */ 616 + WARN_ON(ireg == NULL); 617 + if (ireg) { 618 + /* 619 + * set node starting index for this node 620 + */ 621 + *indx = *ireg; 622 + } 623 + ireg = (u32 *)get_property(np, "reg", &ilen); 624 + if (!ireg) 625 + panic("xics_init_IRQ: can't find interrupt reg property"); 626 + 627 + while (ilen >= (4 * sizeof(u32))) { 628 + unsigned long addr, size; 629 + 630 + /* XXX Use proper OF parsing code here !!! 
*/ 631 + addr = (unsigned long)*ireg++ << 32; 632 + ilen -= sizeof(u32); 633 + addr |= *ireg++; 634 + ilen -= sizeof(u32); 635 + size = (unsigned long)*ireg++ << 32; 636 + ilen -= sizeof(u32); 637 + size |= *ireg++; 638 + ilen -= sizeof(u32); 639 + xics_map_one_cpu(*indx, addr, size); 640 + (*indx)++; 641 + } 642 + } 643 + 644 + 645 + static void __init xics_setup_8259_cascade(void) 646 + { 647 + struct device_node *np, *old, *found = NULL; 648 + int cascade, naddr; 649 + u32 *addrp; 650 + unsigned long intack = 0; 651 + 652 + for_each_node_by_type(np, "interrupt-controller") 653 + if (device_is_compatible(np, "chrp,iic")) { 654 + found = np; 655 + break; 656 + } 657 + if (found == NULL) { 658 + printk(KERN_DEBUG "xics: no ISA interrupt controller\n"); 659 + return; 660 + } 661 + cascade = irq_of_parse_and_map(found, 0); 662 + if (cascade == NO_IRQ) { 663 + printk(KERN_ERR "xics: failed to map cascade interrupt"); 664 + return; 665 + } 666 + pr_debug("xics: cascade mapped to irq %d\n", cascade); 667 + 668 + for (old = of_node_get(found); old != NULL ; old = np) { 669 + np = of_get_parent(old); 670 + of_node_put(old); 671 + if (np == NULL) 672 + break; 673 + if (strcmp(np->name, "pci") != 0) 674 + continue; 675 + addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL); 676 + if (addrp == NULL) 677 + continue; 678 + naddr = prom_n_addr_cells(np); 679 + intack = addrp[naddr-1]; 680 + if (naddr > 1) 681 + intack |= ((unsigned long)addrp[naddr-2]) << 32; 682 + } 683 + if (intack) 684 + printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack); 685 + i8259_init(found, intack); 686 + of_node_put(found); 687 + set_irq_chained_handler(cascade, pseries_8259_cascade); 688 + } 689 + 690 + void __init xics_init_IRQ(void) 513 691 { 514 692 int i; 515 - unsigned long intr_size = 0; 516 693 struct device_node *np; 517 - uint *ireg, ilen, indx = 0; 518 - unsigned long intr_base = 0; 519 - struct xics_interrupt_node { 520 - unsigned long addr; 521 - unsigned long 
size; 522 - } intnodes[NR_CPUS]; 523 - struct irq_chip *chip; 694 + u32 *ireg, ilen, indx = 0; 695 + int found = 0; 524 696 525 697 ppc64_boot_msg(0x20, "XICS Init"); 526 698 ··· 704 526 ibm_int_on = rtas_token("ibm,int-on"); 705 527 ibm_int_off = rtas_token("ibm,int-off"); 706 528 707 - np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); 708 - if (!np) 709 - panic("xics_init_IRQ: can't find interrupt presentation"); 710 - 711 - nextnode: 712 - ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL); 713 - if (ireg) { 714 - /* 715 - * set node starting index for this node 716 - */ 717 - indx = *ireg; 529 + for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { 530 + found = 1; 531 + if (firmware_has_feature(FW_FEATURE_LPAR)) 532 + break; 533 + xics_init_one_node(np, &indx); 718 534 } 535 + if (found == 0) 536 + return; 719 537 720 - ireg = (uint *)get_property(np, "reg", &ilen); 721 - if (!ireg) 722 - panic("xics_init_IRQ: can't find interrupt reg property"); 723 - 724 - while (ilen) { 725 - intnodes[indx].addr = (unsigned long)*ireg++ << 32; 726 - ilen -= sizeof(uint); 727 - intnodes[indx].addr |= *ireg++; 728 - ilen -= sizeof(uint); 729 - intnodes[indx].size = (unsigned long)*ireg++ << 32; 730 - ilen -= sizeof(uint); 731 - intnodes[indx].size |= *ireg++; 732 - ilen -= sizeof(uint); 733 - indx++; 734 - if (indx >= NR_CPUS) break; 735 - } 736 - 737 - np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation"); 738 - if ((indx < NR_CPUS) && np) goto nextnode; 538 + xics_init_host(); 739 539 740 540 /* Find the server numbers for the boot cpu. 
*/ 741 541 for (np = of_find_node_by_type(NULL, "cpu"); 742 542 np; 743 543 np = of_find_node_by_type(np, "cpu")) { 744 - ireg = (uint *)get_property(np, "reg", &ilen); 544 + ireg = (u32 *)get_property(np, "reg", &ilen); 745 545 if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { 746 - ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", 747 - &ilen); 546 + ireg = (u32 *)get_property(np, 547 + "ibm,ppc-interrupt-gserver#s", 548 + &ilen); 748 549 i = ilen / sizeof(int); 749 550 if (ireg && i > 0) { 750 551 default_server = ireg[0]; 751 - default_distrib_server = ireg[i-1]; /* take last element */ 552 + /* take last element */ 553 + default_distrib_server = ireg[i-1]; 752 554 } 753 - ireg = (uint *)get_property(np, 555 + ireg = (u32 *)get_property(np, 754 556 "ibm,interrupt-server#-size", NULL); 755 557 if (ireg) 756 558 interrupt_server_size = *ireg; ··· 739 581 } 740 582 of_node_put(np); 741 583 742 - intr_base = intnodes[0].addr; 743 - intr_size = intnodes[0].size; 744 - 745 - if (firmware_has_feature(FW_FEATURE_LPAR)) { 746 - ppc_md.get_irq = xics_get_irq_lpar; 747 - chip = &xics_pic_lpar; 748 - } else { 749 - #ifdef CONFIG_SMP 750 - for_each_possible_cpu(i) { 751 - int hard_id; 752 - 753 - /* FIXME: Do this dynamically! --RR */ 754 - if (!cpu_present(i)) 755 - continue; 756 - 757 - hard_id = get_hard_smp_processor_id(i); 758 - xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, 759 - intnodes[hard_id].size); 760 - } 761 - #else 762 - xics_per_cpu[0] = ioremap(intr_base, intr_size); 763 - #endif /* CONFIG_SMP */ 584 + if (firmware_has_feature(FW_FEATURE_LPAR)) 585 + ppc_md.get_irq = xics_get_irq_lpar; 586 + else 764 587 ppc_md.get_irq = xics_get_irq_direct; 765 - chip = &xics_pic_direct; 766 - 767 - } 768 - 769 - for (i = irq_offset_value(); i < NR_IRQS; ++i) { 770 - /* All IRQs on XICS are level for now. 
MSI code may want to modify 771 - * that for reporting purposes 772 - */ 773 - get_irq_desc(i)->status |= IRQ_LEVEL; 774 - set_irq_chip_and_handler(i, chip, handle_fasteoi_irq); 775 - } 776 588 777 589 xics_setup_cpu(); 778 590 591 + xics_setup_8259_cascade(); 592 + 779 593 ppc64_boot_msg(0x21, "XICS Done"); 780 594 } 781 - 782 - static int xics_setup_8259_cascade(void) 783 - { 784 - struct device_node *np; 785 - uint *ireg; 786 - 787 - np = of_find_node_by_type(NULL, "interrupt-controller"); 788 - if (np == NULL) { 789 - printk(KERN_WARNING "xics: no ISA interrupt controller\n"); 790 - xics_irq_8259_cascade_real = -1; 791 - xics_irq_8259_cascade = -1; 792 - return 0; 793 - } 794 - 795 - ireg = (uint *) get_property(np, "interrupts", NULL); 796 - if (!ireg) 797 - panic("xics_init_IRQ: can't find ISA interrupts property"); 798 - 799 - xics_irq_8259_cascade_real = *ireg; 800 - xics_irq_8259_cascade = irq_offset_up 801 - (virt_irq_create_mapping(xics_irq_8259_cascade_real)); 802 - i8259_init(0, 0); 803 - of_node_put(np); 804 - 805 - xics_set_irq_revmap(xics_irq_8259_cascade); 806 - set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade); 807 - 808 - return 0; 809 - } 810 - arch_initcall(xics_setup_8259_cascade); 811 595 812 596 813 597 #ifdef CONFIG_SMP 814 598 void xics_request_IPIs(void) 815 599 { 816 - virt_irq_to_real_map[XICS_IPI] = XICS_IPI; 600 + unsigned int ipi; 601 + 602 + ipi = irq_create_mapping(xics_host, XICS_IPI, 0); 603 + BUG_ON(ipi == NO_IRQ); 817 604 818 605 /* 819 606 * IPIs are marked IRQF_DISABLED as they must run with irqs 820 607 * disabled 821 608 */ 822 - set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq); 609 + set_irq_handler(ipi, handle_percpu_irq); 823 610 if (firmware_has_feature(FW_FEATURE_LPAR)) 824 - request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar, 825 - SA_INTERRUPT, "IPI", NULL); 611 + request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED, 612 + "IPI", NULL); 826 613 else 827 - 
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct, 828 - SA_INTERRUPT, "IPI", NULL); 614 + request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED, 615 + "IPI", NULL); 829 616 } 830 617 #endif /* CONFIG_SMP */ 831 618 832 619 void xics_teardown_cpu(int secondary) 833 620 { 834 - struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI)); 835 621 int cpu = smp_processor_id(); 622 + unsigned int ipi; 623 + struct irq_desc *desc; 836 624 837 - xics_set_cpu_priority(cpu, 0); 625 + xics_set_cpu_priority(cpu, 0); 838 626 839 627 /* 840 628 * we need to EOI the IPI if we got here from kexec down IPI ··· 789 685 * should we be flagging idle loop instead? 790 686 * or creating some task to be scheduled? 791 687 */ 688 + 689 + ipi = irq_find_mapping(xics_host, XICS_IPI); 690 + if (ipi == XICS_IRQ_SPURIOUS) 691 + return; 692 + desc = get_irq_desc(ipi); 792 693 if (desc->chip && desc->chip->eoi) 793 694 desc->chip->eoi(XICS_IPI); 794 695 ··· 803 694 */ 804 695 if (secondary) 805 696 rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 806 - (1UL << interrupt_server_size) - 1 - 807 - default_distrib_server, 0); 697 + (1UL << interrupt_server_size) - 1 - 698 + default_distrib_server, 0); 808 699 } 809 700 810 701 #ifdef CONFIG_HOTPLUG_CPU ··· 832 723 unsigned long flags; 833 724 834 725 /* We cant set affinity on ISA interrupts */ 835 - if (virq < irq_offset_value()) 726 + if (virq < NUM_ISA_INTERRUPTS) 836 727 continue; 837 - 838 - desc = get_irq_desc(virq); 839 - irq = virt_irq_to_real(irq_offset_down(virq)); 840 - 728 + if (irq_map[virq].host != xics_host) 729 + continue; 730 + irq = (unsigned int)irq_map[virq].hwirq; 841 731 /* We need to get IPIs still. */ 842 - if (irq == XICS_IPI || irq == NO_IRQ) 732 + if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) 843 733 continue; 734 + desc = get_irq_desc(virq); 844 735 845 736 /* We only need to migrate enabled IRQS */ 846 737 if (desc == NULL || desc->chip == NULL
+1 -1
arch/powerpc/platforms/pseries/xics.h
··· 31 31 extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; 32 32 33 33 struct irq_desc; 34 - extern void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc, 34 + extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, 35 35 struct pt_regs *regs); 36 36 37 37 #endif /* _POWERPC_KERNEL_XICS_H */
+112 -29
arch/powerpc/sysdev/i8259.c
··· 6 6 * as published by the Free Software Foundation; either version 7 7 * 2 of the License, or (at your option) any later version. 8 8 */ 9 + #undef DEBUG 10 + 9 11 #include <linux/init.h> 10 12 #include <linux/ioport.h> 11 13 #include <linux/interrupt.h> 14 + #include <linux/kernel.h> 15 + #include <linux/delay.h> 12 16 #include <asm/io.h> 13 17 #include <asm/i8259.h> 18 + #include <asm/prom.h> 14 19 15 20 static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ 16 21 ··· 25 20 26 21 static DEFINE_SPINLOCK(i8259_lock); 27 22 28 - static int i8259_pic_irq_offset; 23 + static struct device_node *i8259_node; 24 + static struct irq_host *i8259_host; 29 25 30 26 /* 31 27 * Acknowledge the IRQ using either the PCI host bridge's interrupt ··· 34 28 * which is called. It should be noted that polling is broken on some 35 29 * IBM and Motorola PReP boxes so we must use the int-ack feature on them. 36 30 */ 37 - int i8259_irq(struct pt_regs *regs) 31 + unsigned int i8259_irq(struct pt_regs *regs) 38 32 { 39 33 int irq; 40 - 41 - spin_lock(&i8259_lock); 34 + int lock = 0; 42 35 43 36 /* Either int-ack or poll for the IRQ */ 44 37 if (pci_intack) 45 38 irq = readb(pci_intack); 46 39 else { 40 + spin_lock(&i8259_lock); 41 + lock = 1; 42 + 47 43 /* Perform an interrupt acknowledge cycle on controller 1. 
*/ 48 44 outb(0x0C, 0x20); /* prepare for poll */ 49 45 irq = inb(0x20) & 7; ··· 70 62 if (!pci_intack) 71 63 outb(0x0B, 0x20); /* ISR register */ 72 64 if(~inb(0x20) & 0x80) 73 - irq = -1; 74 - } 65 + irq = NO_IRQ; 66 + } else if (irq == 0xff) 67 + irq = NO_IRQ; 75 68 76 - spin_unlock(&i8259_lock); 77 - return irq + i8259_pic_irq_offset; 69 + if (lock) 70 + spin_unlock(&i8259_lock); 71 + return irq; 78 72 } 79 73 80 74 static void i8259_mask_and_ack_irq(unsigned int irq_nr) ··· 84 74 unsigned long flags; 85 75 86 76 spin_lock_irqsave(&i8259_lock, flags); 87 - irq_nr -= i8259_pic_irq_offset; 88 77 if (irq_nr > 7) { 89 78 cached_A1 |= 1 << (irq_nr-8); 90 79 inb(0xA1); /* DUMMY */ ··· 109 100 { 110 101 unsigned long flags; 111 102 103 + pr_debug("i8259_mask_irq(%d)\n", irq_nr); 104 + 112 105 spin_lock_irqsave(&i8259_lock, flags); 113 - irq_nr -= i8259_pic_irq_offset; 114 106 if (irq_nr < 8) 115 107 cached_21 |= 1 << irq_nr; 116 108 else ··· 124 114 { 125 115 unsigned long flags; 126 116 117 + pr_debug("i8259_unmask_irq(%d)\n", irq_nr); 118 + 127 119 spin_lock_irqsave(&i8259_lock, flags); 128 - irq_nr -= i8259_pic_irq_offset; 129 120 if (irq_nr < 8) 130 121 cached_21 &= ~(1 << irq_nr); 131 122 else ··· 163 152 .flags = IORESOURCE_BUSY, 164 153 }; 165 154 166 - static struct irqaction i8259_irqaction = { 167 - .handler = no_action, 168 - .flags = IRQF_DISABLED, 169 - .mask = CPU_MASK_NONE, 170 - .name = "82c59 secondary cascade", 155 + static int i8259_host_match(struct irq_host *h, struct device_node *node) 156 + { 157 + return i8259_node == NULL || i8259_node == node; 158 + } 159 + 160 + static int i8259_host_map(struct irq_host *h, unsigned int virq, 161 + irq_hw_number_t hw, unsigned int flags) 162 + { 163 + pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); 164 + 165 + /* We block the internal cascade */ 166 + if (hw == 2) 167 + get_irq_desc(virq)->status |= IRQ_NOREQUEST; 168 + 169 + /* We use the level stuff only for now, we might want to 170 + * be more 
cautious here but that works for now 171 + */ 172 + get_irq_desc(virq)->status |= IRQ_LEVEL; 173 + set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq); 174 + return 0; 175 + } 176 + 177 + static void i8259_host_unmap(struct irq_host *h, unsigned int virq) 178 + { 179 + /* Make sure irq is masked in hardware */ 180 + i8259_mask_irq(virq); 181 + 182 + /* remove chip and handler */ 183 + set_irq_chip_and_handler(virq, NULL, NULL); 184 + 185 + /* Make sure it's completed */ 186 + synchronize_irq(virq); 187 + } 188 + 189 + static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, 190 + u32 *intspec, unsigned int intsize, 191 + irq_hw_number_t *out_hwirq, unsigned int *out_flags) 192 + { 193 + static unsigned char map_isa_senses[4] = { 194 + IRQ_TYPE_LEVEL_LOW, 195 + IRQ_TYPE_LEVEL_HIGH, 196 + IRQ_TYPE_EDGE_FALLING, 197 + IRQ_TYPE_EDGE_RISING, 198 + }; 199 + 200 + *out_hwirq = intspec[0]; 201 + if (intsize > 1 && intspec[1] < 4) 202 + *out_flags = map_isa_senses[intspec[1]]; 203 + else 204 + *out_flags = IRQ_TYPE_NONE; 205 + 206 + return 0; 207 + } 208 + 209 + static struct irq_host_ops i8259_host_ops = { 210 + .match = i8259_host_match, 211 + .map = i8259_host_map, 212 + .unmap = i8259_host_unmap, 213 + .xlate = i8259_host_xlate, 171 214 }; 172 215 173 - /* 174 - * i8259_init() 175 - * intack_addr - PCI interrupt acknowledge (real) address which will return 176 - * the active irq from the 8259 216 + /**** 217 + * i8259_init - Initialize the legacy controller 218 + * @node: device node of the legacy PIC (can be NULL, but then, it will match 219 + * all interrupts, so beware) 220 + * @intack_addr: PCI interrupt acknowledge (real) address which will return 221 + * the active irq from the 8259 177 222 */ 178 - void __init i8259_init(unsigned long intack_addr, int offset) 223 + void i8259_init(struct device_node *node, unsigned long intack_addr) 179 224 { 180 225 unsigned long flags; 181 - int i; 182 226 227 + /* initialize the controller */ 183 228 
spin_lock_irqsave(&i8259_lock, flags); 184 - i8259_pic_irq_offset = offset; 229 + 230 + /* Mask all first */ 231 + outb(0xff, 0xA1); 232 + outb(0xff, 0x21); 185 233 186 234 /* init master interrupt controller */ 187 235 outb(0x11, 0x20); /* Start init sequence */ ··· 254 184 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */ 255 185 outb(0x01, 0xA1); /* Select 8086 mode */ 256 186 187 + /* That thing is slow */ 188 + udelay(100); 189 + 257 190 /* always read ISR */ 258 191 outb(0x0B, 0x20); 259 192 outb(0x0B, 0xA0); 260 193 261 - /* Mask all interrupts */ 194 + /* Unmask the internal cascade */ 195 + cached_21 &= ~(1 << 2); 196 + 197 + /* Set interrupt masks */ 262 198 outb(cached_A1, 0xA1); 263 199 outb(cached_21, 0x21); 264 200 265 201 spin_unlock_irqrestore(&i8259_lock, flags); 266 202 267 - for (i = 0; i < NUM_ISA_INTERRUPTS; ++i) { 268 - set_irq_chip_and_handler(offset + i, &i8259_pic, 269 - handle_level_irq); 270 - irq_desc[offset + i].status |= IRQ_LEVEL; 203 + /* create a legacy host */ 204 + if (node) 205 + i8259_node = of_node_get(node); 206 + i8259_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &i8259_host_ops, 0); 207 + if (i8259_host == NULL) { 208 + printk(KERN_ERR "i8259: failed to allocate irq host !\n"); 209 + return; 271 210 } 272 211 273 212 /* reserve our resources */ 274 - setup_irq(offset + 2, &i8259_irqaction); 213 + /* XXX should we continue doing that ? it seems to cause problems 214 + * with further requesting of PCI IO resources for that range... 215 + * need to look into it. 216 + */ 275 217 request_resource(&ioport_resource, &pic1_iores); 276 218 request_resource(&ioport_resource, &pic2_iores); 277 219 request_resource(&ioport_resource, &pic_edgectrl_iores); ··· 291 209 if (intack_addr != 0) 292 210 pci_intack = ioremap(intack_addr, 1); 293 211 212 + printk(KERN_INFO "i8259 legacy interrupt controller initialized\n"); 294 213 }
+211 -118
arch/powerpc/sysdev/mpic.c
··· 340 340 #endif /* CONFIG_MPIC_BROKEN_U3 */ 341 341 342 342 343 + #define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) 344 + 343 345 /* Find an mpic associated with a given linux interrupt */ 344 346 static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) 345 347 { 346 - struct mpic *mpic = mpics; 348 + unsigned int src = mpic_irq_to_hw(irq); 347 349 348 - while(mpic) { 349 - /* search IPIs first since they may override the main interrupts */ 350 - if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) { 351 - if (is_ipi) 352 - *is_ipi = 1; 353 - return mpic; 354 - } 355 - if (irq >= mpic->irq_offset && 356 - irq < (mpic->irq_offset + mpic->irq_count)) { 357 - if (is_ipi) 358 - *is_ipi = 0; 359 - return mpic; 360 - } 361 - mpic = mpic -> next; 362 - } 363 - return NULL; 350 + if (irq < NUM_ISA_INTERRUPTS) 351 + return NULL; 352 + if (is_ipi) 353 + *is_ipi = (src >= MPIC_VEC_IPI_0 && src <= MPIC_VEC_IPI_3); 354 + 355 + return irq_desc[irq].chip_data; 364 356 } 365 357 366 358 /* Convert a cpu mask from logical to physical cpu numbers. 
*/ ··· 390 398 #ifdef CONFIG_SMP 391 399 static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 392 400 { 393 - struct mpic *mpic = dev_id; 394 - 395 - smp_message_recv(irq - mpic->ipi_offset, regs); 401 + smp_message_recv(mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0, regs); 396 402 return IRQ_HANDLED; 397 403 } 398 404 #endif /* CONFIG_SMP */ ··· 404 414 { 405 415 unsigned int loops = 100000; 406 416 struct mpic *mpic = mpic_from_irq(irq); 407 - unsigned int src = irq - mpic->irq_offset; 417 + unsigned int src = mpic_irq_to_hw(irq); 408 418 409 419 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); 410 420 ··· 425 435 { 426 436 unsigned int loops = 100000; 427 437 struct mpic *mpic = mpic_from_irq(irq); 428 - unsigned int src = irq - mpic->irq_offset; 438 + unsigned int src = mpic_irq_to_hw(irq); 429 439 430 440 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); 431 441 ··· 462 472 static void mpic_unmask_ht_irq(unsigned int irq) 463 473 { 464 474 struct mpic *mpic = mpic_from_irq(irq); 465 - unsigned int src = irq - mpic->irq_offset; 475 + unsigned int src = mpic_irq_to_hw(irq); 466 476 467 477 mpic_unmask_irq(irq); 468 478 ··· 473 483 static unsigned int mpic_startup_ht_irq(unsigned int irq) 474 484 { 475 485 struct mpic *mpic = mpic_from_irq(irq); 476 - unsigned int src = irq - mpic->irq_offset; 486 + unsigned int src = mpic_irq_to_hw(irq); 477 487 478 488 mpic_unmask_irq(irq); 479 489 mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status); ··· 484 494 static void mpic_shutdown_ht_irq(unsigned int irq) 485 495 { 486 496 struct mpic *mpic = mpic_from_irq(irq); 487 - unsigned int src = irq - mpic->irq_offset; 497 + unsigned int src = mpic_irq_to_hw(irq); 488 498 489 499 mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status); 490 500 mpic_mask_irq(irq); ··· 493 503 static void mpic_end_ht_irq(unsigned int irq) 494 504 { 495 505 struct mpic *mpic = mpic_from_irq(irq); 496 - unsigned int src = irq - 
mpic->irq_offset; 506 + unsigned int src = mpic_irq_to_hw(irq); 497 507 498 508 #ifdef DEBUG_IRQ 499 509 DBG("%s: end_irq: %d\n", mpic->name, irq); ··· 515 525 static void mpic_unmask_ipi(unsigned int irq) 516 526 { 517 527 struct mpic *mpic = mpic_from_ipi(irq); 518 - unsigned int src = irq - mpic->ipi_offset; 528 + unsigned int src = mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0; 519 529 520 530 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); 521 531 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); ··· 545 555 static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) 546 556 { 547 557 struct mpic *mpic = mpic_from_irq(irq); 558 + unsigned int src = mpic_irq_to_hw(irq); 548 559 549 560 cpumask_t tmp; 550 561 551 562 cpus_and(tmp, cpumask, cpu_online_map); 552 563 553 - mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION, 564 + mpic_irq_write(src, MPIC_IRQ_DESTINATION, 554 565 mpic_physmask(cpus_addr(tmp)[0])); 566 + } 567 + 568 + static unsigned int mpic_flags_to_vecpri(unsigned int flags, int *level) 569 + { 570 + unsigned int vecpri; 571 + 572 + /* Now convert sense value */ 573 + switch(flags & IRQ_TYPE_SENSE_MASK) { 574 + case IRQ_TYPE_EDGE_RISING: 575 + vecpri = MPIC_VECPRI_SENSE_EDGE | 576 + MPIC_VECPRI_POLARITY_POSITIVE; 577 + *level = 0; 578 + break; 579 + case IRQ_TYPE_EDGE_FALLING: 580 + vecpri = MPIC_VECPRI_SENSE_EDGE | 581 + MPIC_VECPRI_POLARITY_NEGATIVE; 582 + *level = 0; 583 + break; 584 + case IRQ_TYPE_LEVEL_HIGH: 585 + vecpri = MPIC_VECPRI_SENSE_LEVEL | 586 + MPIC_VECPRI_POLARITY_POSITIVE; 587 + *level = 1; 588 + break; 589 + case IRQ_TYPE_LEVEL_LOW: 590 + default: 591 + vecpri = MPIC_VECPRI_SENSE_LEVEL | 592 + MPIC_VECPRI_POLARITY_NEGATIVE; 593 + *level = 1; 594 + } 595 + return vecpri; 555 596 } 556 597 557 598 static struct irq_chip mpic_irq_chip = { ··· 610 589 #endif /* CONFIG_MPIC_BROKEN_U3 */ 611 590 612 591 592 + static int mpic_host_match(struct irq_host *h, struct device_node *node) 593 + { 594 + struct 
mpic *mpic = h->host_data; 595 + 596 + /* Exact match, unless mpic node is NULL */ 597 + return mpic->of_node == NULL || mpic->of_node == node; 598 + } 599 + 600 + static int mpic_host_map(struct irq_host *h, unsigned int virq, 601 + irq_hw_number_t hw, unsigned int flags) 602 + { 603 + struct irq_desc *desc = get_irq_desc(virq); 604 + struct irq_chip *chip; 605 + struct mpic *mpic = h->host_data; 606 + unsigned int vecpri = MPIC_VECPRI_SENSE_LEVEL | 607 + MPIC_VECPRI_POLARITY_NEGATIVE; 608 + int level; 609 + 610 + pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n", 611 + virq, hw, flags); 612 + 613 + if (hw == MPIC_VEC_SPURRIOUS) 614 + return -EINVAL; 615 + #ifdef CONFIG_SMP 616 + else if (hw >= MPIC_VEC_IPI_0) { 617 + WARN_ON(!(mpic->flags & MPIC_PRIMARY)); 618 + 619 + pr_debug("mpic: mapping as IPI\n"); 620 + set_irq_chip_data(virq, mpic); 621 + set_irq_chip_and_handler(virq, &mpic->hc_ipi, 622 + handle_percpu_irq); 623 + return 0; 624 + } 625 + #endif /* CONFIG_SMP */ 626 + 627 + if (hw >= mpic->irq_count) 628 + return -EINVAL; 629 + 630 + /* If no sense provided, check default sense array */ 631 + if (((flags & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE) && 632 + mpic->senses && hw < mpic->senses_count) 633 + flags |= mpic->senses[hw]; 634 + 635 + vecpri = mpic_flags_to_vecpri(flags, &level); 636 + if (level) 637 + desc->status |= IRQ_LEVEL; 638 + chip = &mpic->hc_irq; 639 + 640 + #ifdef CONFIG_MPIC_BROKEN_U3 641 + /* Check for HT interrupts, override vecpri */ 642 + if (mpic_is_ht_interrupt(mpic, hw)) { 643 + vecpri &= ~(MPIC_VECPRI_SENSE_MASK | 644 + MPIC_VECPRI_POLARITY_MASK); 645 + vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; 646 + chip = &mpic->hc_ht_irq; 647 + } 648 + #endif 649 + 650 + /* Reconfigure irq */ 651 + vecpri |= MPIC_VECPRI_MASK | hw | (8 << MPIC_VECPRI_PRIORITY_SHIFT); 652 + mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri); 653 + 654 + pr_debug("mpic: mapping as IRQ\n"); 655 + 656 + set_irq_chip_data(virq, mpic); 657 + 
set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq); 658 + return 0; 659 + } 660 + 661 + static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, 662 + u32 *intspec, unsigned int intsize, 663 + irq_hw_number_t *out_hwirq, unsigned int *out_flags) 664 + 665 + { 666 + static unsigned char map_mpic_senses[4] = { 667 + IRQ_TYPE_EDGE_RISING, 668 + IRQ_TYPE_LEVEL_LOW, 669 + IRQ_TYPE_LEVEL_HIGH, 670 + IRQ_TYPE_EDGE_FALLING, 671 + }; 672 + 673 + *out_hwirq = intspec[0]; 674 + if (intsize > 1 && intspec[1] < 4) 675 + *out_flags = map_mpic_senses[intspec[1]]; 676 + else 677 + *out_flags = IRQ_TYPE_NONE; 678 + 679 + return 0; 680 + } 681 + 682 + static struct irq_host_ops mpic_host_ops = { 683 + .match = mpic_host_match, 684 + .map = mpic_host_map, 685 + .xlate = mpic_host_xlate, 686 + }; 687 + 613 688 /* 614 689 * Exported functions 615 690 */ 616 691 617 - 618 - struct mpic * __init mpic_alloc(unsigned long phys_addr, 692 + struct mpic * __init mpic_alloc(struct device_node *node, 693 + unsigned long phys_addr, 619 694 unsigned int flags, 620 695 unsigned int isu_size, 621 - unsigned int irq_offset, 622 696 unsigned int irq_count, 623 - unsigned int ipi_offset, 624 - unsigned char *senses, 625 - unsigned int senses_count, 626 697 const char *name) 627 698 { 628 699 struct mpic *mpic; ··· 726 613 if (mpic == NULL) 727 614 return NULL; 728 615 729 - 730 616 memset(mpic, 0, sizeof(struct mpic)); 731 617 mpic->name = name; 618 + mpic->of_node = node ? 
of_node_get(node) : NULL; 732 619 620 + mpic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 256, 621 + &mpic_host_ops, 622 + MPIC_VEC_SPURRIOUS); 623 + if (mpic->irqhost == NULL) { 624 + of_node_put(node); 625 + return NULL; 626 + } 627 + 628 + mpic->irqhost->host_data = mpic; 733 629 mpic->hc_irq = mpic_irq_chip; 734 630 mpic->hc_irq.typename = name; 735 631 if (flags & MPIC_PRIMARY) ··· 750 628 mpic->hc_ht_irq.set_affinity = mpic_set_affinity; 751 629 #endif /* CONFIG_MPIC_BROKEN_U3 */ 752 630 #ifdef CONFIG_SMP 753 - mpic->hc_ipi.typename = name; 754 631 mpic->hc_ipi = mpic_ipi_chip; 632 + mpic->hc_ipi.typename = name; 755 633 #endif /* CONFIG_SMP */ 756 634 757 635 mpic->flags = flags; 758 636 mpic->isu_size = isu_size; 759 - mpic->irq_offset = irq_offset; 760 637 mpic->irq_count = irq_count; 761 - mpic->ipi_offset = ipi_offset; 762 638 mpic->num_sources = 0; /* so far */ 763 - mpic->senses = senses; 764 - mpic->senses_count = senses_count; 765 639 766 640 /* Map the global registers */ 767 641 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); ··· 825 707 mpic->next = mpics; 826 708 mpics = mpic; 827 709 828 - if (flags & MPIC_PRIMARY) 710 + if (flags & MPIC_PRIMARY) { 829 711 mpic_primary = mpic; 712 + irq_set_default_host(mpic->irqhost); 713 + } 830 714 831 715 return mpic; 832 716 } ··· 845 725 mpic->num_sources = isu_first + mpic->isu_size; 846 726 } 847 727 728 + void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count) 729 + { 730 + mpic->senses = senses; 731 + mpic->senses_count = count; 732 + } 733 + 848 734 void __init mpic_init(struct mpic *mpic) 849 735 { 850 736 int i; 851 737 852 738 BUG_ON(mpic->num_sources == 0); 739 + WARN_ON(mpic->num_sources > MPIC_VEC_IPI_0); 740 + 741 + /* Sanitize source count */ 742 + if (mpic->num_sources > MPIC_VEC_IPI_0) 743 + mpic->num_sources = MPIC_VEC_IPI_0; 853 744 854 745 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); 855 746 ··· 884 753 MPIC_VECPRI_MASK | 
885 754 (10 << MPIC_VECPRI_PRIORITY_SHIFT) | 886 755 (MPIC_VEC_IPI_0 + i)); 887 - #ifdef CONFIG_SMP 888 - if (!(mpic->flags & MPIC_PRIMARY)) 889 - continue; 890 - set_irq_chip_data(mpic->ipi_offset+i, mpic); 891 - set_irq_chip_and_handler(mpic->ipi_offset+i, 892 - &mpic->hc_ipi, 893 - handle_percpu_irq); 894 - #endif /* CONFIG_SMP */ 895 756 } 896 757 897 758 /* Initialize interrupt sources */ ··· 900 777 for (i = 0; i < mpic->num_sources; i++) { 901 778 /* start with vector = source number, and masked */ 902 779 u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); 903 - int level = 0; 780 + int level = 1; 904 781 905 - /* if it's an IPI, we skip it */ 906 - if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) && 907 - (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4)) 908 - continue; 909 - 910 782 /* do senses munging */ 911 - if (mpic->senses && i < mpic->senses_count) { 912 - if (mpic->senses[i] & IRQ_SENSE_LEVEL) 913 - vecpri |= MPIC_VECPRI_SENSE_LEVEL; 914 - if (mpic->senses[i] & IRQ_POLARITY_POSITIVE) 915 - vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; 916 - } else 783 + if (mpic->senses && i < mpic->senses_count) 784 + vecpri = mpic_flags_to_vecpri(mpic->senses[i], 785 + &level); 786 + else 917 787 vecpri |= MPIC_VECPRI_SENSE_LEVEL; 918 - 919 - /* remember if it was a level interrupts */ 920 - level = (vecpri & MPIC_VECPRI_SENSE_LEVEL); 921 788 922 789 /* deal with broken U3 */ 923 790 if (mpic->flags & MPIC_BROKEN_U3) { ··· 929 816 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); 930 817 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 931 818 1 << hard_smp_processor_id()); 932 - 933 - /* init linux descriptors */ 934 - if (i < mpic->irq_count) { 935 - struct irq_chip *chip = &mpic->hc_irq; 936 - 937 - irq_desc[mpic->irq_offset+i].status |= 938 - level ? 
IRQ_LEVEL : 0; 939 - #ifdef CONFIG_MPIC_BROKEN_U3 940 - if (mpic_is_ht_interrupt(mpic, i)) 941 - chip = &mpic->hc_ht_irq; 942 - #endif /* CONFIG_MPIC_BROKEN_U3 */ 943 - set_irq_chip_data(mpic->irq_offset+i, mpic); 944 - set_irq_chip_and_handler(mpic->irq_offset+i, chip, 945 - handle_fasteoi_irq); 946 - } 947 819 } 948 820 949 821 /* Init spurrious vector */ ··· 969 871 { 970 872 int is_ipi; 971 873 struct mpic *mpic = mpic_find(irq, &is_ipi); 874 + unsigned int src = mpic_irq_to_hw(irq); 972 875 unsigned long flags; 973 876 u32 reg; 974 877 975 878 spin_lock_irqsave(&mpic_lock, flags); 976 879 if (is_ipi) { 977 - reg = mpic_ipi_read(irq - mpic->ipi_offset) & 880 + reg = mpic_ipi_read(src - MPIC_VEC_IPI_0) & 978 881 ~MPIC_VECPRI_PRIORITY_MASK; 979 - mpic_ipi_write(irq - mpic->ipi_offset, 882 + mpic_ipi_write(src - MPIC_VEC_IPI_0, 980 883 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 981 884 } else { 982 - reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI) 885 + reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) 983 886 & ~MPIC_VECPRI_PRIORITY_MASK; 984 - mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, 887 + mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 985 888 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 986 889 } 987 890 spin_unlock_irqrestore(&mpic_lock, flags); ··· 992 893 { 993 894 int is_ipi; 994 895 struct mpic *mpic = mpic_find(irq, &is_ipi); 896 + unsigned int src = mpic_irq_to_hw(irq); 995 897 unsigned long flags; 996 898 u32 reg; 997 899 998 900 spin_lock_irqsave(&mpic_lock, flags); 999 901 if (is_ipi) 1000 - reg = mpic_ipi_read(irq - mpic->ipi_offset); 902 + reg = mpic_ipi_read(src = MPIC_VEC_IPI_0); 1001 903 else 1002 - reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI); 904 + reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI); 1003 905 spin_unlock_irqrestore(&mpic_lock, flags); 1004 906 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; 1005 907 } ··· 1095 995 mpic_physmask(cpu_mask & 
cpus_addr(cpu_online_map)[0])); 1096 996 } 1097 997 1098 - int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) 998 + unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) 1099 999 { 1100 - u32 irq; 1000 + u32 src; 1101 1001 1102 - irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; 1002 + src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; 1103 1003 #ifdef DEBUG_LOW 1104 - DBG("%s: get_one_irq(): %d\n", mpic->name, irq); 1004 + DBG("%s: get_one_irq(): %d\n", mpic->name, src); 1105 1005 #endif 1106 - if (unlikely(irq == MPIC_VEC_SPURRIOUS)) 1107 - return -1; 1108 - if (irq < MPIC_VEC_IPI_0) { 1109 - #ifdef DEBUG_IRQ 1110 - DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset); 1111 - #endif 1112 - return irq + mpic->irq_offset; 1113 - } 1114 - #ifdef DEBUG_IPI 1115 - DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0); 1116 - #endif 1117 - return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset; 1006 + if (unlikely(src == MPIC_VEC_SPURRIOUS)) 1007 + return NO_IRQ; 1008 + return irq_linear_revmap(mpic->irqhost, src); 1118 1009 } 1119 1010 1120 - int mpic_get_irq(struct pt_regs *regs) 1011 + unsigned int mpic_get_irq(struct pt_regs *regs) 1121 1012 { 1122 1013 struct mpic *mpic = mpic_primary; 1123 1014 ··· 1122 1031 void mpic_request_ipis(void) 1123 1032 { 1124 1033 struct mpic *mpic = mpic_primary; 1125 - 1034 + int i; 1035 + static char *ipi_names[] = { 1036 + "IPI0 (call function)", 1037 + "IPI1 (reschedule)", 1038 + "IPI2 (unused)", 1039 + "IPI3 (debugger break)", 1040 + }; 1126 1041 BUG_ON(mpic == NULL); 1127 - 1128 - printk("requesting IPIs ... 
\n"); 1129 1042 1130 - /* 1131 - * IPIs are marked IRQF_DISABLED as they must run with irqs 1132 - * disabled 1133 - */ 1134 - request_irq(mpic->ipi_offset+0, mpic_ipi_action, IRQF_DISABLED, 1135 - "IPI0 (call function)", mpic); 1136 - request_irq(mpic->ipi_offset+1, mpic_ipi_action, IRQF_DISABLED, 1137 - "IPI1 (reschedule)", mpic); 1138 - request_irq(mpic->ipi_offset+2, mpic_ipi_action, IRQF_DISABLED, 1139 - "IPI2 (unused)", mpic); 1140 - request_irq(mpic->ipi_offset+3, mpic_ipi_action, IRQF_DISABLED, 1141 - "IPI3 (debugger break)", mpic); 1043 + printk(KERN_INFO "mpic: requesting IPIs ... \n"); 1142 1044 1143 - printk("IPIs requested... \n"); 1045 + for (i = 0; i < 4; i++) { 1046 + unsigned int vipi = irq_create_mapping(mpic->irqhost, 1047 + MPIC_VEC_IPI_0 + i, 0); 1048 + if (vipi == NO_IRQ) { 1049 + printk(KERN_ERR "Failed to map IPI %d\n", i); 1050 + break; 1051 + } 1052 + request_irq(vipi, mpic_ipi_action, IRQF_DISABLED, 1053 + ipi_names[i], mpic); 1054 + } 1144 1055 } 1145 1056 1146 1057 void smp_mpic_message_pass(int target, int msg)
+3 -4
drivers/char/hvsi.c
··· 1299 1299 hp->inbuf_end = hp->inbuf; 1300 1300 hp->state = HVSI_CLOSED; 1301 1301 hp->vtermno = *vtermno; 1302 - hp->virq = virt_irq_create_mapping(irq[0]); 1302 + hp->virq = irq_create_mapping(NULL, irq[0], 0); 1303 1303 if (hp->virq == NO_IRQ) { 1304 1304 printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", 1305 - __FUNCTION__, hp->virq); 1305 + __FUNCTION__, irq[0]); 1306 1306 continue; 1307 - } else 1308 - hp->virq = irq_offset_up(hp->virq); 1307 + } 1309 1308 1310 1309 hvsi_count++; 1311 1310 }
+4 -15
drivers/macintosh/macio-adb.c
··· 90 90 { 91 91 struct device_node *adbs; 92 92 struct resource r; 93 + unsigned int irq; 93 94 94 95 adbs = find_compatible_devices("adb", "chrp,adb0"); 95 96 if (adbs == 0) 96 97 return -ENXIO; 97 98 98 - #if 0 99 - { int i = 0; 100 - 101 - printk("macio_adb_init: node = %p, addrs =", adbs->node); 102 - while(!of_address_to_resource(adbs, i, &r)) 103 - printk(" %x(%x)", r.start, r.end - r.start); 104 - printk(", intrs ="); 105 - for (i = 0; i < adbs->n_intrs; ++i) 106 - printk(" %x", adbs->intrs[i].line); 107 - printk("\n"); } 108 - #endif 109 99 if (of_address_to_resource(adbs, 0, &r)) 110 100 return -ENXIO; 111 101 adb = ioremap(r.start, sizeof(struct adb_regs)); ··· 107 117 out_8(&adb->active_lo.r, 0xff); 108 118 out_8(&adb->autopoll.r, APE); 109 119 110 - if (request_irq(adbs->intrs[0].line, macio_adb_interrupt, 111 - 0, "ADB", (void *)0)) { 112 - printk(KERN_ERR "ADB: can't get irq %d\n", 113 - adbs->intrs[0].line); 120 + irq = irq_of_parse_and_map(adbs, 0); 121 + if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) { 122 + printk(KERN_ERR "ADB: can't get irq %d\n", irq); 114 123 return -EAGAIN; 115 124 } 116 125 out_8(&adb->intr_enb.r, DFB | TAG);
+100 -46
drivers/macintosh/macio_asic.c
··· 280 280 static int macio_resource_quirks(struct device_node *np, struct resource *res, 281 281 int index) 282 282 { 283 - if (res->flags & IORESOURCE_MEM) { 284 - /* Grand Central has too large resource 0 on some machines */ 285 - if (index == 0 && !strcmp(np->name, "gc")) 286 - res->end = res->start + 0x1ffff; 283 + /* Only quirks for memory resources for now */ 284 + if ((res->flags & IORESOURCE_MEM) == 0) 285 + return 0; 287 286 288 - /* Airport has bogus resource 2 */ 289 - if (index >= 2 && !strcmp(np->name, "radio")) 290 - return 1; 287 + /* Grand Central has too large resource 0 on some machines */ 288 + if (index == 0 && !strcmp(np->name, "gc")) 289 + res->end = res->start + 0x1ffff; 290 + 291 + /* Airport has bogus resource 2 */ 292 + if (index >= 2 && !strcmp(np->name, "radio")) 293 + return 1; 291 294 292 295 #ifndef CONFIG_PPC64 293 - /* DBDMAs may have bogus sizes */ 294 - if ((res->start & 0x0001f000) == 0x00008000) 295 - res->end = res->start + 0xff; 296 + /* DBDMAs may have bogus sizes */ 297 + if ((res->start & 0x0001f000) == 0x00008000) 298 + res->end = res->start + 0xff; 296 299 #endif /* CONFIG_PPC64 */ 297 300 298 - /* ESCC parent eats child resources. We could have added a 299 - * level of hierarchy, but I don't really feel the need 300 - * for it 301 - */ 302 - if (!strcmp(np->name, "escc")) 303 - return 1; 301 + /* ESCC parent eats child resources. 
We could have added a 302 + * level of hierarchy, but I don't really feel the need 303 + * for it 304 + */ 305 + if (!strcmp(np->name, "escc")) 306 + return 1; 304 307 305 - /* ESCC has bogus resources >= 3 */ 306 - if (index >= 3 && !(strcmp(np->name, "ch-a") && 307 - strcmp(np->name, "ch-b"))) 308 - return 1; 308 + /* ESCC has bogus resources >= 3 */ 309 + if (index >= 3 && !(strcmp(np->name, "ch-a") && 310 + strcmp(np->name, "ch-b"))) 311 + return 1; 309 312 310 - /* Media bay has too many resources, keep only first one */ 311 - if (index > 0 && !strcmp(np->name, "media-bay")) 312 - return 1; 313 + /* Media bay has too many resources, keep only first one */ 314 + if (index > 0 && !strcmp(np->name, "media-bay")) 315 + return 1; 313 316 314 - /* Some older IDE resources have bogus sizes */ 315 - if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") && 316 - strcmp(np->type, "ide") && strcmp(np->type, "ata"))) { 317 - if (index == 0 && (res->end - res->start) > 0xfff) 318 - res->end = res->start + 0xfff; 319 - if (index == 1 && (res->end - res->start) > 0xff) 320 - res->end = res->start + 0xff; 321 - } 317 + /* Some older IDE resources have bogus sizes */ 318 + if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") && 319 + strcmp(np->type, "ide") && strcmp(np->type, "ata"))) { 320 + if (index == 0 && (res->end - res->start) > 0xfff) 321 + res->end = res->start + 0xfff; 322 + if (index == 1 && (res->end - res->start) > 0xff) 323 + res->end = res->start + 0xff; 322 324 } 323 325 return 0; 324 326 } 325 327 328 + static void macio_create_fixup_irq(struct macio_dev *dev, int index, 329 + unsigned int line) 330 + { 331 + unsigned int irq; 332 + 333 + irq = irq_create_mapping(NULL, line, 0); 334 + if (irq != NO_IRQ) { 335 + dev->interrupt[index].start = irq; 336 + dev->interrupt[index].flags = IORESOURCE_IRQ; 337 + dev->interrupt[index].name = dev->ofdev.dev.bus_id; 338 + } 339 + if (dev->n_interrupts <= index) 340 + dev->n_interrupts = index + 1; 341 + } 342 + 343 
+ static void macio_add_missing_resources(struct macio_dev *dev) 344 + { 345 + struct device_node *np = dev->ofdev.node; 346 + unsigned int irq_base; 347 + 348 + /* Gatwick has some missing interrupts on child nodes */ 349 + if (dev->bus->chip->type != macio_gatwick) 350 + return; 351 + 352 + /* irq_base is always 64 on gatwick. I have no cleaner way to get 353 + * that value from here at this point 354 + */ 355 + irq_base = 64; 356 + 357 + /* Fix SCC */ 358 + if (strcmp(np->name, "ch-a") == 0) { 359 + macio_create_fixup_irq(dev, 0, 15 + irq_base); 360 + macio_create_fixup_irq(dev, 1, 4 + irq_base); 361 + macio_create_fixup_irq(dev, 2, 5 + irq_base); 362 + printk(KERN_INFO "macio: fixed SCC irqs on gatwick\n"); 363 + } 364 + 365 + /* Fix media-bay */ 366 + if (strcmp(np->name, "media-bay") == 0) { 367 + macio_create_fixup_irq(dev, 0, 29 + irq_base); 368 + printk(KERN_INFO "macio: fixed media-bay irq on gatwick\n"); 369 + } 370 + 371 + /* Fix left media bay childs */ 372 + if (dev->media_bay != NULL && strcmp(np->name, "floppy") == 0) { 373 + macio_create_fixup_irq(dev, 0, 19 + irq_base); 374 + macio_create_fixup_irq(dev, 1, 1 + irq_base); 375 + printk(KERN_INFO "macio: fixed left floppy irqs\n"); 376 + } 377 + if (dev->media_bay != NULL && strcasecmp(np->name, "ata4") == 0) { 378 + macio_create_fixup_irq(dev, 0, 14 + irq_base); 379 + macio_create_fixup_irq(dev, 0, 3 + irq_base); 380 + printk(KERN_INFO "macio: fixed left ide irqs\n"); 381 + } 382 + } 326 383 327 384 static void macio_setup_interrupts(struct macio_dev *dev) 328 385 { 329 386 struct device_node *np = dev->ofdev.node; 330 - int i,j; 387 + unsigned int irq; 388 + int i = 0, j = 0; 331 389 332 - /* For now, we use pre-parsed entries in the device-tree for 333 - * interrupt routing and addresses, but we should change that 334 - * to dynamically parsed entries and so get rid of most of the 335 - * clutter in struct device_node 336 - */ 337 - for (i = j = 0; i < np->n_intrs; i++) { 390 + for (;;) { 338 391 
struct resource *res = &dev->interrupt[j]; 339 392 340 393 if (j >= MACIO_DEV_COUNT_IRQS) 341 394 break; 342 - res->start = np->intrs[i].line; 343 - res->flags = IORESOURCE_IO; 344 - if (np->intrs[j].sense) 345 - res->flags |= IORESOURCE_IRQ_LOWLEVEL; 346 - else 347 - res->flags |= IORESOURCE_IRQ_HIGHEDGE; 395 + irq = irq_of_parse_and_map(np, i++); 396 + if (irq == NO_IRQ) 397 + break; 398 + res->start = irq; 399 + res->flags = IORESOURCE_IRQ; 348 400 res->name = dev->ofdev.dev.bus_id; 349 - if (macio_resource_quirks(np, res, i)) 401 + if (macio_resource_quirks(np, res, i - 1)) { 350 402 memset(res, 0, sizeof(struct resource)); 351 - else 403 + continue; 404 + } else 352 405 j++; 353 406 } 354 407 dev->n_interrupts = j; ··· 498 445 /* Setup interrupts & resources */ 499 446 macio_setup_interrupts(dev); 500 447 macio_setup_resources(dev, parent_res); 448 + macio_add_missing_resources(dev); 501 449 502 450 /* Register with core */ 503 451 if (of_device_register(&dev->ofdev) != 0) {
+2 -4
drivers/macintosh/smu.c
··· 497 497 smu->doorbell = *data; 498 498 if (smu->doorbell < 0x50) 499 499 smu->doorbell += 0x50; 500 - if (np->n_intrs > 0) 501 - smu->db_irq = np->intrs[0].line; 500 + smu->db_irq = irq_of_parse_and_map(np, 0); 502 501 503 502 of_node_put(np); 504 503 ··· 514 515 smu->msg = *data; 515 516 if (smu->msg < 0x50) 516 517 smu->msg += 0x50; 517 - if (np->n_intrs > 0) 518 - smu->msg_irq = np->intrs[0].line; 518 + smu->msg_irq = irq_of_parse_and_map(np, 0); 519 519 of_node_put(np); 520 520 } while(0); 521 521
+15 -9
drivers/macintosh/via-cuda.c
··· 34 34 static volatile unsigned char __iomem *via; 35 35 static DEFINE_SPINLOCK(cuda_lock); 36 36 37 - #ifdef CONFIG_MAC 38 - #define CUDA_IRQ IRQ_MAC_ADB 39 - #define eieio() 40 - #else 41 - #define CUDA_IRQ vias->intrs[0].line 42 - #endif 43 - 44 37 /* VIA registers - spaced 0x200 bytes apart */ 45 38 #define RS 0x200 /* skip between registers */ 46 39 #define B 0 /* B-side data */ ··· 182 189 183 190 static int __init via_cuda_start(void) 184 191 { 192 + unsigned int irq; 193 + 185 194 if (via == NULL) 186 195 return -ENODEV; 187 196 188 - if (request_irq(CUDA_IRQ, cuda_interrupt, 0, "ADB", cuda_interrupt)) { 189 - printk(KERN_ERR "cuda_init: can't get irq %d\n", CUDA_IRQ); 197 + #ifdef CONFIG_MAC 198 + irq = IRQ_MAC_ADB; 199 + #else /* CONFIG_MAC */ 200 + irq = irq_of_parse_and_map(vias, 0); 201 + if (irq == NO_IRQ) { 202 + printk(KERN_ERR "via-cuda: can't map interrupts for %s\n", 203 + vias->full_name); 204 + return -ENODEV; 205 + } 206 + #endif /* CONFIG_MAP */ 207 + 208 + if (request_irq(irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) { 209 + printk(KERN_ERR "via-cuda: can't request irq %d\n", irq); 190 210 return -EAGAIN; 191 211 } 192 212
+14 -19
drivers/macintosh/via-pmu.c
··· 64 64 #include <asm/backlight.h> 65 65 #endif 66 66 67 - #ifdef CONFIG_PPC32 68 - #include <asm/open_pic.h> 69 - #endif 70 - 71 67 #include "via-pmu-event.h" 72 68 73 69 /* Some compile options */ ··· 147 151 static int pmu_has_adb; 148 152 static struct device_node *gpio_node; 149 153 static unsigned char __iomem *gpio_reg = NULL; 150 - static int gpio_irq = -1; 154 + static int gpio_irq = NO_IRQ; 151 155 static int gpio_irq_enabled = -1; 152 156 static volatile int pmu_suspended = 0; 153 157 static spinlock_t pmu_lock; ··· 399 403 */ 400 404 static int __init via_pmu_start(void) 401 405 { 406 + unsigned int irq; 407 + 402 408 if (vias == NULL) 403 409 return -ENODEV; 404 410 405 411 batt_req.complete = 1; 406 412 407 - #ifndef CONFIG_PPC_MERGE 408 - if (pmu_kind == PMU_KEYLARGO_BASED) 409 - openpic_set_irq_priority(vias->intrs[0].line, 410 - OPENPIC_PRIORITY_DEFAULT + 1); 411 - #endif 412 - 413 - if (request_irq(vias->intrs[0].line, via_pmu_interrupt, 0, "VIA-PMU", 414 - (void *)0)) { 415 - printk(KERN_ERR "VIA-PMU: can't get irq %d\n", 416 - vias->intrs[0].line); 417 - return -EAGAIN; 413 + irq = irq_of_parse_and_map(vias, 0); 414 + if (irq == NO_IRQ) { 415 + printk(KERN_ERR "via-pmu: can't map interruptn"); 416 + return -ENODEV; 417 + } 418 + if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) { 419 + printk(KERN_ERR "via-pmu: can't request irq %d\n", irq); 420 + return -ENODEV; 418 421 } 419 422 420 423 if (pmu_kind == PMU_KEYLARGO_BASED) { ··· 421 426 if (gpio_node == NULL) 422 427 gpio_node = of_find_node_by_name(NULL, 423 428 "pmu-interrupt"); 424 - if (gpio_node && gpio_node->n_intrs > 0) 425 - gpio_irq = gpio_node->intrs[0].line; 429 + if (gpio_node) 430 + gpio_irq = irq_of_parse_and_map(gpio_node, 0); 426 431 427 - if (gpio_irq != -1) { 432 + if (gpio_irq != NO_IRQ) { 428 433 if (request_irq(gpio_irq, gpio1_interrupt, 0, 429 434 "GPIO1 ADB", (void *)0)) 430 435 printk(KERN_ERR "pmu: can't get irq %d"
+2 -2
drivers/net/mace.c
··· 242 242 } 243 243 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); 244 244 if (rc) { 245 - printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line); 245 + printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr); 246 246 goto err_free_irq; 247 247 } 248 248 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); 249 249 if (rc) { 250 - printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line); 250 + printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr); 251 251 goto err_free_tx_irq; 252 252 } 253 253
+3 -3
drivers/serial/pmac_zilog.c
··· 1443 1443 uap->flags &= ~PMACZILOG_FLAG_HAS_DMA; 1444 1444 goto no_dma; 1445 1445 } 1446 - uap->tx_dma_irq = np->intrs[1].line; 1447 - uap->rx_dma_irq = np->intrs[2].line; 1446 + uap->tx_dma_irq = irq_of_parse_and_map(np, 1); 1447 + uap->rx_dma_irq = irq_of_parse_and_map(np, 2); 1448 1448 } 1449 1449 no_dma: 1450 1450 ··· 1491 1491 * Init remaining bits of "port" structure 1492 1492 */ 1493 1493 uap->port.iotype = UPIO_MEM; 1494 - uap->port.irq = np->intrs[0].line; 1494 + uap->port.irq = irq_of_parse_and_map(np, 0); 1495 1495 uap->port.uartclk = ZS_CLOCK; 1496 1496 uap->port.fifosize = 1; 1497 1497 uap->port.ops = &pmz_pops;
+5
include/asm-powerpc/i8259.h
··· 4 4 5 5 #include <linux/irq.h> 6 6 7 + #ifdef CONFIG_PPC_MERGE 8 + extern void i8259_init(struct device_node *node, unsigned long intack_addr); 9 + extern unsigned int i8259_irq(struct pt_regs *regs); 10 + #else 7 11 extern void i8259_init(unsigned long intack_addr, int offset); 8 12 extern int i8259_irq(struct pt_regs *regs); 13 + #endif 9 14 10 15 #endif /* __KERNEL__ */ 11 16 #endif /* _ASM_POWERPC_I8259_H */
+316 -60
include/asm-powerpc/irq.h
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 12 + #include <linux/config.h> 12 13 #include <linux/threads.h> 14 + #include <linux/list.h> 15 + #include <linux/radix-tree.h> 13 16 14 17 #include <asm/types.h> 15 18 #include <asm/atomic.h> 16 19 17 - /* this number is used when no interrupt has been assigned */ 20 + 21 + #define get_irq_desc(irq) (&irq_desc[(irq)]) 22 + 23 + /* Define a way to iterate across irqs. */ 24 + #define for_each_irq(i) \ 25 + for ((i) = 0; (i) < NR_IRQS; ++(i)) 26 + 27 + extern atomic_t ppc_n_lost_interrupts; 28 + 29 + #ifdef CONFIG_PPC_MERGE 30 + 31 + /* This number is used when no interrupt has been assigned */ 32 + #define NO_IRQ (0) 33 + 34 + /* This is a special irq number to return from get_irq() to tell that 35 + * no interrupt happened _and_ ignore it (don't count it as bad). Some 36 + * platforms like iSeries rely on that. 37 + */ 38 + #define NO_IRQ_IGNORE ((unsigned int)-1) 39 + 40 + /* Total number of virq in the platform (make it a CONFIG_* option ? */ 41 + #define NR_IRQS 512 42 + 43 + /* Number of irqs reserved for the legacy controller */ 44 + #define NUM_ISA_INTERRUPTS 16 45 + 46 + /* This type is the placeholder for a hardware interrupt number. It has to 47 + * be big enough to enclose whatever representation is used by a given 48 + * platform. 49 + */ 50 + typedef unsigned long irq_hw_number_t; 51 + 52 + /* Interrupt controller "host" data structure. This could be defined as a 53 + * irq domain controller. That is, it handles the mapping between hardware 54 + * and virtual interrupt numbers for a given interrupt domain. The host 55 + * structure is generally created by the PIC code for a given PIC instance 56 + * (though a host can cover more than one PIC if they have a flat number 57 + * model). It's the host callbacks that are responsible for setting the 58 + * irq_chip on a given irq_desc after it's been mapped. 
59 + * 60 + * The host code and data structures are fairly agnostic to the fact that 61 + * we use an open firmware device-tree. We do have references to struct 62 + * device_node in two places: in irq_find_host() to find the host matching 63 + * a given interrupt controller node, and of course as an argument to its 64 + * counterpart host->ops->match() callback. However, those are treated as 65 + * generic pointers by the core and the fact that it's actually a device-node 66 + * pointer is purely a convention between callers and implementation. This 67 + * code could thus be used on other architectures by replacing those two 68 + * by some sort of arch-specific void * "token" used to identify interrupt 69 + * controllers. 70 + */ 71 + struct irq_host; 72 + struct radix_tree_root; 73 + 74 + /* Functions below are provided by the host and called whenever a new mapping 75 + * is created or an old mapping is disposed. The host can then proceed to 76 + * whatever internal data structures management is required. It also needs 77 + * to setup the irq_desc when returning from map(). 78 + */ 79 + struct irq_host_ops { 80 + /* Match an interrupt controller device node to a host, returns 81 + * 1 on a match 82 + */ 83 + int (*match)(struct irq_host *h, struct device_node *node); 84 + 85 + /* Create or update a mapping between a virtual irq number and a hw 86 + * irq number. This can be called several times for the same mapping 87 + * but with different flags, though unmap shall always be called 88 + * before the virq->hw mapping is changed. 89 + */ 90 + int (*map)(struct irq_host *h, unsigned int virq, 91 + irq_hw_number_t hw, unsigned int flags); 92 + 93 + /* Dispose of such a mapping */ 94 + void (*unmap)(struct irq_host *h, unsigned int virq); 95 + 96 + /* Translate device-tree interrupt specifier from raw format coming 97 + * from the firmware to a irq_hw_number_t (interrupt line number) and 98 + * trigger flags that can be passed to irq_create_mapping(). 
99 + * If no translation is provided, raw format is assumed to be one cell 100 + * for interrupt line and default sense. 101 + */ 102 + int (*xlate)(struct irq_host *h, struct device_node *ctrler, 103 + u32 *intspec, unsigned int intsize, 104 + irq_hw_number_t *out_hwirq, unsigned int *out_flags); 105 + }; 106 + 107 + struct irq_host { 108 + struct list_head link; 109 + 110 + /* type of reverse mapping technique */ 111 + unsigned int revmap_type; 112 + #define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */ 113 + #define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */ 114 + #define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */ 115 + #define IRQ_HOST_MAP_TREE 3 /* radix tree */ 116 + union { 117 + struct { 118 + unsigned int size; 119 + unsigned int *revmap; 120 + } linear; 121 + struct radix_tree_root tree; 122 + } revmap_data; 123 + struct irq_host_ops *ops; 124 + void *host_data; 125 + irq_hw_number_t inval_irq; 126 + }; 127 + 128 + /* The main irq map itself is an array of NR_IRQ entries containing the 129 + * associate host and irq number. An entry with a host of NULL is free. 130 + * An entry can be allocated if it's free, the allocator always then sets 131 + * hwirq first to the host's invalid irq number and then fills ops. 132 + */ 133 + struct irq_map_entry { 134 + irq_hw_number_t hwirq; 135 + struct irq_host *host; 136 + }; 137 + 138 + extern struct irq_map_entry irq_map[NR_IRQS]; 139 + 140 + 141 + /*** 142 + * irq_alloc_host - Allocate a new irq_host data structure 143 + * @node: device-tree node of the interrupt controller 144 + * @revmap_type: type of reverse mapping to use 145 + * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map 146 + * @ops: map/unmap host callbacks 147 + * @inval_irq: provide a hw number in that host space that is always invalid 148 + * 149 + * Allocates and initialize and irq_host structure. 
Note that in the case of 150 + * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns 151 + * for all legacy interrupts except 0 (which is always the invalid irq for 152 + * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by 153 + * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated 154 + * later during boot automatically (the reverse mapping will use the slow path 155 + * until that happens). 156 + */ 157 + extern struct irq_host *irq_alloc_host(unsigned int revmap_type, 158 + unsigned int revmap_arg, 159 + struct irq_host_ops *ops, 160 + irq_hw_number_t inval_irq); 161 + 162 + 163 + /*** 164 + * irq_find_host - Locates a host for a given device node 165 + * @node: device-tree node of the interrupt controller 166 + */ 167 + extern struct irq_host *irq_find_host(struct device_node *node); 168 + 169 + 170 + /*** 171 + * irq_set_default_host - Set a "default" host 172 + * @host: default host pointer 173 + * 174 + * For convenience, it's possible to set a "default" host that will be used 175 + * whenever NULL is passed to irq_create_mapping(). It makes life easier for 176 + * platforms that want to manipulate a few hard coded interrupt numbers that 177 + * aren't properly represented in the device-tree. 
178 + */ 179 + extern void irq_set_default_host(struct irq_host *host); 180 + 181 + 182 + /*** 183 + * irq_set_virq_count - Set the maximum number of virt irqs 184 + * @count: number of linux virtual irqs, capped with NR_IRQS 185 + * 186 + * This is mainly for use by platforms like iSeries who want to program 187 + * the virtual irq number in the controller to avoid the reverse mapping 188 + */ 189 + extern void irq_set_virq_count(unsigned int count); 190 + 191 + 192 + /*** 193 + * irq_create_mapping - Map a hardware interrupt into linux virq space 194 + * @host: host owning this hardware interrupt or NULL for default host 195 + * @hwirq: hardware irq number in that host space 196 + * @flags: flags passed to the controller. contains the trigger type among 197 + * others. Use IRQ_TYPE_* defined in include/linux/irq.h 198 + * 199 + * Only one mapping per hardware interrupt is permitted. Returns a linux 200 + * virq number. The flags can be used to provide sense information to the 201 + * controller (typically extracted from the device-tree). If no information 202 + * is passed, the controller defaults will apply (for example, xics can only 203 + * do edge so flags are irrelevant for some pseries specific irqs). 204 + * 205 + * The device-tree generally contains the trigger info in an encoding that is 206 + * specific to a given type of controller. In that case, you can directly use 207 + * host->ops->trigger_xlate() to translate that. 208 + * 209 + * It is recommended that new PICs that don't have existing OF bindings chose 210 + * to use a representation of triggers identical to linux. 
211 + */ 212 + extern unsigned int irq_create_mapping(struct irq_host *host, 213 + irq_hw_number_t hwirq, 214 + unsigned int flags); 215 + 216 + 217 + /*** 218 + * irq_dispose_mapping - Unmap an interrupt 219 + * @virq: linux virq number of the interrupt to unmap 220 + */ 221 + extern void irq_dispose_mapping(unsigned int virq); 222 + 223 + /*** 224 + * irq_find_mapping - Find a linux virq from an hw irq number. 225 + * @host: host owning this hardware interrupt 226 + * @hwirq: hardware irq number in that host space 227 + * 228 + * This is a slow path, for use by generic code. It's expected that an 229 + * irq controller implementation directly calls the appropriate low level 230 + * mapping function. 231 + */ 232 + extern unsigned int irq_find_mapping(struct irq_host *host, 233 + irq_hw_number_t hwirq); 234 + 235 + 236 + /*** 237 + * irq_radix_revmap - Find a linux virq from a hw irq number. 238 + * @host: host owning this hardware interrupt 239 + * @hwirq: hardware irq number in that host space 240 + * 241 + * This is a fast path, for use by irq controller code that uses radix tree 242 + * revmaps 243 + */ 244 + extern unsigned int irq_radix_revmap(struct irq_host *host, 245 + irq_hw_number_t hwirq); 246 + 247 + /*** 248 + * irq_linear_revmap - Find a linux virq from a hw irq number. 249 + * @host: host owning this hardware interrupt 250 + * @hwirq: hardware irq number in that host space 251 + * 252 + * This is a fast path, for use by irq controller code that uses linear 253 + * revmaps. 
It does fallback to the slow path if the revmap doesn't exist 254 + * yet and will create the revmap entry with appropriate locking 255 + */ 256 + 257 + extern unsigned int irq_linear_revmap(struct irq_host *host, 258 + irq_hw_number_t hwirq); 259 + 260 + 261 + 262 + /*** 263 + * irq_alloc_virt - Allocate virtual irq numbers 264 + * @host: host owning these new virtual irqs 265 + * @count: number of consecutive numbers to allocate 266 + * @hint: pass a hint number, the allocator will try to use a 1:1 mapping 267 + * 268 + * This is a low level function that is used internally by irq_create_mapping() 269 + * and that can be used by some irq controllers implementations for things 270 + * like allocating ranges of numbers for MSIs. The revmaps are left untouched. 271 + */ 272 + extern unsigned int irq_alloc_virt(struct irq_host *host, 273 + unsigned int count, 274 + unsigned int hint); 275 + 276 + /*** 277 + * irq_free_virt - Free virtual irq numbers 278 + * @virq: virtual irq number of the first interrupt to free 279 + * @count: number of interrupts to free 280 + * 281 + * This function is the opposite of irq_alloc_virt. It will not clear reverse 282 + * maps, this should be done previously by unmap'ing the interrupt. In fact, 283 + * all interrupts covered by the range being freed should have been unmapped 284 + * prior to calling this. 
285 + */ 286 + extern void irq_free_virt(unsigned int virq, unsigned int count); 287 + 288 + 289 + /* -- OF helpers -- */ 290 + 291 + /* irq_create_of_mapping - Map a hardware interrupt into linux virq space 292 + * @controller: Device node of the interrupt controller 293 + * @inspec: Interrupt specifier from the device-tree 294 + * @intsize: Size of the interrupt specifier from the device-tree 295 + * 296 + * This function is identical to irq_create_mapping except that it takes 297 + * as input informations straight from the device-tree (typically the results 298 + * of the of_irq_map_*() functions 299 + */ 300 + extern unsigned int irq_create_of_mapping(struct device_node *controller, 301 + u32 *intspec, unsigned int intsize); 302 + 303 + 304 + /* irq_of_parse_and_map - Parse nad Map an interrupt into linux virq space 305 + * @device: Device node of the device whose interrupt is to be mapped 306 + * @index: Index of the interrupt to map 307 + * 308 + * This function is a wrapper that chains of_irq_map_one() and 309 + * irq_create_of_mapping() to make things easier to callers 310 + */ 311 + extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index); 312 + 313 + /* -- End OF helpers -- */ 314 + 315 + /*** 316 + * irq_early_init - Init irq remapping subsystem 317 + */ 318 + extern void irq_early_init(void); 319 + 320 + static __inline__ int irq_canonicalize(int irq) 321 + { 322 + return irq; 323 + } 324 + 325 + 326 + #else /* CONFIG_PPC_MERGE */ 327 + 328 + /* This number is used when no interrupt has been assigned */ 18 329 #define NO_IRQ (-1) 330 + #define NO_IRQ_IGNORE (-2) 331 + 19 332 20 333 /* 21 334 * These constants are used for passing information about interrupt ··· 343 30 #define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ 344 31 #define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ 345 32 346 - #define get_irq_desc(irq) (&irq_desc[(irq)]) 347 - 348 - /* Define a way to iterate across irqs. 
*/ 349 - #define for_each_irq(i) \ 350 - for ((i) = 0; (i) < NR_IRQS; ++(i)) 351 - 352 - #ifdef CONFIG_PPC64 353 - 354 - /* 355 - * Maximum number of interrupt sources that we can handle. 356 - */ 357 - #define NR_IRQS 512 358 - 359 - /* Interrupt numbers are virtual in case they are sparsely 360 - * distributed by the hardware. 361 - */ 362 - extern unsigned int virt_irq_to_real_map[NR_IRQS]; 363 - 364 - /* The maximum virtual IRQ number that we support. This 365 - * can be set by the platform and will be reduced by the 366 - * value of __irq_offset_value. It defaults to and is 367 - * capped by (NR_IRQS - 1). 368 - */ 369 - extern unsigned int virt_irq_max; 370 - 371 - /* Create a mapping for a real_irq if it doesn't already exist. 372 - * Return the virtual irq as a convenience. 373 - */ 374 - int virt_irq_create_mapping(unsigned int real_irq); 375 - void virt_irq_init(void); 376 - 377 - static inline unsigned int virt_irq_to_real(unsigned int virt_irq) 378 - { 379 - return virt_irq_to_real_map[virt_irq]; 380 - } 381 - 382 - extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); 383 - 384 - /* 385 - * List of interrupt controllers. 
386 - */ 387 - #define IC_INVALID 0 388 - #define IC_OPEN_PIC 1 389 - #define IC_PPC_XIC 2 390 - #define IC_CELL_PIC 3 391 - #define IC_ISERIES 4 392 - 393 - extern u64 ppc64_interrupt_controller; 394 - 395 - #else /* 32-bit */ 396 33 397 34 #if defined(CONFIG_40x) 398 35 #include <asm/ibm4xx.h> ··· 775 512 776 513 #endif /* CONFIG_8260 */ 777 514 778 - #endif 515 + #endif /* Whatever way too big #ifdef */ 779 516 780 - #ifndef CONFIG_PPC_MERGE 781 517 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 782 518 /* pedantic: these are long because they are used with set_bit --RR */ 783 519 extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 784 - #endif 785 - 786 - extern atomic_t ppc_n_lost_interrupts; 787 - 788 - #define virt_irq_create_mapping(x) (x) 789 - 790 - #endif 791 520 792 521 /* 793 522 * Because many systems have two overlapping names spaces for ··· 818 563 irq = 9; 819 564 return irq; 820 565 } 566 + #endif /* CONFIG_PPC_MERGE */ 821 567 822 568 extern int distribute_irqs; 823 569
+1 -1
include/asm-powerpc/machdep.h
··· 97 97 void (*show_percpuinfo)(struct seq_file *m, int i); 98 98 99 99 void (*init_IRQ)(void); 100 - int (*get_irq)(struct pt_regs *); 100 + unsigned int (*get_irq)(struct pt_regs *); 101 101 #ifdef CONFIG_KEXEC 102 102 void (*kexec_cpu_down)(int crash_shutdown, int secondary); 103 103 #endif
+27 -15
include/asm-powerpc/mpic.h
··· 129 129 /* The instance data of a given MPIC */ 130 130 struct mpic 131 131 { 132 + /* The device node of the interrupt controller */ 133 + struct device_node *of_node; 134 + 135 + /* The remapper for this MPIC */ 136 + struct irq_host *irqhost; 137 + 132 138 /* The "linux" controller struct */ 133 139 struct irq_chip hc_irq; 134 140 #ifdef CONFIG_MPIC_BROKEN_U3 ··· 150 144 unsigned int isu_size; 151 145 unsigned int isu_shift; 152 146 unsigned int isu_mask; 153 - /* Offset of irq vector numbers */ 154 - unsigned int irq_offset; 155 147 unsigned int irq_count; 156 - /* Offset of ipi vector numbers */ 157 - unsigned int ipi_offset; 158 148 /* Number of sources */ 159 149 unsigned int num_sources; 160 150 /* Number of CPUs */ 161 151 unsigned int num_cpus; 162 - /* senses array */ 152 + /* default senses array */ 163 153 unsigned char *senses; 164 154 unsigned int senses_count; 165 155 ··· 211 209 * The values in the array start at the first source of the MPIC, 212 210 * that is senses[0] correspond to linux irq "irq_offset". 213 211 */ 214 - extern struct mpic *mpic_alloc(unsigned long phys_addr, 212 + extern struct mpic *mpic_alloc(struct device_node *node, 213 + unsigned long phys_addr, 215 214 unsigned int flags, 216 215 unsigned int isu_size, 217 - unsigned int irq_offset, 218 216 unsigned int irq_count, 219 - unsigned int ipi_offset, 220 - unsigned char *senses, 221 - unsigned int senses_num, 222 217 const char *name); 223 218 224 219 /* Assign ISUs, to call before mpic_init() ··· 226 227 */ 227 228 extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, 228 229 unsigned long phys_addr); 230 + 231 + /* Set default sense codes 232 + * 233 + * @mpic: controller 234 + * @senses: array of sense codes 235 + * @count: size of above array 236 + * 237 + * Optionally provide an array (indexed on hardware interrupt numbers 238 + * for this MPIC) of default sense codes for the chip. 
Those are linux 239 + * sense codes IRQ_TYPE_* 240 + * 241 + * The driver gets ownership of the pointer, don't dispose of it or 242 + * anything like that. __init only. 243 + */ 244 + extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count); 245 + 229 246 230 247 /* Initialize the controller. After this has been called, none of the above 231 248 * should be called again for this mpic ··· 284 269 void smp_mpic_message_pass(int target, int msg); 285 270 286 271 /* Fetch interrupt from a given mpic */ 287 - extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); 272 + extern unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); 288 273 /* This one gets to the primary mpic */ 289 - extern int mpic_get_irq(struct pt_regs *regs); 274 + extern unsigned int mpic_get_irq(struct pt_regs *regs); 290 275 291 276 /* Set the EPIC clock ratio */ 292 277 void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); 293 278 294 279 /* Enable/Disable EPIC serial interrupt mode */ 295 280 void mpic_set_serial_int(struct mpic *mpic, int enable); 296 - 297 - /* global mpic for pSeries */ 298 - extern struct mpic *pSeries_mpic; 299 281 300 282 #endif /* __KERNEL__ */ 301 283 #endif /* _ASM_POWERPC_MPIC_H */
-7
include/asm-powerpc/prom.h
··· 64 64 typedef u32 phandle; 65 65 typedef u32 ihandle; 66 66 67 - struct interrupt_info { 68 - int line; 69 - int sense; /* +ve/-ve logic, edge or level, etc. */ 70 - }; 71 - 72 67 struct property { 73 68 char *name; 74 69 int length; ··· 76 81 char *type; 77 82 phandle node; 78 83 phandle linux_phandle; 79 - int n_intrs; 80 - struct interrupt_info *intrs; 81 84 char *full_name; 82 85 83 86 struct property *properties;
+1
include/asm-powerpc/spu.h
··· 117 117 struct list_head sched_list; 118 118 int number; 119 119 int nid; 120 + unsigned int irqs[3]; 120 121 u32 isrc; 121 122 u32 node; 122 123 u64 flags;
+1 -6
sound/aoa/core/snd-aoa-gpio-feature.c
··· 112 112 113 113 static void get_irq(struct device_node * np, int *irqptr) 114 114 { 115 - *irqptr = -1; 116 - if (!np) 117 - return; 118 - if (np->n_intrs != 1) 119 - return; 120 - *irqptr = np->intrs[0].line; 115 + *irqptr = irq_of_parse_and_map(np, 0); 121 116 } 122 117 123 118 /* 0x4 is outenable, 0x1 is out, thus 4 or 5 */
+4 -3
sound/aoa/soundbus/i2sbus/i2sbus-core.c
··· 129 129 if (strncmp(np->name, "i2s-", 4)) 130 130 return 0; 131 131 132 - if (np->n_intrs != 3) 132 + if (macio_irq_count(macio) != 3) 133 133 return 0; 134 134 135 135 dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL); ··· 183 183 snprintf(dev->rnames[i], sizeof(dev->rnames[i]), rnames[i], np->name); 184 184 } 185 185 for (i=0;i<3;i++) { 186 - if (request_irq(np->intrs[i].line, ints[i], 0, dev->rnames[i], dev)) 186 + if (request_irq(macio_irq(macio, i), ints[i], 0, 187 + dev->rnames[i], dev)) 187 188 goto err; 188 - dev->interrupts[i] = np->intrs[i].line; 189 + dev->interrupts[i] = macio_irq(macio, i); 189 190 } 190 191 191 192 for (i=0;i<3;i++) {
+6 -10
sound/oss/dmasound/dmasound_awacs.c
··· 374 374 *gpio_pol = *pp; 375 375 else 376 376 *gpio_pol = 1; 377 - if (np->n_intrs > 0) 378 - return np->intrs[0].line; 379 - 380 - return 0; 377 + return irq_of_parse_and_map(np, 0); 381 378 } 382 379 383 380 static inline void ··· 2861 2864 * other info if necessary (early AWACS we want to read chip ids) 2862 2865 */ 2863 2866 2864 - if (of_get_address(io, 2, NULL, NULL) == NULL || io->n_intrs < 3) { 2867 + if (of_get_address(io, 2, NULL, NULL) == NULL) { 2865 2868 /* OK - maybe we need to use the 'awacs' node (on earlier 2866 2869 * machines). 2867 2870 */ 2868 2871 if (awacs_node) { 2869 2872 io = awacs_node ; 2870 - if (of_get_address(io, 2, NULL, NULL) == NULL || 2871 - io->n_intrs < 3) { 2873 + if (of_get_address(io, 2, NULL, NULL) == NULL) { 2872 2874 printk("dmasound_pmac: can't use %s\n", 2873 2875 io->full_name); 2874 2876 return -ENODEV; ··· 2936 2940 if (awacs_revision == AWACS_SCREAMER && awacs) 2937 2941 awacs_recalibrate(); 2938 2942 2939 - awacs_irq = io->intrs[0].line; 2940 - awacs_tx_irq = io->intrs[1].line; 2941 - awacs_rx_irq = io->intrs[2].line; 2943 + awacs_irq = irq_of_parse_and_map(io, 0); 2944 + awacs_tx_irq = irq_of_parse_and_map(io, 1); 2945 + awacs_rx_irq = irq_of_parse_and_map(io, 2); 2942 2946 2943 2947 /* Hack for legacy crap that will be killed someday */ 2944 2948 awacs_node = io;
+14 -19
sound/ppc/pmac.c
··· 1120 1120 struct snd_pmac *chip; 1121 1121 struct device_node *np; 1122 1122 int i, err; 1123 + unsigned int irq; 1123 1124 unsigned long ctrl_addr, txdma_addr, rxdma_addr; 1124 1125 static struct snd_device_ops ops = { 1125 1126 .dev_free = snd_pmac_dev_free, ··· 1154 1153 if (chip->is_k2) { 1155 1154 static char *rnames[] = { 1156 1155 "Sound Control", "Sound DMA" }; 1157 - if (np->n_intrs < 3) { 1158 - err = -ENODEV; 1159 - goto __error; 1160 - } 1161 1156 for (i = 0; i < 2; i ++) { 1162 1157 if (of_address_to_resource(np->parent, i, 1163 1158 &chip->rsrc[i])) { ··· 1182 1185 } else { 1183 1186 static char *rnames[] = { 1184 1187 "Sound Control", "Sound Tx DMA", "Sound Rx DMA" }; 1185 - if (np->n_intrs < 3) { 1186 - err = -ENODEV; 1187 - goto __error; 1188 - } 1189 1188 for (i = 0; i < 3; i ++) { 1190 1189 if (of_address_to_resource(np, i, 1191 1190 &chip->rsrc[i])) { ··· 1213 1220 chip->playback.dma = ioremap(txdma_addr, 0x100); 1214 1221 chip->capture.dma = ioremap(rxdma_addr, 0x100); 1215 1222 if (chip->model <= PMAC_BURGUNDY) { 1216 - if (request_irq(np->intrs[0].line, snd_pmac_ctrl_intr, 0, 1223 + irq = irq_of_parse_and_map(np, 0); 1224 + if (request_irq(irq, snd_pmac_ctrl_intr, 0, 1217 1225 "PMac", (void*)chip)) { 1218 - snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[0].line); 1226 + snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", 1227 + irq); 1219 1228 err = -EBUSY; 1220 1229 goto __error; 1221 1230 } 1222 - chip->irq = np->intrs[0].line; 1231 + chip->irq = irq; 1223 1232 } 1224 - if (request_irq(np->intrs[1].line, snd_pmac_tx_intr, 0, 1225 - "PMac Output", (void*)chip)) { 1226 - snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[1].line); 1233 + irq = irq_of_parse_and_map(np, 1); 1234 + if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){ 1235 + snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); 1227 1236 err = -EBUSY; 1228 1237 goto __error; 1229 1238 } 1230 - chip->tx_irq = 
np->intrs[1].line; 1231 - if (request_irq(np->intrs[2].line, snd_pmac_rx_intr, 0, 1232 - "PMac Input", (void*)chip)) { 1233 - snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[2].line); 1239 + chip->tx_irq = irq; 1240 + irq = irq_of_parse_and_map(np, 2); 1241 + if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) { 1242 + snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); 1234 1243 err = -EBUSY; 1235 1244 goto __error; 1236 1245 } 1237 - chip->rx_irq = np->intrs[2].line; 1246 + chip->rx_irq = irq; 1238 1247 1239 1248 snd_pmac_sound_feature(chip, 1); 1240 1249
+4 -4
sound/ppc/tumbler.c
··· 1121 1121 DBG("(I) GPIO device %s found, offset: %x, active state: %d !\n", 1122 1122 device, gp->addr, gp->active_state); 1123 1123 1124 - return (node->n_intrs > 0) ? node->intrs[0].line : 0; 1124 + return irq_of_parse_and_map(node, 0); 1125 1125 } 1126 1126 1127 1127 /* reset audio */ ··· 1264 1264 &mix->line_mute, 1); 1265 1265 irq = tumbler_find_device("headphone-detect", 1266 1266 NULL, &mix->hp_detect, 0); 1267 - if (irq < 0) 1267 + if (irq <= NO_IRQ) 1268 1268 irq = tumbler_find_device("headphone-detect", 1269 1269 NULL, &mix->hp_detect, 1); 1270 - if (irq < 0) 1270 + if (irq <= NO_IRQ) 1271 1271 irq = tumbler_find_device("keywest-gpio15", 1272 1272 NULL, &mix->hp_detect, 1); 1273 1273 mix->headphone_irq = irq; 1274 1274 irq = tumbler_find_device("line-output-detect", 1275 1275 NULL, &mix->line_detect, 0); 1276 - if (irq < 0) 1276 + if (irq <= NO_IRQ) 1277 1277 irq = tumbler_find_device("line-output-detect", 1278 1278 NULL, &mix->line_detect, 1); 1279 1279 mix->lineout_irq = irq;