[SPARC64]: Add support for IRQ pre-handlers.

This allows a PCI controller to shim into IRQ delivery
so that DMA queues can be drained, if necessary.

If some bus specific code needs to run before an IRQ
handler is invoked, the bus driver simply needs to set up
the function pointer in bucket->irq_info->pre_handler and
the two args bucket->irq_info->pre_handler_arg1 and
bucket->irq_info->pre_handler_arg2.
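
For example, a bus driver would do something like the sketch
below (my_bus_wsync_handler and sync_reg are just illustrative
names here; the Sabre conversion in the diff follows this same
pattern):

	static void my_bus_wsync_handler(struct ino_bucket *bucket,
					 void *arg1, void *arg2)
	{
		/* Flush/sync pending DMA before the irqaction
		 * handlers for this bucket are invoked.
		 */
	}

	/* After build_irq() has created bucket->irq_info: */
	struct ino_bucket *bucket = __bucket(irq);
	struct irq_desc *desc = bucket->irq_info;

	desc->pre_handler = my_bus_wsync_handler;
	desc->pre_handler_arg1 = pdev;
	desc->pre_handler_arg2 = (void *) sync_reg;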

The Sabre PCI controller driver is converted over to use
a pre-handler for the DMA write-sync processing it needs
when a device sits behind a PCI->PCI bridge deeper in the
bus topology than the top-level APB bridges.

While we're here, clean up all of the action allocation
and handling. Now, we allocate the irqactions as part of
the bucket->irq_info area. There is an array of four
irqaction entries (for PCI IRQ sharing) and a bitmask
saying which entries are active.
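
Concretely, the new descriptor hanging off each bucket (see the
include/asm-sparc64/irq.h hunk below) is:

	#define MAX_IRQ_DESC_ACTION	4

	struct irq_desc {
		void		(*pre_handler)(struct ino_bucket *, void *, void *);
		void		*pre_handler_arg1;
		void		*pre_handler_arg2;
		u32		action_active_mask;
		struct irqaction action[MAX_IRQ_DESC_ACTION];
	};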

The bucket->irq_info is allocated at build_irq() time, not
at request_irq() time. This simplifies request_irq() and
free_irq() tremendously.
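
build_irq() now allocates the descriptor roughly like this (the
full version, including the reinit checks, is in the irq.c hunk
below):

	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}
	memset(bucket->irq_info, 0, sizeof(struct irq_desc));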

The SMP dynamic IRQ retargeting code got removed in this
change too. It has been disabled for a few months now, and
we can resurrect it in the future if we want.

Signed-off-by: David S. Miller <davem@davemloft.net>

+248 -468
+3 -18
arch/sparc64/kernel/entry.S
··· 553 sllx %g3, 5, %g3 554 or %g2, %lo(ivector_table), %g2 555 add %g2, %g3, %g3 556 - ldx [%g3 + 0x08], %g2 /* irq_info */ 557 ldub [%g3 + 0x04], %g4 /* pil */ 558 - brz,pn %g2, do_ivec_spurious 559 - mov 1, %g2 560 - 561 sllx %g2, %g4, %g2 562 sllx %g4, 2, %g4 563 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ 564 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ 565 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ ··· 565 retry 566 do_ivec_xcall: 567 mov 0x50, %g1 568 - 569 ldxa [%g1 + %g0] ASI_INTR_R, %g1 570 srl %g3, 0, %g3 571 mov 0x60, %g7 572 ldxa [%g7 + %g0] ASI_INTR_R, %g7 573 stxa %g0, [%g0] ASI_INTR_RECEIVE ··· 578 .align 32 579 1: jmpl %g3, %g0 580 nop 581 - 582 - do_ivec_spurious: 583 - stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */ 584 - rdpr %pstate, %g5 585 - 586 - wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate 587 - sethi %hi(109f), %g7 588 - ba,pt %xcc, etrap 589 - 109: or %g7, %lo(109b), %g7 590 - call catch_disabled_ivec 591 - add %sp, PTREGS_OFF, %o0 592 - ba,pt %xcc, rtrap 593 - clr %l6 594 595 .globl save_alternate_globals 596 save_alternate_globals: /* %o0 = save_area */
··· 553 sllx %g3, 5, %g3 554 or %g2, %lo(ivector_table), %g2 555 add %g2, %g3, %g3 556 ldub [%g3 + 0x04], %g4 /* pil */ 557 + mov 1, %g2 558 sllx %g2, %g4, %g2 559 sllx %g4, 2, %g4 560 + 561 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ 562 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ 563 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ ··· 567 retry 568 do_ivec_xcall: 569 mov 0x50, %g1 570 ldxa [%g1 + %g0] ASI_INTR_R, %g1 571 srl %g3, 0, %g3 572 + 573 mov 0x60, %g7 574 ldxa [%g7 + %g0] ASI_INTR_R, %g7 575 stxa %g0, [%g0] ASI_INTR_RECEIVE ··· 580 .align 32 581 1: jmpl %g3, %g0 582 nop 583 584 .globl save_alternate_globals 585 save_alternate_globals: /* %o0 = save_area */
+199 -386
arch/sparc64/kernel/irq.c
··· 71 struct irq_work_struct __irq_work[NR_CPUS]; 72 #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)]) 73 74 - #ifdef CONFIG_PCI 75 - /* This is a table of physical addresses used to deal with IBF_DMA_SYNC. 76 - * It is used for PCI only to synchronize DMA transfers with IRQ delivery 77 - * for devices behind busses other than APB on Sabre systems. 78 - * 79 - * Currently these physical addresses are just config space accesses 80 - * to the command register for that device. 81 - */ 82 - unsigned long pci_dma_wsync; 83 - unsigned long dma_sync_reg_table[256]; 84 - unsigned char dma_sync_reg_table_entry = 0; 85 - #endif 86 - 87 - /* This is based upon code in the 32-bit Sparc kernel written mostly by 88 - * David Redman (djhr@tadpole.co.uk). 89 - */ 90 - #define MAX_STATIC_ALLOC 4 91 - static struct irqaction static_irqaction[MAX_STATIC_ALLOC]; 92 - static int static_irq_count; 93 - 94 - /* This is exported so that fast IRQ handlers can get at it... -DaveM */ 95 - struct irqaction *irq_action[NR_IRQS+1] = { 96 - NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, 97 - NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL 98 - }; 99 100 /* This only synchronizes entities which modify IRQ handler 101 * state and some selected user-level spots that want to ··· 217 * the CPU %tick register and not by some normal vectored interrupt 218 * source. To handle this special case, we use this dummy INO bucket. 219 */ 220 static struct ino_bucket pil0_dummy_bucket = { 221 - 0, /* irq_chain */ 222 - 0, /* pil */ 223 - 0, /* pending */ 224 - 0, /* flags */ 225 - 0, /* __unused */ 226 - NULL, /* irq_info */ 227 - 0UL, /* iclr */ 228 - 0UL, /* imap */ 229 }; 230 231 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap) 232 { ··· 261 prom_halt(); 262 } 263 264 /* Ok, looks good, set it up. Don't touch the irq_chain or 265 * the pending flag. 266 */ 267 - bucket = &ivector_table[ino]; 268 - if ((bucket->flags & IBF_ACTIVE) || 269 - (bucket->irq_info != NULL)) { 270 - /* This is a gross fatal error if it happens here. 
*/ 271 - prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n"); 272 - prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n", 273 - ino, pil, inofixup, iclr, imap); 274 - prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n", 275 - bucket->pil, bucket->iclr, bucket->imap); 276 - prom_printf("IRQ: Cannot continue, halting...\n"); 277 - prom_halt(); 278 - } 279 bucket->imap = imap; 280 bucket->iclr = iclr; 281 bucket->pil = pil; 282 bucket->flags = 0; 283 284 - bucket->irq_info = NULL; 285 - 286 return __irq(bucket); 287 } 288 ··· 307 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); 308 } 309 310 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), 311 unsigned long irqflags, const char *name, void *dev_id) 312 { 313 - struct irqaction *action, *tmp = NULL; 314 struct ino_bucket *bucket = __bucket(irq); 315 unsigned long flags; 316 int pending = 0; 317 318 - if ((bucket != &pil0_dummy_bucket) && 319 - (bucket < &ivector_table[0] || 320 - bucket >= &ivector_table[NUM_IVECS])) { 321 - unsigned int *caller; 322 - 323 - __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); 324 - printk(KERN_CRIT "request_irq: Old style IRQ registry attempt " 325 - "from %p, irq %08x.\n", caller, irq); 326 return -EINVAL; 327 - } 328 - if (!handler) 329 - return -EINVAL; 330 331 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) { 332 /* ··· 383 384 spin_lock_irqsave(&irq_action_lock, flags); 385 386 - action = *(bucket->pil + irq_action); 387 - if (action) { 388 - if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) 389 - for (tmp = action; tmp->next; tmp = tmp->next) 390 - ; 391 - else { 392 - spin_unlock_irqrestore(&irq_action_lock, flags); 393 - return -EBUSY; 394 - } 395 - action = NULL; /* Or else! */ 396 } 397 398 - /* If this is flagged as statically allocated then we use our 399 - * private struct which is never freed. 400 - */ 401 - if (irqflags & SA_STATIC_ALLOC) { 402 - if (static_irq_count < MAX_STATIC_ALLOC) 403 - action = &static_irqaction[static_irq_count++]; 404 - else 405 - printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed " 406 - "using kmalloc\n", irq, name); 407 - } 408 - if (action == NULL) 409 - action = (struct irqaction *)kmalloc(sizeof(struct irqaction), 410 - GFP_ATOMIC); 411 - 412 if (!action) { 413 spin_unlock_irqrestore(&irq_action_lock, flags); 414 return -ENOMEM; 415 } 416 417 - if (bucket == &pil0_dummy_bucket) { 418 - bucket->irq_info = action; 419 - bucket->flags |= IBF_ACTIVE; 420 - } else { 421 - if ((bucket->flags & IBF_ACTIVE) != 0) { 422 - void *orig = bucket->irq_info; 423 - void **vector = NULL; 424 - 425 - if ((bucket->flags & IBF_PCI) == 0) { 426 - printk("IRQ: Trying to share non-PCI bucket.\n"); 427 - goto free_and_ebusy; 428 - } 429 - if ((bucket->flags & IBF_MULTI) == 0) { 430 - vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC); 431 - if (vector == NULL) 432 - goto free_and_enomem; 433 - 434 - /* We might have slept. 
*/ 435 - if ((bucket->flags & IBF_MULTI) != 0) { 436 - int ent; 437 - 438 - kfree(vector); 439 - vector = (void **)bucket->irq_info; 440 - for(ent = 0; ent < 4; ent++) { 441 - if (vector[ent] == NULL) { 442 - vector[ent] = action; 443 - break; 444 - } 445 - } 446 - if (ent == 4) 447 - goto free_and_ebusy; 448 - } else { 449 - vector[0] = orig; 450 - vector[1] = action; 451 - vector[2] = NULL; 452 - vector[3] = NULL; 453 - bucket->irq_info = vector; 454 - bucket->flags |= IBF_MULTI; 455 - } 456 - } else { 457 - int ent; 458 - 459 - vector = (void **)orig; 460 - for (ent = 0; ent < 4; ent++) { 461 - if (vector[ent] == NULL) { 462 - vector[ent] = action; 463 - break; 464 - } 465 - } 466 - if (ent == 4) 467 - goto free_and_ebusy; 468 - } 469 - } else { 470 - bucket->irq_info = action; 471 - bucket->flags |= IBF_ACTIVE; 472 - } 473 pending = bucket->pending; 474 if (pending) 475 bucket->pending = 0; ··· 410 put_ino_in_irqaction(action, irq); 411 put_smpaff_in_irqaction(action, CPU_MASK_NONE); 412 413 - if (tmp) 414 - tmp->next = action; 415 - else 416 - *(bucket->pil + irq_action) = action; 417 418 enable_irq(irq); 419 ··· 419 atomic_bucket_insert(bucket); 420 set_softint(1 << bucket->pil); 421 } 422 spin_unlock_irqrestore(&irq_action_lock, flags); 423 - if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC))) 424 register_irq_proc(__irq_ino(irq)); 425 426 #ifdef CONFIG_SMP 427 distribute_irqs(); 428 #endif 429 return 0; 430 - 431 - free_and_ebusy: 432 - kfree(action); 433 - spin_unlock_irqrestore(&irq_action_lock, flags); 434 - return -EBUSY; 435 - 436 - free_and_enomem: 437 - kfree(action); 438 - spin_unlock_irqrestore(&irq_action_lock, flags); 439 - return -ENOMEM; 440 } 441 442 EXPORT_SYMBOL(request_irq); 443 444 void free_irq(unsigned int irq, void *dev_id) 445 { 446 struct irqaction *action; 447 - struct irqaction *tmp = NULL; 448 unsigned long flags; 449 - struct ino_bucket *bucket = __bucket(irq), *bp; 450 451 - if ((bucket != &pil0_dummy_bucket) && 452 - (bucket < &ivector_table[0] || 453 - bucket >= &ivector_table[NUM_IVECS])) { 454 - unsigned int *caller; 455 - 456 - __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); 457 - printk(KERN_CRIT "free_irq: Old style IRQ removal attempt " 458 - "from %p, irq %08x.\n", caller, irq); 459 - return; 460 - } 461 - 462 spin_lock_irqsave(&irq_action_lock, flags); 463 464 - action = *(bucket->pil + irq_action); 465 - if (!action->handler) { 466 - printk("Freeing free IRQ %d\n", bucket->pil); 467 - return; 468 - } 469 - if (dev_id) { 470 - for ( ; action; action = action->next) { 471 - if (action->dev_id == dev_id) 472 - break; 473 - tmp = action; 474 - } 475 - if (!action) { 476 - printk("Trying to free free shared IRQ %d\n", bucket->pil); 477 - spin_unlock_irqrestore(&irq_action_lock, flags); 478 - return; 479 - } 480 - } else if (action->flags & SA_SHIRQ) { 481 - printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil); 482 - spin_unlock_irqrestore(&irq_action_lock, flags); 483 - return; 484 - } 485 - 486 - if (action->flags & SA_STATIC_ALLOC) { 487 - printk("Attempt to free statically allocated IRQ %d (%s)\n", 488 - bucket->pil, action->name); 489 - spin_unlock_irqrestore(&irq_action_lock, flags); 490 - return; 491 - } 492 - 493 - if (action && tmp) 494 - tmp->next = action->next; 495 - else 496 - *(bucket->pil + irq_action) = action->next; 497 498 spin_unlock_irqrestore(&irq_action_lock, flags); 499 500 synchronize_irq(irq); 501 502 spin_lock_irqsave(&irq_action_lock, flags); 503 504 if (bucket != 
&pil0_dummy_bucket) { 505 unsigned long imap = bucket->imap; 506 - void **vector, *orig; 507 - int ent; 508 509 - orig = bucket->irq_info; 510 - vector = (void **)orig; 511 512 - if ((bucket->flags & IBF_MULTI) != 0) { 513 - int other = 0; 514 - void *orphan = NULL; 515 - for (ent = 0; ent < 4; ent++) { 516 - if (vector[ent] == action) 517 - vector[ent] = NULL; 518 - else if (vector[ent] != NULL) { 519 - orphan = vector[ent]; 520 - other++; 521 - } 522 - } 523 - 524 - /* Only free when no other shared irq 525 - * uses this bucket. 526 - */ 527 - if (other) { 528 - if (other == 1) { 529 - /* Convert back to non-shared bucket. */ 530 - bucket->irq_info = orphan; 531 - bucket->flags &= ~(IBF_MULTI); 532 - kfree(vector); 533 - } 534 - goto out; 535 - } 536 - } else { 537 - bucket->irq_info = NULL; 538 - } 539 - 540 - /* This unique interrupt source is now inactive. */ 541 - bucket->flags &= ~IBF_ACTIVE; 542 - 543 - /* See if any other buckets share this bucket's IMAP 544 - * and are still active. 545 - */ 546 - for (ent = 0; ent < NUM_IVECS; ent++) { 547 - bp = &ivector_table[ent]; 548 - if (bp != bucket && 549 - bp->imap == imap && 550 - (bp->flags & IBF_ACTIVE) != 0) 551 break; 552 } 553 554 - /* Only disable when no other sub-irq levels of 555 - * the same IMAP are active. 556 - */ 557 - if (ent == NUM_IVECS) 558 - disable_irq(irq); 559 } 560 561 - out: 562 - kfree(action); 563 spin_unlock_irqrestore(&irq_action_lock, flags); 564 } 565 ··· 554 } 555 #endif /* CONFIG_SMP */ 556 557 - void catch_disabled_ivec(struct pt_regs *regs) 558 { 559 - int cpu = smp_processor_id(); 560 - struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0)); 561 562 - /* We can actually see this on Ultra/PCI PCI cards, which are bridges 563 - * to other devices. Here a single IMAP enabled potentially multiple 564 - * unique interrupt sources (which each do have a unique ICLR register. 565 - * 566 - * So what we do is just register that the IVEC arrived, when registered 567 - * for real the request_irq() code will check the bit and signal 568 - * a local CPU interrupt for it. 569 - */ 570 - #if 0 571 - printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n", 572 - bucket - &ivector_table[0], regs->tpc); 573 - #endif 574 - *irq_work(cpu, 0) = 0; 575 - bucket->pending = 1; 576 - } 577 578 - /* Tune this... */ 579 - #define FORWARD_VOLUME 12 580 - 581 - #ifdef CONFIG_SMP 582 - 583 - static inline void redirect_intr(int cpu, struct ino_bucket *bp) 584 - { 585 - /* Ok, here is what is going on: 586 - * 1) Retargeting IRQs on Starfire is very 587 - * expensive so just forget about it on them. 588 - * 2) Moving around very high priority interrupts 589 - * is a losing game. 590 - * 3) If the current cpu is idle, interrupts are 591 - * useful work, so keep them here. But do not 592 - * pass to our neighbour if he is not very idle. 593 - * 4) If sysadmin explicitly asks for directed intrs, 594 - * Just Do It. 595 - */ 596 - struct irqaction *ap = bp->irq_info; 597 - cpumask_t cpu_mask; 598 - unsigned int buddy, ticks; 599 - 600 - cpu_mask = get_smpaff_in_irqaction(ap); 601 - cpus_and(cpu_mask, cpu_mask, cpu_online_map); 602 - if (cpus_empty(cpu_mask)) 603 - cpu_mask = cpu_online_map; 604 - 605 - if (this_is_starfire != 0 || 606 - bp->pil >= 10 || current->pid == 0) 607 goto out; 608 - 609 - /* 'cpu' is the MID (ie. UPAID), calculate the MID 610 - * of our buddy. 
611 - */ 612 - buddy = cpu + 1; 613 - if (buddy >= NR_CPUS) 614 - buddy = 0; 615 - 616 - ticks = 0; 617 - while (!cpu_isset(buddy, cpu_mask)) { 618 - if (++buddy >= NR_CPUS) 619 - buddy = 0; 620 - if (++ticks > NR_CPUS) { 621 - put_smpaff_in_irqaction(ap, CPU_MASK_NONE); 622 - goto out; 623 - } 624 } 625 626 - if (buddy == cpu) 627 - goto out; 628 629 - /* Voo-doo programming. */ 630 - if (cpu_data(buddy).idle_volume < FORWARD_VOLUME) 631 - goto out; 632 633 - /* This just so happens to be correct on Cheetah 634 - * at the moment. 635 - */ 636 - buddy <<= 26; 637 638 - /* Push it to our buddy. */ 639 - upa_writel(buddy | IMAP_VALID, bp->imap); 640 641 out: 642 - return; 643 } 644 - 645 - #endif 646 647 void handler_irq(int irq, struct pt_regs *regs) 648 { 649 - struct ino_bucket *bp, *nbp; 650 int cpu = smp_processor_id(); 651 652 #ifndef CONFIG_SMP ··· 620 clear_softint(clr_mask); 621 } 622 #else 623 - int should_forward = 0; 624 - 625 clear_softint(1 << irq); 626 #endif 627 ··· 634 #else 635 bp = __bucket(xchg32(irq_work(cpu, irq), 0)); 636 #endif 637 - for ( ; bp != NULL; bp = nbp) { 638 - unsigned char flags = bp->flags; 639 - unsigned char random = 0; 640 641 - nbp = __bucket(bp->irq_chain); 642 bp->irq_chain = 0; 643 - 644 - bp->flags |= IBF_INPROGRESS; 645 - 646 - if ((flags & IBF_ACTIVE) != 0) { 647 - #ifdef CONFIG_PCI 648 - if ((flags & IBF_DMA_SYNC) != 0) { 649 - upa_readl(dma_sync_reg_table[bp->synctab_ent]); 650 - upa_readq(pci_dma_wsync); 651 - } 652 - #endif 653 - if ((flags & IBF_MULTI) == 0) { 654 - struct irqaction *ap = bp->irq_info; 655 - int ret; 656 - 657 - ret = ap->handler(__irq(bp), ap->dev_id, regs); 658 - if (ret == IRQ_HANDLED) 659 - random |= ap->flags; 660 - } else { 661 - void **vector = (void **)bp->irq_info; 662 - int ent; 663 - for (ent = 0; ent < 4; ent++) { 664 - struct irqaction *ap = vector[ent]; 665 - if (ap != NULL) { 666 - int ret; 667 - 668 - ret = ap->handler(__irq(bp), 669 - ap->dev_id, 670 - regs); 671 - if (ret == IRQ_HANDLED) 672 - random |= ap->flags; 673 - } 674 - } 675 - } 676 - /* Only the dummy bucket lacks IMAP/ICLR. */ 677 - if (bp->pil != 0) { 678 - #ifdef CONFIG_SMP 679 - if (should_forward) { 680 - redirect_intr(cpu, bp); 681 - should_forward = 0; 682 - } 683 - #endif 684 - upa_writel(ICLR_IDLE, bp->iclr); 685 - 686 - /* Test and add entropy */ 687 - if (random & SA_SAMPLE_RANDOM) 688 - add_interrupt_randomness(irq); 689 - } 690 - } else 691 - bp->pending = 1; 692 - 693 - bp->flags &= ~IBF_INPROGRESS; 694 } 695 irq_exit(); 696 } ··· 769 */ 770 for (level = 1; level < NR_IRQS; level++) { 771 struct irqaction *p = irq_action[level]; 772 - if (level == 12) continue; 773 while(p) { 774 cpu = retarget_one_irq(p, cpu); 775 p = p->next;
··· 71 struct irq_work_struct __irq_work[NR_CPUS]; 72 #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)]) 73 74 + static struct irqaction *irq_action[NR_IRQS+1]; 75 76 /* This only synchronizes entities which modify IRQ handler 77 * state and some selected user-level spots that want to ··· 241 * the CPU %tick register and not by some normal vectored interrupt 242 * source. To handle this special case, we use this dummy INO bucket. 243 */ 244 + static struct irq_desc pil0_dummy_desc; 245 static struct ino_bucket pil0_dummy_bucket = { 246 + .irq_info = &pil0_dummy_desc, 247 }; 248 + 249 + static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup, 250 + unsigned long iclr, unsigned long imap, 251 + struct ino_bucket *bucket) 252 + { 253 + prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> " 254 + "(%d:%d:%016lx:%016lx), halting...\n", 255 + ino, bucket->pil, bucket->iclr, bucket->imap, 256 + pil, inofixup, iclr, imap); 257 + prom_halt(); 258 + } 259 260 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap) 261 { ··· 280 prom_halt(); 281 } 282 283 + bucket = &ivector_table[ino]; 284 + if (bucket->flags & IBF_ACTIVE) 285 + build_irq_error("IRQ: Trying to build active INO bucket.\n", 286 + ino, pil, inofixup, iclr, imap, bucket); 287 + 288 + if (bucket->irq_info) { 289 + if (bucket->imap != imap || bucket->iclr != iclr) 290 + build_irq_error("IRQ: Trying to reinit INO bucket.\n", 291 + ino, pil, inofixup, iclr, imap, bucket); 292 + 293 + goto out; 294 + } 295 + 296 + bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); 297 + if (!bucket->irq_info) { 298 + prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); 299 + prom_halt(); 300 + } 301 + memset(bucket->irq_info, 0, sizeof(struct irq_desc)); 302 + 303 /* Ok, looks good, set it up. Don't touch the irq_chain or 304 * the pending flag. 
305 */ 306 bucket->imap = imap; 307 bucket->iclr = iclr; 308 bucket->pil = pil; 309 bucket->flags = 0; 310 311 + out: 312 return __irq(bucket); 313 } 314 ··· 319 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); 320 } 321 322 + static int check_irq_sharing(int pil, unsigned long irqflags) 323 + { 324 + struct irqaction *action, *tmp; 325 + 326 + action = *(irq_action + pil); 327 + if (action) { 328 + if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) { 329 + for (tmp = action; tmp->next; tmp = tmp->next) 330 + ; 331 + } else { 332 + return -EBUSY; 333 + } 334 + } 335 + return 0; 336 + } 337 + 338 + static void append_irq_action(int pil, struct irqaction *action) 339 + { 340 + struct irqaction **pp = irq_action + pil; 341 + 342 + while (*pp) 343 + pp = &((*pp)->next); 344 + *pp = action; 345 + } 346 + 347 + static struct irqaction *get_action_slot(struct ino_bucket *bucket) 348 + { 349 + struct irq_desc *desc = bucket->irq_info; 350 + int max_irq, i; 351 + 352 + max_irq = 1; 353 + if (bucket->flags & IBF_PCI) 354 + max_irq = MAX_IRQ_DESC_ACTION; 355 + for (i = 0; i < max_irq; i++) { 356 + struct irqaction *p = &desc->action[i]; 357 + u32 mask = (1 << i); 358 + 359 + if (desc->action_active_mask & mask) 360 + continue; 361 + 362 + desc->action_active_mask |= mask; 363 + return p; 364 + } 365 + return NULL; 366 + } 367 + 368 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), 369 unsigned long irqflags, const char *name, void *dev_id) 370 { 371 + struct irqaction *action; 372 struct ino_bucket *bucket = __bucket(irq); 373 unsigned long flags; 374 int pending = 0; 375 376 + if (unlikely(!handler)) 377 return -EINVAL; 378 + 379 + if (unlikely(!bucket->irq_info)) 380 + return -ENODEV; 381 382 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) { 383 /* ··· 356 357 spin_lock_irqsave(&irq_action_lock, flags); 358 359 + if (check_irq_sharing(bucket->pil, irqflags)) { 360 + spin_unlock_irqrestore(&irq_action_lock, flags); 361 + return -EBUSY; 362 } 363 364 + action = get_action_slot(bucket); 365 if (!action) { 366 spin_unlock_irqrestore(&irq_action_lock, flags); 367 return -ENOMEM; 368 } 369 370 + bucket->flags |= IBF_ACTIVE; 371 + pending = 0; 372 + if (bucket != &pil0_dummy_bucket) { 373 pending = bucket->pending; 374 if (pending) 375 bucket->pending = 0; ··· 456 put_ino_in_irqaction(action, irq); 457 put_smpaff_in_irqaction(action, CPU_MASK_NONE); 458 459 + append_irq_action(bucket->pil, action); 460 461 enable_irq(irq); 462 ··· 468 atomic_bucket_insert(bucket); 469 set_softint(1 << bucket->pil); 470 } 471 + 472 spin_unlock_irqrestore(&irq_action_lock, flags); 473 + 474 + if (bucket != &pil0_dummy_bucket) 475 register_irq_proc(__irq_ino(irq)); 476 477 #ifdef CONFIG_SMP 478 distribute_irqs(); 479 #endif 480 return 0; 481 } 482 483 EXPORT_SYMBOL(request_irq); 484 485 + static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id) 486 + { 487 + struct ino_bucket *bucket = __bucket(irq); 488 + struct irqaction *action, **pp; 489 + 490 + pp = irq_action + bucket->pil; 491 + action = *pp; 492 + if (unlikely(!action)) 493 + return NULL; 494 + 495 + if (unlikely(!action->handler)) { 496 + printk("Freeing free IRQ %d\n", bucket->pil); 497 + return NULL; 498 + } 499 + 500 + while (action && action->dev_id != dev_id) { 501 + pp = &action->next; 502 + action = *pp; 503 + } 504 + 505 + if (likely(action)) 506 + *pp = action->next; 507 + 508 + return action; 509 + } 510 + 511 void free_irq(unsigned int irq, void *dev_id) 
512 { 513 struct irqaction *action; 514 + struct ino_bucket *bucket; 515 unsigned long flags; 516 517 spin_lock_irqsave(&irq_action_lock, flags); 518 519 + action = unlink_irq_action(irq, dev_id); 520 521 spin_unlock_irqrestore(&irq_action_lock, flags); 522 + 523 + if (unlikely(!action)) 524 + return; 525 526 synchronize_irq(irq); 527 528 spin_lock_irqsave(&irq_action_lock, flags); 529 530 + bucket = __bucket(irq); 531 if (bucket != &pil0_dummy_bucket) { 532 + struct irq_desc *desc = bucket->irq_info; 533 unsigned long imap = bucket->imap; 534 + int ent, i; 535 536 + for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { 537 + struct irqaction *p = &desc->action[i]; 538 539 + if (p == action) { 540 + desc->action_active_mask &= ~(1 << i); 541 break; 542 + } 543 } 544 545 + if (!desc->action_active_mask) { 546 + /* This unique interrupt source is now inactive. */ 547 + bucket->flags &= ~IBF_ACTIVE; 548 + 549 + /* See if any other buckets share this bucket's IMAP 550 + * and are still active. 551 + */ 552 + for (ent = 0; ent < NUM_IVECS; ent++) { 553 + struct ino_bucket *bp = &ivector_table[ent]; 554 + if (bp != bucket && 555 + bp->imap == imap && 556 + (bp->flags & IBF_ACTIVE) != 0) 557 + break; 558 + } 559 + 560 + /* Only disable when no other sub-irq levels of 561 + * the same IMAP are active. 562 + */ 563 + if (ent == NUM_IVECS) 564 + disable_irq(irq); 565 + } 566 } 567 568 spin_unlock_irqrestore(&irq_action_lock, flags); 569 } 570 ··· 647 } 648 #endif /* CONFIG_SMP */ 649 650 + static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) 651 { 652 + struct irq_desc *desc = bp->irq_info; 653 + unsigned char flags = bp->flags; 654 + u32 action_mask, i; 655 + int random; 656 657 + bp->flags |= IBF_INPROGRESS; 658 659 + if (unlikely(!(flags & IBF_ACTIVE))) { 660 + bp->pending = 1; 661 goto out; 662 } 663 664 + if (desc->pre_handler) 665 + desc->pre_handler(bp, 666 + desc->pre_handler_arg1, 667 + desc->pre_handler_arg2); 668 669 + action_mask = desc->action_active_mask; 670 + random = 0; 671 + for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { 672 + struct irqaction *p = &desc->action[i]; 673 + u32 mask = (1 << i); 674 675 + if (!(action_mask & mask)) 676 + continue; 677 678 + action_mask &= ~mask; 679 680 + if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED) 681 + random |= p->flags; 682 + 683 + if (!action_mask) 684 + break; 685 + } 686 + if (bp->pil != 0) { 687 + upa_writel(ICLR_IDLE, bp->iclr); 688 + /* Test and add entropy */ 689 + if (random & SA_SAMPLE_RANDOM) 690 + add_interrupt_randomness(irq); 691 + } 692 out: 693 + bp->flags &= ~IBF_INPROGRESS; 694 } 695 696 void handler_irq(int irq, struct pt_regs *regs) 697 { 698 + struct ino_bucket *bp; 699 int cpu = smp_processor_id(); 700 701 #ifndef CONFIG_SMP ··· 757 clear_softint(clr_mask); 758 } 759 #else 760 clear_softint(1 << irq); 761 #endif 762 ··· 773 #else 774 bp = __bucket(xchg32(irq_work(cpu, irq), 0)); 775 #endif 776 + while (bp) { 777 + struct ino_bucket *nbp = __bucket(bp->irq_chain); 778 779 bp->irq_chain = 0; 780 + process_bucket(irq, bp, regs); 781 + bp = nbp; 782 } 783 irq_exit(); 784 } ··· 959 */ 960 for (level = 1; level < NR_IRQS; level++) { 961 struct irqaction *p = irq_action[level]; 962 + 963 + if (level == 12) 964 + continue; 965 + 966 while(p) { 967 cpu = retarget_one_irq(p, cpu); 968 p = p->next;
+26 -20
arch/sparc64/kernel/pci_sabre.c
··· 595 return ret; 596 } 597 598 static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, 599 struct pci_dev *pdev, 600 unsigned int ino) ··· 656 if (pdev) { 657 struct pcidev_cookie *pcp = pdev->sysdata; 658 659 - /* When a device lives behind a bridge deeper in the 660 - * PCI bus topology than APB, a special sequence must 661 - * run to make sure all pending DMA transfers at the 662 - * time of IRQ delivery are visible in the coherency 663 - * domain by the cpu. This sequence is to perform 664 - * a read on the far side of the non-APB bridge, then 665 - * perform a read of Sabre's DMA write-sync register. 666 - * 667 - * Currently, the PCI_CONFIG register for the device 668 - * is used for this read from the far side of the bridge. 669 - */ 670 if (pdev->bus->number != pcp->pbm->pci_first_busno) { 671 - bucket->flags |= IBF_DMA_SYNC; 672 - bucket->synctab_ent = dma_sync_reg_table_entry++; 673 - dma_sync_reg_table[bucket->synctab_ent] = 674 - (unsigned long) sabre_pci_config_mkaddr( 675 - pcp->pbm, 676 - pdev->bus->number, pdev->devfn, PCI_COMMAND); 677 } 678 } 679 return __irq(bucket); ··· 1633 */ 1634 p->pbm_A.controller_regs = pr_regs[0].phys_addr; 1635 p->pbm_B.controller_regs = pr_regs[0].phys_addr; 1636 - pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC; 1637 1638 - printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n", 1639 - p->pbm_A.controller_regs, pci_dma_wsync); 1640 1641 /* Clear interrupts */ 1642
··· 595 return ret; 596 } 597 598 + /* When a device lives behind a bridge deeper in the PCI bus topology 599 + * than APB, a special sequence must run to make sure all pending DMA 600 + * transfers at the time of IRQ delivery are visible in the coherency 601 + * domain by the cpu. This sequence is to perform a read on the far 602 + * side of the non-APB bridge, then perform a read of Sabre's DMA 603 + * write-sync register. 604 + */ 605 + static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) 606 + { 607 + struct pci_dev *pdev = _arg1; 608 + unsigned long sync_reg = (unsigned long) _arg2; 609 + u16 _unused; 610 + 611 + pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused); 612 + sabre_read(sync_reg); 613 + } 614 + 615 static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, 616 struct pci_dev *pdev, 617 unsigned int ino) ··· 639 if (pdev) { 640 struct pcidev_cookie *pcp = pdev->sysdata; 641 642 if (pdev->bus->number != pcp->pbm->pci_first_busno) { 643 + struct pci_controller_info *p = pcp->pbm->parent; 644 + struct irq_desc *d = bucket->irq_info; 645 + 646 + d->pre_handler = sabre_wsync_handler; 647 + d->pre_handler_arg1 = pdev; 648 + d->pre_handler_arg2 = (void *) 649 + p->pbm_A.controller_regs + SABRE_WRSYNC; 650 } 651 } 652 return __irq(bucket); ··· 1626 */ 1627 p->pbm_A.controller_regs = pr_regs[0].phys_addr; 1628 p->pbm_B.controller_regs = pr_regs[0].phys_addr; 1629 1630 + printk("PCI: Found SABRE, main regs at %016lx\n", 1631 + p->pbm_A.controller_regs); 1632 1633 /* Clear interrupts */ 1634
+1 -1
arch/sparc64/kernel/time.c
··· 973 int err; 974 975 /* Register IRQ handler. */ 976 - err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC, 977 "timer", NULL); 978 979 if (err) {
··· 973 int err; 974 975 /* Register IRQ handler. */ 976 + err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0, 977 "timer", NULL); 978 979 if (err) {
+19 -28
include/asm-sparc64/irq.h
··· 16 #include <asm/pil.h> 17 #include <asm/ptrace.h> 18 19 /* You should not mess with this directly. That's the job of irq.c. 20 * 21 * If you make changes here, please update hand coded assembler of ··· 54 /* Miscellaneous flags. */ 55 /*0x06*/unsigned char flags; 56 57 - /* This is used to deal with IBF_DMA_SYNC on 58 - * Sabre systems. 59 - */ 60 - /*0x07*/unsigned char synctab_ent; 61 62 - /* Reference to handler for this IRQ. If this is 63 - * non-NULL this means it is active and should be 64 - * serviced. Else the pending member is set to one 65 - * and later registry of the interrupt checks for 66 - * this condition. 67 - * 68 - * Normally this is just an irq_action structure. 69 - * But, on PCI, if multiple interrupt sources behind 70 - * a bridge have multiple interrupt sources that share 71 - * the same INO bucket, this points to an array of 72 - * pointers to four IRQ action structures. 73 - */ 74 - /*0x08*/void *irq_info; 75 76 /* Sun5 Interrupt Clear Register. */ 77 /*0x10*/unsigned long iclr; ··· 67 /*0x18*/unsigned long imap; 68 69 }; 70 - 71 - #ifdef CONFIG_PCI 72 - extern unsigned long pci_dma_wsync; 73 - extern unsigned long dma_sync_reg_table[256]; 74 - extern unsigned char dma_sync_reg_table_entry; 75 - #endif 76 77 /* IMAP/ICLR register defines */ 78 #define IMAP_VALID 0x80000000 /* IRQ Enabled */ ··· 83 #define ICLR_PENDING 0x00000003 /* Pending state */ 84 85 /* Only 8-bits are available, be careful. -DaveM */ 86 - #define IBF_DMA_SYNC 0x01 /* DMA synchronization behind PCI bridge needed. */ 87 - #define IBF_PCI 0x02 /* Indicates PSYCHO/SABRE/SCHIZO PCI interrupt. */ 88 - #define IBF_ACTIVE 0x04 /* This interrupt is active and has a handler. */ 89 - #define IBF_MULTI 0x08 /* On PCI, indicates shared bucket. */ 90 - #define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */ 91 92 #define NUM_IVECS (IMAP_INR + 1) 93 extern struct ino_bucket ivector_table[NUM_IVECS];
··· 16 #include <asm/pil.h> 17 #include <asm/ptrace.h> 18 19 + struct ino_bucket; 20 + 21 + #define MAX_IRQ_DESC_ACTION 4 22 + 23 + struct irq_desc { 24 + void (*pre_handler)(struct ino_bucket *, void *, void *); 25 + void *pre_handler_arg1; 26 + void *pre_handler_arg2; 27 + u32 action_active_mask; 28 + struct irqaction action[MAX_IRQ_DESC_ACTION]; 29 + }; 30 + 31 /* You should not mess with this directly. That's the job of irq.c. 32 * 33 * If you make changes here, please update hand coded assembler of ··· 42 /* Miscellaneous flags. */ 43 /*0x06*/unsigned char flags; 44 45 + /* Currently unused. */ 46 + /*0x07*/unsigned char __pad; 47 48 + /* Reference to IRQ descriptor for this bucket. */ 49 + /*0x08*/struct irq_desc *irq_info; 50 51 /* Sun5 Interrupt Clear Register. */ 52 /*0x10*/unsigned long iclr; ··· 68 /*0x18*/unsigned long imap; 69 70 }; 71 72 /* IMAP/ICLR register defines */ 73 #define IMAP_VALID 0x80000000 /* IRQ Enabled */ ··· 90 #define ICLR_PENDING 0x00000003 /* Pending state */ 91 92 /* Only 8-bits are available, be careful. -DaveM */ 93 + #define IBF_PCI 0x02 /* PSYCHO/SABRE/SCHIZO PCI interrupt. */ 94 + #define IBF_ACTIVE 0x04 /* Interrupt is active and has a handler.*/ 95 + #define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */ 96 97 #define NUM_IVECS (IMAP_INR + 1) 98 extern struct ino_bucket ivector_table[NUM_IVECS];
-15
include/asm-sparc64/signal.h
··· 162 #define MINSIGSTKSZ 4096 163 #define SIGSTKSZ 16384 164 165 - #ifdef __KERNEL__ 166 - /* 167 - * DJHR 168 - * SA_STATIC_ALLOC is used for the SPARC system to indicate that this 169 - * interrupt handler's irq structure should be statically allocated 170 - * by the request_irq routine. 171 - * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge 172 - * of interrupt usage and that sucks. Also without a flag like this 173 - * it may be possible for the free_irq routine to attempt to free 174 - * statically allocated data.. which is NOT GOOD. 175 - * 176 - */ 177 - #define SA_STATIC_ALLOC 0x80 178 - #endif 179 - 180 #include <asm-generic/signal.h> 181 182 struct __new_sigaction {
··· 162 #define MINSIGSTKSZ 4096 163 #define SIGSTKSZ 16384 164 165 #include <asm-generic/signal.h> 166 167 struct __new_sigaction {