[SPARC64]: Add support for IRQ pre-handlers.

This allows a PCI controller to shim into IRQ delivery
so that DMA queues can be drained, if necessary.

If some bus-specific code needs to run before an IRQ
handler is invoked, the bus driver simply needs to set up
the function pointer in bucket->irq_info->pre_handler and
the two args bucket->irq_info->pre_handler_arg1 and
bucket->irq_info->pre_handler_arg2.
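
For example, a bus driver could hook in like this (an illustrative
sketch only, not code from this patch: the handler name, the build
function, and the two argument values are hypothetical; the fields
are the ones this patch adds to struct irq_desc):

	/* Hypothetical pre-handler: drain/synchronize pending DMA
	 * before the real IRQ handlers for this bucket are run.
	 */
	static void my_dma_sync(struct ino_bucket *bucket,
				void *dev, void *sync_reg)
	{
		/* bus-specific DMA drain sequence goes here */
	}

	/* Hypothetical bus-driver IRQ build path installing it. */
	static void my_bus_irq_build(struct ino_bucket *bucket,
				     void *my_dev, void *my_sync_reg)
	{
		struct irq_desc *desc = bucket->irq_info;

		desc->pre_handler = my_dma_sync;
		desc->pre_handler_arg1 = my_dev;	/* hypothetical */
		desc->pre_handler_arg2 = my_sync_reg;	/* hypothetical */
	}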

The Sabre PCI controller driver is converted over to use
a pre-handler for the DMA write-sync processing it needs
when a device is behind a PCI->PCI bridge deeper than the
top-level APB bridges.
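
Concretely, the pre-handler added to pci_sabre.c (quoted from the
hunk below, with comments added here) performs that sequence: a
config-space read on the far side of the non-APB bridge, then a
read of Sabre's DMA write-sync register:

	static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
	{
		struct pci_dev *pdev = _arg1;
		unsigned long sync_reg = (unsigned long) _arg2;
		u16 _unused;

		/* Read on the far side of the bridge... */
		pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
		/* ...then read Sabre's DMA write-sync register. */
		sabre_read(sync_reg);
	}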

While we're here, clean up all of the action allocation
and handling. Now, we allocate the irqaction as part of
the bucket->irq_info area. There is an array of four
irqactions (for PCI IRQ sharing) and a bitmask saying
which entries are active.
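
Concretely, the new per-bucket descriptor (quoted from the
include/asm-sparc64/irq.h hunk below) is:

	#define MAX_IRQ_DESC_ACTION	4

	struct irq_desc {
		void		(*pre_handler)(struct ino_bucket *, void *, void *);
		void		*pre_handler_arg1;
		void		*pre_handler_arg2;
		u32		action_active_mask;	/* which action[] slots are live */
		struct irqaction action[MAX_IRQ_DESC_ACTION];
	};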

The bucket->irq_info is allocated at build_irq() time, not
at request_irq() time. This simplifies request_irq() and
free_irq() tremendously.
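
With the descriptor preallocated, registering a handler reduces to
claiming a free slot in desc->action[] (quoted from the irq.c hunk
below; only IBF_PCI buckets may share, so everything else gets a
single slot):

	static struct irqaction *get_action_slot(struct ino_bucket *bucket)
	{
		struct irq_desc *desc = bucket->irq_info;
		int max_irq, i;

		max_irq = 1;
		if (bucket->flags & IBF_PCI)
			max_irq = MAX_IRQ_DESC_ACTION;
		for (i = 0; i < max_irq; i++) {
			struct irqaction *p = &desc->action[i];
			u32 mask = (1 << i);

			if (desc->action_active_mask & mask)
				continue;

			desc->action_active_mask |= mask;
			return p;
		}
		return NULL;
	}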

The SMP dynamic IRQ retargeting code got removed in this
change too. It has been disabled for a few months now, and
we can resurrect it in the future if we want.

Signed-off-by: David S. Miller <davem@davemloft.net>

+248 -468
+3 -18
arch/sparc64/kernel/entry.S
···
553 553 sllx %g3, 5, %g3
554 554 or %g2, %lo(ivector_table), %g2
555 555 add %g2, %g3, %g3
556 - ldx [%g3 + 0x08], %g2 /* irq_info */
557 556 ldub [%g3 + 0x04], %g4 /* pil */
558 - brz,pn %g2, do_ivec_spurious
559 - mov 1, %g2
560 -
557 + mov 1, %g2
561 558 sllx %g2, %g4, %g2
562 559 sllx %g4, 2, %g4
560 +
563 561 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */
564 562 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
565 563 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */
···
565 567 retry
566 568 do_ivec_xcall:
567 569 mov 0x50, %g1
568 -
569 570 ldxa [%g1 + %g0] ASI_INTR_R, %g1
570 571 srl %g3, 0, %g3
572 +
571 573 mov 0x60, %g7
572 574 ldxa [%g7 + %g0] ASI_INTR_R, %g7
573 575 stxa %g0, [%g0] ASI_INTR_RECEIVE
···
578 580 .align 32
579 581 1: jmpl %g3, %g0
580 582 nop
581 -
582 - do_ivec_spurious:
583 - stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */
584 - rdpr %pstate, %g5
585 -
586 - wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate
587 - sethi %hi(109f), %g7
588 - ba,pt %xcc, etrap
589 - 109: or %g7, %lo(109b), %g7
590 - call catch_disabled_ivec
591 - add %sp, PTREGS_OFF, %o0
592 - ba,pt %xcc, rtrap
593 - clr %l6
594 583
595 584 .globl save_alternate_globals
596 585 save_alternate_globals: /* %o0 = save_area */
+199 -386
arch/sparc64/kernel/irq.c
···
71 71 struct irq_work_struct __irq_work[NR_CPUS];
72 72 #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)])
73 73
74 - #ifdef CONFIG_PCI
75 - /* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
76 - * It is used for PCI only to synchronize DMA transfers with IRQ delivery
77 - * for devices behind busses other than APB on Sabre systems.
78 - *
79 - * Currently these physical addresses are just config space accesses
80 - * to the command register for that device.
81 - */
82 - unsigned long pci_dma_wsync;
83 - unsigned long dma_sync_reg_table[256];
84 - unsigned char dma_sync_reg_table_entry = 0;
85 - #endif
86 -
87 - /* This is based upon code in the 32-bit Sparc kernel written mostly by
88 - * David Redman (djhr@tadpole.co.uk).
89 - */
90 - #define MAX_STATIC_ALLOC 4
91 - static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
92 - static int static_irq_count;
93 -
94 - /* This is exported so that fast IRQ handlers can get at it... -DaveM */
95 - struct irqaction *irq_action[NR_IRQS+1] = {
96 - NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
97 - NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
98 - };
74 + static struct irqaction *irq_action[NR_IRQS+1];
99 75
100 76 /* This only synchronizes entities which modify IRQ handler
101 77 * state and some selected user-level spots that want to
···
217 241 * the CPU %tick register and not by some normal vectored interrupt
218 242 * source. To handle this special case, we use this dummy INO bucket.
219 243 */
244 + static struct irq_desc pil0_dummy_desc;
220 245 static struct ino_bucket pil0_dummy_bucket = {
221 - 0, /* irq_chain */
222 - 0, /* pil */
223 - 0, /* pending */
224 - 0, /* flags */
225 - 0, /* __unused */
226 - NULL, /* irq_info */
227 - 0UL, /* iclr */
228 - 0UL, /* imap */
246 + .irq_info = &pil0_dummy_desc,
229 247 };
248 +
249 + static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
250 + unsigned long iclr, unsigned long imap,
251 + struct ino_bucket *bucket)
252 + {
253 + prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
254 + "(%d:%d:%016lx:%016lx), halting...\n",
255 + ino, bucket->pil, bucket->iclr, bucket->imap,
256 + pil, inofixup, iclr, imap);
257 + prom_halt();
258 + }
230 259
231 260 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
232 261 {
···
261 280 prom_halt();
262 281 }
263 282
283 + bucket = &ivector_table[ino];
284 + if (bucket->flags & IBF_ACTIVE)
285 + build_irq_error("IRQ: Trying to build active INO bucket.\n",
286 + ino, pil, inofixup, iclr, imap, bucket);
287 +
288 + if (bucket->irq_info) {
289 + if (bucket->imap != imap || bucket->iclr != iclr)
290 + build_irq_error("IRQ: Trying to reinit INO bucket.\n",
291 + ino, pil, inofixup, iclr, imap, bucket);
292 +
293 + goto out;
294 + }
295 +
296 + bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
297 + if (!bucket->irq_info) {
298 + prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
299 + prom_halt();
300 + }
301 + memset(bucket->irq_info, 0, sizeof(struct irq_desc));
302 +
264 303 /* Ok, looks good, set it up. Don't touch the irq_chain or
265 304 * the pending flag.
266 305 */
267 - bucket = &ivector_table[ino];
268 - if ((bucket->flags & IBF_ACTIVE) ||
269 - (bucket->irq_info != NULL)) {
270 - /* This is a gross fatal error if it happens here. */
271 - prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
272 - prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
273 - ino, pil, inofixup, iclr, imap);
274 - prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
275 - bucket->pil, bucket->iclr, bucket->imap);
276 - prom_printf("IRQ: Cannot continue, halting...\n");
277 - prom_halt();
278 - }
279 306 bucket->imap = imap;
280 307 bucket->iclr = iclr;
281 308 bucket->pil = pil;
282 309 bucket->flags = 0;
283 310
284 - bucket->irq_info = NULL;
285 -
311 + out:
286 312 return __irq(bucket);
287 313 }
288 314
···
307 319 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
308 320 }
309 321
322 + static int check_irq_sharing(int pil, unsigned long irqflags)
323 + {
324 + struct irqaction *action, *tmp;
325 +
326 + action = *(irq_action + pil);
327 + if (action) {
328 + if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
329 + for (tmp = action; tmp->next; tmp = tmp->next)
330 + ;
331 + } else {
332 + return -EBUSY;
333 + }
334 + }
335 + return 0;
336 + }
337 +
338 + static void append_irq_action(int pil, struct irqaction *action)
339 + {
340 + struct irqaction **pp = irq_action + pil;
341 +
342 + while (*pp)
343 + pp = &((*pp)->next);
344 + *pp = action;
345 + }
346 +
347 + static struct irqaction *get_action_slot(struct ino_bucket *bucket)
348 + {
349 + struct irq_desc *desc = bucket->irq_info;
350 + int max_irq, i;
351 +
352 + max_irq = 1;
353 + if (bucket->flags & IBF_PCI)
354 + max_irq = MAX_IRQ_DESC_ACTION;
355 + for (i = 0; i < max_irq; i++) {
356 + struct irqaction *p = &desc->action[i];
357 + u32 mask = (1 << i);
358 +
359 + if (desc->action_active_mask & mask)
360 + continue;
361 +
362 + desc->action_active_mask |= mask;
363 + return p;
364 + }
365 + return NULL;
366 + }
367 +
310 368 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
311 369 unsigned long irqflags, const char *name, void *dev_id)
312 370 {
313 - struct irqaction *action, *tmp = NULL;
371 + struct irqaction *action;
314 372 struct ino_bucket *bucket = __bucket(irq);
315 373 unsigned long flags;
316 374 int pending = 0;
317 375
318 - if ((bucket != &pil0_dummy_bucket) &&
319 - (bucket < &ivector_table[0] ||
320 - bucket >= &ivector_table[NUM_IVECS])) {
321 - unsigned int *caller;
322 -
323 - __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
324 - printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
325 - "from %p, irq %08x.\n", caller, irq);
376 + if (unlikely(!handler))
326 377 return -EINVAL;
327 - }
328 - if (!handler)
329 - return -EINVAL;
378 +
379 + if (unlikely(!bucket->irq_info))
380 + return -ENODEV;
330 381
331 382 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
332 383 /*
···
383 356
384 357 spin_lock_irqsave(&irq_action_lock, flags);
385 358
386 - action = *(bucket->pil + irq_action);
387 - if (action) {
388 - if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
389 - for (tmp = action; tmp->next; tmp = tmp->next)
390 - ;
391 - else {
392 - spin_unlock_irqrestore(&irq_action_lock, flags);
393 - return -EBUSY;
394 - }
395 - action = NULL; /* Or else! */
359 + if (check_irq_sharing(bucket->pil, irqflags)) {
360 + spin_unlock_irqrestore(&irq_action_lock, flags);
361 + return -EBUSY;
396 362 }
397 363
398 - /* If this is flagged as statically allocated then we use our
399 - * private struct which is never freed.
400 - */
401 - if (irqflags & SA_STATIC_ALLOC) {
402 - if (static_irq_count < MAX_STATIC_ALLOC)
403 - action = &static_irqaction[static_irq_count++];
404 - else
405 - printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
406 - "using kmalloc\n", irq, name);
407 - }
408 - if (action == NULL)
409 - action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
410 - GFP_ATOMIC);
411 -
364 + action = get_action_slot(bucket);
412 365 if (!action) {
413 366 spin_unlock_irqrestore(&irq_action_lock, flags);
414 367 return -ENOMEM;
415 368 }
416 369
417 - if (bucket == &pil0_dummy_bucket) {
418 - bucket->irq_info = action;
419 - bucket->flags |= IBF_ACTIVE;
420 - } else {
421 - if ((bucket->flags & IBF_ACTIVE) != 0) {
422 - void *orig = bucket->irq_info;
423 - void **vector = NULL;
424 -
425 - if ((bucket->flags & IBF_PCI) == 0) {
426 - printk("IRQ: Trying to share non-PCI bucket.\n");
427 - goto free_and_ebusy;
428 - }
429 - if ((bucket->flags & IBF_MULTI) == 0) {
430 - vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
431 - if (vector == NULL)
432 - goto free_and_enomem;
433 -
434 - /* We might have slept. */
435 - if ((bucket->flags & IBF_MULTI) != 0) {
436 - int ent;
437 -
438 - kfree(vector);
439 - vector = (void **)bucket->irq_info;
440 - for(ent = 0; ent < 4; ent++) {
441 - if (vector[ent] == NULL) {
442 - vector[ent] = action;
443 - break;
444 - }
445 - }
446 - if (ent == 4)
447 - goto free_and_ebusy;
448 - } else {
449 - vector[0] = orig;
450 - vector[1] = action;
451 - vector[2] = NULL;
452 - vector[3] = NULL;
453 - bucket->irq_info = vector;
454 - bucket->flags |= IBF_MULTI;
455 - }
456 - } else {
457 - int ent;
458 -
459 - vector = (void **)orig;
460 - for (ent = 0; ent < 4; ent++) {
461 - if (vector[ent] == NULL) {
462 - vector[ent] = action;
463 - break;
464 - }
465 - }
466 - if (ent == 4)
467 - goto free_and_ebusy;
468 - }
469 - } else {
470 - bucket->irq_info = action;
471 - bucket->flags |= IBF_ACTIVE;
472 - }
370 + bucket->flags |= IBF_ACTIVE;
371 + pending = 0;
372 + if (bucket != &pil0_dummy_bucket) {
473 373 pending = bucket->pending;
474 374 if (pending)
475 375 bucket->pending = 0;
···
410 456 put_ino_in_irqaction(action, irq);
411 457 put_smpaff_in_irqaction(action, CPU_MASK_NONE);
412 458
413 - if (tmp)
414 - tmp->next = action;
415 - else
416 - *(bucket->pil + irq_action) = action;
459 + append_irq_action(bucket->pil, action);
417 460
418 461 enable_irq(irq);
419 462
···
419 468 atomic_bucket_insert(bucket);
420 469 set_softint(1 << bucket->pil);
421 470 }
471 +
422 472 spin_unlock_irqrestore(&irq_action_lock, flags);
423 - if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
473 +
474 + if (bucket != &pil0_dummy_bucket)
424 475 register_irq_proc(__irq_ino(irq));
425 476
426 477 #ifdef CONFIG_SMP
427 478 distribute_irqs();
428 479 #endif
429 480 return 0;
430 -
431 - free_and_ebusy:
432 - kfree(action);
433 - spin_unlock_irqrestore(&irq_action_lock, flags);
434 - return -EBUSY;
435 -
436 - free_and_enomem:
437 - kfree(action);
438 - spin_unlock_irqrestore(&irq_action_lock, flags);
439 - return -ENOMEM;
440 481 }
441 482
442 483 EXPORT_SYMBOL(request_irq);
443 484
485 + static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
486 + {
487 + struct ino_bucket *bucket = __bucket(irq);
488 + struct irqaction *action, **pp;
489 +
490 + pp = irq_action + bucket->pil;
491 + action = *pp;
492 + if (unlikely(!action))
493 + return NULL;
494 +
495 + if (unlikely(!action->handler)) {
496 + printk("Freeing free IRQ %d\n", bucket->pil);
497 + return NULL;
498 + }
499 +
500 + while (action && action->dev_id != dev_id) {
501 + pp = &action->next;
502 + action = *pp;
503 + }
504 +
505 + if (likely(action))
506 + *pp = action->next;
507 +
508 + return action;
509 + }
510 +
444 511 void free_irq(unsigned int irq, void *dev_id)
445 512 {
446 513 struct irqaction *action;
447 - struct irqaction *tmp = NULL;
514 + struct ino_bucket *bucket;
448 515 unsigned long flags;
449 - struct ino_bucket *bucket = __bucket(irq), *bp;
450 516
451 - if ((bucket != &pil0_dummy_bucket) &&
452 - (bucket < &ivector_table[0] ||
453 - bucket >= &ivector_table[NUM_IVECS])) {
454 - unsigned int *caller;
455 -
456 - __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
457 - printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
458 - "from %p, irq %08x.\n", caller, irq);
459 - return;
460 - }
461 -
462 517 spin_lock_irqsave(&irq_action_lock, flags);
463 518
464 - action = *(bucket->pil + irq_action);
465 - if (!action->handler) {
466 - printk("Freeing free IRQ %d\n", bucket->pil);
467 - return;
468 - }
469 - if (dev_id) {
470 - for ( ; action; action = action->next) {
471 - if (action->dev_id == dev_id)
472 - break;
473 - tmp = action;
474 - }
475 - if (!action) {
476 - printk("Trying to free free shared IRQ %d\n", bucket->pil);
477 - spin_unlock_irqrestore(&irq_action_lock, flags);
478 - return;
479 - }
480 - } else if (action->flags & SA_SHIRQ) {
481 - printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
482 - spin_unlock_irqrestore(&irq_action_lock, flags);
483 - return;
484 - }
485 -
486 - if (action->flags & SA_STATIC_ALLOC) {
487 - printk("Attempt to free statically allocated IRQ %d (%s)\n",
488 - bucket->pil, action->name);
489 - spin_unlock_irqrestore(&irq_action_lock, flags);
490 - return;
491 - }
492 -
493 - if (action && tmp)
494 - tmp->next = action->next;
495 - else
496 - *(bucket->pil + irq_action) = action->next;
519 + action = unlink_irq_action(irq, dev_id);
497 520
498 521 spin_unlock_irqrestore(&irq_action_lock, flags);
522 +
523 + if (unlikely(!action))
524 + return;
499 525
500 526 synchronize_irq(irq);
501 527
502 528 spin_lock_irqsave(&irq_action_lock, flags);
503 529
530 + bucket = __bucket(irq);
504 531 if (bucket != &pil0_dummy_bucket) {
532 + struct irq_desc *desc = bucket->irq_info;
505 533 unsigned long imap = bucket->imap;
506 - void **vector, *orig;
507 - int ent;
534 + int ent, i;
508 535
509 - orig = bucket->irq_info;
510 - vector = (void **)orig;
536 + for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
537 + struct irqaction *p = &desc->action[i];
511 538
512 - if ((bucket->flags & IBF_MULTI) != 0) {
513 - int other = 0;
514 - void *orphan = NULL;
515 - for (ent = 0; ent < 4; ent++) {
516 - if (vector[ent] == action)
517 - vector[ent] = NULL;
518 - else if (vector[ent] != NULL) {
519 - orphan = vector[ent];
520 - other++;
521 - }
522 - }
523 -
524 - /* Only free when no other shared irq
525 - * uses this bucket.
526 - */
527 - if (other) {
528 - if (other == 1) {
529 - /* Convert back to non-shared bucket. */
530 - bucket->irq_info = orphan;
531 - bucket->flags &= ~(IBF_MULTI);
532 - kfree(vector);
533 - }
534 - goto out;
535 - }
536 - } else {
537 - bucket->irq_info = NULL;
538 - }
539 -
540 - /* This unique interrupt source is now inactive. */
541 - bucket->flags &= ~IBF_ACTIVE;
542 -
543 - /* See if any other buckets share this bucket's IMAP
544 - * and are still active.
545 - */
546 - for (ent = 0; ent < NUM_IVECS; ent++) {
547 - bp = &ivector_table[ent];
548 - if (bp != bucket &&
549 - bp->imap == imap &&
550 - (bp->flags & IBF_ACTIVE) != 0)
539 + if (p == action) {
540 + desc->action_active_mask &= ~(1 << i);
551 541 break;
542 + }
552 543 }
553 544
554 - /* Only disable when no other sub-irq levels of
555 - * the same IMAP are active.
556 - */
557 - if (ent == NUM_IVECS)
558 - disable_irq(irq);
545 + if (!desc->action_active_mask) {
546 + /* This unique interrupt source is now inactive. */
547 + bucket->flags &= ~IBF_ACTIVE;
548 +
549 + /* See if any other buckets share this bucket's IMAP
550 + * and are still active.
551 + */
552 + for (ent = 0; ent < NUM_IVECS; ent++) {
553 + struct ino_bucket *bp = &ivector_table[ent];
554 + if (bp != bucket &&
555 + bp->imap == imap &&
556 + (bp->flags & IBF_ACTIVE) != 0)
557 + break;
558 + }
559 +
560 + /* Only disable when no other sub-irq levels of
561 + * the same IMAP are active.
562 + */
563 + if (ent == NUM_IVECS)
564 + disable_irq(irq);
565 + }
559 566 }
560 567
561 - out:
562 - kfree(action);
563 568 spin_unlock_irqrestore(&irq_action_lock, flags);
564 569 }
565 570
···
554 647 }
555 648 #endif /* CONFIG_SMP */
556 649
557 - void catch_disabled_ivec(struct pt_regs *regs)
650 + static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
558 651 {
559 - int cpu = smp_processor_id();
560 - struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
652 + struct irq_desc *desc = bp->irq_info;
653 + unsigned char flags = bp->flags;
654 + u32 action_mask, i;
655 + int random;
561 656
562 - /* We can actually see this on Ultra/PCI PCI cards, which are bridges
563 - * to other devices. Here a single IMAP enabled potentially multiple
564 - * unique interrupt sources (which each do have a unique ICLR register.
565 - *
566 - * So what we do is just register that the IVEC arrived, when registered
567 - * for real the request_irq() code will check the bit and signal
568 - * a local CPU interrupt for it.
569 - */
570 - #if 0
571 - printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
572 - bucket - &ivector_table[0], regs->tpc);
573 - #endif
574 - *irq_work(cpu, 0) = 0;
575 - bucket->pending = 1;
576 - }
657 + bp->flags |= IBF_INPROGRESS;
577 658
578 - /* Tune this... */
579 - #define FORWARD_VOLUME 12
580 -
581 - #ifdef CONFIG_SMP
582 -
583 - static inline void redirect_intr(int cpu, struct ino_bucket *bp)
584 - {
585 - /* Ok, here is what is going on:
586 - * 1) Retargeting IRQs on Starfire is very
587 - * expensive so just forget about it on them.
588 - * 2) Moving around very high priority interrupts
589 - * is a losing game.
590 - * 3) If the current cpu is idle, interrupts are
591 - * useful work, so keep them here. But do not
592 - * pass to our neighbour if he is not very idle.
593 - * 4) If sysadmin explicitly asks for directed intrs,
594 - * Just Do It.
595 - */
596 - struct irqaction *ap = bp->irq_info;
597 - cpumask_t cpu_mask;
598 - unsigned int buddy, ticks;
599 -
600 - cpu_mask = get_smpaff_in_irqaction(ap);
601 - cpus_and(cpu_mask, cpu_mask, cpu_online_map);
602 - if (cpus_empty(cpu_mask))
603 - cpu_mask = cpu_online_map;
604 -
605 - if (this_is_starfire != 0 ||
606 - bp->pil >= 10 || current->pid == 0)
659 + if (unlikely(!(flags & IBF_ACTIVE))) {
660 + bp->pending = 1;
607 661 goto out;
608 -
609 - /* 'cpu' is the MID (ie. UPAID), calculate the MID
610 - * of our buddy. */
611 - buddy = cpu + 1;
612 - if (buddy >= NR_CPUS)
613 - buddy = 0;
614 -
615 - ticks = 0;
616 - while (!cpu_isset(buddy, cpu_mask)) {
617 - if (++buddy >= NR_CPUS)
618 - buddy = 0;
619 - if (++ticks > NR_CPUS) {
620 - put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
621 - goto out;
622 - }
623 - }
624 662 }
625 663
626 - if (buddy == cpu)
627 - goto out;
664 + if (desc->pre_handler)
665 + desc->pre_handler(bp,
666 + desc->pre_handler_arg1,
667 + desc->pre_handler_arg2);
628 668
629 - /* Voo-doo programming. */
630 - if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
631 - goto out;
669 + action_mask = desc->action_active_mask;
670 + random = 0;
671 + for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
672 + struct irqaction *p = &desc->action[i];
673 + u32 mask = (1 << i);
632 674
633 - /* This just so happens to be correct on Cheetah
634 - * at the moment.
635 - */
636 - buddy <<= 26;
675 + if (!(action_mask & mask))
676 + continue;
637 677
638 - /* Push it to our buddy. */
639 - upa_writel(buddy | IMAP_VALID, bp->imap);
678 + action_mask &= ~mask;
640 679
680 + if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
681 + random |= p->flags;
682 +
683 + if (!action_mask)
684 + break;
685 + }
686 + if (bp->pil != 0) {
687 + upa_writel(ICLR_IDLE, bp->iclr);
688 + /* Test and add entropy */
689 + if (random & SA_SAMPLE_RANDOM)
690 + add_interrupt_randomness(irq);
691 + }
641 692 out:
642 - return;
693 + bp->flags &= ~IBF_INPROGRESS;
643 694 }
644 -
645 - #endif
646 695
647 696 void handler_irq(int irq, struct pt_regs *regs)
648 697 {
649 - struct ino_bucket *bp, *nbp;
698 + struct ino_bucket *bp;
650 699 int cpu = smp_processor_id();
651 700
652 701 #ifndef CONFIG_SMP
···
620 757 clear_softint(clr_mask);
621 758 }
622 759 #else
623 - int should_forward = 0;
624 -
625 760 clear_softint(1 << irq);
626 761 #endif
···
634 773 #else
635 774 bp = __bucket(xchg32(irq_work(cpu, irq), 0));
636 775 #endif
637 - for ( ; bp != NULL; bp = nbp) {
638 - unsigned char flags = bp->flags;
639 - unsigned char random = 0;
776 + while (bp) {
777 + struct ino_bucket *nbp = __bucket(bp->irq_chain);
640 778
641 - nbp = __bucket(bp->irq_chain);
642 779 bp->irq_chain = 0;
643 -
644 - bp->flags |= IBF_INPROGRESS;
645 -
646 - if ((flags & IBF_ACTIVE) != 0) {
647 - #ifdef CONFIG_PCI
648 - if ((flags & IBF_DMA_SYNC) != 0) {
649 - upa_readl(dma_sync_reg_table[bp->synctab_ent]);
650 - upa_readq(pci_dma_wsync);
651 - }
652 - #endif
653 - if ((flags & IBF_MULTI) == 0) {
654 - struct irqaction *ap = bp->irq_info;
655 - int ret;
656 -
657 - ret = ap->handler(__irq(bp), ap->dev_id, regs);
658 - if (ret == IRQ_HANDLED)
659 - random |= ap->flags;
660 - } else {
661 - void **vector = (void **)bp->irq_info;
662 - int ent;
663 - for (ent = 0; ent < 4; ent++) {
664 - struct irqaction *ap = vector[ent];
665 - if (ap != NULL) {
666 - int ret;
667 -
668 - ret = ap->handler(__irq(bp),
669 - ap->dev_id,
670 - regs);
671 - if (ret == IRQ_HANDLED)
672 - random |= ap->flags;
673 - }
674 - }
675 - }
676 - /* Only the dummy bucket lacks IMAP/ICLR. */
677 - if (bp->pil != 0) {
678 - #ifdef CONFIG_SMP
679 - if (should_forward) {
680 - redirect_intr(cpu, bp);
681 - should_forward = 0;
682 - }
683 - #endif
684 - upa_writel(ICLR_IDLE, bp->iclr);
685 -
686 - /* Test and add entropy */
687 - if (random & SA_SAMPLE_RANDOM)
688 - add_interrupt_randomness(irq);
689 - }
690 - } else
691 - bp->pending = 1;
692 -
693 - bp->flags &= ~IBF_INPROGRESS;
780 + process_bucket(irq, bp, regs);
781 + bp = nbp;
694 782 }
695 783 irq_exit();
696 784 }
···
769 959 */
770 960 for (level = 1; level < NR_IRQS; level++) {
771 961 struct irqaction *p = irq_action[level];
772 - if (level == 12) continue;
962 +
963 + if (level == 12)
964 + continue;
965 +
773 966 while(p) {
774 967 cpu = retarget_one_irq(p, cpu);
775 968 p = p->next;
+26 -20
arch/sparc64/kernel/pci_sabre.c
···
595 595 return ret;
596 596 }
597 597
598 + /* When a device lives behind a bridge deeper in the PCI bus topology
599 + * than APB, a special sequence must run to make sure all pending DMA
600 + * transfers at the time of IRQ delivery are visible in the coherency
601 + * domain by the cpu. This sequence is to perform a read on the far
602 + * side of the non-APB bridge, then perform a read of Sabre's DMA
603 + * write-sync register.
604 + */
605 + static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
606 + {
607 + struct pci_dev *pdev = _arg1;
608 + unsigned long sync_reg = (unsigned long) _arg2;
609 + u16 _unused;
610 +
611 + pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
612 + sabre_read(sync_reg);
613 + }
614 +
598 615 static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
599 616 struct pci_dev *pdev,
600 617 unsigned int ino)
···
656 639 if (pdev) {
657 640 struct pcidev_cookie *pcp = pdev->sysdata;
658 641
659 - /* When a device lives behind a bridge deeper in the
660 - * PCI bus topology than APB, a special sequence must
661 - * run to make sure all pending DMA transfers at the
662 - * time of IRQ delivery are visible in the coherency
663 - * domain by the cpu. This sequence is to perform
664 - * a read on the far side of the non-APB bridge, then
665 - * perform a read of Sabre's DMA write-sync register.
666 - *
667 - * Currently, the PCI_CONFIG register for the device
668 - * is used for this read from the far side of the bridge.
669 - */
670 642 if (pdev->bus->number != pcp->pbm->pci_first_busno) {
671 - bucket->flags |= IBF_DMA_SYNC;
672 - bucket->synctab_ent = dma_sync_reg_table_entry++;
673 - dma_sync_reg_table[bucket->synctab_ent] =
674 - (unsigned long) sabre_pci_config_mkaddr(
675 - pcp->pbm,
676 - pdev->bus->number, pdev->devfn, PCI_COMMAND);
643 + struct pci_controller_info *p = pcp->pbm->parent;
644 + struct irq_desc *d = bucket->irq_info;
645 +
646 + d->pre_handler = sabre_wsync_handler;
647 + d->pre_handler_arg1 = pdev;
648 + d->pre_handler_arg2 = (void *)
649 + p->pbm_A.controller_regs + SABRE_WRSYNC;
677 650 }
678 651 }
679 652 return __irq(bucket);
···
1633 1626 */
1634 1627 p->pbm_A.controller_regs = pr_regs[0].phys_addr;
1635 1628 p->pbm_B.controller_regs = pr_regs[0].phys_addr;
1636 - pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;
1637 1629
1638 - printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
1639 - p->pbm_A.controller_regs, pci_dma_wsync);
1630 + printk("PCI: Found SABRE, main regs at %016lx\n",
1631 + p->pbm_A.controller_regs);
1640 1632
1641 1633 /* Clear interrupts */
1642 1634
+1 -1
arch/sparc64/kernel/time.c
···
973 973 int err;
974 974
975 975 /* Register IRQ handler. */
976 - err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
976 + err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0,
977 977 "timer", NULL);
978 978
979 979 if (err) {
+19 -28
include/asm-sparc64/irq.h
···
16 16 #include <asm/pil.h>
17 17 #include <asm/ptrace.h>
18 18
19 + struct ino_bucket;
20 +
21 + #define MAX_IRQ_DESC_ACTION 4
22 +
23 + struct irq_desc {
24 + void (*pre_handler)(struct ino_bucket *, void *, void *);
25 + void *pre_handler_arg1;
26 + void *pre_handler_arg2;
27 + u32 action_active_mask;
28 + struct irqaction action[MAX_IRQ_DESC_ACTION];
29 + };
30 +
19 31 /* You should not mess with this directly. That's the job of irq.c.
20 32 *
21 33 * If you make changes here, please update hand coded assembler of
···
54 42 /* Miscellaneous flags. */
55 43 /*0x06*/unsigned char flags;
56 44
57 - /* This is used to deal with IBF_DMA_SYNC on
58 - * Sabre systems.
59 - */
60 - /*0x07*/unsigned char synctab_ent;
45 + /* Currently unused. */
46 + /*0x07*/unsigned char __pad;
61 47
62 - /* Reference to handler for this IRQ. If this is
63 - * non-NULL this means it is active and should be
64 - * serviced. Else the pending member is set to one
65 - * and later registry of the interrupt checks for
66 - * this condition.
67 - *
68 - * Normally this is just an irq_action structure.
69 - * But, on PCI, if multiple interrupt sources behind
70 - * a bridge have multiple interrupt sources that share
71 - * the same INO bucket, this points to an array of
72 - * pointers to four IRQ action structures.
73 - */
74 - /*0x08*/void *irq_info;
48 + /* Reference to IRQ descriptor for this bucket. */
49 + /*0x08*/struct irq_desc *irq_info;
75 50
76 51 /* Sun5 Interrupt Clear Register. */
77 52 /*0x10*/unsigned long iclr;
···
67 68 /*0x18*/unsigned long imap;
68 69
69 70 };
70 -
71 - #ifdef CONFIG_PCI
72 - extern unsigned long pci_dma_wsync;
73 - extern unsigned long dma_sync_reg_table[256];
74 - extern unsigned char dma_sync_reg_table_entry;
75 - #endif
76 -
77 72 /* IMAP/ICLR register defines */
78 73 #define IMAP_VALID 0x80000000 /* IRQ Enabled */
···
83 90 #define ICLR_PENDING 0x00000003 /* Pending state */
84 91
85 92 /* Only 8-bits are available, be careful. -DaveM */
86 - #define IBF_DMA_SYNC 0x01 /* DMA synchronization behind PCI bridge needed. */
87 - #define IBF_PCI 0x02 /* Indicates PSYCHO/SABRE/SCHIZO PCI interrupt. */
88 - #define IBF_ACTIVE 0x04 /* This interrupt is active and has a handler. */
89 - #define IBF_MULTI 0x08 /* On PCI, indicates shared bucket. */
90 - #define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
93 + #define IBF_PCI 0x02 /* PSYCHO/SABRE/SCHIZO PCI interrupt. */
94 + #define IBF_ACTIVE 0x04 /* Interrupt is active and has a handler.*/
95 + #define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
91 96
92 97 #define NUM_IVECS (IMAP_INR + 1)
93 98 extern struct ino_bucket ivector_table[NUM_IVECS];
-15
include/asm-sparc64/signal.h
···
162 162 #define MINSIGSTKSZ 4096
163 163 #define SIGSTKSZ 16384
164 164
165 - #ifdef __KERNEL__
166 - /*
167 - * DJHR
168 - * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
169 - * interrupt handler's irq structure should be statically allocated
170 - * by the request_irq routine.
171 - * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
172 - * of interrupt usage and that sucks. Also without a flag like this
173 - * it may be possible for the free_irq routine to attempt to free
174 - * statically allocated data.. which is NOT GOOD.
175 - *
176 - */
177 - #define SA_STATIC_ALLOC 0x80
178 - #endif
179 -
180 165 #include <asm-generic/signal.h>
181 166
182 167 struct __new_sigaction {