Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

softirq: Add support for triggering softirq work on softirqs.

This is basically a genericization of Jens Axboe's block layer
remote softirq changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

authored by

David S. Miller and committed by
Jens Axboe
54514a70 2e532d68

+153 -1
+21
include/linux/interrupt.h
··· 11 11 #include <linux/hardirq.h> 12 12 #include <linux/sched.h> 13 13 #include <linux/irqflags.h> 14 + #include <linux/smp.h> 15 + #include <linux/percpu.h> 14 16 #include <asm/atomic.h> 15 17 #include <asm/ptrace.h> 16 18 #include <asm/system.h> ··· 275 273 extern void raise_softirq_irqoff(unsigned int nr); 276 274 extern void raise_softirq(unsigned int nr); 277 275 276 + /* This is the worklist that queues up per-cpu softirq work. 277 + * 278 + * send_remote_softirq() adds work to these lists, and 279 + * the softirq handler itself dequeues from them. The queues 280 + * are protected by disabling local cpu interrupts and they must 281 + * only be accessed by the local cpu that they are for. 282 + */ 283 + DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); 284 + 285 + /* Try to send a softirq to a remote cpu. If this cannot be done, the 286 + * work will be queued to the local cpu. 287 + */ 288 + extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq); 289 + 290 + /* Like send_remote_softirq(), but the caller must disable local cpu interrupts 291 + * and compute the current cpu, passed in as 'this_cpu'. 292 + */ 293 + extern void __send_remote_softirq(struct call_single_data *cp, int cpu, 294 + int this_cpu, int softirq); 278 295 279 296 /* Tasklets --- multithreaded analogue of BHs. 280 297
+3 -1
include/linux/smp.h
··· 7 7 */ 8 8 9 9 #include <linux/errno.h> 10 + #include <linux/types.h> 10 11 #include <linux/list.h> 11 12 #include <linux/cpumask.h> 12 13 ··· 17 16 struct list_head list; 18 17 void (*func) (void *info); 19 18 void *info; 20 - unsigned int flags; 19 + u16 flags; 20 + u16 priv; 21 21 }; 22 22 23 23 #ifdef CONFIG_SMP
+129
kernel/softirq.c
··· 6 6 * Distribute under GPLv2. 7 7 * 8 8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) 9 + * 10 + * Remote softirq infrastructure is by Jens Axboe. 9 11 */ 10 12 11 13 #include <linux/module.h> ··· 476 474 477 475 EXPORT_SYMBOL(tasklet_kill); 478 476 477 + DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); 478 + EXPORT_PER_CPU_SYMBOL(softirq_work_list); 479 + 480 + static void __local_trigger(struct call_single_data *cp, int softirq) 481 + { 482 + struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]); 483 + 484 + list_add_tail(&cp->list, head); 485 + 486 + /* Trigger the softirq only if the list was previously empty. */ 487 + if (head->next == &cp->list) 488 + raise_softirq_irqoff(softirq); 489 + } 490 + 491 + #ifdef CONFIG_USE_GENERIC_SMP_HELPERS 492 + static void remote_softirq_receive(void *data) 493 + { 494 + struct call_single_data *cp = data; 495 + unsigned long flags; 496 + int softirq; 497 + 498 + softirq = cp->priv; 499 + 500 + local_irq_save(flags); 501 + __local_trigger(cp, softirq); 502 + local_irq_restore(flags); 503 + } 504 + 505 + static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq) 506 + { 507 + if (cpu_online(cpu)) { 508 + cp->func = remote_softirq_receive; 509 + cp->info = cp; 510 + cp->flags = 0; 511 + cp->priv = softirq; 512 + 513 + __smp_call_function_single(cpu, cp); 514 + return 0; 515 + } 516 + return 1; 517 + } 518 + #else /* CONFIG_USE_GENERIC_SMP_HELPERS */ 519 + static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq) 520 + { 521 + return 1; 522 + } 523 + #endif 524 + 525 + /** 526 + * __send_remote_softirq - try to schedule softirq work on a remote cpu 527 + * @cp: private SMP call function data area 528 + * @cpu: the remote cpu 529 + * @this_cpu: the currently executing cpu 530 + * @softirq: the softirq for the work 531 + * 532 + * Attempt to schedule softirq work on a remote cpu. If this cannot be 533 + * done, the work is instead queued up on the local cpu. 534 + * 535 + * Interrupts must be disabled. 536 + */ 537 + void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq) 538 + { 539 + if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq)) 540 + __local_trigger(cp, softirq); 541 + } 542 + EXPORT_SYMBOL(__send_remote_softirq); 543 + 544 + /** 545 + * send_remote_softirq - try to schedule softirq work on a remote cpu 546 + * @cp: private SMP call function data area 547 + * @cpu: the remote cpu 548 + * @softirq: the softirq for the work 549 + * 550 + * Like __send_remote_softirq except that disabling interrupts and 551 + * computing the current cpu is done for the caller. 552 + */ 553 + void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq) 554 + { 555 + unsigned long flags; 556 + int this_cpu; 557 + 558 + local_irq_save(flags); 559 + this_cpu = smp_processor_id(); 560 + __send_remote_softirq(cp, cpu, this_cpu, softirq); 561 + local_irq_restore(flags); 562 + } 563 + EXPORT_SYMBOL(send_remote_softirq); 564 + 565 + static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, 566 + unsigned long action, void *hcpu) 567 + { 568 + /* 569 + * If a CPU goes away, splice its entries to the current CPU 570 + * and trigger a run of the softirq 571 + */ 572 + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 573 + int cpu = (unsigned long) hcpu; 574 + int i; 575 + 576 + local_irq_disable(); 577 + for (i = 0; i < NR_SOFTIRQS; i++) { 578 + struct list_head *head = &per_cpu(softirq_work_list[i], cpu); 579 + struct list_head *local_head; 580 + 581 + if (list_empty(head)) 582 + continue; 583 + 584 + local_head = &__get_cpu_var(softirq_work_list[i]); 585 + list_splice_init(head, local_head); 586 + raise_softirq_irqoff(i); 587 + } 588 + local_irq_enable(); 589 + } 590 + 591 + return NOTIFY_OK; 592 + } 593 + 594 + static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = { 595 + .notifier_call = remote_softirq_cpu_notify, 596 + }; 597 + 479 598 void __init softirq_init(void) 480 599 { 481 600 int cpu; 482 601 483 602 for_each_possible_cpu(cpu) { 603 + int i; 604 + 484 605 per_cpu(tasklet_vec, cpu).tail = 485 606 &per_cpu(tasklet_vec, cpu).head; 486 607 per_cpu(tasklet_hi_vec, cpu).tail = 487 608 &per_cpu(tasklet_hi_vec, cpu).head; 609 + for (i = 0; i < NR_SOFTIRQS; i++) 610 + INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu)); 488 611 } 612 + 613 + register_hotcpu_notifier(&remote_softirq_cpu_notifier); 489 614 490 615 open_softirq(TASKLET_SOFTIRQ, tasklet_action); 491 616 open_softirq(HI_SOFTIRQ, tasklet_hi_action);