genirq: Use hlist for managing resend handlers

The current implementation uses a bitmap to manage interrupt resend
handlers, statically sized at build time via the SPARSE_IRQ/NR_IRQS
macros. This can waste memory at runtime, particularly when
IRQ_BITMAP_BITS is large.
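
For reference, the sizing in question comes from kernel/irq/internals.h
(quoted below; the exact constant may differ between kernel versions).
The cost comparison is a rough sketch, not part of the patch:

  #ifdef CONFIG_SPARSE_IRQ
  # define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
  #else
  # define IRQ_BITMAP_BITS	NR_IRQS
  #endif

  /*
   * Old cost: IRQ_BITMAP_BITS / 8 bytes of static storage, paid even if
   * no interrupt ever needs a software resend.
   * New cost: one struct hlist_node (two pointers) embedded in each
   * allocated irq_desc, plus one global list head and one raw spinlock.
   */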

Address this issue by using an hlist to manage interrupt resend
handlers instead of a statically allocated bitmap. Additionally,
introduce a new function, clear_irq_resend(), called from
irq_shutdown() to ensure a graceful teardown of the interrupt.
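
The reworked resend_irqs() (see the kernel/irq/resend.c hunks below)
dequeues one descriptor at a time and drops the lock across the handler
invocation, because the handler may itself queue further resends. Here
is a minimal userspace sketch of that pattern (illustrative names only;
a pthread mutex stands in for the kernel's raw spinlock):

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Stand-in for an irq_desc sitting on the resend list. */
  struct work_item {
  	struct work_item *next;
  	int id;
  };

  static struct work_item *pending;	/* list head, cf. irq_resend_list */
  static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

  static void handle(struct work_item *w)
  {
  	/* May queue more work; must run without pending_lock held. */
  	printf("handling item %d\n", w->id);
  }

  static void drain_pending(void)
  {
  	pthread_mutex_lock(&pending_lock);
  	while (pending) {
  		struct work_item *w = pending;

  		pending = w->next;		/* detach head under the lock */
  		pthread_mutex_unlock(&pending_lock);
  		handle(w);			/* invoke handler unlocked */
  		free(w);
  		pthread_mutex_lock(&pending_lock);
  	}
  	pthread_mutex_unlock(&pending_lock);
  }

  int main(void)
  {
  	for (int i = 0; i < 3; i++) {
  		struct work_item *w = malloc(sizeof(*w));

  		w->id = i;
  		pthread_mutex_lock(&pending_lock);
  		w->next = pending;		/* cf. hlist_add_head() */
  		pending = w;
  		pthread_mutex_unlock(&pending_lock);
  	}
  	drain_pending();
  	return 0;
  }

Note that the kernel side unlinks with hlist_del_init() rather than
hlist_del(), leaving the node in a well-defined (unhashed) state; this
is what allows clear_irq_resend() to run safely even when the
descriptor is not currently queued.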

Signed-off-by: Shanker Donthineni <sdonthineni@nvidia.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230519134902.1495562-2-sdonthineni@nvidia.com

---
 include/linux/irqdesc.h |  3 +++
 kernel/irq/chip.c       |  1 +
 kernel/irq/internals.h  |  2 ++
 kernel/irq/irqdesc.c    |  2 ++
 kernel/irq/resend.c     | 45 +++++++++++++++++++++++++++++----------------
 5 files changed, 37 insertions(+), 16 deletions(-)

--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -102,6 +102,9 @@ struct irq_desc {
 	int			parent_irq;
 	struct module		*owner;
 	const char		*name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+	struct hlist_node	resend_node;
+#endif
 } ____cacheline_internodealigned_in_smp;
 
 #ifdef CONFIG_SPARSE_IRQ

--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -306,6 +306,7 @@
 void irq_shutdown(struct irq_desc *desc)
 {
 	if (irqd_is_started(&desc->irq_data)) {
+		clear_irq_resend(desc);
 		desc->depth = 1;
 		if (desc->irq_data.chip->irq_shutdown) {
 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);

--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -113,6 +113,8 @@
 
 /* Resending of interrupts :*/
 int check_irq_resend(struct irq_desc *desc, bool inject);
+void clear_irq_resend(struct irq_desc *desc);
+void irq_resend_init(struct irq_desc *desc);
 bool irq_wait_for_poll(struct irq_desc *desc);
 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
 

--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -415,6 +415,7 @@
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
 	kobject_init(&desc->kobj, &irq_kobj_type);
+	irq_resend_init(desc);
 
 	return desc;
 
@@ -581,6 +582,7 @@
 		mutex_init(&desc[i].request_mutex);
 		init_waitqueue_head(&desc[i].wait_for_threads);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
+		irq_resend_init(desc);
 	}
 	return arch_early_irq_init();
 }

--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -21,8 +21,9 @@
 
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
-/* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
+/* hlist_head to handle software resend of interrupts: */
+static HLIST_HEAD(irq_resend_list);
+static DEFINE_RAW_SPINLOCK(irq_resend_lock);
 
 /*
  * Run software resends of IRQ's
@@ -30,18 +31,17 @@
 static void resend_irqs(struct tasklet_struct *unused)
 {
 	struct irq_desc *desc;
-	int irq;
 
-	while (!bitmap_empty(irqs_resend, nr_irqs)) {
-		irq = find_first_bit(irqs_resend, nr_irqs);
-		clear_bit(irq, irqs_resend);
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-		local_irq_disable();
+	raw_spin_lock_irq(&irq_resend_lock);
+	while (!hlist_empty(&irq_resend_list)) {
+		desc = hlist_entry(irq_resend_list.first, struct irq_desc,
+				   resend_node);
+		hlist_del_init(&desc->resend_node);
+		raw_spin_unlock(&irq_resend_lock);
 		desc->handle_irq(desc);
-		local_irq_enable();
+		raw_spin_lock(&irq_resend_lock);
 	}
+	raw_spin_unlock_irq(&irq_resend_lock);
 }
 
 /* Tasklet to handle resend: */
@@ -49,8 +49,6 @@
 
 static int irq_sw_resend(struct irq_desc *desc)
 {
-	unsigned int irq = irq_desc_get_irq(desc);
-
 	/*
 	 * Validate whether this interrupt can be safely injected from
 	 * non interrupt context
@@ -68,16 +66,31 @@
 		 */
 		if (!desc->parent_irq)
 			return -EINVAL;
-		irq = desc->parent_irq;
 	}
 
-	/* Set it pending and activate the softirq: */
-	set_bit(irq, irqs_resend);
+	/* Add to resend_list and activate the softirq: */
+	raw_spin_lock(&irq_resend_lock);
+	hlist_add_head(&desc->resend_node, &irq_resend_list);
+	raw_spin_unlock(&irq_resend_lock);
 	tasklet_schedule(&resend_tasklet);
 	return 0;
 }
 
+void clear_irq_resend(struct irq_desc *desc)
+{
+	raw_spin_lock(&irq_resend_lock);
+	hlist_del_init(&desc->resend_node);
+	raw_spin_unlock(&irq_resend_lock);
+}
+
+void irq_resend_init(struct irq_desc *desc)
+{
+	INIT_HLIST_NODE(&desc->resend_node);
+}
 #else
+void clear_irq_resend(struct irq_desc *desc) {}
+void irq_resend_init(struct irq_desc *desc) {}
+
 static int irq_sw_resend(struct irq_desc *desc)
 {
 	return -EINVAL;