genirq: Use hlist for managing resend handlers

The current implementation utilizes a bitmap for managing interrupt resend
handlers, which is allocated based on the SPARSE_IRQ/NR_IRQS macros.
However, this method may not use memory efficiently at runtime,
particularly when IRQ_BITMAP_BITS is large.

Address this issue by using an hlist to manage interrupt resend handlers
instead of a statically allocated bitmap. Additionally, a new function,
clear_irq_resend(), is introduced and called from irq_shutdown() to ensure
a graceful teardown of the interrupt.

Signed-off-by: Shanker Donthineni <sdonthineni@nvidia.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230519134902.1495562-2-sdonthineni@nvidia.com

authored by Shanker Donthineni and committed by Thomas Gleixner bc06a9e0 d15121be

+37 -16
+3
include/linux/irqdesc.h
··· 102 int parent_irq; 103 struct module *owner; 104 const char *name; 105 } ____cacheline_internodealigned_in_smp; 106 107 #ifdef CONFIG_SPARSE_IRQ
··· 102 int parent_irq; 103 struct module *owner; 104 const char *name; 105 + #ifdef CONFIG_HARDIRQS_SW_RESEND 106 + struct hlist_node resend_node; 107 + #endif 108 } ____cacheline_internodealigned_in_smp; 109 110 #ifdef CONFIG_SPARSE_IRQ
+1
kernel/irq/chip.c
··· 306 void irq_shutdown(struct irq_desc *desc) 307 { 308 if (irqd_is_started(&desc->irq_data)) { 309 desc->depth = 1; 310 if (desc->irq_data.chip->irq_shutdown) { 311 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
··· 306 void irq_shutdown(struct irq_desc *desc) 307 { 308 if (irqd_is_started(&desc->irq_data)) { 309 + clear_irq_resend(desc); 310 desc->depth = 1; 311 if (desc->irq_data.chip->irq_shutdown) { 312 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+2
kernel/irq/internals.h
··· 113 114 /* Resending of interrupts :*/ 115 int check_irq_resend(struct irq_desc *desc, bool inject); 116 bool irq_wait_for_poll(struct irq_desc *desc); 117 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); 118
··· 113 114 /* Resending of interrupts :*/ 115 int check_irq_resend(struct irq_desc *desc, bool inject); 116 + void clear_irq_resend(struct irq_desc *desc); 117 + void irq_resend_init(struct irq_desc *desc); 118 bool irq_wait_for_poll(struct irq_desc *desc); 119 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); 120
+2
kernel/irq/irqdesc.c
··· 415 desc_set_defaults(irq, desc, node, affinity, owner); 416 irqd_set(&desc->irq_data, flags); 417 kobject_init(&desc->kobj, &irq_kobj_type); 418 419 return desc; 420 ··· 582 mutex_init(&desc[i].request_mutex); 583 init_waitqueue_head(&desc[i].wait_for_threads); 584 desc_set_defaults(i, &desc[i], node, NULL, NULL); 585 } 586 return arch_early_irq_init(); 587 }
··· 415 desc_set_defaults(irq, desc, node, affinity, owner); 416 irqd_set(&desc->irq_data, flags); 417 kobject_init(&desc->kobj, &irq_kobj_type); 418 + irq_resend_init(desc); 419 420 return desc; 421 ··· 581 mutex_init(&desc[i].request_mutex); 582 init_waitqueue_head(&desc[i].wait_for_threads); 583 desc_set_defaults(i, &desc[i], node, NULL, NULL); 584 + irq_resend_init(desc); 585 } 586 return arch_early_irq_init(); 587 }
+29 -16
kernel/irq/resend.c
··· 21 22 #ifdef CONFIG_HARDIRQS_SW_RESEND 23 24 - /* Bitmap to handle software resend of interrupts: */ 25 - static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS); 26 27 /* 28 * Run software resends of IRQ's ··· 31 static void resend_irqs(struct tasklet_struct *unused) 32 { 33 struct irq_desc *desc; 34 - int irq; 35 36 - while (!bitmap_empty(irqs_resend, nr_irqs)) { 37 - irq = find_first_bit(irqs_resend, nr_irqs); 38 - clear_bit(irq, irqs_resend); 39 - desc = irq_to_desc(irq); 40 - if (!desc) 41 - continue; 42 - local_irq_disable(); 43 desc->handle_irq(desc); 44 - local_irq_enable(); 45 } 46 } 47 48 /* Tasklet to handle resend: */ ··· 49 50 static int irq_sw_resend(struct irq_desc *desc) 51 { 52 - unsigned int irq = irq_desc_get_irq(desc); 53 - 54 /* 55 * Validate whether this interrupt can be safely injected from 56 * non interrupt context ··· 68 */ 69 if (!desc->parent_irq) 70 return -EINVAL; 71 - irq = desc->parent_irq; 72 } 73 74 - /* Set it pending and activate the softirq: */ 75 - set_bit(irq, irqs_resend); 76 tasklet_schedule(&resend_tasklet); 77 return 0; 78 } 79 80 #else 81 static int irq_sw_resend(struct irq_desc *desc) 82 { 83 return -EINVAL;
··· 21 22 #ifdef CONFIG_HARDIRQS_SW_RESEND 23 24 + /* hlist_head to handle software resend of interrupts: */ 25 + static HLIST_HEAD(irq_resend_list); 26 + static DEFINE_RAW_SPINLOCK(irq_resend_lock); 27 28 /* 29 * Run software resends of IRQ's ··· 30 static void resend_irqs(struct tasklet_struct *unused) 31 { 32 struct irq_desc *desc; 33 34 + raw_spin_lock_irq(&irq_resend_lock); 35 + while (!hlist_empty(&irq_resend_list)) { 36 + desc = hlist_entry(irq_resend_list.first, struct irq_desc, 37 + resend_node); 38 + hlist_del_init(&desc->resend_node); 39 + raw_spin_unlock(&irq_resend_lock); 40 desc->handle_irq(desc); 41 + raw_spin_lock(&irq_resend_lock); 42 } 43 + raw_spin_unlock_irq(&irq_resend_lock); 44 } 45 46 /* Tasklet to handle resend: */ ··· 49 50 static int irq_sw_resend(struct irq_desc *desc) 51 { 52 /* 53 * Validate whether this interrupt can be safely injected from 54 * non interrupt context ··· 70 */ 71 if (!desc->parent_irq) 72 return -EINVAL; 73 } 74 75 + /* Add to resend_list and activate the softirq: */ 76 + raw_spin_lock(&irq_resend_lock); 77 + hlist_add_head(&desc->resend_node, &irq_resend_list); 78 + raw_spin_unlock(&irq_resend_lock); 79 tasklet_schedule(&resend_tasklet); 80 return 0; 81 } 82 83 + void clear_irq_resend(struct irq_desc *desc) 84 + { 85 + raw_spin_lock(&irq_resend_lock); 86 + hlist_del_init(&desc->resend_node); 87 + raw_spin_unlock(&irq_resend_lock); 88 + } 89 + 90 + void irq_resend_init(struct irq_desc *desc) 91 + { 92 + INIT_HLIST_NODE(&desc->resend_node); 93 + } 94 #else 95 + void clear_irq_resend(struct irq_desc *desc) {} 96 + void irq_resend_init(struct irq_desc *desc) {} 97 + 98 static int irq_sw_resend(struct irq_desc *desc) 99 { 100 return -EINVAL;