Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: fix a migrating irq bug when hotplug cpu

When a CPU is disabled, all of its IRQs are migrated to another CPU.
In some cases the new affinity differs from the old one, so the old
affinity mask needs to be updated; but when irq_set_affinity() returns
IRQ_SET_MASK_OK_DONE, the old affinity is never updated. Fix it by
using irq_do_set_affinity(), which handles the update itself.

And migrating interrupts is a core code matter, so use the generic
function irq_migrate_all_off_this_cpu() to migrate interrupts in
kernel/irq/migration.c.

Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Hanjun Guo <hanjun.guo@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

authored by

Yang Yingliang and committed by
Catalin Marinas
217d453d a78afccb

+3 -64
+1
arch/arm64/Kconfig
··· 427 427 428 428 config HOTPLUG_CPU 429 429 bool "Support for hot-pluggable CPUs" 430 + select GENERIC_IRQ_MIGRATION 430 431 help 431 432 Say Y here to experiment with turning CPUs off and on. CPUs 432 433 can be controlled through /sys/devices/system/cpu.
-1
arch/arm64/include/asm/irq.h
··· 7 7 8 8 struct pt_regs; 9 9 10 - extern void migrate_irqs(void); 11 10 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); 12 11 13 12 static inline void acpi_irq_init(void)
-62
arch/arm64/kernel/irq.c
··· 27 27 #include <linux/init.h> 28 28 #include <linux/irqchip.h> 29 29 #include <linux/seq_file.h> 30 - #include <linux/ratelimit.h> 31 30 32 31 unsigned long irq_err_count; 33 32 ··· 53 54 if (!handle_arch_irq) 54 55 panic("No interrupt controller found."); 55 56 } 56 - 57 - #ifdef CONFIG_HOTPLUG_CPU 58 - static bool migrate_one_irq(struct irq_desc *desc) 59 - { 60 - struct irq_data *d = irq_desc_get_irq_data(desc); 61 - const struct cpumask *affinity = irq_data_get_affinity_mask(d); 62 - struct irq_chip *c; 63 - bool ret = false; 64 - 65 - /* 66 - * If this is a per-CPU interrupt, or the affinity does not 67 - * include this CPU, then we have nothing to do. 68 - */ 69 - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) 70 - return false; 71 - 72 - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 73 - affinity = cpu_online_mask; 74 - ret = true; 75 - } 76 - 77 - c = irq_data_get_irq_chip(d); 78 - if (!c->irq_set_affinity) 79 - pr_debug("IRQ%u: unable to set affinity\n", d->irq); 80 - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) 81 - cpumask_copy(irq_data_get_affinity_mask(d), affinity); 82 - 83 - return ret; 84 - } 85 - 86 - /* 87 - * The current CPU has been marked offline. Migrate IRQs off this CPU. 88 - * If the affinity settings do not allow other CPUs, force them onto any 89 - * available CPU. 90 - * 91 - * Note: we must iterate over all IRQs, whether they have an attached 92 - * action structure or not, as we need to get chained interrupts too. 
93 - */ 94 - void migrate_irqs(void) 95 - { 96 - unsigned int i; 97 - struct irq_desc *desc; 98 - unsigned long flags; 99 - 100 - local_irq_save(flags); 101 - 102 - for_each_irq_desc(i, desc) { 103 - bool affinity_broken; 104 - 105 - raw_spin_lock(&desc->lock); 106 - affinity_broken = migrate_one_irq(desc); 107 - raw_spin_unlock(&desc->lock); 108 - 109 - if (affinity_broken) 110 - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", 111 - i, smp_processor_id()); 112 - } 113 - 114 - local_irq_restore(flags); 115 - } 116 - #endif /* CONFIG_HOTPLUG_CPU */
+2 -1
arch/arm64/kernel/smp.c
··· 231 231 /* 232 232 * OK - migrate IRQs away from this CPU 233 233 */ 234 - migrate_irqs(); 234 + irq_migrate_all_off_this_cpu(); 235 + 235 236 return 0; 236 237 } 237 238