Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'irq-core-2025-01-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull interrupt subsystem updates from Thomas Gleixner:

- Consolidate the machine_kexec_mask_interrupts() by providing a
generic implementation and replacing the copy & pasta orgy in the
relevant architectures.

- Prevent unconditional operations on interrupt chips during kexec
shutdown, which can trigger warnings in certain cases when the
underlying interrupt has been shut down before.

- Make the enforcement of interrupt handling in interrupt context
unconditionally available, so that it actually works for non x86
related interrupt chips. The earlier enablement for ARM GIC chips set
the required chip flag, but did not notice that the check was hidden
behind a config switch which is not selected by ARM[64].

- Decrapify the handling of deferred interrupt affinity setting.

Some interrupt chips require that affinity changes are made from the
context of handling an interrupt to avoid certain race conditions.
For x86 this was the default, but with interrupt remapping this
requirement was lifted and a flag was introduced which tells the core
code that affinity changes can be done in any context. Unrestricted
affinity changes are the default for the majority of interrupt chips.

RISCV has the requirement to add the deferred mode to one of its
interrupt controllers, but with the original implementation this
would require to add the any context flag to all other RISC-V
interrupt chips. That's backwards, so reverse the logic and require
that chips, which need the deferred mode have to be marked
accordingly. That avoids chasing the 'sane' chips and marking them.

- Add multi-node support to the Loongarch AVEC interrupt controller
driver.

- The usual tiny cleanups, fixes and improvements all over the place.

* tag 'irq-core-2025-01-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
genirq/generic_chip: Export irq_gc_mask_disable_and_ack_set()
genirq/timings: Add kernel-doc for a function parameter
genirq: Remove IRQ_MOVE_PCNTXT and related code
x86/apic: Convert to IRQCHIP_MOVE_DEFERRED
genirq: Provide IRQCHIP_MOVE_DEFERRED
hexagon: Remove GENERIC_PENDING_IRQ leftover
ARC: Remove GENERIC_PENDING_IRQ
genirq: Remove handle_enforce_irqctx() wrapper
genirq: Make handle_enforce_irqctx() unconditionally available
irqchip/loongarch-avec: Add multi-nodes topology support
irqchip/ts4800: Replace seq_printf() by seq_puts()
irqchip/ti-sci-inta : Add module build support
irqchip/ti-sci-intr: Add module build support
irqchip/irq-brcmstb-l2: Replace brcmstb_l2_mask_and_ack() by generic function
irqchip: keystone: Use syscon_regmap_lookup_by_phandle_args
genirq/kexec: Prevent redundant IRQ masking by checking state before shutdown
kexec: Consolidate machine_kexec_mask_interrupts() implementation
genirq: Reuse irq_thread_fn() for forced thread case
genirq: Move irq_thread_fn() further up in the code

+108 -230
-1
arch/arc/Kconfig
··· 25 25 # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP 26 26 select GENERIC_IRQ_SHOW 27 27 select GENERIC_PCI_IOMAP 28 - select GENERIC_PENDING_IRQ if SMP 29 28 select GENERIC_SCHED_CLOCK 30 29 select GENERIC_SMP_IDLE_THREAD 31 30 select GENERIC_IOREMAP
-2
arch/arc/kernel/mcip.c
··· 357 357 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) 358 358 { 359 359 irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq); 360 - irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); 361 - 362 360 return 0; 363 361 } 364 362
-23
arch/arm/kernel/machine_kexec.c
··· 127 127 cpus_stopped = 1; 128 128 } 129 129 130 - static void machine_kexec_mask_interrupts(void) 131 - { 132 - unsigned int i; 133 - struct irq_desc *desc; 134 - 135 - for_each_irq_desc(i, desc) { 136 - struct irq_chip *chip; 137 - 138 - chip = irq_desc_get_chip(desc); 139 - if (!chip) 140 - continue; 141 - 142 - if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) 143 - chip->irq_eoi(&desc->irq_data); 144 - 145 - if (chip->irq_mask) 146 - chip->irq_mask(&desc->irq_data); 147 - 148 - if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) 149 - chip->irq_disable(&desc->irq_data); 150 - } 151 - } 152 - 153 130 void machine_crash_shutdown(struct pt_regs *regs) 154 131 { 155 132 local_irq_disable();
+1
arch/arm64/Kconfig
··· 149 149 select GENERIC_IDLE_POLL_SETUP 150 150 select GENERIC_IOREMAP 151 151 select GENERIC_IRQ_IPI 152 + select GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD 152 153 select GENERIC_IRQ_PROBE 153 154 select GENERIC_IRQ_SHOW 154 155 select GENERIC_IRQ_SHOW_LEVEL
-2
arch/arm64/Kconfig.platforms
··· 135 135 select SOC_TI 136 136 select TI_MESSAGE_MANAGER 137 137 select TI_SCI_PROTOCOL 138 - select TI_SCI_INTR_IRQCHIP 139 - select TI_SCI_INTA_IRQCHIP 140 138 select TI_K3_SOCINFO 141 139 help 142 140 This enables support for Texas Instruments' K3 multicore SoC
-31
arch/arm64/kernel/machine_kexec.c
··· 207 207 BUG(); /* Should never get here. */ 208 208 } 209 209 210 - static void machine_kexec_mask_interrupts(void) 211 - { 212 - unsigned int i; 213 - struct irq_desc *desc; 214 - 215 - for_each_irq_desc(i, desc) { 216 - struct irq_chip *chip; 217 - int ret; 218 - 219 - chip = irq_desc_get_chip(desc); 220 - if (!chip) 221 - continue; 222 - 223 - /* 224 - * First try to remove the active state. If this 225 - * fails, try to EOI the interrupt. 226 - */ 227 - ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false); 228 - 229 - if (ret && irqd_irq_inprogress(&desc->irq_data) && 230 - chip->irq_eoi) 231 - chip->irq_eoi(&desc->irq_data); 232 - 233 - if (chip->irq_mask) 234 - chip->irq_mask(&desc->irq_data); 235 - 236 - if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) 237 - chip->irq_disable(&desc->irq_data); 238 - } 239 - } 240 - 241 210 /** 242 211 * machine_crash_shutdown - shutdown non-crashing cpus and save registers 243 212 */
-1
arch/hexagon/Kconfig
··· 20 20 # select ARCH_HAS_CPU_IDLE_WAIT 21 21 # select GPIOLIB 22 22 # select HAVE_CLK 23 - # select GENERIC_PENDING_IRQ if SMP 24 23 select GENERIC_ATOMIC64 25 24 select HAVE_PERF_EVENTS 26 25 # GENERIC_ALLOCATOR is used by dma_alloc_coherent()
-1
arch/powerpc/include/asm/kexec.h
··· 61 61 extern void kexec_smp_wait(void); /* get and clear naca physid, wait for 62 62 master to copy new code to 0 */ 63 63 extern void default_machine_kexec(struct kimage *image); 64 - extern void machine_kexec_mask_interrupts(void); 65 64 66 65 void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer, 67 66 unsigned long start_address) __noreturn;
-22
arch/powerpc/kexec/core.c
··· 22 22 #include <asm/setup.h> 23 23 #include <asm/firmware.h> 24 24 25 - void machine_kexec_mask_interrupts(void) { 26 - unsigned int i; 27 - struct irq_desc *desc; 28 - 29 - for_each_irq_desc(i, desc) { 30 - struct irq_chip *chip; 31 - 32 - chip = irq_desc_get_chip(desc); 33 - if (!chip) 34 - continue; 35 - 36 - if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) 37 - chip->irq_eoi(&desc->irq_data); 38 - 39 - if (chip->irq_mask) 40 - chip->irq_mask(&desc->irq_data); 41 - 42 - if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) 43 - chip->irq_disable(&desc->irq_data); 44 - } 45 - } 46 - 47 25 #ifdef CONFIG_CRASH_DUMP 48 26 void machine_crash_shutdown(struct pt_regs *regs) 49 27 {
+1
arch/powerpc/kexec/core_32.c
··· 7 7 * Copyright (C) 2005 IBM Corporation. 8 8 */ 9 9 10 + #include <linux/irq.h> 10 11 #include <linux/kexec.h> 11 12 #include <linux/mm.h> 12 13 #include <linux/string.h>
-23
arch/riscv/kernel/machine_kexec.c
··· 114 114 #endif 115 115 } 116 116 117 - static void machine_kexec_mask_interrupts(void) 118 - { 119 - unsigned int i; 120 - struct irq_desc *desc; 121 - 122 - for_each_irq_desc(i, desc) { 123 - struct irq_chip *chip; 124 - 125 - chip = irq_desc_get_chip(desc); 126 - if (!chip) 127 - continue; 128 - 129 - if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) 130 - chip->irq_eoi(&desc->irq_data); 131 - 132 - if (chip->irq_mask) 133 - chip->irq_mask(&desc->irq_data); 134 - 135 - if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) 136 - chip->irq_disable(&desc->irq_data); 137 - } 138 - } 139 - 140 117 /* 141 118 * machine_crash_shutdown - Prepare to kexec after a kernel crash 142 119 *
+1 -1
arch/x86/hyperv/irqdomain.c
··· 304 304 .irq_retrigger = irq_chip_retrigger_hierarchy, 305 305 .irq_compose_msi_msg = hv_irq_compose_msi_msg, 306 306 .irq_set_affinity = msi_domain_set_affinity, 307 - .flags = IRQCHIP_SKIP_SET_WAKE, 307 + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED, 308 308 }; 309 309 310 310 static struct msi_domain_ops pci_msi_domain_ops = {
+1 -1
arch/x86/kernel/apic/io_apic.c
··· 1861 1861 .irq_set_affinity = ioapic_set_affinity, 1862 1862 .irq_retrigger = irq_chip_retrigger_hierarchy, 1863 1863 .irq_get_irqchip_state = ioapic_irq_get_chip_state, 1864 - .flags = IRQCHIP_SKIP_SET_WAKE | 1864 + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED | 1865 1865 IRQCHIP_AFFINITY_PRE_STARTUP, 1866 1866 }; 1867 1867
+2 -1
arch/x86/kernel/apic/msi.c
··· 214 214 if (WARN_ON_ONCE(domain != real_parent)) 215 215 return false; 216 216 info->chip->irq_set_affinity = msi_set_affinity; 217 + info->chip->flags |= IRQCHIP_MOVE_DEFERRED; 217 218 break; 218 219 case DOMAIN_BUS_DMAR: 219 220 case DOMAIN_BUS_AMDVI: ··· 316 315 .irq_retrigger = irq_chip_retrigger_hierarchy, 317 316 .irq_compose_msi_msg = dmar_msi_compose_msg, 318 317 .irq_write_msi_msg = dmar_msi_write_msg, 319 - .flags = IRQCHIP_SKIP_SET_WAKE | 318 + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED | 320 319 IRQCHIP_AFFINITY_PRE_STARTUP, 321 320 }; 322 321
-8
arch/x86/kernel/hpet.c
··· 517 517 struct msi_domain_info *info, unsigned int virq, 518 518 irq_hw_number_t hwirq, msi_alloc_info_t *arg) 519 519 { 520 - irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); 521 520 irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL, 522 521 handle_edge_irq, arg->data, "edge"); 523 522 524 523 return 0; 525 524 } 526 525 527 - static void hpet_msi_free(struct irq_domain *domain, 528 - struct msi_domain_info *info, unsigned int virq) 529 - { 530 - irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT); 531 - } 532 - 533 526 static struct msi_domain_ops hpet_msi_domain_ops = { 534 527 .msi_init = hpet_msi_init, 535 - .msi_free = hpet_msi_free, 536 528 }; 537 529 538 530 static struct msi_domain_info hpet_msi_domain_info = {
-3
arch/x86/platform/uv/uv_irq.c
··· 92 92 if (ret >= 0) { 93 93 if (info->uv.limit == UV_AFFINITY_CPU) 94 94 irq_set_status_flags(virq, IRQ_NO_BALANCING); 95 - else 96 - irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); 97 95 98 96 chip_data->pnode = uv_blade_to_pnode(info->uv.blade); 99 97 chip_data->offset = info->uv.offset; ··· 111 113 112 114 BUG_ON(nr_irqs != 1); 113 115 kfree(irq_data->chip_data); 114 - irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT); 115 116 irq_clear_status_flags(virq, IRQ_NO_BALANCING); 116 117 irq_domain_free_irqs_top(domain, virq, nr_irqs); 117 118 }
+1 -1
drivers/iommu/amd/init.c
··· 2332 2332 .irq_retrigger = irq_chip_retrigger_hierarchy, 2333 2333 .irq_set_affinity = intcapxt_set_affinity, 2334 2334 .irq_set_wake = intcapxt_set_wake, 2335 - .flags = IRQCHIP_MASK_ON_SUSPEND, 2335 + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED, 2336 2336 }; 2337 2337 2338 2338 static const struct irq_domain_ops intcapxt_domain_ops = {
-1
drivers/iommu/amd/iommu.c
··· 3540 3540 irq_data->chip_data = data; 3541 3541 irq_data->chip = &amd_ir_chip; 3542 3542 irq_remapping_prepare_irte(data, cfg, info, devid, index, i); 3543 - irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT); 3544 3543 } 3545 3544 3546 3545 return 0;
-1
drivers/iommu/intel/irq_remapping.c
··· 1463 1463 else 1464 1464 irq_data->chip = &intel_ir_chip; 1465 1465 intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i); 1466 - irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT); 1467 1466 } 1468 1467 return 0; 1469 1468
+4 -2
drivers/irqchip/Kconfig
··· 534 534 Support for the Loongson-1 platform Interrupt Controller. 535 535 536 536 config TI_SCI_INTR_IRQCHIP 537 - bool 537 + tristate "TI SCI INTR Interrupt Controller" 538 538 depends on TI_SCI_PROTOCOL 539 + depends on ARCH_K3 || COMPILE_TEST 539 540 select IRQ_DOMAIN_HIERARCHY 540 541 help 541 542 This enables the irqchip driver support for K3 Interrupt router ··· 545 544 TI System Controller, say Y here. Otherwise, say N. 546 545 547 546 config TI_SCI_INTA_IRQCHIP 548 - bool 547 + tristate "TI SCI INTA Interrupt Controller" 549 548 depends on TI_SCI_PROTOCOL 549 + depends on ARCH_K3 || (COMPILE_TEST && ARM64) 550 550 select IRQ_DOMAIN_HIERARCHY 551 551 select TI_SCI_INTA_MSI_DOMAIN 552 552 help
+1 -27
drivers/irqchip/irq-brcmstb-l2.c
··· 61 61 u32 saved_mask; /* for suspend/resume */ 62 62 }; 63 63 64 - /** 65 - * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt 66 - * @d: irq_data 67 - * 68 - * Chip has separate enable/disable registers instead of a single mask 69 - * register and pending interrupt is acknowledged by setting a bit. 70 - * 71 - * Note: This function is generic and could easily be added to the 72 - * generic irqchip implementation if there ever becomes a will to do so. 73 - * Perhaps with a name like irq_gc_mask_disable_and_ack_set(). 74 - * 75 - * e.g.: https://patchwork.kernel.org/patch/9831047/ 76 - */ 77 - static void brcmstb_l2_mask_and_ack(struct irq_data *d) 78 - { 79 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 80 - struct irq_chip_type *ct = irq_data_get_chip_type(d); 81 - u32 mask = d->mask; 82 - 83 - irq_gc_lock(gc); 84 - irq_reg_writel(gc, mask, ct->regs.disable); 85 - *ct->mask_cache &= ~mask; 86 - irq_reg_writel(gc, mask, ct->regs.ack); 87 - irq_gc_unlock(gc); 88 - } 89 - 90 64 static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) 91 65 { 92 66 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); ··· 222 248 if (init_params->cpu_clear >= 0) { 223 249 ct->regs.ack = init_params->cpu_clear; 224 250 ct->chip.irq_ack = irq_gc_ack_set_bit; 225 - ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack; 251 + ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set; 226 252 } else { 227 253 /* No Ack - but still slightly more efficient to define this */ 228 254 ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+2 -9
drivers/irqchip/irq-keystone.c
··· 141 141 if (!kirq) 142 142 return -ENOMEM; 143 143 144 - kirq->devctrl_regs = 145 - syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev"); 144 + kirq->devctrl_regs = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev", 145 + 1, &kirq->devctrl_offset); 146 146 if (IS_ERR(kirq->devctrl_regs)) 147 147 return PTR_ERR(kirq->devctrl_regs); 148 - 149 - ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, 150 - &kirq->devctrl_offset); 151 - if (ret) { 152 - dev_err(dev, "couldn't read the devctrl_offset offset!\n"); 153 - return ret; 154 - } 155 148 156 149 kirq->irq = platform_get_irq(pdev, 0); 157 150 if (kirq->irq < 0)
+12 -4
drivers/irqchip/irq-loongarch-avec.c
··· 56 56 unsigned int moving; 57 57 }; 58 58 59 + static inline void avecintc_enable(void) 60 + { 61 + u64 value; 62 + 63 + value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); 64 + value |= IOCSR_MISC_FUNC_AVEC_EN; 65 + iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); 66 + } 67 + 59 68 static inline void avecintc_ack_irq(struct irq_data *d) 60 69 { 61 70 } ··· 135 126 return 0; 136 127 137 128 guard(raw_spinlock)(&loongarch_avec.lock); 129 + 130 + avecintc_enable(); 138 131 139 132 irq_matrix_online(loongarch_avec.vector_matrix); 140 133 ··· 350 339 static int __init avecintc_init(struct irq_domain *parent) 351 340 { 352 341 int ret, parent_irq; 353 - unsigned long value; 354 342 355 343 raw_spin_lock_init(&loongarch_avec.lock); 356 344 ··· 388 378 "irqchip/loongarch/avecintc:starting", 389 379 avecintc_cpu_online, avecintc_cpu_offline); 390 380 #endif 391 - value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); 392 - value |= IOCSR_MISC_FUNC_AVEC_EN; 393 - iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); 381 + avecintc_enable(); 394 382 395 383 return ret; 396 384
+1
drivers/irqchip/irq-ti-sci-inta.c
··· 743 743 744 744 MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>"); 745 745 MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol"); 746 + MODULE_LICENSE("GPL");
+1
drivers/irqchip/irq-ti-sci-intr.c
··· 303 303 304 304 MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>"); 305 305 MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol"); 306 + MODULE_LICENSE("GPL");
+1 -1
drivers/irqchip/irq-ts4800.c
··· 52 52 { 53 53 struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d); 54 54 55 - seq_printf(p, "%s", dev_name(&data->pdev->dev)); 55 + seq_puts(p, dev_name(&data->pdev->dev)); 56 56 } 57 57 58 58 static const struct irq_chip ts4800_chip = {
+1
drivers/pci/controller/pci-hyperv.c
··· 2053 2053 .irq_set_affinity = irq_chip_set_affinity_parent, 2054 2054 #ifdef CONFIG_X86 2055 2055 .irq_ack = irq_chip_ack_parent, 2056 + .flags = IRQCHIP_MOVE_DEFERRED, 2056 2057 #elif defined(CONFIG_ARM64) 2057 2058 .irq_eoi = irq_chip_eoi_parent, 2058 2059 #endif
-6
drivers/xen/events/events_base.c
··· 722 722 INIT_RCU_WORK(&info->rwork, delayed_free_irq); 723 723 724 724 set_info_for_irq(irq, info); 725 - /* 726 - * Interrupt affinity setting can be immediate. No point 727 - * in delaying it until an interrupt is handled. 728 - */ 729 - irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 730 - 731 725 INIT_LIST_HEAD(&info->eoi_list); 732 726 list_add_tail(&info->list, &xen_irq_list_head); 733 727 }
+6 -11
include/linux/irq.h
··· 64 64 * IRQ_NOAUTOEN - Interrupt is not automatically enabled in 65 65 * request/setup_irq() 66 66 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) 67 - * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context 68 67 * IRQ_NESTED_THREAD - Interrupt nests into another thread 69 68 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable 70 69 * IRQ_IS_POLLED - Always polled by another interrupt. Exclude ··· 92 93 IRQ_NOREQUEST = (1 << 11), 93 94 IRQ_NOAUTOEN = (1 << 12), 94 95 IRQ_NO_BALANCING = (1 << 13), 95 - IRQ_MOVE_PCNTXT = (1 << 14), 96 96 IRQ_NESTED_THREAD = (1 << 15), 97 97 IRQ_NOTHREAD = (1 << 16), 98 98 IRQ_PER_CPU_DEVID = (1 << 17), ··· 103 105 104 106 #define IRQF_MODIFY_MASK \ 105 107 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 106 - IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ 108 + IRQ_NOAUTOEN | IRQ_LEVEL | IRQ_NO_BALANCING | \ 107 109 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ 108 110 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN) 109 111 ··· 199 201 * IRQD_LEVEL - Interrupt is level triggered 200 202 * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup 201 203 * from suspend 202 - * IRQD_MOVE_PCNTXT - Interrupt can be moved in process 203 - * context 204 204 * IRQD_IRQ_DISABLED - Disabled state of the interrupt 205 205 * IRQD_IRQ_MASKED - Masked state of the interrupt 206 206 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt ··· 229 233 IRQD_AFFINITY_SET = BIT(12), 230 234 IRQD_LEVEL = BIT(13), 231 235 IRQD_WAKEUP_STATE = BIT(14), 232 - IRQD_MOVE_PCNTXT = BIT(15), 233 236 IRQD_IRQ_DISABLED = BIT(16), 234 237 IRQD_IRQ_MASKED = BIT(17), 235 238 IRQD_IRQ_INPROGRESS = BIT(18), ··· 331 336 static inline bool irqd_is_wakeup_set(struct irq_data *d) 332 337 { 333 338 return __irqd_to_state(d) & IRQD_WAKEUP_STATE; 334 - } 335 - 336 - static inline bool irqd_can_move_in_process_context(struct irq_data *d) 337 - { 338 - return __irqd_to_state(d) & 
IRQD_MOVE_PCNTXT; 339 339 } 340 340 341 341 static inline bool irqd_irq_disabled(struct irq_data *d) ··· 557 567 * in the suspend path if they are in disabled state 558 568 * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup 559 569 * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip 570 + * IRQCHIP_MOVE_DEFERRED: Move the interrupt in actual interrupt context 560 571 */ 561 572 enum { 562 573 IRQCHIP_SET_TYPE_MASKED = (1 << 0), ··· 572 581 IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), 573 582 IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), 574 583 IRQCHIP_IMMUTABLE = (1 << 11), 584 + IRQCHIP_MOVE_DEFERRED = (1 << 12), 575 585 }; 576 586 577 587 #include <linux/irqdesc.h> ··· 685 693 extern int irq_chip_request_resources_parent(struct irq_data *data); 686 694 extern void irq_chip_release_resources_parent(struct irq_data *data); 687 695 #endif 696 + 697 + /* Disable or mask interrupts during a kernel kexec */ 698 + extern void machine_kexec_mask_interrupts(void); 688 699 689 700 /* Handling of unhandled and spurious interrupts: */ 690 701 extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
+10
kernel/irq/Kconfig
··· 31 31 config GENERIC_PENDING_IRQ 32 32 bool 33 33 34 + # Deduce delayed migration from top-level interrupt chip flags 35 + config GENERIC_PENDING_IRQ_CHIPFLAGS 36 + bool 37 + 34 38 # Support for generic irq migrating off cpu before the cpu is offline. 35 39 config GENERIC_IRQ_MIGRATION 36 40 bool ··· 144 140 developers and debugging of hard to diagnose interrupt problems. 145 141 146 142 If you don't know what to do here, say N. 143 + 144 + # Clear forwarded VM interrupts during kexec. 145 + # This option ensures the kernel clears active states for interrupts 146 + # forwarded to virtual machines (VMs) during a machine kexec. 147 + config GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD 148 + bool 147 149 148 150 endmenu 149 151
+1 -1
kernel/irq/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 - obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o 3 + obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o kexec.o 4 4 obj-$(CONFIG_IRQ_TIMINGS) += timings.o 5 5 ifeq ($(CONFIG_TEST_IRQ_TIMINGS),y) 6 6 CFLAGS_timings.o += -DDEBUG
+1 -3
kernel/irq/chip.c
··· 1114 1114 trigger = irqd_get_trigger_type(&desc->irq_data); 1115 1115 1116 1116 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | 1117 - IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); 1117 + IRQD_TRIGGER_MASK | IRQD_LEVEL); 1118 1118 if (irq_settings_has_no_balance_set(desc)) 1119 1119 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 1120 1120 if (irq_settings_is_per_cpu(desc)) 1121 1121 irqd_set(&desc->irq_data, IRQD_PER_CPU); 1122 - if (irq_settings_can_move_pcntxt(desc)) 1123 - irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); 1124 1122 if (irq_settings_is_level(desc)) 1125 1123 irqd_set(&desc->irq_data, IRQD_LEVEL); 1126 1124
+1 -1
kernel/irq/debugfs.c
··· 53 53 BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI), 54 54 BIT_MASK_DESCR(IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND), 55 55 BIT_MASK_DESCR(IRQCHIP_IMMUTABLE), 56 + BIT_MASK_DESCR(IRQCHIP_MOVE_DEFERRED), 56 57 }; 57 58 58 59 static void ··· 109 108 BIT_MASK_DESCR(IRQD_NO_BALANCING), 110 109 111 110 BIT_MASK_DESCR(IRQD_SINGLE_TARGET), 112 - BIT_MASK_DESCR(IRQD_MOVE_PCNTXT), 113 111 BIT_MASK_DESCR(IRQD_AFFINITY_SET), 114 112 BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING), 115 113 BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
+1
kernel/irq/generic-chip.c
··· 162 162 irq_reg_writel(gc, mask, ct->regs.ack); 163 163 irq_gc_unlock(gc); 164 164 } 165 + EXPORT_SYMBOL_GPL(irq_gc_mask_disable_and_ack_set); 165 166 166 167 /** 167 168 * irq_gc_eoi - EOI interrupt
+1 -9
kernel/irq/internals.h
··· 421 421 #ifdef CONFIG_GENERIC_PENDING_IRQ 422 422 static inline bool irq_can_move_pcntxt(struct irq_data *data) 423 423 { 424 - return irqd_can_move_in_process_context(data); 424 + return !(data->chip->flags & IRQCHIP_MOVE_DEFERRED); 425 425 } 426 426 static inline bool irq_move_pending(struct irq_data *data) 427 427 { ··· 440 440 static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) 441 441 { 442 442 return desc->pending_mask; 443 - } 444 - static inline bool handle_enforce_irqctx(struct irq_data *data) 445 - { 446 - return irqd_is_handle_enforce_irqctx(data); 447 443 } 448 444 bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); 449 445 #else /* CONFIG_GENERIC_PENDING_IRQ */ ··· 464 468 return NULL; 465 469 } 466 470 static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) 467 - { 468 - return false; 469 - } 470 - static inline bool handle_enforce_irqctx(struct irq_data *data) 471 471 { 472 472 return false; 473 473 }
+1 -1
kernel/irq/irqdesc.c
··· 708 708 return -EINVAL; 709 709 710 710 data = irq_desc_get_irq_data(desc); 711 - if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data))) 711 + if (WARN_ON_ONCE(!in_hardirq() && irqd_is_handle_enforce_irqctx(data))) 712 712 return -EPERM; 713 713 714 714 generic_handle_irq_desc(desc);
+36
kernel/irq/kexec.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/interrupt.h> 4 + #include <linux/irq.h> 5 + #include <linux/irqdesc.h> 6 + #include <linux/irqnr.h> 7 + 8 + #include "internals.h" 9 + 10 + void machine_kexec_mask_interrupts(void) 11 + { 12 + struct irq_desc *desc; 13 + unsigned int i; 14 + 15 + for_each_irq_desc(i, desc) { 16 + struct irq_chip *chip; 17 + int check_eoi = 1; 18 + 19 + chip = irq_desc_get_chip(desc); 20 + if (!chip || !irqd_is_started(&desc->irq_data)) 21 + continue; 22 + 23 + if (IS_ENABLED(CONFIG_GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD)) { 24 + /* 25 + * First try to remove the active state from an interrupt which is forwarded 26 + * to a VM. If the interrupt is not forwarded, try to EOI the interrupt. 27 + */ 28 + check_eoi = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false); 29 + } 30 + 31 + if (check_eoi && chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) 32 + chip->irq_eoi(&desc->irq_data); 33 + 34 + irq_shutdown(desc); 35 + } 36 + }
+18 -25
kernel/irq/manage.c
··· 1182 1182 } 1183 1183 1184 1184 /* 1185 + * Interrupts explicitly requested as threaded interrupts want to be 1186 + * preemptible - many of them need to sleep and wait for slow busses to 1187 + * complete. 1188 + */ 1189 + static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action) 1190 + { 1191 + irqreturn_t ret = action->thread_fn(action->irq, action->dev_id); 1192 + 1193 + if (ret == IRQ_HANDLED) 1194 + atomic_inc(&desc->threads_handled); 1195 + 1196 + irq_finalize_oneshot(desc, action); 1197 + return ret; 1198 + } 1199 + 1200 + /* 1185 1201 * Interrupts which are not explicitly requested as threaded 1186 1202 * interrupts rely on the implicit bh/preempt disable of the hard irq 1187 1203 * context. So we need to disable bh here to avoid deadlocks and other 1188 1204 * side effects. 1189 1205 */ 1190 - static irqreturn_t 1191 - irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) 1206 + static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) 1192 1207 { 1193 1208 irqreturn_t ret; 1194 1209 1195 1210 local_bh_disable(); 1196 1211 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 1197 1212 local_irq_disable(); 1198 - ret = action->thread_fn(action->irq, action->dev_id); 1199 - if (ret == IRQ_HANDLED) 1200 - atomic_inc(&desc->threads_handled); 1201 - 1202 - irq_finalize_oneshot(desc, action); 1213 + ret = irq_thread_fn(desc, action); 1203 1214 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 1204 1215 local_irq_enable(); 1205 1216 local_bh_enable(); 1206 - return ret; 1207 - } 1208 - 1209 - /* 1210 - * Interrupts explicitly requested as threaded interrupts want to be 1211 - * preemptible - many of them need to sleep and wait for slow busses to 1212 - * complete. 
1213 - */ 1214 - static irqreturn_t irq_thread_fn(struct irq_desc *desc, 1215 - struct irqaction *action) 1216 - { 1217 - irqreturn_t ret; 1218 - 1219 - ret = action->thread_fn(action->irq, action->dev_id); 1220 - if (ret == IRQ_HANDLED) 1221 - atomic_inc(&desc->threads_handled); 1222 - 1223 - irq_finalize_oneshot(desc, action); 1224 1217 return ret; 1225 1218 } 1226 1219
+1 -1
kernel/irq/resend.c
··· 53 53 * Validate whether this interrupt can be safely injected from 54 54 * non interrupt context 55 55 */ 56 - if (handle_enforce_irqctx(&desc->irq_data)) 56 + if (irqd_is_handle_enforce_irqctx(&desc->irq_data)) 57 57 return -EINVAL; 58 58 59 59 /*
-6
kernel/irq/settings.h
··· 11 11 _IRQ_NOREQUEST = IRQ_NOREQUEST, 12 12 _IRQ_NOTHREAD = IRQ_NOTHREAD, 13 13 _IRQ_NOAUTOEN = IRQ_NOAUTOEN, 14 - _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, 15 14 _IRQ_NO_BALANCING = IRQ_NO_BALANCING, 16 15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, 17 16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, ··· 139 140 static inline void irq_settings_set_noprobe(struct irq_desc *desc) 140 141 { 141 142 desc->status_use_accessors |= _IRQ_NOPROBE; 142 - } 143 - 144 - static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) 145 - { 146 - return desc->status_use_accessors & _IRQ_MOVE_PCNTXT; 147 143 } 148 144 149 145 static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
+1
kernel/irq/timings.c
··· 509 509 510 510 /** 511 511 * irq_timings_next_event - Return when the next event is supposed to arrive 512 + * @now: current time 512 513 * 513 514 * During the last busy cycle, the number of interrupts is incremented 514 515 * and stored in the irq_timings structure. This information is