Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'irq-core-2021-06-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
"Updates for the interrupt subsystem:

Core changes:

- Cleanup and simplification of common code to invoke the low level
interrupt flow handlers when this invocation requires irqdomain
resolution. Add the necessary core infrastructure.

- Provide a proper interface for modular PMU drivers to set the
interrupt affinity.

- Add a request flag which allows excluding interrupts from spurious
interrupt detection. Useful especially for IPI handlers which
always return IRQ_HANDLED, which turns the spurious interrupt
detection into a pointless waste of CPU cycles.

Driver changes:

- Bulk convert interrupt chip drivers to the new irqdomain low level
flow handler invocation mechanism.

- Add device tree bindings for the Renesas R-Car M3-W+ SoC

- Enable modular build of the Qualcomm PDC driver

- The usual small fixes and improvements"

* tag 'irq-core-2021-06-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (38 commits)
dt-bindings: interrupt-controller: arm,gic-v3: Describe GICv3 optional properties
irqchip: gic-pm: Remove redundant error log of clock bulk
irqchip/sun4i: Remove unnecessary oom message
irqchip/irq-imx-gpcv2: Remove unnecessary oom message
irqchip/imgpdc: Remove unnecessary oom message
irqchip/gic-v3-its: Remove unnecessary oom message
irqchip/gic-v2m: Remove unnecessary oom message
irqchip/exynos-combiner: Remove unnecessary oom message
irqchip: Bulk conversion to generic_handle_domain_irq()
genirq: Move non-irqdomain handle_domain_irq() handling into ARM's handle_IRQ()
genirq: Add generic_handle_domain_irq() helper
irqchip/nvic: Convert from handle_IRQ() to handle_domain_irq()
irqdesc: Fix __handle_domain_irq() comment
genirq: Use irq_resolve_mapping() to implement __handle_domain_irq() and co
irqdomain: Introduce irq_resolve_mapping()
irqdomain: Protect the linear revmap with RCU
irqdomain: Cache irq_data instead of a virq number in the revmap
irqdomain: Use struct_size() helper when allocating irqdomain
irqdomain: Make normal and nomap irqdomains exclusive
powerpc: Move the use of irq_domain_add_nomap() behind a config option
...

+408 -354
-1
Documentation/core-api/irq/irq-domain.rst
··· 146 146 147 147 irq_domain_add_simple() 148 148 irq_domain_add_legacy() 149 - irq_domain_add_legacy_isa() 150 149 irq_domain_create_simple() 151 150 irq_domain_create_legacy() 152 151
+13
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
··· 145 145 required: 146 146 - affinity 147 147 148 + clocks: 149 + maxItems: 1 150 + 151 + clock-names: 152 + items: 153 + - const: aclk 154 + 155 + power-domains: 156 + maxItems: 1 157 + 158 + resets: 159 + maxItems: 1 160 + 148 161 dependencies: 149 162 mbi-ranges: [ msi-controller ] 150 163 msi-controller: [ mbi-ranges ]
+1
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
··· 29 29 - renesas,intc-ex-r8a774c0 # RZ/G2E 30 30 - renesas,intc-ex-r8a7795 # R-Car H3 31 31 - renesas,intc-ex-r8a7796 # R-Car M3-W 32 + - renesas,intc-ex-r8a77961 # R-Car M3-W+ 32 33 - renesas,intc-ex-r8a77965 # R-Car M3-N 33 34 - renesas,intc-ex-r8a77970 # R-Car V3M 34 35 - renesas,intc-ex-r8a77980 # R-Car V3H
+21 -1
arch/arm/kernel/irq.c
··· 63 63 */ 64 64 void handle_IRQ(unsigned int irq, struct pt_regs *regs) 65 65 { 66 - __handle_domain_irq(NULL, irq, false, regs); 66 + struct pt_regs *old_regs = set_irq_regs(regs); 67 + struct irq_desc *desc; 68 + 69 + irq_enter(); 70 + 71 + /* 72 + * Some hardware gives randomly wrong interrupts. Rather 73 + * than crashing, do something sensible. 74 + */ 75 + if (unlikely(!irq || irq >= nr_irqs)) 76 + desc = NULL; 77 + else 78 + desc = irq_to_desc(irq); 79 + 80 + if (likely(desc)) 81 + handle_irq_desc(desc); 82 + else 83 + ack_bad_irq(irq); 84 + 85 + irq_exit(); 86 + set_irq_regs(old_regs); 67 87 } 68 88 69 89 /*
-1
arch/mips/include/asm/irq.h
··· 11 11 12 12 #include <linux/linkage.h> 13 13 #include <linux/smp.h> 14 - #include <linux/irqdomain.h> 15 14 16 15 #include <asm/mipsmtregs.h> 17 16
+1
arch/mips/lantiq/xway/dma.c
··· 12 12 #include <linux/spinlock.h> 13 13 #include <linux/clk.h> 14 14 #include <linux/err.h> 15 + #include <linux/of.h> 15 16 16 17 #include <lantiq_soc.h> 17 18 #include <xway_dma.h>
+1
arch/mips/pci/pci-rt3883.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/delay.h> 15 15 #include <linux/interrupt.h> 16 + #include <linux/irqdomain.h> 16 17 #include <linux/of.h> 17 18 #include <linux/of_irq.h> 18 19 #include <linux/of_pci.h>
+1
arch/mips/pci/pci-xtalk-bridge.c
··· 13 13 #include <linux/platform_data/xtalk-bridge.h> 14 14 #include <linux/nvmem-consumer.h> 15 15 #include <linux/crc16.h> 16 + #include <linux/irqdomain.h> 16 17 17 18 #include <asm/pci/bridge.h> 18 19 #include <asm/paccess.h>
+1
arch/mips/sgi-ip27/ip27-irq.c
··· 9 9 10 10 #include <linux/interrupt.h> 11 11 #include <linux/irq.h> 12 + #include <linux/irqdomain.h> 12 13 #include <linux/ioport.h> 13 14 #include <linux/kernel.h> 14 15 #include <linux/bitops.h>
+1
arch/mips/sgi-ip30/ip30-irq.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/interrupt.h> 8 8 #include <linux/irq.h> 9 + #include <linux/irqdomain.h> 9 10 #include <linux/percpu.h> 10 11 #include <linux/spinlock.h> 11 12 #include <linux/tick.h>
-1
arch/nios2/include/asm/irq.h
··· 10 10 #define NIOS2_CPU_NR_IRQS 32 11 11 12 12 #include <asm-generic/irq.h> 13 - #include <linux/irqdomain.h> 14 13 15 14 #endif
+1
arch/nios2/kernel/irq.c
··· 11 11 12 12 #include <linux/init.h> 13 13 #include <linux/interrupt.h> 14 + #include <linux/irqdomain.h> 14 15 #include <linux/of.h> 15 16 16 17 static u32 ienable;
+2 -3
arch/powerpc/include/asm/irq.h
··· 6 6 /* 7 7 */ 8 8 9 - #include <linux/irqdomain.h> 10 9 #include <linux/threads.h> 11 10 #include <linux/list.h> 12 11 #include <linux/radix-tree.h> ··· 22 23 /* Total number of virq in the platform */ 23 24 #define NR_IRQS CONFIG_NR_IRQS 24 25 25 - /* Same thing, used by the generic IRQ code */ 26 - #define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS 26 + /* Number of irqs reserved for a legacy isa controller */ 27 + #define NR_IRQS_LEGACY 16 27 28 28 29 extern irq_hw_number_t virq_to_hw(unsigned int virq); 29 30
+1
arch/powerpc/kernel/mce.c
··· 18 18 #include <linux/extable.h> 19 19 #include <linux/ftrace.h> 20 20 #include <linux/memblock.h> 21 + #include <linux/of.h> 21 22 22 23 #include <asm/interrupt.h> 23 24 #include <asm/machdep.h>
+1
arch/powerpc/kvm/book3s_hv_uvmem.c
··· 90 90 #include <linux/migrate.h> 91 91 #include <linux/kvm_host.h> 92 92 #include <linux/ksm.h> 93 + #include <linux/of.h> 93 94 #include <asm/ultravisor.h> 94 95 #include <asm/mman.h> 95 96 #include <asm/kvm_ppc.h>
+1
arch/powerpc/kvm/book3s_xive.c
··· 14 14 #include <linux/percpu.h> 15 15 #include <linux/cpumask.h> 16 16 #include <linux/uaccess.h> 17 + #include <linux/irqdomain.h> 17 18 #include <asm/kvm_book3s.h> 18 19 #include <asm/kvm_ppc.h> 19 20 #include <asm/hvcall.h>
+1
arch/powerpc/kvm/book3s_xive_native.c
··· 12 12 #include <linux/spinlock.h> 13 13 #include <linux/delay.h> 14 14 #include <linux/file.h> 15 + #include <linux/irqdomain.h> 15 16 #include <asm/uaccess.h> 16 17 #include <asm/kvm_book3s.h> 17 18 #include <asm/kvm_ppc.h>
+1
arch/powerpc/mm/book3s64/radix_pgtable.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/sched/mm.h> 13 13 #include <linux/memblock.h> 14 + #include <linux/of.h> 14 15 #include <linux/of_fdt.h> 15 16 #include <linux/mm.h> 16 17 #include <linux/hugetlb.h>
+1
arch/powerpc/platforms/cell/Kconfig
··· 35 35 config AXON_MSI 36 36 bool 37 37 depends on PPC_IBM_CELL_BLADE && PCI_MSI 38 + select IRQ_DOMAIN_NOMAP 38 39 default y 39 40 40 41 menu "Cell Broadband Engine options"
+1
arch/powerpc/platforms/cell/pmu.c
··· 10 10 */ 11 11 12 12 #include <linux/interrupt.h> 13 + #include <linux/irqdomain.h> 13 14 #include <linux/types.h> 14 15 #include <linux/export.h> 15 16 #include <asm/io.h>
+1
arch/powerpc/platforms/embedded6xx/flipper-pic.c
··· 12 12 #include <linux/kernel.h> 13 13 #include <linux/init.h> 14 14 #include <linux/irq.h> 15 + #include <linux/irqdomain.h> 15 16 #include <linux/of.h> 16 17 #include <linux/of_address.h> 17 18 #include <asm/io.h>
+1
arch/powerpc/platforms/powermac/Kconfig
··· 24 24 bool "Support for powersurge upgrade cards" if EXPERT 25 25 depends on SMP && PPC32 && PPC_PMAC 26 26 select PPC_SMP_MUXED_IPI 27 + select IRQ_DOMAIN_NOMAP 27 28 default y 28 29 help 29 30 The powersurge cpu boards can be used in the generation
+1
arch/powerpc/platforms/ps3/Kconfig
··· 7 7 select USB_OHCI_BIG_ENDIAN_MMIO 8 8 select USB_EHCI_BIG_ENDIAN_MMIO 9 9 select HAVE_PCI 10 + select IRQ_DOMAIN_NOMAP 10 11 help 11 12 This option enables support for the Sony PS3 game console 12 13 and other platforms using the PS3 hypervisor. Enabling this
+3 -2
arch/powerpc/platforms/ps3/interrupt.c
··· 9 9 #include <linux/kernel.h> 10 10 #include <linux/export.h> 11 11 #include <linux/irq.h> 12 + #include <linux/irqdomain.h> 12 13 13 14 #include <asm/machdep.h> 14 15 #include <asm/udbg.h> ··· 46 45 * implementation equates HV plug value to Linux virq value, constrains each 47 46 * interrupt to have a system wide unique plug number, and limits the range 48 47 * of the plug values to map into the first dword of the bitmaps. This 49 - * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note 48 + * gives a usable range of plug values of {NR_IRQS_LEGACY..63}. Note 50 49 * that there is no constraint on how many in this set an individual thread 51 50 * can acquire. 52 51 * ··· 722 721 } 723 722 724 723 #if defined(DEBUG) 725 - if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) { 724 + if (unlikely(plug < NR_IRQS_LEGACY || plug > PS3_PLUG_MAX)) { 726 725 dump_bmp(&per_cpu(ps3_private, 0)); 727 726 dump_bmp(&per_cpu(ps3_private, 1)); 728 727 BUG();
+1
arch/powerpc/platforms/pseries/ibmebus.c
··· 42 42 #include <linux/kobject.h> 43 43 #include <linux/dma-map-ops.h> 44 44 #include <linux/interrupt.h> 45 + #include <linux/irqdomain.h> 45 46 #include <linux/of.h> 46 47 #include <linux/slab.h> 47 48 #include <linux/stat.h>
+1
arch/powerpc/sysdev/ehv_pic.c
··· 14 14 #include <linux/kernel.h> 15 15 #include <linux/init.h> 16 16 #include <linux/irq.h> 17 + #include <linux/irqdomain.h> 17 18 #include <linux/smp.h> 18 19 #include <linux/interrupt.h> 19 20 #include <linux/slab.h>
+1
arch/powerpc/sysdev/fsl_mpic_err.c
··· 8 8 #include <linux/irq.h> 9 9 #include <linux/smp.h> 10 10 #include <linux/interrupt.h> 11 + #include <linux/irqdomain.h> 11 12 12 13 #include <asm/io.h> 13 14 #include <asm/irq.h>
+2 -1
arch/powerpc/sysdev/i8259.c
··· 260 260 raw_spin_unlock_irqrestore(&i8259_lock, flags); 261 261 262 262 /* create a legacy host */ 263 - i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL); 263 + i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, 264 + &i8259_host_ops, NULL); 264 265 if (i8259_host == NULL) { 265 266 printk(KERN_ERR "i8259: failed to allocate irq host !\n"); 266 267 return;
+1 -1
arch/powerpc/sysdev/mpic.c
··· 602 602 /* Find an mpic associated with a given linux interrupt */ 603 603 static struct mpic *mpic_find(unsigned int irq) 604 604 { 605 - if (irq < NUM_ISA_INTERRUPTS) 605 + if (irq < NR_IRQS_LEGACY) 606 606 return NULL; 607 607 608 608 return irq_get_chip_data(irq);
+2 -1
arch/powerpc/sysdev/tsi108_pci.c
··· 404 404 { 405 405 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); 406 406 407 - pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL); 407 + pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, 408 + &pci_irq_domain_ops, NULL); 408 409 if (pci_irq_host == NULL) { 409 410 printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n"); 410 411 return;
+1
arch/powerpc/sysdev/xics/icp-hv.c
··· 7 7 #include <linux/irq.h> 8 8 #include <linux/smp.h> 9 9 #include <linux/interrupt.h> 10 + #include <linux/irqdomain.h> 10 11 #include <linux/cpu.h> 11 12 #include <linux/of.h> 12 13
+1
arch/powerpc/sysdev/xics/icp-opal.c
··· 7 7 #include <linux/irq.h> 8 8 #include <linux/smp.h> 9 9 #include <linux/interrupt.h> 10 + #include <linux/irqdomain.h> 10 11 #include <linux/cpu.h> 11 12 #include <linux/of.h> 12 13
+1 -1
arch/powerpc/sysdev/xics/xics-common.c
··· 201 201 struct ics *ics; 202 202 203 203 /* We can't set affinity on ISA interrupts */ 204 - if (virq < NUM_ISA_INTERRUPTS) 204 + if (virq < NR_IRQS_LEGACY) 205 205 continue; 206 206 /* We only need to migrate enabled IRQS */ 207 207 if (!desc->action)
+1
arch/powerpc/sysdev/xive/Kconfig
··· 3 3 bool 4 4 select PPC_SMP_MUXED_IPI 5 5 select HARDIRQS_SW_RESEND 6 + select IRQ_DOMAIN_NOMAP 6 7 7 8 config PPC_XIVE_NATIVE 8 9 bool
+1 -1
drivers/irqchip/Kconfig
··· 415 415 for Goldfish based virtual platforms. 416 416 417 417 config QCOM_PDC 418 - bool "QCOM PDC" 418 + tristate "QCOM PDC" 419 419 depends on ARCH_QCOM 420 420 select IRQ_DOMAIN_HIERARCHY 421 421 help
+5 -9
drivers/irqchip/exynos-combiner.c
··· 66 66 { 67 67 struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); 68 68 struct irq_chip *chip = irq_desc_get_chip(desc); 69 - unsigned int cascade_irq, combiner_irq; 69 + unsigned int combiner_irq; 70 70 unsigned long status; 71 + int ret; 71 72 72 73 chained_irq_enter(chip, desc); 73 74 ··· 81 80 goto out; 82 81 83 82 combiner_irq = chip_data->hwirq_offset + __ffs(status); 84 - cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq); 85 - 86 - if (unlikely(!cascade_irq)) 83 + ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq); 84 + if (unlikely(ret)) 87 85 handle_bad_irq(desc); 88 - else 89 - generic_handle_irq(cascade_irq); 90 86 91 87 out: 92 88 chained_irq_exit(chip, desc); ··· 177 179 nr_irq = max_nr * IRQ_IN_COMBINER; 178 180 179 181 combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL); 180 - if (!combiner_data) { 181 - pr_warn("%s: could not allocate combiner data\n", __func__); 182 + if (!combiner_data) 182 183 return; 183 - } 184 184 185 185 combiner_irq_domain = irq_domain_add_linear(np, nr_irq, 186 186 &combiner_irq_domain_ops, combiner_data);
+2 -5
drivers/irqchip/irq-al-fic.c
··· 111 111 struct irq_chip *irqchip = irq_desc_get_chip(desc); 112 112 struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); 113 113 unsigned long pending; 114 - unsigned int irq; 115 114 u32 hwirq; 116 115 117 116 chained_irq_enter(irqchip, desc); ··· 118 119 pending = readl_relaxed(fic->base + AL_FIC_CAUSE); 119 120 pending &= ~gc->mask_cache; 120 121 121 - for_each_set_bit(hwirq, &pending, NR_FIC_IRQS) { 122 - irq = irq_find_mapping(domain, hwirq); 123 - generic_handle_irq(irq); 124 - } 122 + for_each_set_bit(hwirq, &pending, NR_FIC_IRQS) 123 + generic_handle_domain_irq(domain, hwirq); 125 124 126 125 chained_irq_exit(irqchip, desc); 127 126 }
+8 -11
drivers/irqchip/irq-armada-370-xp.c
··· 582 582 583 583 for (msinr = PCI_MSI_DOORBELL_START; 584 584 msinr < PCI_MSI_DOORBELL_END; msinr++) { 585 - int irq; 585 + unsigned int irq; 586 586 587 587 if (!(msimask & BIT(msinr))) 588 588 continue; 589 589 590 - if (is_chained) { 591 - irq = irq_find_mapping(armada_370_xp_msi_inner_domain, 592 - msinr - PCI_MSI_DOORBELL_START); 593 - generic_handle_irq(irq); 594 - } else { 595 - irq = msinr - PCI_MSI_DOORBELL_START; 590 + irq = msinr - PCI_MSI_DOORBELL_START; 591 + 592 + if (is_chained) 593 + generic_handle_domain_irq(armada_370_xp_msi_inner_domain, 594 + irq); 595 + else 596 596 handle_domain_irq(armada_370_xp_msi_inner_domain, 597 597 irq, regs); 598 - } 599 598 } 600 599 } 601 600 #else ··· 605 606 { 606 607 struct irq_chip *chip = irq_desc_get_chip(desc); 607 608 unsigned long irqmap, irqn, irqsrc, cpuid; 608 - unsigned int cascade_irq; 609 609 610 610 chained_irq_enter(chip, desc); 611 611 ··· 626 628 continue; 627 629 } 628 630 629 - cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn); 630 - generic_handle_irq(cascade_irq); 631 + generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn); 631 632 } 632 633 633 634 chained_irq_exit(chip, desc);
+3 -5
drivers/irqchip/irq-aspeed-i2c-ic.c
··· 34 34 struct aspeed_i2c_ic *i2c_ic = irq_desc_get_handler_data(desc); 35 35 struct irq_chip *chip = irq_desc_get_chip(desc); 36 36 unsigned long bit, status; 37 - unsigned int bus_irq; 38 37 39 38 chained_irq_enter(chip, desc); 40 39 status = readl(i2c_ic->base); 41 - for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS) { 42 - bus_irq = irq_find_mapping(i2c_ic->irq_domain, bit); 43 - generic_handle_irq(bus_irq); 44 - } 40 + for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS) 41 + generic_handle_domain_irq(i2c_ic->irq_domain, bit); 42 + 45 43 chained_irq_exit(chip, desc); 46 44 } 47 45
+2 -4
drivers/irqchip/irq-aspeed-scu-ic.c
··· 44 44 45 45 static void aspeed_scu_ic_irq_handler(struct irq_desc *desc) 46 46 { 47 - unsigned int irq; 48 47 unsigned int sts; 49 48 unsigned long bit; 50 49 unsigned long enabled; ··· 73 74 max = scu_ic->num_irqs + bit; 74 75 75 76 for_each_set_bit_from(bit, &status, max) { 76 - irq = irq_find_mapping(scu_ic->irq_domain, 77 - bit - scu_ic->irq_shift); 78 - generic_handle_irq(irq); 77 + generic_handle_domain_irq(scu_ic->irq_domain, 78 + bit - scu_ic->irq_shift); 79 79 80 80 regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 81 81 BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+1 -1
drivers/irqchip/irq-ath79-misc.c
··· 50 50 while (pending) { 51 51 int bit = __ffs(pending); 52 52 53 - generic_handle_irq(irq_linear_revmap(domain, bit)); 53 + generic_handle_domain_irq(domain, bit); 54 54 pending &= ~BIT(bit); 55 55 } 56 56
+1 -1
drivers/irqchip/irq-bcm2835.c
··· 254 254 u32 hwirq; 255 255 256 256 while ((hwirq = get_next_armctrl_hwirq()) != ~0) 257 - generic_handle_irq(irq_linear_revmap(intc.domain, hwirq)); 257 + generic_handle_domain_irq(intc.domain, hwirq); 258 258 } 259 259 260 260 IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
+1 -1
drivers/irqchip/irq-bcm2836.c
··· 161 161 mbox_val = readl_relaxed(intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu); 162 162 if (mbox_val) { 163 163 int hwirq = ffs(mbox_val) - 1; 164 - generic_handle_irq(irq_find_mapping(ipi_domain, hwirq)); 164 + generic_handle_domain_irq(ipi_domain, hwirq); 165 165 } 166 166 167 167 chained_irq_exit(chip, desc);
+2 -4
drivers/irqchip/irq-bcm7038-l1.c
··· 145 145 ~cpu->mask_cache[idx]; 146 146 raw_spin_unlock_irqrestore(&intc->lock, flags); 147 147 148 - for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { 149 - generic_handle_irq(irq_find_mapping(intc->domain, 150 - base + hwirq)); 151 - } 148 + for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) 149 + generic_handle_domain_irq(intc->domain, base + hwirq); 152 150 } 153 151 154 152 chained_irq_exit(chip, desc);
+2 -4
drivers/irqchip/irq-bcm7120-l2.c
··· 74 74 data->irq_map_mask[idx]; 75 75 irq_gc_unlock(gc); 76 76 77 - for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { 78 - generic_handle_irq(irq_find_mapping(b->domain, 79 - base + hwirq)); 80 - } 77 + for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) 78 + generic_handle_domain_irq(b->domain, base + hwirq); 81 79 } 82 80 83 81 chained_irq_exit(chip, desc);
+1 -1
drivers/irqchip/irq-brcmstb-l2.c
··· 110 110 do { 111 111 irq = ffs(status) - 1; 112 112 status &= ~(1 << irq); 113 - generic_handle_irq(irq_linear_revmap(b->domain, irq)); 113 + generic_handle_domain_irq(b->domain, irq); 114 114 } while (status); 115 115 out: 116 116 chained_irq_exit(chip, desc);
+1 -2
drivers/irqchip/irq-dw-apb-ictl.c
··· 62 62 63 63 while (stat) { 64 64 u32 hwirq = ffs(stat) - 1; 65 - u32 virq = irq_find_mapping(d, gc->irq_base + hwirq); 65 + generic_handle_domain_irq(d, gc->irq_base + hwirq); 66 66 67 - generic_handle_irq(virq); 68 67 stat &= ~BIT(hwirq); 69 68 } 70 69 }
+1 -3
drivers/irqchip/irq-gic-pm.c
··· 30 30 int ret; 31 31 32 32 ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks); 33 - if (ret) { 34 - dev_err(dev, "clk_enable failed: %d\n", ret); 33 + if (ret) 35 34 return ret; 36 - } 37 35 38 36 /* 39 37 * On the very first resume, the pointer to chip_pm->chip_data
+1 -3
drivers/irqchip/irq-gic-v2m.c
··· 323 323 struct v2m_data *v2m; 324 324 325 325 v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); 326 - if (!v2m) { 327 - pr_err("Failed to allocate struct v2m_data.\n"); 326 + if (!v2m) 328 327 return -ENOMEM; 329 - } 330 328 331 329 INIT_LIST_HEAD(&v2m->entry); 332 330 v2m->fwnode = fwnode;
+2 -6
drivers/irqchip/irq-gic-v3-its.c
··· 4895 4895 entries = roundup_pow_of_two(nr_cpu_ids); 4896 4896 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), 4897 4897 GFP_KERNEL); 4898 - if (!vpe_proxy.vpes) { 4899 - pr_err("ITS: Can't allocate GICv4 proxy device array\n"); 4898 + if (!vpe_proxy.vpes) 4900 4899 return -ENOMEM; 4901 - } 4902 4900 4903 4901 /* Use the last possible DevID */ 4904 4902 devid = GENMASK(device_ids(its) - 1, 0); ··· 5312 5314 5313 5315 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), 5314 5316 GFP_KERNEL); 5315 - if (!its_srat_maps) { 5316 - pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); 5317 + if (!its_srat_maps) 5317 5318 return; 5318 - } 5319 5319 5320 5320 acpi_table_parse_entries(ACPI_SIG_SRAT, 5321 5321 sizeof(struct acpi_table_srat),
+5 -8
drivers/irqchip/irq-gic.c
··· 375 375 { 376 376 struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); 377 377 struct irq_chip *chip = irq_desc_get_chip(desc); 378 - unsigned int cascade_irq, gic_irq; 378 + unsigned int gic_irq; 379 379 unsigned long status; 380 + int ret; 380 381 381 382 chained_irq_enter(chip, desc); 382 383 ··· 387 386 if (gic_irq == GICC_INT_SPURIOUS) 388 387 goto out; 389 388 390 - cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); 391 - if (unlikely(gic_irq < 32 || gic_irq > 1020)) { 389 + isb(); 390 + ret = generic_handle_domain_irq(chip_data->domain, gic_irq); 391 + if (unlikely(ret)) 392 392 handle_bad_irq(desc); 393 - } else { 394 - isb(); 395 - generic_handle_irq(cascade_irq); 396 - } 397 - 398 393 out: 399 394 chained_irq_exit(chip, desc); 400 395 }
+2 -3
drivers/irqchip/irq-goldfish-pic.c
··· 34 34 { 35 35 struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc); 36 36 struct irq_chip *host_chip = irq_desc_get_chip(desc); 37 - u32 pending, hwirq, virq; 37 + u32 pending, hwirq; 38 38 39 39 chained_irq_enter(host_chip, desc); 40 40 41 41 pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING); 42 42 while (pending) { 43 43 hwirq = __fls(pending); 44 - virq = irq_linear_revmap(gfpic->irq_domain, hwirq); 45 - generic_handle_irq(virq); 44 + generic_handle_domain_irq(gfpic->irq_domain, hwirq); 46 45 pending &= ~(1 << hwirq); 47 46 } 48 47
+1 -3
drivers/irqchip/irq-i8259.c
··· 333 333 { 334 334 struct irq_domain *domain = irq_desc_get_handler_data(desc); 335 335 int hwirq = i8259_poll(); 336 - unsigned int irq; 337 336 338 337 if (hwirq < 0) 339 338 return; 340 339 341 - irq = irq_linear_revmap(domain, hwirq); 342 - generic_handle_irq(irq); 340 + generic_handle_domain_irq(domain, hwirq); 343 341 } 344 342 345 343 int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+2 -4
drivers/irqchip/irq-idt3243x.c
··· 28 28 { 29 29 struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc); 30 30 struct irq_chip *host_chip = irq_desc_get_chip(desc); 31 - u32 pending, hwirq, virq; 31 + u32 pending, hwirq; 32 32 33 33 chained_irq_enter(host_chip, desc); 34 34 ··· 36 36 pending &= ~idtpic->gc->mask_cache; 37 37 while (pending) { 38 38 hwirq = __fls(pending); 39 - virq = irq_linear_revmap(idtpic->irq_domain, hwirq); 40 - if (virq) 41 - generic_handle_irq(virq); 39 + generic_handle_domain_irq(idtpic->irq_domain, hwirq); 42 40 pending &= ~(1 << hwirq); 43 41 } 44 42
+6 -13
drivers/irqchip/irq-imgpdc.c
··· 223 223 { 224 224 unsigned int irq = irq_desc_get_irq(desc); 225 225 struct pdc_intc_priv *priv; 226 - unsigned int i, irq_no; 226 + unsigned int i; 227 227 228 228 priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc); 229 229 ··· 237 237 found: 238 238 239 239 /* pass on the interrupt */ 240 - irq_no = irq_linear_revmap(priv->domain, i); 241 - generic_handle_irq(irq_no); 240 + generic_handle_domain_irq(priv->domain, i); 242 241 } 243 242 244 243 static void pdc_intc_syswake_isr(struct irq_desc *desc) 245 244 { 246 245 struct pdc_intc_priv *priv; 247 - unsigned int syswake, irq_no; 246 + unsigned int syswake; 248 247 unsigned int status; 249 248 250 249 priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc); ··· 257 258 if (!(status & 1)) 258 259 continue; 259 260 260 - irq_no = irq_linear_revmap(priv->domain, 261 - syswake_to_hwirq(syswake)); 262 - generic_handle_irq(irq_no); 261 + generic_handle_domain_irq(priv->domain, syswake_to_hwirq(syswake)); 263 262 } 264 263 } 265 264 ··· 313 316 314 317 /* Allocate driver data */ 315 318 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 316 - if (!priv) { 317 - dev_err(&pdev->dev, "cannot allocate device data\n"); 319 + if (!priv) 318 320 return -ENOMEM; 319 - } 320 321 raw_spin_lock_init(&priv->lock); 321 322 platform_set_drvdata(pdev, priv); 322 323 ··· 351 356 /* Get peripheral IRQ numbers */ 352 357 priv->perip_irqs = devm_kcalloc(&pdev->dev, 4, priv->nr_perips, 353 358 GFP_KERNEL); 354 - if (!priv->perip_irqs) { 355 - dev_err(&pdev->dev, "cannot allocate perip IRQ list\n"); 359 + if (!priv->perip_irqs) 356 360 return -ENOMEM; 357 - } 358 361 for (i = 0; i < priv->nr_perips; ++i) { 359 362 irq = platform_get_irq(pdev, 1 + i); 360 363 if (irq < 0)
+1 -3
drivers/irqchip/irq-imx-gpcv2.c
··· 228 228 } 229 229 230 230 cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL); 231 - if (!cd) { 232 - pr_err("%pOF: kzalloc failed!\n", node); 231 + if (!cd) 233 232 return -ENOMEM; 234 - } 235 233 236 234 raw_spin_lock_init(&cd->rlock); 237 235
+3 -6
drivers/irqchip/irq-imx-intmux.c
··· 182 182 struct intmux_data *data = container_of(irqchip_data, struct intmux_data, 183 183 irqchip_data[idx]); 184 184 unsigned long irqstat; 185 - int pos, virq; 185 + int pos; 186 186 187 187 chained_irq_enter(irq_desc_get_chip(desc), desc); 188 188 189 189 /* read the interrupt source pending status of this channel */ 190 190 irqstat = readl_relaxed(data->regs + CHANIPR(idx)); 191 191 192 - for_each_set_bit(pos, &irqstat, 32) { 193 - virq = irq_find_mapping(irqchip_data->domain, pos); 194 - if (virq) 195 - generic_handle_irq(virq); 196 - } 192 + for_each_set_bit(pos, &irqstat, 32) 193 + generic_handle_domain_irq(irqchip_data->domain, pos); 197 194 198 195 chained_irq_exit(irq_desc_get_chip(desc), desc); 199 196 }
+3 -6
drivers/irqchip/irq-imx-irqsteer.c
··· 122 122 for (i = 0; i < 2; i++, hwirq += 32) { 123 123 int idx = imx_irqsteer_get_reg_index(data, hwirq); 124 124 unsigned long irqmap; 125 - int pos, virq; 125 + int pos; 126 126 127 127 if (hwirq >= data->reg_num * 32) 128 128 break; ··· 130 130 irqmap = readl_relaxed(data->regs + 131 131 CHANSTATUS(idx, data->reg_num)); 132 132 133 - for_each_set_bit(pos, &irqmap, 32) { 134 - virq = irq_find_mapping(data->domain, pos + hwirq); 135 - if (virq) 136 - generic_handle_irq(virq); 137 - } 133 + for_each_set_bit(pos, &irqmap, 32) 134 + generic_handle_domain_irq(data->domain, pos + hwirq); 138 135 } 139 136 140 137 chained_irq_exit(irq_desc_get_chip(desc), desc);
+1 -1
drivers/irqchip/irq-ingenic-tcu.c
··· 38 38 irq_reg &= ~irq_mask; 39 39 40 40 for_each_set_bit(i, (unsigned long *)&irq_reg, 32) 41 - generic_handle_irq(irq_linear_revmap(domain, i)); 41 + generic_handle_domain_irq(domain, i); 42 42 43 43 chained_irq_exit(irq_chip, desc); 44 44 }
+1 -2
drivers/irqchip/irq-ingenic.c
··· 49 49 while (pending) { 50 50 int bit = __fls(pending); 51 51 52 - irq = irq_linear_revmap(domain, bit + (i * 32)); 53 - generic_handle_irq(irq); 52 + generic_handle_domain_irq(domain, bit + (i * 32)); 54 53 pending &= ~BIT(bit); 55 54 } 56 55 }
+6 -8
drivers/irqchip/irq-keystone.c
··· 89 89 struct keystone_irq_device *kirq = keystone_irq; 90 90 unsigned long wa_lock_flags; 91 91 unsigned long pending; 92 - int src, virq; 92 + int src, err; 93 93 94 94 dev_dbg(kirq->dev, "start irq %d\n", irq); 95 95 ··· 104 104 105 105 for (src = 0; src < KEYSTONE_N_IRQ; src++) { 106 106 if (BIT(src) & pending) { 107 - virq = irq_find_mapping(kirq->irqd, src); 108 - dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n", 109 - src, virq); 110 - if (!virq) 111 - dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n", 112 - src, virq); 113 107 raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags); 114 - generic_handle_irq(virq); 108 + err = generic_handle_domain_irq(kirq->irqd, src); 115 109 raw_spin_unlock_irqrestore(&kirq->wa_lock, 116 110 wa_lock_flags); 111 + 112 + if (err) 113 + dev_warn_ratelimited(kirq->dev, "spurious irq detected hwirq %d\n", 114 + src); 117 115 } 118 116 } 119 117
+1 -1
drivers/irqchip/irq-loongson-htpic.c
··· 48 48 break; 49 49 } 50 50 51 - generic_handle_irq(irq_linear_revmap(priv->domain, bit)); 51 + generic_handle_domain_irq(priv->domain, bit); 52 52 pending &= ~BIT(bit); 53 53 } 54 54 chained_irq_exit(chip, desc);
+2 -2
drivers/irqchip/irq-loongson-htvec.c
··· 47 47 while (pending) { 48 48 int bit = __ffs(pending); 49 49 50 - generic_handle_irq(irq_linear_revmap(priv->htvec_domain, bit + 51 - VEC_COUNT_PER_REG * i)); 50 + generic_handle_domain_irq(priv->htvec_domain, 51 + bit + VEC_COUNT_PER_REG * i); 52 52 pending &= ~BIT(bit); 53 53 handled = true; 54 54 }
+1 -1
drivers/irqchip/irq-loongson-liointc.c
··· 73 73 while (pending) { 74 74 int bit = __ffs(pending); 75 75 76 - generic_handle_irq(irq_find_mapping(gc->domain, bit)); 76 + generic_handle_domain_irq(gc->domain, bit); 77 77 pending &= ~BIT(bit); 78 78 } 79 79
+1 -1
drivers/irqchip/irq-lpc32xx.c
··· 141 141 while (hwirq) { 142 142 irq = __ffs(hwirq); 143 143 hwirq &= ~BIT(irq); 144 - generic_handle_irq(irq_find_mapping(ic->domain, irq)); 144 + generic_handle_domain_irq(ic->domain, irq); 145 145 } 146 146 147 147 chained_irq_exit(chip, desc);
+2 -4
drivers/irqchip/irq-ls-scfg-msi.c
··· 194 194 struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc); 195 195 struct ls_scfg_msi *msi_data = msir->msi_data; 196 196 unsigned long val; 197 - int pos, size, virq, hwirq; 197 + int pos, size, hwirq; 198 198 199 199 chained_irq_enter(irq_desc_get_chip(desc), desc); 200 200 ··· 206 206 for_each_set_bit_from(pos, &val, size) { 207 207 hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) | 208 208 msir->srs; 209 - virq = irq_find_mapping(msi_data->parent, hwirq); 210 - if (virq) 211 - generic_handle_irq(virq); 209 + generic_handle_domain_irq(msi_data->parent, hwirq); 212 210 } 213 211 214 212 chained_irq_exit(irq_desc_get_chip(desc), desc);
+1 -1
drivers/irqchip/irq-ls1x.c
··· 50 50 while (pending) { 51 51 int bit = __ffs(pending); 52 52 53 - generic_handle_irq(irq_find_mapping(priv->domain, bit)); 53 + generic_handle_domain_irq(priv->domain, bit); 54 54 pending &= ~BIT(bit); 55 55 } 56 56
+6 -6
drivers/irqchip/irq-mbigen.c
··· 273 273 } 274 274 275 275 #ifdef CONFIG_ACPI 276 + static const struct acpi_device_id mbigen_acpi_match[] = { 277 + { "HISI0152", 0 }, 278 + {} 279 + }; 280 + MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match); 281 + 276 282 static int mbigen_acpi_create_domain(struct platform_device *pdev, 277 283 struct mbigen_device *mgn_chip) 278 284 { ··· 374 368 { /* END */ } 375 369 }; 376 370 MODULE_DEVICE_TABLE(of, mbigen_of_match); 377 - 378 - static const struct acpi_device_id mbigen_acpi_match[] = { 379 - { "HISI0152", 0 }, 380 - {} 381 - }; 382 - MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match); 383 371 384 372 static struct platform_driver mbigen_platform_driver = { 385 373 .driver = {
+11 -10
drivers/irqchip/irq-mips-gic.c
··· 16 16 #include <linux/interrupt.h> 17 17 #include <linux/irq.h> 18 18 #include <linux/irqchip.h> 19 + #include <linux/irqdomain.h> 19 20 #include <linux/of_address.h> 20 21 #include <linux/percpu.h> 21 22 #include <linux/sched.h> ··· 148 147 149 148 static void gic_handle_shared_int(bool chained) 150 149 { 151 - unsigned int intr, virq; 150 + unsigned int intr; 152 151 unsigned long *pcpu_mask; 153 152 DECLARE_BITMAP(pending, GIC_MAX_INTRS); 154 153 ··· 165 164 bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs); 166 165 167 166 for_each_set_bit(intr, pending, gic_shared_intrs) { 168 - virq = irq_linear_revmap(gic_irq_domain, 169 - GIC_SHARED_TO_HWIRQ(intr)); 170 167 if (chained) 171 - generic_handle_irq(virq); 168 + generic_handle_domain_irq(gic_irq_domain, 169 + GIC_SHARED_TO_HWIRQ(intr)); 172 170 else 173 - do_IRQ(virq); 171 + do_IRQ(irq_find_mapping(gic_irq_domain, 172 + GIC_SHARED_TO_HWIRQ(intr))); 174 173 } 175 174 } 176 175 ··· 308 307 static void gic_handle_local_int(bool chained) 309 308 { 310 309 unsigned long pending, masked; 311 - unsigned int intr, virq; 310 + unsigned int intr; 312 311 313 312 pending = read_gic_vl_pend(); 314 313 masked = read_gic_vl_mask(); ··· 316 315 bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); 317 316 318 317 for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) { 319 - virq = irq_linear_revmap(gic_irq_domain, 320 - GIC_LOCAL_TO_HWIRQ(intr)); 321 318 if (chained) 322 - generic_handle_irq(virq); 319 + generic_handle_domain_irq(gic_irq_domain, 320 + GIC_LOCAL_TO_HWIRQ(intr)); 323 321 else 324 - do_IRQ(virq); 322 + do_IRQ(irq_find_mapping(gic_irq_domain, 323 + GIC_LOCAL_TO_HWIRQ(intr))); 325 324 } 326 325 } 327 326
+1 -1
drivers/irqchip/irq-mscc-ocelot.c
··· 107 107 while (reg) { 108 108 u32 hwirq = __fls(reg); 109 109 110 - generic_handle_irq(irq_find_mapping(d, hwirq)); 110 + generic_handle_domain_irq(d, hwirq); 111 111 reg &= ~(BIT(hwirq)); 112 112 } 113 113
+2 -5
drivers/irqchip/irq-mvebu-pic.c
··· 91 91 struct mvebu_pic *pic = irq_desc_get_handler_data(desc); 92 92 struct irq_chip *chip = irq_desc_get_chip(desc); 93 93 unsigned long irqmap, irqn; 94 - unsigned int cascade_irq; 95 94 96 95 irqmap = readl_relaxed(pic->base + PIC_CAUSE); 97 96 chained_irq_enter(chip, desc); 98 97 99 - for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) { 100 - cascade_irq = irq_find_mapping(pic->domain, irqn); 101 - generic_handle_irq(cascade_irq); 102 - } 98 + for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) 99 + generic_handle_domain_irq(pic->domain, irqn); 103 100 104 101 chained_irq_exit(chip, desc); 105 102 }
+4 -9
drivers/irqchip/irq-mvebu-sei.c
··· 337 337 irqmap = readl_relaxed(sei->base + GICP_SECR(idx)); 338 338 for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) { 339 339 unsigned long hwirq; 340 - unsigned int virq; 340 + int err; 341 341 342 342 hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit; 343 - virq = irq_find_mapping(sei->sei_domain, hwirq); 344 - if (likely(virq)) { 345 - generic_handle_irq(virq); 346 - continue; 347 - } 348 - 349 - dev_warn(sei->dev, 350 - "Spurious IRQ detected (hwirq %lu)\n", hwirq); 343 + err = generic_handle_domain_irq(sei->sei_domain, hwirq); 344 + if (unlikely(err)) 345 + dev_warn(sei->dev, "Spurious IRQ detected (hwirq %lu)\n", hwirq); 351 346 } 352 347 } 353 348
+1 -3
drivers/irqchip/irq-nvic.c
··· 40 40 asmlinkage void __exception_irq_entry 41 41 nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs) 42 42 { 43 - unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq); 44 - 45 - handle_IRQ(irq, regs); 43 + handle_domain_irq(nvic_irq_domain, hwirq, regs); 46 44 } 47 45 48 46 static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+1 -1
drivers/irqchip/irq-orion.c
··· 117 117 while (stat) { 118 118 u32 hwirq = __fls(stat); 119 119 120 - generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq)); 120 + generic_handle_domain_irq(d, gc->irq_base + hwirq); 121 121 stat &= ~(1 << hwirq); 122 122 } 123 123 }
+3 -6
drivers/irqchip/irq-partition-percpu.c
··· 124 124 break; 125 125 } 126 126 127 - if (unlikely(hwirq == part->nr_parts)) { 127 + if (unlikely(hwirq == part->nr_parts)) 128 128 handle_bad_irq(desc); 129 - } else { 130 - unsigned int irq; 131 - irq = irq_find_mapping(part->domain, hwirq); 132 - generic_handle_irq(irq); 133 - } 129 + else 130 + generic_handle_domain_irq(part->domain, hwirq); 134 131 135 132 chained_irq_exit(chip, desc); 136 133 }
+3 -6
drivers/irqchip/irq-pruss-intc.c
··· 488 488 489 489 while (true) { 490 490 u32 hipir; 491 - unsigned int virq; 492 - int hwirq; 491 + int hwirq, err; 493 492 494 493 /* get highest priority pending PRUSS system event */ 495 494 hipir = pruss_intc_read_reg(intc, PRU_INTC_HIPIR(host_irq)); ··· 496 497 break; 497 498 498 499 hwirq = hipir & GENMASK(9, 0); 499 - virq = irq_find_mapping(intc->domain, hwirq); 500 + err = generic_handle_domain_irq(intc->domain, hwirq); 500 501 501 502 /* 502 503 * NOTE: manually ACK any system events that do not have a 503 504 * handler mapped yet 504 505 */ 505 - if (WARN_ON_ONCE(!virq)) 506 + if (WARN_ON_ONCE(err)) 506 507 pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq); 507 - else 508 - generic_handle_irq(virq); 509 508 } 510 509 511 510 chained_irq_exit(chip, desc);
+1 -1
drivers/irqchip/irq-realtek-rtl.c
··· 85 85 goto out; 86 86 } 87 87 domain = irq_desc_get_handler_data(desc); 88 - generic_handle_irq(irq_find_mapping(domain, __ffs(pending))); 88 + generic_handle_domain_irq(domain, __ffs(pending)); 89 89 90 90 out: 91 91 chained_irq_exit(chip, desc);
+1 -1
drivers/irqchip/irq-renesas-irqc.c
··· 115 115 if (ioread32(p->iomem + DETECT_STATUS) & bit) { 116 116 iowrite32(bit, p->iomem + DETECT_STATUS); 117 117 irqc_dbg(i, "demux2"); 118 - generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq)); 118 + generic_handle_domain_irq(p->irq_domain, i->hw_irq); 119 119 return IRQ_HANDLED; 120 120 } 121 121 return IRQ_NONE;
+3 -5
drivers/irqchip/irq-sifive-plic.c
··· 233 233 chained_irq_enter(chip, desc); 234 234 235 235 while ((hwirq = readl(claim))) { 236 - int irq = irq_find_mapping(handler->priv->irqdomain, hwirq); 237 - 238 - if (unlikely(irq <= 0)) 236 + int err = generic_handle_domain_irq(handler->priv->irqdomain, 237 + hwirq); 238 + if (unlikely(err)) 239 239 pr_warn_ratelimited("can't find mapping for hwirq %lu\n", 240 240 hwirq); 241 - else 242 - generic_handle_irq(irq); 243 241 } 244 242 245 243 chained_irq_exit(chip, desc);
+4 -6
drivers/irqchip/irq-stm32-exti.c
··· 257 257 { 258 258 struct irq_domain *domain = irq_desc_get_handler_data(desc); 259 259 struct irq_chip *chip = irq_desc_get_chip(desc); 260 - unsigned int virq, nbanks = domain->gc->num_chips; 260 + unsigned int nbanks = domain->gc->num_chips; 261 261 struct irq_chip_generic *gc; 262 262 unsigned long pending; 263 263 int n, i, irq_base = 0; ··· 268 268 gc = irq_get_domain_generic_chip(domain, irq_base); 269 269 270 270 while ((pending = stm32_exti_pending(gc))) { 271 - for_each_set_bit(n, &pending, IRQS_PER_BANK) { 272 - virq = irq_find_mapping(domain, irq_base + n); 273 - generic_handle_irq(virq); 274 - } 275 - } 271 + for_each_set_bit(n, &pending, IRQS_PER_BANK) 272 + generic_handle_domain_irq(domain, irq_base + n); 273 + } 276 274 } 277 275 278 276 chained_irq_exit(chip, desc);
+2 -6
drivers/irqchip/irq-sun4i.c
··· 147 147 struct device_node *parent) 148 148 { 149 149 irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL); 150 - if (!irq_ic_data) { 151 - pr_err("kzalloc failed!\n"); 150 + if (!irq_ic_data) 152 151 return -ENOMEM; 153 - } 154 152 155 153 irq_ic_data->enable_reg_offset = SUN4I_IRQ_ENABLE_REG_OFFSET; 156 154 irq_ic_data->mask_reg_offset = SUN4I_IRQ_MASK_REG_OFFSET; ··· 162 164 struct device_node *parent) 163 165 { 164 166 irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL); 165 - if (!irq_ic_data) { 166 - pr_err("kzalloc failed!\n"); 167 + if (!irq_ic_data) 167 168 return -ENOMEM; 168 - } 169 169 170 170 irq_ic_data->enable_reg_offset = SUNIV_IRQ_ENABLE_REG_OFFSET; 171 171 irq_ic_data->mask_reg_offset = SUNIV_IRQ_MASK_REG_OFFSET;
+1 -2
drivers/irqchip/irq-sunxi-nmi.c
··· 88 88 { 89 89 struct irq_domain *domain = irq_desc_get_handler_data(desc); 90 90 struct irq_chip *chip = irq_desc_get_chip(desc); 91 - unsigned int virq = irq_find_mapping(domain, 0); 92 91 93 92 chained_irq_enter(chip, desc); 94 - generic_handle_irq(virq); 93 + generic_handle_domain_irq(domain, 0); 95 94 chained_irq_exit(chip, desc); 96 95 } 97 96
+1 -1
drivers/irqchip/irq-tb10x.c
··· 91 91 struct irq_domain *domain = irq_desc_get_handler_data(desc); 92 92 unsigned int irq = irq_desc_get_irq(desc); 93 93 94 - generic_handle_irq(irq_find_mapping(domain, irq)); 94 + generic_handle_domain_irq(domain, irq); 95 95 } 96 96 97 97 static int __init of_tb10x_init_irq(struct device_node *ictl,
+3 -6
drivers/irqchip/irq-ti-sci-inta.c
··· 147 147 struct ti_sci_inta_vint_desc *vint_desc; 148 148 struct ti_sci_inta_irq_domain *inta; 149 149 struct irq_domain *domain; 150 - unsigned int virq, bit; 150 + unsigned int bit; 151 151 unsigned long val; 152 152 153 153 vint_desc = irq_desc_get_handler_data(desc); ··· 159 159 val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 + 160 160 VINT_STATUS_MASKED_OFFSET); 161 161 162 - for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) { 163 - virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq); 164 - if (virq) 165 - generic_handle_irq(virq); 166 - } 162 + for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) 163 + generic_handle_domain_irq(domain, vint_desc->events[bit].hwirq); 167 164 168 165 chained_irq_exit(irq_desc_get_chip(desc), desc); 169 166 }
+1 -2
drivers/irqchip/irq-ts4800.c
··· 79 79 80 80 do { 81 81 unsigned int bit = __ffs(status); 82 - int irq = irq_find_mapping(data->domain, bit); 83 82 83 + generic_handle_domain_irq(data->domain, bit); 84 84 status &= ~(1 << bit); 85 - generic_handle_irq(irq); 86 85 } while (status); 87 86 88 87 out:
+1 -1
drivers/irqchip/irq-versatile-fpga.c
··· 85 85 unsigned int irq = ffs(status) - 1; 86 86 87 87 status &= ~(1 << irq); 88 - generic_handle_irq(irq_find_mapping(f->domain, irq)); 88 + generic_handle_domain_irq(f->domain, irq); 89 89 } while (status); 90 90 91 91 out:
+1 -1
drivers/irqchip/irq-vic.c
··· 225 225 226 226 while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { 227 227 hwirq = ffs(stat) - 1; 228 - generic_handle_irq(irq_find_mapping(vic->domain, hwirq)); 228 + generic_handle_domain_irq(vic->domain, hwirq); 229 229 } 230 230 231 231 chained_irq_exit(host_chip, desc);
+5 -18
drivers/irqchip/irq-xilinx-intc.c
··· 110 110 .irq_mask_ack = intc_mask_ack, 111 111 }; 112 112 113 - static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc) 114 - { 115 - unsigned int irq = 0; 116 - u32 hwirq; 117 - 118 - hwirq = xintc_read(irqc, IVR); 119 - if (hwirq != -1U) 120 - irq = irq_find_mapping(irqc->root_domain, hwirq); 121 - 122 - pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); 123 - 124 - return irq; 125 - } 126 - 127 113 unsigned int xintc_get_irq(void) 128 114 { 129 115 unsigned int irq = -1; ··· 150 164 { 151 165 struct irq_chip *chip = irq_desc_get_chip(desc); 152 166 struct xintc_irq_chip *irqc; 153 - u32 pending; 154 167 155 168 irqc = irq_data_get_irq_handler_data(&desc->irq_data); 156 169 chained_irq_enter(chip, desc); 157 170 do { 158 - pending = xintc_get_irq_local(irqc); 159 - if (pending == 0) 171 + u32 hwirq = xintc_read(irqc, IVR); 172 + 173 + if (hwirq == -1U) 160 174 break; 161 - generic_handle_irq(pending); 175 + 176 + generic_handle_domain_irq(irqc->root_domain, hwirq); 162 177 } while (true); 163 178 chained_irq_exit(chip, desc); 164 179 }
+1 -5
drivers/irqchip/qcom-irq-combiner.c
··· 53 53 chained_irq_enter(chip, desc); 54 54 55 55 for (reg = 0; reg < combiner->nregs; reg++) { 56 - int virq; 57 56 int hwirq; 58 57 u32 bit; 59 58 u32 status; ··· 69 70 bit = __ffs(status); 70 71 status &= ~(1 << bit); 71 72 hwirq = irq_nr(reg, bit); 72 - virq = irq_find_mapping(combiner->domain, hwirq); 73 - if (virq > 0) 74 - generic_handle_irq(virq); 75 - 73 + generic_handle_domain_irq(combiner->domain, hwirq); 76 74 } 77 75 } 78 76
+7 -1
drivers/irqchip/qcom-pdc.c
··· 11 11 #include <linux/irqdomain.h> 12 12 #include <linux/io.h> 13 13 #include <linux/kernel.h> 14 + #include <linux/module.h> 14 15 #include <linux/of.h> 15 16 #include <linux/of_address.h> 16 17 #include <linux/of_device.h> 18 + #include <linux/of_irq.h> 17 19 #include <linux/soc/qcom/irq.h> 18 20 #include <linux/spinlock.h> 19 21 #include <linux/slab.h> ··· 461 459 return ret; 462 460 } 463 461 464 - IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init); 462 + IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc) 463 + IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init) 464 + IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc) 465 + MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller"); 466 + MODULE_LICENSE("GPL v2");
+1
drivers/mfd/ioc3.c
··· 14 14 #include <linux/delay.h> 15 15 #include <linux/errno.h> 16 16 #include <linux/interrupt.h> 17 + #include <linux/irqdomain.h> 17 18 #include <linux/mfd/core.h> 18 19 #include <linux/module.h> 19 20 #include <linux/pci.h>
+1
drivers/scsi/ibmvscsi/ibmvfc.c
··· 13 13 #include <linux/dmapool.h> 14 14 #include <linux/delay.h> 15 15 #include <linux/interrupt.h> 16 + #include <linux/irqdomain.h> 16 17 #include <linux/kthread.h> 17 18 #include <linux/slab.h> 18 19 #include <linux/of.h>
+1
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 22 22 #include <linux/list.h> 23 23 #include <linux/string.h> 24 24 #include <linux/delay.h> 25 + #include <linux/of.h> 25 26 26 27 #include <target/target_core_base.h> 27 28 #include <target/target_core_fabric.h>
+2
drivers/staging/octeon-usb/octeon-hcd.c
··· 50 50 #include <linux/module.h> 51 51 #include <linux/usb/hcd.h> 52 52 #include <linux/prefetch.h> 53 + #include <linux/irqdomain.h> 53 54 #include <linux/dma-mapping.h> 54 55 #include <linux/platform_device.h> 56 + #include <linux/of.h> 55 57 56 58 #include <asm/octeon/octeon.h> 57 59
+1
drivers/watchdog/octeon-wdt-main.c
··· 54 54 #include <linux/delay.h> 55 55 #include <linux/cpu.h> 56 56 #include <linux/irq.h> 57 + #include <linux/irqdomain.h> 57 58 58 59 #include <asm/mipsregs.h> 59 60 #include <asm/uasm.h>
+3
include/linux/interrupt.h
··· 64 64 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. 65 65 * Users will enable it explicitly by enable_irq() or enable_nmi() 66 66 * later. 67 + * IRQF_NO_DEBUG - Exclude from runnaway detection for IPI and similar handlers, 68 + * depends on IRQF_PERCPU. 67 69 */ 68 70 #define IRQF_SHARED 0x00000080 69 71 #define IRQF_PROBE_SHARED 0x00000100 ··· 80 78 #define IRQF_EARLY_RESUME 0x00020000 81 79 #define IRQF_COND_SUSPEND 0x00040000 82 80 #define IRQF_NO_AUTOEN 0x00080000 81 + #define IRQF_NO_DEBUG 0x00100000 83 82 84 83 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) 85 84
+2
include/linux/irq.h
··· 72 72 * mechanism and from core side polling. 73 73 * IRQ_DISABLE_UNLAZY - Disable lazy irq disable 74 74 * IRQ_HIDDEN - Don't show up in /proc/interrupts 75 + * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging 75 76 */ 76 77 enum { 77 78 IRQ_TYPE_NONE = 0x00000000, ··· 100 99 IRQ_IS_POLLED = (1 << 18), 101 100 IRQ_DISABLE_UNLAZY = (1 << 19), 102 101 IRQ_HIDDEN = (1 << 20), 102 + IRQ_NO_DEBUG = (1 << 21), 103 103 }; 104 104 105 105 #define IRQF_MODIFY_MASK \
+7 -11
include/linux/irqdesc.h
··· 158 158 desc->handle_irq(desc); 159 159 } 160 160 161 + int handle_irq_desc(struct irq_desc *desc); 161 162 int generic_handle_irq(unsigned int irq); 162 163 163 - #ifdef CONFIG_HANDLE_DOMAIN_IRQ 164 + #ifdef CONFIG_IRQ_DOMAIN 164 165 /* 165 166 * Convert a HW interrupt number to a logical one using a IRQ domain, 166 167 * and handle the result interrupt number. Return -EINVAL if 167 - * conversion failed. Providing a NULL domain indicates that the 168 - * conversion has already been done. 168 + * conversion failed. 169 169 */ 170 - int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, 171 - bool lookup, struct pt_regs *regs); 170 + int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq); 172 171 173 - static inline int handle_domain_irq(struct irq_domain *domain, 174 - unsigned int hwirq, struct pt_regs *regs) 175 - { 176 - return __handle_domain_irq(domain, hwirq, true, regs); 177 - } 172 + #ifdef CONFIG_HANDLE_DOMAIN_IRQ 173 + int handle_domain_irq(struct irq_domain *domain, 174 + unsigned int hwirq, struct pt_regs *regs); 178 175 179 - #ifdef CONFIG_IRQ_DOMAIN 180 176 int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, 181 177 struct pt_regs *regs); 182 178 #endif
+37 -28
include/linux/irqdomain.h
··· 41 41 struct irq_domain; 42 42 struct irq_chip; 43 43 struct irq_data; 44 + struct irq_desc; 44 45 struct cpumask; 45 46 struct seq_file; 46 47 struct irq_affinity_desc; 47 - 48 - /* Number of irqs reserved for a legacy isa controller */ 49 - #define NUM_ISA_INTERRUPTS 16 50 48 51 49 #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 52 50 ··· 150 152 * @parent: Pointer to parent irq_domain to support hierarchy irq_domains 151 153 * 152 154 * Revmap data, used internally by irq_domain 153 - * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that 154 - * support direct mapping 155 - * @revmap_size: Size of the linear map table @linear_revmap[] 155 + * @revmap_size: Size of the linear map table @revmap[] 156 156 * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map 157 - * @linear_revmap: Linear table of hwirq->virq reverse mappings 157 + * @revmap_mutex: Lock for the revmap 158 + * @revmap: Linear table of irq_data pointers 158 159 */ 159 160 struct irq_domain { 160 161 struct list_head link; ··· 173 176 174 177 /* reverse map data. The linear map gets appended to the irq_domain */ 175 178 irq_hw_number_t hwirq_max; 176 - unsigned int revmap_direct_max_irq; 177 179 unsigned int revmap_size; 178 180 struct radix_tree_root revmap_tree; 179 - struct mutex revmap_tree_mutex; 180 - unsigned int linear_revmap[]; 181 + struct mutex revmap_mutex; 182 + struct irq_data __rcu *revmap[]; 181 183 }; 182 184 183 185 /* Irq domain flags */ ··· 205 209 * handled in core code. 
206 210 */ 207 211 IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6), 212 + 213 + /* Irq domain doesn't translate anything */ 214 + IRQ_DOMAIN_FLAG_NO_MAP = (1 << 7), 208 215 209 216 /* 210 217 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved ··· 347 348 { 348 349 return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); 349 350 } 351 + 352 + #ifdef CONFIG_IRQ_DOMAIN_NOMAP 350 353 static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, 351 354 unsigned int max_irq, 352 355 const struct irq_domain_ops *ops, ··· 356 355 { 357 356 return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); 358 357 } 359 - static inline struct irq_domain *irq_domain_add_legacy_isa( 360 - struct device_node *of_node, 361 - const struct irq_domain_ops *ops, 362 - void *host_data) 363 - { 364 - return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, 365 - host_data); 366 - } 358 + 359 + extern unsigned int irq_create_direct_mapping(struct irq_domain *host); 360 + #endif 361 + 367 362 static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, 368 363 const struct irq_domain_ops *ops, 369 364 void *host_data) ··· 402 405 return irq_create_mapping_affinity(host, hwirq, NULL); 403 406 } 404 407 408 + extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, 409 + irq_hw_number_t hwirq, 410 + unsigned int *irq); 411 + 412 + static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain, 413 + irq_hw_number_t hwirq) 414 + { 415 + return __irq_resolve_mapping(domain, hwirq, NULL); 416 + } 405 417 406 418 /** 407 - * irq_linear_revmap() - Find a linux irq from a hw irq number. 419 + * irq_find_mapping() - Find a linux irq from a hw irq number. 
408 420 * @domain: domain owning this hardware interrupt 409 421 * @hwirq: hardware irq number in that domain space 410 - * 411 - * This is a fast path alternative to irq_find_mapping() that can be 412 - * called directly by irq controller code to save a handful of 413 - * instructions. It is always safe to call, but won't find irqs mapped 414 - * using the radix tree. 415 422 */ 423 + static inline unsigned int irq_find_mapping(struct irq_domain *domain, 424 + irq_hw_number_t hwirq) 425 + { 426 + unsigned int irq; 427 + 428 + if (__irq_resolve_mapping(domain, hwirq, &irq)) 429 + return irq; 430 + 431 + return 0; 432 + } 433 + 416 434 static inline unsigned int irq_linear_revmap(struct irq_domain *domain, 417 435 irq_hw_number_t hwirq) 418 436 { 419 - return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; 437 + return irq_find_mapping(domain, hwirq); 420 438 } 421 - extern unsigned int irq_find_mapping(struct irq_domain *host, 422 - irq_hw_number_t hwirq); 423 - extern unsigned int irq_create_direct_mapping(struct irq_domain *host); 424 439 425 440 extern const struct irq_domain_ops irq_domain_simple_ops; 426 441
+5
kernel/irq/Kconfig
··· 70 70 bool 71 71 select IRQ_DOMAIN 72 72 73 + # Support for obsolete non-mapping irq domains 74 + config IRQ_DOMAIN_NOMAP 75 + bool 76 + select IRQ_DOMAIN 77 + 73 78 # Support for hierarchical fasteoi+edge and fasteoi+level handlers 74 79 config IRQ_FASTEOI_HIERARCHY_HANDLERS 75 80 bool
+1 -1
kernel/irq/chip.c
··· 481 481 for_each_action_of_desc(desc, action) 482 482 action_ret |= action->thread_fn(action->irq, action->dev_id); 483 483 484 - if (!noirqdebug) 484 + if (!irq_settings_no_debug(desc)) 485 485 note_interrupt(desc, action_ret); 486 486 487 487 raw_spin_lock_irq(&desc->lock);
+1 -1
kernel/irq/handle.c
··· 197 197 198 198 add_interrupt_randomness(desc->irq_data.irq, flags); 199 199 200 - if (!noirqdebug) 200 + if (!irq_settings_no_debug(desc)) 201 201 note_interrupt(desc, retval); 202 202 return retval; 203 203 }
+43 -30
kernel/irq/irqdesc.c
··· 632 632 633 633 #endif /* !CONFIG_SPARSE_IRQ */ 634 634 635 - /** 636 - * generic_handle_irq - Invoke the handler for a particular irq 637 - * @irq: The irq number to handle 638 - * 639 - */ 640 - int generic_handle_irq(unsigned int irq) 635 + int handle_irq_desc(struct irq_desc *desc) 641 636 { 642 - struct irq_desc *desc = irq_to_desc(irq); 643 637 struct irq_data *data; 644 638 645 639 if (!desc) ··· 646 652 generic_handle_irq_desc(desc); 647 653 return 0; 648 654 } 655 + EXPORT_SYMBOL_GPL(handle_irq_desc); 656 + 657 + /** 658 + * generic_handle_irq - Invoke the handler for a particular irq 659 + * @irq: The irq number to handle 660 + * 661 + */ 662 + int generic_handle_irq(unsigned int irq) 663 + { 664 + return handle_irq_desc(irq_to_desc(irq)); 665 + } 649 666 EXPORT_SYMBOL_GPL(generic_handle_irq); 667 + 668 + #ifdef CONFIG_IRQ_DOMAIN 669 + /** 670 + * generic_handle_domain_irq - Invoke the handler for a HW irq belonging 671 + * to a domain, usually for a non-root interrupt 672 + * controller 673 + * @domain: The domain where to perform the lookup 674 + * @hwirq: The HW irq number to convert to a logical one 675 + * 676 + * Returns: 0 on success, or -EINVAL if conversion has failed 677 + * 678 + */ 679 + int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq) 680 + { 681 + return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); 682 + } 683 + EXPORT_SYMBOL_GPL(generic_handle_domain_irq); 650 684 651 685 #ifdef CONFIG_HANDLE_DOMAIN_IRQ 652 686 /** 653 - * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain 687 + * handle_domain_irq - Invoke the handler for a HW irq belonging to a domain, 688 + * usually for a root interrupt controller 654 689 * @domain: The domain where to perform the lookup 655 690 * @hwirq: The HW irq number to convert to a logical one 656 691 * @lookup: Whether to perform the domain lookup or not ··· 687 664 * 688 665 * Returns: 0 on success, or -EINVAL if conversion has failed 689 666 */ 
690 - int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, 691 - bool lookup, struct pt_regs *regs) 667 + int handle_domain_irq(struct irq_domain *domain, 668 + unsigned int hwirq, struct pt_regs *regs) 692 669 { 693 670 struct pt_regs *old_regs = set_irq_regs(regs); 694 - unsigned int irq = hwirq; 671 + struct irq_desc *desc; 695 672 int ret = 0; 696 673 697 674 irq_enter(); 698 675 699 - #ifdef CONFIG_IRQ_DOMAIN 700 - if (lookup) 701 - irq = irq_find_mapping(domain, hwirq); 702 - #endif 703 - 704 - /* 705 - * Some hardware gives randomly wrong interrupts. Rather 706 - * than crashing, do something sensible. 707 - */ 708 - if (unlikely(!irq || irq >= nr_irqs)) { 709 - ack_bad_irq(irq); 676 + /* The irqdomain code provides boundary checks */ 677 + desc = irq_resolve_mapping(domain, hwirq); 678 + if (likely(desc)) 679 + handle_irq_desc(desc); 680 + else 710 681 ret = -EINVAL; 711 - } else { 712 - generic_handle_irq(irq); 713 - } 714 682 715 683 irq_exit(); 716 684 set_irq_regs(old_regs); 717 685 return ret; 718 686 } 719 687 720 - #ifdef CONFIG_IRQ_DOMAIN 721 688 /** 722 689 * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain 723 690 * @domain: The domain where to perform the lookup ··· 722 709 struct pt_regs *regs) 723 710 { 724 711 struct pt_regs *old_regs = set_irq_regs(regs); 725 - unsigned int irq; 712 + struct irq_desc *desc; 726 713 int ret = 0; 727 714 728 715 /* ··· 730 717 */ 731 718 WARN_ON(!in_nmi()); 732 719 733 - irq = irq_find_mapping(domain, hwirq); 720 + desc = irq_resolve_mapping(domain, hwirq); 734 721 735 722 /* 736 723 * ack_bad_irq is not NMI-safe, just report 737 724 * an invalid interrupt. 738 725 */ 739 - if (likely(irq)) 740 - generic_handle_irq(irq); 726 + if (likely(desc)) 727 + handle_irq_desc(desc); 741 728 else 742 729 ret = -EINVAL; 743 730
+81 -39
kernel/irq/irqdomain.c
··· 146 146 147 147 static atomic_t unknown_domains; 148 148 149 - domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), 149 + if (WARN_ON((size && direct_max) || 150 + (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && direct_max))) 151 + return NULL; 152 + 153 + domain = kzalloc_node(struct_size(domain, revmap, size), 150 154 GFP_KERNEL, of_node_to_nid(to_of_node(fwnode))); 151 155 if (!domain) 152 156 return NULL; ··· 213 209 214 210 /* Fill structure */ 215 211 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); 216 - mutex_init(&domain->revmap_tree_mutex); 212 + mutex_init(&domain->revmap_mutex); 217 213 domain->ops = ops; 218 214 domain->host_data = host_data; 219 215 domain->hwirq_max = hwirq_max; 216 + 217 + if (direct_max) { 218 + size = direct_max; 219 + domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP; 220 + } 221 + 220 222 domain->revmap_size = size; 221 - domain->revmap_direct_max_irq = direct_max; 223 + 222 224 irq_domain_check_hierarchy(domain); 223 225 224 226 mutex_lock(&irq_domain_mutex); ··· 492 482 return irq_default_domain; 493 483 } 494 484 485 + static bool irq_domain_is_nomap(struct irq_domain *domain) 486 + { 487 + return IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && 488 + (domain->flags & IRQ_DOMAIN_FLAG_NO_MAP); 489 + } 490 + 495 491 static void irq_domain_clear_mapping(struct irq_domain *domain, 496 492 irq_hw_number_t hwirq) 497 493 { 498 - if (hwirq < domain->revmap_size) { 499 - domain->linear_revmap[hwirq] = 0; 500 - } else { 501 - mutex_lock(&domain->revmap_tree_mutex); 494 + if (irq_domain_is_nomap(domain)) 495 + return; 496 + 497 + mutex_lock(&domain->revmap_mutex); 498 + if (hwirq < domain->revmap_size) 499 + rcu_assign_pointer(domain->revmap[hwirq], NULL); 500 + else 502 501 radix_tree_delete(&domain->revmap_tree, hwirq); 503 - mutex_unlock(&domain->revmap_tree_mutex); 504 - } 502 + mutex_unlock(&domain->revmap_mutex); 505 503 } 506 504 507 505 static void irq_domain_set_mapping(struct irq_domain *domain, 508 506 irq_hw_number_t hwirq, 
509 507 struct irq_data *irq_data) 510 508 { 511 - if (hwirq < domain->revmap_size) { 512 - domain->linear_revmap[hwirq] = irq_data->irq; 513 - } else { 514 - mutex_lock(&domain->revmap_tree_mutex); 509 + if (irq_domain_is_nomap(domain)) 510 + return; 511 + 512 + mutex_lock(&domain->revmap_mutex); 513 + if (hwirq < domain->revmap_size) 514 + rcu_assign_pointer(domain->revmap[hwirq], irq_data); 515 + else 515 516 radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); 516 - mutex_unlock(&domain->revmap_tree_mutex); 517 - } 517 + mutex_unlock(&domain->revmap_mutex); 518 518 } 519 519 520 520 static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) ··· 624 604 } 625 605 EXPORT_SYMBOL_GPL(irq_domain_associate_many); 626 606 607 + #ifdef CONFIG_IRQ_DOMAIN_NOMAP 627 608 /** 628 609 * irq_create_direct_mapping() - Allocate an irq for direct mapping 629 610 * @domain: domain to allocate the irq for or NULL for default domain ··· 649 628 pr_debug("create_direct virq allocation failed\n"); 650 629 return 0; 651 630 } 652 - if (virq >= domain->revmap_direct_max_irq) { 631 + if (virq >= domain->revmap_size) { 653 632 pr_err("ERROR: no free irqs available below %i maximum\n", 654 - domain->revmap_direct_max_irq); 633 + domain->revmap_size); 655 634 irq_free_desc(virq); 656 635 return 0; 657 636 } ··· 665 644 return virq; 666 645 } 667 646 EXPORT_SYMBOL_GPL(irq_create_direct_mapping); 647 + #endif 668 648 669 649 /** 670 650 * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space ··· 884 862 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 885 863 886 864 /** 887 - * irq_find_mapping() - Find a linux irq from a hw irq number. 865 + * __irq_resolve_mapping() - Find a linux irq from a hw irq number. 888 866 * @domain: domain owning this hardware interrupt 889 867 * @hwirq: hardware irq number in that domain space 868 + * @irq: optional pointer to return the Linux irq if required 869 + * 870 + * Returns the interrupt descriptor. 
890 871 */ 891 - unsigned int irq_find_mapping(struct irq_domain *domain, 892 - irq_hw_number_t hwirq) 872 + struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, 873 + irq_hw_number_t hwirq, 874 + unsigned int *irq) 893 875 { 876 + struct irq_desc *desc = NULL; 894 877 struct irq_data *data; 895 878 896 879 /* Look for default domain if necessary */ 897 880 if (domain == NULL) 898 881 domain = irq_default_domain; 899 882 if (domain == NULL) 900 - return 0; 883 + return desc; 901 884 902 - if (hwirq < domain->revmap_direct_max_irq) { 903 - data = irq_domain_get_irq_data(domain, hwirq); 904 - if (data && data->hwirq == hwirq) 905 - return hwirq; 885 + if (irq_domain_is_nomap(domain)) { 886 + if (hwirq < domain->revmap_size) { 887 + data = irq_domain_get_irq_data(domain, hwirq); 888 + if (data && data->hwirq == hwirq) 889 + desc = irq_data_to_desc(data); 890 + } 891 + 892 + return desc; 906 893 } 907 894 895 + rcu_read_lock(); 908 896 /* Check if the hwirq is in the linear revmap. */ 909 897 if (hwirq < domain->revmap_size) 910 - return domain->linear_revmap[hwirq]; 898 + data = rcu_dereference(domain->revmap[hwirq]); 899 + else 900 + data = radix_tree_lookup(&domain->revmap_tree, hwirq); 911 901 912 - rcu_read_lock(); 913 - data = radix_tree_lookup(&domain->revmap_tree, hwirq); 902 + if (likely(data)) { 903 + desc = irq_data_to_desc(data); 904 + if (irq) 905 + *irq = data->irq; 906 + } 907 + 914 908 rcu_read_unlock(); 915 - return data ? data->irq : 0; 909 + return desc; 916 910 } 917 - EXPORT_SYMBOL_GPL(irq_find_mapping); 911 + EXPORT_SYMBOL_GPL(__irq_resolve_mapping); 918 912 919 913 /** 920 914 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings ··· 1506 1468 { 1507 1469 void __rcu **slot; 1508 1470 1509 - if (d->hwirq < d->domain->revmap_size) 1510 - return; /* Not using radix tree. */ 1471 + if (irq_domain_is_nomap(d->domain)) 1472 + return; 1511 1473 1512 1474 /* Fix up the revmap. 
*/ 1513 - mutex_lock(&d->domain->revmap_tree_mutex); 1514 - slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); 1515 - if (slot) 1516 - radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); 1517 - mutex_unlock(&d->domain->revmap_tree_mutex); 1475 + mutex_lock(&d->domain->revmap_mutex); 1476 + if (d->hwirq < d->domain->revmap_size) { 1477 + /* Not using radix tree */ 1478 + rcu_assign_pointer(d->domain->revmap[d->hwirq], d); 1479 + } else { 1480 + slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); 1481 + if (slot) 1482 + radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); 1483 + } 1484 + mutex_unlock(&d->domain->revmap_mutex); 1518 1485 } 1519 1486 1520 1487 /** ··· 1871 1828 irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind) 1872 1829 { 1873 1830 seq_printf(m, "%*sname: %s\n", ind, "", d->name); 1874 - seq_printf(m, "%*ssize: %u\n", ind + 1, "", 1875 - d->revmap_size + d->revmap_direct_max_irq); 1831 + seq_printf(m, "%*ssize: %u\n", ind + 1, "", d->revmap_size); 1876 1832 seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount); 1877 1833 seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags); 1878 1834 if (d->ops && d->ops->debug_show)
+5
kernel/irq/manage.c
··· 1717 1717 if (new->flags & IRQF_PERCPU) { 1718 1718 irqd_set(&desc->irq_data, IRQD_PER_CPU); 1719 1719 irq_settings_set_per_cpu(desc); 1720 + if (new->flags & IRQF_NO_DEBUG) 1721 + irq_settings_set_no_debug(desc); 1720 1722 } 1723 + 1724 + if (noirqdebug) 1725 + irq_settings_set_no_debug(desc); 1721 1726 1722 1727 if (new->flags & IRQF_ONESHOT) 1723 1728 desc->istate |= IRQS_ONESHOT;
+12
kernel/irq/settings.h
··· 18 18 _IRQ_IS_POLLED = IRQ_IS_POLLED, 19 19 _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, 20 20 _IRQ_HIDDEN = IRQ_HIDDEN, 21 + _IRQ_NO_DEBUG = IRQ_NO_DEBUG, 21 22 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, 22 23 }; 23 24 ··· 34 33 #define IRQ_IS_POLLED GOT_YOU_MORON 35 34 #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON 36 35 #define IRQ_HIDDEN GOT_YOU_MORON 36 + #define IRQ_NO_DEBUG GOT_YOU_MORON 37 37 #undef IRQF_MODIFY_MASK 38 38 #define IRQF_MODIFY_MASK GOT_YOU_MORON 39 39 ··· 175 173 static inline bool irq_settings_is_hidden(struct irq_desc *desc) 176 174 { 177 175 return desc->status_use_accessors & _IRQ_HIDDEN; 176 + } 177 + 178 + static inline void irq_settings_set_no_debug(struct irq_desc *desc) 179 + { 180 + desc->status_use_accessors |= _IRQ_NO_DEBUG; 181 + } 182 + 183 + static inline bool irq_settings_no_debug(struct irq_desc *desc) 184 + { 185 + return desc->status_use_accessors & _IRQ_NO_DEBUG; 178 186 }