Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'irqchip-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

Pull irqchip updates from Marc Zyngier:

- Add support for the STM32MP13 variant

- Move parent device away from struct irq_chip

- Remove all instances of non-const strings assigned to
struct irq_chip::name, enabling a nice cleanup for VIC and GIC

- Simplify the Qualcomm PDC driver

- A bunch of SiFive PLIC cleanups

- Add support for a new variant of the Meson GPIO block

- Add support for the irqchip side of the Apple M1 PMU

- Add support for the Apple M1 Pro/Max AICv2 irqchip

- Add support for the Qualcomm MPM wakeup gadget

- Move the Xilinx driver over to the generic irqdomain handling

- Tiny speedup for IPIs on GICv3 systems

- The usual odd cleanups

Link: https://lore.kernel.org/all/20220313105142.704579-1-maz@kernel.org

+1619 -445
+2
Documentation/devicetree/bindings/arm/pmu.yaml
··· 20 20 items: 21 21 - enum: 22 22 - apm,potenza-pmu 23 + - apple,firestorm-pmu 24 + - apple,icestorm-pmu 23 25 - arm,armv8-pmuv3 # Only for s/w models 24 26 - arm,arm1136-pmu 25 27 - arm,arm1176-pmu
+1
Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
··· 18 18 "amlogic,meson-g12a-gpio-intc" for G12A SoCs (S905D2, S905X2, S905Y2) 19 19 "amlogic,meson-sm1-gpio-intc" for SM1 SoCs (S905D3, S905X3, S905Y3) 20 20 "amlogic,meson-a1-gpio-intc" for A1 SoCs (A113L) 21 + "amlogic,meson-s4-gpio-intc" for S4 SoCs (S802X2, S905Y4, S805X2G, S905W2) 21 22 - reg : Specifies base physical address and size of the registers. 22 23 - interrupt-controller : Identifies the node as an interrupt controller. 23 24 - #interrupt-cells : Specifies the number of cells needed to encode an
+31
Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
··· 56 56 - 1: virtual HV timer 57 57 - 2: physical guest timer 58 58 - 3: virtual guest timer 59 + - 4: 'efficient' CPU PMU 60 + - 5: 'performance' CPU PMU 59 61 60 62 The 3rd cell contains the interrupt flags. This is normally 61 63 IRQ_TYPE_LEVEL_HIGH (4). ··· 69 67 70 68 power-domains: 71 69 maxItems: 1 70 + 71 + affinities: 72 + type: object 73 + additionalProperties: false 74 + description: 75 + FIQ affinity can be expressed as a single "affinities" node, 76 + containing a set of sub-nodes, one per FIQ with a non-default 77 + affinity. 78 + patternProperties: 79 + "^.+-affinity$": 80 + type: object 81 + additionalProperties: false 82 + properties: 83 + apple,fiq-index: 84 + description: 85 + The interrupt number specified as a FIQ, and for which 86 + the affinity is not the default. 87 + $ref: /schemas/types.yaml#/definitions/uint32 88 + maximum: 5 89 + 90 + cpus: 91 + $ref: /schemas/types.yaml#/definitions/phandle-array 92 + description: 93 + Should be a list of phandles to CPU nodes (as described in 94 + Documentation/devicetree/bindings/arm/cpus.yaml). 95 + 96 + required: 97 + - fiq-index 98 + - cpus 72 99 73 100 required: 74 101 - compatible
+98
Documentation/devicetree/bindings/interrupt-controller/apple,aic2.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/interrupt-controller/apple,aic2.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Apple Interrupt Controller 2 8 + 9 + maintainers: 10 + - Hector Martin <marcan@marcan.st> 11 + 12 + description: | 13 + The Apple Interrupt Controller 2 is a simple interrupt controller present on 14 + Apple ARM SoC platforms starting with t600x (M1 Pro and Max). 15 + 16 + It provides the following features: 17 + 18 + - Level-triggered hardware IRQs wired to SoC blocks 19 + - Single mask bit per IRQ 20 + - Automatic masking on event delivery (auto-ack) 21 + - Software triggering (ORed with hw line) 22 + - Automatic prioritization (single event/ack register per CPU, lower IRQs = 23 + higher priority) 24 + - Automatic masking on ack 25 + - Support for multiple dies 26 + 27 + This device also represents the FIQ interrupt sources on platforms using AIC, 28 + which do not go through a discrete interrupt controller. It also handles 29 + FIQ-based Fast IPIs. 30 + 31 + properties: 32 + compatible: 33 + items: 34 + - const: apple,t6000-aic 35 + - const: apple,aic2 36 + 37 + interrupt-controller: true 38 + 39 + '#interrupt-cells': 40 + const: 4 41 + description: | 42 + The 1st cell contains the interrupt type: 43 + - 0: Hardware IRQ 44 + - 1: FIQ 45 + 46 + The 2nd cell contains the die ID. 47 + 48 + The next cell contains the interrupt number. 49 + - HW IRQs: interrupt number 50 + - FIQs: 51 + - 0: physical HV timer 52 + - 1: virtual HV timer 53 + - 2: physical guest timer 54 + - 3: virtual guest timer 55 + 56 + The last cell contains the interrupt flags. This is normally 57 + IRQ_TYPE_LEVEL_HIGH (4). 58 + 59 + reg: 60 + items: 61 + - description: Address and size of the main AIC2 registers. 62 + - description: Address and size of the AIC2 Event register. 
63 + 64 + reg-names: 65 + items: 66 + - const: core 67 + - const: event 68 + 69 + power-domains: 70 + maxItems: 1 71 + 72 + required: 73 + - compatible 74 + - '#interrupt-cells' 75 + - interrupt-controller 76 + - reg 77 + - reg-names 78 + 79 + additionalProperties: false 80 + 81 + allOf: 82 + - $ref: /schemas/interrupt-controller.yaml# 83 + 84 + examples: 85 + - | 86 + soc { 87 + #address-cells = <2>; 88 + #size-cells = <2>; 89 + 90 + aic: interrupt-controller@28e100000 { 91 + compatible = "apple,t6000-aic", "apple,aic2"; 92 + #interrupt-cells = <4>; 93 + interrupt-controller; 94 + reg = <0x2 0x8e100000 0x0 0xc000>, 95 + <0x2 0x8e10c000 0x0 0x4>; 96 + reg-names = "core", "event"; 97 + }; 98 + };
+96
Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/interrupt-controller/qcom,mpm.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Qualcom MPM Interrupt Controller 8 + 9 + maintainers: 10 + - Shawn Guo <shawn.guo@linaro.org> 11 + 12 + description: 13 + Qualcomm Technologies Inc. SoCs based on the RPM architecture have a 14 + MSM Power Manager (MPM) that is in always-on domain. In addition to managing 15 + resources during sleep, the hardware also has an interrupt controller that 16 + monitors the interrupts when the system is asleep, wakes up the APSS when 17 + one of these interrupts occur and replays it to GIC interrupt controller 18 + after GIC becomes operational. 19 + 20 + allOf: 21 + - $ref: /schemas/interrupt-controller.yaml# 22 + 23 + properties: 24 + compatible: 25 + items: 26 + - const: qcom,mpm 27 + 28 + reg: 29 + maxItems: 1 30 + description: 31 + Specifies the base address and size of vMPM registers in RPM MSG RAM. 32 + 33 + interrupts: 34 + maxItems: 1 35 + description: 36 + Specify the IRQ used by RPM to wakeup APSS. 37 + 38 + mboxes: 39 + maxItems: 1 40 + description: 41 + Specify the mailbox used to notify RPM for writing vMPM registers. 42 + 43 + interrupt-controller: true 44 + 45 + '#interrupt-cells': 46 + const: 2 47 + description: 48 + The first cell is the MPM pin number for the interrupt, and the second 49 + is the trigger type. 50 + 51 + qcom,mpm-pin-count: 52 + description: 53 + Specify the total MPM pin count that a SoC supports. 54 + $ref: /schemas/types.yaml#/definitions/uint32 55 + 56 + qcom,mpm-pin-map: 57 + description: 58 + A set of MPM pin numbers and the corresponding GIC SPIs. 
59 + $ref: /schemas/types.yaml#/definitions/uint32-matrix 60 + items: 61 + items: 62 + - description: MPM pin number 63 + - description: GIC SPI number for the MPM pin 64 + 65 + required: 66 + - compatible 67 + - reg 68 + - interrupts 69 + - mboxes 70 + - interrupt-controller 71 + - '#interrupt-cells' 72 + - qcom,mpm-pin-count 73 + - qcom,mpm-pin-map 74 + 75 + additionalProperties: false 76 + 77 + examples: 78 + - | 79 + #include <dt-bindings/interrupt-controller/arm-gic.h> 80 + mpm: interrupt-controller@45f01b8 { 81 + compatible = "qcom,mpm"; 82 + interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>; 83 + reg = <0x45f01b8 0x1000>; 84 + mboxes = <&apcs_glb 1>; 85 + interrupt-controller; 86 + #interrupt-cells = <2>; 87 + interrupt-parent = <&intc>; 88 + qcom,mpm-pin-count = <96>; 89 + qcom,mpm-pin-map = <2 275>, 90 + <5 296>, 91 + <12 422>, 92 + <24 79>, 93 + <86 183>, 94 + <90 260>, 95 + <91 260>; 96 + };
+1
Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
··· 20 20 - items: 21 21 - enum: 22 22 - st,stm32mp1-exti 23 + - st,stm32mp13-exti 23 24 - const: syscon 24 25 25 26 "#interrupt-cells":
+1 -1
MAINTAINERS
··· 1769 1769 F: Documentation/devicetree/bindings/arm/apple.yaml 1770 1770 F: Documentation/devicetree/bindings/arm/apple/* 1771 1771 F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml 1772 - F: Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml 1772 + F: Documentation/devicetree/bindings/interrupt-controller/apple,* 1773 1773 F: Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml 1774 1774 F: Documentation/devicetree/bindings/pci/apple,pcie.yaml 1775 1775 F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
+24
arch/arm64/boot/dts/apple/t8103.dtsi
··· 97 97 <AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>; 98 98 }; 99 99 100 + pmu-e { 101 + compatible = "apple,icestorm-pmu"; 102 + interrupt-parent = <&aic>; 103 + interrupts = <AIC_FIQ AIC_CPU_PMU_E IRQ_TYPE_LEVEL_HIGH>; 104 + }; 105 + 106 + pmu-p { 107 + compatible = "apple,firestorm-pmu"; 108 + interrupt-parent = <&aic>; 109 + interrupts = <AIC_FIQ AIC_CPU_PMU_P IRQ_TYPE_LEVEL_HIGH>; 110 + }; 111 + 100 112 clkref: clock-ref { 101 113 compatible = "fixed-clock"; 102 114 #clock-cells = <0>; ··· 225 213 interrupt-controller; 226 214 reg = <0x2 0x3b100000 0x0 0x8000>; 227 215 power-domains = <&ps_aic>; 216 + 217 + affinities { 218 + e-core-pmu-affinity { 219 + apple,fiq-index = <AIC_CPU_PMU_E>; 220 + cpus = <&cpu0 &cpu1 &cpu2 &cpu3>; 221 + }; 222 + 223 + p-core-pmu-affinity { 224 + apple,fiq-index = <AIC_CPU_PMU_P>; 225 + cpus = <&cpu4 &cpu5 &cpu6 &cpu7>; 226 + }; 227 + }; 228 228 }; 229 229 230 230 pmgr: power-management@23b700000 {
+19
arch/arm64/include/asm/apple_m1_pmu.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #ifndef __ASM_APPLE_M1_PMU_h 4 + #define __ASM_APPLE_M1_PMU_h 5 + 6 + #include <linux/bits.h> 7 + #include <asm/sysreg.h> 8 + 9 + /* Core PMC control register */ 10 + #define SYS_IMP_APL_PMCR0_EL1 sys_reg(3, 1, 15, 0, 0) 11 + #define PMCR0_IMODE GENMASK(10, 8) 12 + #define PMCR0_IMODE_OFF 0 13 + #define PMCR0_IMODE_PMI 1 14 + #define PMCR0_IMODE_AIC 2 15 + #define PMCR0_IMODE_HALT 3 16 + #define PMCR0_IMODE_FIQ 4 17 + #define PMCR0_IACT BIT(11) 18 + 19 + #endif /* __ASM_APPLE_M1_PMU_h */
+2
arch/microblaze/Kconfig
··· 45 45 select SET_FS 46 46 select ZONE_DMA 47 47 select TRACE_IRQFLAGS_SUPPORT 48 + select GENERIC_IRQ_MULTI_HANDLER 49 + select HANDLE_DOMAIN_IRQ 48 50 49 51 # Endianness selection 50 52 choice
-3
arch/microblaze/include/asm/irq.h
··· 11 11 struct pt_regs; 12 12 extern void do_IRQ(struct pt_regs *regs); 13 13 14 - /* should be defined in each interrupt controller driver */ 15 - extern unsigned int xintc_get_irq(void); 16 - 17 14 #endif /* _ASM_MICROBLAZE_IRQ_H */
+1 -15
arch/microblaze/kernel/irq.c
··· 20 20 #include <linux/irqchip.h> 21 21 #include <linux/of_irq.h> 22 22 23 - static u32 concurrent_irq; 24 - 25 23 void __irq_entry do_IRQ(struct pt_regs *regs) 26 24 { 27 - unsigned int irq; 28 25 struct pt_regs *old_regs = set_irq_regs(regs); 29 26 trace_hardirqs_off(); 30 27 31 28 irq_enter(); 32 - irq = xintc_get_irq(); 33 - next_irq: 34 - BUG_ON(!irq); 35 - generic_handle_irq(irq); 36 - 37 - irq = xintc_get_irq(); 38 - if (irq != -1U) { 39 - pr_debug("next irq: %d\n", irq); 40 - ++concurrent_irq; 41 - goto next_irq; 42 - } 43 - 29 + handle_arch_irq(regs); 44 30 irq_exit(); 45 31 set_irq_regs(old_regs); 46 32 trace_hardirqs_on();
-1
drivers/gpio/gpio-mt7621.c
··· 239 239 240 240 rg->chip.offset = bank * MTK_BANK_WIDTH; 241 241 rg->irq_chip.name = dev_name(dev); 242 - rg->irq_chip.parent_device = dev; 243 242 rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; 244 243 rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; 245 244 rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+4 -3
drivers/gpio/gpio-omap.c
··· 986 986 writel_relaxed(0, base + bank->regs->ctrl); 987 987 } 988 988 989 - static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) 989 + static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc, 990 + struct device *pm_dev) 990 991 { 991 992 struct gpio_irq_chip *irq; 992 993 static int gpio; ··· 1053 1052 if (ret) 1054 1053 return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n"); 1055 1054 1055 + irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev); 1056 1056 ret = devm_request_irq(bank->chip.parent, bank->irq, 1057 1057 omap_gpio_irq_handler, 1058 1058 0, dev_name(bank->chip.parent), bank); ··· 1404 1402 irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock, 1405 1403 irqc->name = dev_name(&pdev->dev); 1406 1404 irqc->flags = IRQCHIP_MASK_ON_SUSPEND; 1407 - irqc->parent_device = dev; 1408 1405 1409 1406 bank->irq = platform_get_irq(pdev, 0); 1410 1407 if (bank->irq <= 0) { ··· 1467 1466 1468 1467 omap_gpio_mod_init(bank); 1469 1468 1470 - ret = omap_gpio_chip_init(bank, irqc); 1469 + ret = omap_gpio_chip_init(bank, irqc, dev); 1471 1470 if (ret) { 1472 1471 pm_runtime_put_sync(dev); 1473 1472 pm_runtime_disable(dev);
+1 -1
drivers/gpio/gpio-rcar.c
··· 530 530 531 531 irq_chip = &p->irq_chip; 532 532 irq_chip->name = "gpio-rcar"; 533 - irq_chip->parent_device = dev; 534 533 irq_chip->irq_mask = gpio_rcar_irq_disable; 535 534 irq_chip->irq_unmask = gpio_rcar_irq_enable; 536 535 irq_chip->irq_set_type = gpio_rcar_irq_set_type; ··· 551 552 goto err0; 552 553 } 553 554 555 + irq_domain_set_pm_device(gpio_chip->irq.domain, dev); 554 556 ret = devm_request_irq(dev, p->irq_parent, gpio_rcar_irq_handler, 555 557 IRQF_SHARED, name, p); 556 558 if (ret) {
+2 -1
drivers/gpio/gpio-tqmx86.c
··· 281 281 u8 irq_status; 282 282 283 283 irq_chip->name = chip->label; 284 - irq_chip->parent_device = &pdev->dev; 285 284 irq_chip->irq_mask = tqmx86_gpio_irq_mask; 286 285 irq_chip->irq_unmask = tqmx86_gpio_irq_unmask; 287 286 irq_chip->irq_set_type = tqmx86_gpio_irq_set_type; ··· 314 315 dev_err(dev, "Could not register GPIO chip\n"); 315 316 goto out_pm_dis; 316 317 } 318 + 319 + irq_domain_set_pm_device(girq->domain, dev); 317 320 318 321 dev_info(dev, "GPIO functionality initialized with %d pins\n", 319 322 chip->ngpio);
+8
drivers/irqchip/Kconfig
··· 430 430 Power Domain Controller driver to manage and configure wakeup 431 431 IRQs for Qualcomm Technologies Inc (QTI) mobile chips. 432 432 433 + config QCOM_MPM 434 + tristate "QCOM MPM" 435 + depends on ARCH_QCOM 436 + select IRQ_DOMAIN_HIERARCHY 437 + help 438 + MSM Power Manager driver to manage and configure wakeup 439 + IRQs for Qualcomm Technologies Inc (QTI) mobile chips. 440 + 433 441 config CSKY_MPINTC 434 442 bool 435 443 depends on CSKY
+1
drivers/irqchip/Makefile
··· 94 94 obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o 95 95 obj-$(CONFIG_NDS32) += irq-ativic32.o 96 96 obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o 97 + obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o 97 98 obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o 98 99 obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o 99 100 obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
+444 -107
drivers/irqchip/irq-apple-aic.c
··· 24 24 * - Default "this CPU" register view and explicit per-CPU views 25 25 * 26 26 * In addition, this driver also handles FIQs, as these are routed to the same 27 - * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and 27 + * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and 28 28 * performance counters (TODO). 29 29 * 30 30 * Implementation notes: ··· 52 52 #include <linux/irqchip.h> 53 53 #include <linux/irqchip/arm-vgic-info.h> 54 54 #include <linux/irqdomain.h> 55 + #include <linux/jump_label.h> 55 56 #include <linux/limits.h> 56 57 #include <linux/of_address.h> 57 58 #include <linux/slab.h> 59 + #include <asm/apple_m1_pmu.h> 60 + #include <asm/cputype.h> 58 61 #include <asm/exception.h> 59 62 #include <asm/sysreg.h> 60 63 #include <asm/virt.h> ··· 65 62 #include <dt-bindings/interrupt-controller/apple-aic.h> 66 63 67 64 /* 68 - * AIC registers (MMIO) 65 + * AIC v1 registers (MMIO) 69 66 */ 70 67 71 68 #define AIC_INFO 0x0004 72 - #define AIC_INFO_NR_HW GENMASK(15, 0) 69 + #define AIC_INFO_NR_IRQ GENMASK(15, 0) 73 70 74 71 #define AIC_CONFIG 0x0010 75 72 76 73 #define AIC_WHOAMI 0x2000 77 74 #define AIC_EVENT 0x2004 78 - #define AIC_EVENT_TYPE GENMASK(31, 16) 75 + #define AIC_EVENT_DIE GENMASK(31, 24) 76 + #define AIC_EVENT_TYPE GENMASK(23, 16) 79 77 #define AIC_EVENT_NUM GENMASK(15, 0) 80 78 81 - #define AIC_EVENT_TYPE_HW 1 79 + #define AIC_EVENT_TYPE_FIQ 0 /* Software use */ 80 + #define AIC_EVENT_TYPE_IRQ 1 82 81 #define AIC_EVENT_TYPE_IPI 4 83 82 #define AIC_EVENT_IPI_OTHER 1 84 83 #define AIC_EVENT_IPI_SELF 2 ··· 96 91 #define AIC_IPI_SELF BIT(31) 97 92 98 93 #define AIC_TARGET_CPU 0x3000 99 - #define AIC_SW_SET 0x4000 100 - #define AIC_SW_CLR 0x4080 101 - #define AIC_MASK_SET 0x4100 102 - #define AIC_MASK_CLR 0x4180 103 94 104 95 #define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7)) 105 96 #define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7)) 106 97 #define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7)) 107 
98 #define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7)) 108 99 100 + #define AIC_MAX_IRQ 0x400 101 + 102 + /* 103 + * AIC v2 registers (MMIO) 104 + */ 105 + 106 + #define AIC2_VERSION 0x0000 107 + #define AIC2_VERSION_VER GENMASK(7, 0) 108 + 109 + #define AIC2_INFO1 0x0004 110 + #define AIC2_INFO1_NR_IRQ GENMASK(15, 0) 111 + #define AIC2_INFO1_LAST_DIE GENMASK(27, 24) 112 + 113 + #define AIC2_INFO2 0x0008 114 + 115 + #define AIC2_INFO3 0x000c 116 + #define AIC2_INFO3_MAX_IRQ GENMASK(15, 0) 117 + #define AIC2_INFO3_MAX_DIE GENMASK(27, 24) 118 + 119 + #define AIC2_RESET 0x0010 120 + #define AIC2_RESET_RESET BIT(0) 121 + 122 + #define AIC2_CONFIG 0x0014 123 + #define AIC2_CONFIG_ENABLE BIT(0) 124 + #define AIC2_CONFIG_PREFER_PCPU BIT(28) 125 + 126 + #define AIC2_TIMEOUT 0x0028 127 + #define AIC2_CLUSTER_PRIO 0x0030 128 + #define AIC2_DELAY_GROUPS 0x0100 129 + 130 + #define AIC2_IRQ_CFG 0x2000 131 + 132 + /* 133 + * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG: 134 + * 135 + * Repeat for each die: 136 + * IRQ_CFG: u32 * MAX_IRQS 137 + * SW_SET: u32 * (MAX_IRQS / 32) 138 + * SW_CLR: u32 * (MAX_IRQS / 32) 139 + * MASK_SET: u32 * (MAX_IRQS / 32) 140 + * MASK_CLR: u32 * (MAX_IRQS / 32) 141 + * HW_STATE: u32 * (MAX_IRQS / 32) 142 + * 143 + * This is followed by a set of event registers, each 16K page aligned. 144 + * The first one is the AP event register we will use. Unfortunately, 145 + * the actual implemented die count is not specified anywhere in the 146 + * capability registers, so we have to explicitly specify the event 147 + * register as a second reg entry in the device tree to remain 148 + * forward-compatible. 
149 + */ 150 + 151 + #define AIC2_IRQ_CFG_TARGET GENMASK(3, 0) 152 + #define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5) 153 + 109 154 #define MASK_REG(x) (4 * ((x) >> 5)) 110 155 #define MASK_BIT(x) BIT((x) & GENMASK(4, 0)) 111 156 112 157 /* 113 158 * IMP-DEF sysregs that control FIQ sources 114 - * Note: sysreg-based IPIs are not supported yet. 115 159 */ 116 - 117 - /* Core PMC control register */ 118 - #define SYS_IMP_APL_PMCR0_EL1 sys_reg(3, 1, 15, 0, 0) 119 - #define PMCR0_IMODE GENMASK(10, 8) 120 - #define PMCR0_IMODE_OFF 0 121 - #define PMCR0_IMODE_PMI 1 122 - #define PMCR0_IMODE_AIC 2 123 - #define PMCR0_IMODE_HALT 3 124 - #define PMCR0_IMODE_FIQ 4 125 - #define PMCR0_IACT BIT(11) 126 160 127 161 /* IPI request registers */ 128 162 #define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0) ··· 199 155 #define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4) 200 156 #define UPMSR_IACT BIT(0) 201 157 202 - #define AIC_NR_FIQ 4 158 + /* MPIDR fields */ 159 + #define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0) 160 + #define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1) 161 + 162 + #define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \ 163 + FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \ 164 + FIELD_PREP(AIC_EVENT_NUM, irq)) 165 + #define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \ 166 + FIELD_PREP(AIC_EVENT_NUM, x)) 167 + #define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x) 168 + #define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x) 169 + #define AIC_NR_FIQ 6 203 170 #define AIC_NR_SWIPI 32 204 171 205 172 /* ··· 228 173 #define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS 229 174 #define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT 230 175 176 + DEFINE_STATIC_KEY_TRUE(use_fast_ipi); 177 + 178 + struct aic_info { 179 + int version; 180 + 181 + /* Register offsets */ 182 + u32 event; 183 + u32 target_cpu; 184 + u32 irq_cfg; 185 + u32 sw_set; 186 + u32 sw_clr; 187 + u32 mask_set; 188 + u32 mask_clr; 189 + 190 + u32 die_stride; 191 + 192 + /* Features 
*/ 193 + bool fast_ipi; 194 + }; 195 + 196 + static const struct aic_info aic1_info = { 197 + .version = 1, 198 + 199 + .event = AIC_EVENT, 200 + .target_cpu = AIC_TARGET_CPU, 201 + }; 202 + 203 + static const struct aic_info aic1_fipi_info = { 204 + .version = 1, 205 + 206 + .event = AIC_EVENT, 207 + .target_cpu = AIC_TARGET_CPU, 208 + 209 + .fast_ipi = true, 210 + }; 211 + 212 + static const struct aic_info aic2_info = { 213 + .version = 2, 214 + 215 + .irq_cfg = AIC2_IRQ_CFG, 216 + 217 + .fast_ipi = true, 218 + }; 219 + 220 + static const struct of_device_id aic_info_match[] = { 221 + { 222 + .compatible = "apple,t8103-aic", 223 + .data = &aic1_fipi_info, 224 + }, 225 + { 226 + .compatible = "apple,aic", 227 + .data = &aic1_info, 228 + }, 229 + { 230 + .compatible = "apple,aic2", 231 + .data = &aic2_info, 232 + }, 233 + {} 234 + }; 235 + 231 236 struct aic_irq_chip { 232 237 void __iomem *base; 238 + void __iomem *event; 233 239 struct irq_domain *hw_domain; 234 240 struct irq_domain *ipi_domain; 235 - int nr_hw; 241 + struct { 242 + cpumask_t aff; 243 + } *fiq_aff[AIC_NR_FIQ]; 244 + 245 + int nr_irq; 246 + int max_irq; 247 + int nr_die; 248 + int max_die; 249 + 250 + struct aic_info info; 236 251 }; 237 252 238 253 static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked); ··· 330 205 331 206 static void aic_irq_mask(struct irq_data *d) 332 207 { 208 + irq_hw_number_t hwirq = irqd_to_hwirq(d); 333 209 struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); 334 210 335 - aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)), 336 - MASK_BIT(irqd_to_hwirq(d))); 211 + u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride; 212 + u32 irq = AIC_HWIRQ_IRQ(hwirq); 213 + 214 + aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq)); 337 215 } 338 216 339 217 static void aic_irq_unmask(struct irq_data *d) 340 218 { 219 + irq_hw_number_t hwirq = irqd_to_hwirq(d); 341 220 struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); 342 221 343 - aic_ic_write(ic, 
AIC_MASK_CLR + MASK_REG(d->hwirq), 344 - MASK_BIT(irqd_to_hwirq(d))); 222 + u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride; 223 + u32 irq = AIC_HWIRQ_IRQ(hwirq); 224 + 225 + aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq)); 345 226 } 346 227 347 228 static void aic_irq_eoi(struct irq_data *d) ··· 370 239 * We cannot use a relaxed read here, as reads from DMA buffers 371 240 * need to be ordered after the IRQ fires. 372 241 */ 373 - event = readl(ic->base + AIC_EVENT); 242 + event = readl(ic->event + ic->info.event); 374 243 type = FIELD_GET(AIC_EVENT_TYPE, event); 375 244 irq = FIELD_GET(AIC_EVENT_NUM, event); 376 245 377 - if (type == AIC_EVENT_TYPE_HW) 378 - generic_handle_domain_irq(aic_irqc->hw_domain, irq); 246 + if (type == AIC_EVENT_TYPE_IRQ) 247 + generic_handle_domain_irq(aic_irqc->hw_domain, event); 379 248 else if (type == AIC_EVENT_TYPE_IPI && irq == 1) 380 249 aic_handle_ipi(regs); 381 250 else if (event != 0) ··· 402 271 struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); 403 272 int cpu; 404 273 274 + BUG_ON(!ic->info.target_cpu); 275 + 405 276 if (force) 406 277 cpu = cpumask_first(mask_val); 407 278 else 408 279 cpu = cpumask_any_and(mask_val, cpu_online_mask); 409 280 410 - aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu)); 281 + aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu)); 411 282 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 412 283 413 284 return IRQ_SET_MASK_OK; ··· 433 300 .irq_set_type = aic_irq_set_type, 434 301 }; 435 302 303 + static struct irq_chip aic2_chip = { 304 + .name = "AIC2", 305 + .irq_mask = aic_irq_mask, 306 + .irq_unmask = aic_irq_unmask, 307 + .irq_eoi = aic_irq_eoi, 308 + .irq_set_type = aic_irq_set_type, 309 + }; 310 + 436 311 /* 437 312 * FIQ irqchip 438 313 */ 439 314 440 315 static unsigned long aic_fiq_get_idx(struct irq_data *d) 441 316 { 442 - struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d); 443 - 444 - return irqd_to_hwirq(d) - 
ic->nr_hw; 317 + return AIC_HWIRQ_IRQ(irqd_to_hwirq(d)); 445 318 } 446 319 447 320 static void aic_fiq_set_mask(struct irq_data *d) ··· 525 386 */ 526 387 527 388 if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) { 528 - pr_err_ratelimited("Fast IPI fired. Acking.\n"); 529 - write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); 389 + if (static_branch_likely(&use_fast_ipi)) { 390 + aic_handle_ipi(regs); 391 + } else { 392 + pr_err_ratelimited("Fast IPI fired. Acking.\n"); 393 + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); 394 + } 530 395 } 531 396 532 397 if (TIMER_FIRING(read_sysreg(cntp_ctl_el0))) 533 398 generic_handle_domain_irq(aic_irqc->hw_domain, 534 - aic_irqc->nr_hw + AIC_TMR_EL0_PHYS); 399 + AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS)); 535 400 536 401 if (TIMER_FIRING(read_sysreg(cntv_ctl_el0))) 537 402 generic_handle_domain_irq(aic_irqc->hw_domain, 538 - aic_irqc->nr_hw + AIC_TMR_EL0_VIRT); 403 + AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT)); 539 404 540 405 if (is_kernel_in_hyp_mode()) { 541 406 uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2); ··· 547 404 if ((enabled & VM_TMR_FIQ_ENABLE_P) && 548 405 TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02))) 549 406 generic_handle_domain_irq(aic_irqc->hw_domain, 550 - aic_irqc->nr_hw + AIC_TMR_EL02_PHYS); 407 + AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS)); 551 408 552 409 if ((enabled & VM_TMR_FIQ_ENABLE_V) && 553 410 TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02))) 554 411 generic_handle_domain_irq(aic_irqc->hw_domain, 555 - aic_irqc->nr_hw + AIC_TMR_EL02_VIRT); 412 + AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT)); 556 413 } 557 414 558 - if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) == 559 - (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) { 560 - /* 561 - * Not supported yet, let's figure out how to handle this when 562 - * we implement these proprietary performance counters. For now, 563 - * just mask it and move on. 564 - */ 565 - pr_err_ratelimited("PMC FIQ fired. 
Masking.\n"); 566 - sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT, 567 - FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF)); 415 + if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) { 416 + int irq; 417 + if (cpumask_test_cpu(smp_processor_id(), 418 + &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff)) 419 + irq = AIC_CPU_PMU_P; 420 + else 421 + irq = AIC_CPU_PMU_E; 422 + generic_handle_domain_irq(aic_irqc->hw_domain, 423 + AIC_FIQ_HWIRQ(irq)); 568 424 } 569 425 570 426 if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ && ··· 597 455 irq_hw_number_t hw) 598 456 { 599 457 struct aic_irq_chip *ic = id->host_data; 458 + u32 type = FIELD_GET(AIC_EVENT_TYPE, hw); 459 + struct irq_chip *chip = &aic_chip; 600 460 601 - if (hw < ic->nr_hw) { 602 - irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data, 461 + if (ic->info.version == 2) 462 + chip = &aic2_chip; 463 + 464 + if (type == AIC_EVENT_TYPE_IRQ) { 465 + irq_domain_set_info(id, irq, hw, chip, id->host_data, 603 466 handle_fasteoi_irq, NULL, NULL); 604 467 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 605 468 } else { 606 - irq_set_percpu_devid(irq); 469 + int fiq = FIELD_GET(AIC_EVENT_NUM, hw); 470 + 471 + switch (fiq) { 472 + case AIC_CPU_PMU_P: 473 + case AIC_CPU_PMU_E: 474 + irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff); 475 + break; 476 + default: 477 + irq_set_percpu_devid(irq); 478 + break; 479 + } 480 + 607 481 irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data, 608 482 handle_percpu_devid_irq, NULL, NULL); 609 483 } ··· 633 475 unsigned int *type) 634 476 { 635 477 struct aic_irq_chip *ic = id->host_data; 478 + u32 *args; 479 + u32 die = 0; 636 480 637 - if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode)) 481 + if (fwspec->param_count < 3 || fwspec->param_count > 4 || 482 + !is_of_node(fwspec->fwnode)) 638 483 return -EINVAL; 484 + 485 + args = &fwspec->param[1]; 486 + 487 + if (fwspec->param_count == 4) { 488 + die 
= args[0]; 489 + args++; 490 + } 639 491 640 492 switch (fwspec->param[0]) { 641 493 case AIC_IRQ: 642 - if (fwspec->param[1] >= ic->nr_hw) 494 + if (die >= ic->nr_die) 643 495 return -EINVAL; 644 - *hwirq = fwspec->param[1]; 496 + if (args[0] >= ic->nr_irq) 497 + return -EINVAL; 498 + *hwirq = AIC_IRQ_HWIRQ(die, args[0]); 645 499 break; 646 500 case AIC_FIQ: 647 - if (fwspec->param[1] >= AIC_NR_FIQ) 501 + if (die != 0) 648 502 return -EINVAL; 649 - *hwirq = ic->nr_hw + fwspec->param[1]; 503 + if (args[0] >= AIC_NR_FIQ) 504 + return -EINVAL; 505 + *hwirq = AIC_FIQ_HWIRQ(args[0]); 650 506 651 507 /* 652 508 * In EL1 the non-redirected registers are the guest's, 653 509 * not EL2's, so remap the hwirqs to match. 654 510 */ 655 511 if (!is_kernel_in_hyp_mode()) { 656 - switch (fwspec->param[1]) { 512 + switch (args[0]) { 657 513 case AIC_TMR_GUEST_PHYS: 658 - *hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS; 514 + *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS); 659 515 break; 660 516 case AIC_TMR_GUEST_VIRT: 661 - *hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT; 517 + *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT); 662 518 break; 663 519 case AIC_TMR_HV_PHYS: 664 520 case AIC_TMR_HV_VIRT: ··· 686 514 return -EINVAL; 687 515 } 688 516 689 - *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 517 + *type = args[1] & IRQ_TYPE_SENSE_MASK; 690 518 691 519 return 0; 692 520 } ··· 735 563 * IPI irqchip 736 564 */ 737 565 566 + static void aic_ipi_send_fast(int cpu) 567 + { 568 + u64 mpidr = cpu_logical_map(cpu); 569 + u64 my_mpidr = read_cpuid_mpidr(); 570 + u64 cluster = MPIDR_CLUSTER(mpidr); 571 + u64 idx = MPIDR_CPU(mpidr); 572 + 573 + if (MPIDR_CLUSTER(my_mpidr) == cluster) 574 + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), 575 + SYS_IMP_APL_IPI_RR_LOCAL_EL1); 576 + else 577 + write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster), 578 + SYS_IMP_APL_IPI_RR_GLOBAL_EL1); 579 + isb(); 580 + } 581 + 738 582 static void aic_ipi_mask(struct irq_data *d) 739 583 { 740 584 u32 irq_bit = 
BIT(irqd_to_hwirq(d)); ··· 776 588 * If a pending vIPI was unmasked, raise a HW IPI to ourselves. 777 589 * No barriers needed here since this is a self-IPI. 778 590 */ 779 - if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) 780 - aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id())); 591 + if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) { 592 + if (static_branch_likely(&use_fast_ipi)) 593 + aic_ipi_send_fast(smp_processor_id()); 594 + else 595 + aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id())); 596 + } 781 597 } 782 598 783 599 static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) ··· 809 617 smp_mb__after_atomic(); 810 618 811 619 if (!(pending & irq_bit) && 812 - (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) 813 - send |= AIC_IPI_SEND_CPU(cpu); 620 + (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) { 621 + if (static_branch_likely(&use_fast_ipi)) 622 + aic_ipi_send_fast(cpu); 623 + else 624 + send |= AIC_IPI_SEND_CPU(cpu); 625 + } 814 626 } 815 627 816 628 /* ··· 846 650 /* 847 651 * Ack the IPI. We need to order this after the AIC event read, but 848 652 * that is enforced by normal MMIO ordering guarantees. 653 + * 654 + * For the Fast IPI case, this needs to be ordered before the vIPI 655 + * handling below, so we need to isb(); 849 656 */ 850 - aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER); 657 + if (static_branch_likely(&use_fast_ipi)) { 658 + write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1); 659 + isb(); 660 + } else { 661 + aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER); 662 + } 851 663 852 664 /* 853 665 * The mask read does not need to be ordered. Only we can change ··· 883 679 * No ordering needed here; at worst this just changes the timing of 884 680 * when the next IPI will be delivered. 
885 681 */ 886 - aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); 682 + if (!static_branch_likely(&use_fast_ipi)) 683 + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); 887 684 } 888 685 889 686 static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq, ··· 971 766 /* Commit all of the above */ 972 767 isb(); 973 768 974 - /* 975 - * Make sure the kernel's idea of logical CPU order is the same as AIC's 976 - * If we ever end up with a mismatch here, we will have to introduce 977 - * a mapping table similar to what other irqchip drivers do. 978 - */ 979 - WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id()); 769 + if (aic_irqc->info.version == 1) { 770 + /* 771 + * Make sure the kernel's idea of logical CPU order is the same as AIC's 772 + * If we ever end up with a mismatch here, we will have to introduce 773 + * a mapping table similar to what other irqchip drivers do. 774 + */ 775 + WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id()); 980 776 981 - /* 982 - * Always keep IPIs unmasked at the hardware level (except auto-masking 983 - * by AIC during processing). We manage masks at the vIPI level. 984 - */ 985 - aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER); 986 - aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF); 987 - aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); 777 + /* 778 + * Always keep IPIs unmasked at the hardware level (except auto-masking 779 + * by AIC during processing). We manage masks at the vIPI level. 780 + * These registers only exist on AICv1, AICv2 always uses fast IPIs. 
781 + */ 782 + aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER); 783 + if (static_branch_likely(&use_fast_ipi)) { 784 + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER); 785 + } else { 786 + aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF); 787 + aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER); 788 + } 789 + } 988 790 989 791 /* Initialize the local mask state */ 990 792 __this_cpu_write(aic_fiq_unmasked, 0); ··· 1005 793 .no_hw_deactivation = true, 1006 794 }; 1007 795 796 + static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff) 797 + { 798 + int i, n; 799 + u32 fiq; 800 + 801 + if (of_property_read_u32(aff, "apple,fiq-index", &fiq) || 802 + WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq]) 803 + return; 804 + 805 + n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32)); 806 + if (WARN_ON(n < 0)) 807 + return; 808 + 809 + ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL); 810 + if (!ic->fiq_aff[fiq]) 811 + return; 812 + 813 + for (i = 0; i < n; i++) { 814 + struct device_node *cpu_node; 815 + u32 cpu_phandle; 816 + int cpu; 817 + 818 + if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle)) 819 + continue; 820 + 821 + cpu_node = of_find_node_by_phandle(cpu_phandle); 822 + if (WARN_ON(!cpu_node)) 823 + continue; 824 + 825 + cpu = of_cpu_node_to_id(cpu_node); 826 + if (WARN_ON(cpu < 0)) 827 + continue; 828 + 829 + cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff); 830 + } 831 + } 832 + 1008 833 static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent) 1009 834 { 1010 - int i; 835 + int i, die; 836 + u32 off, start_off; 1011 837 void __iomem *regs; 1012 - u32 info; 1013 838 struct aic_irq_chip *irqc; 839 + struct device_node *affs; 840 + const struct of_device_id *match; 1014 841 1015 842 regs = of_iomap(node, 0); 1016 843 if (WARN_ON(!regs)) 1017 844 return -EIO; 1018 845 1019 846 irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); 1020 - if 
(!irqc) 847 + if (!irqc) { 848 + iounmap(regs); 1021 849 return -ENOMEM; 850 + } 1022 851 1023 - aic_irqc = irqc; 1024 852 irqc->base = regs; 1025 853 1026 - info = aic_ic_read(irqc, AIC_INFO); 1027 - irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info); 854 + match = of_match_node(aic_info_match, node); 855 + if (!match) 856 + goto err_unmap; 1028 857 1029 - irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node), 1030 - irqc->nr_hw + AIC_NR_FIQ, 1031 - &aic_irq_domain_ops, irqc); 1032 - if (WARN_ON(!irqc->hw_domain)) { 1033 - iounmap(irqc->base); 1034 - kfree(irqc); 1035 - return -ENODEV; 858 + irqc->info = *(struct aic_info *)match->data; 859 + 860 + aic_irqc = irqc; 861 + 862 + switch (irqc->info.version) { 863 + case 1: { 864 + u32 info; 865 + 866 + info = aic_ic_read(irqc, AIC_INFO); 867 + irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info); 868 + irqc->max_irq = AIC_MAX_IRQ; 869 + irqc->nr_die = irqc->max_die = 1; 870 + 871 + off = start_off = irqc->info.target_cpu; 872 + off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */ 873 + 874 + irqc->event = irqc->base; 875 + 876 + break; 1036 877 } 878 + case 2: { 879 + u32 info1, info3; 880 + 881 + info1 = aic_ic_read(irqc, AIC2_INFO1); 882 + info3 = aic_ic_read(irqc, AIC2_INFO3); 883 + 884 + irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1); 885 + irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3); 886 + irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1; 887 + irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3); 888 + 889 + off = start_off = irqc->info.irq_cfg; 890 + off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */ 891 + 892 + irqc->event = of_iomap(node, 1); 893 + if (WARN_ON(!irqc->event)) 894 + goto err_unmap; 895 + 896 + break; 897 + } 898 + } 899 + 900 + irqc->info.sw_set = off; 901 + off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */ 902 + irqc->info.sw_clr = off; 903 + off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */ 904 + irqc->info.mask_set = off; 905 + off += sizeof(u32) * 
(irqc->max_irq >> 5); /* MASK_SET */ 906 + irqc->info.mask_clr = off; 907 + off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */ 908 + off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */ 909 + 910 + if (irqc->info.fast_ipi) 911 + static_branch_enable(&use_fast_ipi); 912 + else 913 + static_branch_disable(&use_fast_ipi); 914 + 915 + irqc->info.die_stride = off - start_off; 916 + 917 + irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node), 918 + &aic_irq_domain_ops, irqc); 919 + if (WARN_ON(!irqc->hw_domain)) 920 + goto err_unmap; 1037 921 1038 922 irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED); 1039 923 1040 - if (aic_init_smp(irqc, node)) { 1041 - irq_domain_remove(irqc->hw_domain); 1042 - iounmap(irqc->base); 1043 - kfree(irqc); 1044 - return -ENODEV; 924 + if (aic_init_smp(irqc, node)) 925 + goto err_remove_domain; 926 + 927 + affs = of_get_child_by_name(node, "affinities"); 928 + if (affs) { 929 + struct device_node *chld; 930 + 931 + for_each_child_of_node(affs, chld) 932 + build_fiq_affinity(irqc, chld); 1045 933 } 1046 934 1047 935 set_handle_irq(aic_handle_irq); 1048 936 set_handle_fiq(aic_handle_fiq); 1049 937 1050 - for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++) 1051 - aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX); 1052 - for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++) 1053 - aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX); 1054 - for (i = 0; i < irqc->nr_hw; i++) 1055 - aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1); 938 + off = 0; 939 + for (die = 0; die < irqc->nr_die; die++) { 940 + for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++) 941 + aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX); 942 + for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++) 943 + aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX); 944 + if (irqc->info.target_cpu) 945 + for (i = 0; i < irqc->nr_irq; i++) 946 + aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1); 947 + off += irqc->info.die_stride; 948 + } 949 + 950 + if 
(irqc->info.version == 2) { 951 + u32 config = aic_ic_read(irqc, AIC2_CONFIG); 952 + 953 + config |= AIC2_CONFIG_ENABLE; 954 + aic_ic_write(irqc, AIC2_CONFIG, config); 955 + } 1056 956 1057 957 if (!is_kernel_in_hyp_mode()) 1058 958 pr_info("Kernel running in EL1, mapping interrupts"); 959 + 960 + if (static_branch_likely(&use_fast_ipi)) 961 + pr_info("Using Fast IPIs"); 1059 962 1060 963 cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING, 1061 964 "irqchip/apple-aic/ipi:starting", ··· 1178 851 1179 852 vgic_set_kvm_info(&vgic_info); 1180 853 1181 - pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n", 1182 - irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI); 854 + pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs", 855 + irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI); 1183 856 1184 857 return 0; 858 + 859 + err_remove_domain: 860 + irq_domain_remove(irqc->hw_domain); 861 + err_unmap: 862 + if (irqc->event && irqc->event != irqc->base) 863 + iounmap(irqc->event); 864 + iounmap(irqc->base); 865 + kfree(irqc); 866 + return -ENODEV; 1185 867 } 1186 868 1187 - IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init); 869 + IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init); 870 + IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
-1
drivers/irqchip/irq-ftintc010.c
··· 11 11 #include <linux/irq.h> 12 12 #include <linux/io.h> 13 13 #include <linux/irqchip.h> 14 - #include <linux/irqchip/versatile-fpga.h> 15 14 #include <linux/irqdomain.h> 16 15 #include <linux/module.h> 17 16 #include <linux/of.h>
+1 -1
drivers/irqchip/irq-gic-v3.c
··· 1211 1211 * Ensure that stores to Normal memory are visible to the 1212 1212 * other CPUs before issuing the IPI. 1213 1213 */ 1214 - wmb(); 1214 + dsb(ishst); 1215 1215 1216 1216 for_each_cpu(cpu, mask) { 1217 1217 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
+55 -49
drivers/irqchip/irq-gic.c
··· 34 34 #include <linux/irqdomain.h> 35 35 #include <linux/interrupt.h> 36 36 #include <linux/percpu.h> 37 + #include <linux/seq_file.h> 37 38 #include <linux/slab.h> 38 39 #include <linux/irqchip.h> 39 40 #include <linux/irqchip/chained_irq.h> ··· 67 66 }; 68 67 69 68 struct gic_chip_data { 70 - struct irq_chip chip; 71 69 union gic_base dist_base; 72 70 union gic_base cpu_base; 73 71 void __iomem *raw_dist_base; ··· 397 397 chained_irq_exit(chip, desc); 398 398 } 399 399 400 - static const struct irq_chip gic_chip = { 401 - .irq_mask = gic_mask_irq, 402 - .irq_unmask = gic_unmask_irq, 403 - .irq_eoi = gic_eoi_irq, 404 - .irq_set_type = gic_set_type, 405 - .irq_retrigger = gic_retrigger, 406 - .irq_get_irqchip_state = gic_irq_get_irqchip_state, 407 - .irq_set_irqchip_state = gic_irq_set_irqchip_state, 408 - .flags = IRQCHIP_SET_TYPE_MASKED | 409 - IRQCHIP_SKIP_SET_WAKE | 410 - IRQCHIP_MASK_ON_SUSPEND, 411 - }; 400 + static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p) 401 + { 402 + struct gic_chip_data *gic = irq_data_get_irq_chip_data(d); 403 + 404 + if (gic->domain->dev) 405 + seq_printf(p, gic->domain->dev->of_node->name); 406 + else 407 + seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0])); 408 + } 412 409 413 410 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) 414 411 { ··· 796 799 bool force) 797 800 { 798 801 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); 802 + struct gic_chip_data *gic = irq_data_get_irq_chip_data(d); 799 803 unsigned int cpu; 804 + 805 + if (unlikely(gic != &gic_data[0])) 806 + return -EINVAL; 800 807 801 808 if (!force) 802 809 cpu = cpumask_any_and(mask_val, cpu_online_mask); ··· 880 879 #define gic_set_affinity NULL 881 880 #define gic_ipi_send_mask NULL 882 881 #endif 882 + 883 + static const struct irq_chip gic_chip = { 884 + .irq_mask = gic_mask_irq, 885 + .irq_unmask = gic_unmask_irq, 886 + .irq_eoi = gic_eoi_irq, 887 + .irq_set_type = gic_set_type, 888 + .irq_retrigger = 
gic_retrigger, 889 + .irq_set_affinity = gic_set_affinity, 890 + .ipi_send_mask = gic_ipi_send_mask, 891 + .irq_get_irqchip_state = gic_irq_get_irqchip_state, 892 + .irq_set_irqchip_state = gic_irq_set_irqchip_state, 893 + .irq_print_chip = gic_irq_print_chip, 894 + .flags = IRQCHIP_SET_TYPE_MASKED | 895 + IRQCHIP_SKIP_SET_WAKE | 896 + IRQCHIP_MASK_ON_SUSPEND, 897 + }; 898 + 899 + static const struct irq_chip gic_chip_mode1 = { 900 + .name = "GICv2", 901 + .irq_mask = gic_eoimode1_mask_irq, 902 + .irq_unmask = gic_unmask_irq, 903 + .irq_eoi = gic_eoimode1_eoi_irq, 904 + .irq_set_type = gic_set_type, 905 + .irq_retrigger = gic_retrigger, 906 + .irq_set_affinity = gic_set_affinity, 907 + .ipi_send_mask = gic_ipi_send_mask, 908 + .irq_get_irqchip_state = gic_irq_get_irqchip_state, 909 + .irq_set_irqchip_state = gic_irq_set_irqchip_state, 910 + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 911 + .flags = IRQCHIP_SET_TYPE_MASKED | 912 + IRQCHIP_SKIP_SET_WAKE | 913 + IRQCHIP_MASK_ON_SUSPEND, 914 + }; 883 915 884 916 #ifdef CONFIG_BL_SWITCHER 885 917 /* ··· 1058 1024 { 1059 1025 struct gic_chip_data *gic = d->host_data; 1060 1026 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); 1027 + const struct irq_chip *chip; 1028 + 1029 + chip = (static_branch_likely(&supports_deactivate_key) && 1030 + gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip; 1061 1031 1062 1032 switch (hw) { 1063 1033 case 0 ... 
31: 1064 1034 irq_set_percpu_devid(irq); 1065 - irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, 1035 + irq_domain_set_info(d, irq, hw, chip, d->host_data, 1066 1036 handle_percpu_devid_irq, NULL, NULL); 1067 1037 break; 1068 1038 default: 1069 - irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, 1039 + irq_domain_set_info(d, irq, hw, chip, d->host_data, 1070 1040 handle_fasteoi_irq, NULL, NULL); 1071 1041 irq_set_probe(irq); 1072 1042 irqd_set_single_target(irqd); ··· 1164 1126 .map = gic_irq_domain_map, 1165 1127 .unmap = gic_irq_domain_unmap, 1166 1128 }; 1167 - 1168 - static void gic_init_chip(struct gic_chip_data *gic, struct device *dev, 1169 - const char *name, bool use_eoimode1) 1170 - { 1171 - /* Initialize irq_chip */ 1172 - gic->chip = gic_chip; 1173 - gic->chip.name = name; 1174 - gic->chip.parent_device = dev; 1175 - 1176 - if (use_eoimode1) { 1177 - gic->chip.irq_mask = gic_eoimode1_mask_irq; 1178 - gic->chip.irq_eoi = gic_eoimode1_eoi_irq; 1179 - gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity; 1180 - } 1181 - 1182 - if (gic == &gic_data[0]) { 1183 - gic->chip.irq_set_affinity = gic_set_affinity; 1184 - gic->chip.ipi_send_mask = gic_ipi_send_mask; 1185 - } 1186 - } 1187 1129 1188 1130 static int gic_init_bases(struct gic_chip_data *gic, 1189 1131 struct fwnode_handle *handle) ··· 1264 1246 static int __init __gic_init_bases(struct gic_chip_data *gic, 1265 1247 struct fwnode_handle *handle) 1266 1248 { 1267 - char *name; 1268 1249 int i, ret; 1269 1250 1270 1251 if (WARN_ON(!gic || gic->domain)) ··· 1283 1266 pr_info("GIC: Using split EOI/Deactivate mode\n"); 1284 1267 } 1285 1268 1286 - if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) { 1287 - name = kasprintf(GFP_KERNEL, "GICv2"); 1288 - gic_init_chip(gic, NULL, name, true); 1289 - } else { 1290 - name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0])); 1291 - gic_init_chip(gic, NULL, name, false); 1292 - } 1293 - 1294 1269 ret = 
gic_init_bases(gic, handle); 1295 - if (ret) 1296 - kfree(name); 1297 - else if (gic == &gic_data[0]) 1270 + if (gic == &gic_data[0]) 1298 1271 gic_smp_init(); 1299 1272 1300 1273 return ret; ··· 1467 1460 if (!*gic) 1468 1461 return -ENOMEM; 1469 1462 1470 - gic_init_chip(*gic, dev, dev->of_node->name, false); 1471 - 1472 1463 ret = gic_of_setup(*gic, dev->of_node); 1473 1464 if (ret) 1474 1465 return ret; ··· 1477 1472 return ret; 1478 1473 } 1479 1474 1475 + irq_domain_set_pm_device((*gic)->domain, dev); 1480 1476 irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic); 1481 1477 1482 1478 return 0;
+3 -5
drivers/irqchip/irq-imx-intmux.c
··· 61 61 #define CHAN_MAX_NUM 0x8 62 62 63 63 struct intmux_irqchip_data { 64 - struct irq_chip chip; 65 64 u32 saved_reg; 66 65 int chanidx; 67 66 int irq; ··· 113 114 raw_spin_unlock_irqrestore(&data->lock, flags); 114 115 } 115 116 116 - static struct irq_chip imx_intmux_irq_chip = { 117 + static struct irq_chip imx_intmux_irq_chip __ro_after_init = { 117 118 .name = "intmux", 118 119 .irq_mask = imx_intmux_irq_mask, 119 120 .irq_unmask = imx_intmux_irq_unmask, ··· 125 126 struct intmux_irqchip_data *data = h->host_data; 126 127 127 128 irq_set_chip_data(irq, data); 128 - irq_set_chip_and_handler(irq, &data->chip, handle_level_irq); 129 + irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); 129 130 130 131 return 0; 131 132 } ··· 240 241 } 241 242 242 243 for (i = 0; i < channum; i++) { 243 - data->irqchip_data[i].chip = imx_intmux_irq_chip; 244 - data->irqchip_data[i].chip.parent_device = &pdev->dev; 245 244 data->irqchip_data[i].chanidx = i; 246 245 247 246 data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); ··· 257 260 goto out; 258 261 } 259 262 data->irqchip_data[i].domain = domain; 263 + irq_domain_set_pm_device(domain, &pdev->dev); 260 264 261 265 /* disable all interrupt sources of this channel firstly */ 262 266 writel_relaxed(0, data->regs + CHANIER(i));
+22 -12
drivers/irqchip/irq-lpc32xx.c
··· 11 11 #include <linux/of_address.h> 12 12 #include <linux/of_irq.h> 13 13 #include <linux/of_platform.h> 14 + #include <linux/seq_file.h> 14 15 #include <linux/slab.h> 15 16 #include <asm/exception.h> 16 17 ··· 26 25 27 26 struct lpc32xx_irq_chip { 28 27 void __iomem *base; 28 + phys_addr_t addr; 29 29 struct irq_domain *domain; 30 - struct irq_chip chip; 31 30 }; 32 31 33 32 static struct lpc32xx_irq_chip *lpc32xx_mic_irqc; ··· 119 118 return 0; 120 119 } 121 120 121 + static void lpc32xx_irq_print_chip(struct irq_data *d, struct seq_file *p) 122 + { 123 + struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d); 124 + 125 + if (ic == lpc32xx_mic_irqc) 126 + seq_printf(p, "%08x.mic", ic->addr); 127 + else 128 + seq_printf(p, "%08x.sic", ic->addr); 129 + } 130 + 131 + static const struct irq_chip lpc32xx_chip = { 132 + .irq_ack = lpc32xx_irq_ack, 133 + .irq_mask = lpc32xx_irq_mask, 134 + .irq_unmask = lpc32xx_irq_unmask, 135 + .irq_set_type = lpc32xx_irq_set_type, 136 + .irq_print_chip = lpc32xx_irq_print_chip, 137 + }; 138 + 122 139 static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs) 123 140 { 124 141 struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc; ··· 172 153 struct lpc32xx_irq_chip *ic = id->host_data; 173 154 174 155 irq_set_chip_data(virq, ic); 175 - irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq); 156 + irq_set_chip_and_handler(virq, &lpc32xx_chip, handle_level_irq); 176 157 irq_set_status_flags(virq, IRQ_LEVEL); 177 158 irq_set_noprobe(virq); 178 159 ··· 202 183 if (!irqc) 203 184 return -ENOMEM; 204 185 186 + irqc->addr = addr; 205 187 irqc->base = of_iomap(node, 0); 206 188 if (!irqc->base) { 207 189 pr_err("%pOF: unable to map registers\n", node); ··· 210 190 return -EINVAL; 211 191 } 212 192 213 - irqc->chip.irq_ack = lpc32xx_irq_ack; 214 - irqc->chip.irq_mask = lpc32xx_irq_mask; 215 - irqc->chip.irq_unmask = lpc32xx_irq_unmask; 216 - irqc->chip.irq_set_type = lpc32xx_irq_set_type; 217 - if (is_mic) 218 - 
irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr); 219 - else 220 - irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr); 221 - 222 193 irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS, 223 194 &lpc32xx_irq_domain_ops, irqc); 224 195 if (!irqc->domain) { 225 196 pr_err("unable to add irq domain\n"); 226 197 iounmap(irqc->base); 227 - kfree(irqc->chip.name); 228 198 kfree(irqc); 229 199 return -ENODEV; 230 200 }
+90 -16
drivers/irqchip/irq-meson-gpio.c
··· 16 16 #include <linux/of.h> 17 17 #include <linux/of_address.h> 18 18 19 - #define NUM_CHANNEL 8 19 + #define MAX_NUM_CHANNEL 64 20 20 #define MAX_INPUT_MUX 256 21 21 22 22 #define REG_EDGE_POL 0x00 ··· 26 26 27 27 /* use for A1 like chips */ 28 28 #define REG_PIN_A1_SEL 0x04 29 + /* Used for s4 chips */ 30 + #define REG_EDGE_POL_S4 0x1c 29 31 30 32 /* 31 33 * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by ··· 53 51 unsigned int channel, 54 52 unsigned long hwirq); 55 53 static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl); 54 + static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, 55 + unsigned int type, u32 *channel_hwirq); 56 + static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, 57 + unsigned int type, u32 *channel_hwirq); 56 58 57 59 struct irq_ctl_ops { 58 60 void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl, 59 61 unsigned int channel, unsigned long hwirq); 60 62 void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl); 63 + int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl, 64 + unsigned int type, u32 *channel_hwirq); 61 65 }; 62 66 63 67 struct meson_gpio_irq_params { 64 68 unsigned int nr_hwirq; 69 + unsigned int nr_channels; 65 70 bool support_edge_both; 66 71 unsigned int edge_both_offset; 67 72 unsigned int edge_single_offset; ··· 77 68 struct irq_ctl_ops ops; 78 69 }; 79 70 80 - #define INIT_MESON_COMMON(irqs, init, sel) \ 71 + #define INIT_MESON_COMMON(irqs, init, sel, type) \ 81 72 .nr_hwirq = irqs, \ 82 73 .ops = { \ 83 74 .gpio_irq_init = init, \ 84 75 .gpio_irq_sel_pin = sel, \ 76 + .gpio_irq_set_type = type, \ 85 77 }, 86 78 87 79 #define INIT_MESON8_COMMON_DATA(irqs) \ 88 80 INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \ 89 - meson8_gpio_irq_sel_pin) \ 81 + meson8_gpio_irq_sel_pin, \ 82 + meson8_gpio_irq_set_type) \ 90 83 .edge_single_offset = 0, \ 91 84 .pol_low_offset = 16, \ 92 85 .pin_sel_mask = 0xff, \ 86 + 
.nr_channels = 8, \ 93 87 94 88 #define INIT_MESON_A1_COMMON_DATA(irqs) \ 95 89 INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ 96 - meson_a1_gpio_irq_sel_pin) \ 90 + meson_a1_gpio_irq_sel_pin, \ 91 + meson8_gpio_irq_set_type) \ 97 92 .support_edge_both = true, \ 98 93 .edge_both_offset = 16, \ 99 94 .edge_single_offset = 8, \ 100 95 .pol_low_offset = 0, \ 101 96 .pin_sel_mask = 0x7f, \ 97 + .nr_channels = 8, \ 98 + 99 + #define INIT_MESON_S4_COMMON_DATA(irqs) \ 100 + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ 101 + meson_a1_gpio_irq_sel_pin, \ 102 + meson_s4_gpio_irq_set_type) \ 103 + .support_edge_both = true, \ 104 + .edge_both_offset = 0, \ 105 + .edge_single_offset = 12, \ 106 + .pol_low_offset = 0, \ 107 + .pin_sel_mask = 0xff, \ 108 + .nr_channels = 12, \ 102 109 103 110 static const struct meson_gpio_irq_params meson8_params = { 104 111 INIT_MESON8_COMMON_DATA(134) ··· 146 121 INIT_MESON_A1_COMMON_DATA(62) 147 122 }; 148 123 124 + static const struct meson_gpio_irq_params s4_params = { 125 + INIT_MESON_S4_COMMON_DATA(82) 126 + }; 127 + 149 128 static const struct of_device_id meson_irq_gpio_matches[] = { 150 129 { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, 151 130 { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, ··· 159 130 { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, 160 131 { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, 161 132 { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, 133 + { .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params }, 162 134 { } 163 135 }; 164 136 165 137 struct meson_gpio_irq_controller { 166 138 const struct meson_gpio_irq_params *params; 167 139 void __iomem *base; 168 - u32 channel_irqs[NUM_CHANNEL]; 169 - DECLARE_BITMAP(channel_map, NUM_CHANNEL); 140 + u32 channel_irqs[MAX_NUM_CHANNEL]; 141 + DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); 170 142 spinlock_t lock; 171 143 }; 172 144 ··· 237 207 
spin_lock_irqsave(&ctl->lock, flags); 238 208 239 209 /* Find a free channel */ 240 - idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL); 241 - if (idx >= NUM_CHANNEL) { 210 + idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels); 211 + if (idx >= ctl->params->nr_channels) { 242 212 spin_unlock_irqrestore(&ctl->lock, flags); 243 213 pr_err("No channel available\n"); 244 214 return -ENOSPC; ··· 286 256 clear_bit(idx, ctl->channel_map); 287 257 } 288 258 289 - static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, 290 - unsigned int type, 291 - u32 *channel_hwirq) 259 + static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, 260 + unsigned int type, u32 *channel_hwirq) 292 261 { 293 262 u32 val = 0; 294 263 unsigned int idx; ··· 328 299 return 0; 329 300 } 330 301 302 + /* 303 + * gpio irq relative registers for s4 304 + * -PADCTRL_GPIO_IRQ_CTRL0 305 + * bit[31]: enable/disable all the irq lines 306 + * bit[12-23]: single edge trigger 307 + * bit[0-11]: polarity trigger 308 + * 309 + * -PADCTRL_GPIO_IRQ_CTRL[X] 310 + * bit[0-16]: 7 bits to choose gpio source for irq line 2*[X] - 2 311 + * bit[16-22]:7 bits to choose gpio source for irq line 2*[X] - 1 312 + * where X = 1-6 313 + * 314 + * -PADCTRL_GPIO_IRQ_CTRL[7] 315 + * bit[0-11]: both edge trigger 316 + */ 317 + static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl, 318 + unsigned int type, u32 *channel_hwirq) 319 + { 320 + u32 val = 0; 321 + unsigned int idx; 322 + 323 + idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); 324 + 325 + type &= IRQ_TYPE_SENSE_MASK; 326 + 327 + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0); 328 + 329 + if (type == IRQ_TYPE_EDGE_BOTH) { 330 + val |= BIT(ctl->params->edge_both_offset + idx); 331 + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, 332 + BIT(ctl->params->edge_both_offset + idx), val); 333 + return 0; 334 + } 335 + 336 + if (type & (IRQ_TYPE_LEVEL_LOW | 
IRQ_TYPE_EDGE_FALLING)) 337 + val |= BIT(ctl->params->pol_low_offset + idx); 338 + 339 + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 340 + val |= BIT(ctl->params->edge_single_offset + idx); 341 + 342 + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, 343 + BIT(idx) | BIT(12 + idx), val); 344 + return 0; 345 + }; 346 + 331 347 static unsigned int meson_gpio_irq_type_output(unsigned int type) 332 348 { 333 349 unsigned int sense = type & IRQ_TYPE_SENSE_MASK; ··· 397 323 u32 *channel_hwirq = irq_data_get_irq_chip_data(data); 398 324 int ret; 399 325 400 - ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq); 326 + ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq); 401 327 if (ret) 402 328 return ret; 403 329 ··· 524 450 ret = of_property_read_variable_u32_array(node, 525 451 "amlogic,channel-interrupts", 526 452 ctl->channel_irqs, 527 - NUM_CHANNEL, 528 - NUM_CHANNEL); 453 + ctl->params->nr_channels, 454 + ctl->params->nr_channels); 529 455 if (ret < 0) { 530 - pr_err("can't get %d channel interrupts\n", NUM_CHANNEL); 456 + pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels); 531 457 return ret; 532 458 } 533 459 ··· 581 507 } 582 508 583 509 pr_info("%d to %d gpio interrupt mux initialized\n", 584 - ctl->params->nr_hwirq, NUM_CHANNEL); 510 + ctl->params->nr_hwirq, ctl->params->nr_channels); 585 511 586 512 return 0; 587 513
+18 -10
drivers/irqchip/irq-mvebu-pic.c
··· 18 18 #include <linux/module.h> 19 19 #include <linux/of_irq.h> 20 20 #include <linux/platform_device.h> 21 + #include <linux/seq_file.h> 21 22 22 23 #define PIC_CAUSE 0x0 23 24 #define PIC_MASK 0x4 ··· 30 29 void __iomem *base; 31 30 u32 parent_irq; 32 31 struct irq_domain *domain; 33 - struct irq_chip irq_chip; 32 + struct platform_device *pdev; 34 33 }; 35 34 36 35 static void mvebu_pic_reset(struct mvebu_pic *pic) ··· 67 66 writel(reg, pic->base + PIC_MASK); 68 67 } 69 68 69 + static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p) 70 + { 71 + struct mvebu_pic *pic = irq_data_get_irq_chip_data(d); 72 + 73 + seq_printf(p, dev_name(&pic->pdev->dev)); 74 + } 75 + 76 + static const struct irq_chip mvebu_pic_chip = { 77 + .irq_mask = mvebu_pic_mask_irq, 78 + .irq_unmask = mvebu_pic_unmask_irq, 79 + .irq_eoi = mvebu_pic_eoi_irq, 80 + .irq_print_chip = mvebu_pic_print_chip, 81 + }; 82 + 70 83 static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq, 71 84 irq_hw_number_t hwirq) 72 85 { ··· 88 73 89 74 irq_set_percpu_devid(virq); 90 75 irq_set_chip_data(virq, pic); 91 - irq_set_chip_and_handler(virq, &pic->irq_chip, 92 - handle_percpu_devid_irq); 76 + irq_set_chip_and_handler(virq, &mvebu_pic_chip, handle_percpu_devid_irq); 93 77 irq_set_status_flags(virq, IRQ_LEVEL); 94 78 irq_set_probe(virq); 95 79 ··· 134 120 { 135 121 struct device_node *node = pdev->dev.of_node; 136 122 struct mvebu_pic *pic; 137 - struct irq_chip *irq_chip; 138 123 139 124 pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL); 140 125 if (!pic) 141 126 return -ENOMEM; 142 127 128 + pic->pdev = pdev; 143 129 pic->base = devm_platform_ioremap_resource(pdev, 0); 144 130 if (IS_ERR(pic->base)) 145 131 return PTR_ERR(pic->base); 146 - 147 - irq_chip = &pic->irq_chip; 148 - irq_chip->name = dev_name(&pdev->dev); 149 - irq_chip->irq_mask = mvebu_pic_mask_irq; 150 - irq_chip->irq_unmask = mvebu_pic_unmask_irq; 151 - irq_chip->irq_eoi = 
mvebu_pic_eoi_irq; 152 132 153 133 pic->parent_irq = irq_of_parse_and_map(node, 0); 154 134 if (pic->parent_irq <= 0) {
+2
drivers/irqchip/irq-nvic.c
··· 107 107 108 108 if (!nvic_irq_domain) { 109 109 pr_warn("Failed to allocate irq domain\n"); 110 + iounmap(nvic_base); 110 111 return -ENOMEM; 111 112 } 112 113 ··· 117 116 if (ret) { 118 117 pr_warn("Failed to allocate irq chips\n"); 119 118 irq_domain_remove(nvic_irq_domain); 119 + iounmap(nvic_base); 120 120 return ret; 121 121 } 122 122
+461
drivers/irqchip/irq-qcom-mpm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2021, Linaro Limited 4 + * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. 5 + */ 6 + 7 + #include <linux/delay.h> 8 + #include <linux/err.h> 9 + #include <linux/init.h> 10 + #include <linux/interrupt.h> 11 + #include <linux/io.h> 12 + #include <linux/irqchip.h> 13 + #include <linux/irqdomain.h> 14 + #include <linux/mailbox_client.h> 15 + #include <linux/module.h> 16 + #include <linux/of.h> 17 + #include <linux/of_device.h> 18 + #include <linux/platform_device.h> 19 + #include <linux/pm_domain.h> 20 + #include <linux/slab.h> 21 + #include <linux/soc/qcom/irq.h> 22 + #include <linux/spinlock.h> 23 + 24 + /* 25 + * This is the driver for Qualcomm MPM (MSM Power Manager) interrupt controller, 26 + * which is commonly found on Qualcomm SoCs built on the RPM architecture. 27 + * Sitting in always-on domain, MPM monitors the wakeup interrupts when SoC is 28 + * asleep, and wakes up the AP when one of those interrupts occurs. This driver 29 + * doesn't directly access physical MPM registers though. Instead, the access 30 + * is bridged via a piece of internal memory (SRAM) that is accessible to both 31 + * AP and RPM. This piece of memory is called 'vMPM' in the driver. 32 + * 33 + * When SoC is awake, the vMPM is owned by AP and the register setup by this 34 + * driver all happens on vMPM. When AP is about to get power collapsed, the 35 + * driver sends a mailbox notification to RPM, which will take over the vMPM 36 + * ownership and dump vMPM into physical MPM registers. On wakeup, AP is woken 37 + * up by a MPM pin/interrupt, and RPM will copy STATUS registers into vMPM. 38 + * Then AP start owning vMPM again. 
39 + * 40 + * vMPM register map: 41 + * 42 + * 31 0 43 + * +--------------------------------+ 44 + * | TIMER0 | 0x00 45 + * +--------------------------------+ 46 + * | TIMER1 | 0x04 47 + * +--------------------------------+ 48 + * | ENABLE0 | 0x08 49 + * +--------------------------------+ 50 + * | ... | ... 51 + * +--------------------------------+ 52 + * | ENABLEn | 53 + * +--------------------------------+ 54 + * | FALLING_EDGE0 | 55 + * +--------------------------------+ 56 + * | ... | 57 + * +--------------------------------+ 58 + * | STATUSn | 59 + * +--------------------------------+ 60 + * 61 + * n = DIV_ROUND_UP(pin_cnt, 32) 62 + * 63 + */ 64 + 65 + #define MPM_REG_ENABLE 0 66 + #define MPM_REG_FALLING_EDGE 1 67 + #define MPM_REG_RISING_EDGE 2 68 + #define MPM_REG_POLARITY 3 69 + #define MPM_REG_STATUS 4 70 + 71 + /* MPM pin map to GIC hwirq */ 72 + struct mpm_gic_map { 73 + int pin; 74 + irq_hw_number_t hwirq; 75 + }; 76 + 77 + struct qcom_mpm_priv { 78 + void __iomem *base; 79 + raw_spinlock_t lock; 80 + struct mbox_client mbox_client; 81 + struct mbox_chan *mbox_chan; 82 + struct mpm_gic_map *maps; 83 + unsigned int map_cnt; 84 + unsigned int reg_stride; 85 + struct irq_domain *domain; 86 + struct generic_pm_domain genpd; 87 + }; 88 + 89 + static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg, 90 + unsigned int index) 91 + { 92 + unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; 93 + 94 + return readl_relaxed(priv->base + offset); 95 + } 96 + 97 + static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg, 98 + unsigned int index, u32 val) 99 + { 100 + unsigned int offset = (reg * priv->reg_stride + index + 2) * 4; 101 + 102 + writel_relaxed(val, priv->base + offset); 103 + 104 + /* Ensure the write is completed */ 105 + wmb(); 106 + } 107 + 108 + static void qcom_mpm_enable_irq(struct irq_data *d, bool en) 109 + { 110 + struct qcom_mpm_priv *priv = d->chip_data; 111 + int pin = d->hwirq; 112 + unsigned int 
index = pin / 32; 113 + unsigned int shift = pin % 32; 114 + unsigned long flags, val; 115 + 116 + raw_spin_lock_irqsave(&priv->lock, flags); 117 + 118 + val = qcom_mpm_read(priv, MPM_REG_ENABLE, index); 119 + __assign_bit(shift, &val, en); 120 + qcom_mpm_write(priv, MPM_REG_ENABLE, index, val); 121 + 122 + raw_spin_unlock_irqrestore(&priv->lock, flags); 123 + } 124 + 125 + static void qcom_mpm_mask(struct irq_data *d) 126 + { 127 + qcom_mpm_enable_irq(d, false); 128 + 129 + if (d->parent_data) 130 + irq_chip_mask_parent(d); 131 + } 132 + 133 + static void qcom_mpm_unmask(struct irq_data *d) 134 + { 135 + qcom_mpm_enable_irq(d, true); 136 + 137 + if (d->parent_data) 138 + irq_chip_unmask_parent(d); 139 + } 140 + 141 + static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg, 142 + unsigned int index, unsigned int shift) 143 + { 144 + unsigned long flags, val; 145 + 146 + raw_spin_lock_irqsave(&priv->lock, flags); 147 + 148 + val = qcom_mpm_read(priv, reg, index); 149 + __assign_bit(shift, &val, set); 150 + qcom_mpm_write(priv, reg, index, val); 151 + 152 + raw_spin_unlock_irqrestore(&priv->lock, flags); 153 + } 154 + 155 + static int qcom_mpm_set_type(struct irq_data *d, unsigned int type) 156 + { 157 + struct qcom_mpm_priv *priv = d->chip_data; 158 + int pin = d->hwirq; 159 + unsigned int index = pin / 32; 160 + unsigned int shift = pin % 32; 161 + 162 + if (type & IRQ_TYPE_EDGE_RISING) 163 + mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift); 164 + else 165 + mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift); 166 + 167 + if (type & IRQ_TYPE_EDGE_FALLING) 168 + mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift); 169 + else 170 + mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift); 171 + 172 + if (type & IRQ_TYPE_LEVEL_HIGH) 173 + mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift); 174 + else 175 + mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift); 176 + 177 + if (!d->parent_data) 178 + 
return 0; 179 + 180 + if (type & IRQ_TYPE_EDGE_BOTH) 181 + type = IRQ_TYPE_EDGE_RISING; 182 + 183 + if (type & IRQ_TYPE_LEVEL_MASK) 184 + type = IRQ_TYPE_LEVEL_HIGH; 185 + 186 + return irq_chip_set_type_parent(d, type); 187 + } 188 + 189 + static struct irq_chip qcom_mpm_chip = { 190 + .name = "mpm", 191 + .irq_eoi = irq_chip_eoi_parent, 192 + .irq_mask = qcom_mpm_mask, 193 + .irq_unmask = qcom_mpm_unmask, 194 + .irq_retrigger = irq_chip_retrigger_hierarchy, 195 + .irq_set_type = qcom_mpm_set_type, 196 + .irq_set_affinity = irq_chip_set_affinity_parent, 197 + .flags = IRQCHIP_MASK_ON_SUSPEND | 198 + IRQCHIP_SKIP_SET_WAKE, 199 + }; 200 + 201 + static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin) 202 + { 203 + struct mpm_gic_map *maps = priv->maps; 204 + int i; 205 + 206 + for (i = 0; i < priv->map_cnt; i++) { 207 + if (maps[i].pin == pin) 208 + return &maps[i]; 209 + } 210 + 211 + return NULL; 212 + } 213 + 214 + static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq, 215 + unsigned int nr_irqs, void *data) 216 + { 217 + struct qcom_mpm_priv *priv = domain->host_data; 218 + struct irq_fwspec *fwspec = data; 219 + struct irq_fwspec parent_fwspec; 220 + struct mpm_gic_map *map; 221 + irq_hw_number_t pin; 222 + unsigned int type; 223 + int ret; 224 + 225 + ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type); 226 + if (ret) 227 + return ret; 228 + 229 + ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, 230 + &qcom_mpm_chip, priv); 231 + if (ret) 232 + return ret; 233 + 234 + map = get_mpm_gic_map(priv, pin); 235 + if (map == NULL) 236 + return irq_domain_disconnect_hierarchy(domain->parent, virq); 237 + 238 + if (type & IRQ_TYPE_EDGE_BOTH) 239 + type = IRQ_TYPE_EDGE_RISING; 240 + 241 + if (type & IRQ_TYPE_LEVEL_MASK) 242 + type = IRQ_TYPE_LEVEL_HIGH; 243 + 244 + parent_fwspec.fwnode = domain->parent->fwnode; 245 + parent_fwspec.param_count = 3; 246 + parent_fwspec.param[0] = 0; 247 + parent_fwspec.param[1] = 
map->hwirq; 248 + parent_fwspec.param[2] = type; 249 + 250 + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, 251 + &parent_fwspec); 252 + } 253 + 254 + static const struct irq_domain_ops qcom_mpm_ops = { 255 + .alloc = qcom_mpm_alloc, 256 + .free = irq_domain_free_irqs_common, 257 + .translate = irq_domain_translate_twocell, 258 + }; 259 + 260 + /* Triggered by RPM when system resumes from deep sleep */ 261 + static irqreturn_t qcom_mpm_handler(int irq, void *dev_id) 262 + { 263 + struct qcom_mpm_priv *priv = dev_id; 264 + unsigned long enable, pending; 265 + irqreturn_t ret = IRQ_NONE; 266 + unsigned long flags; 267 + int i, j; 268 + 269 + for (i = 0; i < priv->reg_stride; i++) { 270 + raw_spin_lock_irqsave(&priv->lock, flags); 271 + enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i); 272 + pending = qcom_mpm_read(priv, MPM_REG_STATUS, i); 273 + pending &= enable; 274 + raw_spin_unlock_irqrestore(&priv->lock, flags); 275 + 276 + for_each_set_bit(j, &pending, 32) { 277 + unsigned int pin = 32 * i + j; 278 + struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin); 279 + struct irq_data *d = &desc->irq_data; 280 + 281 + if (!irqd_is_level_type(d)) 282 + irq_set_irqchip_state(d->irq, 283 + IRQCHIP_STATE_PENDING, true); 284 + ret = IRQ_HANDLED; 285 + } 286 + } 287 + 288 + return ret; 289 + } 290 + 291 + static int mpm_pd_power_off(struct generic_pm_domain *genpd) 292 + { 293 + struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv, 294 + genpd); 295 + int i, ret; 296 + 297 + for (i = 0; i < priv->reg_stride; i++) 298 + qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); 299 + 300 + /* Notify RPM to write vMPM into HW */ 301 + ret = mbox_send_message(priv->mbox_chan, NULL); 302 + if (ret < 0) 303 + return ret; 304 + 305 + return 0; 306 + } 307 + 308 + static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq) 309 + { 310 + int i; 311 + 312 + for (i = 0; i < cnt; i++) 313 + if (maps[i].hwirq == hwirq) 314 + return true; 315 + 316 
+ return false; 317 + } 318 + 319 + static int qcom_mpm_init(struct device_node *np, struct device_node *parent) 320 + { 321 + struct platform_device *pdev = of_find_device_by_node(np); 322 + struct device *dev = &pdev->dev; 323 + struct irq_domain *parent_domain; 324 + struct generic_pm_domain *genpd; 325 + struct qcom_mpm_priv *priv; 326 + unsigned int pin_cnt; 327 + int i, irq; 328 + int ret; 329 + 330 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 331 + if (!priv) 332 + return -ENOMEM; 333 + 334 + ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt); 335 + if (ret) { 336 + dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret); 337 + return ret; 338 + } 339 + 340 + priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32); 341 + 342 + ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map"); 343 + if (ret < 0) { 344 + dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret); 345 + return ret; 346 + } 347 + 348 + if (ret % 2) { 349 + dev_err(dev, "invalid qcom,mpm-pin-map\n"); 350 + return -EINVAL; 351 + } 352 + 353 + priv->map_cnt = ret / 2; 354 + priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps), 355 + GFP_KERNEL); 356 + if (!priv->maps) 357 + return -ENOMEM; 358 + 359 + for (i = 0; i < priv->map_cnt; i++) { 360 + u32 pin, hwirq; 361 + 362 + of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin); 363 + of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq); 364 + 365 + if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) { 366 + dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n", 367 + pin, hwirq); 368 + continue; 369 + } 370 + 371 + priv->maps[i].pin = pin; 372 + priv->maps[i].hwirq = hwirq; 373 + } 374 + 375 + raw_spin_lock_init(&priv->lock); 376 + 377 + priv->base = devm_platform_ioremap_resource(pdev, 0); 378 + if (IS_ERR(priv->base)) 379 + return PTR_ERR(priv->base); 380 + 381 + for (i = 0; i < priv->reg_stride; i++) { 382 + qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0); 383 + 
qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0); 384 + qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0); 385 + qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0); 386 + qcom_mpm_write(priv, MPM_REG_STATUS, i, 0); 387 + } 388 + 389 + irq = platform_get_irq(pdev, 0); 390 + if (irq < 0) 391 + return irq; 392 + 393 + genpd = &priv->genpd; 394 + genpd->flags = GENPD_FLAG_IRQ_SAFE; 395 + genpd->power_off = mpm_pd_power_off; 396 + 397 + genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev)); 398 + if (!genpd->name) 399 + return -ENOMEM; 400 + 401 + ret = pm_genpd_init(genpd, NULL, false); 402 + if (ret) { 403 + dev_err(dev, "failed to init genpd: %d\n", ret); 404 + return ret; 405 + } 406 + 407 + ret = of_genpd_add_provider_simple(np, genpd); 408 + if (ret) { 409 + dev_err(dev, "failed to add genpd provider: %d\n", ret); 410 + goto remove_genpd; 411 + } 412 + 413 + priv->mbox_client.dev = dev; 414 + priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0); 415 + if (IS_ERR(priv->mbox_chan)) { 416 + ret = PTR_ERR(priv->mbox_chan); 417 + dev_err(dev, "failed to acquire IPC channel: %d\n", ret); 418 + goto remove_genpd; 419 + } 420 + 421 + parent_domain = irq_find_host(parent); 422 + if (!parent_domain) { 423 + dev_err(dev, "failed to find MPM parent domain\n"); 424 + ret = -ENXIO; 425 + goto free_mbox; 426 + } 427 + 428 + priv->domain = irq_domain_create_hierarchy(parent_domain, 429 + IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt, 430 + of_node_to_fwnode(np), &qcom_mpm_ops, priv); 431 + if (!priv->domain) { 432 + dev_err(dev, "failed to create MPM domain\n"); 433 + ret = -ENOMEM; 434 + goto free_mbox; 435 + } 436 + 437 + irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP); 438 + 439 + ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND, 440 + "qcom_mpm", priv); 441 + if (ret) { 442 + dev_err(dev, "failed to request irq: %d\n", ret); 443 + goto remove_domain; 444 + } 445 + 446 + return 0; 447 + 448 + remove_domain: 449 + 
irq_domain_remove(priv->domain); 450 + free_mbox: 451 + mbox_free_channel(priv->mbox_chan); 452 + remove_genpd: 453 + pm_genpd_remove(genpd); 454 + return ret; 455 + } 456 + 457 + IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm) 458 + IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init) 459 + IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm) 460 + MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager"); 461 + MODULE_LICENSE("GPL v2");
+2 -1
drivers/irqchip/irq-renesas-intc-irqpin.c
··· 508 508 509 509 irq_chip = &p->irq_chip; 510 510 irq_chip->name = "intc-irqpin"; 511 - irq_chip->parent_device = dev; 512 511 irq_chip->irq_mask = disable_fn; 513 512 irq_chip->irq_unmask = enable_fn; 514 513 irq_chip->irq_set_type = intc_irqpin_irq_set_type; ··· 521 522 dev_err(dev, "cannot initialize irq domain\n"); 522 523 goto err0; 523 524 } 525 + 526 + irq_domain_set_pm_device(p->irq_domain, dev); 524 527 525 528 if (p->shared_irqs) { 526 529 /* request one shared interrupt */
+2 -1
drivers/irqchip/irq-renesas-irqc.c
··· 188 188 p->gc->reg_base = p->cpu_int_base; 189 189 p->gc->chip_types[0].regs.enable = IRQC_EN_SET; 190 190 p->gc->chip_types[0].regs.disable = IRQC_EN_STS; 191 - p->gc->chip_types[0].chip.parent_device = dev; 192 191 p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; 193 192 p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; 194 193 p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type; 195 194 p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake; 196 195 p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND; 196 + 197 + irq_domain_set_pm_device(p->irq_domain, dev); 197 198 198 199 /* request interrupts one by one */ 199 200 for (k = 0; k < p->number_of_irqs; k++) {
+26 -12
drivers/irqchip/irq-sifive-plic.c
··· 44 44 * Each hart context has a vector of interrupt enable bits associated with it. 45 45 * There's one bit for each interrupt source. 46 46 */ 47 - #define ENABLE_BASE 0x2000 48 - #define ENABLE_PER_HART 0x80 47 + #define CONTEXT_ENABLE_BASE 0x2000 48 + #define CONTEXT_ENABLE_SIZE 0x80 49 49 50 50 /* 51 51 * Each hart context has a set of control registers associated with it. Right ··· 53 53 * take an interrupt, and a register to claim interrupts. 54 54 */ 55 55 #define CONTEXT_BASE 0x200000 56 - #define CONTEXT_PER_HART 0x1000 56 + #define CONTEXT_SIZE 0x1000 57 57 #define CONTEXT_THRESHOLD 0x00 58 58 #define CONTEXT_CLAIM 0x04 59 59 ··· 81 81 static bool plic_cpuhp_setup_done __ro_after_init; 82 82 static DEFINE_PER_CPU(struct plic_handler, plic_handlers); 83 83 84 - static inline void plic_toggle(struct plic_handler *handler, 85 - int hwirq, int enable) 84 + static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable) 86 85 { 87 - u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32); 86 + u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32); 88 87 u32 hwirq_mask = 1 << (hwirq % 32); 89 88 90 - raw_spin_lock(&handler->enable_lock); 91 89 if (enable) 92 90 writel(readl(reg) | hwirq_mask, reg); 93 91 else 94 92 writel(readl(reg) & ~hwirq_mask, reg); 93 + } 94 + 95 + static void plic_toggle(struct plic_handler *handler, int hwirq, int enable) 96 + { 97 + raw_spin_lock(&handler->enable_lock); 98 + __plic_toggle(handler->enable_base, hwirq, enable); 95 99 raw_spin_unlock(&handler->enable_lock); 96 100 } 97 101 ··· 328 324 * Skip contexts other than external interrupts for our 329 325 * privilege level. 330 326 */ 331 - if (parent.args[0] != RV_IRQ_EXT) 327 + if (parent.args[0] != RV_IRQ_EXT) { 328 + /* Disable S-mode enable bits if running in M-mode. 
*/ 329 + if (IS_ENABLED(CONFIG_RISCV_M_MODE)) { 330 + void __iomem *enable_base = priv->regs + 331 + CONTEXT_ENABLE_BASE + 332 + i * CONTEXT_ENABLE_SIZE; 333 + 334 + for (hwirq = 1; hwirq <= nr_irqs; hwirq++) 335 + __plic_toggle(enable_base, hwirq, 0); 336 + } 332 337 continue; 338 + } 333 339 334 340 hartid = riscv_of_parent_hartid(parent.np); 335 341 if (hartid < 0) { ··· 375 361 376 362 cpumask_set_cpu(cpu, &priv->lmask); 377 363 handler->present = true; 378 - handler->hart_base = 379 - priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART; 364 + handler->hart_base = priv->regs + CONTEXT_BASE + 365 + i * CONTEXT_SIZE; 380 366 raw_spin_lock_init(&handler->enable_lock); 381 - handler->enable_base = 382 - priv->regs + ENABLE_BASE + i * ENABLE_PER_HART; 367 + handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + 368 + i * CONTEXT_ENABLE_SIZE; 383 369 handler->priv = priv; 384 370 done: 385 371 for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+50
drivers/irqchip/irq-stm32-exti.c
··· 214 214 { .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip }, 215 215 }; 216 216 217 + static const struct stm32_desc_irq stm32mp13_desc_irq[] = { 218 + { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip }, 219 + { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip }, 220 + { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip }, 221 + { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip }, 222 + { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip }, 223 + { .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip }, 224 + { .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip }, 225 + { .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip }, 226 + { .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip }, 227 + { .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip }, 228 + { .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip }, 229 + { .exti = 11, .irq_parent = 43, .chip = &stm32_exti_h_chip }, 230 + { .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip }, 231 + { .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip }, 232 + { .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip }, 233 + { .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip }, 234 + { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip }, 235 + { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct }, 236 + { .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct }, 237 + { .exti = 22, .irq_parent = 34, .chip = &stm32_exti_h_chip_direct }, 238 + { .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct }, 239 + { .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct }, 240 + { .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct }, 241 + { .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct }, 242 + { .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct }, 243 + { .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct }, 244 + { .exti = 29, 
.irq_parent = 72, .chip = &stm32_exti_h_chip_direct }, 245 + { .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct }, 246 + { .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct }, 247 + { .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct }, 248 + { .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct }, 249 + { .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct }, 250 + { .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct }, 251 + { .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct }, 252 + { .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct }, 253 + { .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct }, 254 + { .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct }, 255 + { .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct }, 256 + { .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct }, 257 + }; 258 + 217 259 static const struct stm32_exti_drv_data stm32mp1_drv_data = { 218 260 .exti_banks = stm32mp1_exti_banks, 219 261 .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), 220 262 .desc_irqs = stm32mp1_desc_irq, 221 263 .irq_nr = ARRAY_SIZE(stm32mp1_desc_irq), 264 + }; 265 + 266 + static const struct stm32_exti_drv_data stm32mp13_drv_data = { 267 + .exti_banks = stm32mp1_exti_banks, 268 + .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks), 269 + .desc_irqs = stm32mp13_desc_irq, 270 + .irq_nr = ARRAY_SIZE(stm32mp13_desc_irq), 222 271 }; 223 272 224 273 static const struct ··· 971 922 /* platform driver only for MP1 */ 972 923 static const struct of_device_id stm32_exti_ids[] = { 973 924 { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data}, 925 + { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data}, 974 926 {}, 975 927 }; 976 928 MODULE_DEVICE_TABLE(of, stm32_exti_ids);
+17 -8
drivers/irqchip/irq-ts4800.c
··· 19 19 #include <linux/of_address.h> 20 20 #include <linux/of_irq.h> 21 21 #include <linux/platform_device.h> 22 + #include <linux/seq_file.h> 22 23 23 24 #define IRQ_MASK 0x4 24 25 #define IRQ_STATUS 0x8 25 26 26 27 struct ts4800_irq_data { 27 28 void __iomem *base; 29 + struct platform_device *pdev; 28 30 struct irq_domain *domain; 29 - struct irq_chip irq_chip; 30 31 }; 31 32 32 33 static void ts4800_irq_mask(struct irq_data *d) ··· 48 47 writew(reg & ~mask, data->base + IRQ_MASK); 49 48 } 50 49 50 + static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p) 51 + { 52 + struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d); 53 + 54 + seq_printf(p, "%s", dev_name(&data->pdev->dev)); 55 + } 56 + 57 + static const struct irq_chip ts4800_chip = { 58 + .irq_mask = ts4800_irq_mask, 59 + .irq_unmask = ts4800_irq_unmask, 60 + .irq_print_chip = ts4800_irq_print_chip, 61 + }; 62 + 51 63 static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq, 52 64 irq_hw_number_t hwirq) 53 65 { 54 66 struct ts4800_irq_data *data = d->host_data; 55 67 56 - irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq); 68 + irq_set_chip_and_handler(irq, &ts4800_chip, handle_simple_irq); 57 69 irq_set_chip_data(irq, data); 58 70 irq_set_noprobe(irq); 59 71 ··· 106 92 { 107 93 struct device_node *node = pdev->dev.of_node; 108 94 struct ts4800_irq_data *data; 109 - struct irq_chip *irq_chip; 110 95 int parent_irq; 111 96 112 97 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 113 98 if (!data) 114 99 return -ENOMEM; 115 100 101 + data->pdev = pdev; 116 102 data->base = devm_platform_ioremap_resource(pdev, 0); 117 103 if (IS_ERR(data->base)) 118 104 return PTR_ERR(data->base); ··· 124 110 dev_err(&pdev->dev, "failed to get parent IRQ\n"); 125 111 return -EINVAL; 126 112 } 127 - 128 - irq_chip = &data->irq_chip; 129 - irq_chip->name = dev_name(&pdev->dev); 130 - irq_chip->irq_mask = ts4800_irq_mask; 131 - irq_chip->irq_unmask = 
ts4800_irq_unmask; 132 113 133 114 data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data); 134 115 if (!data->domain) {
+26 -20
drivers/irqchip/irq-versatile-fpga.c
··· 7 7 #include <linux/io.h> 8 8 #include <linux/irqchip.h> 9 9 #include <linux/irqchip/chained_irq.h> 10 - #include <linux/irqchip/versatile-fpga.h> 11 10 #include <linux/irqdomain.h> 12 11 #include <linux/module.h> 13 12 #include <linux/of.h> 14 13 #include <linux/of_address.h> 15 14 #include <linux/of_irq.h> 15 + #include <linux/seq_file.h> 16 16 17 17 #include <asm/exception.h> 18 18 #include <asm/mach/irq.h> ··· 34 34 /** 35 35 * struct fpga_irq_data - irq data container for the FPGA IRQ controller 36 36 * @base: memory offset in virtual memory 37 - * @chip: chip container for this instance 38 37 * @domain: IRQ domain for this instance 39 38 * @valid: mask for valid IRQs on this controller 40 39 * @used_irqs: number of active IRQs on this controller 41 40 */ 42 41 struct fpga_irq_data { 43 42 void __iomem *base; 44 - struct irq_chip chip; 45 43 u32 valid; 46 44 struct irq_domain *domain; 47 45 u8 used_irqs; ··· 64 66 65 67 writel(mask, f->base + IRQ_ENABLE_SET); 66 68 } 69 + 70 + static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p) 71 + { 72 + struct fpga_irq_data *f = irq_data_get_irq_chip_data(d); 73 + 74 + seq_printf(p, irq_domain_get_of_node(f->domain)->name); 75 + } 76 + 77 + static const struct irq_chip fpga_chip = { 78 + .irq_ack = fpga_irq_mask, 79 + .irq_mask = fpga_irq_mask, 80 + .irq_unmask = fpga_irq_unmask, 81 + .irq_print_chip = fpga_irq_print_chip, 82 + }; 67 83 68 84 static void fpga_irq_handle(struct irq_desc *desc) 69 85 { ··· 128 116 * Keep iterating over all registered FPGA IRQ controllers until there are 129 117 * no pending interrupts. 
130 118 */ 131 - asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) 119 + static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs) 132 120 { 133 121 int i, handled; 134 122 ··· 147 135 if (!(f->valid & BIT(hwirq))) 148 136 return -EPERM; 149 137 irq_set_chip_data(irq, f); 150 - irq_set_chip_and_handler(irq, &f->chip, 151 - handle_level_irq); 138 + irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq); 152 139 irq_set_probe(irq); 153 140 return 0; 154 141 } ··· 157 146 .xlate = irq_domain_xlate_onetwocell, 158 147 }; 159 148 160 - void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start, 161 - int parent_irq, u32 valid, struct device_node *node) 149 + static void __init fpga_irq_init(void __iomem *base, int parent_irq, 150 + u32 valid, struct device_node *node) 162 151 { 163 152 struct fpga_irq_data *f; 164 153 int i; ··· 169 158 } 170 159 f = &fpga_irq_devices[fpga_irq_id]; 171 160 f->base = base; 172 - f->chip.name = name; 173 - f->chip.irq_ack = fpga_irq_mask; 174 - f->chip.irq_mask = fpga_irq_mask; 175 - f->chip.irq_unmask = fpga_irq_unmask; 176 161 f->valid = valid; 177 162 178 163 if (parent_irq != -1) { ··· 176 169 f); 177 170 } 178 171 179 - /* This will also allocate irq descriptors */ 180 - f->domain = irq_domain_add_simple(node, fls(valid), irq_start, 172 + f->domain = irq_domain_add_linear(node, fls(valid), 181 173 &fpga_irqdomain_ops, f); 182 174 183 175 /* This will allocate all valid descriptors in the linear case */ 184 176 for (i = 0; i < fls(valid); i++) 185 177 if (valid & BIT(i)) { 186 - if (!irq_start) 187 - irq_create_mapping(f->domain, i); 178 + /* Is this still required? 
*/ 179 + irq_create_mapping(f->domain, i); 188 180 f->used_irqs++; 189 181 } 190 182 191 183 pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs", 192 - fpga_irq_id, name, base, f->used_irqs); 184 + fpga_irq_id, node->name, base, f->used_irqs); 193 185 if (parent_irq != -1) 194 186 pr_cont(", parent IRQ: %d\n", parent_irq); 195 187 else ··· 198 192 } 199 193 200 194 #ifdef CONFIG_OF 201 - int __init fpga_irq_of_init(struct device_node *node, 202 - struct device_node *parent) 195 + static int __init fpga_irq_of_init(struct device_node *node, 196 + struct device_node *parent) 203 197 { 204 198 void __iomem *base; 205 199 u32 clear_mask; ··· 228 222 parent_irq = -1; 229 223 } 230 224 231 - fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); 225 + fpga_irq_init(base, parent_irq, valid_mask, node); 232 226 233 227 /* 234 228 * On Versatile AB/PB, some secondary interrupts have a direct
+16 -14
drivers/irqchip/irq-xilinx-intc.c
··· 32 32 #define MER_ME (1<<0) 33 33 #define MER_HIE (1<<1) 34 34 35 + #define SPURIOUS_IRQ (-1U) 36 + 35 37 static DEFINE_STATIC_KEY_FALSE(xintc_is_be); 36 38 37 39 struct xintc_irq_chip { ··· 112 110 .irq_mask_ack = intc_mask_ack, 113 111 }; 114 112 115 - unsigned int xintc_get_irq(void) 116 - { 117 - unsigned int irq = -1; 118 - u32 hwirq; 119 - 120 - hwirq = xintc_read(primary_intc, IVR); 121 - if (hwirq != -1U) 122 - irq = irq_find_mapping(primary_intc->root_domain, hwirq); 123 - 124 - pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); 125 - 126 - return irq; 127 - } 128 - 129 113 static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) 130 114 { 131 115 struct xintc_irq_chip *irqc = d->host_data; ··· 150 162 generic_handle_domain_irq(irqc->root_domain, hwirq); 151 163 } while (true); 152 164 chained_irq_exit(chip, desc); 165 + } 166 + 167 + static void xil_intc_handle_irq(struct pt_regs *regs) 168 + { 169 + u32 hwirq; 170 + 171 + do { 172 + hwirq = xintc_read(primary_intc, IVR); 173 + if (unlikely(hwirq == SPURIOUS_IRQ)) 174 + break; 175 + 176 + generic_handle_domain_irq(primary_intc->root_domain, hwirq); 177 + } while (true); 153 178 } 154 179 155 180 static int __init xilinx_intc_of_init(struct device_node *intc, ··· 234 233 } else { 235 234 primary_intc = irqc; 236 235 irq_set_default_host(primary_intc->root_domain); 236 + set_handle_irq(xil_intc_handle_irq); 237 237 } 238 238 239 239 return 0;
+29 -110
drivers/irqchip/qcom-pdc.c
··· 21 21 #include <linux/slab.h> 22 22 #include <linux/types.h> 23 23 24 - #define PDC_MAX_IRQS 168 25 24 #define PDC_MAX_GPIO_IRQS 256 26 - 27 - #define CLEAR_INTR(reg, intr) (reg & ~(1 << intr)) 28 - #define ENABLE_INTR(reg, intr) (reg | (1 << intr)) 29 25 30 26 #define IRQ_ENABLE_BANK 0x10 31 27 #define IRQ_i_CFG 0x110 32 - 33 - #define PDC_NO_PARENT_IRQ ~0UL 34 28 35 29 struct pdc_pin_region { 36 30 u32 pin_base; 37 31 u32 parent_base; 38 32 u32 cnt; 39 33 }; 34 + 35 + #define pin_to_hwirq(r, p) ((r)->parent_base + (p) - (r)->pin_base) 40 36 41 37 static DEFINE_RAW_SPINLOCK(pdc_lock); 42 38 static void __iomem *pdc_base; ··· 52 56 static void pdc_enable_intr(struct irq_data *d, bool on) 53 57 { 54 58 int pin_out = d->hwirq; 59 + unsigned long enable; 60 + unsigned long flags; 55 61 u32 index, mask; 56 - u32 enable; 57 62 58 63 index = pin_out / 32; 59 64 mask = pin_out % 32; 60 65 61 - raw_spin_lock(&pdc_lock); 66 + raw_spin_lock_irqsave(&pdc_lock, flags); 62 67 enable = pdc_reg_read(IRQ_ENABLE_BANK, index); 63 - enable = on ? 
ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask); 68 + __assign_bit(mask, &enable, on); 64 69 pdc_reg_write(IRQ_ENABLE_BANK, index, enable); 65 - raw_spin_unlock(&pdc_lock); 70 + raw_spin_unlock_irqrestore(&pdc_lock, flags); 66 71 } 67 72 68 73 static void qcom_pdc_gic_disable(struct irq_data *d) ··· 183 186 .irq_set_affinity = irq_chip_set_affinity_parent, 184 187 }; 185 188 186 - static irq_hw_number_t get_parent_hwirq(int pin) 189 + static struct pdc_pin_region *get_pin_region(int pin) 187 190 { 188 191 int i; 189 - struct pdc_pin_region *region; 190 192 191 193 for (i = 0; i < pdc_region_cnt; i++) { 192 - region = &pdc_region[i]; 193 - if (pin >= region->pin_base && 194 - pin < region->pin_base + region->cnt) 195 - return (region->parent_base + pin - region->pin_base); 194 + if (pin >= pdc_region[i].pin_base && 195 + pin < pdc_region[i].pin_base + pdc_region[i].cnt) 196 + return &pdc_region[i]; 196 197 } 197 198 198 - return PDC_NO_PARENT_IRQ; 199 - } 200 - 201 - static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec, 202 - unsigned long *hwirq, unsigned int *type) 203 - { 204 - if (is_of_node(fwspec->fwnode)) { 205 - if (fwspec->param_count != 2) 206 - return -EINVAL; 207 - 208 - *hwirq = fwspec->param[0]; 209 - *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; 210 - return 0; 211 - } 212 - 213 - return -EINVAL; 199 + return NULL; 214 200 } 215 201 216 202 static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq, ··· 201 221 { 202 222 struct irq_fwspec *fwspec = data; 203 223 struct irq_fwspec parent_fwspec; 204 - irq_hw_number_t hwirq, parent_hwirq; 224 + struct pdc_pin_region *region; 225 + irq_hw_number_t hwirq; 205 226 unsigned int type; 206 227 int ret; 207 228 208 - ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type); 209 - if (ret) 210 - return ret; 211 - 212 - ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, 213 - &qcom_pdc_gic_chip, NULL); 214 - if (ret) 215 - return ret; 216 - 217 - parent_hwirq = 
get_parent_hwirq(hwirq); 218 - if (parent_hwirq == PDC_NO_PARENT_IRQ) 219 - return irq_domain_disconnect_hierarchy(domain->parent, virq); 220 - 221 - if (type & IRQ_TYPE_EDGE_BOTH) 222 - type = IRQ_TYPE_EDGE_RISING; 223 - 224 - if (type & IRQ_TYPE_LEVEL_MASK) 225 - type = IRQ_TYPE_LEVEL_HIGH; 226 - 227 - parent_fwspec.fwnode = domain->parent->fwnode; 228 - parent_fwspec.param_count = 3; 229 - parent_fwspec.param[0] = 0; 230 - parent_fwspec.param[1] = parent_hwirq; 231 - parent_fwspec.param[2] = type; 232 - 233 - return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, 234 - &parent_fwspec); 235 - } 236 - 237 - static const struct irq_domain_ops qcom_pdc_ops = { 238 - .translate = qcom_pdc_translate, 239 - .alloc = qcom_pdc_alloc, 240 - .free = irq_domain_free_irqs_common, 241 - }; 242 - 243 - static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq, 244 - unsigned int nr_irqs, void *data) 245 - { 246 - struct irq_fwspec *fwspec = data; 247 - struct irq_fwspec parent_fwspec; 248 - irq_hw_number_t hwirq, parent_hwirq; 249 - unsigned int type; 250 - int ret; 251 - 252 - ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type); 229 + ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); 253 230 if (ret) 254 231 return ret; 255 232 ··· 218 281 if (ret) 219 282 return ret; 220 283 221 - parent_hwirq = get_parent_hwirq(hwirq); 222 - if (parent_hwirq == PDC_NO_PARENT_IRQ) 284 + region = get_pin_region(hwirq); 285 + if (!region) 223 286 return irq_domain_disconnect_hierarchy(domain->parent, virq); 224 287 225 288 if (type & IRQ_TYPE_EDGE_BOTH) ··· 231 294 parent_fwspec.fwnode = domain->parent->fwnode; 232 295 parent_fwspec.param_count = 3; 233 296 parent_fwspec.param[0] = 0; 234 - parent_fwspec.param[1] = parent_hwirq; 297 + parent_fwspec.param[1] = pin_to_hwirq(region, hwirq); 235 298 parent_fwspec.param[2] = type; 236 299 237 300 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, 238 301 &parent_fwspec); 239 302 } 240 303 241 - 
static int qcom_pdc_gpio_domain_select(struct irq_domain *d, 242 - struct irq_fwspec *fwspec, 243 - enum irq_domain_bus_token bus_token) 244 - { 245 - return bus_token == DOMAIN_BUS_WAKEUP; 246 - } 247 - 248 - static const struct irq_domain_ops qcom_pdc_gpio_ops = { 249 - .select = qcom_pdc_gpio_domain_select, 250 - .alloc = qcom_pdc_gpio_alloc, 304 + static const struct irq_domain_ops qcom_pdc_ops = { 305 + .translate = irq_domain_translate_twocell, 306 + .alloc = qcom_pdc_alloc, 251 307 .free = irq_domain_free_irqs_common, 252 308 }; 253 309 ··· 291 361 292 362 static int qcom_pdc_init(struct device_node *node, struct device_node *parent) 293 363 { 294 - struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain; 364 + struct irq_domain *parent_domain, *pdc_domain; 295 365 int ret; 296 366 297 367 pdc_base = of_iomap(node, 0); ··· 313 383 goto fail; 314 384 } 315 385 316 - pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS, 317 - of_fwnode_handle(node), 318 - &qcom_pdc_ops, NULL); 386 + pdc_domain = irq_domain_create_hierarchy(parent_domain, 387 + IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP, 388 + PDC_MAX_GPIO_IRQS, 389 + of_fwnode_handle(node), 390 + &qcom_pdc_ops, NULL); 319 391 if (!pdc_domain) { 320 - pr_err("%pOF: GIC domain add failed\n", node); 392 + pr_err("%pOF: PDC domain add failed\n", node); 321 393 ret = -ENOMEM; 322 394 goto fail; 323 395 } 324 396 325 - pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain, 326 - IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP, 327 - PDC_MAX_GPIO_IRQS, 328 - of_fwnode_handle(node), 329 - &qcom_pdc_gpio_ops, NULL); 330 - if (!pdc_gpio_domain) { 331 - pr_err("%pOF: PDC domain add failed for GPIO domain\n", node); 332 - ret = -ENOMEM; 333 - goto remove; 334 - } 335 - 336 - irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP); 397 + irq_domain_update_bus_token(pdc_domain, DOMAIN_BUS_WAKEUP); 337 398 338 399 return 0; 339 400 340 - remove: 341 - irq_domain_remove(pdc_domain); 342 401 fail: 343 402 
kfree(pdc_region); 344 403 iounmap(pdc_base);
+1 -1
drivers/pci/controller/pcie-apple.c
··· 219 219 if (hwirq < 0) 220 220 return -ENOSPC; 221 221 222 - fwspec.param[1] += hwirq; 222 + fwspec.param[fwspec.param_count - 2] += hwirq; 223 223 224 224 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec); 225 225 if (ret)
+12 -13
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
··· 78 78 struct gpio_chip gc; 79 79 int irqbase; 80 80 int irq; 81 - void *priv; 82 81 struct irq_chip irq_chip; 83 82 u32 pinctrl_id; 84 83 int (*direction_input)(struct gpio_chip *chip, unsigned offset); ··· 225 226 chained_irq_enter(chip, desc); 226 227 sts = ioread32(bank->base + NPCM7XX_GP_N_EVST); 227 228 en = ioread32(bank->base + NPCM7XX_GP_N_EVEN); 228 - dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts, 229 + dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts, 229 230 en); 230 231 231 232 sts &= en; ··· 240 241 gpiochip_get_data(irq_data_get_irq_chip_data(d)); 241 242 unsigned int gpio = BIT(d->hwirq); 242 243 243 - dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio, 244 + dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio, 244 245 d->irq, type); 245 246 switch (type) { 246 247 case IRQ_TYPE_EDGE_RISING: 247 - dev_dbg(d->chip->parent_device, "edge.rising\n"); 248 + dev_dbg(bank->gc.parent, "edge.rising\n"); 248 249 npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); 249 250 npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); 250 251 break; 251 252 case IRQ_TYPE_EDGE_FALLING: 252 - dev_dbg(d->chip->parent_device, "edge.falling\n"); 253 + dev_dbg(bank->gc.parent, "edge.falling\n"); 253 254 npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); 254 255 npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); 255 256 break; 256 257 case IRQ_TYPE_EDGE_BOTH: 257 - dev_dbg(d->chip->parent_device, "edge.both\n"); 258 + dev_dbg(bank->gc.parent, "edge.both\n"); 258 259 npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); 259 260 break; 260 261 case IRQ_TYPE_LEVEL_LOW: 261 - dev_dbg(d->chip->parent_device, "level.low\n"); 262 + dev_dbg(bank->gc.parent, "level.low\n"); 262 263 npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); 263 264 break; 264 265 case IRQ_TYPE_LEVEL_HIGH: 265 - dev_dbg(d->chip->parent_device, "level.high\n"); 266 + dev_dbg(bank->gc.parent, 
"level.high\n"); 266 267 npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); 267 268 break; 268 269 default: 269 - dev_dbg(d->chip->parent_device, "invalid irq type\n"); 270 + dev_dbg(bank->gc.parent, "invalid irq type\n"); 270 271 return -EINVAL; 271 272 } 272 273 ··· 288 289 gpiochip_get_data(irq_data_get_irq_chip_data(d)); 289 290 unsigned int gpio = d->hwirq; 290 291 291 - dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq); 292 + dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq); 292 293 iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST); 293 294 } 294 295 ··· 300 301 unsigned int gpio = d->hwirq; 301 302 302 303 /* Clear events */ 303 - dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq); 304 + dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq); 304 305 iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC); 305 306 } 306 307 ··· 312 313 unsigned int gpio = d->hwirq; 313 314 314 315 /* Enable events */ 315 - dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq); 316 + dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq); 316 317 iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS); 317 318 } 318 319 ··· 322 323 unsigned int gpio = d->hwirq; 323 324 324 325 /* active-high, input, clear interrupt, enable interrupt */ 325 - dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq); 326 + dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq); 326 327 npcmgpio_direction_input(gc, gpio); 327 328 npcmgpio_irq_ack(d); 328 329 npcmgpio_irq_unmask(d);
+2 -1
drivers/pinctrl/pinctrl-starfive.c
··· 1307 1307 sfp->gc.base = -1; 1308 1308 sfp->gc.ngpio = NR_GPIOS; 1309 1309 1310 - starfive_irq_chip.parent_device = dev; 1311 1310 starfive_irq_chip.name = sfp->gc.label; 1312 1311 1313 1312 sfp->gc.irq.chip = &starfive_irq_chip; ··· 1328 1329 ret = devm_gpiochip_add_data(dev, &sfp->gc, sfp); 1329 1330 if (ret) 1330 1331 return dev_err_probe(dev, ret, "could not register gpiochip\n"); 1332 + 1333 + irq_domain_set_pm_device(sfp->gc.irq.domain, dev); 1331 1334 1332 1335 out_pinctrl_enable: 1333 1336 return pinctrl_enable(sfp->pctl);
+2
include/dt-bindings/interrupt-controller/apple-aic.h
··· 11 11 #define AIC_TMR_HV_VIRT 1 12 12 #define AIC_TMR_GUEST_PHYS 2 13 13 #define AIC_TMR_GUEST_VIRT 3 14 + #define AIC_CPU_PMU_E 4 15 + #define AIC_CPU_PMU_P 5 14 16 15 17 #endif
+4 -5
include/linux/irq.h
··· 456 456 /** 457 457 * struct irq_chip - hardware interrupt chip descriptor 458 458 * 459 - * @parent_device: pointer to parent device for irqchip 460 459 * @name: name for /proc/interrupts 461 460 * @irq_startup: start up the interrupt (defaults to ->enable if NULL) 462 461 * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) ··· 502 503 * @flags: chip specific flags 503 504 */ 504 505 struct irq_chip { 505 - struct device *parent_device; 506 506 const char *name; 507 507 unsigned int (*irq_startup)(struct irq_data *data); 508 508 void (*irq_shutdown)(struct irq_data *data); ··· 710 712 extern struct irq_chip dummy_irq_chip; 711 713 712 714 extern void 713 - irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, 715 + irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, 714 716 irq_flow_handler_t handle, const char *name); 715 717 716 - static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, 718 + static inline void irq_set_chip_and_handler(unsigned int irq, 719 + const struct irq_chip *chip, 717 720 irq_flow_handler_t handle) 718 721 { 719 722 irq_set_chip_and_handler_name(irq, chip, handle, NULL); ··· 804 805 } 805 806 806 807 /* Set/get chip/data for an IRQ: */ 807 - extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); 808 + extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip); 808 809 extern int irq_set_handler_data(unsigned int irq, void *data); 809 810 extern int irq_set_chip_data(unsigned int irq, void *data); 810 811 extern int irq_set_irq_type(unsigned int irq, unsigned int type);
-14
include/linux/irqchip/versatile-fpga.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef PLAT_FPGA_IRQ_H 3 - #define PLAT_FPGA_IRQ_H 4 - 5 - struct device_node; 6 - struct pt_regs; 7 - 8 - void fpga_handle_irq(struct pt_regs *regs); 9 - void fpga_irq_init(void __iomem *, const char *, int, int, u32, 10 - struct device_node *node); 11 - int fpga_irq_of_init(struct device_node *node, 12 - struct device_node *parent); 13 - 14 - #endif
+13 -2
include/linux/irqdomain.h
··· 151 151 * @gc: Pointer to a list of generic chips. There is a helper function for 152 152 * setting up one or more generic chips for interrupt controllers 153 153 * drivers using the generic chip library which uses this pointer. 154 + * @dev: Pointer to a device that the domain represent, and that will be 155 + * used for power management purposes. 154 156 * @parent: Pointer to parent irq_domain to support hierarchy irq_domains 155 157 * 156 158 * Revmap data, used internally by irq_domain ··· 173 171 struct fwnode_handle *fwnode; 174 172 enum irq_domain_bus_token bus_token; 175 173 struct irq_domain_chip_generic *gc; 174 + struct device *dev; 176 175 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 177 176 struct irq_domain *parent; 178 177 #endif ··· 227 224 static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) 228 225 { 229 226 return to_of_node(d->fwnode); 227 + } 228 + 229 + static inline void irq_domain_set_pm_device(struct irq_domain *d, 230 + struct device *dev) 231 + { 232 + if (d) 233 + d->dev = dev; 230 234 } 231 235 232 236 #ifdef CONFIG_IRQ_DOMAIN ··· 479 469 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, 480 470 unsigned int virq); 481 471 extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, 482 - irq_hw_number_t hwirq, struct irq_chip *chip, 472 + irq_hw_number_t hwirq, 473 + const struct irq_chip *chip, 483 474 void *chip_data, irq_flow_handler_t handler, 484 475 void *handler_data, const char *handler_name); 485 476 extern void irq_domain_reset_irq_data(struct irq_data *irq_data); ··· 523 512 extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, 524 513 unsigned int virq, 525 514 irq_hw_number_t hwirq, 526 - struct irq_chip *chip, 515 + const struct irq_chip *chip, 527 516 void *chip_data); 528 517 extern void irq_domain_free_irqs_common(struct irq_domain *domain, 529 518 unsigned int virq,
+18 -11
kernel/irq/chip.c
··· 38 38 * @irq: irq number 39 39 * @chip: pointer to irq chip description structure 40 40 */ 41 - int irq_set_chip(unsigned int irq, struct irq_chip *chip) 41 + int irq_set_chip(unsigned int irq, const struct irq_chip *chip) 42 42 { 43 43 unsigned long flags; 44 44 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); ··· 46 46 if (!desc) 47 47 return -EINVAL; 48 48 49 - if (!chip) 50 - chip = &no_irq_chip; 51 - 52 - desc->irq_data.chip = chip; 49 + desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); 53 50 irq_put_desc_unlock(desc, flags); 54 51 /* 55 52 * For !CONFIG_SPARSE_IRQ make the irq show up in ··· 1070 1073 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); 1071 1074 1072 1075 void 1073 - irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, 1076 + irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, 1074 1077 irq_flow_handler_t handle, const char *name) 1075 1078 { 1076 1079 irq_set_chip(irq, chip); ··· 1555 1558 return 0; 1556 1559 } 1557 1560 1561 + static struct device *irq_get_parent_device(struct irq_data *data) 1562 + { 1563 + if (data->domain) 1564 + return data->domain->dev; 1565 + 1566 + return NULL; 1567 + } 1568 + 1558 1569 /** 1559 1570 * irq_chip_pm_get - Enable power for an IRQ chip 1560 1571 * @data: Pointer to interrupt specific data ··· 1572 1567 */ 1573 1568 int irq_chip_pm_get(struct irq_data *data) 1574 1569 { 1570 + struct device *dev = irq_get_parent_device(data); 1575 1571 int retval; 1576 1572 1577 - if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) { 1578 - retval = pm_runtime_get_sync(data->chip->parent_device); 1573 + if (IS_ENABLED(CONFIG_PM) && dev) { 1574 + retval = pm_runtime_get_sync(dev); 1579 1575 if (retval < 0) { 1580 - pm_runtime_put_noidle(data->chip->parent_device); 1576 + pm_runtime_put_noidle(dev); 1581 1577 return retval; 1582 1578 } 1583 1579 } ··· 1596 1590 */ 1597 1591 int irq_chip_pm_put(struct irq_data *data) 1598 1592 { 1593 + struct device 
*dev = irq_get_parent_device(data); 1599 1594 int retval = 0; 1600 1595 1601 - if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) 1602 - retval = pm_runtime_put(data->chip->parent_device); 1596 + if (IS_ENABLED(CONFIG_PM) && dev) 1597 + retval = pm_runtime_put(dev); 1603 1598 1604 1599 return (retval < 0) ? retval : 0; 1605 1600 }
+6 -2
kernel/irq/debugfs.c
··· 69 69 seq_printf(m, "chip: None\n"); 70 70 return; 71 71 } 72 - seq_printf(m, "%*schip: %s\n", ind, "", chip->name); 73 - seq_printf(m, "%*sflags: 0x%lx\n", ind + 1, "", chip->flags); 72 + seq_printf(m, "%*schip: ", ind, ""); 73 + if (chip->irq_print_chip) 74 + chip->irq_print_chip(data, m); 75 + else 76 + seq_printf(m, "%s", chip->name); 77 + seq_printf(m, "\n%*sflags: 0x%lx\n", ind + 1, "", chip->flags); 74 78 irq_debug_show_bits(m, ind, chip->flags, irqchip_flags, 75 79 ARRAY_SIZE(irqchip_flags)); 76 80 }
+5 -4
kernel/irq/irqdomain.c
··· 1319 1319 * @chip_data: The associated chip data 1320 1320 */ 1321 1321 int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, 1322 - irq_hw_number_t hwirq, struct irq_chip *chip, 1322 + irq_hw_number_t hwirq, 1323 + const struct irq_chip *chip, 1323 1324 void *chip_data) 1324 1325 { 1325 1326 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); ··· 1329 1328 return -ENOENT; 1330 1329 1331 1330 irq_data->hwirq = hwirq; 1332 - irq_data->chip = chip ? chip : &no_irq_chip; 1331 + irq_data->chip = (struct irq_chip *)(chip ? chip : &no_irq_chip); 1333 1332 irq_data->chip_data = chip_data; 1334 1333 1335 1334 return 0; ··· 1348 1347 * @handler_name: The interrupt handler name 1349 1348 */ 1350 1349 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, 1351 - irq_hw_number_t hwirq, struct irq_chip *chip, 1350 + irq_hw_number_t hwirq, const struct irq_chip *chip, 1352 1351 void *chip_data, irq_flow_handler_t handler, 1353 1352 void *handler_data, const char *handler_name) 1354 1353 { ··· 1854 1853 * @handler_name: The interrupt handler name 1855 1854 */ 1856 1855 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, 1857 - irq_hw_number_t hwirq, struct irq_chip *chip, 1856 + irq_hw_number_t hwirq, const struct irq_chip *chip, 1858 1857 void *chip_data, irq_flow_handler_t handler, 1859 1858 void *handler_data, const char *handler_name) 1860 1859 {