Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'irq-drivers-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq controller updates from Thomas Gleixner:
"Update for interrupt chip drivers:

- Convert the generic interrupt chip to lock guards to remove copy &
pasta boilerplate code and gotos.

- A new driver for the interrupt controller in the EcoNet EN751221
MIPS SoC.

- Extend the SG2042-MSI driver to support the new SG2044 SoC

- Updates and cleanups for the (ancient) VT8500 driver

- Improve the scalability of the ARM GICV4.1 ITS driver by utilizing
node local copies of a VM's interrupt translation table when possible.
This results in a 12% reduction of VM IPI latency in certain
workloads.

- The usual cleanups and improvements all over the place"

* tag 'irq-drivers-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
irqchip/irq-pruss-intc: Simplify chained interrupt handler setup
irqchip/gic-v4.1: Use local 4_1 ITS to generate VSGI
irqchip/econet-en751221: Switch to of_fwnode_handle()
irqchip/irq-vt8500: Switch to irq_domain_create_*()
irqchip/econet-en751221: Switch to irq_domain_create_linear()
irqchip/irq-vt8500: Use fewer global variables and add error handling
irqchip/irq-vt8500: Use a dedicated chained handler function
irqchip/irq-vt8500: Don't require 8 interrupts from a chained controller
irqchip/irq-vt8500: Drop redundant copy of the device node pointer
irqchip/irq-vt8500: Split up ack/mask functions
irqchip/sg2042-msi: Fix wrong type cast in sg2044_msi_irq_ack()
irqchip/sg2042-msi: Add the Sophgo SG2044 MSI interrupt controller
irqchip/sg2042-msi: Introduce configurable chipinfo for SG2042
irqchip/sg2042-msi: Rename functions and data structures to be SG2042 agnostic
dt-bindings: interrupt-controller: Add Sophgo SG2044 MSI controller
genirq/generic-chip: Fix incorrect lock guard conversions
genirq/generic-chip: Remove unused lock wrappers
irqchip: Convert generic irqchip locking to guards
gpio: mvebu: Convert generic irqchip locking to guard()
ARM: orion/gpio:: Convert generic irqchip locking to guard()
...

+683 -316
+78
Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/interrupt-controller/econet,en751221-intc.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: EcoNet EN751221 Interrupt Controller 8 + 9 + maintainers: 10 + - Caleb James DeLisle <cjd@cjdns.fr> 11 + 12 + description: 13 + The EcoNet EN751221 Interrupt Controller is a simple interrupt controller 14 + designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can 15 + be routed to either VPE but not both, so to support per-CPU interrupts, a 16 + secondary IRQ number is allocated to control masking/unmasking on VPE#1. For 17 + lack of a better term we call these "shadow interrupts". The assignment of 18 + shadow interrupts is defined by the SoC integrator when wiring the interrupt 19 + lines, so they are configurable in the device tree. 20 + 21 + allOf: 22 + - $ref: /schemas/interrupt-controller.yaml# 23 + 24 + properties: 25 + compatible: 26 + const: econet,en751221-intc 27 + 28 + reg: 29 + maxItems: 1 30 + 31 + "#interrupt-cells": 32 + const: 1 33 + 34 + interrupt-controller: true 35 + 36 + interrupts: 37 + maxItems: 1 38 + description: Interrupt line connecting this controller to its parent. 39 + 40 + econet,shadow-interrupts: 41 + $ref: /schemas/types.yaml#/definitions/uint32-matrix 42 + description: 43 + An array of interrupt number pairs where each pair represents a shadow 44 + interrupt relationship. The first number in each pair is the primary IRQ, 45 + and the second is its shadow IRQ used for VPE#1 control. For example, 46 + <8 3> means IRQ 8 is shadowed by IRQ 3, so IRQ 3 cannot be mapped, but 47 + when VPE#1 requests IRQ 8, it will manipulate the IRQ 3 mask bit. 
48 + minItems: 1 49 + maxItems: 20 50 + items: 51 + items: 52 + - description: primary per-CPU IRQ 53 + - description: shadow IRQ number 54 + 55 + required: 56 + - compatible 57 + - reg 58 + - interrupt-controller 59 + - "#interrupt-cells" 60 + - interrupts 61 + 62 + additionalProperties: false 63 + 64 + examples: 65 + - | 66 + interrupt-controller@1fb40000 { 67 + compatible = "econet,en751221-intc"; 68 + reg = <0x1fb40000 0x100>; 69 + 70 + interrupt-controller; 71 + #interrupt-cells = <1>; 72 + 73 + interrupt-parent = <&cpuintc>; 74 + interrupts = <2>; 75 + 76 + econet,shadow-interrupts = <7 2>, <8 3>, <13 12>, <30 29>; 77 + }; 78 + ...
+3 -1
Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml
··· 18 18 19 19 properties: 20 20 compatible: 21 - const: sophgo,sg2042-msi 21 + enum: 22 + - sophgo,sg2042-msi 23 + - sophgo,sg2044-msi 22 24 23 25 reg: 24 26 items:
+2 -4
arch/arm/plat-orion/gpio.c
··· 496 496 u32 reg_val; 497 497 u32 mask = d->mask; 498 498 499 - irq_gc_lock(gc); 499 + guard(raw_spinlock)(&gc->lock); 500 500 reg_val = irq_reg_readl(gc, ct->regs.mask); 501 501 reg_val |= mask; 502 502 irq_reg_writel(gc, reg_val, ct->regs.mask); 503 - irq_gc_unlock(gc); 504 503 } 505 504 506 505 static void orion_gpio_mask_irq(struct irq_data *d) ··· 509 510 u32 mask = d->mask; 510 511 u32 reg_val; 511 512 512 - irq_gc_lock(gc); 513 + guard(raw_spinlock)(&gc->lock); 513 514 reg_val = irq_reg_readl(gc, ct->regs.mask); 514 515 reg_val &= ~mask; 515 516 irq_reg_writel(gc, reg_val, ct->regs.mask); 516 - irq_gc_unlock(gc); 517 517 } 518 518 519 519 void __init orion_gpio_init(int gpio_base, int ngpio,
+5 -10
drivers/gpio/gpio-mvebu.c
··· 408 408 struct mvebu_gpio_chip *mvchip = gc->private; 409 409 u32 mask = d->mask; 410 410 411 - irq_gc_lock(gc); 411 + guard(raw_spinlock)(&gc->lock); 412 412 mvebu_gpio_write_edge_cause(mvchip, ~mask); 413 - irq_gc_unlock(gc); 414 413 } 415 414 416 415 static void mvebu_gpio_edge_irq_mask(struct irq_data *d) ··· 419 420 struct irq_chip_type *ct = irq_data_get_chip_type(d); 420 421 u32 mask = d->mask; 421 422 422 - irq_gc_lock(gc); 423 + guard(raw_spinlock)(&gc->lock); 423 424 ct->mask_cache_priv &= ~mask; 424 425 mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); 425 - irq_gc_unlock(gc); 426 426 } 427 427 428 428 static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) ··· 431 433 struct irq_chip_type *ct = irq_data_get_chip_type(d); 432 434 u32 mask = d->mask; 433 435 434 - irq_gc_lock(gc); 436 + guard(raw_spinlock)(&gc->lock); 435 437 mvebu_gpio_write_edge_cause(mvchip, ~mask); 436 438 ct->mask_cache_priv |= mask; 437 439 mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); 438 - irq_gc_unlock(gc); 439 440 } 440 441 441 442 static void mvebu_gpio_level_irq_mask(struct irq_data *d) ··· 444 447 struct irq_chip_type *ct = irq_data_get_chip_type(d); 445 448 u32 mask = d->mask; 446 449 447 - irq_gc_lock(gc); 450 + guard(raw_spinlock)(&gc->lock); 448 451 ct->mask_cache_priv &= ~mask; 449 452 mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); 450 - irq_gc_unlock(gc); 451 453 } 452 454 453 455 static void mvebu_gpio_level_irq_unmask(struct irq_data *d) ··· 456 460 struct irq_chip_type *ct = irq_data_get_chip_type(d); 457 461 u32 mask = d->mask; 458 462 459 - irq_gc_lock(gc); 463 + guard(raw_spinlock)(&gc->lock); 460 464 ct->mask_cache_priv |= mask; 461 465 mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); 462 - irq_gc_unlock(gc); 463 466 } 464 467 465 468 /*****************************************************************************
+5
drivers/irqchip/Kconfig
··· 166 166 select GENERIC_IRQ_CHIP 167 167 select IRQ_DOMAIN_HIERARCHY 168 168 169 + config ECONET_EN751221_INTC 170 + bool 171 + select GENERIC_IRQ_CHIP 172 + select IRQ_DOMAIN 173 + 169 174 config FARADAY_FTINTC010 170 175 bool 171 176 select IRQ_DOMAIN
+1
drivers/irqchip/Makefile
··· 10 10 obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o 11 11 obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o 12 12 obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o 13 + obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o 13 14 obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o 14 15 obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o 15 16 obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
+5 -13
drivers/irqchip/irq-al-fic.c
··· 65 65 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); 66 66 struct al_fic *fic = gc->private; 67 67 enum al_fic_state new_state; 68 - int ret = 0; 69 68 70 - irq_gc_lock(gc); 69 + guard(raw_spinlock)(&gc->lock); 71 70 72 71 if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) && 73 72 ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) { 74 73 pr_debug("fic doesn't support flow type %d\n", flow_type); 75 - ret = -EINVAL; 76 - goto err; 74 + return -EINVAL; 77 75 } 78 76 79 77 new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ? ··· 89 91 if (fic->state == AL_FIC_UNCONFIGURED) { 90 92 al_fic_set_trigger(fic, gc, new_state); 91 93 } else if (fic->state != new_state) { 92 - pr_debug("fic %s state already configured to %d\n", 93 - fic->name, fic->state); 94 - ret = -EINVAL; 95 - goto err; 94 + pr_debug("fic %s state already configured to %d\n", fic->name, fic->state); 95 + return -EINVAL; 96 96 } 97 - 98 - err: 99 - irq_gc_unlock(gc); 100 - 101 - return ret; 97 + return 0; 102 98 } 103 99 104 100 static void al_fic_irq_handler(struct irq_desc *desc)
+6 -13
drivers/irqchip/irq-atmel-aic.c
··· 78 78 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 79 79 80 80 /* Enable interrupt on AIC5 */ 81 - irq_gc_lock(gc); 81 + guard(raw_spinlock)(&gc->lock); 82 82 irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); 83 - irq_gc_unlock(gc); 84 83 85 84 return 1; 86 85 } ··· 105 106 { 106 107 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 107 108 108 - irq_gc_lock(gc); 109 + guard(raw_spinlock)(&gc->lock); 109 110 irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); 110 111 irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); 111 - irq_gc_unlock(gc); 112 112 } 113 113 114 114 static void aic_resume(struct irq_data *d) 115 115 { 116 116 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 117 117 118 - irq_gc_lock(gc); 118 + guard(raw_spinlock)(&gc->lock); 119 119 irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); 120 120 irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); 121 - irq_gc_unlock(gc); 122 121 } 123 122 124 123 static void aic_pm_shutdown(struct irq_data *d) 125 124 { 126 125 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 127 126 128 - irq_gc_lock(gc); 127 + guard(raw_spinlock)(&gc->lock); 129 128 irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); 130 129 irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); 131 - irq_gc_unlock(gc); 132 130 } 133 131 #else 134 132 #define aic_suspend NULL ··· 171 175 { 172 176 struct irq_domain_chip_generic *dgc = d->gc; 173 177 struct irq_chip_generic *gc; 174 - unsigned long flags; 175 178 unsigned smr; 176 - int idx; 177 - int ret; 179 + int idx, ret; 178 180 179 181 if (!dgc) 180 182 return -EINVAL; ··· 188 194 189 195 gc = dgc->gc[idx]; 190 196 191 - irq_gc_lock_irqsave(gc, flags); 197 + guard(raw_spinlock_irq)(&gc->lock); 192 198 smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); 193 199 aic_common_set_priority(intspec[2], &smr); 194 200 irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); 195 - irq_gc_unlock_irqrestore(gc, flags); 196 201 197 202 return ret; 198 203 }
+8 -20
drivers/irqchip/irq-atmel-aic5.c
··· 92 92 * Disable interrupt on AIC5. We always take the lock of the 93 93 * first irq chip as all chips share the same registers. 94 94 */ 95 - irq_gc_lock(bgc); 95 + guard(raw_spinlock)(&bgc->lock); 96 96 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 97 97 irq_reg_writel(gc, 1, AT91_AIC5_IDCR); 98 98 gc->mask_cache &= ~d->mask; 99 - irq_gc_unlock(bgc); 100 99 } 101 100 102 101 static void aic5_unmask(struct irq_data *d) ··· 108 109 * Enable interrupt on AIC5. We always take the lock of the 109 110 * first irq chip as all chips share the same registers. 110 111 */ 111 - irq_gc_lock(bgc); 112 + guard(raw_spinlock)(&bgc->lock); 112 113 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 113 114 irq_reg_writel(gc, 1, AT91_AIC5_IECR); 114 115 gc->mask_cache |= d->mask; 115 - irq_gc_unlock(bgc); 116 116 } 117 117 118 118 static int aic5_retrigger(struct irq_data *d) ··· 120 122 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); 121 123 122 124 /* Enable interrupt on AIC5 */ 123 - irq_gc_lock(bgc); 125 + guard(raw_spinlock)(&bgc->lock); 124 126 irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); 125 127 irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); 126 - irq_gc_unlock(bgc); 127 - 128 128 return 1; 129 129 } 130 130 ··· 133 137 unsigned int smr; 134 138 int ret; 135 139 136 - irq_gc_lock(bgc); 140 + guard(raw_spinlock)(&bgc->lock); 137 141 irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); 138 142 smr = irq_reg_readl(bgc, AT91_AIC5_SMR); 139 143 ret = aic_common_set_type(d, type, &smr); 140 144 if (!ret) 141 145 irq_reg_writel(bgc, smr, AT91_AIC5_SMR); 142 - irq_gc_unlock(bgc); 143 - 144 146 return ret; 145 147 } 146 148 ··· 160 166 smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR); 161 167 } 162 168 163 - irq_gc_lock(bgc); 169 + guard(raw_spinlock)(&bgc->lock); 164 170 for (i = 0; i < dgc->irqs_per_chip; i++) { 165 171 mask = 1 << i; 166 172 if ((mask & gc->mask_cache) == (mask & gc->wake_active)) ··· 172 178 else 173 179 irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); 174 180 } 175 
- irq_gc_unlock(bgc); 176 181 } 177 182 178 183 static void aic5_resume(struct irq_data *d) ··· 183 190 int i; 184 191 u32 mask; 185 192 186 - irq_gc_lock(bgc); 193 + guard(raw_spinlock)(&bgc->lock); 187 194 188 195 if (smr_cache) { 189 196 irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU); ··· 207 214 else 208 215 irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); 209 216 } 210 - irq_gc_unlock(bgc); 211 217 } 212 218 213 219 static void aic5_pm_shutdown(struct irq_data *d) ··· 217 225 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 218 226 int i; 219 227 220 - irq_gc_lock(bgc); 228 + guard(raw_spinlock)(&bgc->lock); 221 229 for (i = 0; i < dgc->irqs_per_chip; i++) { 222 230 irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); 223 231 irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); 224 232 irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); 225 233 } 226 - irq_gc_unlock(bgc); 227 234 } 228 235 #else 229 236 #define aic5_suspend NULL ··· 268 277 unsigned int *out_type) 269 278 { 270 279 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); 271 - unsigned long flags; 272 280 unsigned smr; 273 281 int ret; 274 282 ··· 279 289 if (ret) 280 290 return ret; 281 291 282 - irq_gc_lock_irqsave(bgc, flags); 292 + guard(raw_spinlock_irq)(&bgc->lock); 283 293 irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); 284 294 smr = irq_reg_readl(bgc, AT91_AIC5_SMR); 285 295 aic_common_set_priority(intspec[2], &smr); 286 296 irq_reg_writel(bgc, smr, AT91_AIC5_SMR); 287 - irq_gc_unlock_irqrestore(bgc, flags); 288 - 289 297 return ret; 290 298 } 291 299
+9 -13
drivers/irqchip/irq-bcm7120-l2.c
··· 63 63 64 64 for (idx = 0; idx < b->n_words; idx++) { 65 65 int base = idx * IRQS_PER_WORD; 66 - struct irq_chip_generic *gc = 67 - irq_get_domain_generic_chip(b->domain, base); 66 + struct irq_chip_generic *gc; 68 67 unsigned long pending; 69 68 int hwirq; 70 69 71 - irq_gc_lock(gc); 72 - pending = irq_reg_readl(gc, b->stat_offset[idx]) & 73 - gc->mask_cache & 74 - data->irq_map_mask[idx]; 75 - irq_gc_unlock(gc); 70 + gc = irq_get_domain_generic_chip(b->domain, base); 71 + scoped_guard (raw_spinlock, &gc->lock) { 72 + pending = irq_reg_readl(gc, b->stat_offset[idx]) & gc->mask_cache & 73 + data->irq_map_mask[idx]; 74 + } 76 75 77 76 for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) 78 77 generic_handle_domain_irq(b->domain, base + hwirq); ··· 85 86 struct bcm7120_l2_intc_data *b = gc->private; 86 87 struct irq_chip_type *ct = gc->chip_types; 87 88 88 - irq_gc_lock(gc); 89 + guard(raw_spinlock)(&gc->lock); 89 90 if (b->can_wake) 90 - irq_reg_writel(gc, gc->mask_cache | gc->wake_active, 91 - ct->regs.mask); 92 - irq_gc_unlock(gc); 91 + irq_reg_writel(gc, gc->mask_cache | gc->wake_active, ct->regs.mask); 93 92 } 94 93 95 94 static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) ··· 95 98 struct irq_chip_type *ct = gc->chip_types; 96 99 97 100 /* Restore the saved mask */ 98 - irq_gc_lock(gc); 101 + guard(raw_spinlock)(&gc->lock); 99 102 irq_reg_writel(gc, gc->mask_cache, ct->regs.mask); 100 - irq_gc_unlock(gc); 101 103 } 102 104 103 105 static int bcm7120_l2_intc_init_one(struct device_node *dn,
+2 -6
drivers/irqchip/irq-brcmstb-l2.c
··· 97 97 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 98 98 struct irq_chip_type *ct = irq_data_get_chip_type(d); 99 99 struct brcmstb_l2_intc_data *b = gc->private; 100 - unsigned long flags; 101 100 102 - irq_gc_lock_irqsave(gc, flags); 101 + guard(raw_spinlock_irqsave)(&gc->lock); 103 102 /* Save the current mask */ 104 103 if (save) 105 104 b->saved_mask = irq_reg_readl(gc, ct->regs.mask); ··· 108 109 irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); 109 110 irq_reg_writel(gc, gc->wake_active, ct->regs.enable); 110 111 } 111 - irq_gc_unlock_irqrestore(gc, flags); 112 112 } 113 113 114 114 static void brcmstb_l2_intc_shutdown(struct irq_data *d) ··· 125 127 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 126 128 struct irq_chip_type *ct = irq_data_get_chip_type(d); 127 129 struct brcmstb_l2_intc_data *b = gc->private; 128 - unsigned long flags; 129 130 130 - irq_gc_lock_irqsave(gc, flags); 131 + guard(raw_spinlock_irqsave)(&gc->lock); 131 132 if (ct->chip.irq_ack) { 132 133 /* Clear unmasked non-wakeup interrupts */ 133 134 irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, ··· 136 139 /* Restore the saved mask */ 137 140 irq_reg_writel(gc, b->saved_mask, ct->regs.disable); 138 141 irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); 139 - irq_gc_unlock_irqrestore(gc, flags); 140 142 } 141 143 142 144 static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+1 -2
drivers/irqchip/irq-csky-apb-intc.c
··· 50 50 unsigned long ifr = ct->regs.mask - 8; 51 51 u32 mask = d->mask; 52 52 53 - irq_gc_lock(gc); 53 + guard(raw_spinlock)(&gc->lock); 54 54 *ct->mask_cache |= mask; 55 55 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); 56 56 irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr); 57 - irq_gc_unlock(gc); 58 57 } 59 58 60 59 static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base,
+1 -2
drivers/irqchip/irq-dw-apb-ictl.c
··· 101 101 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 102 102 struct irq_chip_type *ct = irq_data_get_chip_type(d); 103 103 104 - irq_gc_lock(gc); 104 + guard(raw_spinlock)(&gc->lock); 105 105 writel_relaxed(~0, gc->reg_base + ct->regs.enable); 106 106 writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask); 107 - irq_gc_unlock(gc); 108 107 } 109 108 #else 110 109 #define dw_apb_ictl_resume NULL
+310
drivers/irqchip/irq-econet-en751221.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * EN751221 Interrupt Controller Driver. 4 + * 5 + * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller 6 + * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can 7 + * be routed to either VPE but not both, so to support per-CPU interrupts, a 8 + * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In 9 + * this driver, these are called "shadow interrupts". The assignment of shadow 10 + * interrupts is defined by the SoC integrator when wiring the interrupt lines, 11 + * so they are configurable in the device tree. 12 + * 13 + * If an interrupt (say 30) needs per-CPU capability, the SoC integrator 14 + * allocates another IRQ number (say 29) to be its shadow. The device tree 15 + * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts" 16 + * property. 17 + * 18 + * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29, 19 + * telling the hardware to mask VPE#1's view of IRQ 30. 
20 + * 21 + * Copyright (C) 2025 Caleb James DeLisle <cjd@cjdns.fr> 22 + */ 23 + 24 + #include <linux/cleanup.h> 25 + #include <linux/io.h> 26 + #include <linux/of.h> 27 + #include <linux/of_address.h> 28 + #include <linux/of_irq.h> 29 + #include <linux/irqdomain.h> 30 + #include <linux/irqchip.h> 31 + #include <linux/irqchip/chained_irq.h> 32 + 33 + #define IRQ_COUNT 40 34 + 35 + #define NOT_PERCPU 0xff 36 + #define IS_SHADOW 0xfe 37 + 38 + #define REG_MASK0 0x04 39 + #define REG_MASK1 0x50 40 + #define REG_PENDING0 0x08 41 + #define REG_PENDING1 0x54 42 + 43 + /** 44 + * @membase: Base address of the interrupt controller registers 45 + * @interrupt_shadows: Array of all interrupts, for each value, 46 + * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow 47 + * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt 48 + * - else: This is a per-cpu interrupt whose shadow is the value 49 + */ 50 + static struct { 51 + void __iomem *membase; 52 + u8 interrupt_shadows[IRQ_COUNT]; 53 + } econet_intc __ro_after_init; 54 + 55 + static DEFINE_RAW_SPINLOCK(irq_lock); 56 + 57 + /* IRQs must be disabled */ 58 + static void econet_wreg(u32 reg, u32 val, u32 mask) 59 + { 60 + u32 v; 61 + 62 + guard(raw_spinlock)(&irq_lock); 63 + 64 + v = ioread32(econet_intc.membase + reg); 65 + v &= ~mask; 66 + v |= val & mask; 67 + iowrite32(v, econet_intc.membase + reg); 68 + } 69 + 70 + /* IRQs must be disabled */ 71 + static void econet_chmask(u32 hwirq, bool unmask) 72 + { 73 + u32 reg, mask; 74 + u8 shadow; 75 + 76 + /* 77 + * If the IRQ is a shadow, it should never be manipulated directly. 78 + * It should only be masked/unmasked as a result of the "real" per-cpu 79 + * irq being manipulated by a thread running on VPE#1. 80 + * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask. 81 + * This is single processor only, so smp_processor_id() never exceeds 1. 
82 + */ 83 + shadow = econet_intc.interrupt_shadows[hwirq]; 84 + if (WARN_ON_ONCE(shadow == IS_SHADOW)) 85 + return; 86 + else if (shadow != NOT_PERCPU && smp_processor_id() == 1) 87 + hwirq = shadow; 88 + 89 + if (hwirq >= 32) { 90 + reg = REG_MASK1; 91 + mask = BIT(hwirq - 32); 92 + } else { 93 + reg = REG_MASK0; 94 + mask = BIT(hwirq); 95 + } 96 + 97 + econet_wreg(reg, unmask ? mask : 0, mask); 98 + } 99 + 100 + /* IRQs must be disabled */ 101 + static void econet_intc_mask(struct irq_data *d) 102 + { 103 + econet_chmask(d->hwirq, false); 104 + } 105 + 106 + /* IRQs must be disabled */ 107 + static void econet_intc_unmask(struct irq_data *d) 108 + { 109 + econet_chmask(d->hwirq, true); 110 + } 111 + 112 + static void econet_mask_all(void) 113 + { 114 + /* IRQs are generally disabled during init, but guarding here makes it non-obligatory. */ 115 + guard(irqsave)(); 116 + econet_wreg(REG_MASK0, 0, ~0); 117 + econet_wreg(REG_MASK1, 0, ~0); 118 + } 119 + 120 + static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset) 121 + { 122 + int hwirq; 123 + 124 + while (pending) { 125 + hwirq = fls(pending) - 1; 126 + generic_handle_domain_irq(d, hwirq + offset); 127 + pending &= ~BIT(hwirq); 128 + } 129 + } 130 + 131 + static void econet_intc_from_parent(struct irq_desc *desc) 132 + { 133 + struct irq_chip *chip = irq_desc_get_chip(desc); 134 + struct irq_domain *domain; 135 + u32 pending0, pending1; 136 + 137 + chained_irq_enter(chip, desc); 138 + 139 + pending0 = ioread32(econet_intc.membase + REG_PENDING0); 140 + pending1 = ioread32(econet_intc.membase + REG_PENDING1); 141 + 142 + if (unlikely(!(pending0 | pending1))) { 143 + spurious_interrupt(); 144 + } else { 145 + domain = irq_desc_get_handler_data(desc); 146 + econet_intc_handle_pending(domain, pending0, 0); 147 + econet_intc_handle_pending(domain, pending1, 32); 148 + } 149 + 150 + chained_irq_exit(chip, desc); 151 + } 152 + 153 + static const struct irq_chip econet_irq_chip; 154 + 155 + 
static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq) 156 + { 157 + int ret; 158 + 159 + if (hwirq >= IRQ_COUNT) { 160 + pr_err("%s: hwirq %lu out of range\n", __func__, hwirq); 161 + return -EINVAL; 162 + } else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) { 163 + pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq); 164 + return -EINVAL; 165 + } 166 + 167 + if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) { 168 + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq); 169 + } else { 170 + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq); 171 + ret = irq_set_percpu_devid(irq); 172 + if (ret) 173 + pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret); 174 + } 175 + 176 + irq_set_chip_data(irq, NULL); 177 + return 0; 178 + } 179 + 180 + static const struct irq_chip econet_irq_chip = { 181 + .name = "en751221-intc", 182 + .irq_unmask = econet_intc_unmask, 183 + .irq_mask = econet_intc_mask, 184 + .irq_mask_ack = econet_intc_mask, 185 + }; 186 + 187 + static const struct irq_domain_ops econet_domain_ops = { 188 + .xlate = irq_domain_xlate_onecell, 189 + .map = econet_intc_map 190 + }; 191 + 192 + static int __init get_shadow_interrupts(struct device_node *node) 193 + { 194 + const char *field = "econet,shadow-interrupts"; 195 + int num_shadows; 196 + 197 + num_shadows = of_property_count_u32_elems(node, field); 198 + 199 + memset(econet_intc.interrupt_shadows, NOT_PERCPU, 200 + sizeof(econet_intc.interrupt_shadows)); 201 + 202 + if (num_shadows <= 0) { 203 + return 0; 204 + } else if (num_shadows % 2) { 205 + pr_err("%pOF: %s count is odd, ignoring\n", node, field); 206 + return 0; 207 + } 208 + 209 + u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL); 210 + if (!shadows) 211 + return -ENOMEM; 212 + 213 + if (of_property_read_u32_array(node, field, shadows, num_shadows)) { 214 + pr_err("%pOF: Failed to read %s\n", 
node, field); 215 + return -EINVAL; 216 + } 217 + 218 + for (int i = 0; i < num_shadows; i += 2) { 219 + u32 shadow = shadows[i + 1]; 220 + u32 target = shadows[i]; 221 + 222 + if (shadow > IRQ_COUNT) { 223 + pr_err("%pOF: %s[%d] shadow(%d) out of range\n", 224 + node, field, i + 1, shadow); 225 + continue; 226 + } 227 + 228 + if (target >= IRQ_COUNT) { 229 + pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target); 230 + continue; 231 + } 232 + 233 + if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) { 234 + pr_err("%pOF: %s[%d] target(%d) already has a shadow\n", 235 + node, field, i, target); 236 + continue; 237 + } 238 + 239 + if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) { 240 + pr_err("%pOF: %s[%d] shadow(%d) already has a target\n", 241 + node, field, i + 1, shadow); 242 + continue; 243 + } 244 + 245 + econet_intc.interrupt_shadows[target] = shadow; 246 + econet_intc.interrupt_shadows[shadow] = IS_SHADOW; 247 + } 248 + 249 + return 0; 250 + } 251 + 252 + static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent) 253 + { 254 + struct irq_domain *domain; 255 + struct resource res; 256 + int ret, irq; 257 + 258 + ret = get_shadow_interrupts(node); 259 + if (ret) 260 + return ret; 261 + 262 + irq = irq_of_parse_and_map(node, 0); 263 + if (!irq) { 264 + pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node); 265 + return -EINVAL; 266 + } 267 + 268 + if (of_address_to_resource(node, 0, &res)) { 269 + pr_err("%pOF: DT: Failed to get 'reg'\n", node); 270 + ret = -EINVAL; 271 + goto err_dispose_mapping; 272 + } 273 + 274 + if (!request_mem_region(res.start, resource_size(&res), res.name)) { 275 + pr_err("%pOF: Failed to request memory\n", node); 276 + ret = -EBUSY; 277 + goto err_dispose_mapping; 278 + } 279 + 280 + econet_intc.membase = ioremap(res.start, resource_size(&res)); 281 + if (!econet_intc.membase) { 282 + pr_err("%pOF: Failed to remap membase\n", node); 283 + ret = -ENOMEM; 284 + 
goto err_release; 285 + } 286 + 287 + econet_mask_all(); 288 + 289 + domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT, 290 + &econet_domain_ops, NULL); 291 + if (!domain) { 292 + pr_err("%pOF: Failed to add irqdomain\n", node); 293 + ret = -ENOMEM; 294 + goto err_unmap; 295 + } 296 + 297 + irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain); 298 + 299 + return 0; 300 + 301 + err_unmap: 302 + iounmap(econet_intc.membase); 303 + err_release: 304 + release_mem_region(res.start, resource_size(&res)); 305 + err_dispose_mapping: 306 + irq_dispose_mapping(irq); 307 + return ret; 308 + } 309 + 310 + IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init);
+5 -1
drivers/irqchip/irq-gic-v3-its.c
··· 125 125 int vlpi_redist_offset; 126 126 }; 127 127 128 + static DEFINE_PER_CPU(struct its_node *, local_4_1_its); 129 + 128 130 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) 129 131 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) 130 132 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) ··· 2780 2778 } 2781 2779 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); 2782 2780 2781 + *this_cpu_ptr(&local_4_1_its) = its; 2783 2782 return val; 2784 2783 } 2785 2784 ··· 2818 2815 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; 2819 2816 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; 2820 2817 2818 + *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu); 2821 2819 return val; 2822 2820 } 2823 2821 ··· 4184 4180 4185 4181 static struct its_node *find_4_1_its(void) 4186 4182 { 4187 - static struct its_node *its = NULL; 4183 + struct its_node *its = *this_cpu_ptr(&local_4_1_its); 4188 4184 4189 4185 if (!its) { 4190 4186 list_for_each_entry(its, &its_nodes, entry) {
+3 -6
drivers/irqchip/irq-ingenic-tcu.c
··· 52 52 struct regmap *map = gc->private; 53 53 u32 mask = d->mask; 54 54 55 - irq_gc_lock(gc); 55 + guard(raw_spinlock)(&gc->lock); 56 56 regmap_write(map, ct->regs.ack, mask); 57 57 regmap_write(map, ct->regs.enable, mask); 58 58 *ct->mask_cache |= mask; 59 - irq_gc_unlock(gc); 60 59 } 61 60 62 61 static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) ··· 65 66 struct regmap *map = gc->private; 66 67 u32 mask = d->mask; 67 68 68 - irq_gc_lock(gc); 69 + guard(raw_spinlock)(&gc->lock); 69 70 regmap_write(map, ct->regs.disable, mask); 70 71 *ct->mask_cache &= ~mask; 71 - irq_gc_unlock(gc); 72 72 } 73 73 74 74 static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) ··· 77 79 struct regmap *map = gc->private; 78 80 u32 mask = d->mask; 79 81 80 - irq_gc_lock(gc); 82 + guard(raw_spinlock)(&gc->lock); 81 83 regmap_write(map, ct->regs.ack, mask); 82 84 regmap_write(map, ct->regs.disable, mask); 83 - irq_gc_unlock(gc); 84 85 } 85 86 86 87 static int __init ingenic_tcu_irq_init(struct device_node *np,
+7 -11
drivers/irqchip/irq-lan966x-oic.c
··· 71 71 struct lan966x_oic_chip_regs *chip_regs = gc->private; 72 72 u32 map; 73 73 74 - irq_gc_lock(gc); 75 - 76 - /* Map the source interrupt to the destination */ 77 - map = irq_reg_readl(gc, chip_regs->reg_off_map); 78 - map |= data->mask; 79 - irq_reg_writel(gc, map, chip_regs->reg_off_map); 80 - 81 - irq_gc_unlock(gc); 74 + scoped_guard (raw_spinlock, &gc->lock) { 75 + /* Map the source interrupt to the destination */ 76 + map = irq_reg_readl(gc, chip_regs->reg_off_map); 77 + map |= data->mask; 78 + irq_reg_writel(gc, map, chip_regs->reg_off_map); 79 + } 82 80 83 81 ct->chip.irq_ack(data); 84 82 ct->chip.irq_unmask(data); ··· 93 95 94 96 ct->chip.irq_mask(data); 95 97 96 - irq_gc_lock(gc); 98 + guard(raw_spinlock)(&gc->lock); 97 99 98 100 /* Unmap the interrupt */ 99 101 map = irq_reg_readl(gc, chip_regs->reg_off_map); 100 102 map &= ~data->mask; 101 103 irq_reg_writel(gc, map, chip_regs->reg_off_map); 102 - 103 - irq_gc_unlock(gc); 104 104 } 105 105 106 106 static int lan966x_oic_irq_set_type(struct irq_data *data,
+2 -7
drivers/irqchip/irq-loongson-liointc.c
··· 116 116 { 117 117 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); 118 118 u32 mask = data->mask; 119 - unsigned long flags; 120 119 121 - irq_gc_lock_irqsave(gc, flags); 120 + guard(raw_spinlock)(&gc->lock); 122 121 switch (type) { 123 122 case IRQ_TYPE_LEVEL_HIGH: 124 123 liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false); ··· 136 137 liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); 137 138 break; 138 139 default: 139 - irq_gc_unlock_irqrestore(gc, flags); 140 140 return -EINVAL; 141 141 } 142 - irq_gc_unlock_irqrestore(gc, flags); 143 142 144 143 irqd_set_trigger_type(data, type); 145 144 return 0; ··· 154 157 static void liointc_resume(struct irq_chip_generic *gc) 155 158 { 156 159 struct liointc_priv *priv = gc->private; 157 - unsigned long flags; 158 160 int i; 159 161 160 - irq_gc_lock_irqsave(gc, flags); 162 + guard(raw_spinlock_irqsave)(&gc->lock); 161 163 /* Disable all at first */ 162 164 writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE); 163 165 /* Restore map cache */ ··· 166 170 writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE); 167 171 /* Restore mask cache */ 168 172 writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE); 169 - irq_gc_unlock_irqrestore(gc, flags); 170 173 } 171 174 172 175 static int parent_irq[LIOINTC_NUM_PARENT];
+1 -2
drivers/irqchip/irq-mscc-ocelot.c
··· 83 83 unsigned int mask = data->mask; 84 84 u32 val; 85 85 86 - irq_gc_lock(gc); 86 + guard(raw_spinlock)(&gc->lock); 87 87 /* 88 88 * Clear sticky bits for edge mode interrupts. 89 89 * Serval has only one trigger register replication, but the adjacent ··· 97 97 98 98 *ct->mask_cache &= ~mask; 99 99 irq_reg_writel(gc, mask, p->reg_off_ena_set); 100 - irq_gc_unlock(gc); 101 100 } 102 101 103 102 static void ocelot_irq_handler(struct irq_desc *desc)
+1 -2
drivers/irqchip/irq-pruss-intc.c
··· 581 581 host_data->intc = intc; 582 582 host_data->host_irq = i; 583 583 584 - irq_set_handler_data(irq, host_data); 585 - irq_set_chained_handler(irq, pruss_intc_irq_handler); 584 + irq_set_chained_handler_and_data(irq, pruss_intc_irq_handler, host_data); 586 585 } 587 586 588 587 return 0;
+120 -40
drivers/irqchip/irq-sg2042-msi.c
··· 19 19 20 20 #include "irq-msi-lib.h" 21 21 22 - #define SG2042_MAX_MSI_VECTOR 32 23 - 24 - struct sg2042_msi_chipdata { 25 - void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR 26 - 27 - phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET 28 - 29 - u32 irq_first; // The vector number that MSIs starts 30 - u32 num_irqs; // The number of vectors for MSIs 31 - 32 - DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR); 33 - struct mutex msi_map_lock; // lock for msi_map 22 + struct sg204x_msi_chip_info { 23 + const struct irq_chip *irqchip; 24 + const struct msi_parent_ops *parent_ops; 34 25 }; 35 26 36 - static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req) 27 + /** 28 + * struct sg204x_msi_chipdata - chip data for the SG204x MSI IRQ controller 29 + * @reg_clr: clear reg, see TRM, 10.1.33, GP_INTR0_CLR 30 + * @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET 31 + * @irq_first: First vectors number that MSIs starts 32 + * @num_irqs: Number of vectors for MSIs 33 + * @msi_map: mapping for allocated MSI vectors. 34 + * @msi_map_lock: Lock for msi_map 35 + * @chip_info: chip specific infomations 36 + */ 37 + struct sg204x_msi_chipdata { 38 + void __iomem *reg_clr; 39 + 40 + phys_addr_t doorbell_addr; 41 + 42 + u32 irq_first; 43 + u32 num_irqs; 44 + 45 + unsigned long *msi_map; 46 + struct mutex msi_map_lock; 47 + 48 + const struct sg204x_msi_chip_info *chip_info; 49 + }; 50 + 51 + static int sg204x_msi_allocate_hwirq(struct sg204x_msi_chipdata *data, int num_req) 37 52 { 38 53 int first; 39 54 ··· 58 43 return first >= 0 ? 
first : -ENOSPC; 59 44 } 60 45 61 - static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req) 46 + static void sg204x_msi_free_hwirq(struct sg204x_msi_chipdata *data, int hwirq, int num_req) 62 47 { 63 48 guard(mutex)(&data->msi_map_lock); 64 49 bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req)); ··· 66 51 67 52 static void sg2042_msi_irq_ack(struct irq_data *d) 68 53 { 69 - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); 54 + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); 70 55 int bit_off = d->hwirq; 71 56 72 57 writel(1 << bit_off, data->reg_clr); ··· 76 61 77 62 static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 78 63 { 79 - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); 64 + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); 80 65 81 66 msg->address_hi = upper_32_bits(data->doorbell_addr); 82 67 msg->address_lo = lower_32_bits(data->doorbell_addr); ··· 94 79 .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg, 95 80 }; 96 81 97 - static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) 82 + static void sg2044_msi_irq_ack(struct irq_data *d) 98 83 { 99 - struct sg2042_msi_chipdata *data = domain->host_data; 84 + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); 85 + 86 + writel(0, (u32 __iomem *)data->reg_clr + d->hwirq); 87 + irq_chip_ack_parent(d); 88 + } 89 + 90 + static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 91 + { 92 + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); 93 + phys_addr_t doorbell = data->doorbell_addr + 4 * (d->hwirq / 32); 94 + 95 + msg->address_lo = lower_32_bits(doorbell); 96 + msg->address_hi = upper_32_bits(doorbell); 97 + msg->data = d->hwirq % 32; 98 + } 99 + 100 + static struct irq_chip sg2044_msi_middle_irq_chip = { 101 + .name = "SG2044 MSI", 102 + .irq_ack = 
sg2044_msi_irq_ack, 103 + .irq_mask = irq_chip_mask_parent, 104 + .irq_unmask = irq_chip_unmask_parent, 105 + #ifdef CONFIG_SMP 106 + .irq_set_affinity = irq_chip_set_affinity_parent, 107 + #endif 108 + .irq_compose_msi_msg = sg2044_msi_irq_compose_msi_msg, 109 + }; 110 + 111 + static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) 112 + { 113 + struct sg204x_msi_chipdata *data = domain->host_data; 100 114 struct irq_fwspec fwspec; 101 115 struct irq_data *d; 102 116 int ret; ··· 143 99 return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); 144 100 } 145 101 146 - static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, 102 + static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, 147 103 unsigned int nr_irqs, void *args) 148 104 { 149 - struct sg2042_msi_chipdata *data = domain->host_data; 105 + struct sg204x_msi_chipdata *data = domain->host_data; 150 106 int hwirq, err, i; 151 107 152 - hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs); 108 + hwirq = sg204x_msi_allocate_hwirq(data, nr_irqs); 153 109 if (hwirq < 0) 154 110 return hwirq; 155 111 156 112 for (i = 0; i < nr_irqs; i++) { 157 - err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i); 113 + err = sg204x_msi_parent_domain_alloc(domain, virq + i, hwirq + i); 158 114 if (err) 159 115 goto err_hwirq; 160 116 161 117 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 162 - &sg2042_msi_middle_irq_chip, data); 118 + data->chip_info->irqchip, data); 163 119 } 164 - 165 120 return 0; 166 121 167 122 err_hwirq: 168 - sg2042_msi_free_hwirq(data, hwirq, nr_irqs); 123 + sg204x_msi_free_hwirq(data, hwirq, nr_irqs); 169 124 irq_domain_free_irqs_parent(domain, virq, i); 170 - 171 125 return err; 172 126 } 173 127 174 - static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq, 128 + static void sg204x_msi_middle_domain_free(struct irq_domain *domain, unsigned int 
virq, 175 129 unsigned int nr_irqs) 176 130 { 177 131 struct irq_data *d = irq_domain_get_irq_data(domain, virq); 178 - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); 132 + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); 179 133 180 134 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 181 - sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs); 135 + sg204x_msi_free_hwirq(data, d->hwirq, nr_irqs); 182 136 } 183 137 184 - static const struct irq_domain_ops sg2042_msi_middle_domain_ops = { 185 - .alloc = sg2042_msi_middle_domain_alloc, 186 - .free = sg2042_msi_middle_domain_free, 138 + static const struct irq_domain_ops sg204x_msi_middle_domain_ops = { 139 + .alloc = sg204x_msi_middle_domain_alloc, 140 + .free = sg204x_msi_middle_domain_free, 187 141 .select = msi_lib_irq_domain_select, 188 142 }; 189 143 ··· 200 158 .init_dev_msi_info = msi_lib_init_dev_msi_info, 201 159 }; 202 160 203 - static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data, 161 + #define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ 162 + MSI_FLAG_USE_DEF_CHIP_OPS) 163 + 164 + #define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ 165 + MSI_FLAG_PCI_MSIX) 166 + 167 + static const struct msi_parent_ops sg2044_msi_parent_ops = { 168 + .required_flags = SG2044_MSI_FLAGS_REQUIRED, 169 + .supported_flags = SG2044_MSI_FLAGS_SUPPORTED, 170 + .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, 171 + .bus_select_mask = MATCH_PCI_MSI, 172 + .bus_select_token = DOMAIN_BUS_NEXUS, 173 + .prefix = "SG2044-", 174 + .init_dev_msi_info = msi_lib_init_dev_msi_info, 175 + }; 176 + 177 + static int sg204x_msi_init_domains(struct sg204x_msi_chipdata *data, 204 178 struct irq_domain *plic_domain, struct device *dev) 205 179 { 206 180 struct fwnode_handle *fwnode = dev_fwnode(dev); 207 181 struct irq_domain *middle_domain; 208 182 209 183 middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode, 210 - 
&sg2042_msi_middle_domain_ops, data); 184 + &sg204x_msi_middle_domain_ops, data); 211 185 if (!middle_domain) { 212 186 pr_err("Failed to create the MSI middle domain\n"); 213 187 return -ENOMEM; ··· 232 174 irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS); 233 175 234 176 middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; 235 - middle_domain->msi_parent_ops = &sg2042_msi_parent_ops; 236 - 177 + middle_domain->msi_parent_ops = data->chip_info->parent_ops; 237 178 return 0; 238 179 } 239 180 240 181 static int sg2042_msi_probe(struct platform_device *pdev) 241 182 { 242 183 struct fwnode_reference_args args = { }; 243 - struct sg2042_msi_chipdata *data; 184 + struct sg204x_msi_chipdata *data; 244 185 struct device *dev = &pdev->dev; 245 186 struct irq_domain *plic_domain; 246 187 struct resource *res; 247 188 int ret; 248 189 249 - data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL); 190 + data = devm_kzalloc(dev, sizeof(struct sg204x_msi_chipdata), GFP_KERNEL); 250 191 if (!data) 251 192 return -ENOMEM; 193 + 194 + data->chip_info = device_get_match_data(&pdev->dev); 195 + if (!data->chip_info) { 196 + dev_err(&pdev->dev, "Failed to get irqchip\n"); 197 + return -EINVAL; 198 + } 252 199 253 200 data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr"); 254 201 if (IS_ERR(data->reg_clr)) { ··· 295 232 296 233 mutex_init(&data->msi_map_lock); 297 234 298 - return sg2042_msi_init_domains(data, plic_domain, dev); 235 + data->msi_map = devm_bitmap_zalloc(&pdev->dev, data->num_irqs, GFP_KERNEL); 236 + if (!data->msi_map) { 237 + dev_err(&pdev->dev, "Unable to allocate msi mapping\n"); 238 + return -ENOMEM; 239 + } 240 + 241 + return sg204x_msi_init_domains(data, plic_domain, dev); 299 242 } 300 243 244 + static const struct sg204x_msi_chip_info sg2042_chip_info = { 245 + .irqchip = &sg2042_msi_middle_irq_chip, 246 + .parent_ops = &sg2042_msi_parent_ops, 247 + }; 248 + 249 + static const struct sg204x_msi_chip_info 
sg2044_chip_info = { 250 + .irqchip = &sg2044_msi_middle_irq_chip, 251 + .parent_ops = &sg2044_msi_parent_ops, 252 + }; 253 + 301 254 static const struct of_device_id sg2042_msi_of_match[] = { 302 - { .compatible = "sophgo,sg2042-msi" }, 255 + { .compatible = "sophgo,sg2042-msi", .data = &sg2042_chip_info }, 256 + { .compatible = "sophgo,sg2044-msi", .data = &sg2044_chip_info }, 303 257 { } 304 258 }; 305 259
+6 -15
drivers/irqchip/irq-stm32-exti.c
··· 169 169 u32 rtsr, ftsr; 170 170 int err; 171 171 172 - irq_gc_lock(gc); 172 + guard(raw_spinlock)(&gc->lock); 173 173 174 174 rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst); 175 175 ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst); 176 176 177 177 err = stm32_exti_set_type(d, type, &rtsr, &ftsr); 178 178 if (err) 179 - goto unlock; 179 + return err; 180 180 181 181 irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst); 182 182 irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst); 183 - 184 - unlock: 185 - irq_gc_unlock(gc); 186 - 187 - return err; 183 + return 0; 188 184 } 189 185 190 186 static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data, ··· 213 217 { 214 218 struct stm32_exti_chip_data *chip_data = gc->private; 215 219 216 - irq_gc_lock(gc); 220 + guard(raw_spinlock)(&gc->lock); 217 221 stm32_chip_suspend(chip_data, gc->wake_active); 218 - irq_gc_unlock(gc); 219 222 } 220 223 221 224 static void stm32_irq_resume(struct irq_chip_generic *gc) 222 225 { 223 226 struct stm32_exti_chip_data *chip_data = gc->private; 224 227 225 - irq_gc_lock(gc); 228 + guard(raw_spinlock)(&gc->lock); 226 229 stm32_chip_resume(chip_data, gc->mask_cache); 227 - irq_gc_unlock(gc); 228 230 } 229 231 230 232 static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq, ··· 259 265 struct stm32_exti_chip_data *chip_data = gc->private; 260 266 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; 261 267 262 - irq_gc_lock(gc); 263 - 268 + guard(raw_spinlock)(&gc->lock); 264 269 irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst); 265 - 266 - irq_gc_unlock(gc); 267 270 } 268 271 269 272 static struct
+2 -7
drivers/irqchip/irq-sunxi-nmi.c
··· 111 111 unsigned int src_type; 112 112 unsigned int i; 113 113 114 - irq_gc_lock(gc); 114 + guard(raw_spinlock)(&gc->lock); 115 115 116 116 switch (flow_type & IRQF_TRIGGER_MASK) { 117 117 case IRQ_TYPE_EDGE_FALLING: ··· 128 128 src_type = SUNXI_SRC_TYPE_LEVEL_LOW; 129 129 break; 130 130 default: 131 - irq_gc_unlock(gc); 132 - pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", 133 - data->irq); 131 + pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq); 134 132 return -EBADR; 135 133 } 136 134 ··· 143 145 src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK; 144 146 src_type_reg |= src_type; 145 147 sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg); 146 - 147 - irq_gc_unlock(gc); 148 - 149 148 return IRQ_SET_MASK_OK; 150 149 } 151 150
+3 -10
drivers/irqchip/irq-tb10x.c
··· 41 41 static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) 42 42 { 43 43 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); 44 - uint32_t im, mod, pol; 44 + uint32_t mod, pol, im = data->mask; 45 45 46 - im = data->mask; 47 - 48 - irq_gc_lock(gc); 46 + guard(raw_spinlock)(&gc->lock); 49 47 50 48 mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im; 51 49 pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im; ··· 65 67 case IRQ_TYPE_EDGE_RISING: 66 68 break; 67 69 default: 68 - irq_gc_unlock(gc); 69 - pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", 70 - __func__, data->irq); 70 + pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", __func__, data->irq); 71 71 return -EBADR; 72 72 } 73 73 ··· 75 79 ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod); 76 80 ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol); 77 81 ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im); 78 - 79 - irq_gc_unlock(gc); 80 - 81 82 return IRQ_SET_MASK_OK; 82 83 } 83 84
+80 -73
drivers/irqchip/irq-vt8500.c
··· 15 15 #include <linux/io.h> 16 16 #include <linux/irq.h> 17 17 #include <linux/irqchip.h> 18 + #include <linux/irqchip/chained_irq.h> 18 19 #include <linux/irqdomain.h> 19 20 #include <linux/interrupt.h> 20 21 #include <linux/bitops.h> ··· 64 63 struct irq_domain *domain; /* Domain for this controller */ 65 64 }; 66 65 67 - /* Global variable for accessing io-mem addresses */ 68 - static struct vt8500_irq_data intc[VT8500_INTC_MAX]; 69 - static u32 active_cnt = 0; 66 + /* Primary interrupt controller data */ 67 + static struct vt8500_irq_data *primary_intc; 68 + 69 + static void vt8500_irq_ack(struct irq_data *d) 70 + { 71 + struct vt8500_irq_data *priv = d->domain->host_data; 72 + void __iomem *base = priv->base; 73 + void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4); 74 + u32 status = (1 << (d->hwirq & 0x1f)); 75 + 76 + writel(status, stat_reg); 77 + } 70 78 71 79 static void vt8500_irq_mask(struct irq_data *d) 72 80 { 73 81 struct vt8500_irq_data *priv = d->domain->host_data; 74 82 void __iomem *base = priv->base; 75 - void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 
0 : 4); 76 - u8 edge, dctr; 77 - u32 status; 83 + u8 dctr; 78 84 79 - edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE; 80 - if (edge) { 81 - status = readl(stat_reg); 82 - 83 - status |= (1 << (d->hwirq & 0x1f)); 84 - writel(status, stat_reg); 85 - } else { 86 - dctr = readb(base + VT8500_ICDC + d->hwirq); 87 - dctr &= ~VT8500_INT_ENABLE; 88 - writeb(dctr, base + VT8500_ICDC + d->hwirq); 89 - } 85 + dctr = readb(base + VT8500_ICDC + d->hwirq); 86 + dctr &= ~VT8500_INT_ENABLE; 87 + writeb(dctr, base + VT8500_ICDC + d->hwirq); 90 88 } 91 89 92 90 static void vt8500_irq_unmask(struct irq_data *d) ··· 130 130 } 131 131 132 132 static struct irq_chip vt8500_irq_chip = { 133 - .name = "vt8500", 134 - .irq_ack = vt8500_irq_mask, 135 - .irq_mask = vt8500_irq_mask, 136 - .irq_unmask = vt8500_irq_unmask, 137 - .irq_set_type = vt8500_irq_set_type, 133 + .name = "vt8500", 134 + .irq_ack = vt8500_irq_ack, 135 + .irq_mask = vt8500_irq_mask, 136 + .irq_unmask = vt8500_irq_unmask, 137 + .irq_set_type = vt8500_irq_set_type, 138 138 }; 139 139 140 140 static void __init vt8500_init_irq_hw(void __iomem *base) ··· 163 163 .xlate = irq_domain_xlate_onecell, 164 164 }; 165 165 166 + static inline void vt8500_handle_irq_common(struct vt8500_irq_data *intc) 167 + { 168 + unsigned long irqnr = readl_relaxed(intc->base) & 0x3F; 169 + unsigned long stat; 170 + 171 + /* 172 + * Highest Priority register default = 63, so check that this 173 + * is a real interrupt by checking the status register 174 + */ 175 + if (irqnr == 63) { 176 + stat = readl_relaxed(intc->base + VT8500_ICIS + 4); 177 + if (!(stat & BIT(31))) 178 + return; 179 + } 180 + 181 + generic_handle_domain_irq(intc->domain, irqnr); 182 + } 183 + 166 184 static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) 167 185 { 168 - u32 stat, i; 169 - int irqnr; 170 - void __iomem *base; 186 + vt8500_handle_irq_common(primary_intc); 187 + } 171 188 172 - /* Loop through each active controller */ 173 - for (i=0; 
i<active_cnt; i++) { 174 - base = intc[i].base; 175 - irqnr = readl_relaxed(base) & 0x3F; 176 - /* 177 - Highest Priority register default = 63, so check that this 178 - is a real interrupt by checking the status register 179 - */ 180 - if (irqnr == 63) { 181 - stat = readl_relaxed(base + VT8500_ICIS + 4); 182 - if (!(stat & BIT(31))) 183 - continue; 184 - } 189 + static void vt8500_handle_irq_chained(struct irq_desc *desc) 190 + { 191 + struct irq_domain *d = irq_desc_get_handler_data(desc); 192 + struct irq_chip *chip = irq_desc_get_chip(desc); 193 + struct vt8500_irq_data *intc = d->host_data; 185 194 186 - generic_handle_domain_irq(intc[i].domain, irqnr); 187 - } 195 + chained_irq_enter(chip, desc); 196 + vt8500_handle_irq_common(intc); 197 + chained_irq_exit(chip, desc); 188 198 } 189 199 190 200 static int __init vt8500_irq_init(struct device_node *node, 191 201 struct device_node *parent) 192 202 { 193 - int irq, i; 194 - struct device_node *np = node; 203 + struct vt8500_irq_data *intc; 204 + int irq, i, ret = 0; 195 205 196 - if (active_cnt == VT8500_INTC_MAX) { 197 - pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n", 198 - __func__); 199 - goto out; 200 - } 206 + intc = kzalloc(sizeof(*intc), GFP_KERNEL); 207 + if (!intc) 208 + return -ENOMEM; 201 209 202 - intc[active_cnt].base = of_iomap(np, 0); 203 - intc[active_cnt].domain = irq_domain_add_linear(node, 64, 204 - &vt8500_irq_domain_ops, &intc[active_cnt]); 205 - 206 - if (!intc[active_cnt].base) { 210 + intc->base = of_iomap(node, 0); 211 + if (!intc->base) { 207 212 pr_err("%s: Unable to map IO memory\n", __func__); 208 - goto out; 213 + ret = -ENOMEM; 214 + goto err_free; 209 215 } 210 216 211 - if (!intc[active_cnt].domain) { 217 + intc->domain = irq_domain_create_linear(of_fwnode_handle(node), 64, 218 + &vt8500_irq_domain_ops, intc); 219 + if (!intc->domain) { 212 220 pr_err("%s: Unable to add irq domain!\n", __func__); 213 - goto out; 221 + ret = -ENOMEM; 222 + goto err_unmap; 214 223 } 215 
224 216 - set_handle_irq(vt8500_handle_irq); 217 - 218 - vt8500_init_irq_hw(intc[active_cnt].base); 225 + vt8500_init_irq_hw(intc->base); 219 226 220 227 pr_info("vt8500-irq: Added interrupt controller\n"); 221 228 222 - active_cnt++; 223 - 224 - /* check if this is a slaved controller */ 225 - if (of_irq_count(np) != 0) { 226 - /* check that we have the correct number of interrupts */ 227 - if (of_irq_count(np) != 8) { 228 - pr_err("%s: Incorrect IRQ map for slaved controller\n", 229 - __func__); 230 - return -EINVAL; 231 - } 232 - 233 - for (i = 0; i < 8; i++) { 234 - irq = irq_of_parse_and_map(np, i); 235 - enable_irq(irq); 229 + /* check if this is a chained controller */ 230 + if (of_irq_count(node) != 0) { 231 + for (i = 0; i < of_irq_count(node); i++) { 232 + irq = irq_of_parse_and_map(node, i); 233 + irq_set_chained_handler_and_data(irq, vt8500_handle_irq_chained, 234 + intc); 236 235 } 237 236 238 237 pr_info("vt8500-irq: Enabled slave->parent interrupts\n"); 238 + } else { 239 + primary_intc = intc; 240 + set_handle_irq(vt8500_handle_irq); 239 241 } 240 - out: 241 242 return 0; 243 + 244 + err_unmap: 245 + iounmap(intc->base); 246 + err_free: 247 + kfree(intc); 248 + return ret; 242 249 } 243 250 244 251 IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
+1 -2
drivers/soc/dove/pmu.c
··· 257 257 * So, let's structure the code so that the window is as small as 258 258 * possible. 259 259 */ 260 - irq_gc_lock(gc); 260 + guard(raw_spinlock)(&gc->lock); 261 261 done &= readl_relaxed(base + PMC_IRQ_CAUSE); 262 262 writel_relaxed(done, base + PMC_IRQ_CAUSE); 263 - irq_gc_unlock(gc); 264 263 } 265 264 266 265 static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
-25
include/linux/irq.h
··· 1221 1221 1222 1222 #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) 1223 1223 1224 - #ifdef CONFIG_SMP 1225 - static inline void irq_gc_lock(struct irq_chip_generic *gc) 1226 - { 1227 - raw_spin_lock(&gc->lock); 1228 - } 1229 - 1230 - static inline void irq_gc_unlock(struct irq_chip_generic *gc) 1231 - { 1232 - raw_spin_unlock(&gc->lock); 1233 - } 1234 - #else 1235 - static inline void irq_gc_lock(struct irq_chip_generic *gc) { } 1236 - static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } 1237 - #endif 1238 - 1239 - /* 1240 - * The irqsave variants are for usage in non interrupt code. Do not use 1241 - * them in irq_chip callbacks. Use irq_gc_lock() instead. 1242 - */ 1243 - #define irq_gc_lock_irqsave(gc, flags) \ 1244 - raw_spin_lock_irqsave(&(gc)->lock, flags) 1245 - 1246 - #define irq_gc_unlock_irqrestore(gc, flags) \ 1247 - raw_spin_unlock_irqrestore(&(gc)->lock, flags) 1248 - 1249 1224 static inline void irq_reg_writel(struct irq_chip_generic *gc, 1250 1225 u32 val, int reg_offset) 1251 1226 {
+16 -31
kernel/irq/generic-chip.c
··· 40 40 struct irq_chip_type *ct = irq_data_get_chip_type(d); 41 41 u32 mask = d->mask; 42 42 43 - irq_gc_lock(gc); 43 + guard(raw_spinlock)(&gc->lock); 44 44 irq_reg_writel(gc, mask, ct->regs.disable); 45 45 *ct->mask_cache &= ~mask; 46 - irq_gc_unlock(gc); 47 46 } 48 47 EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg); 49 48 ··· 59 60 struct irq_chip_type *ct = irq_data_get_chip_type(d); 60 61 u32 mask = d->mask; 61 62 62 - irq_gc_lock(gc); 63 + guard(raw_spinlock)(&gc->lock); 63 64 *ct->mask_cache |= mask; 64 65 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); 65 - irq_gc_unlock(gc); 66 66 } 67 67 EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit); 68 68 ··· 78 80 struct irq_chip_type *ct = irq_data_get_chip_type(d); 79 81 u32 mask = d->mask; 80 82 81 - irq_gc_lock(gc); 83 + guard(raw_spinlock)(&gc->lock); 82 84 *ct->mask_cache &= ~mask; 83 85 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); 84 - irq_gc_unlock(gc); 85 86 } 86 87 EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit); 87 88 ··· 97 100 struct irq_chip_type *ct = irq_data_get_chip_type(d); 98 101 u32 mask = d->mask; 99 102 100 - irq_gc_lock(gc); 103 + guard(raw_spinlock)(&gc->lock); 101 104 irq_reg_writel(gc, mask, ct->regs.enable); 102 105 *ct->mask_cache |= mask; 103 - irq_gc_unlock(gc); 104 106 } 105 107 EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg); 106 108 ··· 113 117 struct irq_chip_type *ct = irq_data_get_chip_type(d); 114 118 u32 mask = d->mask; 115 119 116 - irq_gc_lock(gc); 120 + guard(raw_spinlock)(&gc->lock); 117 121 irq_reg_writel(gc, mask, ct->regs.ack); 118 - irq_gc_unlock(gc); 119 122 } 120 123 EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit); 121 124 ··· 128 133 struct irq_chip_type *ct = irq_data_get_chip_type(d); 129 134 u32 mask = ~d->mask; 130 135 131 - irq_gc_lock(gc); 136 + guard(raw_spinlock)(&gc->lock); 132 137 irq_reg_writel(gc, mask, ct->regs.ack); 133 - irq_gc_unlock(gc); 134 138 } 135 139 136 140 /** ··· 150 156 struct irq_chip_type *ct = irq_data_get_chip_type(d); 151 157 u32 mask = d->mask; 152 158 153 - 
irq_gc_lock(gc); 159 + guard(raw_spinlock)(&gc->lock); 154 160 irq_reg_writel(gc, mask, ct->regs.disable); 155 161 *ct->mask_cache &= ~mask; 156 162 irq_reg_writel(gc, mask, ct->regs.ack); 157 - irq_gc_unlock(gc); 158 163 } 159 164 EXPORT_SYMBOL_GPL(irq_gc_mask_disable_and_ack_set); 160 165 ··· 167 174 struct irq_chip_type *ct = irq_data_get_chip_type(d); 168 175 u32 mask = d->mask; 169 176 170 - irq_gc_lock(gc); 177 + guard(raw_spinlock)(&gc->lock); 171 178 irq_reg_writel(gc, mask, ct->regs.eoi); 172 - irq_gc_unlock(gc); 173 179 } 174 180 175 181 /** ··· 188 196 if (!(mask & gc->wake_enabled)) 189 197 return -EINVAL; 190 198 191 - irq_gc_lock(gc); 199 + guard(raw_spinlock)(&gc->lock); 192 200 if (on) 193 201 gc->wake_active |= mask; 194 202 else 195 203 gc->wake_active &= ~mask; 196 - irq_gc_unlock(gc); 197 204 return 0; 198 205 } 199 206 EXPORT_SYMBOL_GPL(irq_gc_set_wake); ··· 279 288 { 280 289 struct irq_domain_chip_generic *dgc; 281 290 struct irq_chip_generic *gc; 282 - unsigned long flags; 283 291 int numchips, i; 284 292 size_t dgc_sz; 285 293 size_t gc_sz; ··· 330 340 goto err; 331 341 } 332 342 333 - raw_spin_lock_irqsave(&gc_lock, flags); 334 - list_add_tail(&gc->list, &gc_list); 335 - raw_spin_unlock_irqrestore(&gc_lock, flags); 343 + scoped_guard (raw_spinlock_irqsave, &gc_lock) 344 + list_add_tail(&gc->list, &gc_list); 336 345 /* Calc pointer to the next generic chip */ 337 346 tmp += gc_sz; 338 347 } ··· 448 459 struct irq_chip_generic *gc; 449 460 struct irq_chip_type *ct; 450 461 struct irq_chip *chip; 451 - unsigned long flags; 452 462 int idx; 453 463 454 464 gc = __irq_get_domain_generic_chip(d, hw_irq); ··· 467 479 468 480 /* We only init the cache for the first mapping of a generic chip */ 469 481 if (!gc->installed) { 470 - raw_spin_lock_irqsave(&gc->lock, flags); 482 + guard(raw_spinlock_irqsave)(&gc->lock); 471 483 irq_gc_init_mask_cache(gc, dgc->gc_flags); 472 - raw_spin_unlock_irqrestore(&gc->lock, flags); 473 484 } 474 485 475 486 /* Mark 
the interrupt as installed */ ··· 535 548 struct irq_chip *chip = &ct->chip; 536 549 unsigned int i; 537 550 538 - raw_spin_lock(&gc_lock); 539 - list_add_tail(&gc->list, &gc_list); 540 - raw_spin_unlock(&gc_lock); 551 + scoped_guard (raw_spinlock, &gc_lock) 552 + list_add_tail(&gc->list, &gc_list); 541 553 542 554 irq_gc_init_mask_cache(gc, flags); 543 555 ··· 602 616 { 603 617 unsigned int i, virq; 604 618 605 - raw_spin_lock(&gc_lock); 606 - list_del(&gc->list); 607 - raw_spin_unlock(&gc_lock); 619 + scoped_guard (raw_spinlock, &gc_lock) 620 + list_del(&gc->list); 608 621 609 622 for (i = 0; msk; msk >>= 1, i++) { 610 623 if (!(msk & 0x01))