Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'irq-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull interrupt subsystem updates from Thomas Gleixner:
"Tree wide:

- Make nr_irqs static to the core code and provide accessor functions
to remove existing and prevent future aliasing problems with local
variables or function arguments of the same name.

Core code:

- Prevent freeing an interrupt in the devres code which is not
managed by devres in the first place.

- Use seq_put_decimal_ull_width() for decimal values output in
/proc/interrupts which increases performance significantly as it
avoids parsing the format strings over and over.

- Optimize raising the timer and hrtimer soft interrupts by using the
'set bit only' variants instead of the combined version which
checks whether ksoftirqd should be woken up. The latter is a
pointless exercise as both soft interrupts are raised in the
context of the timer interrupt and therefore never wake up
ksoftirqd.

- Delegate timer/hrtimer soft interrupt processing to a dedicated
thread on RT.

Timer and hrtimer soft interrupts are always processed in ksoftirqd
on RT enabled kernels. This can lead to high latencies when other
soft interrupts are delegated to ksoftirqd as well.

The separate thread allows running them separately under an RT
scheduling policy to reduce the latency overhead.

Drivers:

- New drivers or extensions of existing drivers to support Renesas
RZ/V2H(P), Aspeed AST27XX, T-HEAD C900 and ATMEL sam9x7 interrupt
chips

- Support for multi-cluster GICs on MIPS.

MIPS CPUs can come with multiple CPU clusters, where each CPU
cluster has its own GIC (Generic Interrupt Controller). This
requires accessing the GIC of a remote cluster through a redirect
register block.

This is encapsulated into a set of helper functions to keep the
complexity out of the actual code paths which handle the GIC
details.

- Support for encrypted guests in the ARM GICV3 ITS driver

The ITS page needs to be shared with the hypervisor and therefore
must be decrypted.

- Small cleanups and fixes all over the place"

* tag 'irq-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (50 commits)
irqchip/riscv-aplic: Prevent crash when MSI domain is missing
genirq/proc: Use seq_put_decimal_ull_width() for decimal values
softirq: Use a dedicated thread for timer wakeups on PREEMPT_RT.
timers: Use __raise_softirq_irqoff() to raise the softirq.
hrtimer: Use __raise_softirq_irqoff() to raise the softirq
riscv: defconfig: Enable T-HEAD C900 ACLINT SSWI drivers
irqchip: Add T-HEAD C900 ACLINT SSWI driver
dt-bindings: interrupt-controller: Add T-HEAD C900 ACLINT SSWI device
irqchip/stm32mp-exti: Use of_property_present() for non-boolean properties
irqchip/mips-gic: Fix selection of GENERIC_IRQ_EFFECTIVE_AFF_MASK
irqchip/mips-gic: Prevent indirect access to clusters without CPU cores
irqchip/mips-gic: Multi-cluster support
irqchip/mips-gic: Setup defaults in each cluster
irqchip/mips-gic: Support multi-cluster in for_each_online_cpu_gic()
irqchip/mips-gic: Replace open coded online CPU iterations
genirq/irqdesc: Use str_enabled_disabled() helper in wakeup_show()
genirq/devres: Don't free interrupt which is not managed by devres
irqchip/gic-v3-its: Fix over allocation in itt_alloc_pool()
irqchip/aspeed-intc: Add AST27XX INTC support
dt-bindings: interrupt-controller: Add support for ASPEED AST27XX INTC
...

+1945 -134
+86
Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2700-intc.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/interrupt-controller/aspeed,ast2700-intc.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Aspeed AST2700 Interrupt Controller 8 + 9 + description: 10 + This interrupt controller hardware is second level interrupt controller that 11 + is hooked to a parent interrupt controller. It's useful to combine multiple 12 + interrupt sources into 1 interrupt to parent interrupt controller. 13 + 14 + maintainers: 15 + - Kevin Chen <kevin_chen@aspeedtech.com> 16 + 17 + properties: 18 + compatible: 19 + enum: 20 + - aspeed,ast2700-intc-ic 21 + 22 + reg: 23 + maxItems: 1 24 + 25 + interrupt-controller: true 26 + 27 + '#interrupt-cells': 28 + const: 2 29 + description: 30 + The first cell is the IRQ number, the second cell is the trigger 31 + type as defined in interrupt.txt in this directory. 32 + 33 + interrupts: 34 + maxItems: 6 35 + description: | 36 + Depend to which INTC0 or INTC1 used. 37 + INTC0 and INTC1 are two kinds of interrupt controller with enable and raw 38 + status registers for use. 39 + INTC0 is used to assert GIC if interrupt in INTC1 asserted. 40 + INTC1 is used to assert INTC0 if interrupt of modules asserted. 41 + +-----+ +-------+ +---------+---module0 42 + | GIC |---| INTC0 |--+--| INTC1_0 |---module2 43 + | | | | | | |---... 44 + +-----+ +-------+ | +---------+---module31 45 + | 46 + | +---------+---module0 47 + +---| INTC1_1 |---module2 48 + | | |---... 49 + | +---------+---module31 50 + ... 51 + | +---------+---module0 52 + +---| INTC1_5 |---module2 53 + | |---... 
54 + +---------+---module31 55 + 56 + 57 + required: 58 + - compatible 59 + - reg 60 + - interrupt-controller 61 + - '#interrupt-cells' 62 + - interrupts 63 + 64 + additionalProperties: false 65 + 66 + examples: 67 + - | 68 + #include <dt-bindings/interrupt-controller/arm-gic.h> 69 + 70 + bus { 71 + #address-cells = <2>; 72 + #size-cells = <2>; 73 + 74 + interrupt-controller@12101b00 { 75 + compatible = "aspeed,ast2700-intc-ic"; 76 + reg = <0 0x12101b00 0 0x10>; 77 + #interrupt-cells = <2>; 78 + interrupt-controller; 79 + interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>, 80 + <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>, 81 + <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>, 82 + <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>, 83 + <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>, 84 + <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>; 85 + }; 86 + };
+1
Documentation/devicetree/bindings/interrupt-controller/atmel,aic.yaml
··· 23 23 - atmel,sama5d3-aic 24 24 - atmel,sama5d4-aic 25 25 - microchip,sam9x60-aic 26 + - microchip,sam9x7-aic 26 27 27 28 reg: 28 29 maxItems: 1
+278
Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/interrupt-controller/renesas,rzv2h-icu.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Renesas RZ/V2H(P) Interrupt Control Unit 8 + 9 + maintainers: 10 + - Fabrizio Castro <fabrizio.castro.jz@renesas.com> 11 + - Geert Uytterhoeven <geert+renesas@glider.be> 12 + 13 + allOf: 14 + - $ref: /schemas/interrupt-controller.yaml# 15 + 16 + description: 17 + The Interrupt Control Unit (ICU) handles external interrupts (NMI, IRQ, and 18 + TINT), error interrupts, DMAC requests, GPT interrupts, and internal 19 + interrupts. 20 + 21 + properties: 22 + compatible: 23 + const: renesas,r9a09g057-icu # RZ/V2H(P) 24 + 25 + '#interrupt-cells': 26 + description: The first cell is the SPI number of the NMI or the 27 + PORT_IRQ[0-15] interrupt, as per user manual. The second cell is used to 28 + specify the flag. 29 + const: 2 30 + 31 + '#address-cells': 32 + const: 0 33 + 34 + interrupt-controller: true 35 + 36 + reg: 37 + maxItems: 1 38 + 39 + interrupts: 40 + minItems: 58 41 + items: 42 + - description: NMI interrupt 43 + - description: PORT_IRQ0 interrupt 44 + - description: PORT_IRQ1 interrupt 45 + - description: PORT_IRQ2 interrupt 46 + - description: PORT_IRQ3 interrupt 47 + - description: PORT_IRQ4 interrupt 48 + - description: PORT_IRQ5 interrupt 49 + - description: PORT_IRQ6 interrupt 50 + - description: PORT_IRQ7 interrupt 51 + - description: PORT_IRQ8 interrupt 52 + - description: PORT_IRQ9 interrupt 53 + - description: PORT_IRQ10 interrupt 54 + - description: PORT_IRQ11 interrupt 55 + - description: PORT_IRQ12 interrupt 56 + - description: PORT_IRQ13 interrupt 57 + - description: PORT_IRQ14 interrupt 58 + - description: PORT_IRQ15 interrupt 59 + - description: GPIO interrupt, TINT0 60 + - description: GPIO interrupt, TINT1 61 + - description: GPIO interrupt, TINT2 62 + - description: GPIO interrupt, TINT3 63 + - 
description: GPIO interrupt, TINT4 64 + - description: GPIO interrupt, TINT5 65 + - description: GPIO interrupt, TINT6 66 + - description: GPIO interrupt, TINT7 67 + - description: GPIO interrupt, TINT8 68 + - description: GPIO interrupt, TINT9 69 + - description: GPIO interrupt, TINT10 70 + - description: GPIO interrupt, TINT11 71 + - description: GPIO interrupt, TINT12 72 + - description: GPIO interrupt, TINT13 73 + - description: GPIO interrupt, TINT14 74 + - description: GPIO interrupt, TINT15 75 + - description: GPIO interrupt, TINT16 76 + - description: GPIO interrupt, TINT17 77 + - description: GPIO interrupt, TINT18 78 + - description: GPIO interrupt, TINT19 79 + - description: GPIO interrupt, TINT20 80 + - description: GPIO interrupt, TINT21 81 + - description: GPIO interrupt, TINT22 82 + - description: GPIO interrupt, TINT23 83 + - description: GPIO interrupt, TINT24 84 + - description: GPIO interrupt, TINT25 85 + - description: GPIO interrupt, TINT26 86 + - description: GPIO interrupt, TINT27 87 + - description: GPIO interrupt, TINT28 88 + - description: GPIO interrupt, TINT29 89 + - description: GPIO interrupt, TINT30 90 + - description: GPIO interrupt, TINT31 91 + - description: Software interrupt, INTA55_0 92 + - description: Software interrupt, INTA55_1 93 + - description: Software interrupt, INTA55_2 94 + - description: Software interrupt, INTA55_3 95 + - description: Error interrupt to CA55 96 + - description: GTCCRA compare match/input capture (U0) 97 + - description: GTCCRB compare match/input capture (U0) 98 + - description: GTCCRA compare match/input capture (U1) 99 + - description: GTCCRB compare match/input capture (U1) 100 + 101 + interrupt-names: 102 + minItems: 58 103 + items: 104 + - const: nmi 105 + - const: port_irq0 106 + - const: port_irq1 107 + - const: port_irq2 108 + - const: port_irq3 109 + - const: port_irq4 110 + - const: port_irq5 111 + - const: port_irq6 112 + - const: port_irq7 113 + - const: port_irq8 114 + - const: 
port_irq9 115 + - const: port_irq10 116 + - const: port_irq11 117 + - const: port_irq12 118 + - const: port_irq13 119 + - const: port_irq14 120 + - const: port_irq15 121 + - const: tint0 122 + - const: tint1 123 + - const: tint2 124 + - const: tint3 125 + - const: tint4 126 + - const: tint5 127 + - const: tint6 128 + - const: tint7 129 + - const: tint8 130 + - const: tint9 131 + - const: tint10 132 + - const: tint11 133 + - const: tint12 134 + - const: tint13 135 + - const: tint14 136 + - const: tint15 137 + - const: tint16 138 + - const: tint17 139 + - const: tint18 140 + - const: tint19 141 + - const: tint20 142 + - const: tint21 143 + - const: tint22 144 + - const: tint23 145 + - const: tint24 146 + - const: tint25 147 + - const: tint26 148 + - const: tint27 149 + - const: tint28 150 + - const: tint29 151 + - const: tint30 152 + - const: tint31 153 + - const: int-ca55-0 154 + - const: int-ca55-1 155 + - const: int-ca55-2 156 + - const: int-ca55-3 157 + - const: icu-error-ca55 158 + - const: gpt-u0-gtciada 159 + - const: gpt-u0-gtciadb 160 + - const: gpt-u1-gtciada 161 + - const: gpt-u1-gtciadb 162 + 163 + clocks: 164 + maxItems: 1 165 + 166 + power-domains: 167 + maxItems: 1 168 + 169 + resets: 170 + maxItems: 1 171 + 172 + required: 173 + - compatible 174 + - reg 175 + - '#interrupt-cells' 176 + - '#address-cells' 177 + - interrupt-controller 178 + - interrupts 179 + - interrupt-names 180 + - clocks 181 + - power-domains 182 + - resets 183 + 184 + unevaluatedProperties: false 185 + 186 + examples: 187 + - | 188 + #include <dt-bindings/interrupt-controller/arm-gic.h> 189 + #include <dt-bindings/clock/renesas-cpg-mssr.h> 190 + 191 + icu: interrupt-controller@10400000 { 192 + compatible = "renesas,r9a09g057-icu"; 193 + reg = <0x10400000 0x10000>; 194 + #interrupt-cells = <2>; 195 + #address-cells = <0>; 196 + interrupt-controller; 197 + interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, 198 + <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, 199 + <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, 200 + 
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, 201 + <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, 202 + <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, 203 + <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, 204 + <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, 205 + <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, 206 + <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, 207 + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, 208 + <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, 209 + <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, 210 + <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, 211 + <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, 212 + <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, 213 + <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, 214 + <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>, 215 + <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>, 216 + <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>, 217 + <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>, 218 + <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>, 219 + <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>, 220 + <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>, 221 + <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>, 222 + <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>, 223 + <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>, 224 + <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>, 225 + <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>, 226 + <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>, 227 + <GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>, 228 + <GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>, 229 + <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>, 230 + <GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>, 231 + <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>, 232 + <GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>, 233 + <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>, 234 + <GIC_SPI 439 IRQ_TYPE_LEVEL_HIGH>, 235 + <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>, 236 + <GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>, 237 + <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>, 238 + <GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>, 239 + <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>, 240 + <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>, 241 + <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>, 242 + <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>, 243 + <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>, 244 + <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>, 245 + <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>, 246 + <GIC_SPI 262 IRQ_TYPE_EDGE_RISING>, 247 + <GIC_SPI 263 IRQ_TYPE_EDGE_RISING>, 248 + <GIC_SPI 264 IRQ_TYPE_EDGE_RISING>, 249 + <GIC_SPI 
265 IRQ_TYPE_EDGE_RISING>, 250 + <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>, 251 + <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>, 252 + <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>, 253 + <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>, 254 + <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>; 255 + interrupt-names = "nmi", 256 + "port_irq0", "port_irq1", "port_irq2", 257 + "port_irq3", "port_irq4", "port_irq5", 258 + "port_irq6", "port_irq7", "port_irq8", 259 + "port_irq9", "port_irq10", "port_irq11", 260 + "port_irq12", "port_irq13", "port_irq14", 261 + "port_irq15", 262 + "tint0", "tint1", "tint2", "tint3", 263 + "tint4", "tint5", "tint6", "tint7", 264 + "tint8", "tint9", "tint10", "tint11", 265 + "tint12", "tint13", "tint14", "tint15", 266 + "tint16", "tint17", "tint18", "tint19", 267 + "tint20", "tint21", "tint22", "tint23", 268 + "tint24", "tint25", "tint26", "tint27", 269 + "tint28", "tint29", "tint30", "tint31", 270 + "int-ca55-0", "int-ca55-1", 271 + "int-ca55-2", "int-ca55-3", 272 + "icu-error-ca55", 273 + "gpt-u0-gtciada", "gpt-u0-gtciadb", 274 + "gpt-u1-gtciada", "gpt-u1-gtciadb"; 275 + clocks = <&cpg CPG_MOD 0x5>; 276 + power-domains = <&cpg>; 277 + resets = <&cpg 0x36>; 278 + };
+58
Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/interrupt-controller/thead,c900-aclint-sswi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: T-HEAD C900 ACLINT Supervisor-level Software Interrupt Device 8 + 9 + maintainers: 10 + - Inochi Amaoto <inochiama@outlook.com> 11 + 12 + description: 13 + The SSWI device is a part of the THEAD ACLINT device. It provides 14 + supervisor-level IPI functionality for a set of HARTs on a THEAD 15 + platform. It provides a register to set an IPI (SETSSIP) for each 16 + HART connected to the SSWI device. 17 + 18 + properties: 19 + compatible: 20 + items: 21 + - enum: 22 + - sophgo,sg2044-aclint-sswi 23 + - const: thead,c900-aclint-sswi 24 + 25 + reg: 26 + maxItems: 1 27 + 28 + "#interrupt-cells": 29 + const: 0 30 + 31 + interrupt-controller: true 32 + 33 + interrupts-extended: 34 + minItems: 1 35 + maxItems: 4095 36 + 37 + additionalProperties: false 38 + 39 + required: 40 + - compatible 41 + - reg 42 + - "#interrupt-cells" 43 + - interrupt-controller 44 + - interrupts-extended 45 + 46 + examples: 47 + - | 48 + interrupt-controller@94000000 { 49 + compatible = "sophgo,sg2044-aclint-sswi", "thead,c900-aclint-sswi"; 50 + reg = <0x94000000 0x00004000>; 51 + #interrupt-cells = <0>; 52 + interrupt-controller; 53 + interrupts-extended = <&cpu1intc 1>, 54 + <&cpu2intc 1>, 55 + <&cpu3intc 1>, 56 + <&cpu4intc 1>; 57 + }; 58 + ...
+2 -3
arch/arm/kernel/irq.c
··· 111 111 * Some hardware gives randomly wrong interrupts. Rather 112 112 * than crashing, do something sensible. 113 113 */ 114 - if (unlikely(!irq || irq >= nr_irqs)) 114 + if (unlikely(!irq || irq >= irq_get_nr_irqs())) 115 115 desc = NULL; 116 116 else 117 117 desc = irq_to_desc(irq); ··· 151 151 #ifdef CONFIG_SPARSE_IRQ 152 152 int __init arch_probe_nr_irqs(void) 153 153 { 154 - nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS; 155 - return nr_irqs; 154 + return irq_set_nr_irqs(machine_desc->nr_irqs ? : NR_IRQS); 156 155 } 157 156 #endif
+90
arch/arm64/boot/dts/renesas/r9a09g057.dtsi
··· 90 90 #size-cells = <2>; 91 91 ranges; 92 92 93 + icu: interrupt-controller@10400000 { 94 + compatible = "renesas,r9a09g057-icu"; 95 + reg = <0 0x10400000 0 0x10000>; 96 + #interrupt-cells = <2>; 97 + #address-cells = <0>; 98 + interrupt-controller; 99 + interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, 100 + <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, 101 + <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, 102 + <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, 103 + <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, 104 + <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, 105 + <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, 106 + <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, 107 + <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, 108 + <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, 109 + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, 110 + <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, 111 + <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, 112 + <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, 113 + <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, 114 + <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, 115 + <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, 116 + <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>, 117 + <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>, 118 + <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>, 119 + <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>, 120 + <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>, 121 + <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>, 122 + <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>, 123 + <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>, 124 + <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>, 125 + <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>, 126 + <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>, 127 + <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>, 128 + <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>, 129 + <GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>, 130 + <GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>, 131 + <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>, 132 + <GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>, 133 + <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>, 134 + <GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>, 135 + <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>, 136 + <GIC_SPI 439 IRQ_TYPE_LEVEL_HIGH>, 137 + <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>, 138 + <GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>, 139 + <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>, 140 + <GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>, 141 + <GIC_SPI 444 
IRQ_TYPE_LEVEL_HIGH>, 142 + <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>, 143 + <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>, 144 + <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>, 145 + <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>, 146 + <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>, 147 + <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>, 148 + <GIC_SPI 262 IRQ_TYPE_EDGE_RISING>, 149 + <GIC_SPI 263 IRQ_TYPE_EDGE_RISING>, 150 + <GIC_SPI 264 IRQ_TYPE_EDGE_RISING>, 151 + <GIC_SPI 265 IRQ_TYPE_EDGE_RISING>, 152 + <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>, 153 + <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>, 154 + <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>, 155 + <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>, 156 + <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>; 157 + interrupt-names = "nmi", 158 + "port_irq0", "port_irq1", "port_irq2", 159 + "port_irq3", "port_irq4", "port_irq5", 160 + "port_irq6", "port_irq7", "port_irq8", 161 + "port_irq9", "port_irq10", "port_irq11", 162 + "port_irq12", "port_irq13", "port_irq14", 163 + "port_irq15", 164 + "tint0", "tint1", "tint2", "tint3", 165 + "tint4", "tint5", "tint6", "tint7", 166 + "tint8", "tint9", "tint10", "tint11", 167 + "tint12", "tint13", "tint14", "tint15", 168 + "tint16", "tint17", "tint18", "tint19", 169 + "tint20", "tint21", "tint22", "tint23", 170 + "tint24", "tint25", "tint26", "tint27", 171 + "tint28", "tint29", "tint30", "tint31", 172 + "int-ca55-0", "int-ca55-1", 173 + "int-ca55-2", "int-ca55-3", 174 + "icu-error-ca55", 175 + "gpt-u0-gtciada", "gpt-u0-gtciadb", 176 + "gpt-u1-gtciada", "gpt-u1-gtciadb"; 177 + clocks = <&cpg CPG_MOD 0x5>; 178 + power-domains = <&cpg>; 179 + resets = <&cpg 0x36>; 180 + }; 181 + 93 182 pinctrl: pinctrl@10410000 { 94 183 compatible = "renesas,r9a09g057-pinctrl"; 95 184 reg = <0 0x10410000 0 0x10000>; ··· 188 99 gpio-ranges = <&pinctrl 0 0 96>; 189 100 #interrupt-cells = <2>; 190 101 interrupt-controller; 102 + interrupt-parent = <&icu>; 191 103 power-domains = <&cpg>; 192 104 resets = <&cpg 0xa5>, <&cpg 0xa6>; 193 105 };
+2 -2
arch/loongarch/kernel/irq.c
··· 92 92 int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS); 93 93 94 94 if (!cpu_has_avecint) 95 - nr_irqs = (64 + NR_VECTORS * nr_io_pics); 95 + irq_set_nr_irqs(64 + NR_VECTORS * nr_io_pics); 96 96 else 97 - nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics)); 97 + irq_set_nr_irqs(64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics)); 98 98 99 99 return NR_IRQS_LEGACY; 100 100 }
+1 -1
arch/powerpc/platforms/cell/axon_msi.c
··· 112 112 pr_devel("axon_msi: woff %x roff %x msi %x\n", 113 113 write_offset, msic->read_offset, msi); 114 114 115 - if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { 115 + if (msi < irq_get_nr_irqs() && irq_get_chip_data(msi) == msic) { 116 116 generic_handle_irq(msi); 117 117 msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); 118 118 } else {
+1
arch/riscv/configs/defconfig
··· 256 256 CONFIG_RPMSG_VIRTIO=y 257 257 CONFIG_PM_DEVFREQ=y 258 258 CONFIG_IIO=y 259 + CONFIG_THEAD_C900_ACLINT_SSWI=y 259 260 CONFIG_PHY_SUN4I_USB=m 260 261 CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m 261 262 CONFIG_PHY_STARFIVE_JH7110_PCIE=m
+1 -1
arch/s390/kernel/irq.c
··· 258 258 seq_putc(p, '\n'); 259 259 goto out; 260 260 } 261 - if (index < nr_irqs) { 261 + if (index < irq_get_nr_irqs()) { 262 262 show_msi_interrupt(p, index); 263 263 goto out; 264 264 }
+4 -2
arch/x86/kernel/acpi/boot.c
··· 1171 1171 } 1172 1172 1173 1173 count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, 1174 - acpi_parse_int_src_ovr, nr_irqs); 1174 + acpi_parse_int_src_ovr, 1175 + irq_get_nr_irqs()); 1175 1176 if (count < 0) { 1176 1177 pr_err("Error parsing interrupt source overrides entry\n"); 1177 1178 /* TBD: Cleanup to allow fallback to MPS */ ··· 1192 1191 mp_config_acpi_legacy_irqs(); 1193 1192 1194 1193 count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, 1195 - acpi_parse_nmi_src, nr_irqs); 1194 + acpi_parse_nmi_src, 1195 + irq_get_nr_irqs()); 1196 1196 if (count < 0) { 1197 1197 pr_err("Error parsing NMI SRC entry\n"); 1198 1198 /* TBD: Cleanup to allow fallback to MPS */
+4 -4
arch/x86/kernel/apic/vector.c
··· 712 712 { 713 713 int nr; 714 714 715 - if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 716 - nr_irqs = NR_VECTORS * nr_cpu_ids; 715 + if (irq_get_nr_irqs() > NR_VECTORS * nr_cpu_ids) 716 + irq_set_nr_irqs(NR_VECTORS * nr_cpu_ids); 717 717 718 718 nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; 719 719 #if defined(CONFIG_PCI_MSI) ··· 725 725 else 726 726 nr += gsi_top * 16; 727 727 #endif 728 - if (nr < nr_irqs) 729 - nr_irqs = nr; 728 + if (nr < irq_get_nr_irqs()) 729 + irq_set_nr_irqs(nr); 730 730 731 731 /* 732 732 * We don't know if PIC is present at this point so we need to do
+1
drivers/char/hpet.c
··· 162 162 163 163 static void hpet_timer_set_irq(struct hpet_dev *devp) 164 164 { 165 + const unsigned int nr_irqs = irq_get_nr_irqs(); 165 166 unsigned long v; 166 167 int irq, gsi; 167 168 struct hpet_timer __iomem *timer;
+20
drivers/irqchip/Kconfig
··· 258 258 Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller 259 259 for external devices. 260 260 261 + config RENESAS_RZV2H_ICU 262 + bool "Renesas RZ/V2H(P) ICU support" if COMPILE_TEST 263 + select GENERIC_IRQ_CHIP 264 + select IRQ_DOMAIN_HIERARCHY 265 + help 266 + Enable support for the Renesas RZ/V2H(P) Interrupt Control Unit (ICU) 267 + 261 268 config SL28CPLD_INTC 262 269 bool "Kontron sl28cpld IRQ controller" 263 270 depends on MFD_SL28CPLD=y || COMPILE_TEST ··· 345 338 346 339 config MIPS_GIC 347 340 bool 341 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP 348 342 select GENERIC_IRQ_IPI if SMP 349 343 select IRQ_DOMAIN_HIERARCHY 350 344 select MIPS_CM ··· 609 601 help 610 602 This enables support for the INTC chip found in StarFive JH8100 611 603 SoC. 604 + 605 + If you don't know what to do here, say Y. 606 + 607 + config THEAD_C900_ACLINT_SSWI 608 + bool "THEAD C9XX ACLINT S-mode IPI Interrupt Controller" 609 + depends on RISCV 610 + depends on SMP 611 + select IRQ_DOMAIN_HIERARCHY 612 + select GENERIC_IRQ_IPI_MUX 613 + help 614 + This enables support for T-HEAD specific ACLINT SSWI device 615 + support. 612 616 613 617 If you don't know what to do here, say Y. 614 618
+3
drivers/irqchip/Makefile
··· 51 51 obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o 52 52 obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o 53 53 obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o 54 + obj-$(CONFIG_RENESAS_RZV2H_ICU) += irq-renesas-rzv2h.o 54 55 obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o 55 56 obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o 56 57 obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o ··· 85 84 obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o 86 85 obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o 87 86 obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o 87 + obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-intc.o 88 88 obj-$(CONFIG_STM32MP_EXTI) += irq-stm32mp-exti.o 89 89 obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o 90 90 obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o ··· 103 101 obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o 104 102 obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o 105 103 obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o 104 + obj-$(CONFIG_THEAD_C900_ACLINT_SSWI) += irq-thead-c900-aclint-sswi.o 106 105 obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o 107 106 obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o 108 107 obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
+139
drivers/irqchip/irq-aspeed-intc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Aspeed Interrupt Controller. 4 + * 5 + * Copyright (C) 2023 ASPEED Technology Inc. 6 + */ 7 + 8 + #include <linux/bitops.h> 9 + #include <linux/irq.h> 10 + #include <linux/irqchip.h> 11 + #include <linux/irqchip/chained_irq.h> 12 + #include <linux/irqdomain.h> 13 + #include <linux/of_address.h> 14 + #include <linux/of_irq.h> 15 + #include <linux/io.h> 16 + #include <linux/spinlock.h> 17 + 18 + #define INTC_INT_ENABLE_REG 0x00 19 + #define INTC_INT_STATUS_REG 0x04 20 + #define INTC_IRQS_PER_WORD 32 21 + 22 + struct aspeed_intc_ic { 23 + void __iomem *base; 24 + raw_spinlock_t gic_lock; 25 + raw_spinlock_t intc_lock; 26 + struct irq_domain *irq_domain; 27 + }; 28 + 29 + static void aspeed_intc_ic_irq_handler(struct irq_desc *desc) 30 + { 31 + struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc); 32 + struct irq_chip *chip = irq_desc_get_chip(desc); 33 + 34 + chained_irq_enter(chip, desc); 35 + 36 + scoped_guard(raw_spinlock, &intc_ic->gic_lock) { 37 + unsigned long bit, status; 38 + 39 + status = readl(intc_ic->base + INTC_INT_STATUS_REG); 40 + for_each_set_bit(bit, &status, INTC_IRQS_PER_WORD) { 41 + generic_handle_domain_irq(intc_ic->irq_domain, bit); 42 + writel(BIT(bit), intc_ic->base + INTC_INT_STATUS_REG); 43 + } 44 + } 45 + 46 + chained_irq_exit(chip, desc); 47 + } 48 + 49 + static void aspeed_intc_irq_mask(struct irq_data *data) 50 + { 51 + struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data); 52 + unsigned int mask = readl(intc_ic->base + INTC_INT_ENABLE_REG) & ~BIT(data->hwirq); 53 + 54 + guard(raw_spinlock)(&intc_ic->intc_lock); 55 + writel(mask, intc_ic->base + INTC_INT_ENABLE_REG); 56 + } 57 + 58 + static void aspeed_intc_irq_unmask(struct irq_data *data) 59 + { 60 + struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data); 61 + unsigned int unmask = readl(intc_ic->base + INTC_INT_ENABLE_REG) | BIT(data->hwirq); 62 + 63 + 
guard(raw_spinlock)(&intc_ic->intc_lock); 64 + writel(unmask, intc_ic->base + INTC_INT_ENABLE_REG); 65 + } 66 + 67 + static struct irq_chip aspeed_intc_chip = { 68 + .name = "ASPEED INTC", 69 + .irq_mask = aspeed_intc_irq_mask, 70 + .irq_unmask = aspeed_intc_irq_unmask, 71 + }; 72 + 73 + static int aspeed_intc_ic_map_irq_domain(struct irq_domain *domain, unsigned int irq, 74 + irq_hw_number_t hwirq) 75 + { 76 + irq_set_chip_and_handler(irq, &aspeed_intc_chip, handle_level_irq); 77 + irq_set_chip_data(irq, domain->host_data); 78 + 79 + return 0; 80 + } 81 + 82 + static const struct irq_domain_ops aspeed_intc_ic_irq_domain_ops = { 83 + .map = aspeed_intc_ic_map_irq_domain, 84 + }; 85 + 86 + static int __init aspeed_intc_ic_of_init(struct device_node *node, 87 + struct device_node *parent) 88 + { 89 + struct aspeed_intc_ic *intc_ic; 90 + int irq, i, ret = 0; 91 + 92 + intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL); 93 + if (!intc_ic) 94 + return -ENOMEM; 95 + 96 + intc_ic->base = of_iomap(node, 0); 97 + if (!intc_ic->base) { 98 + pr_err("Failed to iomap intc_ic base\n"); 99 + ret = -ENOMEM; 100 + goto err_free_ic; 101 + } 102 + writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG); 103 + writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG); 104 + 105 + intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD, 106 + &aspeed_intc_ic_irq_domain_ops, intc_ic); 107 + if (!intc_ic->irq_domain) { 108 + ret = -ENOMEM; 109 + goto err_iounmap; 110 + } 111 + 112 + raw_spin_lock_init(&intc_ic->gic_lock); 113 + raw_spin_lock_init(&intc_ic->intc_lock); 114 + 115 + /* Check all the irq numbers valid. If not, unmaps all the base and frees the data. 
*/ 116 + for (i = 0; i < of_irq_count(node); i++) { 117 + irq = irq_of_parse_and_map(node, i); 118 + if (!irq) { 119 + pr_err("Failed to get irq number\n"); 120 + ret = -EINVAL; 121 + goto err_iounmap; 122 + } 123 + } 124 + 125 + for (i = 0; i < of_irq_count(node); i++) { 126 + irq = irq_of_parse_and_map(node, i); 127 + irq_set_chained_handler_and_data(irq, aspeed_intc_ic_irq_handler, intc_ic); 128 + } 129 + 130 + return 0; 131 + 132 + err_iounmap: 133 + iounmap(intc_ic->base); 134 + err_free_ic: 135 + kfree(intc_ic); 136 + return ret; 137 + } 138 + 139 + IRQCHIP_DECLARE(ast2700_intc_ic, "aspeed,ast2700-intc-ic", aspeed_intc_ic_of_init);
+9
drivers/irqchip/irq-atmel-aic5.c
··· 319 319 { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup }, 320 320 { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup }, 321 321 { .compatible = "microchip,sam9x60", .data = sam9x60_aic_irq_fixup }, 322 + { .compatible = "microchip,sam9x7", .data = sam9x60_aic_irq_fixup }, 322 323 { /* sentinel */ }, 323 324 }; 324 325 ··· 406 405 return aic5_of_init(node, parent, NR_SAM9X60_IRQS); 407 406 } 408 407 IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init); 408 + 409 + #define NR_SAM9X7_IRQS 70 410 + 411 + static int __init sam9x7_aic5_of_init(struct device_node *node, struct device_node *parent) 412 + { 413 + return aic5_of_init(node, parent, NR_SAM9X7_IRQS); 414 + } 415 + IRQCHIP_DECLARE(sam9x7_aic5, "microchip,sam9x7-aic", sam9x7_aic5_of_init);
+116 -25
drivers/irqchip/irq-gic-v3-its.c
··· 12 12 #include <linux/crash_dump.h> 13 13 #include <linux/delay.h> 14 14 #include <linux/efi.h> 15 + #include <linux/genalloc.h> 15 16 #include <linux/interrupt.h> 16 17 #include <linux/iommu.h> 17 18 #include <linux/iopoll.h> 18 19 #include <linux/irqdomain.h> 19 20 #include <linux/list.h> 20 21 #include <linux/log2.h> 22 + #include <linux/mem_encrypt.h> 21 23 #include <linux/memblock.h> 22 24 #include <linux/mm.h> 23 25 #include <linux/msi.h> ··· 29 27 #include <linux/of_pci.h> 30 28 #include <linux/of_platform.h> 31 29 #include <linux/percpu.h> 30 + #include <linux/set_memory.h> 32 31 #include <linux/slab.h> 33 32 #include <linux/syscore_ops.h> 34 33 ··· 167 164 struct its_node *its; 168 165 struct event_lpi_map event_map; 169 166 void *itt; 167 + u32 itt_sz; 170 168 u32 nr_ites; 171 169 u32 device_id; 172 170 bool shared; ··· 202 198 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) 203 199 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 204 200 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) 201 + 202 + static struct page *its_alloc_pages_node(int node, gfp_t gfp, 203 + unsigned int order) 204 + { 205 + struct page *page; 206 + int ret = 0; 207 + 208 + page = alloc_pages_node(node, gfp, order); 209 + 210 + if (!page) 211 + return NULL; 212 + 213 + ret = set_memory_decrypted((unsigned long)page_address(page), 214 + 1 << order); 215 + /* 216 + * If set_memory_decrypted() fails then we don't know what state the 217 + * page is in, so we can't free it. Instead we leak it. 218 + * set_memory_decrypted() will already have WARNed. 
219 + */ 220 + if (ret) 221 + return NULL; 222 + 223 + return page; 224 + } 225 + 226 + static struct page *its_alloc_pages(gfp_t gfp, unsigned int order) 227 + { 228 + return its_alloc_pages_node(NUMA_NO_NODE, gfp, order); 229 + } 230 + 231 + static void its_free_pages(void *addr, unsigned int order) 232 + { 233 + /* 234 + * If the memory cannot be encrypted again then we must leak the pages. 235 + * set_memory_encrypted() will already have WARNed. 236 + */ 237 + if (set_memory_encrypted((unsigned long)addr, 1 << order)) 238 + return; 239 + free_pages((unsigned long)addr, order); 240 + } 241 + 242 + static struct gen_pool *itt_pool; 243 + 244 + static void *itt_alloc_pool(int node, int size) 245 + { 246 + unsigned long addr; 247 + struct page *page; 248 + 249 + if (size >= PAGE_SIZE) { 250 + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size)); 251 + 252 + return page ? page_address(page) : NULL; 253 + } 254 + 255 + do { 256 + addr = gen_pool_alloc(itt_pool, size); 257 + if (addr) 258 + break; 259 + 260 + page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); 261 + if (!page) 262 + break; 263 + 264 + gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node); 265 + } while (!addr); 266 + 267 + return (void *)addr; 268 + } 269 + 270 + static void itt_free_pool(void *addr, int size) 271 + { 272 + if (!addr) 273 + return; 274 + 275 + if (size >= PAGE_SIZE) { 276 + its_free_pages(addr, get_order(size)); 277 + return; 278 + } 279 + 280 + gen_pool_free(itt_pool, (unsigned long)addr, size); 281 + } 205 282 206 283 /* 207 284 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we ··· 706 621 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); 707 622 708 623 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); 709 - itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); 710 624 711 625 its_encode_cmd(cmd, GITS_CMD_MAPD); 712 626 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); ··· 2265 2181 { 2266 2182 struct page 
*prop_page; 2267 2183 2268 - prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); 2184 + prop_page = its_alloc_pages(gfp_flags, 2185 + get_order(LPI_PROPBASE_SZ)); 2269 2186 if (!prop_page) 2270 2187 return NULL; 2271 2188 ··· 2277 2192 2278 2193 static void its_free_prop_table(struct page *prop_page) 2279 2194 { 2280 - free_pages((unsigned long)page_address(prop_page), 2281 - get_order(LPI_PROPBASE_SZ)); 2195 + its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ)); 2282 2196 } 2283 2197 2284 2198 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) ··· 2399 2315 order = get_order(GITS_BASER_PAGES_MAX * psz); 2400 2316 } 2401 2317 2402 - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); 2318 + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); 2403 2319 if (!page) 2404 2320 return -ENOMEM; 2405 2321 ··· 2412 2328 /* 52bit PA is supported only when PageSize=64K */ 2413 2329 if (psz != SZ_64K) { 2414 2330 pr_err("ITS: no 52bit PA support when psz=%d\n", psz); 2415 - free_pages((unsigned long)base, order); 2331 + its_free_pages(base, order); 2416 2332 return -ENXIO; 2417 2333 } 2418 2334 ··· 2468 2384 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", 2469 2385 &its->phys_base, its_base_type_string[type], 2470 2386 val, tmp); 2471 - free_pages((unsigned long)base, order); 2387 + its_free_pages(base, order); 2472 2388 return -ENXIO; 2473 2389 } 2474 2390 ··· 2607 2523 2608 2524 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 2609 2525 if (its->tables[i].base) { 2610 - free_pages((unsigned long)its->tables[i].base, 2611 - its->tables[i].order); 2526 + its_free_pages(its->tables[i].base, its->tables[i].order); 2612 2527 its->tables[i].base = NULL; 2613 2528 } 2614 2529 } ··· 2873 2790 2874 2791 /* Allocate memory for 2nd level table */ 2875 2792 if (!table[idx]) { 2876 - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); 2793 + page = its_alloc_pages(GFP_KERNEL | 
__GFP_ZERO, get_order(psz)); 2877 2794 if (!page) 2878 2795 return false; 2879 2796 ··· 2992 2909 2993 2910 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", 2994 2911 np, npg, psz, epp, esz); 2995 - page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); 2912 + page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); 2996 2913 if (!page) 2997 2914 return -ENOMEM; 2998 2915 ··· 3038 2955 { 3039 2956 struct page *pend_page; 3040 2957 3041 - pend_page = alloc_pages(gfp_flags | __GFP_ZERO, 3042 - get_order(LPI_PENDBASE_SZ)); 2958 + pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ)); 3043 2959 if (!pend_page) 3044 2960 return NULL; 3045 2961 ··· 3050 2968 3051 2969 static void its_free_pending_table(struct page *pt) 3052 2970 { 3053 - free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); 2971 + its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ)); 3054 2972 } 3055 2973 3056 2974 /* ··· 3385 3303 3386 3304 /* Allocate memory for 2nd level table */ 3387 3305 if (!table[idx]) { 3388 - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 3389 - get_order(baser->psz)); 3306 + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 3307 + get_order(baser->psz)); 3390 3308 if (!page) 3391 3309 return false; 3392 3310 ··· 3481 3399 if (WARN_ON(!is_power_of_2(nvecs))) 3482 3400 nvecs = roundup_pow_of_two(nvecs); 3483 3401 3484 - dev = kzalloc(sizeof(*dev), GFP_KERNEL); 3485 3402 /* 3486 3403 * Even if the device wants a single LPI, the ITT must be 3487 3404 * sized as a power of two (and you need at least one bit...). 
3488 3405 */ 3489 3406 nr_ites = max(2, nvecs); 3490 3407 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); 3491 - sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 3492 - itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); 3408 + sz = max(sz, ITS_ITT_ALIGN); 3409 + 3410 + itt = itt_alloc_pool(its->numa_node, sz); 3411 + 3412 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 3413 + 3493 3414 if (alloc_lpis) { 3494 3415 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); 3495 3416 if (lpi_map) ··· 3504 3419 lpi_base = 0; 3505 3420 } 3506 3421 3507 - if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { 3422 + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { 3508 3423 kfree(dev); 3509 - kfree(itt); 3424 + itt_free_pool(itt, sz); 3510 3425 bitmap_free(lpi_map); 3511 3426 kfree(col_map); 3512 3427 return NULL; ··· 3516 3431 3517 3432 dev->its = its; 3518 3433 dev->itt = itt; 3434 + dev->itt_sz = sz; 3519 3435 dev->nr_ites = nr_ites; 3520 3436 dev->event_map.lpi_map = lpi_map; 3521 3437 dev->event_map.col_map = col_map; ··· 3544 3458 list_del(&its_dev->entry); 3545 3459 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); 3546 3460 kfree(its_dev->event_map.col_map); 3547 - kfree(its_dev->itt); 3461 + itt_free_pool(its_dev->itt, its_dev->itt_sz); 3548 3462 kfree(its_dev); 3549 3463 } 3550 3464 ··· 5218 5132 } 5219 5133 } 5220 5134 5221 - page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 5222 - get_order(ITS_CMD_QUEUE_SZ)); 5135 + page = its_alloc_pages_node(its->numa_node, 5136 + GFP_KERNEL | __GFP_ZERO, 5137 + get_order(ITS_CMD_QUEUE_SZ)); 5223 5138 if (!page) { 5224 5139 err = -ENOMEM; 5225 5140 goto out_unmap_sgir; ··· 5284 5197 out_free_tables: 5285 5198 its_free_tables(its); 5286 5199 out_free_cmd: 5287 - free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); 5200 + its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); 5288 5201 out_unmap_sgir: 5289 5202 if (its->sgir_base) 5290 5203 
iounmap(its->sgir_base); ··· 5769 5682 bool has_v4 = false; 5770 5683 bool has_v4_1 = false; 5771 5684 int err; 5685 + 5686 + itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1); 5687 + if (!itt_pool) 5688 + return -ENOMEM; 5772 5689 5773 5690 gic_rdists = rdists; 5774 5691
+226 -43
drivers/irqchip/irq-mips-gic.c
··· 66 66 bool mask; 67 67 } gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS]; 68 68 69 + static int __gic_with_next_online_cpu(int prev) 70 + { 71 + unsigned int cpu; 72 + 73 + /* Discover the next online CPU */ 74 + cpu = cpumask_next(prev, cpu_online_mask); 75 + 76 + /* If there isn't one, we're done */ 77 + if (cpu >= nr_cpu_ids) 78 + return cpu; 79 + 80 + /* 81 + * Move the access lock to the next CPU's GIC local register block. 82 + * 83 + * Set GIC_VL_OTHER. Since the caller holds gic_lock nothing can 84 + * clobber the written value. 85 + */ 86 + write_gic_vl_other(mips_cm_vp_id(cpu)); 87 + 88 + return cpu; 89 + } 90 + 91 + static inline void gic_unlock_cluster(void) 92 + { 93 + if (mips_cps_multicluster_cpus()) 94 + mips_cm_unlock_other(); 95 + } 96 + 97 + /** 98 + * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers 99 + * @cpu: An integer variable to hold the current CPU number 100 + * @gic_lock: A pointer to raw spin lock used as a guard 101 + * 102 + * Iterate over online CPUs & configure the other/redirect register region to 103 + * access each CPUs GIC local register block, which can be accessed from the 104 + * loop body using read_gic_vo_*() or write_gic_vo_*() accessor functions or 105 + * their derivatives. 106 + */ 107 + #define for_each_online_cpu_gic(cpu, gic_lock) \ 108 + guard(raw_spinlock_irqsave)(gic_lock); \ 109 + for ((cpu) = __gic_with_next_online_cpu(-1); \ 110 + (cpu) < nr_cpu_ids; \ 111 + gic_unlock_cluster(), \ 112 + (cpu) = __gic_with_next_online_cpu(cpu)) 113 + 114 + /** 115 + * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster 116 + * @d: struct irq_data corresponding to the interrupt we're interested in 117 + * 118 + * Locks redirect register block access to the global register block of the GIC 119 + * within the remote cluster that the IRQ corresponding to @d is affine to, 120 + * returning true when this redirect block setup & locking has been performed. 
121 + * 122 + * If @d is affine to the local cluster then no locking is performed and this 123 + * function will return false, indicating to the caller that it should access 124 + * the local clusters registers without the overhead of indirection through the 125 + * redirect block. 126 + * 127 + * In summary, if this function returns true then the caller should access GIC 128 + * registers using redirect register block accessors & then call 129 + * mips_cm_unlock_other() when done. If this function returns false then the 130 + * caller should trivially access GIC registers in the local cluster. 131 + * 132 + * Returns true if locking performed, else false. 133 + */ 134 + static bool gic_irq_lock_cluster(struct irq_data *d) 135 + { 136 + unsigned int cpu, cl; 137 + 138 + cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); 139 + BUG_ON(cpu >= NR_CPUS); 140 + 141 + cl = cpu_cluster(&cpu_data[cpu]); 142 + if (cl == cpu_cluster(&current_cpu_data)) 143 + return false; 144 + if (mips_cps_numcores(cl) == 0) 145 + return false; 146 + mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); 147 + return true; 148 + } 149 + 69 150 static void gic_clear_pcpu_masks(unsigned int intr) 70 151 { 71 152 unsigned int i; ··· 193 112 { 194 113 irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d)); 195 114 196 - write_gic_wedge(GIC_WEDGE_RW | hwirq); 115 + if (gic_irq_lock_cluster(d)) { 116 + write_gic_redir_wedge(GIC_WEDGE_RW | hwirq); 117 + mips_cm_unlock_other(); 118 + } else { 119 + write_gic_wedge(GIC_WEDGE_RW | hwirq); 120 + } 197 121 } 198 122 199 123 int gic_get_c0_compare_int(void) ··· 266 180 { 267 181 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); 268 182 269 - write_gic_rmask(intr); 183 + if (gic_irq_lock_cluster(d)) { 184 + write_gic_redir_rmask(intr); 185 + mips_cm_unlock_other(); 186 + } else { 187 + write_gic_rmask(intr); 188 + } 189 + 270 190 gic_clear_pcpu_masks(intr); 271 191 } 272 192 ··· 281 189 unsigned int intr = 
GIC_HWIRQ_TO_SHARED(d->hwirq); 282 190 unsigned int cpu; 283 191 284 - write_gic_smask(intr); 192 + if (gic_irq_lock_cluster(d)) { 193 + write_gic_redir_smask(intr); 194 + mips_cm_unlock_other(); 195 + } else { 196 + write_gic_smask(intr); 197 + } 285 198 286 199 gic_clear_pcpu_masks(intr); 287 200 cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); ··· 297 200 { 298 201 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 299 202 300 - write_gic_wedge(irq); 203 + if (gic_irq_lock_cluster(d)) { 204 + write_gic_redir_wedge(irq); 205 + mips_cm_unlock_other(); 206 + } else { 207 + write_gic_wedge(irq); 208 + } 301 209 } 302 210 303 211 static int gic_set_type(struct irq_data *d, unsigned int type) ··· 342 240 break; 343 241 } 344 242 345 - change_gic_pol(irq, pol); 346 - change_gic_trig(irq, trig); 347 - change_gic_dual(irq, dual); 243 + if (gic_irq_lock_cluster(d)) { 244 + change_gic_redir_pol(irq, pol); 245 + change_gic_redir_trig(irq, trig); 246 + change_gic_redir_dual(irq, dual); 247 + mips_cm_unlock_other(); 248 + } else { 249 + change_gic_pol(irq, pol); 250 + change_gic_trig(irq, trig); 251 + change_gic_dual(irq, dual); 252 + } 348 253 349 254 if (trig == GIC_TRIG_EDGE) 350 255 irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller, ··· 369 260 bool force) 370 261 { 371 262 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 263 + unsigned int cpu, cl, old_cpu, old_cl; 372 264 unsigned long flags; 373 - unsigned int cpu; 374 265 266 + /* 267 + * The GIC specifies that we can only route an interrupt to one VP(E), 268 + * ie. CPU in Linux parlance, at a time. Therefore we always route to 269 + * the first online CPU in the mask. 
270 + */ 375 271 cpu = cpumask_first_and(cpumask, cpu_online_mask); 376 272 if (cpu >= NR_CPUS) 377 273 return -EINVAL; 378 274 379 - /* Assumption : cpumask refers to a single CPU */ 275 + old_cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); 276 + old_cl = cpu_cluster(&cpu_data[old_cpu]); 277 + cl = cpu_cluster(&cpu_data[cpu]); 278 + 380 279 raw_spin_lock_irqsave(&gic_lock, flags); 381 280 382 - /* Re-route this IRQ */ 383 - write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); 281 + /* 282 + * If we're moving affinity between clusters, stop routing the 283 + * interrupt to any VP(E) in the old cluster. 284 + */ 285 + if (cl != old_cl) { 286 + if (gic_irq_lock_cluster(d)) { 287 + write_gic_redir_map_vp(irq, 0); 288 + mips_cm_unlock_other(); 289 + } else { 290 + write_gic_map_vp(irq, 0); 291 + } 292 + } 384 293 385 - /* Update the pcpu_masks */ 386 - gic_clear_pcpu_masks(irq); 387 - if (read_gic_mask(irq)) 388 - set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); 389 - 294 + /* 295 + * Update effective affinity - after this gic_irq_lock_cluster() will 296 + * begin operating on the new cluster. 297 + */ 390 298 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 299 + 300 + /* 301 + * If we're moving affinity between clusters, configure the interrupt 302 + * trigger type in the new cluster. 
303 + */ 304 + if (cl != old_cl) 305 + gic_set_type(d, irqd_get_trigger_type(d)); 306 + 307 + /* Route the interrupt to its new VP(E) */ 308 + if (gic_irq_lock_cluster(d)) { 309 + write_gic_redir_map_pin(irq, 310 + GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 311 + write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu))); 312 + 313 + /* Update the pcpu_masks */ 314 + gic_clear_pcpu_masks(irq); 315 + if (read_gic_redir_mask(irq)) 316 + set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); 317 + 318 + mips_cm_unlock_other(); 319 + } else { 320 + write_gic_map_pin(irq, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 321 + write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); 322 + 323 + /* Update the pcpu_masks */ 324 + gic_clear_pcpu_masks(irq); 325 + if (read_gic_mask(irq)) 326 + set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); 327 + } 328 + 391 329 raw_spin_unlock_irqrestore(&gic_lock, flags); 392 330 393 331 return IRQ_SET_MASK_OK; ··· 506 350 static void gic_mask_local_irq_all_vpes(struct irq_data *d) 507 351 { 508 352 struct gic_all_vpes_chip_data *cd; 509 - unsigned long flags; 510 353 int intr, cpu; 354 + 355 + if (!mips_cps_multicluster_cpus()) 356 + return; 511 357 512 358 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 513 359 cd = irq_data_get_irq_chip_data(d); 514 360 cd->mask = false; 515 361 516 - raw_spin_lock_irqsave(&gic_lock, flags); 517 - for_each_online_cpu(cpu) { 518 - write_gic_vl_other(mips_cm_vp_id(cpu)); 362 + for_each_online_cpu_gic(cpu, &gic_lock) 519 363 write_gic_vo_rmask(BIT(intr)); 520 - } 521 - raw_spin_unlock_irqrestore(&gic_lock, flags); 522 364 } 523 365 524 366 static void gic_unmask_local_irq_all_vpes(struct irq_data *d) 525 367 { 526 368 struct gic_all_vpes_chip_data *cd; 527 - unsigned long flags; 528 369 int intr, cpu; 370 + 371 + if (!mips_cps_multicluster_cpus()) 372 + return; 529 373 530 374 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 531 375 cd = irq_data_get_irq_chip_data(d); 532 376 cd->mask = true; 533 377 534 - raw_spin_lock_irqsave(&gic_lock, flags); 535 - 
for_each_online_cpu(cpu) { 536 - write_gic_vl_other(mips_cm_vp_id(cpu)); 378 + for_each_online_cpu_gic(cpu, &gic_lock) 537 379 write_gic_vo_smask(BIT(intr)); 538 - } 539 - raw_spin_unlock_irqrestore(&gic_lock, flags); 540 380 } 541 381 542 382 static void gic_all_vpes_irq_cpu_online(void) ··· 588 436 unsigned long flags; 589 437 590 438 data = irq_get_irq_data(virq); 439 + irq_data_update_effective_affinity(data, cpumask_of(cpu)); 591 440 592 441 raw_spin_lock_irqsave(&gic_lock, flags); 593 - write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 594 - write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); 595 - irq_data_update_effective_affinity(data, cpumask_of(cpu)); 442 + 443 + /* Route the interrupt to its VP(E) */ 444 + if (gic_irq_lock_cluster(data)) { 445 + write_gic_redir_map_pin(intr, 446 + GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 447 + write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu))); 448 + mips_cm_unlock_other(); 449 + } else { 450 + write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 451 + write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); 452 + } 453 + 596 454 raw_spin_unlock_irqrestore(&gic_lock, flags); 597 455 598 456 return 0; ··· 631 469 irq_hw_number_t hwirq) 632 470 { 633 471 struct gic_all_vpes_chip_data *cd; 634 - unsigned long flags; 635 472 unsigned int intr; 636 473 int err, cpu; 637 474 u32 map; ··· 694 533 if (!gic_local_irq_is_routable(intr)) 695 534 return -EPERM; 696 535 697 - raw_spin_lock_irqsave(&gic_lock, flags); 698 - for_each_online_cpu(cpu) { 699 - write_gic_vl_other(mips_cm_vp_id(cpu)); 700 - write_gic_vo_map(mips_gic_vx_map_reg(intr), map); 536 + if (mips_cps_multicluster_cpus()) { 537 + for_each_online_cpu_gic(cpu, &gic_lock) 538 + write_gic_vo_map(mips_gic_vx_map_reg(intr), map); 701 539 } 702 - raw_spin_unlock_irqrestore(&gic_lock, flags); 703 540 704 541 return 0; 705 542 } ··· 780 621 if (ret) 781 622 goto error; 782 623 624 + /* Set affinity to cpu. 
*/ 625 + irq_data_update_effective_affinity(irq_get_irq_data(virq + i), 626 + cpumask_of(cpu)); 783 627 ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING); 784 628 if (ret) 785 629 goto error; ··· 896 734 static int __init gic_of_init(struct device_node *node, 897 735 struct device_node *parent) 898 736 { 899 - unsigned int cpu_vec, i, gicconfig; 737 + unsigned int cpu_vec, i, gicconfig, cl, nclusters; 900 738 unsigned long reserved; 901 739 phys_addr_t gic_base; 902 740 struct resource res; ··· 977 815 978 816 board_bind_eic_interrupt = &gic_bind_eic_interrupt; 979 817 980 - /* Setup defaults */ 981 - for (i = 0; i < gic_shared_intrs; i++) { 982 - change_gic_pol(i, GIC_POL_ACTIVE_HIGH); 983 - change_gic_trig(i, GIC_TRIG_LEVEL); 984 - write_gic_rmask(i); 818 + /* 819 + * Initialise each cluster's GIC shared registers to sane default 820 + * values. 821 + * Otherwise, the IPI set up will be erased if we move code 822 + * to gic_cpu_startup for each cpu. 823 + */ 824 + nclusters = mips_cps_numclusters(); 825 + for (cl = 0; cl < nclusters; cl++) { 826 + if (cl == cpu_cluster(&current_cpu_data)) { 827 + for (i = 0; i < gic_shared_intrs; i++) { 828 + change_gic_pol(i, GIC_POL_ACTIVE_HIGH); 829 + change_gic_trig(i, GIC_TRIG_LEVEL); 830 + write_gic_rmask(i); 831 + } 832 + } else if (mips_cps_numcores(cl) != 0) { 833 + mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); 834 + for (i = 0; i < gic_shared_intrs; i++) { 835 + change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH); 836 + change_gic_redir_trig(i, GIC_TRIG_LEVEL); 837 + write_gic_redir_rmask(i); 838 + } 839 + mips_cm_unlock_other(); 840 + 841 + } else { 842 + pr_warn("No CPU cores on the cluster %d skip it\n", cl); 843 + } 985 844 } 986 845 987 846 return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+513
drivers/irqchip/irq-renesas-rzv2h.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Renesas RZ/V2H(P) ICU Driver 4 + * 5 + * Based on irq-renesas-rzg2l.c 6 + * 7 + * Copyright (C) 2024 Renesas Electronics Corporation. 8 + * 9 + * Author: Fabrizio Castro <fabrizio.castro.jz@renesas.com> 10 + */ 11 + 12 + #include <linux/bitfield.h> 13 + #include <linux/cleanup.h> 14 + #include <linux/clk.h> 15 + #include <linux/err.h> 16 + #include <linux/io.h> 17 + #include <linux/irqchip.h> 18 + #include <linux/irqdomain.h> 19 + #include <linux/of_address.h> 20 + #include <linux/of_platform.h> 21 + #include <linux/pm_runtime.h> 22 + #include <linux/reset.h> 23 + #include <linux/spinlock.h> 24 + #include <linux/syscore_ops.h> 25 + 26 + /* DT "interrupts" indexes */ 27 + #define ICU_IRQ_START 1 28 + #define ICU_IRQ_COUNT 16 29 + #define ICU_TINT_START (ICU_IRQ_START + ICU_IRQ_COUNT) 30 + #define ICU_TINT_COUNT 32 31 + #define ICU_NUM_IRQ (ICU_TINT_START + ICU_TINT_COUNT) 32 + 33 + /* Registers */ 34 + #define ICU_NSCNT 0x00 35 + #define ICU_NSCLR 0x04 36 + #define ICU_NITSR 0x08 37 + #define ICU_ISCTR 0x10 38 + #define ICU_ISCLR 0x14 39 + #define ICU_IITSR 0x18 40 + #define ICU_TSCTR 0x20 41 + #define ICU_TSCLR 0x24 42 + #define ICU_TITSR(k) (0x28 + (k) * 4) 43 + #define ICU_TSSR(k) (0x30 + (k) * 4) 44 + 45 + /* NMI */ 46 + #define ICU_NMI_EDGE_FALLING 0 47 + #define ICU_NMI_EDGE_RISING 1 48 + 49 + #define ICU_NSCLR_NCLR BIT(0) 50 + 51 + /* IRQ */ 52 + #define ICU_IRQ_LEVEL_LOW 0 53 + #define ICU_IRQ_EDGE_FALLING 1 54 + #define ICU_IRQ_EDGE_RISING 2 55 + #define ICU_IRQ_EDGE_BOTH 3 56 + 57 + #define ICU_IITSR_IITSEL_PREP(iitsel, n) ((iitsel) << ((n) * 2)) 58 + #define ICU_IITSR_IITSEL_GET(iitsr, n) (((iitsr) >> ((n) * 2)) & 0x03) 59 + #define ICU_IITSR_IITSEL_MASK(n) ICU_IITSR_IITSEL_PREP(0x03, n) 60 + 61 + /* TINT */ 62 + #define ICU_TINT_EDGE_RISING 0 63 + #define ICU_TINT_EDGE_FALLING 1 64 + #define ICU_TINT_LEVEL_HIGH 2 65 + #define ICU_TINT_LEVEL_LOW 3 66 + 67 + #define ICU_TSSR_K(tint_nr) ((tint_nr) / 
4) 68 + #define ICU_TSSR_TSSEL_N(tint_nr) ((tint_nr) % 4) 69 + #define ICU_TSSR_TSSEL_PREP(tssel, n) ((tssel) << ((n) * 8)) 70 + #define ICU_TSSR_TSSEL_MASK(n) ICU_TSSR_TSSEL_PREP(0x7F, n) 71 + #define ICU_TSSR_TIEN(n) (BIT(7) << ((n) * 8)) 72 + 73 + #define ICU_TITSR_K(tint_nr) ((tint_nr) / 16) 74 + #define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16) 75 + #define ICU_TITSR_TITSEL_PREP(titsel, n) ICU_IITSR_IITSEL_PREP(titsel, n) 76 + #define ICU_TITSR_TITSEL_MASK(n) ICU_IITSR_IITSEL_MASK(n) 77 + #define ICU_TITSR_TITSEL_GET(titsr, n) ICU_IITSR_IITSEL_GET(titsr, n) 78 + 79 + #define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x)) 80 + #define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x)) 81 + #define ICU_PB5_TINT 0x55 82 + 83 + /** 84 + * struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure. 85 + * @base: Controller's base address 86 + * @irqchip: Pointer to struct irq_chip 87 + * @fwspec: IRQ firmware specific data 88 + * @lock: Lock to serialize access to hardware registers 89 + */ 90 + struct rzv2h_icu_priv { 91 + void __iomem *base; 92 + const struct irq_chip *irqchip; 93 + struct irq_fwspec fwspec[ICU_NUM_IRQ]; 94 + raw_spinlock_t lock; 95 + }; 96 + 97 + static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data) 98 + { 99 + return data->domain->host_data; 100 + } 101 + 102 + static void rzv2h_icu_eoi(struct irq_data *d) 103 + { 104 + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); 105 + unsigned int hw_irq = irqd_to_hwirq(d); 106 + unsigned int tintirq_nr; 107 + u32 bit; 108 + 109 + scoped_guard(raw_spinlock, &priv->lock) { 110 + if (hw_irq >= ICU_TINT_START) { 111 + tintirq_nr = hw_irq - ICU_TINT_START; 112 + bit = BIT(tintirq_nr); 113 + if (!irqd_is_level_type(d)) 114 + writel_relaxed(bit, priv->base + ICU_TSCLR); 115 + } else if (hw_irq >= ICU_IRQ_START) { 116 + tintirq_nr = hw_irq - ICU_IRQ_START; 117 + bit = BIT(tintirq_nr); 118 + if (!irqd_is_level_type(d)) 119 + writel_relaxed(bit, 
priv->base + ICU_ISCLR); 120 + } else { 121 + writel_relaxed(ICU_NSCLR_NCLR, priv->base + ICU_NSCLR); 122 + } 123 + } 124 + 125 + irq_chip_eoi_parent(d); 126 + } 127 + 128 + static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable) 129 + { 130 + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); 131 + unsigned int hw_irq = irqd_to_hwirq(d); 132 + u32 tint_nr, tssel_n, k, tssr; 133 + 134 + if (hw_irq < ICU_TINT_START) 135 + return; 136 + 137 + tint_nr = hw_irq - ICU_TINT_START; 138 + k = ICU_TSSR_K(tint_nr); 139 + tssel_n = ICU_TSSR_TSSEL_N(tint_nr); 140 + 141 + guard(raw_spinlock)(&priv->lock); 142 + tssr = readl_relaxed(priv->base + ICU_TSSR(k)); 143 + if (enable) 144 + tssr |= ICU_TSSR_TIEN(tssel_n); 145 + else 146 + tssr &= ~ICU_TSSR_TIEN(tssel_n); 147 + writel_relaxed(tssr, priv->base + ICU_TSSR(k)); 148 + } 149 + 150 + static void rzv2h_icu_irq_disable(struct irq_data *d) 151 + { 152 + irq_chip_disable_parent(d); 153 + rzv2h_tint_irq_endisable(d, false); 154 + } 155 + 156 + static void rzv2h_icu_irq_enable(struct irq_data *d) 157 + { 158 + rzv2h_tint_irq_endisable(d, true); 159 + irq_chip_enable_parent(d); 160 + } 161 + 162 + static int rzv2h_nmi_set_type(struct irq_data *d, unsigned int type) 163 + { 164 + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); 165 + u32 sense; 166 + 167 + switch (type & IRQ_TYPE_SENSE_MASK) { 168 + case IRQ_TYPE_EDGE_FALLING: 169 + sense = ICU_NMI_EDGE_FALLING; 170 + break; 171 + 172 + case IRQ_TYPE_EDGE_RISING: 173 + sense = ICU_NMI_EDGE_RISING; 174 + break; 175 + 176 + default: 177 + return -EINVAL; 178 + } 179 + 180 + writel_relaxed(sense, priv->base + ICU_NITSR); 181 + 182 + return 0; 183 + } 184 + 185 + static void rzv2h_clear_irq_int(struct rzv2h_icu_priv *priv, unsigned int hwirq) 186 + { 187 + unsigned int irq_nr = hwirq - ICU_IRQ_START; 188 + u32 isctr, iitsr, iitsel; 189 + u32 bit = BIT(irq_nr); 190 + 191 + isctr = readl_relaxed(priv->base + ICU_ISCTR); 192 + iitsr = readl_relaxed(priv->base + ICU_IITSR); 
193 + iitsel = ICU_IITSR_IITSEL_GET(iitsr, irq_nr); 194 + 195 + /* 196 + * When level sensing is used, the interrupt flag gets automatically cleared when the 197 + * interrupt signal is de-asserted by the source of the interrupt request, therefore clear 198 + * the interrupt only for edge triggered interrupts. 199 + */ 200 + if ((isctr & bit) && (iitsel != ICU_IRQ_LEVEL_LOW)) 201 + writel_relaxed(bit, priv->base + ICU_ISCLR); 202 + } 203 + 204 + static int rzv2h_irq_set_type(struct irq_data *d, unsigned int type) 205 + { 206 + struct rzv2h_icu_priv *priv = irq_data_to_priv(d); 207 + unsigned int hwirq = irqd_to_hwirq(d); 208 + u32 irq_nr = hwirq - ICU_IRQ_START; 209 + u32 iitsr, sense; 210 + 211 + switch (type & IRQ_TYPE_SENSE_MASK) { 212 + case IRQ_TYPE_LEVEL_LOW: 213 + sense = ICU_IRQ_LEVEL_LOW; 214 + break; 215 + 216 + case IRQ_TYPE_EDGE_FALLING: 217 + sense = ICU_IRQ_EDGE_FALLING; 218 + break; 219 + 220 + case IRQ_TYPE_EDGE_RISING: 221 + sense = ICU_IRQ_EDGE_RISING; 222 + break; 223 + 224 + case IRQ_TYPE_EDGE_BOTH: 225 + sense = ICU_IRQ_EDGE_BOTH; 226 + break; 227 + 228 + default: 229 + return -EINVAL; 230 + } 231 + 232 + guard(raw_spinlock)(&priv->lock); 233 + iitsr = readl_relaxed(priv->base + ICU_IITSR); 234 + iitsr &= ~ICU_IITSR_IITSEL_MASK(irq_nr); 235 + iitsr |= ICU_IITSR_IITSEL_PREP(sense, irq_nr); 236 + rzv2h_clear_irq_int(priv, hwirq); 237 + writel_relaxed(iitsr, priv->base + ICU_IITSR); 238 + 239 + return 0; 240 + } 241 + 242 + static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq) 243 + { 244 + unsigned int tint_nr = hwirq - ICU_TINT_START; 245 + int titsel_n = ICU_TITSR_TITSEL_N(tint_nr); 246 + u32 tsctr, titsr, titsel; 247 + u32 bit = BIT(tint_nr); 248 + int k = tint_nr / 16; 249 + 250 + tsctr = readl_relaxed(priv->base + ICU_TSCTR); 251 + titsr = readl_relaxed(priv->base + ICU_TITSR(k)); 252 + titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n); 253 + 254 + /* 255 + * Writing 1 to the corresponding flag from register 
ICU_TSCTR only has effect if 256 + * TSTATn = 1b and if it's a rising edge or a falling edge interrupt. 257 + */ 258 + if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) || 259 + (titsel == ICU_TINT_EDGE_FALLING))) 260 + writel_relaxed(bit, priv->base + ICU_TSCLR); 261 + } 262 + 263 + static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type) 264 + { 265 + u32 titsr, titsr_k, titsel_n, tien; 266 + struct rzv2h_icu_priv *priv; 267 + u32 tssr, tssr_k, tssel_n; 268 + unsigned int hwirq; 269 + u32 tint, sense; 270 + int tint_nr; 271 + 272 + switch (type & IRQ_TYPE_SENSE_MASK) { 273 + case IRQ_TYPE_LEVEL_LOW: 274 + sense = ICU_TINT_LEVEL_LOW; 275 + break; 276 + 277 + case IRQ_TYPE_LEVEL_HIGH: 278 + sense = ICU_TINT_LEVEL_HIGH; 279 + break; 280 + 281 + case IRQ_TYPE_EDGE_RISING: 282 + sense = ICU_TINT_EDGE_RISING; 283 + break; 284 + 285 + case IRQ_TYPE_EDGE_FALLING: 286 + sense = ICU_TINT_EDGE_FALLING; 287 + break; 288 + 289 + default: 290 + return -EINVAL; 291 + } 292 + 293 + tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d); 294 + if (tint > ICU_PB5_TINT) 295 + return -EINVAL; 296 + 297 + priv = irq_data_to_priv(d); 298 + hwirq = irqd_to_hwirq(d); 299 + 300 + tint_nr = hwirq - ICU_TINT_START; 301 + 302 + tssr_k = ICU_TSSR_K(tint_nr); 303 + tssel_n = ICU_TSSR_TSSEL_N(tint_nr); 304 + 305 + titsr_k = ICU_TITSR_K(tint_nr); 306 + titsel_n = ICU_TITSR_TITSEL_N(tint_nr); 307 + tien = ICU_TSSR_TIEN(titsel_n); 308 + 309 + guard(raw_spinlock)(&priv->lock); 310 + 311 + tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k)); 312 + tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien); 313 + tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n); 314 + 315 + writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k)); 316 + 317 + titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k)); 318 + titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n); 319 + titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n); 320 + 321 + writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k)); 322 + 323 + rzv2h_clear_tint_int(priv, 
hwirq); 324 + 325 + writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k)); 326 + 327 + return 0; 328 + } 329 + 330 + static int rzv2h_icu_set_type(struct irq_data *d, unsigned int type) 331 + { 332 + unsigned int hw_irq = irqd_to_hwirq(d); 333 + int ret; 334 + 335 + if (hw_irq >= ICU_TINT_START) 336 + ret = rzv2h_tint_set_type(d, type); 337 + else if (hw_irq >= ICU_IRQ_START) 338 + ret = rzv2h_irq_set_type(d, type); 339 + else 340 + ret = rzv2h_nmi_set_type(d, type); 341 + 342 + if (ret) 343 + return ret; 344 + 345 + return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); 346 + } 347 + 348 + static const struct irq_chip rzv2h_icu_chip = { 349 + .name = "rzv2h-icu", 350 + .irq_eoi = rzv2h_icu_eoi, 351 + .irq_mask = irq_chip_mask_parent, 352 + .irq_unmask = irq_chip_unmask_parent, 353 + .irq_disable = rzv2h_icu_irq_disable, 354 + .irq_enable = rzv2h_icu_irq_enable, 355 + .irq_get_irqchip_state = irq_chip_get_parent_state, 356 + .irq_set_irqchip_state = irq_chip_set_parent_state, 357 + .irq_retrigger = irq_chip_retrigger_hierarchy, 358 + .irq_set_type = rzv2h_icu_set_type, 359 + .irq_set_affinity = irq_chip_set_affinity_parent, 360 + .flags = IRQCHIP_SET_TYPE_MASKED, 361 + }; 362 + 363 + static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, 364 + void *arg) 365 + { 366 + struct rzv2h_icu_priv *priv = domain->host_data; 367 + unsigned long tint = 0; 368 + irq_hw_number_t hwirq; 369 + unsigned int type; 370 + int ret; 371 + 372 + ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type); 373 + if (ret) 374 + return ret; 375 + 376 + /* 377 + * For TINT interrupts the hwirq and TINT are encoded in 378 + * fwspec->param[0]. 379 + * hwirq is embedded in bits 0-15. 380 + * TINT is embedded in bits 16-31. 
381 + */ 382 + if (hwirq >= ICU_TINT_START) { 383 + tint = ICU_TINT_EXTRACT_GPIOINT(hwirq); 384 + hwirq = ICU_TINT_EXTRACT_HWIRQ(hwirq); 385 + 386 + if (hwirq < ICU_TINT_START) 387 + return -EINVAL; 388 + } 389 + 390 + if (hwirq > (ICU_NUM_IRQ - 1)) 391 + return -EINVAL; 392 + 393 + ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip, 394 + (void *)(uintptr_t)tint); 395 + if (ret) 396 + return ret; 397 + 398 + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]); 399 + } 400 + 401 + static const struct irq_domain_ops rzv2h_icu_domain_ops = { 402 + .alloc = rzv2h_icu_alloc, 403 + .free = irq_domain_free_irqs_common, 404 + .translate = irq_domain_translate_twocell, 405 + }; 406 + 407 + static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device_node *np) 408 + { 409 + struct of_phandle_args map; 410 + unsigned int i; 411 + int ret; 412 + 413 + for (i = 0; i < ICU_NUM_IRQ; i++) { 414 + ret = of_irq_parse_one(np, i, &map); 415 + if (ret) 416 + return ret; 417 + 418 + of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]); 419 + } 420 + 421 + return 0; 422 + } 423 + 424 + static int rzv2h_icu_init(struct device_node *node, struct device_node *parent) 425 + { 426 + struct irq_domain *irq_domain, *parent_domain; 427 + struct rzv2h_icu_priv *rzv2h_icu_data; 428 + struct platform_device *pdev; 429 + struct reset_control *resetn; 430 + int ret; 431 + 432 + pdev = of_find_device_by_node(node); 433 + if (!pdev) 434 + return -ENODEV; 435 + 436 + parent_domain = irq_find_host(parent); 437 + if (!parent_domain) { 438 + dev_err(&pdev->dev, "cannot find parent domain\n"); 439 + ret = -ENODEV; 440 + goto put_dev; 441 + } 442 + 443 + rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL); 444 + if (!rzv2h_icu_data) { 445 + ret = -ENOMEM; 446 + goto put_dev; 447 + } 448 + 449 + rzv2h_icu_data->irqchip = &rzv2h_icu_chip; 450 + 451 + rzv2h_icu_data->base = 
devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); 452 + if (IS_ERR(rzv2h_icu_data->base)) { 453 + ret = PTR_ERR(rzv2h_icu_data->base); 454 + goto put_dev; 455 + } 456 + 457 + ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node); 458 + if (ret) { 459 + dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret); 460 + goto put_dev; 461 + } 462 + 463 + resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL); 464 + if (IS_ERR(resetn)) { 465 + ret = PTR_ERR(resetn); 466 + goto put_dev; 467 + } 468 + 469 + ret = reset_control_deassert(resetn); 470 + if (ret) { 471 + dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret); 472 + goto put_dev; 473 + } 474 + 475 + pm_runtime_enable(&pdev->dev); 476 + ret = pm_runtime_resume_and_get(&pdev->dev); 477 + if (ret < 0) { 478 + dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret); 479 + goto pm_disable; 480 + } 481 + 482 + raw_spin_lock_init(&rzv2h_icu_data->lock); 483 + 484 + irq_domain = irq_domain_add_hierarchy(parent_domain, 0, ICU_NUM_IRQ, node, 485 + &rzv2h_icu_domain_ops, rzv2h_icu_data); 486 + if (!irq_domain) { 487 + dev_err(&pdev->dev, "failed to add irq domain\n"); 488 + ret = -ENOMEM; 489 + goto pm_put; 490 + } 491 + 492 + /* 493 + * coccicheck complains about a missing put_device call before returning, but it's a false 494 + * positive. We still need &pdev->dev after successfully returning from this function. 495 + */ 496 + return 0; 497 + 498 + pm_put: 499 + pm_runtime_put(&pdev->dev); 500 + pm_disable: 501 + pm_runtime_disable(&pdev->dev); 502 + reset_control_assert(resetn); 503 + put_dev: 504 + put_device(&pdev->dev); 505 + 506 + return ret; 507 + } 508 + 509 + IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu) 510 + IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init) 511 + IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu) 512 + MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>"); 513 + MODULE_DESCRIPTION("Renesas RZ/V2H(P) ICU Driver");
+2 -1
drivers/irqchip/irq-riscv-aplic-main.c
··· 207 207 else 208 208 rc = aplic_direct_setup(dev, regs); 209 209 if (rc) 210 - dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? "MSI" : "direct"); 210 + dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n", 211 + msi_mode ? "MSI" : "direct"); 211 212 212 213 #ifdef CONFIG_ACPI 213 214 if (!acpi_disabled)
+3
drivers/irqchip/irq-riscv-aplic-msi.c
··· 266 266 if (msi_domain) 267 267 dev_set_msi_domain(dev, msi_domain); 268 268 } 269 + 270 + if (!dev_get_msi_domain(dev)) 271 + return -EPROBE_DEFER; 269 272 } 270 273 271 274 if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,
+4 -5
drivers/irqchip/irq-sifive-plic.c
··· 252 252 253 253 priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; 254 254 255 - for (i = 0; i < priv->nr_irqs; i++) 256 - if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)) 257 - __set_bit(i, priv->prio_save); 258 - else 259 - __clear_bit(i, priv->prio_save); 255 + for (i = 0; i < priv->nr_irqs; i++) { 256 + __assign_bit(i, priv->prio_save, 257 + readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)); 258 + } 260 259 261 260 for_each_cpu(cpu, cpu_present_mask) { 262 261 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+1 -2
drivers/irqchip/irq-stm32mp-exti.c
··· 696 696 if (ret) 697 697 return ret; 698 698 699 - if (of_property_read_bool(np, "interrupts-extended")) 700 - host_data->dt_has_irqs_desc = true; 699 + host_data->dt_has_irqs_desc = of_property_present(np, "interrupts-extended"); 701 700 702 701 return 0; 703 702 }
+176
drivers/irqchip/irq-thead-c900-aclint-sswi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com> 4 + */ 5 + 6 + #define pr_fmt(fmt) "thead-c900-aclint-sswi: " fmt 7 + #include <linux/cpu.h> 8 + #include <linux/interrupt.h> 9 + #include <linux/io.h> 10 + #include <linux/irq.h> 11 + #include <linux/irqchip.h> 12 + #include <linux/irqchip/chained_irq.h> 13 + #include <linux/module.h> 14 + #include <linux/of.h> 15 + #include <linux/of_address.h> 16 + #include <linux/of_irq.h> 17 + #include <linux/pci.h> 18 + #include <linux/spinlock.h> 19 + #include <linux/smp.h> 20 + #include <linux/string_choices.h> 21 + #include <asm/sbi.h> 22 + #include <asm/vendorid_list.h> 23 + 24 + #define THEAD_ACLINT_xSWI_REGISTER_SIZE 4 25 + 26 + #define THEAD_C9XX_CSR_SXSTATUS 0x5c0 27 + #define THEAD_C9XX_SXSTATUS_CLINTEE BIT(17) 28 + 29 + static int sswi_ipi_virq __ro_after_init; 30 + static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs); 31 + 32 + static void thead_aclint_sswi_ipi_send(unsigned int cpu) 33 + { 34 + writel_relaxed(0x1, per_cpu(sswi_cpu_regs, cpu)); 35 + } 36 + 37 + static void thead_aclint_sswi_ipi_clear(void) 38 + { 39 + writel_relaxed(0x0, this_cpu_read(sswi_cpu_regs)); 40 + } 41 + 42 + static void thead_aclint_sswi_ipi_handle(struct irq_desc *desc) 43 + { 44 + struct irq_chip *chip = irq_desc_get_chip(desc); 45 + 46 + chained_irq_enter(chip, desc); 47 + 48 + csr_clear(CSR_IP, IE_SIE); 49 + thead_aclint_sswi_ipi_clear(); 50 + 51 + ipi_mux_process(); 52 + 53 + chained_irq_exit(chip, desc); 54 + } 55 + 56 + static int thead_aclint_sswi_starting_cpu(unsigned int cpu) 57 + { 58 + enable_percpu_irq(sswi_ipi_virq, irq_get_trigger_type(sswi_ipi_virq)); 59 + 60 + return 0; 61 + } 62 + 63 + static int thead_aclint_sswi_dying_cpu(unsigned int cpu) 64 + { 65 + thead_aclint_sswi_ipi_clear(); 66 + 67 + disable_percpu_irq(sswi_ipi_virq); 68 + 69 + return 0; 70 + } 71 + 72 + static int __init thead_aclint_sswi_parse_irq(struct fwnode_handle *fwnode, 73 + void __iomem 
*reg) 74 + { 75 + struct of_phandle_args parent; 76 + unsigned long hartid; 77 + u32 contexts, i; 78 + int rc, cpu; 79 + 80 + contexts = of_irq_count(to_of_node(fwnode)); 81 + if (!(contexts)) { 82 + pr_err("%pfwP: no ACLINT SSWI context available\n", fwnode); 83 + return -EINVAL; 84 + } 85 + 86 + for (i = 0; i < contexts; i++) { 87 + rc = of_irq_parse_one(to_of_node(fwnode), i, &parent); 88 + if (rc) 89 + return rc; 90 + 91 + rc = riscv_of_parent_hartid(parent.np, &hartid); 92 + if (rc) 93 + return rc; 94 + 95 + if (parent.args[0] != RV_IRQ_SOFT) 96 + return -ENOTSUPP; 97 + 98 + cpu = riscv_hartid_to_cpuid(hartid); 99 + 100 + per_cpu(sswi_cpu_regs, cpu) = reg + i * THEAD_ACLINT_xSWI_REGISTER_SIZE; 101 + } 102 + 103 + pr_info("%pfwP: register %u CPU%s\n", fwnode, contexts, str_plural(contexts)); 104 + 105 + return 0; 106 + } 107 + 108 + static int __init thead_aclint_sswi_probe(struct fwnode_handle *fwnode) 109 + { 110 + struct irq_domain *domain; 111 + void __iomem *reg; 112 + int virq, rc; 113 + 114 + /* If it is T-HEAD CPU, check whether SSWI is enabled */ 115 + if (riscv_cached_mvendorid(0) == THEAD_VENDOR_ID && 116 + !(csr_read(THEAD_C9XX_CSR_SXSTATUS) & THEAD_C9XX_SXSTATUS_CLINTEE)) 117 + return -ENOTSUPP; 118 + 119 + if (!is_of_node(fwnode)) 120 + return -EINVAL; 121 + 122 + reg = of_iomap(to_of_node(fwnode), 0); 123 + if (!reg) 124 + return -ENOMEM; 125 + 126 + /* Parse SSWI setting */ 127 + rc = thead_aclint_sswi_parse_irq(fwnode, reg); 128 + if (rc < 0) 129 + return rc; 130 + 131 + /* If multiple SSWI devices are present, do not register irq again */ 132 + if (sswi_ipi_virq) 133 + return 0; 134 + 135 + /* Find riscv intc domain and create IPI irq mapping */ 136 + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); 137 + if (!domain) { 138 + pr_err("%pfwP: Failed to find INTC domain\n", fwnode); 139 + return -ENOENT; 140 + } 141 + 142 + sswi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT); 143 + if (!sswi_ipi_virq) { 144 + 
pr_err("unable to create ACLINT SSWI IRQ mapping\n"); 145 + return -ENOMEM; 146 + } 147 + 148 + /* Register SSWI irq and handler */ 149 + virq = ipi_mux_create(BITS_PER_BYTE, thead_aclint_sswi_ipi_send); 150 + if (virq <= 0) { 151 + pr_err("unable to create muxed IPIs\n"); 152 + irq_dispose_mapping(sswi_ipi_virq); 153 + return virq < 0 ? virq : -ENOMEM; 154 + } 155 + 156 + irq_set_chained_handler(sswi_ipi_virq, thead_aclint_sswi_ipi_handle); 157 + 158 + cpuhp_setup_state(CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, 159 + "irqchip/thead-aclint-sswi:starting", 160 + thead_aclint_sswi_starting_cpu, 161 + thead_aclint_sswi_dying_cpu); 162 + 163 + riscv_ipi_set_virq_range(virq, BITS_PER_BYTE); 164 + 165 + /* Announce that SSWI is providing IPIs */ 166 + pr_info("providing IPIs using THEAD ACLINT SSWI\n"); 167 + 168 + return 0; 169 + } 170 + 171 + static int __init thead_aclint_sswi_early_probe(struct device_node *node, 172 + struct device_node *parent) 173 + { 174 + return thead_aclint_sswi_probe(&node->fwnode); 175 + } 176 + IRQCHIP_DECLARE(thead_aclint_sswi, "thead,c900-aclint-sswi", thead_aclint_sswi_early_probe);
+1 -1
drivers/net/ethernet/3com/3c59x.c
··· 1302 1302 if (print_info) 1303 1303 pr_cont(", IRQ %d\n", dev->irq); 1304 1304 /* Tell them about an invalid IRQ. */ 1305 - if (dev->irq <= 0 || dev->irq >= nr_irqs) 1305 + if (dev->irq <= 0 || dev->irq >= irq_get_nr_irqs()) 1306 1306 pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n", 1307 1307 dev->irq); 1308 1308
+1
drivers/net/hamradio/baycom_ser_fdx.c
··· 373 373 374 374 static int ser12_open(struct net_device *dev) 375 375 { 376 + const unsigned int nr_irqs = irq_get_nr_irqs(); 376 377 struct baycom_state *bc = netdev_priv(dev); 377 378 enum uart u; 378 379
+3 -1
drivers/net/hamradio/scc.c
··· 1460 1460 1461 1461 static void z8530_init(void) 1462 1462 { 1463 + const unsigned int nr_irqs = irq_get_nr_irqs(); 1463 1464 struct scc_channel *scc; 1464 1465 int chip, k; 1465 1466 unsigned long flags; ··· 1736 1735 1737 1736 if (hwcfg.irq == 2) hwcfg.irq = 9; 1738 1737 1739 - if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs) 1738 + if (hwcfg.irq < 0 || hwcfg.irq >= irq_get_nr_irqs()) 1740 1739 return -EINVAL; 1741 1740 1742 1741 if (!Ivec[hwcfg.irq].used && hwcfg.irq) ··· 2118 2117 2119 2118 static void __exit scc_cleanup_driver(void) 2120 2119 { 2120 + const unsigned int nr_irqs = irq_get_nr_irqs(); 2121 2121 io_port ctrl; 2122 2122 int k; 2123 2123 struct scc_channel *scc;
+1 -1
drivers/scsi/aha152x.c
··· 295 295 #else 296 296 #define IRQ_MIN 9 297 297 #if defined(__PPC) 298 - #define IRQ_MAX (nr_irqs-1) 298 + #define IRQ_MAX (irq_get_nr_irqs()-1) 299 299 #else 300 300 #define IRQ_MAX 12 301 301 #endif
+1
drivers/sh/intc/virq-debugfs.c
··· 18 18 19 19 static int intc_irq_xlate_show(struct seq_file *m, void *priv) 20 20 { 21 + const unsigned int nr_irqs = irq_get_nr_irqs(); 21 22 int i; 22 23 23 24 seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");
+1
drivers/soc/renesas/Kconfig
··· 347 347 348 348 config ARCH_R9A09G057 349 349 bool "ARM64 Platform support for RZ/V2H(P)" 350 + select RENESAS_RZV2H_ICU 350 351 help 351 352 This enables support for the Renesas RZ/V2H(P) SoC variants. 352 353
+1 -1
drivers/tty/serial/8250/8250_port.c
··· 3176 3176 static int 3177 3177 serial8250_verify_port(struct uart_port *port, struct serial_struct *ser) 3178 3178 { 3179 - if (ser->irq >= nr_irqs || ser->irq < 0 || 3179 + if (ser->irq >= irq_get_nr_irqs() || ser->irq < 0 || 3180 3180 ser->baud_base < 9600 || ser->type < PORT_UNKNOWN || 3181 3181 ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS || 3182 3182 ser->type == PORT_STARTECH)
+1 -1
drivers/tty/serial/amba-pl010.c
··· 499 499 int ret = 0; 500 500 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) 501 501 ret = -EINVAL; 502 - if (ser->irq < 0 || ser->irq >= nr_irqs) 502 + if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs()) 503 503 ret = -EINVAL; 504 504 if (ser->baud_base < 9600) 505 505 ret = -EINVAL;
+1 -1
drivers/tty/serial/amba-pl011.c
··· 2202 2202 2203 2203 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) 2204 2204 ret = -EINVAL; 2205 - if (ser->irq < 0 || ser->irq >= nr_irqs) 2205 + if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs()) 2206 2206 ret = -EINVAL; 2207 2207 if (ser->baud_base < 9600) 2208 2208 ret = -EINVAL;
+1 -1
drivers/tty/serial/cpm_uart.c
··· 631 631 632 632 if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM) 633 633 ret = -EINVAL; 634 - if (ser->irq < 0 || ser->irq >= nr_irqs) 634 + if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs()) 635 635 ret = -EINVAL; 636 636 if (ser->baud_base < 9600) 637 637 ret = -EINVAL;
+1 -1
drivers/tty/serial/serial_core.c
··· 919 919 if (uport->ops->verify_port) 920 920 retval = uport->ops->verify_port(uport, new_info); 921 921 922 - if ((new_info->irq >= nr_irqs) || (new_info->irq < 0) || 922 + if ((new_info->irq >= irq_get_nr_irqs()) || (new_info->irq < 0) || 923 923 (new_info->baud_base < 9600)) 924 924 retval = -EINVAL; 925 925
+1 -1
drivers/tty/serial/ucc_uart.c
··· 1045 1045 if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM) 1046 1046 return -EINVAL; 1047 1047 1048 - if (ser->irq < 0 || ser->irq >= nr_irqs) 1048 + if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs()) 1049 1049 return -EINVAL; 1050 1050 1051 1051 if (ser->baud_base < 9600)
+1 -1
drivers/xen/events/events_base.c
··· 411 411 { 412 412 const struct irq_info *info = NULL; 413 413 414 - if (likely(irq < nr_irqs)) 414 + if (likely(irq < irq_get_nr_irqs())) 415 415 info = info_for_irq(irq); 416 416 if (!info) 417 417 return 0;
+2 -2
fs/proc/interrupts.c
··· 11 11 */ 12 12 static void *int_seq_start(struct seq_file *f, loff_t *pos) 13 13 { 14 - return (*pos <= nr_irqs) ? pos : NULL; 14 + return *pos <= irq_get_nr_irqs() ? pos : NULL; 15 15 } 16 16 17 17 static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos) 18 18 { 19 19 (*pos)++; 20 - if (*pos > nr_irqs) 20 + if (*pos > irq_get_nr_irqs()) 21 21 return NULL; 22 22 return pos; 23 23 }
+2 -2
fs/proc/stat.c
··· 76 76 seq_put_decimal_ull(p, " ", kstat_irqs_usr(i)); 77 77 next = i + 1; 78 78 } 79 - show_irq_gap(p, nr_irqs - next); 79 + show_irq_gap(p, irq_get_nr_irqs() - next); 80 80 } 81 81 82 82 static int show_stat(struct seq_file *p, void *v) ··· 196 196 unsigned int size = 1024 + 128 * num_online_cpus(); 197 197 198 198 /* minimum size to display an interrupt count : 2 bytes */ 199 - size += 2 * nr_irqs; 199 + size += 2 * irq_get_nr_irqs(); 200 200 return single_open_size(file, show_stat, NULL, size); 201 201 } 202 202
+1
include/linux/cpuhotplug.h
··· 147 147 CPUHP_AP_IRQ_EIOINTC_STARTING, 148 148 CPUHP_AP_IRQ_AVECINTC_STARTING, 149 149 CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, 150 + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, 150 151 CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, 151 152 CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, 152 153 CPUHP_AP_ARM_MVEBU_COHERENCY,
+47
include/linux/interrupt.h
··· 616 616 extern void raise_softirq_irqoff(unsigned int nr); 617 617 extern void raise_softirq(unsigned int nr); 618 618 619 + /* 620 + * With forced-threaded interrupts enabled a raised softirq is deferred to 621 + * ksoftirqd unless it can be handled within the threaded interrupt. This 622 + * affects timer_list timers and hrtimers which are explicitly marked with 623 + * HRTIMER_MODE_SOFT. 624 + * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing 625 + * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD. 626 + * Userspace controlled timers (like the clock_nanosleep() interface) are divided 627 + * into two categories: Tasks with elevated scheduling policy including 628 + * SCHED_{FIFO|RR|DL} and the remaining scheduling policy. The tasks with the 629 + * elevated scheduling policy are woken up directly from the HARDIRQ while all 630 + * other wake ups are delayed to softirq and so to ksoftirqd. 631 + * 632 + * The ksoftirqd runs at SCHED_OTHER policy at which it should remain since it 633 + * handles the softirq in an overloaded situation (not having handled everything 634 + * within its last run). 635 + * If the timers are handled at SCHED_OTHER priority then they compete with all 636 + * other SCHED_OTHER tasks for CPU resources and are possibly delayed. 637 + * Moving timer softirqs to a low priority SCHED_FIFO thread instead ensures 638 + * that timers are performed before scheduling any SCHED_OTHER thread. 
639 + */ 640 + DECLARE_PER_CPU(struct task_struct *, ktimerd); 641 + DECLARE_PER_CPU(unsigned long, pending_timer_softirq); 642 + void raise_ktimers_thread(unsigned int nr); 643 + 644 + static inline unsigned int local_timers_pending_force_th(void) 645 + { 646 + return __this_cpu_read(pending_timer_softirq); 647 + } 648 + 649 + static inline void raise_timer_softirq(unsigned int nr) 650 + { 651 + lockdep_assert_in_irq(); 652 + if (force_irqthreads()) 653 + raise_ktimers_thread(nr); 654 + else 655 + __raise_softirq_irqoff(nr); 656 + } 657 + 658 + static inline unsigned int local_timers_pending(void) 659 + { 660 + if (force_irqthreads()) 661 + return local_timers_pending_force_th(); 662 + else 663 + return local_softirq_pending(); 664 + } 665 + 619 666 DECLARE_PER_CPU(struct task_struct *, ksoftirqd); 620 667 621 668 static inline struct task_struct *this_cpu_ksoftirqd(void)
+21 -15
include/linux/irqnr.h
··· 5 5 #include <uapi/linux/irqnr.h> 6 6 7 7 8 - extern int nr_irqs; 8 + unsigned int irq_get_nr_irqs(void) __pure; 9 + unsigned int irq_set_nr_irqs(unsigned int nr); 9 10 extern struct irq_desc *irq_to_desc(unsigned int irq); 10 11 unsigned int irq_get_next_irq(unsigned int offset); 11 12 12 - # define for_each_irq_desc(irq, desc) \ 13 - for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ 14 - irq++, desc = irq_to_desc(irq)) \ 15 - if (!desc) \ 16 - ; \ 17 - else 18 - 13 + #define for_each_irq_desc(irq, desc) \ 14 + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ 15 + __nr_irqs__ = 0) \ 16 + for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \ 17 + irq++, desc = irq_to_desc(irq)) \ 18 + if (!desc) \ 19 + ; \ 20 + else 19 21 20 22 # define for_each_irq_desc_reverse(irq, desc) \ 21 - for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ 22 - irq--, desc = irq_to_desc(irq)) \ 23 + for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \ 24 + irq >= 0; irq--, desc = irq_to_desc(irq)) \ 23 25 if (!desc) \ 24 26 ; \ 25 27 else 26 28 27 - # define for_each_active_irq(irq) \ 28 - for (irq = irq_get_next_irq(0); irq < nr_irqs; \ 29 - irq = irq_get_next_irq(irq + 1)) 29 + #define for_each_active_irq(irq) \ 30 + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ 31 + __nr_irqs__ = 0) \ 32 + for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \ 33 + irq = irq_get_next_irq(irq + 1)) 30 34 31 - #define for_each_irq_nr(irq) \ 32 - for (irq = 0; irq < nr_irqs; irq++) 35 + #define for_each_irq_nr(irq) \ 36 + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ 37 + __nr_irqs__ = 0) \ 38 + for (irq = 0; irq < __nr_irqs__; irq++) 33 39 34 40 #endif
+1 -2
kernel/irq/devres.c
··· 141 141 { 142 142 struct irq_devres match_data = { irq, dev_id }; 143 143 144 - WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match, 144 + WARN_ON(devres_release(dev, devm_irq_release, devm_irq_match, 145 145 &match_data)); 146 - free_irq(irq, dev_id); 147 146 } 148 147 EXPORT_SYMBOL(devm_free_irq); 149 148
+26 -4
kernel/irq/irqdesc.c
··· 15 15 #include <linux/maple_tree.h> 16 16 #include <linux/irqdomain.h> 17 17 #include <linux/sysfs.h> 18 + #include <linux/string_choices.h> 18 19 19 20 #include "internals.h" 20 21 ··· 139 138 desc_smp_init(desc, node, affinity); 140 139 } 141 140 142 - int nr_irqs = NR_IRQS; 143 - EXPORT_SYMBOL_GPL(nr_irqs); 141 + static unsigned int nr_irqs = NR_IRQS; 142 + 143 + /** 144 + * irq_get_nr_irqs() - Number of interrupts supported by the system. 145 + */ 146 + unsigned int irq_get_nr_irqs(void) 147 + { 148 + return nr_irqs; 149 + } 150 + EXPORT_SYMBOL_GPL(irq_get_nr_irqs); 151 + 152 + /** 153 + * irq_set_nr_irqs() - Set the number of interrupts supported by the system. 154 + * @nr: New number of interrupts. 155 + * 156 + * Return: @nr. 157 + */ 158 + unsigned int irq_set_nr_irqs(unsigned int nr) 159 + { 160 + nr_irqs = nr; 161 + 162 + return nr; 163 + } 164 + EXPORT_SYMBOL_GPL(irq_set_nr_irqs); 144 165 145 166 static DEFINE_MUTEX(sparse_irq_lock); 146 167 static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs, ··· 321 298 ssize_t ret = 0; 322 299 323 300 raw_spin_lock_irq(&desc->lock); 324 - ret = sprintf(buf, "%s\n", 325 - irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled"); 301 + ret = sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); 326 302 raw_spin_unlock_irq(&desc->lock); 327 303 328 304 return ret;
+1 -1
kernel/irq/irqdomain.c
··· 1225 1225 virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE, 1226 1226 affinity); 1227 1227 } else { 1228 - hint = hwirq % nr_irqs; 1228 + hint = hwirq % irq_get_nr_irqs(); 1229 1229 if (hint == 0) 1230 1230 hint++; 1231 1231 virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
+8 -4
kernel/irq/proc.c
··· 457 457 } 458 458 459 459 #ifndef ACTUAL_NR_IRQS 460 - # define ACTUAL_NR_IRQS nr_irqs 460 + # define ACTUAL_NR_IRQS irq_get_nr_irqs() 461 461 #endif 462 462 463 463 int show_interrupts(struct seq_file *p, void *v) 464 464 { 465 + const unsigned int nr_irqs = irq_get_nr_irqs(); 465 466 static int prec; 466 467 467 468 int i = *(loff_t *) v, j; ··· 495 494 if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs) 496 495 goto outsparse; 497 496 498 - seq_printf(p, "%*d: ", prec, i); 499 - for_each_online_cpu(j) 500 - seq_printf(p, "%10u ", desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0); 497 + seq_printf(p, "%*d:", prec, i); 498 + for_each_online_cpu(j) { 499 + unsigned int cnt = desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0; 500 + 501 + seq_put_decimal_ull_width(p, " ", cnt, 10); 502 + } 501 503 502 504 raw_spin_lock_irqsave(&desc->lock, flags); 503 505 if (desc->irq_data.chip) {
+8
kernel/rcu/rcutorture.c
··· 2476 2476 WARN_ON_ONCE(!t); 2477 2477 sp.sched_priority = 2; 2478 2478 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2479 + #ifdef CONFIG_IRQ_FORCED_THREADING 2480 + if (force_irqthreads()) { 2481 + t = per_cpu(ktimerd, cpu); 2482 + WARN_ON_ONCE(!t); 2483 + sp.sched_priority = 2; 2484 + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 2485 + } 2486 + #endif 2479 2487 } 2480 2488 2481 2489 /* Don't allow time recalculation while creating a new task. */
+68 -1
kernel/softirq.c
··· 624 624 #endif 625 625 } 626 626 627 + #ifdef CONFIG_IRQ_FORCED_THREADING 628 + DEFINE_PER_CPU(struct task_struct *, ktimerd); 629 + DEFINE_PER_CPU(unsigned long, pending_timer_softirq); 630 + 631 + static void wake_timersd(void) 632 + { 633 + struct task_struct *tsk = __this_cpu_read(ktimerd); 634 + 635 + if (tsk) 636 + wake_up_process(tsk); 637 + } 638 + 639 + #else 640 + 641 + static inline void wake_timersd(void) { } 642 + 643 + #endif 644 + 627 645 static inline void __irq_exit_rcu(void) 628 646 { 629 647 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED ··· 653 635 preempt_count_sub(HARDIRQ_OFFSET); 654 636 if (!in_interrupt() && local_softirq_pending()) 655 637 invoke_softirq(); 638 + 639 + if (IS_ENABLED(CONFIG_IRQ_FORCED_THREADING) && force_irqthreads() && 640 + local_timers_pending_force_th() && !(in_nmi() | in_hardirq())) 641 + wake_timersd(); 656 642 657 643 tick_irq_exit(); 658 644 } ··· 987 965 .thread_comm = "ksoftirqd/%u", 988 966 }; 989 967 968 + #ifdef CONFIG_IRQ_FORCED_THREADING 969 + static void ktimerd_setup(unsigned int cpu) 970 + { 971 + /* Above SCHED_NORMAL to handle timers before regular tasks. 
*/ 972 + sched_set_fifo_low(current); 973 + } 974 + 975 + static int ktimerd_should_run(unsigned int cpu) 976 + { 977 + return local_timers_pending_force_th(); 978 + } 979 + 980 + void raise_ktimers_thread(unsigned int nr) 981 + { 982 + trace_softirq_raise(nr); 983 + __this_cpu_or(pending_timer_softirq, BIT(nr)); 984 + } 985 + 986 + static void run_ktimerd(unsigned int cpu) 987 + { 988 + unsigned int timer_si; 989 + 990 + ksoftirqd_run_begin(); 991 + 992 + timer_si = local_timers_pending_force_th(); 993 + __this_cpu_write(pending_timer_softirq, 0); 994 + or_softirq_pending(timer_si); 995 + 996 + __do_softirq(); 997 + 998 + ksoftirqd_run_end(); 999 + } 1000 + 1001 + static struct smp_hotplug_thread timer_thread = { 1002 + .store = &ktimerd, 1003 + .setup = ktimerd_setup, 1004 + .thread_should_run = ktimerd_should_run, 1005 + .thread_fn = run_ktimerd, 1006 + .thread_comm = "ktimers/%u", 1007 + }; 1008 + #endif 1009 + 990 1010 static __init int spawn_ksoftirqd(void) 991 1011 { 992 1012 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, 993 1013 takeover_tasklets); 994 1014 BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); 995 - 1015 + #ifdef CONFIG_IRQ_FORCED_THREADING 1016 + if (force_irqthreads()) 1017 + BUG_ON(smpboot_register_percpu_thread(&timer_thread)); 1018 + #endif 996 1019 return 0; 997 1020 } 998 1021 early_initcall(spawn_ksoftirqd);
+2 -2
kernel/time/hrtimer.c
··· 1811 1811 if (!ktime_before(now, cpu_base->softirq_expires_next)) { 1812 1812 cpu_base->softirq_expires_next = KTIME_MAX; 1813 1813 cpu_base->softirq_activated = 1; 1814 - raise_softirq_irqoff(HRTIMER_SOFTIRQ); 1814 + raise_timer_softirq(HRTIMER_SOFTIRQ); 1815 1815 } 1816 1816 1817 1817 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); ··· 1906 1906 if (!ktime_before(now, cpu_base->softirq_expires_next)) { 1907 1907 cpu_base->softirq_expires_next = KTIME_MAX; 1908 1908 cpu_base->softirq_activated = 1; 1909 - raise_softirq_irqoff(HRTIMER_SOFTIRQ); 1909 + raise_timer_softirq(HRTIMER_SOFTIRQ); 1910 1910 } 1911 1911 1912 1912 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+1 -1
kernel/time/tick-sched.c
··· 865 865 866 866 static inline bool local_timer_softirq_pending(void) 867 867 { 868 - return local_softirq_pending() & BIT(TIMER_SOFTIRQ); 868 + return local_timers_pending() & BIT(TIMER_SOFTIRQ); 869 869 } 870 870 871 871 /*
+1 -1
kernel/time/timer.c
··· 2499 2499 */ 2500 2500 if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) || 2501 2501 (i == BASE_DEF && tmigr_requires_handle_remote())) { 2502 - raise_softirq(TIMER_SOFTIRQ); 2502 + raise_timer_softirq(TIMER_SOFTIRQ); 2503 2503 return; 2504 2504 } 2505 2505 }