// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */

/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */
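
/*
 * For example, a consumer of hardware IRQ 0x2ea would be described with
 * something like the following (hypothetical node, shown only to illustrate
 * the 3-cell form; the AIC_IRQ and IRQ_TYPE_* constants come from the
 * dt-bindings headers):
 *
 *	interrupt-parent = <&aic>;
 *	interrupts = <AIC_IRQ 0x2ea IRQ_TYPE_LEVEL_HIGH>;
 */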

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/apple_m1_pmu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC v1 registers (MMIO)
 */

#define AIC_INFO		0x0004
#define AIC_INFO_NR_IRQ		GENMASK(15, 0)

#define AIC_CONFIG		0x0010

#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define AIC_EVENT_DIE		GENMASK(31, 24)
#define AIC_EVENT_TYPE		GENMASK(23, 16)
#define AIC_EVENT_NUM		GENMASK(15, 0)

#define AIC_EVENT_TYPE_FIQ	0 /* Software use */
#define AIC_EVENT_TYPE_IRQ	1
#define AIC_EVENT_TYPE_IPI	4
#define AIC_EVENT_IPI_OTHER	1
#define AIC_EVENT_IPI_SELF	2

#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028

#define AIC_IPI_SEND_CPU(cpu)	BIT(cpu)

#define AIC_IPI_OTHER		BIT(0)
#define AIC_IPI_SELF		BIT(31)

#define AIC_TARGET_CPU		0x3000

#define AIC_CPU_IPI_SET(cpu)	(0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu)	(0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
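
/*
 * The per-CPU IPI registers above are spaced 0x80 bytes apart, hence the
 * << 7 in the macros: e.g. AIC_CPU_IPI_SET(2) = 0x5008 + (2 << 7) = 0x5108.
 */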

#define AIC_MAX_IRQ		0x400

/*
 * AIC v2 registers (MMIO)
 */

#define AIC2_VERSION		0x0000
#define AIC2_VERSION_VER	GENMASK(7, 0)

#define AIC2_INFO1		0x0004
#define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
#define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)

#define AIC2_INFO2		0x0008

#define AIC2_INFO3		0x000c
#define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
#define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)

#define AIC2_RESET		0x0010
#define AIC2_RESET_RESET	BIT(0)

#define AIC2_CONFIG		0x0014
#define AIC2_CONFIG_ENABLE	BIT(0)
#define AIC2_CONFIG_PREFER_PCPU	BIT(28)

#define AIC2_TIMEOUT		0x0028
#define AIC2_CLUSTER_PRIO	0x0030
#define AIC2_DELAY_GROUPS	0x0100

#define AIC2_IRQ_CFG		0x2000

/* AIC v3 registers (MMIO) */
#define AIC3_IRQ_CFG		0x10000
/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG
 * (AIC3 uses the same layout, but starting at AIC3_IRQ_CFG):
 *
 * Repeat for each die:
 *   IRQ_CFG: u32 * MAX_IRQS
 *   SW_SET: u32 * (MAX_IRQS / 32)
 *   SW_CLR: u32 * (MAX_IRQS / 32)
 *   MASK_SET: u32 * (MAX_IRQS / 32)
 *   MASK_CLR: u32 * (MAX_IRQS / 32)
 *   HW_STATE: u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */

#define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
#define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)

#define MASK_REG(x)	(4 * ((x) >> 5))
#define MASK_BIT(x)	BIT((x) & GENMASK(4, 0))
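
/*
 * MASK_REG()/MASK_BIT() address one bit in an array of 32-bit mask words:
 * e.g. for hwirq 33, MASK_REG(33) = 4 (the byte offset of the second word)
 * and MASK_BIT(33) = BIT(1), i.e. bit 1 within that word.
 */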

/*
 * IMP-DEF sysregs that control FIQ sources
 */

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1	sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1	sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU			GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER			GENMASK(23, 16)
#define IPI_RR_TYPE			GENMASK(29, 28)
#define IPI_RR_IMMEDIATE		0
#define IPI_RR_RETRACT			1
#define IPI_RR_DEFERRED			2
#define IPI_RR_NOWAKE			3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1		sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING			BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2	sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V		BIT(0)
#define VM_TMR_FIQ_ENABLE_P		BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1		sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1		sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE			GENMASK(18, 16)
#define UPMCR0_IMODE_OFF		0
#define UPMCR0_IMODE_AIC		2
#define UPMCR0_IMODE_HALT		3
#define UPMCR0_IMODE_FIQ		4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1		sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT			BIT(0)

/* MPIDR fields */
#define MPIDR_CPU(x)			MPIDR_AFFINITY_LEVEL(x, 0)
#define MPIDR_CLUSTER(x)		MPIDR_AFFINITY_LEVEL(x, 1)

#define AIC_IRQ_HWIRQ(die, irq)	(FIELD_PREP(AIC_EVENT_DIE, die) | \
				 FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, irq))
#define AIC_FIQ_HWIRQ(x)	(FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, x))
#define AIC_HWIRQ_IRQ(x)	FIELD_GET(AIC_EVENT_NUM, x)
#define AIC_HWIRQ_DIE(x)	FIELD_GET(AIC_EVENT_DIE, x)
#define AIC_NR_SWIPI		32
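
/*
 * hwirq numbers reuse the AIC_EVENT field layout: die in bits 31:24, event
 * type in bits 23:16 and the per-die index in bits 15:0. For example,
 * AIC_IRQ_HWIRQ(1, 5) encodes to 0x01010005 (die 1, type IRQ, irq 5).
 */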

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */
enum fiq_hwirq {
	/* Must be ordered as in apple-aic.h */
	AIC_TMR_EL0_PHYS	= AIC_TMR_HV_PHYS,
	AIC_TMR_EL0_VIRT	= AIC_TMR_HV_VIRT,
	AIC_TMR_EL02_PHYS	= AIC_TMR_GUEST_PHYS,
	AIC_TMR_EL02_VIRT	= AIC_TMR_GUEST_VIRT,
	AIC_CPU_PMU_Effi	= AIC_CPU_PMU_E,
	AIC_CPU_PMU_Perf	= AIC_CPU_PMU_P,
	/* No need for this to be discovered from DT */
	AIC_VGIC_MI,
	AIC_NR_FIQ
};

/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */
static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */
static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi);

struct aic_info {
	int version;

	/* Register offsets */
	u32 event;
	u32 target_cpu;
	u32 irq_cfg;
	u32 sw_set;
	u32 sw_clr;
	u32 mask_set;
	u32 mask_clr;

	u32 die_stride;

	/* Features */
	bool fast_ipi;
	bool local_fast_ipi;
};

static const struct aic_info aic1_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,
};

static const struct aic_info aic1_fipi_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,

	.fast_ipi	= true,
};

static const struct aic_info aic1_local_fipi_info __initconst = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,

	.fast_ipi	= true,
	.local_fast_ipi = true,
};

static const struct aic_info aic2_info __initconst = {
	.version	= 2,

	.irq_cfg	= AIC2_IRQ_CFG,

	.fast_ipi	= true,
	.local_fast_ipi = true,
};

static const struct aic_info aic3_info __initconst = {
	.version	= 3,

	.irq_cfg	= AIC3_IRQ_CFG,

	.fast_ipi	= true,
	.local_fast_ipi = true,
};

static const struct of_device_id aic_info_match[] = {
	{
		.compatible = "apple,t8103-aic",
		.data = &aic1_local_fipi_info,
	},
	{
		.compatible = "apple,t8015-aic",
		.data = &aic1_fipi_info,
	},
	{
		.compatible = "apple,aic",
		.data = &aic1_info,
	},
	{
		.compatible = "apple,aic2",
		.data = &aic2_info,
	},
	{
		.compatible = "apple,t8122-aic3",
		.data = &aic3_info,
	},
	{}
};

struct aic_irq_chip {
	void __iomem *base;
	void __iomem *event;
	struct irq_domain *hw_domain;
	struct {
		cpumask_t aff;
	} *fiq_aff[AIC_NR_FIQ];

	int nr_irq;
	int max_irq;
	int nr_die;
	int max_die;

	struct aic_info info;
};

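/*
 * Per-CPU bitmap of FIQ sources the kernel currently wants unmasked. Most
 * FIQ sources lack a real hardware mask bit, so aic_fiq_eoi() consults
 * this bitmap to decide whether a source should be unmasked again after
 * handling.
 */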
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
	return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
	writel_relaxed(val, ic->base + reg);
}
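
/*
 * These helpers use the relaxed MMIO accessors on purpose: mask/ack
 * register traffic needs no ordering against DMA. The one read that does
 * (the event register fetch in aic_handle_irq()) bypasses these helpers
 * and uses a non-relaxed readl() instead.
 */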

/*
 * IRQ irqchip
 */

static void aic_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_masked(d))
		aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->event + ic->info.event);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		if (type == AIC_EVENT_TYPE_IRQ)
			generic_handle_domain_irq(aic_irqc->hw_domain, event);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. It should however only trigger when NV is
	 * in use, and be cleared when coming back from the handler.
	 */
	if (is_kernel_in_hyp_mode() &&
	    (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		u64 val;

		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_VGIC_MI));

		if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
			     (val = read_sysreg_s(SYS_ICH_MISR_EL2)))) {
			pr_err_ratelimited("vGIC IRQ fired and not handled by KVM (MISR=%llx), disabling.\n",
					   val);
			sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
		}
	}
}

static int aic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	int cpu;

	BUG_ON(!ic->info.target_cpu);

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
	/*
	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
	 * have a way to find out the type of any given IRQ, so just allow both.
	 */
	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};

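/*
 * AIC2 and AIC3 have no driver-managed per-IRQ target register (their
 * aic_info leaves .target_cpu at 0, which aic_irq_set_affinity() would
 * trip over), so this chip omits .irq_set_affinity and leaves event
 * routing to the hardware.
 */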
static struct irq_chip aic2_chip = {
	.name = "AIC2",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_type = aic_irq_set_type,
};

/*
 * FIQ irqchip
 */

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}

static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_mask(struct irq_data *d)
{
	aic_fiq_set_mask(d);
	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
	aic_fiq_clear_mask(d);
	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}

#define TIMER_FIRING(x)						\
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |	\
		 ARCH_TIMER_CTRL_IT_STAT)) ==				\
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
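
/*
 * TIMER_FIRING() is true when a timer is enabled, its interrupt is not
 * masked (ARCH_TIMER_CTRL_IT_MASK clear), and its interrupt status bit is
 * set, i.e. when that particular timer is currently asserting the FIQ line.
 */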

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */

	if (static_branch_likely(&use_fast_ipi) &&
	    (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING))
		aic_handle_ipi(regs);

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
	}

	if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
			(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_CPU_PMU_P));

	if (static_branch_likely(&use_fast_ipi) &&
	    (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};

/*
 * Main IRQ domain
 */

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
	struct irq_chip *chip = &aic_chip;

	if (ic->info.version == 2 || ic->info.version == 3)
		chip = &aic2_chip;

	if (type == AIC_EVENT_TYPE_IRQ) {
		irq_domain_set_info(id, irq, hw, chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static int aic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
{
	const struct cpumask *mask;
	u32 intid;

	info->flags = 0;
	info->affinity = NULL;

	if (fwspec->param[0] != AIC_FIQ)
		return 0;

	if (fwspec->param_count == 3)
		intid = fwspec->param[1];
	else
		intid = fwspec->param[2];

	if (aic_irqc->fiq_aff[intid])
		mask = &aic_irqc->fiq_aff[intid]->aff;
	else
		mask = cpu_possible_mask;

	info->affinity = mask;
	info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;

	return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 *args;
	u32 die = 0;

	if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
	    !is_of_node(fwspec->fwnode))
		return -EINVAL;

	args = &fwspec->param[1];

	if (fwspec->param_count == 4) {
		die = args[0];
		args++;
	}

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (die >= ic->nr_die)
			return -EINVAL;
		if (args[0] >= ic->nr_irq)
			return -EINVAL;
		*hwirq = AIC_IRQ_HWIRQ(die, args[0]);
		break;
	case AIC_FIQ:
		if (die != 0)
			return -EINVAL;
		if (args[0] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = AIC_FIQ_HWIRQ(args[0]);

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (args[0]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				return -ENOENT;
			default:
				break;
			}
		}

		/* Merge the two PMUs on a single interrupt */
		if (*hwirq == AIC_CPU_PMU_E)
			*hwirq = AIC_CPU_PMU_P;
		break;
	default:
		return -EINVAL;
	}

	*type = args[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int i, ret;

	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate = aic_irq_domain_translate,
	.alloc = aic_irq_domain_alloc,
	.free = aic_irq_domain_free,
	.get_fwspec_info = aic_irq_get_fwspec_info,
};

/*
 * IPI irqchip
 */

static void aic_ipi_send_fast(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 my_mpidr = read_cpuid_mpidr();
	u64 cluster = MPIDR_CLUSTER(mpidr);
	u64 idx = MPIDR_CPU(mpidr);

	if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) {
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1);
	} else {
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
			       SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
	}
	isb();
}
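
/*
 * Worked example: targeting CPU 3 in cluster 1 from a CPU in cluster 0
 * takes the global path above, writing FIELD_PREP(IPI_RR_CPU, 3) |
 * FIELD_PREP(IPI_RR_CLUSTER, 1) to SYS_IMP_APL_IPI_RR_GLOBAL_EL1. The
 * trailing isb() makes sure the sysreg write has taken effect before we
 * return, as such writes are not self-synchronizing.
 */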

static void aic_handle_ipi(struct pt_regs *regs)
{
	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 *
	 * For the Fast IPI case, this needs to be ordered before the vIPI
	 * handling below, so we need to isb();
	 */
	if (static_branch_likely(&use_fast_ipi)) {
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		isb();
	} else {
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
	}

	ipi_mux_process();

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	if (!static_branch_likely(&use_fast_ipi))
		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static void aic_ipi_send_single(unsigned int cpu)
{
	if (static_branch_likely(&use_fast_ipi))
		aic_ipi_send_fast(cpu);
	else
		aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
}

static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
	int base_ipi;

	base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
	if (WARN_ON(base_ipi <= 0))
		return -ENODEV;

	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

	return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	if (static_branch_likely(&use_fast_ipi))
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	if (static_branch_likely(&use_fast_ipi)) {
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}

	/* Commit all of the above */
	isb();

	if (aic_irqc->info.version == 1) {
		/*
		 * Make sure the kernel's idea of logical CPU order is the same as AIC's.
		 * If we ever end up with a mismatch here, we will have to introduce
		 * a mapping table similar to what other irqchip drivers do.
		 */
		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

		/*
		 * Always keep IPIs unmasked at the hardware level (except auto-masking
		 * by AIC during processing). We manage masks at the vIPI level.
		 * These registers only exist on AICv1, AICv2 always uses fast IPIs.
		 */
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
		if (static_branch_likely(&use_fast_ipi)) {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
		} else {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
		}
	}

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}

static struct gic_kvm_info vgic_info __initdata = {
	.type			= GIC_V3,
	.no_maint_irq_mask	= true,
	.no_hw_deactivation	= true,
};

static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
{
	int i, n;
	u32 fiq;

	if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
	    WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
		return;

	n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
	if (WARN_ON(n < 0))
		return;

	ic->fiq_aff[fiq] = kzalloc_obj(*ic->fiq_aff[fiq]);
	if (!ic->fiq_aff[fiq])
		return;

	for (i = 0; i < n; i++) {
		struct device_node *cpu_node;
		u32 cpu_phandle;
		int cpu;

		if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
			continue;

		cpu_node = of_find_node_by_phandle(cpu_phandle);
		if (WARN_ON(!cpu_node))
			continue;

		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		if (WARN_ON(cpu < 0))
			continue;

		cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
	}
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i, die;
	u32 off, start_off;
	void __iomem *regs;
	struct aic_irq_chip *irqc;
	struct device_node *affs;
	const struct of_device_id *match;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc_obj(*irqc);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	irqc->base = regs;

	match = of_match_node(aic_info_match, node);
	if (!match)
		goto err_unmap;

	irqc->info = *(struct aic_info *)match->data;

	aic_irqc = irqc;

	switch (irqc->info.version) {
	case 1: {
		u32 info;

		info = aic_ic_read(irqc, AIC_INFO);
		irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
		irqc->max_irq = AIC_MAX_IRQ;
		irqc->nr_die = irqc->max_die = 1;

		off = start_off = irqc->info.target_cpu;
		off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

		irqc->event = irqc->base;

		break;
	}
	case 2 ... 3: {
		u32 info1, info3;

		info1 = aic_ic_read(irqc, AIC2_INFO1);
		info3 = aic_ic_read(irqc, AIC2_INFO3);

		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

		off = start_off = irqc->info.irq_cfg;
		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

		irqc->event = of_iomap(node, 1);
		if (WARN_ON(!irqc->event))
			goto err_unmap;

		break;
	}
	}

	irqc->info.sw_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
	irqc->info.sw_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
	irqc->info.mask_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
	irqc->info.mask_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
	off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
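
	/*
	 * For AIC2/AIC3, off - start_off now spans one die's worth of
	 * registers, matching the layout comment above AIC2_IRQ_CFG_TARGET.
	 * As a worked example, a hypothetical max_irq of 0x1000 would give
	 * 4 * 0x1000 bytes of IRQ_CFG plus 5 * 4 * (0x1000 / 32) bytes for
	 * the five bitmap blocks, i.e. a die stride of 0x4a00 bytes.
	 */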

	if (!irqc->info.fast_ipi)
		static_branch_disable(&use_fast_ipi);

	if (!irqc->info.local_fast_ipi)
		static_branch_disable(&use_local_fast_ipi);

	irqc->info.die_stride = off - start_off;

	irqc->hw_domain = irq_domain_create_tree(of_fwnode_handle(node),
						 &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain))
		goto err_unmap;

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node))
		goto err_remove_domain;

	affs = of_get_child_by_name(node, "affinities");
	if (affs) {
		struct device_node *chld;

		for_each_child_of_node(affs, chld)
			build_fiq_affinity(irqc, chld);
	}
	of_node_put(affs);

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	off = 0;
	for (die = 0; die < irqc->nr_die; die++) {
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
		if (irqc->info.target_cpu)
			for (i = 0; i < irqc->nr_irq; i++)
				aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
		off += irqc->info.die_stride;
	}

	if (irqc->info.version == 2 || irqc->info.version == 3) {
		u32 config = aic_ic_read(irqc, AIC2_CONFIG);

		config |= AIC2_CONFIG_ENABLE;
		aic_ic_write(irqc, AIC2_CONFIG, config);
	}

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts");

	if (static_branch_likely(&use_fast_ipi))
		pr_info("Using Fast IPIs");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	if (is_kernel_in_hyp_mode()) {
		struct irq_fwspec mi = {
			.fwnode		= of_fwnode_handle(node),
			.param_count	= 3,
			.param		= {
				[0]	= AIC_FIQ, /* This is a lie */
				[1]	= AIC_VGIC_MI,
				[2]	= IRQ_TYPE_LEVEL_HIGH,
			},
		};

		vgic_info.maint_irq = irq_create_fwspec_mapping(&mi);
		WARN_ON(!vgic_info.maint_irq);
	}

	vgic_set_kvm_info(&vgic_info);

	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
		irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;

err_remove_domain:
	irq_domain_remove(irqc->hw_domain);
err_unmap:
	if (irqc->event && irqc->event != irqc->base)
		iounmap(irqc->event);
	iounmap(irqc->base);
	kfree(irqc);
	return -ENODEV;
}

IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic3, "apple,t8122-aic3", aic_of_ic_init);