Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Always keep DAIF.[IF] in sync

Apple SoCs (A11 and newer) have some interrupt sources hardwired to the
FIQ line. We implement support for this by simply treating IRQs and FIQs
the same way in the interrupt vectors.

To support these systems, the FIQ mask bit needs to be kept in sync with
the IRQ mask bit, so both kinds of exceptions are masked together. No
other platforms should be delivering FIQ exceptions right now, and we
already unmask FIQ in normal process context, so this should not have an
effect on other systems - if spurious FIQs were arriving, they would
already panic the kernel.

Signed-off-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Hector Martin <marcan@marcan.st>
Cc: James Morse <james.morse@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210315115629.57191-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Hector Martin and committed by Catalin Marinas.
f0098155 9eb563cd

+26 -25
+1 -1
arch/arm64/include/asm/arch_gicv3.h
··· 173 173 174 174 static inline void gic_arch_enable_irqs(void) 175 175 { 176 - asm volatile ("msr daifclr, #2" : : : "memory"); 176 + asm volatile ("msr daifclr, #3" : : : "memory"); 177 177 } 178 178 179 179 #endif /* __ASSEMBLY__ */
+4 -4
arch/arm64/include/asm/assembler.h
··· 40 40 msr daif, \flags 41 41 .endm 42 42 43 - /* IRQ is the lowest priority flag, unconditionally unmask the rest. */ 44 - .macro enable_da_f 45 - msr daifclr, #(8 | 4 | 1) 43 + /* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */ 44 + .macro enable_da 45 + msr daifclr, #(8 | 4) 46 46 .endm 47 47 48 48 /* ··· 50 50 */ 51 51 .macro save_and_disable_irq, flags 52 52 mrs \flags, daif 53 - msr daifset, #2 53 + msr daifset, #3 54 54 .endm 55 55 56 56 .macro restore_irq, flags
+5 -5
arch/arm64/include/asm/daifflags.h
··· 13 13 #include <asm/ptrace.h> 14 14 15 15 #define DAIF_PROCCTX 0 16 - #define DAIF_PROCCTX_NOIRQ PSR_I_BIT 17 - #define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT) 16 + #define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT) 17 + #define DAIF_ERRCTX (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) 18 18 #define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) 19 19 20 20 ··· 47 47 if (system_uses_irq_prio_masking()) { 48 48 /* If IRQs are masked with PMR, reflect it in the flags */ 49 49 if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON) 50 - flags |= PSR_I_BIT; 50 + flags |= PSR_I_BIT | PSR_F_BIT; 51 51 } 52 52 53 53 return flags; ··· 69 69 bool irq_disabled = flags & PSR_I_BIT; 70 70 71 71 WARN_ON(system_has_prio_mask_debugging() && 72 - !(read_sysreg(daif) & PSR_I_BIT)); 72 + (read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT)); 73 73 74 74 if (!irq_disabled) { 75 75 trace_hardirqs_on(); ··· 86 86 * If interrupts are disabled but we can take 87 87 * asynchronous errors, we can take NMIs 88 88 */ 89 - flags &= ~PSR_I_BIT; 89 + flags &= ~(PSR_I_BIT | PSR_F_BIT); 90 90 pmr = GIC_PRIO_IRQOFF; 91 91 } else { 92 92 pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
+7 -9
arch/arm64/include/asm/irqflags.h
··· 12 12 13 13 /* 14 14 * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and 15 - * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai' 15 + * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif' 16 16 * order: 17 17 * Masking debug exceptions causes all other exceptions to be masked too/ 18 - * Masking SError masks irq, but not debug exceptions. Masking irqs has no 19 - * side effects for other flags. Keeping to this order makes it easier for 20 - * entry.S to know which exceptions should be unmasked. 21 - * 22 - * FIQ is never expected, but we mask it when we disable debug exceptions, and 23 - * unmask it at all other times. 18 + * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are 19 + * always masked and unmasked together, and have no side effects for other 20 + * flags. Keeping to this order makes it easier for entry.S to know which 21 + * exceptions should be unmasked. 24 22 */ 25 23 26 24 /* ··· 33 35 } 34 36 35 37 asm volatile(ALTERNATIVE( 36 - "msr daifclr, #2 // arch_local_irq_enable", 38 + "msr daifclr, #3 // arch_local_irq_enable", 37 39 __msr_s(SYS_ICC_PMR_EL1, "%0"), 38 40 ARM64_HAS_IRQ_PRIO_MASKING) 39 41 : ··· 52 54 } 53 55 54 56 asm volatile(ALTERNATIVE( 55 - "msr daifset, #2 // arch_local_irq_disable", 57 + "msr daifset, #3 // arch_local_irq_disable", 56 58 __msr_s(SYS_ICC_PMR_EL1, "%0"), 57 59 ARM64_HAS_IRQ_PRIO_MASKING) 58 60 :
+7 -5
arch/arm64/kernel/entry.S
··· 533 533 534 534 .macro el1_interrupt_handler, handler:req 535 535 gic_prio_irq_setup pmr=x20, tmp=x1 536 - enable_da_f 536 + enable_da 537 537 538 538 mov x0, sp 539 539 bl enter_el1_irq_or_nmi ··· 544 544 ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count 545 545 alternative_if ARM64_HAS_IRQ_PRIO_MASKING 546 546 /* 547 - * DA_F were cleared at start of handling. If anything is set in DAIF, 548 - * we come back from an NMI, so skip preemption 547 + * DA were cleared at start of handling, and IF are cleared by 548 + * the GIC irqchip driver using gic_arch_enable_irqs() for 549 + * normal IRQs. If anything is set, it means we come back from 550 + * an NMI instead of a normal IRQ, so skip preemption 549 551 */ 550 552 mrs x0, daif 551 553 orr x24, x24, x0 ··· 564 562 .macro el0_interrupt_handler, handler:req 565 563 gic_prio_irq_setup pmr=x20, tmp=x0 566 564 user_exit_irqoff 567 - enable_da_f 565 + enable_da 568 566 569 567 tbz x22, #55, 1f 570 568 bl do_el0_irq_bp_hardening ··· 765 763 mov x0, sp 766 764 mov x1, x25 767 765 bl do_serror 768 - enable_da_f 766 + enable_da 769 767 b ret_to_user 770 768 SYM_CODE_END(el0_error) 771 769
+1 -1
arch/arm64/kernel/process.c
··· 84 84 unsigned long daif_bits; 85 85 86 86 daif_bits = read_sysreg(daif); 87 - write_sysreg(daif_bits | PSR_I_BIT, daif); 87 + write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif); 88 88 89 89 /* 90 90 * Unmask PMR before going idle to make sure interrupts can
+1
arch/arm64/kernel/smp.c
··· 188 188 cpuflags = read_sysreg(daif); 189 189 190 190 WARN_ON(!(cpuflags & PSR_I_BIT)); 191 + WARN_ON(!(cpuflags & PSR_F_BIT)); 191 192 192 193 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); 193 194 }