Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64: Change soft_enabled from flag to bitmask

"paca->soft_enabled" is used as a flag to mask some of interrupts.
Currently supported flags values and their details:

soft_enabled MSR[EE]

0 0 Disabled (PMI and HMI not masked)
1 1 Enabled

"paca->soft_enabled" is initialized to 1 to mark the interrupts as
enabled. arch_local_irq_disable() will toggle the value when
interrupts need to be disabled. At this point, the interrupts are not
actually disabled, instead, interrupt vector has code to check for the
flag and mask it when it occurs. By "mask it", it updates
paca->irq_happened and returns. arch_local_irq_restore() is called to
re-enable interrupts, which checks and replays interrupts if any
occurred.

Now, as mentioned, the current logic does not mask "performance monitoring
interrupts" and PMIs are implemented as NMI. But this patchset depends
on local_irq_* for a successful local_* update. Meaning, mask all
possible interrupts during local_* update and replay them after the
update.

So the idea here is to reverse the "paca->soft_enabled" logic. New
values and details:

soft_enabled MSR[EE]

1 0 Disabled (PMI and HMI not masked)
0 1 Enabled

Reason for this change is to create a foundation for a third mask
value "0x2" for "soft_enabled" to add support to mask PMIs. When
->soft_enabled is set to a value of "3", PMI interrupts are masked and when
set to a value of "1", PMIs are not masked. This patch also extends
soft_enabled as an interrupt disable mask.

Current flags are renamed from IRQ_[EN/DIS]ABLED to
IRQS_ENABLED and IRQS_DISABLED.

Patch also fixes the ptrace call to force the user to see the softe
value to be always 1. Reason being, even though userspace has no
business knowing about softe, it is part of pt_regs. Likewise in
signal context.

Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Madhavan Srinivasan and committed by
Michael Ellerman
01417c6c acb396d7

+60 -31
+2 -2
arch/powerpc/include/asm/exception-64s.h
··· 499 499 500 500 #define __SOFTEN_TEST(h, vec) \ 501 501 lbz r10,PACASOFTIRQEN(r13); \ 502 - cmpwi r10,IRQS_DISABLED; \ 502 + andi. r10,r10,IRQS_DISABLED; \ 503 503 li r10,SOFTEN_VALUE_##vec; \ 504 - beq masked_##h##interrupt 504 + bne masked_##h##interrupt 505 505 506 506 #define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec) 507 507
+23 -7
arch/powerpc/include/asm/hw_irq.h
··· 31 31 /* 32 32 * flags for paca->soft_enabled 33 33 */ 34 - #define IRQS_ENABLED 1 35 - #define IRQS_DISABLED 0 34 + #define IRQS_ENABLED 0 35 + #define IRQS_DISABLED 1 36 36 37 37 #endif /* CONFIG_PPC64 */ 38 38 ··· 68 68 */ 69 69 static inline notrace void soft_enabled_set(unsigned long enable) 70 70 { 71 + #ifdef CONFIG_TRACE_IRQFLAGS 72 + /* 73 + * mask must always include LINUX bit if any are set, and 74 + * interrupts don't get replayed until the Linux interrupt is 75 + * unmasked. This could be changed to replay partial unmasks 76 + * in future, which would allow Linux masks to nest inside 77 + * other masks, among other things. For now, be very dumb and 78 + * simple. 79 + */ 80 + WARN_ON(mask && !(mask & IRQS_DISABLED)); 81 + #endif 82 + 71 83 asm volatile( 72 84 "stb %0,%1(13)" 73 85 : ··· 88 76 : "memory"); 89 77 } 90 78 91 - static inline notrace unsigned long soft_enabled_set_return(unsigned long enable) 79 + static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) 92 80 { 93 81 unsigned long flags; 82 + 83 + #ifdef CONFIG_TRACE_IRQFLAGS 84 + WARN_ON(mask && !(mask & IRQS_DISABLED)); 85 + #endif 94 86 95 87 asm volatile( 96 88 "lbz %0,%1(13); stb %2,%1(13)" 97 89 : "=&r" (flags) 98 90 : "i" (offsetof(struct paca_struct, soft_enabled)), 99 - "r" (enable) 91 + "r" (mask) 100 92 : "memory"); 101 93 102 94 return flags; ··· 130 114 131 115 static inline bool arch_irqs_disabled_flags(unsigned long flags) 132 116 { 133 - return flags == IRQS_DISABLED; 117 + return flags & IRQS_DISABLED; 134 118 } 135 119 136 120 static inline bool arch_irqs_disabled(void) ··· 149 133 #define hard_irq_disable() do { \ 150 134 unsigned long flags; \ 151 135 __hard_irq_disable(); \ 152 - flags = soft_enabled_set_return(IRQS_DISABLED); \ 136 + flags = soft_enabled_set_return(IRQS_DISABLED);\ 153 137 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ 154 138 if (!arch_irqs_disabled_flags(flags)) \ 155 139 trace_hardirqs_off(); \ ··· 174 158 175 159 static 
inline bool arch_irq_disabled_regs(struct pt_regs *regs) 176 160 { 177 - return (regs->softe == IRQS_DISABLED); 161 + return (regs->softe & IRQS_DISABLED); 178 162 } 179 163 180 164 extern bool prep_irq_for_idle(void);
+2 -2
arch/powerpc/include/asm/irqflags.h
··· 49 49 #define RECONCILE_IRQ_STATE(__rA, __rB) \ 50 50 lbz __rA,PACASOFTIRQEN(r13); \ 51 51 lbz __rB,PACAIRQHAPPENED(r13); \ 52 - cmpwi cr0,__rA,IRQS_DISABLED;\ 52 + andi. __rA,__rA,IRQS_DISABLED;\ 53 53 li __rA,IRQS_DISABLED; \ 54 54 ori __rB,__rB,PACA_IRQ_HARD_DIS; \ 55 55 stb __rB,PACAIRQHAPPENED(r13); \ 56 - beq 44f; \ 56 + bne 44f; \ 57 57 stb __rA,PACASOFTIRQEN(r13); \ 58 58 TRACE_DISABLE_INTS; \ 59 59 44:
+10 -11
arch/powerpc/kernel/entry_64.S
··· 130 130 */ 131 131 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) 132 132 lbz r10,PACASOFTIRQEN(r13) 133 - xori r10,r10,IRQS_ENABLED 134 - 1: tdnei r10,0 133 + 1: tdnei r10,IRQS_ENABLED 135 134 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING 136 135 #endif 137 136 ··· 740 741 beq+ restore 741 742 /* Check that preempt_count() == 0 and interrupts are enabled */ 742 743 lwz r8,TI_PREEMPT(r9) 743 - cmpwi cr1,r8,0 744 + cmpwi cr0,r8,0 745 + bne restore 744 746 ld r0,SOFTE(r1) 745 - cmpdi r0,IRQS_DISABLED 746 - crandc eq,cr1*4+eq,eq 747 + andi. r0,r0,IRQS_DISABLED 747 748 bne restore 748 749 749 750 /* ··· 782 783 */ 783 784 ld r5,SOFTE(r1) 784 785 lbz r6,PACASOFTIRQEN(r13) 785 - cmpwi cr0,r5,IRQS_DISABLED 786 - beq .Lrestore_irq_off 786 + andi. r5,r5,IRQS_DISABLED 787 + bne .Lrestore_irq_off 787 788 788 789 /* We are enabling, were we already enabled ? Yes, just return */ 789 - cmpwi cr0,r6,IRQS_ENABLED 790 + andi. r6,r6,IRQS_DISABLED 790 791 beq cr0,.Ldo_restore 791 792 792 793 /* ··· 1030 1031 li r0,0 1031 1032 mtcr r0 1032 1033 1033 - #ifdef CONFIG_BUG 1034 + #ifdef CONFIG_BUG 1034 1035 /* There is no way it is acceptable to get here with interrupts enabled, 1035 1036 * check it with the asm equivalent of WARN_ON 1036 1037 */ 1037 1038 lbz r0,PACASOFTIRQEN(r13) 1038 - 1: tdnei r0,IRQS_DISABLED 1039 + 1: tdeqi r0,IRQS_ENABLED 1039 1040 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING 1040 1041 #endif 1041 - 1042 + 1042 1043 /* Hard-disable interrupts */ 1043 1044 mfmsr r6 1044 1045 rldicl r7,r6,48,1
+5 -5
arch/powerpc/kernel/exceptions-64e.S
··· 210 210 ld r5,SOFTE(r1) 211 211 212 212 /* Interrupts had better not already be enabled... */ 213 - twnei r6,IRQS_DISABLED 213 + tweqi r6,IRQS_ENABLED 214 214 215 - cmpwi cr0,r5,IRQS_DISABLED 216 - beq 1f 215 + andi. r6,r5,IRQS_DISABLED 216 + bne 1f 217 217 218 218 TRACE_ENABLE_INTS 219 219 stb r5,PACASOFTIRQEN(r13) ··· 352 352 353 353 #define PROLOG_ADDITION_MASKABLE_GEN(n) \ 354 354 lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ 355 - cmpwi cr0,r10,IRQS_DISABLED; /* yes -> go out of line */ \ 356 - beq masked_interrupt_book3e_##n 355 + andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \ 356 + bne masked_interrupt_book3e_##n 357 357 358 358 #define PROLOG_ADDITION_2REGS_GEN(n) \ 359 359 std r14,PACA_EXGEN+EX_R14(r13); \
+17 -3
arch/powerpc/kernel/irq.c
··· 219 219 return 0; 220 220 } 221 221 222 - notrace void arch_local_irq_restore(unsigned long en) 222 + notrace void arch_local_irq_restore(unsigned long mask) 223 223 { 224 224 unsigned char irq_happened; 225 225 unsigned int replay; 226 226 227 227 /* Write the new soft-enabled value */ 228 - soft_enabled_set(en); 229 - if (en == IRQS_DISABLED) 228 + soft_enabled_set(mask); 229 + if (mask) { 230 + #ifdef CONFIG_TRACE_IRQFLAGS 231 + /* 232 + * mask must always include LINUX bit if any 233 + * are set, and interrupts don't get replayed until 234 + * the Linux interrupt is unmasked. This could be 235 + * changed to replay partial unmasks in future, 236 + * which would allow Linux masks to nest inside 237 + * other masks, among other things. For now, be very 238 + * dumb and simple. 239 + */ 240 + WARN_ON(!(mask & IRQS_DISABLED)); 241 + #endif 230 242 return; 243 + } 244 + 231 245 /* 232 246 * From this point onward, we can take interrupts, preempt, 233 247 * etc... unless we got hard-disabled. We check if an event
+1 -1
arch/powerpc/perf/core-book3s.c
··· 322 322 */ 323 323 static inline int perf_intr_is_nmi(struct pt_regs *regs) 324 324 { 325 - return (regs->softe == IRQS_DISABLED); 325 + return (regs->softe & IRQS_DISABLED); 326 326 } 327 327 328 328 /*