Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64: Rename soft_enabled to irq_soft_mask

Rename the paca->soft_enabled to paca->irq_soft_mask as it is no
longer used as a flag for interrupt state, but a mask.

Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Madhavan Srinivasan; committed by Michael Ellerman.
4e26bc4a 01417c6c

+74 -81
+2 -2
arch/powerpc/include/asm/exception-64s.h
··· 432 432 mflr r9; /* Get LR, later save to stack */ \ 433 433 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 434 434 std r9,_LINK(r1); \ 435 - lbz r10,PACASOFTIRQEN(r13); \ 435 + lbz r10,PACAIRQSOFTMASK(r13); \ 436 436 mfspr r11,SPRN_XER; /* save XER in stackframe */ \ 437 437 std r10,SOFTE(r1); \ 438 438 std r11,_XER(r1); \ ··· 498 498 #define SOFTEN_VALUE_0xea0 PACA_IRQ_EE 499 499 500 500 #define __SOFTEN_TEST(h, vec) \ 501 - lbz r10,PACASOFTIRQEN(r13); \ 501 + lbz r10,PACAIRQSOFTMASK(r13); \ 502 502 andi. r10,r10,IRQS_DISABLED; \ 503 503 li r10,SOFTEN_VALUE_##vec; \ 504 504 bne masked_##h##interrupt
+31 -25
arch/powerpc/include/asm/hw_irq.h
··· 29 29 #define PACA_IRQ_HMI 0x20 30 30 31 31 /* 32 - * flags for paca->soft_enabled 32 + * flags for paca->irq_soft_mask 33 33 */ 34 34 #define IRQS_ENABLED 0 35 35 #define IRQS_DISABLED 1 ··· 49 49 #ifdef CONFIG_PPC64 50 50 #include <asm/paca.h> 51 51 52 - static inline notrace unsigned long soft_enabled_return(void) 52 + static inline notrace unsigned long irq_soft_mask_return(void) 53 53 { 54 54 unsigned long flags; 55 55 56 56 asm volatile( 57 57 "lbz %0,%1(13)" 58 58 : "=r" (flags) 59 - : "i" (offsetof(struct paca_struct, soft_enabled))); 59 + : "i" (offsetof(struct paca_struct, irq_soft_mask))); 60 60 61 61 return flags; 62 62 } ··· 64 64 /* 65 65 * The "memory" clobber acts as both a compiler barrier 66 66 * for the critical section and as a clobber because 67 - * we changed paca->soft_enabled 67 + * we changed paca->irq_soft_mask 68 68 */ 69 - static inline notrace void soft_enabled_set(unsigned long enable) 69 + static inline notrace void irq_soft_mask_set(unsigned long mask) 70 70 { 71 71 #ifdef CONFIG_TRACE_IRQFLAGS 72 72 /* 73 - * mask must always include LINUX bit if any are set, and 74 - * interrupts don't get replayed until the Linux interrupt is 75 - * unmasked. This could be changed to replay partial unmasks 76 - * in future, which would allow Linux masks to nest inside 77 - * other masks, among other things. For now, be very dumb and 78 - * simple. 73 + * The irq mask must always include the STD bit if any are set. 74 + * 75 + * and interrupts don't get replayed until the standard 76 + * interrupt (local_irq_disable()) is unmasked. 77 + * 78 + * Other masks must only provide additional masking beyond 79 + * the standard, and they are also not replayed until the 80 + * standard interrupt becomes unmasked. 81 + * 82 + * This could be changed, but it will require partial 83 + * unmasks to be replayed, among other things. For now, take 84 + * the simple approach. 
79 85 */ 80 86 WARN_ON(mask && !(mask & IRQS_DISABLED)); 81 87 #endif ··· 89 83 asm volatile( 90 84 "stb %0,%1(13)" 91 85 : 92 - : "r" (enable), 93 - "i" (offsetof(struct paca_struct, soft_enabled)) 86 + : "r" (mask), 87 + "i" (offsetof(struct paca_struct, irq_soft_mask)) 94 88 : "memory"); 95 89 } 96 90 97 - static inline notrace unsigned long soft_enabled_set_return(unsigned long mask) 91 + static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask) 98 92 { 99 93 unsigned long flags; 100 94 ··· 105 99 asm volatile( 106 100 "lbz %0,%1(13); stb %2,%1(13)" 107 101 : "=&r" (flags) 108 - : "i" (offsetof(struct paca_struct, soft_enabled)), 102 + : "i" (offsetof(struct paca_struct, irq_soft_mask)), 109 103 "r" (mask) 110 104 : "memory"); 111 105 ··· 114 108 115 109 static inline unsigned long arch_local_save_flags(void) 116 110 { 117 - return soft_enabled_return(); 111 + return irq_soft_mask_return(); 118 112 } 119 113 120 114 static inline void arch_local_irq_disable(void) 121 115 { 122 - soft_enabled_set(IRQS_DISABLED); 116 + irq_soft_mask_set(IRQS_DISABLED); 123 117 } 124 118 125 119 extern void arch_local_irq_restore(unsigned long); ··· 131 125 132 126 static inline unsigned long arch_local_irq_save(void) 133 127 { 134 - return soft_enabled_set_return(IRQS_DISABLED); 128 + return irq_soft_mask_set_return(IRQS_DISABLED); 135 129 } 136 130 137 131 static inline bool arch_irqs_disabled_flags(unsigned long flags) ··· 152 146 #define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1) 153 147 #endif 154 148 155 - #define hard_irq_disable() do { \ 156 - unsigned long flags; \ 157 - __hard_irq_disable(); \ 158 - flags = soft_enabled_set_return(IRQS_DISABLED);\ 159 - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ 160 - if (!arch_irqs_disabled_flags(flags)) \ 161 - trace_hardirqs_off(); \ 149 + #define hard_irq_disable() do { \ 150 + unsigned long flags; \ 151 + __hard_irq_disable(); \ 152 + flags = irq_soft_mask_set_return(IRQS_DISABLED); \ 153 + 
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \ 154 + if (!arch_irqs_disabled_flags(flags)) \ 155 + trace_hardirqs_off(); \ 162 156 } while(0) 163 157 164 158 static inline bool lazy_irq_pending(void)
+6 -6
arch/powerpc/include/asm/irqflags.h
··· 47 47 * be clobbered. 48 48 */ 49 49 #define RECONCILE_IRQ_STATE(__rA, __rB) \ 50 - lbz __rA,PACASOFTIRQEN(r13); \ 50 + lbz __rA,PACAIRQSOFTMASK(r13); \ 51 51 lbz __rB,PACAIRQHAPPENED(r13); \ 52 - andi. __rA,__rA,IRQS_DISABLED;\ 53 - li __rA,IRQS_DISABLED; \ 52 + andi. __rA,__rA,IRQS_DISABLED; \ 53 + li __rA,IRQS_DISABLED; \ 54 54 ori __rB,__rB,PACA_IRQ_HARD_DIS; \ 55 55 stb __rB,PACAIRQHAPPENED(r13); \ 56 56 bne 44f; \ 57 - stb __rA,PACASOFTIRQEN(r13); \ 57 + stb __rA,PACAIRQSOFTMASK(r13); \ 58 58 TRACE_DISABLE_INTS; \ 59 59 44: 60 60 ··· 64 64 65 65 #define RECONCILE_IRQ_STATE(__rA, __rB) \ 66 66 lbz __rA,PACAIRQHAPPENED(r13); \ 67 - li __rB,IRQS_DISABLED; \ 67 + li __rB,IRQS_DISABLED; \ 68 68 ori __rA,__rA,PACA_IRQ_HARD_DIS; \ 69 - stb __rB,PACASOFTIRQEN(r13); \ 69 + stb __rB,PACAIRQSOFTMASK(r13); \ 70 70 stb __rA,PACAIRQHAPPENED(r13) 71 71 #endif 72 72 #endif
+1 -1
arch/powerpc/include/asm/kvm_ppc.h
··· 873 873 874 874 /* Only need to enable IRQs by hard enabling them after this */ 875 875 local_paca->irq_happened = 0; 876 - soft_enabled_set(IRQS_ENABLED); 876 + irq_soft_mask_set(IRQS_ENABLED); 877 877 #endif 878 878 } 879 879
+1 -1
arch/powerpc/include/asm/paca.h
··· 159 159 u64 saved_r1; /* r1 save for RTAS calls or PM */ 160 160 u64 saved_msr; /* MSR saved here by enter_rtas */ 161 161 u16 trap_save; /* Used when bad stack is encountered */ 162 - u8 soft_enabled; /* irq soft-enable flag */ 162 + u8 irq_soft_mask; /* mask for irq soft masking */ 163 163 u8 irq_happened; /* irq happened while soft-disabled */ 164 164 u8 io_sync; /* writel() needs spin_unlock sync */ 165 165 u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
+1 -1
arch/powerpc/kernel/asm-offsets.c
··· 178 178 OFFSET(PACATOC, paca_struct, kernel_toc); 179 179 OFFSET(PACAKBASE, paca_struct, kernelbase); 180 180 OFFSET(PACAKMSR, paca_struct, kernel_msr); 181 - OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled); 181 + OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask); 182 182 OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); 183 183 #ifdef CONFIG_PPC_BOOK3S 184 184 OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
+6 -6
arch/powerpc/kernel/entry_64.S
··· 129 129 * is correct 130 130 */ 131 131 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) 132 - lbz r10,PACASOFTIRQEN(r13) 132 + lbz r10,PACAIRQSOFTMASK(r13) 133 133 1: tdnei r10,IRQS_ENABLED 134 134 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING 135 135 #endif ··· 781 781 * are about to re-enable interrupts 782 782 */ 783 783 ld r5,SOFTE(r1) 784 - lbz r6,PACASOFTIRQEN(r13) 784 + lbz r6,PACAIRQSOFTMASK(r13) 785 785 andi. r5,r5,IRQS_DISABLED 786 786 bne .Lrestore_irq_off 787 787 ··· 806 806 .Lrestore_no_replay: 807 807 TRACE_ENABLE_INTS 808 808 li r0,IRQS_ENABLED 809 - stb r0,PACASOFTIRQEN(r13); 809 + stb r0,PACAIRQSOFTMASK(r13); 810 810 811 811 /* 812 812 * Final return path. BookE is handled in a different file ··· 913 913 1: 914 914 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) 915 915 /* The interrupt should not have soft enabled. */ 916 - lbz r7,PACASOFTIRQEN(r13) 917 - 1: tdnei r7,IRQS_DISABLED 916 + lbz r7,PACAIRQSOFTMASK(r13) 917 + 1: tdeqi r7,IRQS_ENABLED 918 918 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING 919 919 #endif 920 920 b .Ldo_restore ··· 1034 1034 /* There is no way it is acceptable to get here with interrupts enabled, 1035 1035 * check it with the asm equivalent of WARN_ON 1036 1036 */ 1037 - lbz r0,PACASOFTIRQEN(r13) 1037 + lbz r0,PACAIRQSOFTMASK(r13) 1038 1038 1: tdeqi r0,IRQS_ENABLED 1039 1039 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING 1040 1040 #endif
+5 -5
arch/powerpc/kernel/exceptions-64e.S
··· 139 139 mfspr r10,SPRN_ESR 140 140 SPECIAL_EXC_STORE(r10,ESR) 141 141 142 - lbz r10,PACASOFTIRQEN(r13) 142 + lbz r10,PACAIRQSOFTMASK(r13) 143 143 SPECIAL_EXC_STORE(r10,SOFTE) 144 144 ld r10,_NIP(r1) 145 145 SPECIAL_EXC_STORE(r10,CSRR0) ··· 206 206 mtspr SPRN_MAS8,r10 207 207 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) 208 208 209 - lbz r6,PACASOFTIRQEN(r13) 209 + lbz r6,PACAIRQSOFTMASK(r13) 210 210 ld r5,SOFTE(r1) 211 211 212 212 /* Interrupts had better not already be enabled... */ ··· 216 216 bne 1f 217 217 218 218 TRACE_ENABLE_INTS 219 - stb r5,PACASOFTIRQEN(r13) 219 + stb r5,PACAIRQSOFTMASK(r13) 220 220 1: 221 221 /* 222 222 * Restore PACAIRQHAPPENED rather than setting it based on ··· 351 351 #define PROLOG_ADDITION_NONE_MC(n) 352 352 353 353 #define PROLOG_ADDITION_MASKABLE_GEN(n) \ 354 - lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ 354 + lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \ 355 355 andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \ 356 356 bne masked_interrupt_book3e_##n 357 357 ··· 397 397 mfspr r8,SPRN_XER; /* save XER in stackframe */ \ 398 398 ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ 399 399 lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ 400 - lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \ 400 + lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \ 401 401 ld r12,exception_marker@toc(r2); \ 402 402 li r0,0; \ 403 403 std r3,GPR10(r1); /* save r10 to stackframe */ \
+3 -3
arch/powerpc/kernel/head_64.S
··· 766 766 * in the PACA when doing hotplug) 767 767 */ 768 768 li r0,IRQS_DISABLED 769 - stb r0,PACASOFTIRQEN(r13) 769 + stb r0,PACAIRQSOFTMASK(r13) 770 770 li r0,PACA_IRQ_HARD_DIS 771 771 stb r0,PACAIRQHAPPENED(r13) 772 772 ··· 823 823 * in the PACA when doing hotplug) 824 824 */ 825 825 li r7,IRQS_DISABLED 826 - stb r7,PACASOFTIRQEN(r13) 826 + stb r7,PACAIRQSOFTMASK(r13) 827 827 li r0,PACA_IRQ_HARD_DIS 828 828 stb r0,PACAIRQHAPPENED(r13) 829 829 ··· 990 990 * in the PACA when doing hotplug) 991 991 */ 992 992 li r0,IRQS_DISABLED 993 - stb r0,PACASOFTIRQEN(r13) 993 + stb r0,PACAIRQSOFTMASK(r13) 994 994 li r0,PACA_IRQ_HARD_DIS 995 995 stb r0,PACAIRQHAPPENED(r13) 996 996
+1 -1
arch/powerpc/kernel/idle_book3e.S
··· 48 48 addi r1,r1,128 49 49 #endif 50 50 li r0,IRQS_ENABLED 51 - stb r0,PACASOFTIRQEN(r13) 51 + stb r0,PACAIRQSOFTMASK(r13) 52 52 53 53 /* Interrupts will make use return to LR, so get something we want 54 54 * in there
+1 -1
arch/powerpc/kernel/idle_power4.S
··· 55 55 #endif /* CONFIG_TRACE_IRQFLAGS */ 56 56 57 57 li r0,IRQS_ENABLED 58 - stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ 58 + stb r0,PACAIRQSOFTMASK(r13) /* we'll hard-enable shortly */ 59 59 BEGIN_FTR_SECTION 60 60 DSSALL 61 61 sync
+5 -18
arch/powerpc/kernel/irq.c
··· 225 225 unsigned int replay; 226 226 227 227 /* Write the new soft-enabled value */ 228 - soft_enabled_set(mask); 229 - if (mask) { 230 - #ifdef CONFIG_TRACE_IRQFLAGS 231 - /* 232 - * mask must always include LINUX bit if any 233 - * are set, and interrupts don't get replayed until 234 - * the Linux interrupt is unmasked. This could be 235 - * changed to replay partial unmasks in future, 236 - * which would allow Linux masks to nest inside 237 - * other masks, among other things. For now, be very 238 - * dumb and simple. 239 - */ 240 - WARN_ON(!(mask & IRQS_DISABLED)); 241 - #endif 228 + irq_soft_mask_set(mask); 229 + if (mask) 242 230 return; 243 - } 244 231 245 232 /* 246 233 * From this point onward, we can take interrupts, preempt, ··· 272 285 } 273 286 #endif /* CONFIG_TRACE_IRQFLAGS */ 274 287 275 - soft_enabled_set(IRQS_DISABLED); 288 + irq_soft_mask_set(IRQS_DISABLED); 276 289 trace_hardirqs_off(); 277 290 278 291 /* ··· 284 297 285 298 /* We can soft-enable now */ 286 299 trace_hardirqs_on(); 287 - soft_enabled_set(IRQS_ENABLED); 300 + irq_soft_mask_set(IRQS_ENABLED); 288 301 289 302 /* 290 303 * And replay if we have to. This will return with interrupts ··· 359 372 * of entering the low power state. 360 373 */ 361 374 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; 362 - soft_enabled_set(IRQS_ENABLED); 375 + irq_soft_mask_set(IRQS_ENABLED); 363 376 364 377 /* Tell the caller to enter the low power state */ 365 378 return true;
+1 -1
arch/powerpc/kernel/optprobes_head.S
··· 58 58 std r5,_XER(r1) 59 59 mfcr r5 60 60 std r5,_CCR(r1) 61 - lbz r5,PACASOFTIRQEN(r13) 61 + lbz r5,PACAIRQSOFTMASK(r13) 62 62 std r5,SOFTE(r1) 63 63 64 64 /*
+1 -1
arch/powerpc/kernel/ptrace.c
··· 285 285 286 286 #ifdef CONFIG_PPC64 287 287 /* 288 - * softe copies paca->soft_enabled variable state. Since soft_enabled is 288 + * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is 289 289 * no more used as a flag, lets force usr to alway see the softe value as 1 290 290 * which means interrupts are not soft disabled. 291 291 */
+2 -2
arch/powerpc/kernel/setup_64.c
··· 189 189 /* Allow percpu accesses to work until we setup percpu data */ 190 190 get_paca()->data_offset = 0; 191 191 /* Mark interrupts disabled in PACA */ 192 - soft_enabled_set(IRQS_DISABLED); 192 + irq_soft_mask_set(IRQS_DISABLED); 193 193 } 194 194 195 195 static void __init configure_exceptions(void) ··· 352 352 void early_setup_secondary(void) 353 353 { 354 354 /* Mark interrupts disabled in PACA */ 355 - soft_enabled_set(IRQS_DISABLED); 355 + irq_soft_mask_set(IRQS_DISABLED); 356 356 357 357 /* Initialize the hash table or TLB handling */ 358 358 early_init_mmu_secondary();
+3 -3
arch/powerpc/kernel/time.c
··· 244 244 void accumulate_stolen_time(void) 245 245 { 246 246 u64 sst, ust; 247 - unsigned long save_soft_enabled = soft_enabled_return(); 247 + unsigned long save_irq_soft_mask = irq_soft_mask_return(); 248 248 struct cpu_accounting_data *acct = &local_paca->accounting; 249 249 250 250 /* We are called early in the exception entry, before ··· 253 253 * needs to reflect that so various debug stuff doesn't 254 254 * complain 255 255 */ 256 - soft_enabled_set(IRQS_DISABLED); 256 + irq_soft_mask_set(IRQS_DISABLED); 257 257 258 258 sst = scan_dispatch_log(acct->starttime_user); 259 259 ust = scan_dispatch_log(acct->starttime); ··· 261 261 acct->utime -= ust; 262 262 acct->steal_time += ust + sst; 263 263 264 - soft_enabled_set(save_soft_enabled); 264 + irq_soft_mask_set(save_irq_soft_mask); 265 265 } 266 266 267 267 static inline u64 calculate_stolen_time(u64 stop_tb)
+1 -1
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 3249 3249 mfctr r4 3250 3250 #endif 3251 3251 mfxer r5 3252 - lbz r6, PACASOFTIRQEN(r13) 3252 + lbz r6, PACAIRQSOFTMASK(r13) 3253 3253 std r3, _LINK(r1) 3254 3254 std r4, _CTR(r1) 3255 3255 std r5, _XER(r1)
+1 -1
arch/powerpc/mm/hugetlbpage.c
··· 752 752 * So long as we atomically load page table pointers we are safe against teardown, 753 753 * we can follow the address down to the the page and take a ref on it. 754 754 * This function need to be called with interrupts disabled. We use this variant 755 - * when we have MSR[EE] = 0 but the paca->soft_enabled = IRQS_ENABLED 755 + * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED 756 756 */ 757 757 pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, 758 758 bool *is_thp, unsigned *hpage_shift)
+2 -2
arch/powerpc/xmon/xmon.c
··· 1623 1623 printf(" current = 0x%lx\n", current); 1624 1624 #ifdef CONFIG_PPC64 1625 1625 printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", 1626 - local_paca, local_paca->soft_enabled, local_paca->irq_happened); 1626 + local_paca, local_paca->irq_soft_mask, local_paca->irq_happened); 1627 1627 #endif 1628 1628 if (current) { 1629 1629 printf(" pid = %ld, comm = %s\n", ··· 2391 2391 DUMP(p, stab_rr, "lx"); 2392 2392 DUMP(p, saved_r1, "lx"); 2393 2393 DUMP(p, trap_save, "x"); 2394 - DUMP(p, soft_enabled, "x"); 2394 + DUMP(p, irq_soft_mask, "x"); 2395 2395 DUMP(p, irq_happened, "x"); 2396 2396 DUMP(p, io_sync, "x"); 2397 2397 DUMP(p, irq_work_pending, "x");