Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64: Interrupts save PPR on stack rather than thread_struct

PPR is the odd register out when it comes to interrupt handling, it is
saved in current->thread.ppr while all others are saved on the stack.

The difficulty with this is that accessing thread.ppr can cause an SLB
fault, but the recent change that reimplemented the SLB fault handler
in C had assumed the normal exception entry handlers would not cause
an SLB fault.

Fix this by allocating room in the interrupt stack to save PPR.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Nicholas Piggin and committed by
Michael Ellerman
4c2de74c 3eeacd9f

+19 -23
+4 -5
arch/powerpc/include/asm/exception-64s.h
··· 236 236 * PPR save/restore macros used in exceptions_64s.S 237 237 * Used for P7 or later processors 238 238 */ 239 - #define SAVE_PPR(area, ra, rb) \ 239 + #define SAVE_PPR(area, ra) \ 240 240 BEGIN_FTR_SECTION_NESTED(940) \ 241 - ld ra,PACACURRENT(r13); \ 242 - ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \ 243 - std rb,TASKTHREADPPR(ra); \ 241 + ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \ 242 + std ra,_PPR(r1); \ 244 243 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940) 245 244 246 245 #define RESTORE_PPR_PACA(area, ra) \ ··· 507 508 3: EXCEPTION_PROLOG_COMMON_1(); \ 508 509 beq 4f; /* if from kernel mode */ \ 509 510 ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \ 510 - SAVE_PPR(area, r9, r10); \ 511 + SAVE_PPR(area, r9); \ 511 512 4: EXCEPTION_PROLOG_COMMON_2(area) \ 512 513 EXCEPTION_PROLOG_COMMON_3(n) \ 513 514 ACCOUNT_STOLEN_TIME
+2 -4
arch/powerpc/include/asm/processor.h
··· 32 32 /* Default SMT priority is set to 3. Use 11- 13bits to save priority. */ 33 33 #define PPR_PRIORITY 3 34 34 #ifdef __ASSEMBLY__ 35 - #define INIT_PPR (PPR_PRIORITY << 50) 35 + #define DEFAULT_PPR (PPR_PRIORITY << 50) 36 36 #else 37 - #define INIT_PPR ((u64)PPR_PRIORITY << 50) 37 + #define DEFAULT_PPR ((u64)PPR_PRIORITY << 50) 38 38 #endif /* __ASSEMBLY__ */ 39 39 #endif /* CONFIG_PPC64 */ 40 40 ··· 341 341 * onwards. 342 342 */ 343 343 int dscr_inherit; 344 - unsigned long ppr; /* used to save/restore SMT priority */ 345 344 unsigned long tidr; 346 345 #endif 347 346 #ifdef CONFIG_PPC_BOOK3S_64 ··· 388 389 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ 389 390 .addr_limit = KERNEL_DS, \ 390 391 .fpexc_mode = 0, \ 391 - .ppr = INIT_PPR, \ 392 392 .fscr = FSCR_TAR | FSCR_EBB \ 393 393 } 394 394 #endif
+4
arch/powerpc/include/asm/ptrace.h
··· 51 51 unsigned long result; 52 52 }; 53 53 }; 54 + 55 + #ifdef CONFIG_PPC64 56 + unsigned long ppr; 57 + #endif 54 58 }; 55 59 #endif 56 60
+1 -1
arch/powerpc/kernel/asm-offsets.c
··· 89 89 #ifdef CONFIG_PPC64 90 90 DEFINE(SIGSEGV, SIGSEGV); 91 91 DEFINE(NMI_MASK, NMI_MASK); 92 - OFFSET(TASKTHREADPPR, task_struct, thread.ppr); 93 92 #else 94 93 OFFSET(THREAD_INFO, task_struct, stack); 95 94 DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16)); ··· 322 323 STACK_PT_REGS_OFFSET(_ESR, dsisr); 323 324 #else /* CONFIG_PPC64 */ 324 325 STACK_PT_REGS_OFFSET(SOFTE, softe); 326 + STACK_PT_REGS_OFFSET(_PPR, ppr); 325 327 #endif /* CONFIG_PPC64 */ 326 328 327 329 #if defined(CONFIG_PPC32)
+5 -10
arch/powerpc/kernel/entry_64.S
··· 386 386 387 387 4: /* Anything else left to do? */ 388 388 BEGIN_FTR_SECTION 389 - lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */ 390 - ld r10,PACACURRENT(r13) 389 + lis r3,DEFAULT_PPR@highest /* Set default PPR */ 391 390 sldi r3,r3,32 /* bits 11-13 are used for ppr */ 392 - std r3,TASKTHREADPPR(r10) 391 + std r3,_PPR(r1) 393 392 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 394 393 395 394 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP) ··· 941 942 andi. r0,r3,MSR_RI 942 943 beq- .Lunrecov_restore 943 944 944 - /* Load PPR from thread struct before we clear MSR:RI */ 945 - BEGIN_FTR_SECTION 946 - ld r2,PACACURRENT(r13) 947 - ld r2,TASKTHREADPPR(r2) 948 - END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 949 - 950 945 /* 951 946 * Clear RI before restoring r13. If we are returning to 952 947 * userspace and we take an exception after restoring r13, ··· 961 968 andi. r0,r3,MSR_PR 962 969 beq 1f 963 970 BEGIN_FTR_SECTION 964 - mtspr SPRN_PPR,r2 /* Restore PPR */ 971 + /* Restore PPR */ 972 + ld r2,_PPR(r1) 973 + mtspr SPRN_PPR,r2 965 974 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 966 975 ACCOUNT_CPU_USER_EXIT(r13, r2, r4) 967 976 REST_GPR(13, r1)
+1 -1
arch/powerpc/kernel/process.c
··· 1710 1710 p->thread.dscr = mfspr(SPRN_DSCR); 1711 1711 } 1712 1712 if (cpu_has_feature(CPU_FTR_HAS_PPR)) 1713 - p->thread.ppr = INIT_PPR; 1713 + childregs->ppr = DEFAULT_PPR; 1714 1714 1715 1715 p->thread.tidr = 0; 1716 1716 #endif
+2 -2
arch/powerpc/kernel/ptrace.c
··· 1609 1609 void *kbuf, void __user *ubuf) 1610 1610 { 1611 1611 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 1612 - &target->thread.ppr, 0, sizeof(u64)); 1612 + &target->thread.regs->ppr, 0, sizeof(u64)); 1613 1613 } 1614 1614 1615 1615 static int ppr_set(struct task_struct *target, ··· 1618 1618 const void *kbuf, const void __user *ubuf) 1619 1619 { 1620 1620 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1621 - &target->thread.ppr, 0, sizeof(u64)); 1621 + &target->thread.regs->ppr, 0, sizeof(u64)); 1622 1622 } 1623 1623 1624 1624 static int dscr_get(struct task_struct *target,