
powerpc/64s: avoid reloading (H)SRR registers if they are still valid

When an interrupt is taken, the SRR registers are set to return to where
execution left off. Unless they are used for something else in the
meantime, or the context's return address or MSR is modified, there is no
need to reload these registers when returning from the interrupt.

Introduce per-CPU flags that track the validity of SRR and HSRR
registers. These are cleared when returning from interrupt, when
using the registers for something else (e.g., OPAL calls), when
adjusting the return address or MSR of a context, and when context
switching (which changes the return address and MSR).

This improves the performance of interrupt returns.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fold in fixup patch from Nick]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-5-npiggin@gmail.com
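
The mechanism in one picture: entry code marks the (H)SRRs valid, anything that touches the saved return state invalidates them, and the return path only pays for the SPR writes when the flags say it must. Below is a rough userspace C model of that idea, with simplified stand-in types (this paca/pt_regs pair is illustrative, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for paca_struct and pt_regs. */
struct paca { bool srr_valid; };
struct pt_regs { uint64_t nip, msr; };

static struct paca local_paca;

/* Interrupt entry: hardware just wrote SRR0/SRR1, so mark them valid. */
static void interrupt_entry(void)
{
	local_paca.srr_valid = true;
}

/* Any change to the saved return state must invalidate the flag. */
static void regs_set_return_ip(struct pt_regs *regs, uint64_t ip)
{
	regs->nip = ip;
	local_paca.srr_valid = false;
}

/* Return path: reload the SRRs only if something changed or clobbered them. */
static void interrupt_return(struct pt_regs *regs)
{
	if (!local_paca.srr_valid)
		printf("mtspr SRR0=%#llx SRR1=%#llx\n",
		       (unsigned long long)regs->nip,
		       (unsigned long long)regs->msr);
	else
		printf("SRRs still valid, skipping mtspr\n");
	local_paca.srr_valid = false;	/* cleared when returning */
	/* rfid */
}

int main(void)
{
	struct pt_regs regs = { 0x1000, 0x8000 };

	interrupt_entry();
	interrupt_return(&regs);		/* fast path: no reload */

	interrupt_entry();
	regs_set_return_ip(&regs, 0x2000);	/* e.g. instruction emulation */
	interrupt_return(&regs);		/* slow path: reload needed */
	return 0;
}

The real patch keeps the flags in the paca because they are per-CPU state that the hot return path can read with a single lbz off r13.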

Authored by Nicholas Piggin, committed by Michael Ellerman
commit 59dc5bfc (parent 1df7d5e4)

+418 -179
+4
arch/powerpc/Kconfig.debug
···
 config PPC_IRQ_SOFT_MASK_DEBUG
 	bool "Include extra checks for powerpc irq soft masking"
 
+config PPC_RFI_SRR_DEBUG
+	bool "Include extra checks for RFI SRR register validity"
+	depends on PPC_BOOK3S_64
+
 config XMON
 	bool "Include xmon kernel debugger"
 	depends on DEBUG_KERNEL
+9 -1
arch/powerpc/include/asm/hw_irq.h
···
 	return !(regs->msr & MSR_EE);
 }
 
-static inline void may_hard_irq_enable(void) { }
+static inline bool may_hard_irq_enable(void)
+{
+	return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+	BUILD_BUG();
+}
 
 static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
 {
+13 -1
arch/powerpc/include/asm/interrupt.h
···
 #include <asm/kprobes.h>
 #include <asm/runlatch.h>
 
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void srr_regs_clobbered(void)
+{
+	local_paca->srr_valid = 0;
+	local_paca->hsrr_valid = 0;
+}
+#else
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
 static inline void nap_adjust_return(struct pt_regs *regs)
 {
 #ifdef CONFIG_PPC_970_NAP
 	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
 		/* Can avoid a test-and-clear because NMIs do not call this */
 		clear_thread_local_flags(_TLF_NAPPING);
-		regs->nip = (unsigned long)power4_idle_nap_return;
+		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
 	}
 #endif
 }
+1 -1
arch/powerpc/include/asm/livepatch.h
···
 {
 	struct pt_regs *regs = ftrace_get_regs(fregs);
 
-	regs->nip = ip;
+	regs_set_return_ip(regs, ip);
 }
 
 #define klp_get_ftrace_location klp_get_ftrace_location
+4
arch/powerpc/include/asm/paca.h
···
 #ifdef CONFIG_PPC_BOOK3E
 	u16 trap_save;			/* Used when bad stack is encountered */
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	u8 hsrr_valid;			/* HSRRs set for HRFID */
+	u8 srr_valid;			/* SRRs set for RFID */
+#endif
 	u8 irq_soft_mask;		/* mask for irq soft masking */
 	u8 irq_happened;		/* irq happened while soft-disabled */
 	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
+2 -2
arch/powerpc/include/asm/probes.h
···
 /* Enable single stepping for the current task */
 static inline void enable_single_step(struct pt_regs *regs)
 {
-	regs->msr |= MSR_SINGLESTEP;
+	regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	/*
 	 * We turn off Critical Input Exception(CE) to ensure that the single
 	 * step will be for the instruction we have the probe on; if we don't,
 	 * it is possible we'd get the single step reported for CE.
 	 */
-	regs->msr &= ~MSR_CE;
+	regs_set_return_msr(regs, regs->msr & ~MSR_CE);
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
 #ifdef CONFIG_PPC_47x
 	isync();
+42 -10
arch/powerpc/include/asm/ptrace.h
···
 #endif /* __powerpc64__ */
 
 #ifndef __ASSEMBLY__
+#include <asm/paca.h>
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
+{
+	regs->nip = ip;
+#ifdef CONFIG_PPC_BOOK3S_64
+	local_paca->hsrr_valid = 0;
+	local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
+{
+	regs->msr = msr;
+#ifdef CONFIG_PPC_BOOK3S_64
+	local_paca->hsrr_valid = 0;
+	local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void set_return_regs_changed(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	local_paca->hsrr_valid = 0;
+	local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
+{
+	regs_set_return_ip(regs, regs->nip + offset);
+}
 
 static inline unsigned long instruction_pointer(struct pt_regs *regs)
 {
···
 static inline void instruction_pointer_set(struct pt_regs *regs,
 		unsigned long val)
 {
-	regs->nip = val;
+	regs_set_return_ip(regs, val);
 }
 
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
···
 {
 	return 0;
 }
-
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
 
 #ifdef __powerpc64__
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
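The accessors above are the contract the rest of the diff enforces: any code that changes a context's return NIP or MSR must go through them so the paca validity flags are cleared. A hypothetical before/after sketch (skip_one_insn() is a made-up name, just to show the calling convention):

/* Before: a raw write updates the state but bypasses the tracking. */
static void skip_one_insn_old(struct pt_regs *regs)
{
	regs->nip += 4;	/* would now leave stale (H)SRRs marked valid */
}

/* After: the accessor updates NIP and clears srr_valid/hsrr_valid. */
static void skip_one_insn_new(struct pt_regs *regs)
{
	regs_add_return_ip(regs, 4);
}

With CONFIG_PPC_RFI_SRR_DEBUG enabled, the DEBUG_SRR_VALID checks added to entry_64.S below warn once if a raw write like the first variant slips through.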
+4
arch/powerpc/kernel/asm-offsets.c
···
 	OFFSET(PACATOC, paca_struct, kernel_toc);
 	OFFSET(PACAKBASE, paca_struct, kernelbase);
 	OFFSET(PACAKMSR, paca_struct, kernel_msr);
+#ifdef CONFIG_PPC_BOOK3S_64
+	OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
+	OFFSET(PACASRR_VALID, paca_struct, srr_valid);
+#endif
 	OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
 	OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
 	OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
+85 -7
arch/powerpc/kernel/entry_64.S
···
 	.section	".text"
 	.align 7
 
+.macro DEBUG_SRR_VALID srr
+#ifdef CONFIG_PPC_RFI_SRR_DEBUG
+	.ifc \srr,srr
+	mfspr	r11,SPRN_SRR0
+	ld	r12,_NIP(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	mfspr	r11,SPRN_SRR1
+	ld	r12,_MSR(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	.else
+	mfspr	r11,SPRN_HSRR0
+	ld	r12,_NIP(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	mfspr	r11,SPRN_HSRR1
+	ld	r12,_MSR(r1)
+100:	tdne	r11,r12
+	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+	.endif
+#endif
+.endm
+
 #ifdef CONFIG_PPC_BOOK3S
 .macro system_call_vectored name trapnr
 	.globl system_call_vectored_\name
···
 	ld	r11,exception_marker@toc(r2)
 	std	r11,-16(r10)		/* "regshere" marker */
 
+#ifdef CONFIG_PPC_BOOK3S
+	li	r11,1
+	stb	r11,PACASRR_VALID(r13)
+#endif
+
 	/*
 	 * We always enter kernel from userspace with irq soft-mask enabled and
 	 * nothing pending. system_call_exception() will call
···
 	bl	syscall_exit_prepare
 
 	ld	r2,_CCR(r1)
+	ld	r6,_LINK(r1)
+	mtlr	r6
+
+#ifdef CONFIG_PPC_BOOK3S
+	lbz	r4,PACASRR_VALID(r13)
+	cmpdi	r4,0
+	bne	1f
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+#endif
 	ld	r4,_NIP(r1)
 	ld	r5,_MSR(r1)
-	ld	r6,_LINK(r1)
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r5
+1:
+	DEBUG_SRR_VALID srr
 
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
-	mtspr	SPRN_SRR0,r4
-	mtspr	SPRN_SRR1,r5
-	mtlr	r6
 
 	cmpdi	r3,0
 	bne	.Lsyscall_restore_regs
···
 	kuap_user_restore r3, r4
 #endif
 .Lfast_user_interrupt_return_\srr\():
-	ld	r11,_NIP(r1)
-	ld	r12,_MSR(r1)
+
 BEGIN_FTR_SECTION
 	ld	r10,_PPR(r1)
 	mtspr	SPRN_PPR,r10
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+#ifdef CONFIG_PPC_BOOK3S
+	.ifc \srr,srr
+	lbz	r4,PACASRR_VALID(r13)
+	.else
+	lbz	r4,PACAHSRR_VALID(r13)
+	.endif
+	cmpdi	r4,0
+	li	r4,0
+	bne	1f
+#endif
+	ld	r11,_NIP(r1)
+	ld	r12,_MSR(r1)
 	.ifc \srr,srr
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACASRR_VALID(r13)
+#endif
 	.else
 	mtspr	SPRN_HSRR0,r11
 	mtspr	SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACAHSRR_VALID(r13)
+#endif
 	.endif
+	DEBUG_SRR_VALID \srr
 
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
···
 
 .Lfast_kernel_interrupt_return_\srr\():
 	cmpdi	cr1,r3,0
+#ifdef CONFIG_PPC_BOOK3S
+	.ifc \srr,srr
+	lbz	r4,PACASRR_VALID(r13)
+	.else
+	lbz	r4,PACAHSRR_VALID(r13)
+	.endif
+	cmpdi	r4,0
+	li	r4,0
+	bne	1f
+#endif
 	ld	r11,_NIP(r1)
 	ld	r12,_MSR(r1)
 	.ifc \srr,srr
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACASRR_VALID(r13)
+#endif
 	.else
 	mtspr	SPRN_HSRR0,r11
 	mtspr	SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+	stb	r4,PACAHSRR_VALID(r13)
+#endif
 	.endif
+	DEBUG_SRR_VALID \srr
 
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
+27
arch/powerpc/kernel/exceptions-64s.S
···
 	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
 	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
 
+	/* Mark our [H]SRRs valid for return */
+	li	r10,1
+	.if IHSRR_IF_HVMODE
+	BEGIN_FTR_SECTION
+	stb	r10,PACAHSRR_VALID(r13)
+	FTR_SECTION_ELSE
+	stb	r10,PACASRR_VALID(r13)
+	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+	.elseif IHSRR
+	stb	r10,PACAHSRR_VALID(r13)
+	.else
+	stb	r10,PACASRR_VALID(r13)
+	.endif
+
 	.if ISET_RI
 	li	r10,MSR_RI
 	mtmsrd	r10,1			/* Set MSR_RI */
···
 .macro EXCEPTION_RESTORE_REGS hsrr=0
 	/* Move original SRR0 and SRR1 into the respective regs */
 	ld	r9,_MSR(r1)
+	li	r10,0
 	.if \hsrr
 	mtspr	SPRN_HSRR1,r9
+	stb	r10,PACAHSRR_VALID(r13)
 	.else
 	mtspr	SPRN_SRR1,r9
+	stb	r10,PACASRR_VALID(r13)
 	.endif
 	ld	r9,_NIP(r1)
 	.if \hsrr
···
 	 *
 	 * Be careful to avoid touching the kernel stack.
 	 */
+	li	r10,0
+	stb	r10,PACAHSRR_VALID(r13)
 	ld	r10,PACA_EXGEN+EX_CTR(r13)
 	mtctr	r10
 	mtcrf	0x80,r9
···
 	ld	r10,PACA_EXGEN+EX_CFAR(r13)
 	mtspr	SPRN_CFAR,r10
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+	li	r10,0
+	stb	r10,PACAHSRR_VALID(r13)
 	ld	r10,PACA_EXGEN+EX_R10(r13)
 	ld	r11,PACA_EXGEN+EX_R11(r13)
 	ld	r12,PACA_EXGEN+EX_R12(r13)
···
 	ori	r11,r11,PACA_IRQ_HARD_DIS
 	stb	r11,PACAIRQHAPPENED(r13)
 2:	/* done */
+	li	r10,0
+	.if \hsrr
+	stb	r10,PACAHSRR_VALID(r13)
+	.else
+	stb	r10,PACASRR_VALID(r13)
+	.endif
 	ld	r10,PACA_EXGEN+EX_CTR(r13)
 	mtctr	r10
 	mtcrf	0x80,r9
+4
arch/powerpc/kernel/fpu.S
···
 	ori	r12,r12,MSR_FP
 	or	r12,r12,r4
 	std	r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+#endif
 #endif
 	li	r4,1
 	stb	r4,THREAD_LOAD_FP(r5)
+2 -2
arch/powerpc/kernel/hw_breakpoint.c
···
 		return;
 
 reset:
-	regs->msr &= ~MSR_SE;
+	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
 		__set_breakpoint(i, info);
···
 		current->thread.last_hit_ubp[i] = bp[i];
 		info[i] = NULL;
 	}
-	regs->msr |= MSR_SE;
+	regs_set_return_msr(regs, regs->msr | MSR_SE);
 	return false;
 }
+5 -5
arch/powerpc/kernel/kgdb.c
···
 		return 0;
 
 	if (*(u32 *)regs->nip == BREAK_INSTR)
-		regs->nip += BREAK_INSTR_SIZE;
+		regs_add_return_ip(regs, BREAK_INSTR_SIZE);
 
 	return 1;
 }
···
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
 {
-	regs->nip = pc;
+	regs_set_return_ip(regs, pc);
 }
 
 /*
···
 	case 'c':
 		/* handle the optional parameter */
 		if (kgdb_hex2long(&ptr, &addr))
-			linux_regs->nip = addr;
+			regs_set_return_ip(linux_regs, addr);
 
 		atomic_set(&kgdb_cpu_doing_single_step, -1);
 		/* set the trace bit if we're stepping */
···
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 			mtspr(SPRN_DBCR0,
 			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
-			linux_regs->msr |= MSR_DE;
+			regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
 #else
-			linux_regs->msr |= MSR_SE;
+			regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
 #endif
 			atomic_set(&kgdb_cpu_doing_single_step,
 				   raw_smp_processor_id());
+2 -2
arch/powerpc/kernel/kprobes-ftrace.c
···
 	 * On powerpc, NIP is *before* this instruction for the
 	 * pre handler
 	 */
-	regs->nip -= MCOUNT_INSN_SIZE;
+	regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
 
 	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
···
 		 * Emulate singlestep (and also recover regs->nip)
 		 * as if there is a nop
 		 */
-		regs->nip += MCOUNT_INSN_SIZE;
+		regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
 		if (unlikely(p->post_handler)) {
 			kcb->kprobe_status = KPROBE_HIT_SSDONE;
 			p->post_handler(p, regs, 0);
+13 -10
arch/powerpc/kernel/kprobes.c
···
 	 * variant as values in regs could play a part in
 	 * if the trap is taken or not
 	 */
-	regs->nip = (unsigned long)p->ainsn.insn;
+	regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
 }
 
 static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
···
 			kprobe_opcode_t insn = *p->ainsn.insn;
 			if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
 				/* Turn off 'trace' bits */
-				regs->msr &= ~MSR_SINGLESTEP;
-				regs->msr |= kcb->kprobe_saved_msr;
+				regs_set_return_msr(regs,
+					(regs->msr & ~MSR_SINGLESTEP) |
+					kcb->kprobe_saved_msr);
 				goto no_kprobe;
 			}
 
···
 	 * we end up emulating it in kprobe_handler(), which increments the nip
 	 * again.
 	 */
-	regs->nip = orig_ret_address - 4;
+	regs_set_return_ip(regs, orig_ret_address - 4);
 	regs->link = orig_ret_address;
 
 	return 0;
···
 	}
 
 	/* Adjust nip to after the single-stepped instruction */
-	regs->nip = (unsigned long)cur->addr + len;
-	regs->msr |= kcb->kprobe_saved_msr;
+	regs_set_return_ip(regs, (unsigned long)cur->addr + len);
+	regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
 
 	/*Restore back the original saved kprobes variables and continue. */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
···
 		 * and allow the page fault handler to continue as a
 		 * normal page fault.
 		 */
-		regs->nip = (unsigned long)cur->addr;
-		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
-		regs->msr |= kcb->kprobe_saved_msr;
+		regs_set_return_ip(regs, (unsigned long)cur->addr);
+		/* Turn off 'trace' bits */
+		regs_set_return_msr(regs,
+			(regs->msr & ~MSR_SINGLESTEP) |
+			kcb->kprobe_saved_msr);
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else
···
 		 * zero, try to fix up.
 		 */
 		if ((entry = search_exception_tables(regs->nip)) != NULL) {
-			regs->nip = extable_fixup(entry);
+			regs_set_return_ip(regs, extable_fixup(entry));
 			return 1;
 		}
+1 -1
arch/powerpc/kernel/mce.c
···
 	entry = search_kernel_exception_table(regs->nip);
 	if (entry) {
 		mce_err->ignore_event = true;
-		regs->nip = extable_fixup(entry);
+		regs_set_return_ip(regs, extable_fixup(entry));
 	}
 }
+1 -1
arch/powerpc/kernel/optprobes.c
···
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
 		__this_cpu_write(current_kprobe, &op->kp);
-		regs->nip = (unsigned long)op->kp.addr;
+		regs_set_return_ip(regs, (unsigned long)op->kp.addr);
 		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
 		__this_cpu_write(current_kprobe, NULL);
+23 -19
arch/powerpc/kernel/process.c
···
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
+		regs_set_return_msr(&tsk->thread.ckpt_regs,
+				    tsk->thread.regs->msr);
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 }
···
 	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr &= ~MSR_VSX;
-	tsk->thread.regs->msr = msr;
+	regs_set_return_msr(tsk->thread.regs, msr);
 }
 
 void giveup_fpu(struct task_struct *tsk)
···
 	msr &= ~MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr &= ~MSR_VSX;
-	tsk->thread.regs->msr = msr;
+	regs_set_return_msr(tsk->thread.regs, msr);
 }
 
 void giveup_altivec(struct task_struct *tsk)
···
 
 		msr_check_and_clear(new_msr);
 
-		regs->msr |= new_msr | fpexc_mode;
+		regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
 	}
 }
 #endif /* CONFIG_PPC_BOOK3S_64 */
···
 #endif
 	restore_math(regs);
 
-	regs->msr |= msr_diff;
+	regs_set_return_msr(regs, regs->msr | msr_diff);
 }
 
 #else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
···
 	}
 
 	/*
-	 * Call restore_sprs() before calling _switch(). If we move it after
-	 * _switch() then we miss out on calling it for new tasks. The reason
-	 * for this is we manually create a stack frame for new tasks that
-	 * directly returns through ret_from_fork() or
+	 * Call restore_sprs() and set_return_regs_changed() before calling
+	 * _switch(). If we move it after _switch() then we miss out on calling
+	 * it for new tasks. The reason for this is we manually create a stack
+	 * frame for new tasks that directly returns through ret_from_fork() or
 	 * ret_from_kernel_thread(). See copy_thread() for details.
 	 */
 	restore_sprs(old_thread, new_thread);
+
+	set_return_regs_changed(); /* _switch changes stack (and regs) */
 
 #ifdef CONFIG_PPC32
 	kuap_assert_locked();
···
 		}
 		regs->gpr[2] = toc;
 	}
-	regs->nip = entry;
-	regs->msr = MSR_USER64;
+	regs_set_return_ip(regs, entry);
+	regs_set_return_msr(regs, MSR_USER64);
 	} else {
-	regs->nip = start;
 	regs->gpr[2] = 0;
-	regs->msr = MSR_USER32;
+	regs_set_return_ip(regs, start);
+	regs_set_return_msr(regs, MSR_USER32);
 	}
+
 #endif
···
 	current->thread.tm_tfiar = 0;
 	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
 }
 EXPORT_SYMBOL(start_thread);
···
 	if (val > PR_FP_EXC_PRECISE)
 		return -EINVAL;
 	tsk->thread.fpexc_mode = __pack_fe01(val);
-	if (regs != NULL && (regs->msr & MSR_FP) != 0)
-		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
-			| tsk->thread.fpexc_mode;
+	if (regs != NULL && (regs->msr & MSR_FP) != 0) {
+		regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
+						| tsk->thread.fpexc_mode);
+	}
 	return 0;
 }
···
 		return -EINVAL;
 
 	if (val == PR_ENDIAN_BIG)
-		regs->msr &= ~MSR_LE;
+		regs_set_return_msr(regs, regs->msr & ~MSR_LE);
 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
-		regs->msr |= MSR_LE;
+		regs_set_return_msr(regs, regs->msr | MSR_LE);
 	else
 		return -EINVAL;
+3
arch/powerpc/kernel/prom_init.c
···
 #include <asm/rtas.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <asm/interrupt.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/smp.h>
···
 	asm volatile("sc 1\n" : "=r" (arg1) :
 		     "r" (arg1),
 		     "r" (arg2) :);
+	srr_regs_clobbered();
+
 	return arg1;
 }
+11 -9
arch/powerpc/kernel/ptrace/ptrace-adv.c
···
 	if (regs != NULL) {
 		task->thread.debug.dbcr0 &= ~DBCR0_BT;
 		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
-		regs->msr |= MSR_DE;
+		regs_set_return_msr(regs, regs->msr | MSR_DE);
 	}
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
···
 	if (regs != NULL) {
 		task->thread.debug.dbcr0 &= ~DBCR0_IC;
 		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
-		regs->msr |= MSR_DE;
+		regs_set_return_msr(regs, regs->msr | MSR_DE);
 	}
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
···
 			 * All debug events were off.....
 			 */
 			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
-			regs->msr &= ~MSR_DE;
+			regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 		}
 	}
 	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
···
 
 int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
 {
+	struct pt_regs *regs = task->thread.regs;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	int ret;
 	struct thread_struct *thread = &task->thread;
···
 		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
 		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
 					task->thread.debug.dbcr1)) {
-			task->thread.regs->msr &= ~MSR_DE;
+			regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
 		}
 		return 0;
···
 		dbcr_dac(task) |= DBCR_DAC1R;
 	if (data & 0x2UL)
 		dbcr_dac(task) |= DBCR_DAC1W;
-	task->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(regs, regs->msr | MSR_DE);
 	return 0;
 }
···
 	}
 out:
 	child->thread.debug.dbcr0 |= DBCR0_IDM;
-	child->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
 
 	return slot;
 }
···
 		return -ENOSPC;
 	}
 	child->thread.debug.dbcr0 |= DBCR0_IDM;
-	child->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
 
 	return slot + 4;
 }
···
 		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
 	else	/* PPC_BREAKPOINT_MODE_MASK */
 		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
-	child->thread.regs->msr |= MSR_DE;
+	regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
 
 	return 5;
 }
···
 		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
 					child->thread.debug.dbcr1)) {
 			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
-			child->thread.regs->msr &= ~MSR_DE;
+			regs_set_return_msr(child->thread.regs,
+					    child->thread.regs->msr & ~MSR_DE);
 		}
 	}
 	return rc;
+5 -9
arch/powerpc/kernel/ptrace/ptrace-noadv.c
···
 {
 	struct pt_regs *regs = task->thread.regs;
 
-	if (regs != NULL) {
-		regs->msr &= ~MSR_BE;
-		regs->msr |= MSR_SE;
-	}
+	if (regs != NULL)
+		regs_set_return_msr(regs, (regs->msr & ~MSR_BE) | MSR_SE);
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
···
 {
 	struct pt_regs *regs = task->thread.regs;
 
-	if (regs != NULL) {
-		regs->msr &= ~MSR_SE;
-		regs->msr |= MSR_BE;
-	}
+	if (regs != NULL)
+		regs_set_return_msr(regs, (regs->msr & ~MSR_SE) | MSR_BE);
 	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
···
 	struct pt_regs *regs = task->thread.regs;
 
 	if (regs != NULL)
-		regs->msr &= ~(MSR_SE | MSR_BE);
+		regs_set_return_msr(regs, regs->msr & ~(MSR_SE | MSR_BE));
 
 	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
+3 -2
arch/powerpc/kernel/ptrace/ptrace-view.c
···
 
 static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
 {
-	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
-	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
+	unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
+			(msr & MSR_DEBUGCHANGE);
+	regs_set_return_msr(task->thread.regs, newmsr);
 	return 0;
 }
+11 -3
arch/powerpc/kernel/rtas.c
···
 #include <linux/reboot.h>
 #include <linux/syscalls.h>
 
+#include <asm/interrupt.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/hvcall.h>
···
 
 /* This is here deliberately so it's only used in this file */
 void enter_rtas(unsigned long);
+
+static inline void do_enter_rtas(unsigned long args)
+{
+	enter_rtas(args);
+
+	srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
+}
 
 struct rtas_t rtas = {
 	.lock = __ARCH_SPIN_LOCK_UNLOCKED
···
 	save_args = rtas.args;
 	rtas.args = err_args;
 
-	enter_rtas(__pa(&rtas.args));
+	do_enter_rtas(__pa(&rtas.args));
 
 	err_args = rtas.args;
 	rtas.args = save_args;
···
 	for (i = 0; i < nret; ++i)
 		args->rets[i] = 0;
 
-	enter_rtas(__pa(args));
+	do_enter_rtas(__pa(args));
 }
 
 void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
···
 	flags = lock_rtas();
 
 	rtas.args = args;
-	enter_rtas(__pa(&rtas.args));
+	do_enter_rtas(__pa(&rtas.args));
 	args = rtas.args;
 
 	/* A -1 return code indicates that the last command couldn't
+6 -6
arch/powerpc/kernel/signal.c
···
 			regs->gpr[0] = __NR_restart_syscall;
 		else
 			regs->gpr[3] = regs->orig_gpr3;
-		regs->nip -= 4;
+		regs_add_return_ip(regs, -4);
 		regs->result = 0;
 	} else {
 		if (trap_is_scv(regs)) {
···
 	 * For signals taken in non-TM or suspended mode, we use the
 	 * normal/non-checkpointed stack pointer.
 	 */
-
-	unsigned long ret = tsk->thread.regs->gpr[1];
+	struct pt_regs *regs = tsk->thread.regs;
+	unsigned long ret = regs->gpr[1];
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	BUG_ON(tsk != current);
 
-	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+	if (MSR_TM_ACTIVE(regs->msr)) {
 		preempt_disable();
 		tm_reclaim_current(TM_CAUSE_SIGNAL);
-		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+		if (MSR_TM_TRANSACTIONAL(regs->msr))
 			ret = tsk->thread.ckpt_regs.gpr[1];
 
 		/*
···
 		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
 		 * enter the signal handler in non-transactional state.
 		 */
-		tsk->thread.regs->msr &= ~MSR_TS_MASK;
+		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
 		preempt_enable();
 	}
 #endif
+20 -20
arch/powerpc/kernel/signal_32.c
···
 
 	/* if doing signal return, restore the previous little-endian mode */
 	if (sig)
-		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 
 #ifdef CONFIG_ALTIVEC
 	/*
 	 * Force the process to reload the altivec registers from
 	 * current->thread when it next does altivec instructions
 	 */
-	regs->msr &= ~MSR_VEC;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
 	if (msr & MSR_VEC) {
 		/* restore altivec registers from the stack */
 		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
···
 	 * Force the process to reload the VSX registers from
 	 * current->thread when it next does VSX instruction.
 	 */
-	regs->msr &= ~MSR_VSX;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
 	if (msr & MSR_VSX) {
 		/*
 		 * Restore altivec registers from the stack to a local
···
 	 * force the process to reload the FP registers from
 	 * current->thread when it next does FP instructions
 	 */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
 
 #ifdef CONFIG_SPE
 	/* force the process to reload the spe registers from
 	   current->thread when it next does spe instructions */
-	regs->msr &= ~MSR_SPE;
+	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
 	if (msr & MSR_SPE) {
 		/* restore spe registers from the stack */
 		unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
···
 	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
 
 	/* Restore the previous little-endian mode */
-	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 
-	regs->msr &= ~MSR_VEC;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
 	if (msr & MSR_VEC) {
 		/* restore altivec registers from the stack */
 		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
···
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
 
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
 
 	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
 
-	regs->msr &= ~MSR_VSX;
+	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
 	if (msr & MSR_VSX) {
 		/*
 		 * Restore altivec registers from the stack to a local
···
 	 *
 	 * Pull in the MSR TM bits from the user context
 	 */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
 	/* Now, recheckpoint. This loads up all of the checkpointed (older)
 	 * registers, including FP and V[S]Rs. After recheckpointing, the
 	 * transactional versions should be loaded.
···
 	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
 	if (msr & MSR_FP) {
 		load_fp_state(&current->thread.fp_state);
-		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
 	}
 	if (msr & MSR_VEC) {
 		load_vr_state(&current->thread.vr_state);
-		regs->msr |= MSR_VEC;
+		regs_set_return_msr(regs, regs->msr | MSR_VEC);
 	}
 
 	preempt_enable();
···
 	regs->gpr[4] = (unsigned long)&frame->info;
 	regs->gpr[5] = (unsigned long)&frame->uc;
 	regs->gpr[6] = (unsigned long)frame;
-	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
+	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
 	/* enter the signal handler in native-endian mode */
-	regs->msr &= ~MSR_LE;
-	regs->msr |= (MSR_KERNEL & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
 	return 0;
 
 failed:
···
 	regs->gpr[1] = newsp;
 	regs->gpr[3] = ksig->sig;
 	regs->gpr[4] = (unsigned long) sc;
-	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
+	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
 	/* enter the signal handler in native-endian mode */
-	regs->msr &= ~MSR_LE;
-	regs->msr |= (MSR_KERNEL & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
 	return 0;
 
 failed:
···
 		 * set, and recheckpoint was not called. This avoid
 		 * hitting a TM Bad thing at RFID
 		 */
-		regs->msr &= ~MSR_TS_MASK;
+		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
 	}
 	/* Fall through, for non-TM restore */
 #endif
···
 	   affect the contents of these registers.  After this point,
 	   failure is a problem, anyway, and it's very unlikely unless
 	   the user is really doing something wrong. */
-	regs->msr = new_msr;
+	regs_set_return_msr(regs, new_msr);
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	current->thread.debug.dbcr0 = new_dbcr0;
 #endif
+16 -14
arch/powerpc/kernel/signal_64.c
···
 	/* get MSR separately, transfer the LE bit if doing signal return */
 	unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
 	if (sig)
-		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 	unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
 	unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
 	unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
···
 	 * This has to be done before copying stuff into tsk->thread.fpr/vr
 	 * for the reasons explained in the previous comment.
 	 */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
 
 #ifdef CONFIG_ALTIVEC
 	unsafe_get_user(v_regs, &sc->v_regs, efault_out);
···
 		return -EINVAL;
 
 	/* pull in MSR LE from user context */
-	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
 
 	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
 	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
···
 	 * This has to be done before copying stuff into tsk->thread.fpr/vr
 	 * for the reasons explained in the previous comment.
 	 */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
 
 #ifdef CONFIG_ALTIVEC
 	err |= __get_user(v_regs, &sc->v_regs);
···
 	preempt_disable();
 
 	/* pull in MSR TS bits from user context */
-	regs->msr |= msr & MSR_TS_MASK;
+	regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
 
 	/*
 	 * Ensure that TM is enabled in regs->msr before we leave the signal
···
 	 * to be de-scheduled with MSR[TS] set but without calling
 	 * tm_recheckpoint(). This can cause a bug.
 	 */
-	regs->msr |= MSR_TM;
+	regs_set_return_msr(regs, regs->msr | MSR_TM);
 
 	/* This loads the checkpointed FP/VEC state, if used */
 	tm_recheckpoint(&tsk->thread);
···
 	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
 	if (msr & MSR_FP) {
 		load_fp_state(&tsk->thread.fp_state);
-		regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
+		regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
 	}
 	if (msr & MSR_VEC) {
 		load_vr_state(&tsk->thread.vr_state);
-		regs->msr |= MSR_VEC;
+		regs_set_return_msr(regs, regs->msr | MSR_VEC);
 	}
 
 	preempt_enable();
···
 
 	/* This returns like rt_sigreturn */
 	set_thread_flag(TIF_RESTOREALL);
+
 	return 0;
 
 efault_out:
···
 	 * the MSR[TS] that came from user context later, at
 	 * restore_tm_sigcontexts.
 	 */
-	regs->msr &= ~MSR_TS_MASK;
+	regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
 
 	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
 		goto badframe;
···
 	 * MSR[TS] set, but without CPU in the proper state,
 	 * causing a TM bad thing.
 	 */
-	current->thread.regs->msr &= ~MSR_TS_MASK;
+	regs_set_return_msr(current->thread.regs,
+			    current->thread.regs->msr & ~MSR_TS_MASK);
 	if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
 		goto badframe;
···
 		goto badframe;
 
 	set_thread_flag(TIF_RESTOREALL);
+
 	return 0;
 
 badframe_block:
···
 
 	/* Set up to return from userspace. */
 	if (tsk->mm->context.vdso) {
-		regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
+		regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
 			goto badframe;
-		regs->nip = (unsigned long) &frame->tramp[0];
+		regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
 	}
 
 	/* Allocate a dummy caller frame for the signal handler. */
···
 	}
 
 	/* enter the signal handler in native-endian mode */
-	regs->msr &= ~MSR_LE;
-	regs->msr |= (MSR_KERNEL & MSR_LE);
+	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
 	regs->gpr[1] = newsp;
 	regs->gpr[3] = ksig->sig;
 	regs->result = 0;
+2 -1
arch/powerpc/kernel/syscalls.c
···
 {
 	struct thread_info *ti;
 
-	current->thread.regs->msr ^= MSR_LE;
+	regs_set_return_msr(current->thread.regs,
+			    current->thread.regs->msr ^ MSR_LE);
 
 	/*
 	 * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
+21 -21
arch/powerpc/kernel/traps.c
···
 	return;
 
 nonrecoverable:
-	regs->msr &= ~MSR_RI;
+	regs_set_return_msr(regs, regs->msr & ~MSR_RI);
 #endif
 }
 DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
···
 			printk(KERN_DEBUG "%s bad port %lx at %p\n",
 			       (*nip & 0x100)? "OUT to": "IN from",
 			       regs->gpr[rb] - _IO_BASE, nip);
-			regs->msr |= MSR_RI;
-			regs->nip = extable_fixup(entry);
+			regs_set_return_msr(regs, regs->msr | MSR_RI);
+			regs_set_return_ip(regs, extable_fixup(entry));
 			return 1;
 		}
 	}
···
 #define REASON_BOUNDARY		SRR1_BOUNDARY
 
 #define single_stepping(regs)	((regs)->msr & MSR_SE)
-#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
-#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
+#define clear_single_step(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
+#define clear_br_trace(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
 #endif
 
 #define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)
···
 #endif /* !__LITTLE_ENDIAN__ */
 
 	/* Go to next instruction */
-	regs->nip += 4;
+	regs_add_return_ip(regs, 4);
 }
 #endif /* CONFIG_VSX */
···
 
 	if (!(regs->msr & MSR_PR) &&  /* not user-mode */
 	    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
-		regs->nip += 4;
+		regs_add_return_ip(regs, 4);
 		return;
 	}
 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
···
 	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
 		switch (emulate_instruction(regs)) {
 		case 0:
-			regs->nip += 4;
+			regs_add_return_ip(regs, 4);
 			emulate_single_step(regs);
 			return;
 		case -EFAULT:
···
  */
 DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
 {
-	regs->msr |= REASON_ILLEGAL;
+	regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
 	do_program_check(regs);
 }
···
 
 	if (fixed == 1) {
 		/* skip over emulated instruction */
-		regs->nip += inst_length(reason);
+		regs_add_return_ip(regs, inst_length(reason));
 		emulate_single_step(regs);
 		return;
 	}
···
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (user_mode(regs)) {
 		current->thread.load_tm++;
-		regs->msr |= MSR_TM;
+		regs_set_return_msr(regs, regs->msr | MSR_TM);
 		tm_enable();
 		tm_restore_sprs(&current->thread);
 		return;
···
 			pr_err("DSCR based mfspr emulation failed\n");
 			return;
 		}
-		regs->nip += 4;
+		regs_add_return_ip(regs, 4);
 		emulate_single_step(regs);
 	}
 	return;
···
 	 */
 	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
 			       current->thread.debug.dbcr1))
-		regs->msr |= MSR_DE;
+		regs_set_return_msr(regs, regs->msr | MSR_DE);
 	else
 		/* Make sure the IDM flag is off */
 		current->thread.debug.dbcr0 &= ~DBCR0_IDM;
···
 	 * instead of stopping here when hitting a BT
 	 */
 	if (debug_status & DBSR_BT) {
-		regs->msr &= ~MSR_DE;
+		regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 
 		/* Disable BT */
 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
···
 		if (user_mode(regs)) {
 			current->thread.debug.dbcr0 &= ~DBCR0_BT;
 			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
-			regs->msr |= MSR_DE;
+			regs_set_return_msr(regs, regs->msr | MSR_DE);
 			return;
 		}
···
 		if (debugger_sstep(regs))
 			return;
 	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
-		regs->msr &= ~MSR_DE;
+		regs_set_return_msr(regs, regs->msr & ~MSR_DE);
 
 		/* Disable instruction completion */
 		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
···
 			current->thread.debug.dbcr0 &= ~DBCR0_IC;
 			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
 					       current->thread.debug.dbcr1))
-				regs->msr |= MSR_DE;
+				regs_set_return_msr(regs, regs->msr | MSR_DE);
 			else
 				/* Make sure the IDM bit is off */
 				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
···
 		PPC_WARN_EMULATED(altivec, regs);
 		err = emulate_altivec(regs);
 		if (err == 0) {
-			regs->nip += 4;		/* skip emulated instruction */
+			regs_add_return_ip(regs, 4); /* skip emulated instruction */
 			emulate_single_step(regs);
 			return;
 		}
···
 
 	err = do_spe_mathemu(regs);
 	if (err == 0) {
-		regs->nip += 4;		/* skip emulated instruction */
+		regs_add_return_ip(regs, 4); /* skip emulated instruction */
 		emulate_single_step(regs);
 		return;
 	}
···
 	giveup_spe(current);
 	preempt_enable();
 
-	regs->nip -= 4;
+	regs_add_return_ip(regs, -4);
 	err = speround_handler(regs);
 	if (err == 0) {
-		regs->nip += 4;		/* skip emulated instruction */
+		regs_add_return_ip(regs, 4); /* skip emulated instruction */
 		emulate_single_step(regs);
 		return;
 	}
+2 -2
arch/powerpc/kernel/uprobes.c
···
 
 	autask->saved_trap_nr = current->thread.trap_nr;
 	current->thread.trap_nr = UPROBE_TRAP_NR;
-	regs->nip = current->utask->xol_vaddr;
+	regs_set_return_ip(regs, current->utask->xol_vaddr);
 
 	user_enable_single_step(current);
 	return 0;
···
 	 * support doesn't exist and have to fix-up the next instruction
 	 * to be executed.
 	 */
-	regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn);
+	regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
 
 	user_disable_single_step(current);
 	return 0;
+6
arch/powerpc/kernel/vector.S
···
 	addi	r5,r4,THREAD		/* Get THREAD */
 	oris	r12,r12,MSR_VEC@h
 	std	r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+#endif
 #endif
 	li	r4,1
 	stb	r4,THREAD_LOAD_VEC(r5)
···
 	/* enable use of VSX after return */
 	oris	r12,r12,MSR_VSX@h
 	std	r12,_MSR(r1)
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
 	b	fast_interrupt_return_srr
 
 #endif /* CONFIG_VSX */
+3
arch/powerpc/kvm/book3s_hv.c
···
 
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&kvm->arch.vcpus_running);
+
+	srr_regs_clobbered();
+
 	return r;
 }
+2
arch/powerpc/kvm/book3s_pr.c
···
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
+#include <asm/interrupt.h>
 #include <asm/io.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
···
 	/* Make sure we save the guest TAR/EBB/DSCR state */
 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
 
+	srr_regs_clobbered();
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
+1 -1
arch/powerpc/lib/error-inject.c
···
 	 * function in the kernel/module, captured on a kprobe. We don't need
 	 * to worry about 32-bit userspace on a 64-bit kernel.
 	 */
-	regs->nip = regs->link;
+	regs_set_return_ip(regs, regs->link);
 }
 NOKPROBE_SYMBOL(override_function_with_return);
+9 -8
arch/powerpc/lib/sstep.c
···
 	default:
 		WARN_ON_ONCE(1);
 	}
-	regs->nip = next_pc;
+	regs_set_return_ip(regs, next_pc);
 }
 NOKPROBE_SYMBOL(emulate_update_regs);
···
 			/* can't step mtmsr[d] that would clear MSR_RI */
 			return -1;
 		/* here op.val is the mask of bits to change */
-		regs->msr = (regs->msr & ~op.val) | (val & op.val);
+		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
 		goto instr_done;
 
 #ifdef CONFIG_PPC64
···
 		if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
 				cpu_has_feature(CPU_FTR_REAL_LE) &&
 				regs->gpr[0] == 0x1ebe) {
-			regs->msr ^= MSR_LE;
+			regs_set_return_msr(regs, regs->msr ^ MSR_LE);
 			goto instr_done;
 		}
 		regs->gpr[9] = regs->gpr[13];
···
 		regs->gpr[11] = regs->nip + 4;
 		regs->gpr[12] = regs->msr & MSR_MASK;
 		regs->gpr[13] = (unsigned long) get_paca();
-		regs->nip = (unsigned long) &system_call_common;
-		regs->msr = MSR_KERNEL;
+		regs_set_return_ip(regs, (unsigned long) &system_call_common);
+		regs_set_return_msr(regs, MSR_KERNEL);
 		return 1;
 
 #ifdef CONFIG_PPC_BOOK3S_64
···
 		regs->gpr[11] = regs->nip + 4;
 		regs->gpr[12] = regs->msr & MSR_MASK;
 		regs->gpr[13] = (unsigned long) get_paca();
-		regs->nip = (unsigned long) &system_call_vectored_emulate;
-		regs->msr = MSR_KERNEL;
+		regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
+		regs_set_return_msr(regs, MSR_KERNEL);
 		return 1;
 #endif
···
 		return 0;
 
  instr_done:
-	regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
+	regs_set_return_ip(regs,
+		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
 	return 1;
 }
 NOKPROBE_SYMBOL(emulate_step);
+1
arch/powerpc/lib/test_emulate_step.c
···
 	if (!regs || !ppc_inst_val(instr))
 		return -EINVAL;
 
+	/* This is not a return frame regs */
 	regs->nip = patch_site_addr(&patch__exec_instr);
 
 	analysed = analyse_instr(&op, regs, instr);
+1 -1
arch/powerpc/math-emu/math.c
···
 		break;
 	}
 
-	regs->nip += 4;
+	regs_add_return_ip(regs, 4);
 	return 0;
 
 illegal:
+1 -1
arch/powerpc/math-emu/math_efp.c
···
 illegal:
 	if (have_e500_cpu_a005_erratum) {
 		/* according to e500 cpu a005 erratum, reissue efp inst */
-		regs->nip -= 4;
+		regs_add_return_ip(regs, -4);
 		pr_debug("re-issue efp inst: %08lx\n", speinsn);
 		return 0;
 	}
+2 -2
arch/powerpc/platforms/embedded6xx/holly.c
···
 	/* Are we prepared to handle this fault */
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
 		tsi108_clear_pci_cfg_error();
-		regs->msr |= MSR_RI;
-		regs->nip = extable_fixup(entry);
+		regs_set_return_msr(regs, regs->msr | MSR_RI);
+		regs_set_return_ip(regs, extable_fixup(entry));
 		return 1;
 	}
 	return 0;
+2 -2
arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
···
 	/* Are we prepared to handle this fault */
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
 		tsi108_clear_pci_cfg_error();
-		regs->msr |= MSR_RI;
-		regs->nip = extable_fixup(entry);
+		regs_set_return_msr(regs, regs->msr | MSR_RI);
+		regs_set_return_ip(regs, extable_fixup(entry));
 		return 1;
 	}
 	return 0;
+2 -2
arch/powerpc/platforms/pasemi/idle.c
···
 	 */
 
 	if (regs->msr & SRR1_WAKEMASK)
-		regs->nip = regs->link;
+		regs_set_return_ip(regs, regs->link);
 
 	switch (regs->msr & SRR1_WAKEMASK) {
 	case SRR1_WAKEDEC:
···
 		restore_astate(hard_smp_processor_id());
 
 	/* everything handled */
-	regs->msr |= MSR_RI;
+	regs_set_return_msr(regs, regs->msr | MSR_RI);
 	return 1;
 }
+4
arch/powerpc/platforms/powernv/opal-call.c
···
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/percpu.h>
 #include <linux/jump_label.h>
+#include <asm/interrupt.h>
 #include <asm/opal-api.h>
 #include <asm/trace.h>
 #include <asm/asm-prototypes.h>
···
 	unsigned long msr = mfmsr();
 	bool mmu = (msr & (MSR_IR|MSR_DR));
 	int64_t ret;
+
+	/* OPAL call / firmware may use SRR and/or HSRR */
+	srr_regs_clobbered();
 
 	msr &= ~MSR_EE;
+1 -1
arch/powerpc/platforms/powernv/opal.c
···
 	 * Setup regs->nip to rfi into fixup address.
 	 */
 	if (recover_addr)
-		regs->nip = recover_addr;
+		regs_set_return_ip(regs, recover_addr);
 
 out:
 	return !!recover_addr;
+29
arch/powerpc/platforms/pseries/hvCall.S
···
 	mfcr	r0
 	stw	r0,8(r1)
 	HVSC				/* invoke the hypervisor */
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 	blr				/* return r3 = status */
···
 	HCALL_BRANCH(plpar_hcall_norets_trace)
 	HVSC				/* invoke the hypervisor */
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 	blr				/* return r3 = status */
···
 	HCALL_INST_PRECALL(R4)
 	HVSC
 	HCALL_INST_POSTCALL_NORETS
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
 	blr
···
 	std	r6, 16(r12)
 	std	r7, 24(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
···
 	std	r7,24(r12)
 
 	HCALL_INST_POSTCALL(r12)
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
···
 	std	r5, 8(r12)
 	std	r6, 16(r12)
 	std	r7, 24(r12)
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
···
 	std	r11,56(r12)
 	std	r0, 64(r12)
 
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
+
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
···
 	std	r0,64(r12)
 
 	HCALL_INST_POSTCALL(r12)
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
···
 	std	r10,48(r12)
 	std	r11,56(r12)
 	std	r0, 64(r12)
+
+	li	r4,0
+	stb	r4,PACASRR_VALID(r13)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
+2 -2
arch/powerpc/platforms/pseries/ras.c
···
 	if ((be64_to_cpu(regs->msr) &
 	     (MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
 	      MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
-		regs->nip = be64_to_cpu((__be64)regs->nip);
-		regs->msr = 0;
+		regs_set_return_ip(regs, be64_to_cpu((__be64)regs->nip));
+		regs_set_return_msr(regs, 0);
 	}
 #endif
+1 -1
arch/powerpc/sysdev/fsl_pci.c
···
 		ret = get_kernel_nofault(inst, (void *)regs->nip);
 
 		if (!ret && mcheck_handle_load(regs, inst)) {
-			regs->nip += 4;
+			regs_add_return_ip(regs, 4);
 			return 1;
 		}
 	}
+2 -2
arch/powerpc/sysdev/fsl_rio.c
···
 				 __func__);
 			out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
 				 0);
-			regs->msr |= MSR_RI;
-			regs->nip = extable_fixup(entry);
+			regs_set_return_msr(regs, regs->msr | MSR_RI);
+			regs_set_return_ip(regs, extable_fixup(entry));
 			return 1;
 		}
 	}
+7 -7
arch/powerpc/xmon/xmon.c
···
 
 	bp = in_breakpoint_table(regs->nip, &offset);
 	if (bp != NULL) {
-		regs->nip = bp->address + offset;
+		regs_set_return_ip(regs, bp->address + offset);
 		atomic_dec(&bp->ref_count);
 	}
···
 	if (regs->msr & MSR_DE) {
 		bp = at_breakpoint(regs->nip);
 		if (bp != NULL) {
-			regs->nip = (unsigned long) &bp->instr[0];
+			regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
 			atomic_inc(&bp->ref_count);
 		}
 	}
···
 		if (bp != NULL) {
 			int stepped = emulate_step(regs, ppc_inst_read(bp->instr));
 			if (stepped == 0) {
-				regs->nip = (unsigned long) &bp->instr[0];
+				regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
 				atomic_inc(&bp->ref_count);
 			} else if (stepped < 0) {
 				printf("Couldn't single-step %s instruction\n",
···
 	/* Are we at the trap at bp->instr[1] for some bp? */
 	bp = in_breakpoint_table(regs->nip, &offset);
 	if (bp != NULL && (offset == 4 || offset == 8)) {
-		regs->nip = bp->address + offset;
+		regs_set_return_ip(regs, bp->address + offset);
 		atomic_dec(&bp->ref_count);
 		return 1;
 	}
···
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
 		bp = in_breakpoint_table(regs->nip, &offset);
 		if (bp != NULL) {
-			regs->nip = bp->address + offset;
+			regs_set_return_ip(regs, bp->address + offset);
 			atomic_dec(&bp->ref_count);
 		}
 	}
···
 #ifdef CONFIG_BOOKE
 static int do_step(struct pt_regs *regs)
 {
-	regs->msr |= MSR_DE;
+	regs_set_return_msr(regs, regs->msr | MSR_DE);
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
 	return 1;
 }
···
 			}
 		}
 	}
-	regs->msr |= MSR_SE;
+	regs_set_return_msr(regs, regs->msr | MSR_SE);
 	return 1;
 }
 #endif