Merge tag 'powerpc-6.7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Fix corruption of f0/vs0 during FP/Vector save, seen as userspace
  crashes when using io-uring workers (in particular with MariaDB);
  a sketch of the failure mode follows after this list

- Fix KVM_RUN potentially clobbering all host userspace FP/Vector
  registers; see the second sketch after this list
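
The f0/vs0 corruption happens because the FP/VSX save path uses register 0
as scratch: after storing all 32 registers it reads the FPSCR through fr0
(mffs), so the interrupted context's live f0/vs0 is overwritten. The fpu.S
and vector.S hunks below add a reload of register 0 from the just-saved
area. The following is a toy model in plain C, not kernel code; the names
(fp_state_model, save_fp_buggy, save_fp_fixed) are made up for
illustration:

    #include <assert.h>
    #include <string.h>

    #define NREGS 32

    struct fp_state_model {
        double fpr[NREGS];   /* stands in for the saved register image */
        double fpscr;        /* stands in for the saved FPSCR */
    };

    /* Buggy shape: stores all registers, then reuses slot 0 as scratch
     * for the status word and never puts the caller's value back. */
    static void save_fp_buggy(double *live, double live_fpscr,
                              struct fp_state_model *st)
    {
        memcpy(st->fpr, live, sizeof(st->fpr));
        live[0] = live_fpscr;   /* "mffs fr0" analogue: clobbers reg 0 */
        st->fpscr = live[0];    /* "stfd fr0,FPSTATE_FPSCR" analogue */
    }

    /* Fixed shape: same save, but register 0 is reloaded from the saved
     * image, mirroring the new REST_1FPVSR(0, ...) in the fpu.S diff. */
    static void save_fp_fixed(double *live, double live_fpscr,
                              struct fp_state_model *st)
    {
        save_fp_buggy(live, live_fpscr, st);
        live[0] = st->fpr[0];   /* restore the caller's f0/vs0 */
    }

    int main(void)
    {
        double live[NREGS] = { 42.0 };
        struct fp_state_model st;

        save_fp_fixed(live, 0.0, &st);
        assert(live[0] == 42.0);   /* interrupted context's value survives */
        return 0;
    }

The Altivec path has the same shape: mfvscr uses v0 as scratch, and the new
lvx in vector.S reloads v0 from the save area.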
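
For the KVM_RUN fix, the relevant difference (visible in the process.c hunk
below) is that save_fpu()/save_altivec() only copy the live registers into
the thread struct, while __giveup_fpu()/__giveup_altivec() also clear
MSR_FP/MSR_VEC, so the kernel treats the live registers as stale and
restores userspace's state from the saved image after the guest has run.
A toy C model of that distinction; the flag and structure names are
invented for the sketch:

    #include <assert.h>

    /* toy flag: "this task currently owns the live FP register file" */
    #define MSR_FP_MODEL  (1u << 0)

    struct thread_model {
        double       live_f0;   /* stands in for the physical FP registers */
        double       saved_f0;  /* stands in for thread.fp_state */
        unsigned int msr;
    };

    /* save_fpu()-like: copy out the live value, ownership unchanged */
    static void save_fpu_model(struct thread_model *t)
    {
        t->saved_f0 = t->live_f0;
    }

    /* __giveup_fpu()-like: save, then mark the live registers as stale */
    static void giveup_fpu_model(struct thread_model *t)
    {
        save_fpu_model(t);
        t->msr &= ~MSR_FP_MODEL;
    }

    /* On the way back to userspace the saved image is only reloaded if
     * the task no longer owns the live registers. */
    static void return_to_user(struct thread_model *t)
    {
        if (!(t->msr & MSR_FP_MODEL))
            t->live_f0 = t->saved_f0;
    }

    int main(void)
    {
        struct thread_model t = { .live_f0 = 1.5, .msr = MSR_FP_MODEL };

        giveup_fpu_model(&t);  /* what the fixed code path does */
        t.live_f0 = -99.0;     /* guest run clobbers the physical registers */
        return_to_user(&t);
        assert(t.live_f0 == 1.5);  /* userspace gets its own value back */
        return 0;
    }

With only the save_fpu_model()-style call the assert would fail, because
the clobbered live value would still be treated as authoritative.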

Thanks to Timothy Pearson, Jens Axboe, and Nicholas Piggin.

* tag 'powerpc-6.7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
KVM: PPC: Book3S HV: Fix KVM_RUN clobbering FP/VEC user registers
powerpc: Don't clobber f0/vs0 during fp|altivec register save

Changed files: +18 -3

arch/powerpc/kernel/fpu.S (+13)
···
 #include <asm/feature-fixups.h>
 
 #ifdef CONFIG_VSX
+#define __REST_1FPVSR(n,c,base)			\
+BEGIN_FTR_SECTION				\
+	b	2f;				\
+END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
+	REST_FPR(n,base);			\
+	b	3f;				\
+2:	REST_VSR(n,c,base);			\
+3:
+
 #define __REST_32FPVSRS(n,c,base)		\
 BEGIN_FTR_SECTION				\
 	b	2f;				\
···
 2:	SAVE_32VSRS(n,c,base);			\
 3:
 #else
+#define __REST_1FPVSR(n,b,base)		REST_FPR(n, base)
 #define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
 #define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
 #endif
+#define REST_1FPVSR(n,c,base)   __REST_1FPVSR(n,__REG_##c,__REG_##base)
 #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
 #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
 
···
 	SAVE_32FPVSRS(0, R4, R3)
 	mffs	fr0
 	stfd	fr0,FPSTATE_FPSCR(r3)
+	REST_1FPVSR(0, R4, R3)
 	blr
 EXPORT_SYMBOL(store_fp_state)
 
···
 2:	SAVE_32FPVSRS(0, R4, R6)
 	mffs	fr0
 	stfd	fr0,FPSTATE_FPSCR(r6)
+	REST_1FPVSR(0, R4, R6)
 	blr

arch/powerpc/kernel/process.c (+3 -3)
···
 	usermsr = current->thread.regs->msr;
 
+	/* Caller has enabled FP/VEC/VSX/TM in MSR */
 	if (usermsr & MSR_FP)
-		save_fpu(current);
-
+		__giveup_fpu(current);
 	if (usermsr & MSR_VEC)
-		save_altivec(current);
+		__giveup_altivec(current);
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (usermsr & MSR_TM) {

arch/powerpc/kernel/vector.S (+2)
···
 	mfvscr	v0
 	li	r4, VRSTATE_VSCR
 	stvx	v0, r4, r3
+	lvx	v0, 0, r3
 	blr
 EXPORT_SYMBOL(store_vr_state)
 
···
 	mfvscr	v0
 	li	r4,VRSTATE_VSCR
 	stvx	v0,r4,r7
+	lvx	v0,0,r7
 	blr
 
 #ifdef CONFIG_VSX