Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Put FP/VSX and VR state into structures

This creates new 'thread_fp_state' and 'thread_vr_state' structures
to store FP/VSX state (including FPSCR) and Altivec/VSX state
(including VSCR), and uses them in the thread_struct. In the
thread_fp_state, the FPRs and VSRs are represented as u64 rather
than double, since we rarely perform floating-point computations
on the values, and this will enable the structures to be used
in KVM code as well. Similarly FPSCR is now a u64 rather than
a structure of two 32-bit values.

This takes the offsets out of the macros such as SAVE_32FPRS,
REST_32FPRS, etc. This enables the same macros to be used for normal
and transactional state, enabling us to delete the transactional
versions of the macros. This also removes the unused do_load_up_fpu
and do_load_up_altivec, which were in fact buggy since they didn't
create large enough stack frames to account for the fact that
load_up_fpu and load_up_altivec are not designed to be called from C
and assume that their caller's stack frame is an interrupt frame.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Authored by Paul Mackerras; committed by Benjamin Herrenschmidt.
de79f7b9 8e0a1611

+200 -358
+6 -89
arch/powerpc/include/asm/ppc_asm.h
··· 98 98 #define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) 99 99 #define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) 100 100 101 - #define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) 101 + #define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base) 102 102 #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) 103 103 #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) 104 104 #define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base) 105 105 #define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base) 106 106 #define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) 107 - #define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) 107 + #define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base) 108 108 #define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base) 109 109 #define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base) 110 110 #define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base) 111 111 #define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base) 112 112 #define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base) 113 113 114 - #define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,base,b 114 + #define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b 115 115 #define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base) 116 116 #define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base) 117 117 #define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base) 118 118 #define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base) 119 119 #define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base) 120 - #define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,base,b 120 + #define REST_VR(n,b,base) li b,16*(n); lvx n,base,b 121 121 #define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base) 122 122 #define 
REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base) 123 123 #define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base) 124 124 #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) 125 125 #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) 126 126 127 - /* Save/restore FPRs, VRs and VSRs from their checkpointed backups in 128 - * thread_struct: 129 - */ 130 - #define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \ 131 - 8*TS_FPRWIDTH*(n)(base) 132 - #define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \ 133 - SAVE_FPR_TRANSACT(n+1, base) 134 - #define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \ 135 - SAVE_2FPRS_TRANSACT(n+2, base) 136 - #define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \ 137 - SAVE_4FPRS_TRANSACT(n+4, base) 138 - #define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \ 139 - SAVE_8FPRS_TRANSACT(n+8, base) 140 - #define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \ 141 - SAVE_16FPRS_TRANSACT(n+16, base) 142 - 143 - #define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \ 144 - 8*TS_FPRWIDTH*(n)(base) 145 - #define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \ 146 - REST_FPR_TRANSACT(n+1, base) 147 - #define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \ 148 - REST_2FPRS_TRANSACT(n+2, base) 149 - #define REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \ 150 - REST_4FPRS_TRANSACT(n+4, base) 151 - #define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \ 152 - REST_8FPRS_TRANSACT(n+8, base) 153 - #define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \ 154 - REST_16FPRS_TRANSACT(n+16, base) 155 - 156 - 157 - #define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \ 158 - stvx n,b,base 159 - #define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \ 160 - SAVE_VR_TRANSACT(n+1,b,base) 161 - #define 
SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \ 162 - SAVE_2VRS_TRANSACT(n+2,b,base) 163 - #define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \ 164 - SAVE_4VRS_TRANSACT(n+4,b,base) 165 - #define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \ 166 - SAVE_8VRS_TRANSACT(n+8,b,base) 167 - #define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \ 168 - SAVE_16VRS_TRANSACT(n+16,b,base) 169 - 170 - #define REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \ 171 - lvx n,b,base 172 - #define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \ 173 - REST_VR_TRANSACT(n+1,b,base) 174 - #define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \ 175 - REST_2VRS_TRANSACT(n+2,b,base) 176 - #define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \ 177 - REST_4VRS_TRANSACT(n+4,b,base) 178 - #define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \ 179 - REST_8VRS_TRANSACT(n+8,b,base) 180 - #define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \ 181 - REST_16VRS_TRANSACT(n+16,b,base) 182 - 183 - 184 - #define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \ 185 - STXVD2X(n,R##base,R##b) 186 - #define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \ 187 - SAVE_VSR_TRANSACT(n+1,b,base) 188 - #define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \ 189 - SAVE_2VSRS_TRANSACT(n+2,b,base) 190 - #define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \ 191 - SAVE_4VSRS_TRANSACT(n+4,b,base) 192 - #define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \ 193 - SAVE_8VSRS_TRANSACT(n+8,b,base) 194 - #define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \ 195 - SAVE_16VSRS_TRANSACT(n+16,b,base) 196 - 197 - #define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \ 198 - LXVD2X(n,R##base,R##b) 199 - #define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \ 200 - 
REST_VSR_TRANSACT(n+1,b,base) 201 - #define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \ 202 - REST_2VSRS_TRANSACT(n+2,b,base) 203 - #define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \ 204 - REST_4VSRS_TRANSACT(n+4,b,base) 205 - #define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \ 206 - REST_8VSRS_TRANSACT(n+8,b,base) 207 - #define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \ 208 - REST_16VSRS_TRANSACT(n+16,b,base) 209 - 210 127 /* Save the lower 32 VSRs in the thread VSR region */ 211 - #define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b) 128 + #define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X(n,R##base,R##b) 212 129 #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) 213 130 #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) 214 131 #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) 215 132 #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) 216 133 #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) 217 - #define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b) 134 + #define REST_VSR(n,b,base) li b,16*(n); LXVD2X(n,R##base,R##b) 218 135 #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) 219 136 #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) 220 137 #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
+18 -22
arch/powerpc/include/asm/processor.h
··· 144 144 145 145 #define TS_FPROFFSET 0 146 146 #define TS_VSRLOWOFFSET 1 147 - #define TS_FPR(i) fpr[i][TS_FPROFFSET] 148 - #define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET] 147 + #define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET] 148 + #define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET] 149 + 150 + /* FP and VSX 0-31 register set */ 151 + struct thread_fp_state { 152 + u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); 153 + u64 fpscr; /* Floating point status */ 154 + }; 155 + 156 + /* Complete AltiVec register set including VSCR */ 157 + struct thread_vr_state { 158 + vector128 vr[32] __attribute__((aligned(16))); 159 + vector128 vscr __attribute__((aligned(16))); 160 + }; 149 161 150 162 struct thread_struct { 151 163 unsigned long ksp; /* Kernel stack pointer */ ··· 210 198 unsigned long dvc2; 211 199 #endif 212 200 #endif 213 - /* FP and VSX 0-31 register set */ 214 - double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); 215 - struct { 216 - 217 - unsigned int pad; 218 - unsigned int val; /* Floating point status */ 219 - } fpscr; 201 + struct thread_fp_state fp_state; 220 202 int fpexc_mode; /* floating-point exception mode */ 221 203 unsigned int align_ctl; /* alignment handling control */ 222 204 #ifdef CONFIG_PPC64 ··· 228 222 struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ 229 223 unsigned long trap_nr; /* last trap # on this thread */ 230 224 #ifdef CONFIG_ALTIVEC 231 - /* Complete AltiVec register set */ 232 - vector128 vr[32] __attribute__((aligned(16))); 233 - /* AltiVec status */ 234 - vector128 vscr __attribute__((aligned(16))); 225 + struct thread_vr_state vr_state; 235 226 unsigned long vrsave; 236 227 int used_vr; /* set if process has used altivec */ 237 228 #endif /* CONFIG_ALTIVEC */ ··· 265 262 * transact_fpr[] is the new set of transactional values. 266 263 * VRs work the same way. 
267 264 */ 268 - double transact_fpr[32][TS_FPRWIDTH]; 269 - struct { 270 - unsigned int pad; 271 - unsigned int val; /* Floating point status */ 272 - } transact_fpscr; 273 - vector128 transact_vr[32] __attribute__((aligned(16))); 274 - vector128 transact_vscr __attribute__((aligned(16))); 265 + struct thread_fp_state transact_fp; 266 + struct thread_vr_state transact_vr; 275 267 unsigned long transact_vrsave; 276 268 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 277 269 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER ··· 320 322 .ksp = INIT_SP, \ 321 323 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ 322 324 .fs = KERNEL_DS, \ 323 - .fpr = {{0}}, \ 324 - .fpscr = { .val = 0, }, \ 325 325 .fpexc_mode = 0, \ 326 326 .ppr = INIT_PPR, \ 327 327 }
+1 -1
arch/powerpc/include/asm/sfp-machine.h
··· 125 125 #define FP_EX_DIVZERO (1 << (31 - 5)) 126 126 #define FP_EX_INEXACT (1 << (31 - 6)) 127 127 128 - #define __FPU_FPSCR (current->thread.fpscr.val) 128 + #define __FPU_FPSCR (current->thread.fp_state.fpscr) 129 129 130 130 /* We only actually write to the destination register 131 131 * if exceptions signalled (if any) will not trap.
+3 -3
arch/powerpc/kernel/align.c
··· 660 660 if (reg < 32) 661 661 ptr = (char *) &current->thread.TS_FPR(reg); 662 662 else 663 - ptr = (char *) &current->thread.vr[reg - 32]; 663 + ptr = (char *) &current->thread.vr_state.vr[reg - 32]; 664 664 665 665 lptr = (unsigned long *) ptr; 666 666 ··· 897 897 return -EFAULT; 898 898 } 899 899 } else if (flags & F) { 900 - data.dd = current->thread.TS_FPR(reg); 900 + data.ll = current->thread.TS_FPR(reg); 901 901 if (flags & S) { 902 902 /* Single-precision FP store requires conversion... */ 903 903 #ifdef CONFIG_PPC_FPU ··· 975 975 if (unlikely(ret)) 976 976 return -EFAULT; 977 977 } else if (flags & F) 978 - current->thread.TS_FPR(reg) = data.dd; 978 + current->thread.TS_FPR(reg) = data.ll; 979 979 else 980 980 regs->gpr[reg] = data.ll; 981 981
+8 -17
arch/powerpc/kernel/asm-offsets.c
··· 90 90 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); 91 91 #endif 92 92 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); 93 - DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); 94 - DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); 93 + DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state)); 94 + DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr)); 95 95 #ifdef CONFIG_ALTIVEC 96 - DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); 96 + DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state)); 97 97 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); 98 - DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); 99 98 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); 99 + DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr)); 100 100 #endif /* CONFIG_ALTIVEC */ 101 101 #ifdef CONFIG_VSX 102 - DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr)); 103 102 DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); 104 103 #endif /* CONFIG_VSX */ 105 104 #ifdef CONFIG_PPC64 ··· 142 143 DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); 143 144 DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); 144 145 DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); 145 - DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, 146 - transact_vr[0])); 147 - DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct, 148 - transact_vscr)); 146 + DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct, 147 + transact_vr)); 149 148 DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct, 150 149 transact_vrsave)); 151 - DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct, 152 - transact_fpr[0])); 153 - DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct, 154 - transact_fpscr)); 155 - #ifdef CONFIG_VSX 156 - DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct, 157 
- transact_fpr[0])); 158 - #endif 150 + DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct, 151 + transact_fp)); 159 152 /* Local pt_regs on stack for Transactional Memory funcs. */ 160 153 DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + 161 154 sizeof(struct pt_regs) + 16);
+13 -46
arch/powerpc/kernel/fpu.S
··· 35 35 2: REST_32VSRS(n,c,base); \ 36 36 3: 37 37 38 - #define __REST_32FPVSRS_TRANSACT(n,c,base) \ 39 - BEGIN_FTR_SECTION \ 40 - b 2f; \ 41 - END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 42 - REST_32FPRS_TRANSACT(n,base); \ 43 - b 3f; \ 44 - 2: REST_32VSRS_TRANSACT(n,c,base); \ 45 - 3: 46 - 47 38 #define __SAVE_32FPVSRS(n,c,base) \ 48 39 BEGIN_FTR_SECTION \ 49 40 b 2f; \ ··· 45 54 3: 46 55 #else 47 56 #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) 48 - #define __REST_32FPVSRS_TRANSACT(n,b,base) REST_32FPRS(n, base) 49 57 #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) 50 58 #endif 51 59 #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base) 52 - #define REST_32FPVSRS_TRANSACT(n,c,base) \ 53 - __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base) 54 60 #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base) 55 61 56 62 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 57 - /* 58 - * Wrapper to call load_up_fpu from C. 59 - * void do_load_up_fpu(struct pt_regs *regs); 60 - */ 61 - _GLOBAL(do_load_up_fpu) 62 - mflr r0 63 - std r0, 16(r1) 64 - stdu r1, -112(r1) 65 - 66 - subi r6, r3, STACK_FRAME_OVERHEAD 67 - /* load_up_fpu expects r12=MSR, r13=PACA, and returns 68 - * with r12 = new MSR. 
69 - */ 70 - ld r12,_MSR(r6) 71 - GET_PACA(r13) 72 - 73 - bl load_up_fpu 74 - std r12,_MSR(r6) 75 - 76 - ld r0, 112+16(r1) 77 - addi r1, r1, 112 78 - mtlr r0 79 - blr 80 - 81 - 82 63 /* void do_load_up_transact_fpu(struct thread_struct *thread) 83 64 * 84 65 * This is similar to load_up_fpu but for the transactional version of the FP ··· 68 105 SYNC 69 106 MTMSRD(r5) 70 107 71 - lfd fr0,THREAD_TRANSACT_FPSCR(r3) 108 + addi r7,r3,THREAD_TRANSACT_FPSTATE 109 + lfd fr0,FPSTATE_FPSCR(r7) 72 110 MTFSF_L(fr0) 73 - REST_32FPVSRS_TRANSACT(0, R4, R3) 111 + REST_32FPVSRS(0, R4, R7) 74 112 75 113 /* FP/VSX off again */ 76 114 MTMSRD(r6) ··· 111 147 beq 1f 112 148 toreal(r4) 113 149 addi r4,r4,THREAD /* want last_task_used_math->thread */ 114 - SAVE_32FPVSRS(0, R5, R4) 150 + addi r8,r4,THREAD_FPSTATE 151 + SAVE_32FPVSRS(0, R5, R8) 115 152 mffs fr0 116 - stfd fr0,THREAD_FPSCR(r4) 153 + stfd fr0,FPSTATE_FPSCR(r8) 117 154 PPC_LL r5,PT_REGS(r4) 118 155 toreal(r5) 119 156 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) ··· 125 160 #endif /* CONFIG_SMP */ 126 161 /* enable use of FP after return */ 127 162 #ifdef CONFIG_PPC32 128 - mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ 163 + mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ 129 164 lwz r4,THREAD_FPEXC_MODE(r5) 130 165 ori r9,r9,MSR_FP /* enable FP for current */ 131 166 or r9,r9,r4 ··· 137 172 or r12,r12,r4 138 173 std r12,_MSR(r1) 139 174 #endif 140 - lfd fr0,THREAD_FPSCR(r5) 175 + addi r7,r5,THREAD_FPSTATE 176 + lfd fr0,FPSTATE_FPSCR(r7) 141 177 MTFSF_L(fr0) 142 - REST_32FPVSRS(0, R4, R5) 178 + REST_32FPVSRS(0, R4, R7) 143 179 #ifndef CONFIG_SMP 144 180 subi r4,r5,THREAD 145 181 fromreal(r4) ··· 174 208 addi r3,r3,THREAD /* want THREAD of task */ 175 209 PPC_LL r5,PT_REGS(r3) 176 210 PPC_LCMPI 0,r5,0 177 - SAVE_32FPVSRS(0, R4 ,R3) 211 + addi r6,r3,THREAD_FPSTATE 212 + SAVE_32FPVSRS(0, R4, R6) 178 213 mffs fr0 179 - stfd fr0,THREAD_FPSCR(r3) 214 + stfd fr0,FPSTATE_FPSCR(r6) 180 215 beq 1f 181 216 PPC_LL 
r4,_MSR-STACK_FRAME_OVERHEAD(r5) 182 217 li r3,MSR_FP|MSR_FE0|MSR_FE1
+3 -5
arch/powerpc/kernel/process.c
··· 1113 1113 #ifdef CONFIG_VSX 1114 1114 current->thread.used_vsr = 0; 1115 1115 #endif 1116 - memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); 1117 - current->thread.fpscr.val = 0; 1116 + memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state)); 1118 1117 #ifdef CONFIG_ALTIVEC 1119 - memset(current->thread.vr, 0, sizeof(current->thread.vr)); 1120 - memset(&current->thread.vscr, 0, sizeof(current->thread.vscr)); 1121 - current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ 1118 + memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state)); 1119 + current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ 1122 1120 current->thread.vrsave = 0; 1123 1121 current->thread.used_vr = 0; 1124 1122 #endif /* CONFIG_ALTIVEC */
+25 -24
arch/powerpc/kernel/ptrace.c
··· 362 362 void *kbuf, void __user *ubuf) 363 363 { 364 364 #ifdef CONFIG_VSX 365 - double buf[33]; 365 + u64 buf[33]; 366 366 int i; 367 367 #endif 368 368 flush_fp_to_thread(target); ··· 371 371 /* copy to local buffer then write that out */ 372 372 for (i = 0; i < 32 ; i++) 373 373 buf[i] = target->thread.TS_FPR(i); 374 - memcpy(&buf[32], &target->thread.fpscr, sizeof(double)); 374 + buf[32] = target->thread.fp_state.fpscr; 375 375 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); 376 376 377 377 #else 378 - BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != 379 - offsetof(struct thread_struct, TS_FPR(32))); 378 + BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != 379 + offsetof(struct thread_fp_state, fpr[32][0])); 380 380 381 381 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 382 - &target->thread.fpr, 0, -1); 382 + &target->thread.fp_state, 0, -1); 383 383 #endif 384 384 } 385 385 ··· 388 388 const void *kbuf, const void __user *ubuf) 389 389 { 390 390 #ifdef CONFIG_VSX 391 - double buf[33]; 391 + u64 buf[33]; 392 392 int i; 393 393 #endif 394 394 flush_fp_to_thread(target); ··· 400 400 return i; 401 401 for (i = 0; i < 32 ; i++) 402 402 target->thread.TS_FPR(i) = buf[i]; 403 - memcpy(&target->thread.fpscr, &buf[32], sizeof(double)); 403 + target->thread.fp_state.fpscr = buf[32]; 404 404 return 0; 405 405 #else 406 - BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != 407 - offsetof(struct thread_struct, TS_FPR(32))); 406 + BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != 407 + offsetof(struct thread_fp_state, fpr[32][0])); 408 408 409 409 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 410 - &target->thread.fpr, 0, -1); 410 + &target->thread.fp_state, 0, -1); 411 411 #endif 412 412 } 413 413 ··· 440 440 441 441 flush_altivec_to_thread(target); 442 442 443 - BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != 444 - offsetof(struct thread_struct, vr[32])); 443 + BUILD_BUG_ON(offsetof(struct thread_vr_state, 
vscr) != 444 + offsetof(struct thread_vr_state, vr[32])); 445 445 446 446 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 447 - &target->thread.vr, 0, 447 + &target->thread.vr_state, 0, 448 448 33 * sizeof(vector128)); 449 449 if (!ret) { 450 450 /* ··· 471 471 472 472 flush_altivec_to_thread(target); 473 473 474 - BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != 475 - offsetof(struct thread_struct, vr[32])); 474 + BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != 475 + offsetof(struct thread_vr_state, vr[32])); 476 476 477 477 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 478 - &target->thread.vr, 0, 33 * sizeof(vector128)); 478 + &target->thread.vr_state, 0, 479 + 33 * sizeof(vector128)); 479 480 if (!ret && count > 0) { 480 481 /* 481 482 * We use only the first word of vrsave. ··· 515 514 unsigned int pos, unsigned int count, 516 515 void *kbuf, void __user *ubuf) 517 516 { 518 - double buf[32]; 517 + u64 buf[32]; 519 518 int ret, i; 520 519 521 520 flush_vsx_to_thread(target); 522 521 523 522 for (i = 0; i < 32 ; i++) 524 - buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET]; 523 + buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; 525 524 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 526 525 buf, 0, 32 * sizeof(double)); 527 526 ··· 532 531 unsigned int pos, unsigned int count, 533 532 const void *kbuf, const void __user *ubuf) 534 533 { 535 - double buf[32]; 534 + u64 buf[32]; 536 535 int ret,i; 537 536 538 537 flush_vsx_to_thread(target); ··· 540 539 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 541 540 buf, 0, 32 * sizeof(double)); 542 541 for (i = 0; i < 32 ; i++) 543 - target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; 542 + target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; 544 543 545 544 546 545 return ret; ··· 1555 1554 1556 1555 flush_fp_to_thread(child); 1557 1556 if (fpidx < (PT_FPSCR - PT_FPR0)) 1558 - tmp = ((unsigned long *)child->thread.fpr) 1557 + tmp = ((unsigned long *)child->thread.fp_state.fpr) 
1559 1558 [fpidx * TS_FPRWIDTH]; 1560 1559 else 1561 - tmp = child->thread.fpscr.val; 1560 + tmp = child->thread.fp_state.fpscr; 1562 1561 } 1563 1562 ret = put_user(tmp, datalp); 1564 1563 break; ··· 1588 1587 1589 1588 flush_fp_to_thread(child); 1590 1589 if (fpidx < (PT_FPSCR - PT_FPR0)) 1591 - ((unsigned long *)child->thread.fpr) 1590 + ((unsigned long *)child->thread.fp_state.fpr) 1592 1591 [fpidx * TS_FPRWIDTH] = data; 1593 1592 else 1594 - child->thread.fpscr.val = data; 1593 + child->thread.fp_state.fpscr = data; 1595 1594 ret = 0; 1596 1595 } 1597 1596 break;
+4 -7
arch/powerpc/kernel/ptrace32.c
··· 43 43 #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) 44 44 #define FPRHALF(i) (((i) - PT_FPR0) & 1) 45 45 #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i) 46 - #define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0)) 47 46 48 47 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 49 48 compat_ulong_t caddr, compat_ulong_t cdata) ··· 104 105 * to be an array of unsigned int (32 bits) - the 105 106 * index passed in is based on this assumption. 106 107 */ 107 - tmp = ((unsigned int *)child->thread.fpr) 108 + tmp = ((unsigned int *)child->thread.fp_state.fpr) 108 109 [FPRINDEX(index)]; 109 110 } 110 111 ret = put_user((unsigned int)tmp, (u32 __user *)data); ··· 146 147 if (numReg >= PT_FPR0) { 147 148 flush_fp_to_thread(child); 148 149 /* get 64 bit FPR */ 149 - tmp = ((u64 *)child->thread.fpr) 150 - [FPRINDEX_3264(numReg)]; 150 + tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0]; 151 151 } else { /* register within PT_REGS struct */ 152 152 unsigned long tmp2; 153 153 ret = ptrace_get_reg(child, numReg, &tmp2); ··· 205 207 * to be an array of unsigned int (32 bits) - the 206 208 * index passed in is based on this assumption. 207 209 */ 208 - ((unsigned int *)child->thread.fpr) 210 + ((unsigned int *)child->thread.fp_state.fpr) 209 211 [FPRINDEX(index)] = data; 210 212 ret = 0; 211 213 } ··· 249 251 u64 *tmp; 250 252 flush_fp_to_thread(child); 251 253 /* get 64 bit FPR ... */ 252 - tmp = &(((u64 *)child->thread.fpr) 253 - [FPRINDEX_3264(numReg)]); 254 + tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0]; 254 255 /* ... write the 32 bit part we want */ 255 256 ((u32 *)tmp)[index % 2] = data; 256 257 ret = 0;
+37 -35
arch/powerpc/kernel/signal_32.c
··· 265 265 unsigned long copy_fpr_to_user(void __user *to, 266 266 struct task_struct *task) 267 267 { 268 - double buf[ELF_NFPREG]; 268 + u64 buf[ELF_NFPREG]; 269 269 int i; 270 270 271 271 /* save FPR copy to local buffer then write to the thread_struct */ 272 272 for (i = 0; i < (ELF_NFPREG - 1) ; i++) 273 273 buf[i] = task->thread.TS_FPR(i); 274 - memcpy(&buf[i], &task->thread.fpscr, sizeof(double)); 274 + buf[i] = task->thread.fp_state.fpscr; 275 275 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); 276 276 } 277 277 278 278 unsigned long copy_fpr_from_user(struct task_struct *task, 279 279 void __user *from) 280 280 { 281 - double buf[ELF_NFPREG]; 281 + u64 buf[ELF_NFPREG]; 282 282 int i; 283 283 284 284 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) 285 285 return 1; 286 286 for (i = 0; i < (ELF_NFPREG - 1) ; i++) 287 287 task->thread.TS_FPR(i) = buf[i]; 288 - memcpy(&task->thread.fpscr, &buf[i], sizeof(double)); 288 + task->thread.fp_state.fpscr = buf[i]; 289 289 290 290 return 0; 291 291 } ··· 293 293 unsigned long copy_vsx_to_user(void __user *to, 294 294 struct task_struct *task) 295 295 { 296 - double buf[ELF_NVSRHALFREG]; 296 + u64 buf[ELF_NVSRHALFREG]; 297 297 int i; 298 298 299 299 /* save FPR copy to local buffer then write to the thread_struct */ 300 300 for (i = 0; i < ELF_NVSRHALFREG; i++) 301 - buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET]; 301 + buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; 302 302 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); 303 303 } 304 304 305 305 unsigned long copy_vsx_from_user(struct task_struct *task, 306 306 void __user *from) 307 307 { 308 - double buf[ELF_NVSRHALFREG]; 308 + u64 buf[ELF_NVSRHALFREG]; 309 309 int i; 310 310 311 311 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) 312 312 return 1; 313 313 for (i = 0; i < ELF_NVSRHALFREG ; i++) 314 - task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; 314 + 
task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; 315 315 return 0; 316 316 } 317 317 ··· 319 319 unsigned long copy_transact_fpr_to_user(void __user *to, 320 320 struct task_struct *task) 321 321 { 322 - double buf[ELF_NFPREG]; 322 + u64 buf[ELF_NFPREG]; 323 323 int i; 324 324 325 325 /* save FPR copy to local buffer then write to the thread_struct */ 326 326 for (i = 0; i < (ELF_NFPREG - 1) ; i++) 327 327 buf[i] = task->thread.TS_TRANS_FPR(i); 328 - memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double)); 328 + buf[i] = task->thread.transact_fp.fpscr; 329 329 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); 330 330 } 331 331 332 332 unsigned long copy_transact_fpr_from_user(struct task_struct *task, 333 333 void __user *from) 334 334 { 335 - double buf[ELF_NFPREG]; 335 + u64 buf[ELF_NFPREG]; 336 336 int i; 337 337 338 338 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) 339 339 return 1; 340 340 for (i = 0; i < (ELF_NFPREG - 1) ; i++) 341 341 task->thread.TS_TRANS_FPR(i) = buf[i]; 342 - memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double)); 342 + task->thread.transact_fp.fpscr = buf[i]; 343 343 344 344 return 0; 345 345 } ··· 347 347 unsigned long copy_transact_vsx_to_user(void __user *to, 348 348 struct task_struct *task) 349 349 { 350 - double buf[ELF_NVSRHALFREG]; 350 + u64 buf[ELF_NVSRHALFREG]; 351 351 int i; 352 352 353 353 /* save FPR copy to local buffer then write to the thread_struct */ 354 354 for (i = 0; i < ELF_NVSRHALFREG; i++) 355 - buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET]; 355 + buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET]; 356 356 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); 357 357 } 358 358 359 359 unsigned long copy_transact_vsx_from_user(struct task_struct *task, 360 360 void __user *from) 361 361 { 362 - double buf[ELF_NVSRHALFREG]; 362 + u64 buf[ELF_NVSRHALFREG]; 363 363 int i; 364 364 365 365 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * 
sizeof(double))) 366 366 return 1; 367 367 for (i = 0; i < ELF_NVSRHALFREG ; i++) 368 - task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i]; 368 + task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i]; 369 369 return 0; 370 370 } 371 371 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ ··· 373 373 inline unsigned long copy_fpr_to_user(void __user *to, 374 374 struct task_struct *task) 375 375 { 376 - return __copy_to_user(to, task->thread.fpr, 376 + return __copy_to_user(to, task->thread.fp_state.fpr, 377 377 ELF_NFPREG * sizeof(double)); 378 378 } 379 379 380 380 inline unsigned long copy_fpr_from_user(struct task_struct *task, 381 381 void __user *from) 382 382 { 383 - return __copy_from_user(task->thread.fpr, from, 383 + return __copy_from_user(task->thread.fp_state.fpr, from, 384 384 ELF_NFPREG * sizeof(double)); 385 385 } 386 386 ··· 388 388 inline unsigned long copy_transact_fpr_to_user(void __user *to, 389 389 struct task_struct *task) 390 390 { 391 - return __copy_to_user(to, task->thread.transact_fpr, 391 + return __copy_to_user(to, task->thread.transact_fp.fpr, 392 392 ELF_NFPREG * sizeof(double)); 393 393 } 394 394 395 395 inline unsigned long copy_transact_fpr_from_user(struct task_struct *task, 396 396 void __user *from) 397 397 { 398 - return __copy_from_user(task->thread.transact_fpr, from, 398 + return __copy_from_user(task->thread.transact_fp.fpr, from, 399 399 ELF_NFPREG * sizeof(double)); 400 400 } 401 401 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ ··· 423 423 /* save altivec registers */ 424 424 if (current->thread.used_vr) { 425 425 flush_altivec_to_thread(current); 426 - if (__copy_to_user(&frame->mc_vregs, current->thread.vr, 426 + if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, 427 427 ELF_NVRREG * sizeof(vector128))) 428 428 return 1; 429 429 /* set MSR_VEC in the saved MSR value to indicate that ··· 534 534 /* save altivec registers */ 535 535 if (current->thread.used_vr) { 536 536 flush_altivec_to_thread(current); 537 - if 
(__copy_to_user(&frame->mc_vregs, current->thread.vr, 537 + if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, 538 538 ELF_NVRREG * sizeof(vector128))) 539 539 return 1; 540 540 if (msr & MSR_VEC) { 541 541 if (__copy_to_user(&tm_frame->mc_vregs, 542 - current->thread.transact_vr, 542 + &current->thread.transact_vr, 543 543 ELF_NVRREG * sizeof(vector128))) 544 544 return 1; 545 545 } else { 546 546 if (__copy_to_user(&tm_frame->mc_vregs, 547 - current->thread.vr, 547 + &current->thread.vr_state, 548 548 ELF_NVRREG * sizeof(vector128))) 549 549 return 1; 550 550 } ··· 692 692 regs->msr &= ~MSR_VEC; 693 693 if (msr & MSR_VEC) { 694 694 /* restore altivec registers from the stack */ 695 - if (__copy_from_user(current->thread.vr, &sr->mc_vregs, 695 + if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, 696 696 sizeof(sr->mc_vregs))) 697 697 return 1; 698 698 } else if (current->thread.used_vr) 699 - memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128)); 699 + memset(&current->thread.vr_state, 0, 700 + ELF_NVRREG * sizeof(vector128)); 700 701 701 702 /* Always get VRSAVE back */ 702 703 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) ··· 723 722 return 1; 724 723 } else if (current->thread.used_vsr) 725 724 for (i = 0; i < 32 ; i++) 726 - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; 725 + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; 727 726 #endif /* CONFIG_VSX */ 728 727 /* 729 728 * force the process to reload the FP registers from ··· 799 798 regs->msr &= ~MSR_VEC; 800 799 if (msr & MSR_VEC) { 801 800 /* restore altivec registers from the stack */ 802 - if (__copy_from_user(current->thread.vr, &sr->mc_vregs, 801 + if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, 803 802 sizeof(sr->mc_vregs)) || 804 - __copy_from_user(current->thread.transact_vr, 803 + __copy_from_user(&current->thread.transact_vr, 805 804 &tm_sr->mc_vregs, 806 805 sizeof(sr->mc_vregs))) 807 806 return 1; 808 807 } else if 
(current->thread.used_vr) { 809 - memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128)); 810 - memset(current->thread.transact_vr, 0, 808 + memset(&current->thread.vr_state, 0, 809 + ELF_NVRREG * sizeof(vector128)); 810 + memset(&current->thread.transact_vr, 0, 811 811 ELF_NVRREG * sizeof(vector128)); 812 812 } 813 813 ··· 840 838 return 1; 841 839 } else if (current->thread.used_vsr) 842 840 for (i = 0; i < 32 ; i++) { 843 - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; 844 - current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0; 841 + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; 842 + current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; 845 843 } 846 844 #endif /* CONFIG_VSX */ 847 845 ··· 1032 1030 if (__put_user(0, &rt_sf->uc.uc_link)) 1033 1031 goto badframe; 1034 1032 1035 - current->thread.fpscr.val = 0; /* turn off all fp exceptions */ 1033 + current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ 1036 1034 1037 1035 /* create a stack frame for the caller of the handler */ 1038 1036 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16); ··· 1464 1462 1465 1463 regs->link = tramp; 1466 1464 1467 - current->thread.fpscr.val = 0; /* turn off all fp exceptions */ 1465 + current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ 1468 1466 1469 1467 /* create a stack frame for the caller of the handler */ 1470 1468 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
+15 -14
arch/powerpc/kernel/signal_64.c
··· 103 103 if (current->thread.used_vr) { 104 104 flush_altivec_to_thread(current); 105 105 /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ 106 - err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128)); 106 + err |= __copy_to_user(v_regs, &current->thread.vr_state, 107 + 33 * sizeof(vector128)); 107 108 /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg) 108 109 * contains valid data. 109 110 */ ··· 196 195 if (current->thread.used_vr) { 197 196 flush_altivec_to_thread(current); 198 197 /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ 199 - err |= __copy_to_user(v_regs, current->thread.vr, 198 + err |= __copy_to_user(v_regs, &current->thread.vr_state, 200 199 33 * sizeof(vector128)); 201 200 /* If VEC was enabled there are transactional VRs valid too, 202 201 * else they're a copy of the checkpointed VRs. 203 202 */ 204 203 if (msr & MSR_VEC) 205 204 err |= __copy_to_user(tm_v_regs, 206 - current->thread.transact_vr, 205 + &current->thread.transact_vr, 207 206 33 * sizeof(vector128)); 208 207 else 209 208 err |= __copy_to_user(tm_v_regs, 210 - current->thread.vr, 209 + &current->thread.vr_state, 211 210 33 * sizeof(vector128)); 212 211 213 212 /* set MSR_VEC in the MSR value in the frame to indicate ··· 350 349 return -EFAULT; 351 350 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ 352 351 if (v_regs != NULL && (msr & MSR_VEC) != 0) 353 - err |= __copy_from_user(current->thread.vr, v_regs, 352 + err |= __copy_from_user(&current->thread.vr_state, v_regs, 354 353 33 * sizeof(vector128)); 355 354 else if (current->thread.used_vr) 356 - memset(current->thread.vr, 0, 33 * sizeof(vector128)); 355 + memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); 357 356 /* Always get VRSAVE back */ 358 357 if (v_regs != NULL) 359 358 err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); ··· 375 374 err |= copy_vsx_from_user(current, v_regs); 376 375 else 377 376 for (i = 0; i < 32 ; 
i++) 378 - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; 377 + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; 379 378 #endif 380 379 return err; 381 380 } ··· 469 468 return -EFAULT; 470 469 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ 471 470 if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) { 472 - err |= __copy_from_user(current->thread.vr, v_regs, 471 + err |= __copy_from_user(&current->thread.vr_state, v_regs, 473 472 33 * sizeof(vector128)); 474 - err |= __copy_from_user(current->thread.transact_vr, tm_v_regs, 473 + err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs, 475 474 33 * sizeof(vector128)); 476 475 } 477 476 else if (current->thread.used_vr) { 478 - memset(current->thread.vr, 0, 33 * sizeof(vector128)); 479 - memset(current->thread.transact_vr, 0, 33 * sizeof(vector128)); 477 + memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); 478 + memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128)); 480 479 } 481 480 /* Always get VRSAVE back */ 482 481 if (v_regs != NULL && tm_v_regs != NULL) { ··· 508 507 err |= copy_transact_vsx_from_user(current, tm_v_regs); 509 508 } else { 510 509 for (i = 0; i < 32 ; i++) { 511 - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; 512 - current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0; 510 + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; 511 + current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; 513 512 } 514 513 } 515 514 #endif ··· 748 747 goto badframe; 749 748 750 749 /* Make sure signal handler doesn't get spurious FP exceptions */ 751 - current->thread.fpscr.val = 0; 750 + current->thread.fp_state.fpscr = 0; 752 751 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 753 752 /* Remove TM bits from thread's MSR. The MSR in the sigcontext 754 753 * just indicates to userland that we were doing a transaction, but we
+22 -19
arch/powerpc/kernel/tm.S
··· 12 12 #include <asm/reg.h> 13 13 14 14 #ifdef CONFIG_VSX 15 - /* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */ 16 - #define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ 15 + /* See fpu.S, this is borrowed from there */ 16 + #define __SAVE_32FPRS_VSRS(n,c,base) \ 17 17 BEGIN_FTR_SECTION \ 18 18 b 2f; \ 19 19 END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 20 - SAVE_32FPRS_TRANSACT(n,base); \ 20 + SAVE_32FPRS(n,base); \ 21 21 b 3f; \ 22 - 2: SAVE_32VSRS_TRANSACT(n,c,base); \ 22 + 2: SAVE_32VSRS(n,c,base); \ 23 23 3: 24 - /* ...and this is just plain borrowed from there. */ 25 24 #define __REST_32FPRS_VSRS(n,c,base) \ 26 25 BEGIN_FTR_SECTION \ 27 26 b 2f; \ ··· 30 31 2: REST_32VSRS(n,c,base); \ 31 32 3: 32 33 #else 33 - #define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base) 34 - #define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base) 34 + #define __SAVE_32FPRS_VSRS(n,c,base) SAVE_32FPRS(n, base) 35 + #define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base) 35 36 #endif 36 - #define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ 37 - __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base) 37 + #define SAVE_32FPRS_VSRS(n,c,base) \ 38 + __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base) 38 39 #define REST_32FPRS_VSRS(n,c,base) \ 39 40 __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base) 40 41 ··· 156 157 andis. r0, r4, MSR_VEC@h 157 158 beq dont_backup_vec 158 159 159 - SAVE_32VRS_TRANSACT(0, r6, r3) /* r6 scratch, r3 thread */ 160 + addi r7, r3, THREAD_TRANSACT_VRSTATE 161 + SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */ 160 162 mfvscr vr0 161 - li r6, THREAD_TRANSACT_VSCR 162 - stvx vr0, r3, r6 163 + li r6, VRSTATE_VSCR 164 + stvx vr0, r7, r6 163 165 dont_backup_vec: 164 166 mfspr r0, SPRN_VRSAVE 165 167 std r0, THREAD_TRANSACT_VRSAVE(r3) ··· 168 168 andi. 
r0, r4, MSR_FP 169 169 beq dont_backup_fp 170 170 171 - SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3) /* r6 scratch, r3 thread */ 171 + addi r7, r3, THREAD_TRANSACT_FPSTATE 172 + SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */ 172 173 173 174 mffs fr0 174 - stfd fr0,THREAD_TRANSACT_FPSCR(r3) 175 + stfd fr0,FPSTATE_FPSCR(r7) 175 176 176 177 dont_backup_fp: 177 178 /* The moment we treclaim, ALL of our GPRs will switch ··· 359 358 andis. r0, r4, MSR_VEC@h 360 359 beq dont_restore_vec 361 360 362 - li r5, THREAD_VSCR 363 - lvx vr0, r3, r5 361 + addi r8, r3, THREAD_VRSTATE 362 + li r5, VRSTATE_VSCR 363 + lvx vr0, r8, r5 364 364 mtvscr vr0 365 - REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ 365 + REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */ 366 366 dont_restore_vec: 367 367 ld r5, THREAD_VRSAVE(r3) 368 368 mtspr SPRN_VRSAVE, r5 ··· 372 370 andi. r0, r4, MSR_FP 373 371 beq dont_restore_fp 374 372 375 - lfd fr0, THREAD_FPSCR(r3) 373 + addi r8, r3, THREAD_FPSTATE 374 + lfd fr0, FPSTATE_FPSCR(r8) 376 375 MTFSF_L(fr0) 377 - REST_32FPRS_VSRS(0, R4, R3) 376 + REST_32FPRS_VSRS(0, R4, R8) 378 377 379 378 dont_restore_fp: 380 379 mtmsr r6 /* FP/Vec off again! */
+3 -7
arch/powerpc/kernel/traps.c
··· 816 816 817 817 flush_fp_to_thread(current); 818 818 819 - code = __parse_fpscr(current->thread.fpscr.val); 819 + code = __parse_fpscr(current->thread.fp_state.fpscr); 820 820 821 821 _exception(SIGFPE, regs, code, regs->nip); 822 822 } ··· 1069 1069 return 0; 1070 1070 case 1: { 1071 1071 int code = 0; 1072 - code = __parse_fpscr(current->thread.fpscr.val); 1072 + code = __parse_fpscr(current->thread.fp_state.fpscr); 1073 1073 _exception(SIGFPE, regs, code, regs->nip); 1074 1074 return 0; 1075 1075 } ··· 1371 1371 1372 1372 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1373 1373 1374 - extern void do_load_up_fpu(struct pt_regs *regs); 1375 - 1376 1374 void fp_unavailable_tm(struct pt_regs *regs) 1377 1375 { 1378 1376 /* Note: This does not handle any kind of FP laziness. */ ··· 1401 1403 } 1402 1404 1403 1405 #ifdef CONFIG_ALTIVEC 1404 - extern void do_load_up_altivec(struct pt_regs *regs); 1405 - 1406 1406 void altivec_unavailable_tm(struct pt_regs *regs) 1407 1407 { 1408 1408 /* See the comments in fp_unavailable_tm(). This function operates ··· 1630 1634 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1631 1635 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1632 1636 "in %s at %lx\n", current->comm, regs->nip); 1633 - current->thread.vscr.u[3] |= 0x10000; 1637 + current->thread.vr_state.vscr.u[3] |= 0x10000; 1634 1638 } 1635 1639 } 1636 1640 #endif /* CONFIG_ALTIVEC */
+3 -3
arch/powerpc/kernel/vecemu.c
··· 271 271 vb = (instr >> 11) & 0x1f; 272 272 vc = (instr >> 6) & 0x1f; 273 273 274 - vrs = current->thread.vr; 274 + vrs = current->thread.vr_state.vr; 275 275 switch (instr & 0x3f) { 276 276 case 10: 277 277 switch (vc) { ··· 320 320 case 14: /* vctuxs */ 321 321 for (i = 0; i < 4; ++i) 322 322 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va, 323 - &current->thread.vscr.u[3]); 323 + &current->thread.vr_state.vscr.u[3]); 324 324 break; 325 325 case 15: /* vctsxs */ 326 326 for (i = 0; i < 4; ++i) 327 327 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va, 328 - &current->thread.vscr.u[3]); 328 + &current->thread.vr_state.vscr.u[3]); 329 329 break; 330 330 default: 331 331 return -EINVAL;
+15 -35
arch/powerpc/kernel/vector.S
··· 8 8 #include <asm/ptrace.h> 9 9 10 10 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 11 - /* 12 - * Wrapper to call load_up_altivec from C. 13 - * void do_load_up_altivec(struct pt_regs *regs); 14 - */ 15 - _GLOBAL(do_load_up_altivec) 16 - mflr r0 17 - std r0, 16(r1) 18 - stdu r1, -112(r1) 19 - 20 - subi r6, r3, STACK_FRAME_OVERHEAD 21 - /* load_up_altivec expects r12=MSR, r13=PACA, and returns 22 - * with r12 = new MSR. 23 - */ 24 - ld r12,_MSR(r6) 25 - GET_PACA(r13) 26 - bl load_up_altivec 27 - std r12,_MSR(r6) 28 - 29 - ld r0, 112+16(r1) 30 - addi r1, r1, 112 31 - mtlr r0 32 - blr 33 - 34 11 /* void do_load_up_transact_altivec(struct thread_struct *thread) 35 12 * 36 13 * This is similar to load_up_altivec but for the transactional version of the ··· 23 46 li r4,1 24 47 stw r4,THREAD_USED_VR(r3) 25 48 26 - li r10,THREAD_TRANSACT_VSCR 49 + li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR 27 50 lvx vr0,r10,r3 28 51 mtvscr vr0 29 - REST_32VRS_TRANSACT(0,r4,r3) 52 + addi r10,r3,THREAD_TRANSACT_VRSTATE 53 + REST_32VRS(0,r4,r10) 30 54 31 55 /* Disable VEC again. */ 32 56 MTMSRD(r6) ··· 37 59 #endif 38 60 39 61 /* 40 - * load_up_altivec(unused, unused, tsk) 41 62 * Disable VMX for the task which had it previously, 42 63 * and save its vector registers in its thread_struct. 43 64 * Enables the VMX for use in the kernel on return. 
··· 67 90 /* Save VMX state to last_task_used_altivec's THREAD struct */ 68 91 toreal(r4) 69 92 addi r4,r4,THREAD 70 - SAVE_32VRS(0,r5,r4) 93 + addi r7,r4,THREAD_VRSTATE 94 + SAVE_32VRS(0,r5,r7) 71 95 mfvscr vr0 72 - li r10,THREAD_VSCR 73 - stvx vr0,r10,r4 96 + li r10,VRSTATE_VSCR 97 + stvx vr0,r10,r7 74 98 /* Disable VMX for last_task_used_altivec */ 75 99 PPC_LL r5,PT_REGS(r4) 76 100 toreal(r5) ··· 103 125 oris r12,r12,MSR_VEC@h 104 126 std r12,_MSR(r1) 105 127 #endif 128 + addi r7,r5,THREAD_VRSTATE 106 129 li r4,1 107 - li r10,THREAD_VSCR 130 + li r10,VRSTATE_VSCR 108 131 stw r4,THREAD_USED_VR(r5) 109 - lvx vr0,r10,r5 132 + lvx vr0,r10,r7 110 133 mtvscr vr0 111 - REST_32VRS(0,r4,r5) 134 + REST_32VRS(0,r4,r7) 112 135 #ifndef CONFIG_SMP 113 136 /* Update last_task_used_altivec to 'current' */ 114 137 subi r4,r5,THREAD /* Back to 'current' */ ··· 144 165 PPC_LCMPI 0,r3,0 145 166 beqlr /* if no previous owner, done */ 146 167 addi r3,r3,THREAD /* want THREAD of task */ 168 + addi r7,r3,THREAD_VRSTATE 147 169 PPC_LL r5,PT_REGS(r3) 148 170 PPC_LCMPI 0,r5,0 149 - SAVE_32VRS(0,r4,r3) 171 + SAVE_32VRS(0,r4,r7) 150 172 mfvscr vr0 151 - li r4,THREAD_VSCR 152 - stvx vr0,r4,r3 173 + li r4,VRSTATE_VSCR 174 + stvx vr0,r4,r7 153 175 beq 1f 154 176 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 155 177 #ifdef CONFIG_VSX
+15 -21
arch/powerpc/kvm/book3s_pr.c
··· 444 444 #ifdef CONFIG_VSX 445 445 u64 *vcpu_vsx = vcpu->arch.vsr; 446 446 #endif 447 - u64 *thread_fpr = (u64*)t->fpr; 447 + u64 *thread_fpr = &t->fp_state.fpr[0][0]; 448 448 int i; 449 449 450 450 /* ··· 466 466 /* 467 467 * Note that on CPUs with VSX, giveup_fpu stores 468 468 * both the traditional FP registers and the added VSX 469 - * registers into thread.fpr[]. 469 + * registers into thread.fp_state.fpr[]. 470 470 */ 471 471 if (current->thread.regs->msr & MSR_FP) 472 472 giveup_fpu(current); 473 473 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 474 474 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; 475 475 476 - vcpu->arch.fpscr = t->fpscr.val; 476 + vcpu->arch.fpscr = t->fp_state.fpscr; 477 477 478 478 #ifdef CONFIG_VSX 479 479 if (cpu_has_feature(CPU_FTR_VSX)) ··· 486 486 if (msr & MSR_VEC) { 487 487 if (current->thread.regs->msr & MSR_VEC) 488 488 giveup_altivec(current); 489 - memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); 490 - vcpu->arch.vscr = t->vscr; 489 + memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr)); 490 + vcpu->arch.vscr = t->vr_state.vscr; 491 491 } 492 492 #endif 493 493 ··· 539 539 #ifdef CONFIG_VSX 540 540 u64 *vcpu_vsx = vcpu->arch.vsr; 541 541 #endif 542 - u64 *thread_fpr = (u64*)t->fpr; 542 + u64 *thread_fpr = &t->fp_state.fpr[0][0]; 543 543 int i; 544 544 545 545 /* When we have paired singles, we emulate in software */ ··· 584 584 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) 585 585 thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; 586 586 #endif 587 - t->fpscr.val = vcpu->arch.fpscr; 587 + t->fp_state.fpscr = vcpu->arch.fpscr; 588 588 t->fpexc_mode = 0; 589 589 kvmppc_load_up_fpu(); 590 590 } 591 591 592 592 if (msr & MSR_VEC) { 593 593 #ifdef CONFIG_ALTIVEC 594 - memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); 595 - t->vscr = vcpu->arch.vscr; 594 + memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); 595 + t->vr_state.vscr = vcpu->arch.vscr; 596 596 t->vrsave = -1; 597 597 
kvmppc_load_up_altivec(); 598 598 #endif ··· 1116 1116 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1117 1117 { 1118 1118 int ret; 1119 - double fpr[32][TS_FPRWIDTH]; 1120 - unsigned int fpscr; 1119 + struct thread_fp_state fp; 1121 1120 int fpexc_mode; 1122 1121 #ifdef CONFIG_ALTIVEC 1123 - vector128 vr[32]; 1124 - vector128 vscr; 1122 + struct thread_vr_state vr; 1125 1123 unsigned long uninitialized_var(vrsave); 1126 1124 int used_vr; 1127 1125 #endif ··· 1151 1153 /* Save FPU state in stack */ 1152 1154 if (current->thread.regs->msr & MSR_FP) 1153 1155 giveup_fpu(current); 1154 - memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); 1155 - fpscr = current->thread.fpscr.val; 1156 + fp = current->thread.fp_state; 1156 1157 fpexc_mode = current->thread.fpexc_mode; 1157 1158 1158 1159 #ifdef CONFIG_ALTIVEC ··· 1160 1163 if (used_vr) { 1161 1164 if (current->thread.regs->msr & MSR_VEC) 1162 1165 giveup_altivec(current); 1163 - memcpy(vr, current->thread.vr, sizeof(current->thread.vr)); 1164 - vscr = current->thread.vscr; 1166 + vr = current->thread.vr_state; 1165 1167 vrsave = current->thread.vrsave; 1166 1168 } 1167 1169 #endif ··· 1192 1196 current->thread.regs->msr = ext_msr; 1193 1197 1194 1198 /* Restore FPU/VSX state from stack */ 1195 - memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); 1196 - current->thread.fpscr.val = fpscr; 1199 + current->thread.fp_state = fp; 1197 1200 current->thread.fpexc_mode = fpexc_mode; 1198 1201 1199 1202 #ifdef CONFIG_ALTIVEC 1200 1203 /* Restore Altivec state from stack */ 1201 1204 if (used_vr && current->thread.used_vr) { 1202 - memcpy(current->thread.vr, vr, sizeof(current->thread.vr)); 1203 - current->thread.vscr = vscr; 1205 + current->thread.vr_state = vr; 1204 1206 current->thread.vrsave = vrsave; 1205 1207 } 1206 1208 current->thread.used_vr = used_vr;
+9 -10
arch/powerpc/kvm/booke.c
··· 656 656 { 657 657 int ret, s; 658 658 #ifdef CONFIG_PPC_FPU 659 - unsigned int fpscr; 659 + struct thread_fp_state fp; 660 660 int fpexc_mode; 661 - u64 fpr[32]; 662 661 #endif 663 662 664 663 if (!vcpu->arch.sane) { ··· 676 677 #ifdef CONFIG_PPC_FPU 677 678 /* Save userspace FPU state in stack */ 678 679 enable_kernel_fp(); 679 - memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); 680 - fpscr = current->thread.fpscr.val; 680 + fp = current->thread.fp_state; 681 681 fpexc_mode = current->thread.fpexc_mode; 682 682 683 683 /* Restore guest FPU state to thread */ 684 - memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr)); 685 - current->thread.fpscr.val = vcpu->arch.fpscr; 684 + memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr, 685 + sizeof(vcpu->arch.fpr)); 686 + current->thread.fp_state.fpscr = vcpu->arch.fpscr; 686 687 687 688 /* 688 689 * Since we can't trap on MSR_FP in GS-mode, we consider the guest ··· 708 709 vcpu->fpu_active = 0; 709 710 710 711 /* Save guest FPU state from thread */ 711 - memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr)); 712 - vcpu->arch.fpscr = current->thread.fpscr.val; 712 + memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr, 713 + sizeof(vcpu->arch.fpr)); 714 + vcpu->arch.fpscr = current->thread.fp_state.fpscr; 713 715 714 716 /* Restore userspace FPU state from stack */ 715 - memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); 716 - current->thread.fpscr.val = fpscr; 717 + current->thread.fp_state = fp; 717 718 current->thread.fpexc_mode = fpexc_mode; 718 719 #endif 719 720