Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "MIPS: Save/restore MSA context around signals"

This reverts commit eec43a224cf1 "MIPS: Save/restore MSA context around
signals" and the MSA parts of ca750649e08c "MIPS: kernel: signal:
Prevent save/restore FPU context in user memory" (the restore path of
which appears incorrect anyway...).

The reverted patch took care not to break compatibility with userland
users of struct sigcontext, but inadvertently changed the offset of the
uc_sigmask field of struct ucontext. Thus Linux v3.15 breaks the
userland ABI. The MSA context will need to be saved via some other
opt-in mechanism, but for now revert the change to reduce the fallout.

This will have minimal impact upon use of MSA since the only supported
CPU which includes it (the P5600) is 32-bit and therefore requires that
the experimental CONFIG_MIPS_O32_FP64_SUPPORT Kconfig option be selected
before the kernel will set FR=1 for a task, a requirement for MSA use.
Thus the users of MSA are limited to known small groups of people & this
patch won't be breaking any previously working MSA-using userland
outside of experimental settings.

[ralf@linux-mips.org: Fixed rejects.]

Cc: stable@vger.kernel.org
Reported-by: Joseph S. Myers <joseph@codesourcery.com>
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/7107/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Paul Burton and committed by
Ralf Baechle
16f77de8 fb738f85

+16 -363
-2
arch/mips/include/asm/sigcontext.h
··· 32 32 __u32 sc_lo2; 33 33 __u32 sc_hi3; 34 34 __u32 sc_lo3; 35 - __u64 sc_msaregs[32]; /* Most significant 64 bits */ 36 - __u32 sc_msa_csr; 37 35 }; 38 36 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 39 37 #endif /* _ASM_SIGCONTEXT_H */
-8
arch/mips/include/uapi/asm/sigcontext.h
··· 12 12 #include <linux/types.h> 13 13 #include <asm/sgidefs.h> 14 14 15 - /* Bits which may be set in sc_used_math */ 16 - #define USEDMATH_FP (1 << 0) 17 - #define USEDMATH_MSA (1 << 1) 18 - 19 15 #if _MIPS_SIM == _MIPS_SIM_ABI32 20 16 21 17 /* ··· 37 41 unsigned long sc_lo2; 38 42 unsigned long sc_hi3; 39 43 unsigned long sc_lo3; 40 - unsigned long long sc_msaregs[32]; /* Most significant 64 bits */ 41 - unsigned long sc_msa_csr; 42 44 }; 43 45 44 46 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ ··· 70 76 __u32 sc_used_math; 71 77 __u32 sc_dsp; 72 78 __u32 sc_reserved; 73 - __u64 sc_msaregs[32]; 74 - __u32 sc_msa_csr; 75 79 }; 76 80 77 81
-3
arch/mips/kernel/asm-offsets.c
··· 293 293 OFFSET(SC_LO2, sigcontext, sc_lo2); 294 294 OFFSET(SC_HI3, sigcontext, sc_hi3); 295 295 OFFSET(SC_LO3, sigcontext, sc_lo3); 296 - OFFSET(SC_MSAREGS, sigcontext, sc_msaregs); 297 296 BLANK(); 298 297 } 299 298 #endif ··· 307 308 OFFSET(SC_MDLO, sigcontext, sc_mdlo); 308 309 OFFSET(SC_PC, sigcontext, sc_pc); 309 310 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); 310 - OFFSET(SC_MSAREGS, sigcontext, sc_msaregs); 311 311 BLANK(); 312 312 } 313 313 #endif ··· 318 320 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); 319 321 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); 320 322 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); 321 - OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs); 322 323 BLANK(); 323 324 } 324 325 #endif
-213
arch/mips/kernel/r4k_fpu.S
··· 13 13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 14 14 */ 15 15 #include <asm/asm.h> 16 - #include <asm/asmmacro.h> 17 16 #include <asm/errno.h> 18 17 #include <asm/fpregdef.h> 19 18 #include <asm/mipsregs.h> ··· 244 245 li v0, 0 # success 245 246 END(_restore_fp_context32) 246 247 #endif 247 - 248 - #ifdef CONFIG_CPU_HAS_MSA 249 - 250 - .macro save_sc_msareg wr, off, sc, tmp 251 - #ifdef CONFIG_64BIT 252 - copy_u_d \tmp, \wr, 1 253 - EX sd \tmp, (\off+(\wr*8))(\sc) 254 - #elif defined(CONFIG_CPU_LITTLE_ENDIAN) 255 - copy_u_w \tmp, \wr, 2 256 - EX sw \tmp, (\off+(\wr*8)+0)(\sc) 257 - copy_u_w \tmp, \wr, 3 258 - EX sw \tmp, (\off+(\wr*8)+4)(\sc) 259 - #else /* CONFIG_CPU_BIG_ENDIAN */ 260 - copy_u_w \tmp, \wr, 2 261 - EX sw \tmp, (\off+(\wr*8)+4)(\sc) 262 - copy_u_w \tmp, \wr, 3 263 - EX sw \tmp, (\off+(\wr*8)+0)(\sc) 264 - #endif 265 - .endm 266 - 267 - /* 268 - * int _save_msa_context(struct sigcontext *sc) 269 - * 270 - * Save the upper 64 bits of each vector register along with the MSA_CSR 271 - * register into sc. Returns zero on success, else non-zero. 
272 - */ 273 - LEAF(_save_msa_context) 274 - save_sc_msareg 0, SC_MSAREGS, a0, t0 275 - save_sc_msareg 1, SC_MSAREGS, a0, t0 276 - save_sc_msareg 2, SC_MSAREGS, a0, t0 277 - save_sc_msareg 3, SC_MSAREGS, a0, t0 278 - save_sc_msareg 4, SC_MSAREGS, a0, t0 279 - save_sc_msareg 5, SC_MSAREGS, a0, t0 280 - save_sc_msareg 6, SC_MSAREGS, a0, t0 281 - save_sc_msareg 7, SC_MSAREGS, a0, t0 282 - save_sc_msareg 8, SC_MSAREGS, a0, t0 283 - save_sc_msareg 9, SC_MSAREGS, a0, t0 284 - save_sc_msareg 10, SC_MSAREGS, a0, t0 285 - save_sc_msareg 11, SC_MSAREGS, a0, t0 286 - save_sc_msareg 12, SC_MSAREGS, a0, t0 287 - save_sc_msareg 13, SC_MSAREGS, a0, t0 288 - save_sc_msareg 14, SC_MSAREGS, a0, t0 289 - save_sc_msareg 15, SC_MSAREGS, a0, t0 290 - save_sc_msareg 16, SC_MSAREGS, a0, t0 291 - save_sc_msareg 17, SC_MSAREGS, a0, t0 292 - save_sc_msareg 18, SC_MSAREGS, a0, t0 293 - save_sc_msareg 19, SC_MSAREGS, a0, t0 294 - save_sc_msareg 20, SC_MSAREGS, a0, t0 295 - save_sc_msareg 21, SC_MSAREGS, a0, t0 296 - save_sc_msareg 22, SC_MSAREGS, a0, t0 297 - save_sc_msareg 23, SC_MSAREGS, a0, t0 298 - save_sc_msareg 24, SC_MSAREGS, a0, t0 299 - save_sc_msareg 25, SC_MSAREGS, a0, t0 300 - save_sc_msareg 26, SC_MSAREGS, a0, t0 301 - save_sc_msareg 27, SC_MSAREGS, a0, t0 302 - save_sc_msareg 28, SC_MSAREGS, a0, t0 303 - save_sc_msareg 29, SC_MSAREGS, a0, t0 304 - save_sc_msareg 30, SC_MSAREGS, a0, t0 305 - save_sc_msareg 31, SC_MSAREGS, a0, t0 306 - jr ra 307 - li v0, 0 308 - END(_save_msa_context) 309 - 310 - #ifdef CONFIG_MIPS32_COMPAT 311 - 312 - /* 313 - * int _save_msa_context32(struct sigcontext32 *sc) 314 - * 315 - * Save the upper 64 bits of each vector register along with the MSA_CSR 316 - * register into sc. Returns zero on success, else non-zero. 
317 - */ 318 - LEAF(_save_msa_context32) 319 - save_sc_msareg 0, SC32_MSAREGS, a0, t0 320 - save_sc_msareg 1, SC32_MSAREGS, a0, t0 321 - save_sc_msareg 2, SC32_MSAREGS, a0, t0 322 - save_sc_msareg 3, SC32_MSAREGS, a0, t0 323 - save_sc_msareg 4, SC32_MSAREGS, a0, t0 324 - save_sc_msareg 5, SC32_MSAREGS, a0, t0 325 - save_sc_msareg 6, SC32_MSAREGS, a0, t0 326 - save_sc_msareg 7, SC32_MSAREGS, a0, t0 327 - save_sc_msareg 8, SC32_MSAREGS, a0, t0 328 - save_sc_msareg 9, SC32_MSAREGS, a0, t0 329 - save_sc_msareg 10, SC32_MSAREGS, a0, t0 330 - save_sc_msareg 11, SC32_MSAREGS, a0, t0 331 - save_sc_msareg 12, SC32_MSAREGS, a0, t0 332 - save_sc_msareg 13, SC32_MSAREGS, a0, t0 333 - save_sc_msareg 14, SC32_MSAREGS, a0, t0 334 - save_sc_msareg 15, SC32_MSAREGS, a0, t0 335 - save_sc_msareg 16, SC32_MSAREGS, a0, t0 336 - save_sc_msareg 17, SC32_MSAREGS, a0, t0 337 - save_sc_msareg 18, SC32_MSAREGS, a0, t0 338 - save_sc_msareg 19, SC32_MSAREGS, a0, t0 339 - save_sc_msareg 20, SC32_MSAREGS, a0, t0 340 - save_sc_msareg 21, SC32_MSAREGS, a0, t0 341 - save_sc_msareg 22, SC32_MSAREGS, a0, t0 342 - save_sc_msareg 23, SC32_MSAREGS, a0, t0 343 - save_sc_msareg 24, SC32_MSAREGS, a0, t0 344 - save_sc_msareg 25, SC32_MSAREGS, a0, t0 345 - save_sc_msareg 26, SC32_MSAREGS, a0, t0 346 - save_sc_msareg 27, SC32_MSAREGS, a0, t0 347 - save_sc_msareg 28, SC32_MSAREGS, a0, t0 348 - save_sc_msareg 29, SC32_MSAREGS, a0, t0 349 - save_sc_msareg 30, SC32_MSAREGS, a0, t0 350 - save_sc_msareg 31, SC32_MSAREGS, a0, t0 351 - jr ra 352 - li v0, 0 353 - END(_save_msa_context32) 354 - 355 - #endif /* CONFIG_MIPS32_COMPAT */ 356 - 357 - .macro restore_sc_msareg wr, off, sc, tmp 358 - #ifdef CONFIG_64BIT 359 - EX ld \tmp, (\off+(\wr*8))(\sc) 360 - insert_d \wr, 1, \tmp 361 - #elif defined(CONFIG_CPU_LITTLE_ENDIAN) 362 - EX lw \tmp, (\off+(\wr*8)+0)(\sc) 363 - insert_w \wr, 2, \tmp 364 - EX lw \tmp, (\off+(\wr*8)+4)(\sc) 365 - insert_w \wr, 3, \tmp 366 - #else /* CONFIG_CPU_BIG_ENDIAN */ 367 - EX lw \tmp, 
(\off+(\wr*8)+4)(\sc) 368 - insert_w \wr, 2, \tmp 369 - EX lw \tmp, (\off+(\wr*8)+0)(\sc) 370 - insert_w \wr, 3, \tmp 371 - #endif 372 - .endm 373 - 374 - /* 375 - * int _restore_msa_context(struct sigcontext *sc) 376 - */ 377 - LEAF(_restore_msa_context) 378 - restore_sc_msareg 0, SC_MSAREGS, a0, t0 379 - restore_sc_msareg 1, SC_MSAREGS, a0, t0 380 - restore_sc_msareg 2, SC_MSAREGS, a0, t0 381 - restore_sc_msareg 3, SC_MSAREGS, a0, t0 382 - restore_sc_msareg 4, SC_MSAREGS, a0, t0 383 - restore_sc_msareg 5, SC_MSAREGS, a0, t0 384 - restore_sc_msareg 6, SC_MSAREGS, a0, t0 385 - restore_sc_msareg 7, SC_MSAREGS, a0, t0 386 - restore_sc_msareg 8, SC_MSAREGS, a0, t0 387 - restore_sc_msareg 9, SC_MSAREGS, a0, t0 388 - restore_sc_msareg 10, SC_MSAREGS, a0, t0 389 - restore_sc_msareg 11, SC_MSAREGS, a0, t0 390 - restore_sc_msareg 12, SC_MSAREGS, a0, t0 391 - restore_sc_msareg 13, SC_MSAREGS, a0, t0 392 - restore_sc_msareg 14, SC_MSAREGS, a0, t0 393 - restore_sc_msareg 15, SC_MSAREGS, a0, t0 394 - restore_sc_msareg 16, SC_MSAREGS, a0, t0 395 - restore_sc_msareg 17, SC_MSAREGS, a0, t0 396 - restore_sc_msareg 18, SC_MSAREGS, a0, t0 397 - restore_sc_msareg 19, SC_MSAREGS, a0, t0 398 - restore_sc_msareg 20, SC_MSAREGS, a0, t0 399 - restore_sc_msareg 21, SC_MSAREGS, a0, t0 400 - restore_sc_msareg 22, SC_MSAREGS, a0, t0 401 - restore_sc_msareg 23, SC_MSAREGS, a0, t0 402 - restore_sc_msareg 24, SC_MSAREGS, a0, t0 403 - restore_sc_msareg 25, SC_MSAREGS, a0, t0 404 - restore_sc_msareg 26, SC_MSAREGS, a0, t0 405 - restore_sc_msareg 27, SC_MSAREGS, a0, t0 406 - restore_sc_msareg 28, SC_MSAREGS, a0, t0 407 - restore_sc_msareg 29, SC_MSAREGS, a0, t0 408 - restore_sc_msareg 30, SC_MSAREGS, a0, t0 409 - restore_sc_msareg 31, SC_MSAREGS, a0, t0 410 - jr ra 411 - li v0, 0 412 - END(_restore_msa_context) 413 - 414 - #ifdef CONFIG_MIPS32_COMPAT 415 - 416 - /* 417 - * int _restore_msa_context32(struct sigcontext32 *sc) 418 - */ 419 - LEAF(_restore_msa_context32) 420 - restore_sc_msareg 0, 
SC32_MSAREGS, a0, t0 421 - restore_sc_msareg 1, SC32_MSAREGS, a0, t0 422 - restore_sc_msareg 2, SC32_MSAREGS, a0, t0 423 - restore_sc_msareg 3, SC32_MSAREGS, a0, t0 424 - restore_sc_msareg 4, SC32_MSAREGS, a0, t0 425 - restore_sc_msareg 5, SC32_MSAREGS, a0, t0 426 - restore_sc_msareg 6, SC32_MSAREGS, a0, t0 427 - restore_sc_msareg 7, SC32_MSAREGS, a0, t0 428 - restore_sc_msareg 8, SC32_MSAREGS, a0, t0 429 - restore_sc_msareg 9, SC32_MSAREGS, a0, t0 430 - restore_sc_msareg 10, SC32_MSAREGS, a0, t0 431 - restore_sc_msareg 11, SC32_MSAREGS, a0, t0 432 - restore_sc_msareg 12, SC32_MSAREGS, a0, t0 433 - restore_sc_msareg 13, SC32_MSAREGS, a0, t0 434 - restore_sc_msareg 14, SC32_MSAREGS, a0, t0 435 - restore_sc_msareg 15, SC32_MSAREGS, a0, t0 436 - restore_sc_msareg 16, SC32_MSAREGS, a0, t0 437 - restore_sc_msareg 17, SC32_MSAREGS, a0, t0 438 - restore_sc_msareg 18, SC32_MSAREGS, a0, t0 439 - restore_sc_msareg 19, SC32_MSAREGS, a0, t0 440 - restore_sc_msareg 20, SC32_MSAREGS, a0, t0 441 - restore_sc_msareg 21, SC32_MSAREGS, a0, t0 442 - restore_sc_msareg 22, SC32_MSAREGS, a0, t0 443 - restore_sc_msareg 23, SC32_MSAREGS, a0, t0 444 - restore_sc_msareg 24, SC32_MSAREGS, a0, t0 445 - restore_sc_msareg 25, SC32_MSAREGS, a0, t0 446 - restore_sc_msareg 26, SC32_MSAREGS, a0, t0 447 - restore_sc_msareg 27, SC32_MSAREGS, a0, t0 448 - restore_sc_msareg 28, SC32_MSAREGS, a0, t0 449 - restore_sc_msareg 29, SC32_MSAREGS, a0, t0 450 - restore_sc_msareg 30, SC32_MSAREGS, a0, t0 451 - restore_sc_msareg 31, SC32_MSAREGS, a0, t0 452 - jr ra 453 - li v0, 0 454 - END(_restore_msa_context32) 455 - 456 - #endif /* CONFIG_MIPS32_COMPAT */ 457 - 458 - #endif /* CONFIG_CPU_HAS_MSA */ 459 248 460 249 .set reorder 461 250
+8 -71
arch/mips/kernel/signal.c
··· 31 31 #include <linux/bitops.h> 32 32 #include <asm/cacheflush.h> 33 33 #include <asm/fpu.h> 34 - #include <asm/msa.h> 35 34 #include <asm/sim.h> 36 35 #include <asm/ucontext.h> 37 36 #include <asm/cpu-features.h> ··· 46 47 47 48 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); 48 49 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); 49 - 50 - extern asmlinkage int _save_msa_context(struct sigcontext __user *sc); 51 - extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc); 52 50 53 51 struct sigframe { 54 52 u32 sf_ass[4]; /* argument save space for o32 */ ··· 96 100 } 97 101 98 102 /* 99 - * These functions will save only the upper 64 bits of the vector registers, 100 - * since the lower 64 bits have already been saved as the scalar FP context. 101 - */ 102 - static int copy_msa_to_sigcontext(struct sigcontext __user *sc) 103 - { 104 - int i; 105 - int err = 0; 106 - 107 - for (i = 0; i < NUM_FPU_REGS; i++) { 108 - err |= 109 - __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1), 110 - &sc->sc_msaregs[i]); 111 - } 112 - err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); 113 - 114 - return err; 115 - } 116 - 117 - static int copy_msa_from_sigcontext(struct sigcontext __user *sc) 118 - { 119 - int i; 120 - int err = 0; 121 - u64 val; 122 - 123 - for (i = 0; i < NUM_FPU_REGS; i++) { 124 - err |= __get_user(val, &sc->sc_msaregs[i]); 125 - set_fpr64(&current->thread.fpu.fpr[i], 1, val); 126 - } 127 - err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); 128 - 129 - return err; 130 - } 131 - 132 - /* 133 103 * Helper routines 134 104 */ 135 - static int protected_save_fp_context(struct sigcontext __user *sc, 136 - unsigned used_math) 105 + static int protected_save_fp_context(struct sigcontext __user *sc) 137 106 { 138 107 int err; 139 - bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA); 140 108 #ifndef CONFIG_EVA 141 109 while (1) { 142 110 lock_fpu_owner(); 143 111 if 
(is_fpu_owner()) { 144 112 err = save_fp_context(sc); 145 - if (save_msa && !err) 146 - err = _save_msa_context(sc); 147 113 unlock_fpu_owner(); 148 114 } else { 149 115 unlock_fpu_owner(); 150 116 err = copy_fp_to_sigcontext(sc); 151 - if (save_msa && !err) 152 - err = copy_msa_to_sigcontext(sc); 153 117 } 154 118 if (likely(!err)) 155 119 break; ··· 125 169 * EVA does not have FPU EVA instructions so saving fpu context directly 126 170 * does not work. 127 171 */ 128 - disable_msa(); 129 172 lose_fpu(1); 130 173 err = save_fp_context(sc); /* this might fail */ 131 - if (save_msa && !err) 132 - err = copy_msa_to_sigcontext(sc); 133 174 #endif 134 175 return err; 135 176 } 136 177 137 - static int protected_restore_fp_context(struct sigcontext __user *sc, 138 - unsigned used_math) 178 + static int protected_restore_fp_context(struct sigcontext __user *sc) 139 179 { 140 180 int err, tmp __maybe_unused; 141 - bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA); 142 181 #ifndef CONFIG_EVA 143 182 while (1) { 144 183 lock_fpu_owner(); 145 184 if (is_fpu_owner()) { 146 185 err = restore_fp_context(sc); 147 - if (restore_msa && !err) { 148 - enable_msa(); 149 - err = _restore_msa_context(sc); 150 - } else { 151 - /* signal handler may have used MSA */ 152 - disable_msa(); 153 - } 154 186 unlock_fpu_owner(); 155 187 } else { 156 188 unlock_fpu_owner(); 157 189 err = copy_fp_from_sigcontext(sc); 158 - if (!err && (used_math & USEDMATH_MSA)) 159 - err = copy_msa_from_sigcontext(sc); 160 190 } 161 191 if (likely(!err)) 162 192 break; ··· 158 216 * EVA does not have FPU EVA instructions so restoring fpu context 159 217 * directly does not work. 
160 218 */ 161 - enable_msa(); 162 219 lose_fpu(0); 163 220 err = restore_fp_context(sc); /* this might fail */ 164 - if (restore_msa && !err) 165 - err = copy_msa_from_sigcontext(sc); 166 221 #endif 167 222 return err; 168 223 } ··· 191 252 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 192 253 } 193 254 194 - used_math = used_math() ? USEDMATH_FP : 0; 195 - used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0; 255 + used_math = !!used_math(); 196 256 err |= __put_user(used_math, &sc->sc_used_math); 197 257 198 258 if (used_math) { ··· 199 261 * Save FPU state to signal context. Signal handler 200 262 * will "inherit" current FPU state. 201 263 */ 202 - err |= protected_save_fp_context(sc, used_math); 264 + err |= protected_save_fp_context(sc); 203 265 } 204 266 return err; 205 267 } ··· 224 286 } 225 287 226 288 static int 227 - check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math) 289 + check_and_restore_fp_context(struct sigcontext __user *sc) 228 290 { 229 291 int err, sig; 230 292 231 293 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 232 294 if (err > 0) 233 295 err = 0; 234 - err |= protected_restore_fp_context(sc, used_math); 296 + err |= protected_restore_fp_context(sc); 235 297 return err ?: sig; 236 298 } 237 299 ··· 271 333 if (used_math) { 272 334 /* restore fpu context if we have used it before */ 273 335 if (!err) 274 - err = check_and_restore_fp_context(sc, used_math); 336 + err = check_and_restore_fp_context(sc); 275 337 } else { 276 - /* signal handler may have used FPU or MSA. Disable them. */ 277 - disable_msa(); 338 + /* signal handler may have used FPU. Give it up. */ 278 339 lose_fpu(0); 279 340 } 280 341
+8 -66
arch/mips/kernel/signal32.c
··· 30 30 #include <asm/sim.h> 31 31 #include <asm/ucontext.h> 32 32 #include <asm/fpu.h> 33 - #include <asm/msa.h> 34 33 #include <asm/war.h> 35 34 #include <asm/vdso.h> 36 35 #include <asm/dsp.h> ··· 41 42 42 43 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); 43 44 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); 44 - 45 - extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc); 46 - extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc); 47 45 48 46 /* 49 47 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... ··· 111 115 } 112 116 113 117 /* 114 - * These functions will save only the upper 64 bits of the vector registers, 115 - * since the lower 64 bits have already been saved as the scalar FP context. 116 - */ 117 - static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc) 118 - { 119 - int i; 120 - int err = 0; 121 - 122 - for (i = 0; i < NUM_FPU_REGS; i++) { 123 - err |= 124 - __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1), 125 - &sc->sc_msaregs[i]); 126 - } 127 - err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); 128 - 129 - return err; 130 - } 131 - 132 - static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc) 133 - { 134 - int i; 135 - int err = 0; 136 - u64 val; 137 - 138 - for (i = 0; i < NUM_FPU_REGS; i++) { 139 - err |= __get_user(val, &sc->sc_msaregs[i]); 140 - set_fpr64(&current->thread.fpu.fpr[i], 1, val); 141 - } 142 - err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); 143 - 144 - return err; 145 - } 146 - 147 - /* 148 118 * sigcontext handlers 149 119 */ 150 - static int protected_save_fp_context32(struct sigcontext32 __user *sc, 151 - unsigned used_math) 120 + static int protected_save_fp_context32(struct sigcontext32 __user *sc) 152 121 { 153 122 int err; 154 - bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA); 155 123 while (1) { 156 124 lock_fpu_owner(); 157 125 if 
(is_fpu_owner()) { 158 126 err = save_fp_context32(sc); 159 - if (save_msa && !err) 160 - err = _save_msa_context32(sc); 161 127 unlock_fpu_owner(); 162 128 } else { 163 129 unlock_fpu_owner(); 164 130 err = copy_fp_to_sigcontext32(sc); 165 - if (save_msa && !err) 166 - err = copy_msa_to_sigcontext32(sc); 167 131 } 168 132 if (likely(!err)) 169 133 break; ··· 137 181 return err; 138 182 } 139 183 140 - static int protected_restore_fp_context32(struct sigcontext32 __user *sc, 141 - unsigned used_math) 184 + static int protected_restore_fp_context32(struct sigcontext32 __user *sc) 142 185 { 143 186 int err, tmp __maybe_unused; 144 - bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA); 145 187 while (1) { 146 188 lock_fpu_owner(); 147 189 if (is_fpu_owner()) { 148 190 err = restore_fp_context32(sc); 149 - if (restore_msa && !err) { 150 - enable_msa(); 151 - err = _restore_msa_context32(sc); 152 - } else { 153 - /* signal handler may have used MSA */ 154 - disable_msa(); 155 - } 156 191 unlock_fpu_owner(); 157 192 } else { 158 193 unlock_fpu_owner(); 159 194 err = copy_fp_from_sigcontext32(sc); 160 - if (restore_msa && !err) 161 - err = copy_msa_from_sigcontext32(sc); 162 195 } 163 196 if (likely(!err)) 164 197 break; ··· 186 241 err |= __put_user(mflo3(), &sc->sc_lo3); 187 242 } 188 243 189 - used_math = used_math() ? USEDMATH_FP : 0; 190 - used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0; 244 + used_math = !!used_math(); 191 245 err |= __put_user(used_math, &sc->sc_used_math); 192 246 193 247 if (used_math) { ··· 194 250 * Save FPU state to signal context. Signal handler 195 251 * will "inherit" current FPU state. 
196 252 */ 197 - err |= protected_save_fp_context32(sc, used_math); 253 + err |= protected_save_fp_context32(sc); 198 254 } 199 255 return err; 200 256 } 201 257 202 258 static int 203 - check_and_restore_fp_context32(struct sigcontext32 __user *sc, 204 - unsigned used_math) 259 + check_and_restore_fp_context32(struct sigcontext32 __user *sc) 205 260 { 206 261 int err, sig; 207 262 208 263 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 209 264 if (err > 0) 210 265 err = 0; 211 - err |= protected_restore_fp_context32(sc, used_math); 266 + err |= protected_restore_fp_context32(sc); 212 267 return err ?: sig; 213 268 } 214 269 ··· 244 301 if (used_math) { 245 302 /* restore fpu context if we have used it before */ 246 303 if (!err) 247 - err = check_and_restore_fp_context32(sc, used_math); 304 + err = check_and_restore_fp_context32(sc); 248 305 } else { 249 - /* signal handler may have used FPU or MSA. Disable them. */ 250 - disable_msa(); 306 + /* signal handler may have used FPU. Give it up. */ 251 307 lose_fpu(0); 252 308 } 253 309