Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/fpu: Convert task_struct::thread.fpu accesses to use x86_task_fpu()

This will make the removal of the task_struct::thread.fpu array
easier.

No change in functionality - code generated before and after this
commit is identical on x86-defconfig:

kepler:~/tip> diff -up vmlinux.before.asm vmlinux.after.asm
kepler:~/tip>

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Chang S. Bae <chang.seok.bae@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Link: https://lore.kernel.org/r/20250409211127.3544993-3-mingo@kernel.org

Ingo Molnar e3bfa385 77fbcced

+68 -68
+1 -1
arch/x86/include/asm/fpu/sched.h
··· 41 41 { 42 42 if (cpu_feature_enabled(X86_FEATURE_FPU) && 43 43 !(old->flags & (PF_KTHREAD | PF_USER_WORKER))) { 44 - struct fpu *old_fpu = &old->thread.fpu; 44 + struct fpu *old_fpu = x86_task_fpu(old); 45 45 46 46 save_fpregs_to_fpstate(old_fpu); 47 47 /*
+2 -2
arch/x86/kernel/fpu/context.h
··· 53 53 /* Internal helper for switch_fpu_return() and signal frame setup */ 54 54 static inline void fpregs_restore_userregs(void) 55 55 { 56 - struct fpu *fpu = &current->thread.fpu; 56 + struct fpu *fpu = x86_task_fpu(current); 57 57 int cpu = smp_processor_id(); 58 58 59 59 if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER))) ··· 67 67 * If PKRU is enabled, then the PKRU value is already 68 68 * correct because it was either set in switch_to() or in 69 69 * flush_thread(). So it is excluded because it might be 70 - * not up to date in current->thread.fpu.xsave state. 70 + * not up to date in current->thread.fpu->xsave state. 71 71 * 72 72 * XFD state is handled in restore_fpregs_from_fpstate(). 73 73 */
+15 -15
arch/x86/kernel/fpu/core.c
··· 211 211 return; 212 212 213 213 spin_lock_irq(&current->sighand->siglock); 214 - fpuperm = &current->group_leader->thread.fpu.guest_perm; 214 + fpuperm = &x86_task_fpu(current->group_leader)->guest_perm; 215 215 perm = fpuperm->__state_perm; 216 216 217 217 /* First fpstate allocation locks down permissions. */ ··· 323 323 */ 324 324 void fpu_sync_guest_vmexit_xfd_state(void) 325 325 { 326 - struct fpstate *fps = current->thread.fpu.fpstate; 326 + struct fpstate *fps = x86_task_fpu(current)->fpstate; 327 327 328 328 lockdep_assert_irqs_disabled(); 329 329 if (fpu_state_size_dynamic()) { ··· 337 337 int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) 338 338 { 339 339 struct fpstate *guest_fps = guest_fpu->fpstate; 340 - struct fpu *fpu = &current->thread.fpu; 340 + struct fpu *fpu = x86_task_fpu(current); 341 341 struct fpstate *cur_fps = fpu->fpstate; 342 342 343 343 fpregs_lock(); ··· 438 438 if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && 439 439 !test_thread_flag(TIF_NEED_FPU_LOAD)) { 440 440 set_thread_flag(TIF_NEED_FPU_LOAD); 441 - save_fpregs_to_fpstate(&current->thread.fpu); 441 + save_fpregs_to_fpstate(x86_task_fpu(current)); 442 442 } 443 443 __cpu_invalidate_fpregs_state(); 444 444 ··· 467 467 */ 468 468 void fpu_sync_fpstate(struct fpu *fpu) 469 469 { 470 - WARN_ON_FPU(fpu != &current->thread.fpu); 470 + WARN_ON_FPU(fpu != x86_task_fpu(current)); 471 471 472 472 fpregs_lock(); 473 473 trace_x86_fpu_before_save(fpu); ··· 552 552 static inline void fpu_inherit_perms(struct fpu *dst_fpu) 553 553 { 554 554 if (fpu_state_size_dynamic()) { 555 - struct fpu *src_fpu = &current->group_leader->thread.fpu; 555 + struct fpu *src_fpu = x86_task_fpu(current->group_leader); 556 556 557 557 spin_lock_irq(&current->sighand->siglock); 558 558 /* Fork also inherits the permissions of the parent */ ··· 572 572 if (!ssp) 573 573 return 0; 574 574 575 - xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave, 575 + xstate = 
get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave, 575 + xstate = get_xsave_addr(&x86_task_fpu(dst)->fpstate->regs.xsave, 576 576 XFEATURE_CET_USER); 577 577 578 578 /* ··· 593 593 int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal, 594 594 unsigned long ssp) 595 595 { 596 - struct fpu *src_fpu = &current->thread.fpu; 597 - struct fpu *dst_fpu = &dst->thread.fpu; 596 + struct fpu *src_fpu = x86_task_fpu(current); 597 + struct fpu *dst_fpu = x86_task_fpu(dst); 598 598 599 599 /* The new task's FPU state cannot be valid in the hardware. */ 600 600 dst_fpu->last_cpu = -1; ··· 686 686 { 687 687 preempt_disable(); 688 688 689 - if (fpu == &current->thread.fpu) { 689 + if (fpu == x86_task_fpu(current)) { 690 690 /* Ignore delayed exceptions from user space */ 691 691 asm volatile("1: fwait\n" 692 692 "2:\n" ··· 720 720 */ 721 721 static void fpu_reset_fpregs(void) 722 722 { 723 - struct fpu *fpu = &current->thread.fpu; 723 + struct fpu *fpu = x86_task_fpu(current); 724 724 725 725 fpregs_lock(); 726 726 __fpu_invalidate_fpregs_state(fpu); ··· 749 749 */ 750 750 void fpu__clear_user_states(struct fpu *fpu) 751 751 { 752 - WARN_ON_FPU(fpu != &current->thread.fpu); 752 + WARN_ON_FPU(fpu != x86_task_fpu(current)); 753 753 754 754 fpregs_lock(); 755 755 if (!cpu_feature_enabled(X86_FEATURE_FPU)) { ··· 782 782 783 783 void fpu_flush_thread(void) 784 784 { 785 - fpstate_reset(&current->thread.fpu); 785 + fpstate_reset(x86_task_fpu(current)); 786 786 fpu_reset_fpregs(); 787 787 } 788 788 /* ··· 823 823 */ 824 824 void fpregs_assert_state_consistent(void) 825 825 { 826 - struct fpu *fpu = &current->thread.fpu; 826 + struct fpu *fpu = x86_task_fpu(current); 827 827 828 828 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 829 829 return; ··· 835 835 836 836 void fpregs_mark_activate(void) 837 837 { 838 - struct fpu *fpu = &current->thread.fpu; 838 + struct fpu *fpu = x86_task_fpu(current); 839 839 840 840 fpregs_activate(fpu); 841 841 fpu->last_cpu = smp_processor_id();
+4 -4
arch/x86/kernel/fpu/init.c
··· 38 38 /* Flush out any pending x87 state: */ 39 39 #ifdef CONFIG_MATH_EMULATION 40 40 if (!boot_cpu_has(X86_FEATURE_FPU)) 41 - fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft); 41 + fpstate_init_soft(&x86_task_fpu(current)->fpstate->regs.soft); 42 42 else 43 43 #endif 44 44 asm volatile ("fninit"); ··· 154 154 * Subtract off the static size of the register state. 155 155 * It potentially has a bunch of padding. 156 156 */ 157 - task_size -= sizeof(current->thread.fpu.__fpstate.regs); 157 + task_size -= sizeof(union fpregs_state); 158 158 159 159 /* 160 160 * Add back the dynamically-calculated register state ··· 204 204 fpu_kernel_cfg.default_size = size; 205 205 fpu_user_cfg.max_size = size; 206 206 fpu_user_cfg.default_size = size; 207 - fpstate_reset(&current->thread.fpu); 207 + fpstate_reset(x86_task_fpu(current)); 208 208 } 209 209 210 210 /* ··· 213 213 */ 214 214 void __init fpu__init_system(void) 215 215 { 216 - fpstate_reset(&current->thread.fpu); 216 + fpstate_reset(x86_task_fpu(current)); 217 217 fpu__init_system_early_generic(); 218 218 219 219 /*
+11 -11
arch/x86/kernel/fpu/regset.c
··· 45 45 */ 46 46 static void sync_fpstate(struct fpu *fpu) 47 47 { 48 - if (fpu == &current->thread.fpu) 48 + if (fpu == x86_task_fpu(current)) 49 49 fpu_sync_fpstate(fpu); 50 50 } 51 51 ··· 63 63 * Only stopped child tasks can be used to modify the FPU 64 64 * state in the fpstate buffer: 65 65 */ 66 - WARN_ON_FPU(fpu == &current->thread.fpu); 66 + WARN_ON_FPU(fpu == x86_task_fpu(current)); 67 67 68 68 __fpu_invalidate_fpregs_state(fpu); 69 69 } ··· 71 71 int xfpregs_get(struct task_struct *target, const struct user_regset *regset, 72 72 struct membuf to) 73 73 { 74 - struct fpu *fpu = &target->thread.fpu; 74 + struct fpu *fpu = x86_task_fpu(target); 75 75 76 76 if (!cpu_feature_enabled(X86_FEATURE_FXSR)) 77 77 return -ENODEV; ··· 91 91 unsigned int pos, unsigned int count, 92 92 const void *kbuf, const void __user *ubuf) 93 93 { 94 - struct fpu *fpu = &target->thread.fpu; 94 + struct fpu *fpu = x86_task_fpu(target); 95 95 struct fxregs_state newstate; 96 96 int ret; 97 97 ··· 133 133 if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) 134 134 return -ENODEV; 135 135 136 - sync_fpstate(&target->thread.fpu); 136 + sync_fpstate(x86_task_fpu(target)); 137 137 138 138 copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE); 139 139 return 0; ··· 143 143 unsigned int pos, unsigned int count, 144 144 const void *kbuf, const void __user *ubuf) 145 145 { 146 - struct fpu *fpu = &target->thread.fpu; 146 + struct fpu *fpu = x86_task_fpu(target); 147 147 struct xregs_state *tmpbuf = NULL; 148 148 int ret; 149 149 ··· 187 187 int ssp_get(struct task_struct *target, const struct user_regset *regset, 188 188 struct membuf to) 189 189 { 190 - struct fpu *fpu = &target->thread.fpu; 190 + struct fpu *fpu = x86_task_fpu(target); 191 191 struct cet_user_state *cetregs; 192 192 193 193 if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || ··· 214 214 unsigned int pos, unsigned int count, 215 215 const void *kbuf, const void __user *ubuf) 216 216 { 217 - struct fpu *fpu = &target->thread.fpu; 
217 + struct fpu *fpu = x86_task_fpu(target); 218 218 struct xregs_state *xsave = &fpu->fpstate->regs.xsave; 219 219 struct cet_user_state *cetregs; 220 220 unsigned long user_ssp; ··· 368 368 void 369 369 convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) 370 370 { 371 - __convert_from_fxsr(env, tsk, &tsk->thread.fpu.fpstate->regs.fxsave); 371 + __convert_from_fxsr(env, tsk, &x86_task_fpu(tsk)->fpstate->regs.fxsave); 372 372 } 373 373 374 374 void convert_to_fxsr(struct fxregs_state *fxsave, ··· 401 401 int fpregs_get(struct task_struct *target, const struct user_regset *regset, 402 402 struct membuf to) 403 403 { 404 - struct fpu *fpu = &target->thread.fpu; 404 + struct fpu *fpu = x86_task_fpu(target); 405 405 struct user_i387_ia32_struct env; 406 406 struct fxregs_state fxsave, *fx; 407 407 ··· 433 433 unsigned int pos, unsigned int count, 434 434 const void *kbuf, const void __user *ubuf) 435 435 { 436 - struct fpu *fpu = &target->thread.fpu; 436 + struct fpu *fpu = x86_task_fpu(target); 437 437 struct user_i387_ia32_struct env; 438 438 int ret; 439 439
+9 -9
arch/x86/kernel/fpu/signal.c
··· 43 43 * fpstate layout with out copying the extended state information 44 44 * in the memory layout. 45 45 */ 46 - if (__get_user(magic2, (__u32 __user *)(fpstate + current->thread.fpu.fpstate->user_size))) 46 + if (__get_user(magic2, (__u32 __user *)(fpstate + x86_task_fpu(current)->fpstate->user_size))) 47 47 return false; 48 48 49 49 if (likely(magic2 == FP_XSTATE_MAGIC2)) 50 50 return true; 51 51 setfx: 52 - trace_x86_fpu_xstate_check_failed(&current->thread.fpu); 52 + trace_x86_fpu_xstate_check_failed(x86_task_fpu(current)); 53 53 54 54 /* Set the parameters for fx only state */ 55 55 fx_sw->magic1 = 0; ··· 64 64 static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf) 65 65 { 66 66 if (use_fxsr()) { 67 - struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave; 67 + struct xregs_state *xsave = &x86_task_fpu(tsk)->fpstate->regs.xsave; 68 68 struct user_i387_ia32_struct env; 69 69 struct _fpstate_32 __user *fp = buf; 70 70 71 71 fpregs_lock(); 72 72 if (!test_thread_flag(TIF_NEED_FPU_LOAD)) 73 - fxsave(&tsk->thread.fpu.fpstate->regs.fxsave); 73 + fxsave(&x86_task_fpu(tsk)->fpstate->regs.fxsave); 74 74 fpregs_unlock(); 75 75 76 76 convert_from_fxsr(&env, tsk); ··· 184 184 bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru) 185 185 { 186 186 struct task_struct *tsk = current; 187 - struct fpstate *fpstate = tsk->thread.fpu.fpstate; 187 + struct fpstate *fpstate = x86_task_fpu(tsk)->fpstate; 188 188 bool ia32_fxstate = (buf != buf_fx); 189 189 int ret; 190 190 ··· 272 272 */ 273 273 static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only) 274 274 { 275 - struct fpu *fpu = &current->thread.fpu; 275 + struct fpu *fpu = x86_task_fpu(current); 276 276 int ret; 277 277 278 278 /* Restore enabled features only. 
*/ ··· 332 332 bool ia32_fxstate) 333 333 { 334 334 struct task_struct *tsk = current; 335 - struct fpu *fpu = &tsk->thread.fpu; 335 + struct fpu *fpu = x86_task_fpu(tsk); 336 336 struct user_i387_ia32_struct env; 337 337 bool success, fx_only = false; 338 338 union fpregs_state *fpregs; ··· 452 452 */ 453 453 bool fpu__restore_sig(void __user *buf, int ia32_frame) 454 454 { 455 - struct fpu *fpu = &current->thread.fpu; 455 + struct fpu *fpu = x86_task_fpu(current); 456 456 void __user *buf_fx = buf; 457 457 bool ia32_fxstate = false; 458 458 bool success = false; ··· 499 499 fpu__alloc_mathframe(unsigned long sp, int ia32_frame, 500 500 unsigned long *buf_fx, unsigned long *size) 501 501 { 502 - unsigned long frame_size = xstate_sigframe_size(current->thread.fpu.fpstate); 502 + unsigned long frame_size = xstate_sigframe_size(x86_task_fpu(current)->fpstate); 503 503 504 504 *buf_fx = sp = round_down(sp - frame_size, 64); 505 505 if (ia32_frame && use_fxsr()) {
+11 -11
arch/x86/kernel/fpu/xstate.c
··· 763 763 */ 764 764 init_fpstate.xfd = 0; 765 765 766 - fpstate_reset(&current->thread.fpu); 766 + fpstate_reset(x86_task_fpu(current)); 767 767 } 768 768 769 769 /* ··· 871 871 goto out_disable; 872 872 873 873 /* Reset the state for the current task */ 874 - fpstate_reset(&current->thread.fpu); 874 + fpstate_reset(x86_task_fpu(current)); 875 875 876 876 /* 877 877 * Update info used for ptrace frames; use standard-format size and no ··· 945 945 } 946 946 947 947 if (fpu_state_size_dynamic()) 948 - wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd); 948 + wrmsrl(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd); 949 949 } 950 950 951 951 /* ··· 1227 1227 void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk, 1228 1228 enum xstate_copy_mode copy_mode) 1229 1229 { 1230 - __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate, 1231 - tsk->thread.fpu.fpstate->user_xfeatures, 1230 + __copy_xstate_to_uabi_buf(to, x86_task_fpu(tsk)->fpstate, 1231 + x86_task_fpu(tsk)->fpstate->user_xfeatures, 1232 1232 tsk->thread.pkru, copy_mode); 1233 1233 } 1234 1234 ··· 1368 1368 int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, 1369 1369 const void __user *ubuf) 1370 1370 { 1371 - return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru); 1371 + return copy_uabi_to_xstate(x86_task_fpu(tsk)->fpstate, NULL, ubuf, &tsk->thread.pkru); 1372 1372 } 1373 1373 1374 1374 static bool validate_independent_components(u64 mask) ··· 1462 1462 * The XFD MSR does not match fpstate->xfd. That's invalid when 1463 1463 * the passed in fpstate is current's fpstate. 
1464 1464 */ 1465 - if (fpstate->xfd == current->thread.fpu.fpstate->xfd) 1465 + if (fpstate->xfd == x86_task_fpu(current)->fpstate->xfd) 1466 1466 return false; 1467 1467 1468 1468 /* ··· 1539 1539 static int fpstate_realloc(u64 xfeatures, unsigned int ksize, 1540 1540 unsigned int usize, struct fpu_guest *guest_fpu) 1541 1541 { 1542 - struct fpu *fpu = &current->thread.fpu; 1542 + struct fpu *fpu = x86_task_fpu(current); 1543 1543 struct fpstate *curfps, *newfps = NULL; 1544 1544 unsigned int fpsize; 1545 1545 bool in_use; ··· 1632 1632 * AVX512. 1633 1633 */ 1634 1634 bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); 1635 - struct fpu *fpu = &current->group_leader->thread.fpu; 1635 + struct fpu *fpu = x86_task_fpu(current->group_leader); 1636 1636 struct fpu_state_perm *perm; 1637 1637 unsigned int ksize, usize; 1638 1638 u64 mask; ··· 1735 1735 return -EPERM; 1736 1736 } 1737 1737 1738 - fpu = &current->group_leader->thread.fpu; 1738 + fpu = x86_task_fpu(current->group_leader); 1739 1739 perm = guest_fpu ? &fpu->guest_perm : &fpu->perm; 1740 1740 ksize = perm->__state_size; 1741 1741 usize = perm->__user_state_size; ··· 1840 1840 */ 1841 1841 static void avx512_status(struct seq_file *m, struct task_struct *task) 1842 1842 { 1843 - unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp); 1843 + unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); 1844 1844 long delta; 1845 1845 1846 1846 if (!timestamp) {
+3 -3
arch/x86/kernel/fpu/xstate.h
··· 22 22 23 23 static inline u64 xstate_get_group_perm(bool guest) 24 24 { 25 - struct fpu *fpu = &current->group_leader->thread.fpu; 25 + struct fpu *fpu = x86_task_fpu(current->group_leader); 26 26 struct fpu_state_perm *perm; 27 27 28 28 /* Pairs with WRITE_ONCE() in xstate_request_perm() */ ··· 288 288 * internally, e.g. PKRU. That's user space ABI and also required 289 289 * to allow the signal handler to modify PKRU. 290 290 */ 291 - struct fpstate *fpstate = current->thread.fpu.fpstate; 291 + struct fpstate *fpstate = x86_task_fpu(current)->fpstate; 292 292 u64 mask = fpstate->user_xfeatures; 293 293 u32 lmask; 294 294 u32 hmask; ··· 322 322 u32 hmask = mask >> 32; 323 323 int err; 324 324 325 - xfd_validate_state(current->thread.fpu.fpstate, mask, true); 325 + xfd_validate_state(x86_task_fpu(current)->fpstate, mask, true); 326 326 327 327 stac(); 328 328 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+3 -3
arch/x86/kernel/process.c
··· 103 103 dst->thread.vm86 = NULL; 104 104 #endif 105 105 /* Drop the copied pointer to current's fpstate */ 106 - dst->thread.fpu.fpstate = NULL; 106 + x86_task_fpu(dst)->fpstate = NULL; 107 107 108 108 return 0; 109 109 } ··· 112 112 void arch_release_task_struct(struct task_struct *tsk) 113 113 { 114 114 if (fpu_state_size_dynamic()) 115 - fpstate_free(&tsk->thread.fpu); 115 + fpstate_free(x86_task_fpu(tsk)); 116 116 } 117 117 #endif 118 118 ··· 122 122 void exit_thread(struct task_struct *tsk) 123 123 { 124 124 struct thread_struct *t = &tsk->thread; 125 - struct fpu *fpu = &t->fpu; 125 + struct fpu *fpu = x86_task_fpu(tsk); 126 126 127 127 if (test_thread_flag(TIF_IO_BITMAP)) 128 128 io_bitmap_exit(tsk);
+3 -3
arch/x86/kernel/signal.c
··· 255 255 handle_signal(struct ksignal *ksig, struct pt_regs *regs) 256 256 { 257 257 bool stepping, failed; 258 - struct fpu *fpu = &current->thread.fpu; 258 + struct fpu *fpu = x86_task_fpu(current); 259 259 260 260 if (v8086_mode(regs)) 261 261 save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL); ··· 423 423 if (!fpu_state_size_dynamic() && !strict_sigaltstack_size) 424 424 return true; 425 425 426 - fsize += current->group_leader->thread.fpu.perm.__user_state_size; 426 + fsize += x86_task_fpu(current->group_leader)->perm.__user_state_size; 427 427 if (likely(ss_size > fsize)) 428 428 return true; 429 429 430 430 if (strict_sigaltstack_size) 431 431 return ss_size > fsize; 432 432 433 - mask = current->group_leader->thread.fpu.perm.__state_perm; 433 + mask = x86_task_fpu(current->group_leader)->perm.__state_perm; 434 434 if (mask & XFEATURE_MASK_USER_DYNAMIC) 435 435 return ss_size > fsize; 436 436
+1 -1
arch/x86/kernel/traps.c
··· 1295 1295 static void math_error(struct pt_regs *regs, int trapnr) 1296 1296 { 1297 1297 struct task_struct *task = current; 1298 - struct fpu *fpu = &task->thread.fpu; 1298 + struct fpu *fpu = x86_task_fpu(task); 1299 1299 int si_code; 1300 1300 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : 1301 1301 "simd exception";
+1 -1
arch/x86/math-emu/fpu_aux.c
··· 53 53 54 54 void finit(void) 55 55 { 56 - fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft); 56 + fpstate_init_soft(&x86_task_fpu(current)->fpstate->regs.soft); 57 57 } 58 58 59 59 /*
+2 -2
arch/x86/math-emu/fpu_entry.c
··· 641 641 unsigned int pos, unsigned int count, 642 642 const void *kbuf, const void __user *ubuf) 643 643 { 644 - struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft; 644 + struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft; 645 645 void *space = s387->st_space; 646 646 int ret; 647 647 int offset, other, i, tags, regnr, tag, newtop; ··· 692 692 const struct user_regset *regset, 693 693 struct membuf to) 694 694 { 695 - struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft; 695 + struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft; 696 696 const void *space = s387->st_space; 697 697 int offset = (S387->ftop & 7) * 10, other = 80 - offset; 698 698
+1 -1
arch/x86/math-emu/fpu_system.h
··· 73 73 return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_WRITABLE; 74 74 } 75 75 76 - #define I387 (&current->thread.fpu.fpstate->regs) 76 + #define I387 (&x86_task_fpu(current)->fpstate->regs) 77 77 #define FPU_info (I387->soft.info) 78 78 79 79 #define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs))
+1 -1
arch/x86/mm/extable.c
··· 111 111 112 112 /* 113 113 * Handler for when we fail to restore a task's FPU state. We should never get 114 - * here because the FPU state of a task using the FPU (task->thread.fpu.state) 114 + * here because the FPU state of a task using the FPU (struct fpu::fpstate) 115 115 * should always be valid. However, past bugs have allowed userspace to set 116 116 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn(). 117 117 * These caused XRSTOR to fail when switching to the task, leaking the FPU