Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear_user_states(struct fpu *fpu);
extern void fpu__clear_all(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

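/*
 * Default ("init") FPU register state, set up at boot; used whenever a task's
 * FPU state is (re)initialized:
 */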
extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
        /*
         * XRSTORS requires these bits set in xcomp_bv, or it will
         * trigger #GP:
         */
        xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

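/*
 * Legacy FXSAVE init state: control word 0x37f masks all x87 exceptions,
 * MXCSR is set to its architectural default:
 */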
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
        fx->cwd = 0x37f;
        fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

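/*
 * Exception-safe wrappers around the legacy FPU save/restore instructions:
 *
 *  - user_insn():       the memory operand may be a user pointer, so the
 *                       access is bracketed by STAC/CLAC and a fixup stores
 *                       -1 in the error value if the instruction faults.
 *
 *  - kernel_insn_err(): like user_insn(), but for kernel buffers and thus
 *                       without STAC/CLAC; returns -1 on a fault.
 *
 *  - kernel_insn():     no error return; a faulting restore is fixed up via
 *                       ex_handler_fprestore() instead.
 */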
#define user_insn(insn, output, input...) \
({ \
        int err; \
\
        might_fault(); \
\
        asm volatile(ASM_STAC "\n" \
                     "1:" #insn "\n\t" \
                     "2: " ASM_CLAC "\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3: movl $-1,%[err]\n" \
                     "   jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : [err] "=r" (err), output \
                     : "0"(0), input); \
        err; \
})

#define kernel_insn_err(insn, output, input...) \
({ \
        int err; \
        asm volatile("1:" #insn "\n\t" \
                     "2:\n" \
                     ".section .fixup,\"ax\"\n" \
                     "3: movl $-1,%[err]\n" \
                     "   jmp 2b\n" \
                     ".previous\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : [err] "=r" (err), output \
                     : "0"(0), input); \
        err; \
})

#define kernel_insn(insn, output, input...) \
        asm volatile("1:" #insn "\n\t" \
                     "2:\n" \
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
                     : output : input)

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
        return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
        else
                return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else
                return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
        kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
{
        return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
        if (IS_ENABLED(CONFIG_X86_32))
                asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
        else
                asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE           ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT        ".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES          ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR          ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS         ".byte " REX_PREFIX "0x0f,0xc7,0x1f"

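/*
 * XSTATE_OP() emits one of the raw-encoded instructions above with the
 * requested feature mask in EDX:EAX and the xsave area in (%edi)/(%rdi);
 * @err is cleared on success and set to -2 if the instruction faults:
 */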
#define XSTATE_OP(op, st, lmask, hmask, err) \
        asm volatile("1:" op "\n\t" \
                     "xor %[err], %[err]\n" \
                     "2:\n\t" \
                     ".pushsection .fixup,\"ax\"\n\t" \
                     "3: movl $-2,%[err]\n\t" \
                     "jmp 2b\n\t" \
                     ".popsection\n\t" \
                     _ASM_EXTABLE(1b, 3b) \
                     : [err] "=r" (err) \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
                     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception at.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err) \
        asm volatile(ALTERNATIVE_2(XSAVE, \
                                   XSAVEOPT, X86_FEATURE_XSAVEOPT, \
                                   XSAVES, X86_FEATURE_XSAVES) \
                     "\n" \
                     "xor %[err], %[err]\n" \
                     "3:\n" \
                     ".pushsection .fixup,\"ax\"\n" \
                     "4: movl $-2, %[err]\n" \
                     "jmp 3b\n" \
                     ".popsection\n" \
                     _ASM_EXTABLE(661b, 4b) \
                     : [err] "=r" (err) \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
                     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask) \
        asm volatile(ALTERNATIVE(XRSTOR, \
                                 XRSTORS, X86_FEATURE_XSAVES) \
                     "\n" \
                     "3:\n" \
                     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore) \
                     : \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
                     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
        u64 mask = xfeatures_mask_all;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (boot_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (boot_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        /*
         * We should never fault when copying from a kernel buffer, and the FPU
         * state we set at boot time should be valid.
         */
        WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
        u64 mask = xfeatures_mask_all;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON_FPU(!alternatives_patched);

        XSTATE_XSAVE(xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand it.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
        u64 mask = xfeatures_mask_user();
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        /*
         * Clear the xsave header first, so that reserved fields are
         * initialized to zero.
         */
        err = __clear_user(&buf->header, sizeof(buf->header));
        if (unlikely(err))
                return -EFAULT;

        stac();
        XSTATE_OP(XSAVE, buf, lmask, hmask, err);
        clac();

        return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
        struct xregs_state *xstate = ((__force struct xregs_state *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        stac();
        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
        clac();

        return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead of
 * an exception.
 */
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        return err;
}

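/*
 * Save the current task's FPU registers into fpu->state. The return value
 * tells whether the register contents are still valid afterwards; see its
 * use in switch_fpu_prepare() below:
 */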
extern int copy_fpregs_to_fpstate(struct fpu *fpu);

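/*
 * Restore @fpstate into the registers using the best mechanism the CPU
 * provides: XRSTOR(S), FXRSTOR or FRSTOR:
 */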
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
        if (use_xsave()) {
                copy_kernel_to_xregs(&fpstate->xsave, mask);
        } else {
                if (use_fxsr())
                        copy_kernel_to_fxregs(&fpstate->fxsave);
                else
                        copy_kernel_to_fregs(&fpstate->fsave);
        }
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
        /*
         * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
         * pending. Clear the x87 state here by setting it to fixed values.
         * "m" is a random variable that should be in L1.
         */
        if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
                asm volatile(
                        "fnclex\n\t"
                        "emms\n\t"
                        "fildl %P[addr]"        /* set F?P to defined value */
                        : : [addr] "m" (fpstate));
        }

        __copy_kernel_to_fpregs(fpstate, -1);
}

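/* Save the current task's user FPU state to the signal frame at @buf: */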
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
        __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
        fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
        return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

/*
 * These generally need preemption protection to work;
 * try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
        trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
        trace_x86_fpu_regs_activated(fpu);
}

/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 */
static inline void __fpregs_load_activate(void)
{
        struct fpu *fpu = &current->thread.fpu;
        int cpu = smp_processor_id();

        if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
                return;

        if (!fpregs_state_valid(fpu, cpu)) {
                copy_kernel_to_fpregs(&fpu->state);
                fpregs_activate(fpu);
                fpu->last_cpu = cpu;
        }
        clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. The registers must be loaded before
 * they are used or before returning to userland.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
        if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
                        old_fpu->last_cpu = cpu;

                /* But leave fpu_fpregs_owner_ctx! */
                trace_x86_fpu_regs_deactivated(old_fpu);
        }
}

/*
 * Misc helper functions:
 */

/*
 * Load PKRU from the FPU context if available. Delay loading of the
 * complete FPU state until the return to userland.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
        u32 pkru_val = init_pkru_value;
        struct pkru_state *pk;

        if (!static_cpu_has(X86_FEATURE_FPU))
                return;

        set_thread_flag(TIF_NEED_FPU_LOAD);

        if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
                return;

        /*
         * PKRU state is switched eagerly because it needs to be valid before we
         * return to userland e.g. for a copy_to_user() operation.
         */
        if (!(current->flags & PF_KTHREAD)) {
                /*
                 * If the PKRU bit in xsave.header.xfeatures is not set,
                 * then the PKRU component was in init state, which means
                 * XRSTOR will set PKRU to 0. If the bit is not set then
                 * get_xsave_addr() will return NULL because the PKRU value
                 * in memory is not valid. This means pkru_val has to be
                 * set to 0 and not to init_pkru_value.
                 */
                pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
                pkru_val = pk ? pk->pkru : 0;
        }
        __write_pkru(pkru_val);
}

#endif /* _ASM_X86_FPU_INTERNAL_H */