/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
#include <linux/bottom_half.h>
#include <linux/const.h>	/* _BITUL() */

/*
 * Use kernel_fpu_begin/end() if you intend to use the FPU in kernel context.
 * It disables preemption, so be careful if you intend to use it for long
 * periods of time.
 * If you intend to use the FPU in irq/softirq context, check first with
 * irq_fpu_usable() whether that is possible.
 */
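
/*
 * Illustrative sketch (not part of this header): code that may run in
 * irq/softirq context checks irq_fpu_usable() first and keeps a scalar
 * fallback. The my_*() helpers are hypothetical.
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		my_simd_checksum(buf, len);
 *		kernel_fpu_end();
 *	} else {
 *		my_scalar_checksum(buf, len);
 *	}
 */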

/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
	/*
	 * Any 64-bit code that uses 387 instructions must explicitly request
	 * KFPU_387.
	 */
	kernel_fpu_begin_mask(KFPU_MXCSR);
#else
	/*
	 * 32-bit kernel code may use 387 operations as well as SSE2, etc.,
	 * as long as it checks that the CPU has the required capability.
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}

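/*
 * Illustrative sketch (not part of this header): 64-bit code that executes
 * x87/387 instructions must request that state explicitly, e.g.:
 *
 *	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
 *	... x87 and SSE code ...
 *	kernel_fpu_end();
 */
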
/*
 * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
 * A context switch will (and softirq might) save CPU's FPU registers to
 * fpu->state and set TIF_NEED_FPU_LOAD, leaving CPU's FPU registers in
 * a random state.
 *
 * local_bh_disable() protects against both preemption and soft interrupts
 * on !RT kernels.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here: bottom
 * half processing always runs in thread context on RT kernels, so
 * disabling preemption implicitly prevents bottom half processing as well.
 *
 * Disabling preemption also serializes against kernel_fpu_begin().
 */
static inline void fpregs_lock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

static inline void fpregs_unlock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}

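/*
 * Illustrative sketch (not part of this header): editing the current
 * task's FPU state under fpregs_lock(). Whether the registers are live
 * or already saved to fpu->state is indicated by TIF_NEED_FPU_LOAD.
 *
 *	fpregs_lock();
 *	if (test_thread_flag(TIF_NEED_FPU_LOAD)) {
 *		... edit the saved image in fpu->state ...
 *	} else {
 *		... edit the live FPU registers ...
 *	}
 *	fpregs_unlock();
 */
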
#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
static inline void fpregs_assert_state_consistent(void) { }
#endif

/*
 * Load the task FPU state before returning to userspace.
 */
extern void switch_fpu_return(void);

/*
 * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
 *
 * If 'feature_name' is non-NULL, a human-readable description of the feature
 * is stored there as well - this can be used to print error (or success)
 * messages.
 */
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);

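/*
 * Illustrative sketch (not part of this header): refusing to initialize
 * when the CPU lacks SSE/AVX state support, in the style of the x86
 * crypto glue code.
 *
 *	const char *feature_name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
 *			       &feature_name)) {
 *		pr_info("CPU feature '%s' is not supported\n", feature_name);
 *		return -ENODEV;
 *	}
 */
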
/*
 * Tasks that are not using SVA have mm->pasid set to zero to note that they
 * will not have the valid bit set in MSR_IA32_PASID while they are running.
 */
#define PASID_DISABLED	0

static inline void update_pasid(void) { }

#endif /* _ASM_X86_FPU_API_H */