Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.32 119 lines 3.1 kB view raw
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>
#include <asm/i387.h>

/* Feature bits of the extended-state mask (loaded into EDX:EAX for
 * xsave/xrstor): x87 FPU, SSE, and AVX YMM state. */
#define XSTATE_FP	0x1
#define XSTATE_SSE	0x2
#define XSTATE_YMM	0x4

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)

/* Size of the legacy fxsave area at the start of struct xsave_struct. */
#define FXSAVE_SIZE	512

/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)

/*
 * Byte prefix for the hand-assembled xsave/xrstor opcodes below:
 * 0x48 is the REX.W prefix on x86-64, empty on 32-bit.
 * (The opcodes are emitted as .byte sequences, presumably so the
 * header assembles with binutils that lack the mnemonics — verify.)
 */
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;		/* size of the xsave area in use */
extern u64 pcntxt_mask;				/* features enabled in hardware */
extern struct xsave_struct *init_xstate_buf;	/* pristine init-time state image */

extern void xsave_cntxt_init(void);
extern void xsave_init(void);
extern int init_fpu(struct task_struct *child);
extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
			    void __user *fpstate,
			    struct _fpx_sw_bytes *sw);

/*
 * Restore extended state from the kernel buffer @fx with a full
 * feature mask (EDX:EAX = -1:-1).
 *
 * Returns 0 on success, or -1 if the xrstor instruction faulted
 * (the fault is caught through the exception-table fixup at 3:).
 */
static inline int xrstor_checking(struct xsave_struct *fx)
{
	int err;

	/* .byte 0x0f,0xae,0x2f is xrstor with a (%[e/r]di) operand. */
	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
		     : "memory");

	return err;
}

/*
 * Save the current extended state to the user-space buffer @buf with
 * a full feature mask.  If the instruction faults, the whole buffer
 * is cleared so no partial/stale data is left behind; returns 0 on
 * success, -EFAULT if the fault-path clearing itself failed, or -1
 * if only the xsave faulted.
 */
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/* .byte 0x0f,0xae,0x27 is xsave with a (%[e/r]di) operand;
	 * the exception-table entry is emitted by hand here rather
	 * than via _ASM_EXTABLE as in xrstor_checking() above. */
	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3: movl $-1,%[err]\n"
			     " jmp 2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

/*
 * Restore extended state from the user-space buffer @buf, limited to
 * the features selected in @mask (split into EDX:EAX as hmask:lmask).
 *
 * Returns 0 on success, -1 if xrstor faulted.  The __force cast only
 * strips the __user qualifier for the asm operand; the access is
 * still protected by the exception-table fixup.
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;		/* low 32 bits of the feature mask */
	u32 hmask = mask >> 32;		/* high 32 bits of the feature mask */

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3: movl $-1,%[err]\n"
			     " jmp 2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}

/*
 * Unchecked xrstor from the kernel buffer @fx for the features in
 * @mask.  No fixup section: a fault here is a kernel bug, not a
 * recoverable condition.
 */
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}

/*
 * Unchecked xsave of @tsk's extended state into its thread xsave
 * area, with a full feature mask (EDX:EAX = -1:-1).
 */
static inline void xsave(struct task_struct *tsk)
{
	/* This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
			     : : "D" (&(tsk->thread.xstate->xsave)),
				 "a" (-1), "d"(-1) : "memory");
}
#endif