Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/x86/include/asm/i387.h at v3.2-rc2 (453 lines, 11 kB)

/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES		(1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
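/*
 * Note on the fault handling in the asm blocks below (added commentary,
 * not in the original file): label 1 marks the instruction that may
 * fault, the .fixup fragment at label 3 stores -1 in err and jumps back
 * to label 2, and _ASM_EXTABLE(1b, 3b) records the 1b -> 3b mapping in
 * the kernel's exception table, so a fault taken by fxrstor/fxsave is
 * turned into an error return instead of an oops.
 */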
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1: fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1: fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	asm volatile("1: rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
	asm volatile("rex64/fxsave %0"
		     : "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}

#else /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#endif	/* CONFIG_X86_64 */

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return;
	}

	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
		asm volatile("fnclex");

	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending. Clear the x87 state here by setting it to fixed
	   values. safe_address is a random variable that should be in L1 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
	fpu_save_init(&tsk->thread.fpu);
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __unlazy_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		__save_init_fpu(tsk);
		stts();
	} else
		tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
}

static inline void kernel_fpu_begin(void)
{
	struct thread_info *me = current_thread_info();
	preempt_disable();
	if (me->status & TS_USEDFPU)
		__save_init_fpu(me->task);
	else
		clts();
}

static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}
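
/*
 * Usage sketch (added commentary, not in the original header): kernel code
 * that wants to touch x87/MMX/SSE registers brackets the use with the pair
 * above, checking irq_fpu_usable() first when it may run in interrupt
 * context:
 *
 *	kernel_fpu_begin();
 *	... use x87/MMX/SSE registers ...
 *	kernel_fpu_end();
 *
 * kernel_fpu_begin() saves the current task's live FPU state (if any) and
 * makes sure CR0.TS is clear; kernel_fpu_end() sets TS again, so the next
 * user FPU access traps and restores state, and reenables preemption.
 */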

static inline bool irq_fpu_usable(void)
{
	struct pt_regs *regs;

	return !in_interrupt() || !(regs = get_irq_regs()) || \
		user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

/*
 * Some instructions, like VIA's padlock instructions, generate a spurious
 * DNA fault but don't modify the SSE registers. And these instructions
 * get used from interrupt context as well. To prevent these kernel
 * instructions in interrupt context from interacting wrongly with other
 * user/kernel fpu usage, we should use them only in the context of
 * irq_ts_save/restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
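
/*
 * Usage sketch (added commentary, not in the original header): a driver
 * issuing such an instruction from a possibly atomic context brackets it
 * like this:
 *
 *	int ts = irq_ts_save();
 *	... instruction that may raise a spurious DNA fault ...
 *	irq_ts_restore(ts);
 *
 * irq_ts_save() clears CR0.TS only when the spurious fault could not be
 * taken safely (atomic context with TS set), and its return value tells
 * the caller whether irq_ts_restore() must set TS again.
 */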

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

extern void fpu_finit(struct fpu *fpu);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */
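
Usage note (editor's sketch, not part of the file): fpu_alloc(), fpu_copy() and
fpu_free() manage the xstate buffer hung off struct fpu, allocated from
task_xstate_cachep and expected to be 16-byte aligned (hence the WARN_ON). The
fork path duplicates a parent's state roughly like this, where dst and src are
illustrative task pointers:

	if (fpu_allocated(&src->thread.fpu)) {
		int err = fpu_alloc(&dst->thread.fpu);	/* -ENOMEM on failure */
		if (err)
			return err;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	/* ... and on task teardown: fpu_free(&dst->thread.fpu); */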