Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/fpu: Rename xsave.header::xstate_bv to 'xfeatures'

'xsave.header::xstate_bv' is a misnomer - what does 'bv' stand for?

It probably comes from the 'XGETBV' instruction name, but I could
not find in the Intel documentation where that abbreviation comes
from. It could mean 'bit vector' - or something else?

But how about - instead of guessing about a weird name - we name
the field in an obvious and descriptive way that tells us exactly
what it does?

So rename it to 'xfeatures', which is a bitmask of the
xfeatures that are active (in use) in that context structure.

An eyesore like:

fpu->state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;

is now much more readable:

fpu->state->xsave.header.xfeatures |= XSTATE_FP;

Which form is not just infinitely more readable, but is also
shorter.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+39 -39
+1 -1
arch/x86/include/asm/fpu/internal.h
··· 261 261 /* 262 262 * xsave header may indicate the init state of the FP. 263 263 */ 264 - if (!(fpu->state->xsave.header.xstate_bv & XSTATE_FP)) 264 + if (!(fpu->state->xsave.header.xfeatures & XSTATE_FP)) 265 265 return 1; 266 266 } else if (use_fxsr()) { 267 267 fpu_fxsave(fpu);
+1 -1
arch/x86/include/asm/fpu/types.h
··· 100 100 } __packed; 101 101 102 102 struct xstate_header { 103 - u64 xstate_bv; 103 + u64 xfeatures; 104 104 u64 xcomp_bv; 105 105 u64 reserved[6]; 106 106 } __attribute__((packed));
+4 -4
arch/x86/include/asm/user.h
··· 15 15 }; 16 16 17 17 struct user_xstate_header { 18 - __u64 xstate_bv; 18 + __u64 xfeatures; 19 19 __u64 reserved1[2]; 20 20 __u64 reserved2[5]; 21 21 }; ··· 41 41 * particular process/thread. 42 42 * 43 43 * Also when the user modifies certain state FP/SSE/etc through the 44 - * ptrace interface, they must ensure that the header.xstate_bv 44 + * ptrace interface, they must ensure that the header.xfeatures 45 45 * bytes[512..519] of the memory layout are updated correspondingly. 46 46 * i.e., for example when FP state is modified to a non-init state, 47 - * header.xstate_bv's bit 0 must be set to '1', when SSE is modified to 48 - * non-init state, header.xstate_bv's bit 1 must to be set to '1', etc. 47 + * header.xfeatures's bit 0 must be set to '1', when SSE is modified to 48 + * non-init state, header.xfeatures's bit 1 must to be set to '1', etc. 49 49 */ 50 50 #define USER_XSTATE_FX_SW_WORDS 6 51 51 #define USER_XSTATE_XCR0_WORD 0
+2 -2
arch/x86/include/uapi/asm/sigcontext.h
··· 25 25 __u32 extended_size; /* total size of the layout referred by 26 26 * fpstate pointer in the sigcontext. 27 27 */ 28 - __u64 xstate_bv; 28 + __u64 xfeatures; 29 29 /* feature bit mask (including fp/sse/extended 30 30 * state) that is present in the memory 31 31 * layout. ··· 210 210 #endif /* !__i386__ */ 211 211 212 212 struct _header { 213 - __u64 xstate_bv; 213 + __u64 xfeatures; 214 214 __u64 reserved1[2]; 215 215 __u64 reserved2[5]; 216 216 };
+3 -3
arch/x86/kernel/fpu/core.c
··· 470 470 * presence of FP and SSE state. 471 471 */ 472 472 if (cpu_has_xsave) 473 - fpu->state->xsave.header.xstate_bv |= XSTATE_FPSSE; 473 + fpu->state->xsave.header.xfeatures |= XSTATE_FPSSE; 474 474 475 475 return ret; 476 476 } ··· 528 528 * mxcsr reserved bits must be masked to zero for security reasons. 529 529 */ 530 530 xsave->i387.mxcsr &= mxcsr_feature_mask; 531 - xsave->header.xstate_bv &= xfeatures_mask; 531 + xsave->header.xfeatures &= xfeatures_mask; 532 532 /* 533 533 * These bits must be zero. 534 534 */ ··· 740 740 * presence of FP. 741 741 */ 742 742 if (cpu_has_xsave) 743 - fpu->state->xsave.header.xstate_bv |= XSTATE_FP; 743 + fpu->state->xsave.header.xfeatures |= XSTATE_FP; 744 744 return ret; 745 745 } 746 746
+26 -26
arch/x86/kernel/fpu/xsave.c
··· 32 32 /* 33 33 * If a processor implementation discern that a processor state component is 34 34 * in its initialized state it may modify the corresponding bit in the 35 - * header.xstate_bv as '0', with out modifying the corresponding memory 35 + * header.xfeatures as '0', with out modifying the corresponding memory 36 36 * layout in the case of xsaveopt. While presenting the xstate information to 37 37 * the user, we always ensure that the memory layout of a feature will be in 38 38 * the init state if the corresponding header bit is zero. This is to ensure ··· 43 43 { 44 44 struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; 45 45 int feature_bit = 0x2; 46 - u64 xstate_bv; 46 + u64 xfeatures; 47 47 48 48 if (!fx) 49 49 return; 50 50 51 - xstate_bv = tsk->thread.fpu.state->xsave.header.xstate_bv; 51 + xfeatures = tsk->thread.fpu.state->xsave.header.xfeatures; 52 52 53 53 /* 54 54 * None of the feature bits are in init state. So nothing else 55 55 * to do for us, as the memory layout is up to date. 56 56 */ 57 - if ((xstate_bv & xfeatures_mask) == xfeatures_mask) 57 + if ((xfeatures & xfeatures_mask) == xfeatures_mask) 58 58 return; 59 59 60 60 /* 61 61 * FP is in init state 62 62 */ 63 - if (!(xstate_bv & XSTATE_FP)) { 63 + if (!(xfeatures & XSTATE_FP)) { 64 64 fx->cwd = 0x37f; 65 65 fx->swd = 0; 66 66 fx->twd = 0; ··· 73 73 /* 74 74 * SSE is in init state 75 75 */ 76 - if (!(xstate_bv & XSTATE_SSE)) 76 + if (!(xfeatures & XSTATE_SSE)) 77 77 memset(&fx->xmm_space[0], 0, 256); 78 78 79 - xstate_bv = (xfeatures_mask & ~xstate_bv) >> 2; 79 + xfeatures = (xfeatures_mask & ~xfeatures) >> 2; 80 80 81 81 /* 82 82 * Update all the other memory layouts for which the corresponding 83 83 * header bit is in the init state. 
84 84 */ 85 - while (xstate_bv) { 86 - if (xstate_bv & 0x1) { 85 + while (xfeatures) { 86 + if (xfeatures & 0x1) { 87 87 int offset = xstate_offsets[feature_bit]; 88 88 int size = xstate_sizes[feature_bit]; 89 89 ··· 92 92 size); 93 93 } 94 94 95 - xstate_bv >>= 1; 95 + xfeatures >>= 1; 96 96 feature_bit++; 97 97 } 98 98 } ··· 162 162 { 163 163 struct xsave_struct __user *x = buf; 164 164 struct _fpx_sw_bytes *sw_bytes; 165 - u32 xstate_bv; 165 + u32 xfeatures; 166 166 int err; 167 167 168 168 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ ··· 175 175 err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size)); 176 176 177 177 /* 178 - * Read the xstate_bv which we copied (directly from the cpu or 178 + * Read the xfeatures which we copied (directly from the cpu or 179 179 * from the state in task struct) to the user buffers. 180 180 */ 181 - err |= __get_user(xstate_bv, (__u32 *)&x->header.xstate_bv); 181 + err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures); 182 182 183 183 /* 184 184 * For legacy compatible, we always set FP/SSE bits in the bit 185 185 * vector while saving the state to the user context. This will 186 186 * enable us capturing any changes(during sigreturn) to 187 187 * the FP/SSE bits by the legacy applications which don't touch 188 - * xstate_bv in the xsave header. 188 + * xfeatures in the xsave header. 189 189 * 190 - * xsave aware apps can change the xstate_bv in the xsave 190 + * xsave aware apps can change the xfeatures in the xsave 191 191 * header as well as change any contents in the memory layout. 192 192 * xrestore as part of sigreturn will capture all the changes. 
193 193 */ 194 - xstate_bv |= XSTATE_FPSSE; 194 + xfeatures |= XSTATE_FPSSE; 195 195 196 - err |= __put_user(xstate_bv, (__u32 *)&x->header.xstate_bv); 196 + err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures); 197 197 198 198 return err; 199 199 } ··· 277 277 static inline void 278 278 sanitize_restored_xstate(struct task_struct *tsk, 279 279 struct user_i387_ia32_struct *ia32_env, 280 - u64 xstate_bv, int fx_only) 280 + u64 xfeatures, int fx_only) 281 281 { 282 282 struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; 283 283 struct xstate_header *header = &xsave->header; ··· 291 291 * layout and not enabled by the OS. 292 292 */ 293 293 if (fx_only) 294 - header->xstate_bv = XSTATE_FPSSE; 294 + header->xfeatures = XSTATE_FPSSE; 295 295 else 296 - header->xstate_bv &= (xfeatures_mask & xstate_bv); 296 + header->xfeatures &= (xfeatures_mask & xfeatures); 297 297 } 298 298 299 299 if (use_fxsr()) { ··· 335 335 struct task_struct *tsk = current; 336 336 struct fpu *fpu = &tsk->thread.fpu; 337 337 int state_size = xstate_size; 338 - u64 xstate_bv = 0; 338 + u64 xfeatures = 0; 339 339 int fx_only = 0; 340 340 341 341 ia32_fxstate &= (config_enabled(CONFIG_X86_32) || ··· 369 369 fx_only = 1; 370 370 } else { 371 371 state_size = fx_sw_user.xstate_size; 372 - xstate_bv = fx_sw_user.xstate_bv; 372 + xfeatures = fx_sw_user.xfeatures; 373 373 } 374 374 } 375 375 ··· 398 398 fpstate_init(fpu); 399 399 err = -1; 400 400 } else { 401 - sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); 401 + sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); 402 402 } 403 403 404 404 fpu->fpstate_active = 1; ··· 415 415 * state to the registers directly (with exceptions handled). 
416 416 */ 417 417 user_fpu_begin(); 418 - if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) { 418 + if (restore_user_xstate(buf_fx, xfeatures, fx_only)) { 419 419 fpu_reset_state(fpu); 420 420 return -1; 421 421 } ··· 441 441 442 442 fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; 443 443 fx_sw_reserved.extended_size = size; 444 - fx_sw_reserved.xstate_bv = xfeatures_mask; 444 + fx_sw_reserved.xfeatures = xfeatures_mask; 445 445 fx_sw_reserved.xstate_size = xstate_size; 446 446 447 447 if (config_enabled(CONFIG_IA32_EMULATION)) { ··· 576 576 if (cpu_has_xsaves) { 577 577 init_xstate_buf->header.xcomp_bv = 578 578 (u64)1 << 63 | xfeatures_mask; 579 - init_xstate_buf->header.xstate_bv = xfeatures_mask; 579 + init_xstate_buf->header.xfeatures = xfeatures_mask; 580 580 } 581 581 582 582 /*
+2 -2
arch/x86/kvm/x86.c
··· 3197 3197 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) 3198 3198 { 3199 3199 struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; 3200 - u64 xstate_bv = xsave->header.xstate_bv; 3200 + u64 xstate_bv = xsave->header.xfeatures; 3201 3201 u64 valid; 3202 3202 3203 3203 /* ··· 3243 3243 memcpy(xsave, src, XSAVE_HDR_OFFSET); 3244 3244 3245 3245 /* Set XSTATE_BV and possibly XCOMP_BV. */ 3246 - xsave->header.xstate_bv = xstate_bv; 3246 + xsave->header.xfeatures = xstate_bv; 3247 3247 if (cpu_has_xsaves) 3248 3248 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; 3249 3249