Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/fpu: Rename XSAVE macros

There are two concepts that have some confusing naming:
1. Extended State Component numbers (currently called
XFEATURE_BIT_*)
2. Extended State Component masks (currently called XSTATE_*)

The numbers are (currently) from 0-9. State component 3 is the
bounds registers for MPX, for instance.

But when we want to enable "state component 3", we go set a bit
in XCR0. The bit we set is 1<<3. We can check to see if a
state component feature is enabled by looking at its bit.

The current 'xfeature_bit's are at best xfeature bit _numbers_.
Calling them bits is at best inconsistent with ending the enum
list with 'XFEATURES_NR_MAX'.

This patch renames the enum to be 'xfeature'. These also
happen to be what the Intel documentation calls a "state
component".

We also want to differentiate these from the "XSTATE_*" macros.
The "XSTATE_*" macros are a mask, and we rename them to match.

These macros are reasonably widely used so this patch is a
wee bit big, but this really is just a rename.

The only non-mechanical part of this is the

s/XSTATE_EXTEND_MASK/XFEATURE_MASK_EXTEND/

We need a better name for it, but that's another patch.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: dave@sr71.net
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20150902233126.38653250@viggo.jf.intel.com
[ Ported to v4.3-rc1. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Dave Hansen and committed by
Ingo Molnar
d91cab78 75933433

+103 -82
+2 -1
arch/x86/crypto/camellia_aesni_avx2_glue.c
··· 567 567 return -ENODEV; 568 568 } 569 569 570 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 570 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 571 + &feature_name)) { 571 572 pr_info("CPU feature '%s' is not supported.\n", feature_name); 572 573 return -ENODEV; 573 574 }
+2 -1
arch/x86/crypto/camellia_aesni_avx_glue.c
··· 554 554 { 555 555 const char *feature_name; 556 556 557 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 557 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 558 + &feature_name)) { 558 559 pr_info("CPU feature '%s' is not supported.\n", feature_name); 559 560 return -ENODEV; 560 561 }
+2 -1
arch/x86/crypto/cast5_avx_glue.c
··· 469 469 { 470 470 const char *feature_name; 471 471 472 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 472 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 473 + &feature_name)) { 473 474 pr_info("CPU feature '%s' is not supported.\n", feature_name); 474 475 return -ENODEV; 475 476 }
+2 -1
arch/x86/crypto/cast6_avx_glue.c
··· 591 591 { 592 592 const char *feature_name; 593 593 594 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 594 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 595 + &feature_name)) { 595 596 pr_info("CPU feature '%s' is not supported.\n", feature_name); 596 597 return -ENODEV; 597 598 }
+1 -1
arch/x86/crypto/chacha20_glue.c
··· 130 130 131 131 #ifdef CONFIG_AS_AVX2 132 132 chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 && 133 - cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); 133 + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); 134 134 #endif 135 135 return crypto_register_alg(&alg); 136 136 }
+1 -1
arch/x86/crypto/poly1305_glue.c
··· 184 184 185 185 #ifdef CONFIG_AS_AVX2 186 186 poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 && 187 - cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); 187 + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); 188 188 alg.descsize = sizeof(struct poly1305_simd_desc_ctx); 189 189 if (poly1305_use_avx2) 190 190 alg.descsize += 10 * sizeof(u32);
+2 -1
arch/x86/crypto/serpent_avx2_glue.c
··· 542 542 pr_info("AVX2 instructions are not detected.\n"); 543 543 return -ENODEV; 544 544 } 545 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 545 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 546 + &feature_name)) { 546 547 pr_info("CPU feature '%s' is not supported.\n", feature_name); 547 548 return -ENODEV; 548 549 }
+2 -1
arch/x86/crypto/serpent_avx_glue.c
··· 597 597 { 598 598 const char *feature_name; 599 599 600 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 600 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 601 + &feature_name)) { 601 602 pr_info("CPU feature '%s' is not supported.\n", feature_name); 602 603 return -ENODEV; 603 604 }
+1 -1
arch/x86/crypto/sha1_ssse3_glue.c
··· 121 121 #ifdef CONFIG_AS_AVX 122 122 static bool __init avx_usable(void) 123 123 { 124 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) { 124 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { 125 125 if (cpu_has_avx) 126 126 pr_info("AVX detected but unusable.\n"); 127 127 return false;
+1 -1
arch/x86/crypto/sha256_ssse3_glue.c
··· 130 130 #ifdef CONFIG_AS_AVX 131 131 static bool __init avx_usable(void) 132 132 { 133 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) { 133 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { 134 134 if (cpu_has_avx) 135 135 pr_info("AVX detected but unusable.\n"); 136 136 return false;
+1 -1
arch/x86/crypto/sha512_ssse3_glue.c
··· 129 129 #ifdef CONFIG_AS_AVX 130 130 static bool __init avx_usable(void) 131 131 { 132 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) { 132 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { 133 133 if (cpu_has_avx) 134 134 pr_info("AVX detected but unusable.\n"); 135 135 return false;
+2 -1
arch/x86/crypto/twofish_avx_glue.c
··· 558 558 { 559 559 const char *feature_name; 560 560 561 - if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { 561 + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, 562 + &feature_name)) { 562 563 pr_info("CPU feature '%s' is not supported.\n", feature_name); 563 564 return -ENODEV; 564 565 }
+25 -19
arch/x86/include/asm/fpu/types.h
··· 95 95 /* 96 96 * List of XSAVE features Linux knows about: 97 97 */ 98 - enum xfeature_bit { 99 - XSTATE_BIT_FP, 100 - XSTATE_BIT_SSE, 101 - XSTATE_BIT_YMM, 102 - XSTATE_BIT_BNDREGS, 103 - XSTATE_BIT_BNDCSR, 104 - XSTATE_BIT_OPMASK, 105 - XSTATE_BIT_ZMM_Hi256, 106 - XSTATE_BIT_Hi16_ZMM, 98 + enum xfeature { 99 + XFEATURE_FP, 100 + XFEATURE_SSE, 101 + /* 102 + * Values above here are "legacy states". 103 + * Those below are "extended states". 104 + */ 105 + XFEATURE_YMM, 106 + XFEATURE_BNDREGS, 107 + XFEATURE_BNDCSR, 108 + XFEATURE_OPMASK, 109 + XFEATURE_ZMM_Hi256, 110 + XFEATURE_Hi16_ZMM, 107 111 108 112 XFEATURES_NR_MAX, 109 113 }; 110 114 111 - #define XSTATE_FP (1 << XSTATE_BIT_FP) 112 - #define XSTATE_SSE (1 << XSTATE_BIT_SSE) 113 - #define XSTATE_YMM (1 << XSTATE_BIT_YMM) 114 - #define XSTATE_BNDREGS (1 << XSTATE_BIT_BNDREGS) 115 - #define XSTATE_BNDCSR (1 << XSTATE_BIT_BNDCSR) 116 - #define XSTATE_OPMASK (1 << XSTATE_BIT_OPMASK) 117 - #define XSTATE_ZMM_Hi256 (1 << XSTATE_BIT_ZMM_Hi256) 118 - #define XSTATE_Hi16_ZMM (1 << XSTATE_BIT_Hi16_ZMM) 115 + #define XFEATURE_MASK_FP (1 << XFEATURE_FP) 116 + #define XFEATURE_MASK_SSE (1 << XFEATURE_SSE) 117 + #define XFEATURE_MASK_YMM (1 << XFEATURE_YMM) 118 + #define XFEATURE_MASK_BNDREGS (1 << XFEATURE_BNDREGS) 119 + #define XFEATURE_MASK_BNDCSR (1 << XFEATURE_BNDCSR) 120 + #define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK) 121 + #define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256) 122 + #define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM) 119 123 120 - #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) 121 - #define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) 124 + #define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE) 125 + #define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \ 126 + | XFEATURE_MASK_ZMM_Hi256 \ 127 + | XFEATURE_MASK_Hi16_ZMM) 122 128 123 129 /* 124 130 * There are 16x 256-bit AVX registers named YMM0-YMM15.
+9 -5
arch/x86/include/asm/fpu/xstate.h
··· 6 6 #include <linux/uaccess.h> 7 7 8 8 /* Bit 63 of XCR0 is reserved for future expansion */ 9 - #define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63))) 9 + #define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63))) 10 10 11 11 #define XSTATE_CPUID 0x0000000d 12 12 ··· 19 19 #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) 20 20 21 21 /* Supported features which support lazy state saving */ 22 - #define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \ 23 - | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) 22 + #define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \ 23 + XFEATURE_MASK_SSE | \ 24 + XFEATURE_MASK_YMM | \ 25 + XFEATURE_MASK_OPMASK | \ 26 + XFEATURE_MASK_ZMM_Hi256 | \ 27 + XFEATURE_MASK_Hi16_ZMM) 24 28 25 29 /* Supported features which require eager state saving */ 26 - #define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR) 30 + #define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR) 27 31 28 32 /* All currently supported features */ 29 - #define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER) 33 + #define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER) 30 34 31 35 #ifdef CONFIG_X86_64 32 36 #define REX_PREFIX "0x48, "
+3 -3
arch/x86/kernel/fpu/init.c
··· 290 290 if (cpu_has_xsaveopt && eagerfpu != DISABLE) 291 291 eagerfpu = ENABLE; 292 292 293 - if (xfeatures_mask & XSTATE_EAGER) { 293 + if (xfeatures_mask & XFEATURE_MASK_EAGER) { 294 294 if (eagerfpu == DISABLE) { 295 295 pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n", 296 - xfeatures_mask & XSTATE_EAGER); 297 - xfeatures_mask &= ~XSTATE_EAGER; 296 + xfeatures_mask & XFEATURE_MASK_EAGER); 297 + xfeatures_mask &= ~XFEATURE_MASK_EAGER; 298 298 } else { 299 299 eagerfpu = ENABLE; 300 300 }
+2 -2
arch/x86/kernel/fpu/regset.c
··· 66 66 * presence of FP and SSE state. 67 67 */ 68 68 if (cpu_has_xsave) 69 - fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE; 69 + fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE; 70 70 71 71 return ret; 72 72 } ··· 326 326 * presence of FP. 327 327 */ 328 328 if (cpu_has_xsave) 329 - fpu->state.xsave.header.xfeatures |= XSTATE_FP; 329 + fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP; 330 330 return ret; 331 331 } 332 332
+3 -3
arch/x86/kernel/fpu/signal.c
··· 107 107 * header as well as change any contents in the memory layout. 108 108 * xrestore as part of sigreturn will capture all the changes. 109 109 */ 110 - xfeatures |= XSTATE_FPSSE; 110 + xfeatures |= XFEATURE_MASK_FPSSE; 111 111 112 112 err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures); 113 113 ··· 207 207 * layout and not enabled by the OS. 208 208 */ 209 209 if (fx_only) 210 - header->xfeatures = XSTATE_FPSSE; 210 + header->xfeatures = XFEATURE_MASK_FPSSE; 211 211 else 212 212 header->xfeatures &= (xfeatures_mask & xfeatures); 213 213 } ··· 230 230 { 231 231 if (use_xsave()) { 232 232 if ((unsigned long)buf % 64 || fx_only) { 233 - u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE; 233 + u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE; 234 234 copy_kernel_to_xregs(&init_fpstate.xsave, init_bv); 235 235 return copy_user_to_fxregs(buf); 236 236 } else {
+20 -16
arch/x86/kernel/fpu/xstate.c
··· 72 72 /* 73 73 * So we use FLS here to be able to print the most advanced 74 74 * feature that was requested but is missing. So if a driver 75 - * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the 75 + * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the 76 76 * missing AVX feature - this is the most informative message 77 77 * to users: 78 78 */ ··· 131 131 /* 132 132 * FP is in init state 133 133 */ 134 - if (!(xfeatures & XSTATE_FP)) { 134 + if (!(xfeatures & XFEATURE_MASK_FP)) { 135 135 fx->cwd = 0x37f; 136 136 fx->swd = 0; 137 137 fx->twd = 0; ··· 144 144 /* 145 145 * SSE is in init state 146 146 */ 147 - if (!(xfeatures & XSTATE_SSE)) 147 + if (!(xfeatures & XFEATURE_MASK_SSE)) 148 148 memset(&fx->xmm_space[0], 0, 256); 149 149 150 150 /* ··· 223 223 */ 224 224 static void __init print_xstate_features(void) 225 225 { 226 - print_xstate_feature(XSTATE_FP); 227 - print_xstate_feature(XSTATE_SSE); 228 - print_xstate_feature(XSTATE_YMM); 229 - print_xstate_feature(XSTATE_BNDREGS); 230 - print_xstate_feature(XSTATE_BNDCSR); 231 - print_xstate_feature(XSTATE_OPMASK); 232 - print_xstate_feature(XSTATE_ZMM_Hi256); 233 - print_xstate_feature(XSTATE_Hi16_ZMM); 226 + print_xstate_feature(XFEATURE_MASK_FP); 227 + print_xstate_feature(XFEATURE_MASK_SSE); 228 + print_xstate_feature(XFEATURE_MASK_YMM); 229 + print_xstate_feature(XFEATURE_MASK_BNDREGS); 230 + print_xstate_feature(XFEATURE_MASK_BNDCSR); 231 + print_xstate_feature(XFEATURE_MASK_OPMASK); 232 + print_xstate_feature(XFEATURE_MASK_ZMM_Hi256); 233 + print_xstate_feature(XFEATURE_MASK_Hi16_ZMM); 234 234 } 235 235 236 236 /* ··· 365 365 return 0; 366 366 } 367 367 368 - void fpu__init_disable_system_xstate(void) 368 + /* 369 + * We enabled the XSAVE hardware, but something went wrong and 370 + * we can not use it. Disable it. 
371 + */ 372 + static void fpu__init_disable_system_xstate(void) 369 373 { 370 374 xfeatures_mask = 0; 371 375 cr4_clear_bits(X86_CR4_OSXSAVE); ··· 402 398 cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx); 403 399 xfeatures_mask = eax + ((u64)edx << 32); 404 400 405 - if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { 401 + if ((xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) { 406 402 pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask); 407 403 BUG(); 408 404 } ··· 455 451 * Inputs: 456 452 * xstate: the thread's storage area for all FPU data 457 453 * xstate_feature: state which is defined in xsave.h (e.g. 458 - * XSTATE_FP, XSTATE_SSE, etc...) 454 + * XFEATURE_MASK_FP, XFEATURE_MASK_SSE, etc...) 459 455 * Output: 460 456 * address of the state in the xsave area, or NULL if the 461 457 * field is not present in the xsave buffer. ··· 506 502 * Note that this only works on the current task. 507 503 * 508 504 * Inputs: 509 - * @xsave_state: state which is defined in xsave.h (e.g. XSTATE_FP, 510 - * XSTATE_SSE, etc...) 505 + * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP, 506 + * XFEATURE_MASK_SSE, etc...) 511 507 * Output: 512 508 * address of the state in the xsave area or NULL if the state 513 509 * is not present or is in its 'init state'.
+1 -1
arch/x86/kernel/traps.c
··· 384 384 * which is all zeros which indicates MPX was not 385 385 * responsible for the exception. 386 386 */ 387 - bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR); 387 + bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR); 388 388 if (!bndcsr) 389 389 goto exit_trap; 390 390
+2 -2
arch/x86/kvm/cpuid.c
··· 30 30 int feature_bit = 0; 31 31 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; 32 32 33 - xstate_bv &= XSTATE_EXTEND_MASK; 33 + xstate_bv &= XFEATURE_MASK_EXTEND; 34 34 while (xstate_bv) { 35 35 if (xstate_bv & 0x1) { 36 36 u32 eax, ebx, ecx, edx, offset; ··· 51 51 u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0; 52 52 53 53 if (!kvm_x86_ops->mpx_supported()) 54 - xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR); 54 + xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); 55 55 56 56 return xcr0; 57 57 }
+14 -13
arch/x86/kvm/x86.c
··· 662 662 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ 663 663 if (index != XCR_XFEATURE_ENABLED_MASK) 664 664 return 1; 665 - if (!(xcr0 & XSTATE_FP)) 665 + if (!(xcr0 & XFEATURE_MASK_FP)) 666 666 return 1; 667 - if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) 667 + if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE)) 668 668 return 1; 669 669 670 670 /* ··· 672 672 * saving. However, xcr0 bit 0 is always set, even if the 673 673 * emulated CPU does not support XSAVE (see fx_init). 674 674 */ 675 - valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; 675 + valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; 676 676 if (xcr0 & ~valid_bits) 677 677 return 1; 678 678 679 - if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR))) 679 + if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) != 680 + (!(xcr0 & XFEATURE_MASK_BNDCSR))) 680 681 return 1; 681 682 682 - if (xcr0 & XSTATE_AVX512) { 683 - if (!(xcr0 & XSTATE_YMM)) 683 + if (xcr0 & XFEATURE_MASK_AVX512) { 684 + if (!(xcr0 & XFEATURE_MASK_YMM)) 684 685 return 1; 685 - if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512) 686 + if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512) 686 687 return 1; 687 688 } 688 689 kvm_put_guest_xcr0(vcpu); 689 690 vcpu->arch.xcr0 = xcr0; 690 691 691 - if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) 692 + if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND) 692 693 kvm_update_cpuid(vcpu); 693 694 return 0; 694 695 } ··· 2907 2906 * Copy each region from the possibly compacted offset to the 2908 2907 * non-compacted offset. 2909 2908 */ 2910 - valid = xstate_bv & ~XSTATE_FPSSE; 2909 + valid = xstate_bv & ~XFEATURE_MASK_FPSSE; 2911 2910 while (valid) { 2912 2911 u64 feature = valid & -valid; 2913 2912 int index = fls64(feature) - 1; ··· 2945 2944 * Copy each region from the non-compacted offset to the 2946 2945 * possibly compacted offset. 
2947 2946 */ 2948 - valid = xstate_bv & ~XSTATE_FPSSE; 2947 + valid = xstate_bv & ~XFEATURE_MASK_FPSSE; 2949 2948 while (valid) { 2950 2949 u64 feature = valid & -valid; 2951 2950 int index = fls64(feature) - 1; ··· 2973 2972 &vcpu->arch.guest_fpu.state.fxsave, 2974 2973 sizeof(struct fxregs_state)); 2975 2974 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = 2976 - XSTATE_FPSSE; 2975 + XFEATURE_MASK_FPSSE; 2977 2976 } 2978 2977 } 2979 2978 ··· 2993 2992 return -EINVAL; 2994 2993 load_xsave(vcpu, (u8 *)guest_xsave->region); 2995 2994 } else { 2996 - if (xstate_bv & ~XSTATE_FPSSE) 2995 + if (xstate_bv & ~XFEATURE_MASK_FPSSE) 2997 2996 return -EINVAL; 2998 2997 memcpy(&vcpu->arch.guest_fpu.state.fxsave, 2999 2998 guest_xsave->region, sizeof(struct fxregs_state)); ··· 7002 7001 /* 7003 7002 * Ensure guest xcr0 is valid for loading 7004 7003 */ 7005 - vcpu->arch.xcr0 = XSTATE_FP; 7004 + vcpu->arch.xcr0 = XFEATURE_MASK_FP; 7006 7005 7007 7006 vcpu->arch.cr0 |= X86_CR0_ET; 7008 7007 }
+3 -3
arch/x86/kvm/x86.h
··· 180 180 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, 181 181 int page_num); 182 182 183 - #define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \ 184 - | XSTATE_BNDREGS | XSTATE_BNDCSR \ 185 - | XSTATE_AVX512) 183 + #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ 184 + | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ 185 + | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512) 186 186 extern u64 host_xcr0; 187 187 188 188 extern u64 kvm_supported_xcr0(void);
+3 -3
arch/x86/mm/mpx.c
··· 258 258 goto err_out; 259 259 } 260 260 /* get bndregs field from current task's xsave area */ 261 - bndregs = get_xsave_field_ptr(XSTATE_BNDREGS); 261 + bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS); 262 262 if (!bndregs) { 263 263 err = -EINVAL; 264 264 goto err_out; ··· 315 315 * The bounds directory pointer is stored in a register 316 316 * only accessible if we first do an xsave. 317 317 */ 318 - bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR); 318 + bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR); 319 319 if (!bndcsr) 320 320 return MPX_INVALID_BOUNDS_DIR; 321 321 ··· 492 492 const struct bndcsr *bndcsr; 493 493 struct mm_struct *mm = current->mm; 494 494 495 - bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR); 495 + bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR); 496 496 if (!bndcsr) 497 497 return -EINVAL; 498 498 /*