Merge tag 'x86-urgent-2020-07-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A series of fixes for x86:

- Reset MXCSR in kernel_fpu_begin() to prevent using a stale user
space value.

- Prevent writing MSR_TEST_CTRL on CPUs which are not explicitly
whitelisted for split lock detection. Some CPUs which do not
support it crash even when the MSR is written with 0, which is
the default value.

- Fix the XEN PV fallout of the entry code rework.

- Fix the 32-bit fallout of the entry code rework.

- Add more selftests to ensure that these entry problems don't come
back.

- Disable 16-bit segments on XEN PV. They are not supported because
XEN PV does not implement ESPFIX64"

* tag 'x86-urgent-2020-07-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/ldt: Disable 16-bit segments on Xen PV
x86/entry/32: Fix #MC and #DB wiring on x86_32
x86/entry/xen: Route #DB correctly on Xen PV
x86/entry, selftests: Further improve user entry sanity checks
x86/entry/compat: Clear RAX high bits on Xen PV SYSENTER
selftests/x86: Consolidate and fix get/set_eflags() helpers
selftests/x86/syscall_nt: Clear weird flags after each test
selftests/x86/syscall_nt: Add more flag combinations
x86/entry/64/compat: Fix Xen PV SYSENTER frame setup
x86/entry: Move SYSENTER's regs->sp and regs->flags fixups into C
x86/entry: Assert that syscalls are on the right stack
x86/split_lock: Don't write MSR_TEST_CTRL on CPUs that aren't whitelisted
x86/fpu: Reset MXCSR to default in kernel_fpu_begin()

+46 -3
arch/x86/entry/common.c
··· 45 45 #define CREATE_TRACE_POINTS 46 46 #include <trace/events/syscalls.h> 47 47 48 + /* Check that the stack and regs on entry from user mode are sane. */ 49 + static void check_user_regs(struct pt_regs *regs) 50 + { 51 + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) { 52 + /* 53 + * Make sure that the entry code gave us a sensible EFLAGS 54 + * register. Native because we want to check the actual CPU 55 + * state, not the interrupt state as imagined by Xen. 56 + */ 57 + unsigned long flags = native_save_fl(); 58 + WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF | 59 + X86_EFLAGS_NT)); 60 + 61 + /* We think we came from user mode. Make sure pt_regs agrees. */ 62 + WARN_ON_ONCE(!user_mode(regs)); 63 + 64 + /* 65 + * All entries from user mode (except #DF) should be on the 66 + * normal thread stack and should have user pt_regs in the 67 + * correct location. 68 + */ 69 + WARN_ON_ONCE(!on_thread_stack()); 70 + WARN_ON_ONCE(regs != task_pt_regs(current)); 71 + } 72 + } 73 + 48 74 #ifdef CONFIG_CONTEXT_TRACKING 49 75 /** 50 76 * enter_from_user_mode - Establish state when coming from user mode ··· 152 126 struct thread_info *ti = current_thread_info(); 153 127 unsigned long ret = 0; 154 128 u32 work; 155 - 156 - if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) 157 - BUG_ON(regs != task_pt_regs(current)); 158 129 159 130 work = READ_ONCE(ti->flags); 160 131 ··· 369 346 { 370 347 struct thread_info *ti; 371 348 349 + check_user_regs(regs); 350 + 372 351 enter_from_user_mode(); 373 352 instrumentation_begin(); 374 353 ··· 434 409 /* Handles int $0x80 */ 435 410 __visible noinstr void do_int80_syscall_32(struct pt_regs *regs) 436 411 { 412 + check_user_regs(regs); 413 + 437 414 enter_from_user_mode(); 438 415 instrumentation_begin(); 439 416 ··· 487 460 vdso_image_32.sym_int80_landing_pad; 488 461 bool success; 489 462 463 + check_user_regs(regs); 464 + 490 465 /* 491 466 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward 492 467 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction. ··· 539 510 (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0; 540 511 #endif 541 512 } 513 + 514 + /* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */ 515 + __visible noinstr long do_SYSENTER_32(struct pt_regs *regs) 516 + { 517 + /* SYSENTER loses RSP, but the vDSO saved it in RBP. */ 518 + regs->sp = regs->bp; 519 + 520 + /* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */ 521 + regs->flags |= X86_EFLAGS_IF; 522 + 523 + return do_fast_syscall_32(regs); 524 + } 542 525 #endif 543 526 544 527 SYSCALL_DEFINE0(ni_syscall) ··· 594 553 bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs) 595 554 { 596 555 if (user_mode(regs)) { 556 + check_user_regs(regs); 597 557 enter_from_user_mode(); 598 558 return false; 599 559 } ··· 728 686 */ 729 687 void noinstr idtentry_enter_user(struct pt_regs *regs) 730 688 { 689 + check_user_regs(regs); 731 690 enter_from_user_mode(); 732 691 } 733 692
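
The new check_user_regs() assertions are compiled out unless entry
debugging is enabled, so a kernel that should exercise them needs the
existing debug option turned on, e.g.:

  CONFIG_DEBUG_ENTRY=y
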
+2 -3
arch/x86/entry/entry_32.S
··· 933 933 934 934 .Lsysenter_past_esp: 935 935 pushl $__USER_DS /* pt_regs->ss */ 936 - pushl %ebp /* pt_regs->sp (stashed in bp) */ 936 + pushl $0 /* pt_regs->sp (placeholder) */ 937 937 pushfl /* pt_regs->flags (except IF = 0) */ 938 - orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ 939 938 pushl $__USER_CS /* pt_regs->cs */ 940 939 pushl $0 /* pt_regs->ip = 0 (placeholder) */ 941 940 pushl %eax /* pt_regs->orig_ax */ ··· 964 965 .Lsysenter_flags_fixed: 965 966 966 967 movl %esp, %eax 967 - call do_fast_syscall_32 968 + call do_SYSENTER_32 968 969 /* XEN PV guests always use IRET path */ 969 970 ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ 970 971 "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+16 -15
arch/x86/entry/entry_64_compat.S
··· 57 57 58 58 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 59 59 60 + /* Construct struct pt_regs on stack */ 61 + pushq $__USER32_DS /* pt_regs->ss */ 62 + pushq $0 /* pt_regs->sp = 0 (placeholder) */ 63 + 64 + /* 65 + * Push flags. This is nasty. First, interrupts are currently 66 + * off, but we need pt_regs->flags to have IF set. Second, if TS 67 + * was set in usermode, it's still set, and we're singlestepping 68 + * through this code. do_SYSENTER_32() will fix up IF. 69 + */ 70 + pushfq /* pt_regs->flags (except IF = 0) */ 71 + pushq $__USER32_CS /* pt_regs->cs */ 72 + pushq $0 /* pt_regs->ip = 0 (placeholder) */ 73 + SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) 74 + 60 75 /* 61 76 * User tracing code (ptrace or signal handlers) might assume that 62 77 * the saved RAX contains a 32-bit number when we're invoking a 32-bit ··· 81 66 */ 82 67 movl %eax, %eax 83 68 84 - /* Construct struct pt_regs on stack */ 85 - pushq $__USER32_DS /* pt_regs->ss */ 86 - pushq %rbp /* pt_regs->sp (stashed in bp) */ 87 - 88 - /* 89 - * Push flags. This is nasty. First, interrupts are currently 90 - * off, but we need pt_regs->flags to have IF set. Second, even 91 - * if TF was set when SYSENTER started, it's clear by now. We fix 92 - * that later using TIF_SINGLESTEP. 93 - */ 94 - pushfq /* pt_regs->flags (except IF = 0) */ 95 - orl $X86_EFLAGS_IF, (%rsp) /* Fix saved flags */ 96 - pushq $__USER32_CS /* pt_regs->cs */ 97 - pushq $0 /* pt_regs->ip = 0 (placeholder) */ 98 69 pushq %rax /* pt_regs->orig_ax */ 99 70 pushq %rdi /* pt_regs->di */ 100 71 pushq %rsi /* pt_regs->si */ ··· 136 135 .Lsysenter_flags_fixed: 137 136 138 137 movq %rsp, %rdi 139 - call do_fast_syscall_32 138 + call do_SYSENTER_32 140 139 /* XEN PV guests always use IRET path */ 141 140 ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \ 142 141 "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+5
arch/x86/include/asm/fpu/internal.h
··· 623 623 * MXCSR and XCR definitions: 624 624 */ 625 625 626 + static inline void ldmxcsr(u32 mxcsr) 627 + { 628 + asm volatile("ldmxcsr %0" :: "m" (mxcsr)); 629 + } 630 + 626 631 extern unsigned int mxcsr_feature_mask; 627 632 628 633 #define XCR_XFEATURE_ENABLED_MASK 0x00000000
+19 -28
arch/x86/include/asm/idtentry.h
··· 353 353 354 354 #else /* CONFIG_X86_64 */ 355 355 356 - /* Maps to a regular IDTENTRY on 32bit for now */ 357 - # define DECLARE_IDTENTRY_IST DECLARE_IDTENTRY 358 - # define DEFINE_IDTENTRY_IST DEFINE_IDTENTRY 359 - 360 356 /** 361 357 * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant 362 358 * @vector: Vector number (ignored for C) ··· 383 387 #endif /* !CONFIG_X86_64 */ 384 388 385 389 /* C-Code mapping */ 390 + #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW 391 + #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW 392 + 393 + #ifdef CONFIG_X86_64 386 394 #define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST 387 395 #define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST 388 396 #define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST 389 397 390 - #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW 391 - #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW 392 - 393 398 #define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST 394 399 #define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST 395 400 #define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST 396 - 397 - /** 398 - * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points 399 - * @vector: Vector number (ignored for C) 400 - * @func: Function name of the entry point 401 - * 402 - * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM 403 - * indirection magic. 404 - */ 405 - #define DECLARE_IDTENTRY_XEN(vector, func) \ 406 - asmlinkage void xen_asm_exc_xen##func(void); \ 407 - asmlinkage void asm_exc_xen##func(void) 401 + #endif 408 402 409 403 #else /* !__ASSEMBLY__ */ 410 404 ··· 441 455 # define DECLARE_IDTENTRY_MCE(vector, func) \ 442 456 DECLARE_IDTENTRY(vector, func) 443 457 444 - # define DECLARE_IDTENTRY_DEBUG(vector, func) \ 445 - DECLARE_IDTENTRY(vector, func) 446 - 447 458 /* No ASM emitted for DF as this goes through a C shim */ 448 459 # define DECLARE_IDTENTRY_DF(vector, func) 449 460 ··· 451 468 452 469 /* No ASM code emitted for NMI */ 453 470 #define DECLARE_IDTENTRY_NMI(vector, func) 454 - 455 - /* XEN NMI and DB wrapper */ 456 - #define DECLARE_IDTENTRY_XEN(vector, func) \ 457 - idtentry vector asm_exc_xen##func exc_##func has_error_code=0 458 471 459 472 /* 460 473 * ASM code to emit the common vector entry stubs where each stub is ··· 544 565 DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault); 545 566 546 567 #ifdef CONFIG_X86_MCE 568 + #ifdef CONFIG_X86_64 547 569 DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check); 570 + #else 571 + DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check); 572 + #endif 548 573 #endif 549 574 550 575 /* NMI */ 551 576 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi); 552 - DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi); 577 + #ifdef CONFIG_XEN_PV 578 + DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi); 579 + #endif 553 580 554 581 /* #DB */ 582 + #ifdef CONFIG_X86_64 555 583 DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug); 556 - DECLARE_IDTENTRY_XEN(X86_TRAP_DB, debug); 584 + #else 585 + DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug); 586 + #endif 587 + #ifdef CONFIG_XEN_PV 588 + DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug); 589 + #endif 557 590 558 591 /* #DF */ 559 592 DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault);
+10 -1
arch/x86/kernel/cpu/intel.c
··· 50 50 static u64 msr_test_ctrl_cache __ro_after_init; 51 51 52 52 /* 53 + * With a name like MSR_TEST_CTL it should go without saying, but don't touch 54 + * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it 55 + * on CPUs that do not support SLD can cause fireworks, even when writing '0'. 56 + */ 57 + static bool cpu_model_supports_sld __ro_after_init; 58 + 59 + /* 53 60 * Processors which have self-snooping capability can handle conflicting 54 61 * memory type across CPUs by snooping its own cache. However, there exists 55 62 * CPU models in which having conflicting memory types still leads to ··· 1078 1071 1079 1072 static void split_lock_init(void) 1080 1073 { 1081 - split_lock_verify_msr(sld_state != sld_off); 1074 + if (cpu_model_supports_sld) 1075 + split_lock_verify_msr(sld_state != sld_off); 1082 1076 } 1083 1077 1084 1078 static void split_lock_warn(unsigned long ip) ··· 1185 1177 return; 1186 1178 } 1187 1179 1180 + cpu_model_supports_sld = true; 1188 1181 split_lock_setup(); 1189 1182 }
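
For context, the "split lock" being detected here is a LOCK-prefixed
access that straddles a cache line. A minimal userspace illustration
(hypothetical test program, not part of this series) that trips the
detector on CPUs where split lock detection is supported and enabled:

  #include <stdint.h>

  /* 64-byte aligned buffer; a 4-byte access at offset 62 crosses the line. */
  static char buf[128] __attribute__((aligned(64)));

  int main(void)
  {
          volatile uint32_t *p = (volatile uint32_t *)(buf + 62);

          /* LOCK XADD across a cache-line boundary: a split lock. */
          __sync_fetch_and_add(p, 1);
          return 0;
  }
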
+3 -1
arch/x86/kernel/cpu/mce/core.c
··· 1901 1901 1902 1902 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) 1903 1903 { 1904 + WARN_ON_ONCE(user_mode(regs)); 1905 + 1904 1906 /* 1905 1907 * Only required when from kernel mode. See 1906 1908 * mce_check_crashing_cpu() for details. ··· 1956 1954 } 1957 1955 #else 1958 1956 /* 32bit unified entry point */ 1959 - DEFINE_IDTENTRY_MCE(exc_machine_check) 1957 + DEFINE_IDTENTRY_RAW(exc_machine_check) 1960 1958 { 1961 1959 unsigned long dr7; 1962 1960
+6
arch/x86/kernel/fpu/core.c
··· 101 101 copy_fpregs_to_fpstate(&current->thread.fpu); 102 102 } 103 103 __cpu_invalidate_fpregs_state(); 104 + 105 + if (boot_cpu_has(X86_FEATURE_XMM)) 106 + ldmxcsr(MXCSR_DEFAULT); 107 + 108 + if (boot_cpu_has(X86_FEATURE_FPU)) 109 + asm volatile ("fninit"); 104 110 } 105 111 EXPORT_SYMBOL_GPL(kernel_fpu_begin); 106 112
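
The practical effect is on in-kernel SIMD users: code bracketed by
kernel_fpu_begin()/kernel_fpu_end() no longer inherits whatever MXCSR
(or x87 state) the interrupted user task had loaded. A rough sketch of
the pattern this protects (hypothetical caller):

  #include <asm/fpu/api.h>

  static void do_simd_work(void)
  {
          kernel_fpu_begin();     /* now also resets MXCSR and runs fninit */
          /*
           * SSE/AVX code here can rely on the default rounding mode and
           * exception masks rather than stale user space FPU state.
           */
          kernel_fpu_end();
  }
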
+34 -1
arch/x86/kernel/ldt.c
··· 29 29 #include <asm/mmu_context.h> 30 30 #include <asm/pgtable_areas.h> 31 31 32 + #include <xen/xen.h> 33 + 32 34 /* This is a multiple of PAGE_SIZE. */ 33 35 #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) 34 36 ··· 545 543 return bytecount; 546 544 } 547 545 546 + static bool allow_16bit_segments(void) 547 + { 548 + if (!IS_ENABLED(CONFIG_X86_16BIT)) 549 + return false; 550 + 551 + #ifdef CONFIG_XEN_PV 552 + /* 553 + * Xen PV does not implement ESPFIX64, which means that 16-bit 554 + * segments will not work correctly. Until either Xen PV implements 555 + * ESPFIX64 and can signal this fact to the guest or unless someone 556 + * provides compelling evidence that allowing broken 16-bit segments 557 + * is worthwhile, disallow 16-bit segments under Xen PV. 558 + */ 559 + if (xen_pv_domain()) { 560 + static DEFINE_MUTEX(xen_warning); 561 + static bool warned; 562 + 563 + mutex_lock(&xen_warning); 564 + if (!warned) { 565 + pr_info("Warning: 16-bit segments do not work correctly in a Xen PV guest\n"); 566 + warned = true; 567 + } 568 + mutex_unlock(&xen_warning); 569 + 570 + return false; 571 + } 572 + #endif 573 + 574 + return true; 575 + } 576 + 548 577 static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) 549 578 { 550 579 struct mm_struct *mm = current->mm; ··· 607 574 /* The user wants to clear the entry. */ 608 575 memset(&ldt, 0, sizeof(ldt)); 609 576 } else { 610 - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { 577 + if (!ldt_info.seg_32bit && !allow_16bit_segments()) { 611 578 error = -EINVAL; 612 579 goto out; 613 580 }
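
From userspace the LDT change is observable through modify_ldt():
installing a descriptor with seg_32bit clear now fails in a Xen PV
guest. A hedged sketch (hypothetical test, error handling trimmed):

  #include <asm/ldt.h>
  #include <sys/syscall.h>
  #include <string.h>
  #include <stdio.h>
  #include <errno.h>
  #include <unistd.h>

  int main(void)
  {
          struct user_desc desc;

          memset(&desc, 0, sizeof(desc));
          desc.entry_number = 0;
          desc.limit        = 0xffff;
          desc.seg_32bit    = 0;  /* request a 16-bit segment */
          desc.contents     = MODIFY_LDT_CONTENTS_DATA;

          /* Expected to fail with EINVAL on Xen PV after this change. */
          if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) == -1)
                  printf("modify_ldt: %s\n", strerror(errno));
          return 0;
  }
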
+13 -1
arch/x86/kernel/traps.c
··· 870 870 trace_hardirqs_off_finish(); 871 871 872 872 /* 873 + * If something gets miswired and we end up here for a user mode 874 + * #DB, we will malfunction. 875 + */ 876 + WARN_ON_ONCE(user_mode(regs)); 877 + 878 + /* 873 879 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a 874 880 * watchpoint at the same time then that will still be handled. 875 881 */ ··· 893 887 static __always_inline void exc_debug_user(struct pt_regs *regs, 894 888 unsigned long dr6) 895 889 { 890 + /* 891 + * If something gets miswired and we end up here for a kernel mode 892 + * #DB, we will malfunction. 893 + */ 894 + WARN_ON_ONCE(!user_mode(regs)); 895 + 896 896 idtentry_enter_user(regs); 897 897 instrumentation_begin(); 898 898 ··· 929 917 } 930 918 #else 931 919 /* 32 bit does not have separate entry points. */ 932 - DEFINE_IDTENTRY_DEBUG(exc_debug) 920 + DEFINE_IDTENTRY_RAW(exc_debug) 933 921 { 934 922 unsigned long dr6, dr7; 935 923
+24 -4
arch/x86/xen/enlighten_pv.c
··· 598 598 } 599 599 600 600 #ifdef CONFIG_X86_64 601 + void noist_exc_debug(struct pt_regs *regs); 602 + 603 + DEFINE_IDTENTRY_RAW(xenpv_exc_nmi) 604 + { 605 + /* On Xen PV, NMI doesn't use IST. The C part is the sane as native. */ 606 + exc_nmi(regs); 607 + } 608 + 609 + DEFINE_IDTENTRY_RAW(xenpv_exc_debug) 610 + { 611 + /* 612 + * There's no IST on Xen PV, but we still need to dispatch 613 + * to the correct handler. 614 + */ 615 + if (user_mode(regs)) 616 + noist_exc_debug(regs); 617 + else 618 + exc_debug(regs); 619 + } 620 + 601 621 struct trap_array_entry { 602 622 void (*orig)(void); 603 623 void (*xen)(void); ··· 629 609 .xen = xen_asm_##func, \ 630 610 .ist_okay = ist_ok } 631 611 632 - #define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \ 612 + #define TRAP_ENTRY_REDIR(func, ist_ok) { \ 633 613 .orig = asm_##func, \ 634 - .xen = xen_asm_##xenfunc, \ 614 + .xen = xen_asm_xenpv_##func, \ 635 615 .ist_okay = ist_ok } 636 616 637 617 static struct trap_array_entry trap_array[] = { 638 - TRAP_ENTRY_REDIR(exc_debug, exc_xendebug, true ), 618 + TRAP_ENTRY_REDIR(exc_debug, true ), 639 619 TRAP_ENTRY(exc_double_fault, true ), 640 620 #ifdef CONFIG_X86_MCE 641 621 TRAP_ENTRY(exc_machine_check, true ), 642 622 #endif 643 - TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true ), 623 + TRAP_ENTRY_REDIR(exc_nmi, true ), 644 624 TRAP_ENTRY(exc_int3, false ), 645 625 TRAP_ENTRY(exc_overflow, false ), 646 626 #ifdef CONFIG_IA32_EMULATION
+18 -7
arch/x86/xen/xen-asm_64.S
··· 29 29 .endm 30 30 31 31 xen_pv_trap asm_exc_divide_error 32 - xen_pv_trap asm_exc_debug 33 - xen_pv_trap asm_exc_xendebug 32 + xen_pv_trap asm_xenpv_exc_debug 34 33 xen_pv_trap asm_exc_int3 35 - xen_pv_trap asm_exc_xennmi 34 + xen_pv_trap asm_xenpv_exc_nmi 36 35 xen_pv_trap asm_exc_overflow 37 36 xen_pv_trap asm_exc_bounds 38 37 xen_pv_trap asm_exc_invalid_op ··· 160 161 161 162 /* 32-bit compat sysenter target */ 162 163 SYM_FUNC_START(xen_sysenter_target) 163 - mov 0*8(%rsp), %rcx 164 - mov 1*8(%rsp), %r11 165 - mov 5*8(%rsp), %rsp 166 - jmp entry_SYSENTER_compat 164 + /* 165 + * NB: Xen is polite and clears TF from EFLAGS for us. This means 166 + * that we don't need to guard against single step exceptions here. 167 + */ 168 + popq %rcx 169 + popq %r11 170 + 171 + /* 172 + * Neither Xen nor the kernel really knows what the old SS and 173 + * CS were. The kernel expects __USER32_DS and __USER32_CS, so 174 + * report those values even though Xen will guess its own values. 175 + */ 176 + movq $__USER32_DS, 4*8(%rsp) 177 + movq $__USER32_CS, 1*8(%rsp) 178 + 179 + jmp entry_SYSENTER_compat_after_hwframe 167 180 SYM_FUNC_END(xen_sysenter_target) 168 181 169 182 #else /* !CONFIG_IA32_EMULATION */
+2 -2
tools/testing/selftests/x86/Makefile
··· 70 70 71 71 EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64) 72 72 73 - $(BINARIES_32): $(OUTPUT)/%_32: %.c 73 + $(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h 74 74 $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm 75 75 76 - $(BINARIES_64): $(OUTPUT)/%_64: %.c 76 + $(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h 77 77 $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl 78 78 79 79 # x86_64 users should be encouraged to install 32-bit libraries
+41
tools/testing/selftests/x86/helpers.h
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #ifndef __SELFTESTS_X86_HELPERS_H 3 + #define __SELFTESTS_X86_HELPERS_H 4 + 5 + #include <asm/processor-flags.h> 6 + 7 + static inline unsigned long get_eflags(void) 8 + { 9 + unsigned long eflags; 10 + 11 + asm volatile ( 12 + #ifdef __x86_64__ 13 + "subq $128, %%rsp\n\t" 14 + "pushfq\n\t" 15 + "popq %0\n\t" 16 + "addq $128, %%rsp" 17 + #else 18 + "pushfl\n\t" 19 + "popl %0" 20 + #endif 21 + : "=r" (eflags) :: "memory"); 22 + 23 + return eflags; 24 + } 25 + 26 + static inline void set_eflags(unsigned long eflags) 27 + { 28 + asm volatile ( 29 + #ifdef __x86_64__ 30 + "subq $128, %%rsp\n\t" 31 + "pushq %0\n\t" 32 + "popfq\n\t" 33 + "addq $128, %%rsp" 34 + #else 35 + "pushl %0\n\t" 36 + "popfl" 37 + #endif 38 + :: "r" (eflags) : "flags", "memory"); 39 + } 40 + 41 + #endif /* __SELFTESTS_X86_HELPERS_H */
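
The subq/addq $128 around the 64-bit push/pop keeps pushfq and popfq
from clobbering the red zone below %rsp in leaf functions. Typical use
in a selftest then looks roughly like this (sketch, mirroring what
syscall_nt.c does below):

  #include <sys/syscall.h>
  #include <unistd.h>
  #include "helpers.h"

  static void syscall_with_ac(void)
  {
          set_eflags(get_eflags() | X86_EFLAGS_AC);
          syscall(SYS_getpid);
          /* Restore a known-good EFLAGS value afterwards. */
          set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
  }
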
+2 -15
tools/testing/selftests/x86/single_step_syscall.c
··· 31 31 #include <sys/ptrace.h> 32 32 #include <sys/user.h> 33 33 34 + #include "helpers.h" 35 + 34 36 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), 35 37 int flags) 36 38 { ··· 68 66 # define WIDTH "l" 69 67 # define INT80_CLOBBERS 70 68 #endif 71 - 72 - static unsigned long get_eflags(void) 73 - { 74 - unsigned long eflags; 75 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 76 - return eflags; 77 - } 78 - 79 - static void set_eflags(unsigned long eflags) 80 - { 81 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 82 - : : "rm" (eflags) : "flags"); 83 - } 84 - 85 - #define X86_EFLAGS_TF (1UL << 8) 86 69 87 70 static void sigtrap(int sig, siginfo_t *info, void *ctx_void) 88 71 {
+1 -20
tools/testing/selftests/x86/syscall_arg_fault.c
··· 15 15 #include <setjmp.h> 16 16 #include <errno.h> 17 17 18 - #ifdef __x86_64__ 19 - # define WIDTH "q" 20 - #else 21 - # define WIDTH "l" 22 - #endif 18 + #include "helpers.h" 23 19 24 20 /* Our sigaltstack scratch space. */ 25 21 static unsigned char altstack_data[SIGSTKSZ]; 26 - 27 - static unsigned long get_eflags(void) 28 - { 29 - unsigned long eflags; 30 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 31 - return eflags; 32 - } 33 - 34 - static void set_eflags(unsigned long eflags) 35 - { 36 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 37 - : : "rm" (eflags) : "flags"); 38 - } 39 - 40 - #define X86_EFLAGS_TF (1UL << 8) 41 22 42 23 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), 43 24 int flags)
+28 -19
tools/testing/selftests/x86/syscall_nt.c
··· 13 13 #include <signal.h> 14 14 #include <err.h> 15 15 #include <sys/syscall.h> 16 - #include <asm/processor-flags.h> 17 16 18 - #ifdef __x86_64__ 19 - # define WIDTH "q" 20 - #else 21 - # define WIDTH "l" 22 - #endif 17 + #include "helpers.h" 23 18 24 19 static unsigned int nerrs; 25 - 26 - static unsigned long get_eflags(void) 27 - { 28 - unsigned long eflags; 29 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 30 - return eflags; 31 - } 32 - 33 - static void set_eflags(unsigned long eflags) 34 - { 35 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 36 - : : "rm" (eflags) : "flags"); 37 - } 38 20 39 21 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), 40 22 int flags) ··· 41 59 set_eflags(get_eflags() | extraflags); 42 60 syscall(SYS_getpid); 43 61 flags = get_eflags(); 62 + set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED); 44 63 if ((flags & extraflags) == extraflags) { 45 64 printf("[OK]\tThe syscall worked and flags are still set\n"); 46 65 } else { ··· 56 73 printf("[RUN]\tSet NT and issue a syscall\n"); 57 74 do_it(X86_EFLAGS_NT); 58 75 76 + printf("[RUN]\tSet AC and issue a syscall\n"); 77 + do_it(X86_EFLAGS_AC); 78 + 79 + printf("[RUN]\tSet NT|AC and issue a syscall\n"); 80 + do_it(X86_EFLAGS_NT | X86_EFLAGS_AC); 81 + 59 82 /* 60 83 * Now try it again with TF set -- TF forces returns via IRET in all 61 84 * cases except non-ptregs-using 64-bit full fast path syscalls. ··· 69 80 70 81 sethandler(SIGTRAP, sigtrap, 0); 71 82 83 + printf("[RUN]\tSet TF and issue a syscall\n"); 84 + do_it(X86_EFLAGS_TF); 85 + 72 86 printf("[RUN]\tSet NT|TF and issue a syscall\n"); 73 87 do_it(X86_EFLAGS_NT | X86_EFLAGS_TF); 88 + 89 + printf("[RUN]\tSet AC|TF and issue a syscall\n"); 90 + do_it(X86_EFLAGS_AC | X86_EFLAGS_TF); 91 + 92 + printf("[RUN]\tSet NT|AC|TF and issue a syscall\n"); 93 + do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF); 94 + 95 + /* 96 + * Now try DF. This is evil and it's plausible that we will crash 97 + * glibc, but glibc would have to do something rather surprising 98 + * for this to happen. 99 + */ 100 + printf("[RUN]\tSet DF and issue a syscall\n"); 101 + do_it(X86_EFLAGS_DF); 102 + 103 + printf("[RUN]\tSet TF|DF and issue a syscall\n"); 104 + do_it(X86_EFLAGS_TF | X86_EFLAGS_DF); 74 105 75 106 return nerrs == 0 ? 0 : 1; 76 107 }
+2 -13
tools/testing/selftests/x86/test_vsyscall.c
··· 20 20 #include <setjmp.h> 21 21 #include <sys/uio.h> 22 22 23 + #include "helpers.h" 24 + 23 25 #ifdef __x86_64__ 24 26 # define VSYS(x) (x) 25 27 #else ··· 495 493 } 496 494 497 495 #ifdef __x86_64__ 498 - #define X86_EFLAGS_TF (1UL << 8) 499 496 static volatile sig_atomic_t num_vsyscall_traps; 500 - 501 - static unsigned long get_eflags(void) 502 - { 503 - unsigned long eflags; 504 - asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags)); 505 - return eflags; 506 - } 507 - 508 - static void set_eflags(unsigned long eflags) 509 - { 510 - asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags"); 511 - } 512 497 513 498 static void sigtrap(int sig, siginfo_t *info, void *ctx_void) 514 499 {
+2 -21
tools/testing/selftests/x86/unwind_vdso.c
··· 11 11 #include <features.h> 12 12 #include <stdio.h> 13 13 14 + #include "helpers.h" 15 + 14 16 #if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16 15 17 16 18 int main() ··· 54 52 if (sigaction(sig, &sa, 0)) 55 53 err(1, "sigaction"); 56 54 } 57 - 58 - #ifdef __x86_64__ 59 - # define WIDTH "q" 60 - #else 61 - # define WIDTH "l" 62 - #endif 63 - 64 - static unsigned long get_eflags(void) 65 - { 66 - unsigned long eflags; 67 - asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags)); 68 - return eflags; 69 - } 70 - 71 - static void set_eflags(unsigned long eflags) 72 - { 73 - asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH 74 - : : "rm" (eflags) : "flags"); 75 - } 76 - 77 - #define X86_EFLAGS_TF (1UL << 8) 78 55 79 56 static volatile sig_atomic_t nerrs; 80 57 static unsigned long sysinfo;