Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/speculation: Add prctl for Speculative Store Bypass mitigation

Add prctl based control for Speculative Store Bypass mitigation and make it
the default mitigation for Intel and AMD.

Andi Kleen provided the following rationale (slightly redacted):

There are multiple levels of impact of Speculative Store Bypass:

1) JITed sandbox.
It cannot invoke system calls, but can do PRIME+PROBE and may have call
interfaces to other code

2) Native code process.
No protection inside the process at this level.

3) Kernel.

4) Between processes.

The prctl tries to protect against attacks mounted from case (1).

If the untrusted code can do random system calls then control is already
lost in a much worse way. So there needs to be system call protection in
some way (using a JIT not allowing them or seccomp). Or rather if the
process can subvert its environment somehow to do the prctl it can already
execute arbitrary code, which is much worse than SSB.

To put it differently, the point of the prctl is to not allow JITed code
to read data it shouldn't read from its JITed sandbox. If it already has
escaped its sandbox then it can already read everything it wants in its
address space, and do much worse.

The ability to control Speculative Store Bypass allows enabling the
protection selectively without affecting overall system performance.

Based on an initial patch from Tim Chen. Completely rewritten.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

+79 -11
+5 -1
Documentation/admin-guide/kernel-parameters.txt
··· 4053 4053 off - Unconditionally enable Speculative Store Bypass 4054 4054 auto - Kernel detects whether the CPU model contains an 4055 4055 implementation of Speculative Store Bypass and 4056 - picks the most appropriate mitigation 4056 + picks the most appropriate mitigation. 4057 + prctl - Control Speculative Store Bypass per thread 4058 + via prctl. Speculative Store Bypass is enabled 4059 + for a process by default. The state of the control 4060 + is inherited on fork. 4057 4061 4058 4062 Not specifying this option is equivalent to 4059 4063 spec_store_bypass_disable=auto.
+1
arch/x86/include/asm/nospec-branch.h
··· 232 232 enum ssb_mitigation { 233 233 SPEC_STORE_BYPASS_NONE, 234 234 SPEC_STORE_BYPASS_DISABLE, 235 + SPEC_STORE_BYPASS_PRCTL, 235 236 }; 236 237 237 238 extern char __indirect_thunk_start[];
+73 -10
arch/x86/kernel/cpu/bugs.c
··· 12 12 #include <linux/utsname.h> 13 13 #include <linux/cpu.h> 14 14 #include <linux/module.h> 15 + #include <linux/nospec.h> 16 + #include <linux/prctl.h> 15 17 16 18 #include <asm/spec-ctrl.h> 17 19 #include <asm/cmdline.h> ··· 414 412 SPEC_STORE_BYPASS_CMD_NONE, 415 413 SPEC_STORE_BYPASS_CMD_AUTO, 416 414 SPEC_STORE_BYPASS_CMD_ON, 415 + SPEC_STORE_BYPASS_CMD_PRCTL, 417 416 }; 418 417 419 418 static const char *ssb_strings[] = { 420 419 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 421 - [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled" 420 + [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 421 + [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl" 422 422 }; 423 423 424 424 static const struct { 425 425 const char *option; 426 426 enum ssb_mitigation_cmd cmd; 427 427 } ssb_mitigation_options[] = { 428 - { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 429 - { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 430 - { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 428 + { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 429 + { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 430 + { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 431 + { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 431 432 }; 432 433 433 434 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) ··· 480 475 481 476 switch (cmd) { 482 477 case SPEC_STORE_BYPASS_CMD_AUTO: 483 - /* 484 - * AMD platforms by default don't need SSB mitigation. 
485 - */ 486 - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 487 - break; 478 + /* Choose prctl as the default mode */ 479 + mode = SPEC_STORE_BYPASS_PRCTL; 480 + break; 488 481 case SPEC_STORE_BYPASS_CMD_ON: 489 482 mode = SPEC_STORE_BYPASS_DISABLE; 483 + break; 484 + case SPEC_STORE_BYPASS_CMD_PRCTL: 485 + mode = SPEC_STORE_BYPASS_PRCTL; 490 486 break; 491 487 case SPEC_STORE_BYPASS_CMD_NONE: 492 488 break; ··· 499 493 * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass 500 494 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 501 495 */ 502 - if (mode != SPEC_STORE_BYPASS_NONE) { 496 + if (mode == SPEC_STORE_BYPASS_DISABLE) { 503 497 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 504 498 /* 505 499 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses ··· 529 523 } 530 524 531 525 #undef pr_fmt 526 + 527 + static int ssb_prctl_set(unsigned long ctrl) 528 + { 529 + bool rds = !!test_tsk_thread_flag(current, TIF_RDS); 530 + 531 + if (ssb_mode != SPEC_STORE_BYPASS_PRCTL) 532 + return -ENXIO; 533 + 534 + if (ctrl == PR_SPEC_ENABLE) 535 + clear_tsk_thread_flag(current, TIF_RDS); 536 + else 537 + set_tsk_thread_flag(current, TIF_RDS); 538 + 539 + if (rds != !!test_tsk_thread_flag(current, TIF_RDS)) 540 + speculative_store_bypass_update(); 541 + 542 + return 0; 543 + } 544 + 545 + static int ssb_prctl_get(void) 546 + { 547 + switch (ssb_mode) { 548 + case SPEC_STORE_BYPASS_DISABLE: 549 + return PR_SPEC_DISABLE; 550 + case SPEC_STORE_BYPASS_PRCTL: 551 + if (test_tsk_thread_flag(current, TIF_RDS)) 552 + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 553 + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 554 + default: 555 + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 556 + return PR_SPEC_ENABLE; 557 + return PR_SPEC_NOT_AFFECTED; 558 + } 559 + } 560 + 561 + int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl) 562 + { 563 + if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE) 564 + return -ERANGE; 565 + 566 
+ switch (which) { 567 + case PR_SPEC_STORE_BYPASS: 568 + return ssb_prctl_set(ctrl); 569 + default: 570 + return -ENODEV; 571 + } 572 + } 573 + 574 + int arch_prctl_spec_ctrl_get(unsigned long which) 575 + { 576 + switch (which) { 577 + case PR_SPEC_STORE_BYPASS: 578 + return ssb_prctl_get(); 579 + default: 580 + return -ENODEV; 581 + } 582 + } 532 583 533 584 void x86_spec_ctrl_setup_ap(void) 534 585 {