Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86, smep, smap: Make the switching functions one-way

There is no fundamental reason why we should switch SMEP and SMAP on
during early cpu initialization just to switch them off again. Now
with %eflags and %cr4 forced to be initialized to a clean state, we
only need the one-way enable. Also, make the functions inline to make
them (somewhat) harder to abuse.

This does mean that SMEP and SMAP do not get initialized anywhere near
as early. Even using early_param() instead of __setup() doesn't give
us control early enough to do this during the early cpu initialization
phase. This seems reasonable to me, because SMEP and SMAP should not
matter until we have userspace to protect ourselves from, but it does
potentially make it possible for a bug involving a "leak of
permissions to userspace" to go uncaught.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

+18 -31
arch/x86/kernel/cpu/common.c
···
 	}
 #endif
 
-static int disable_smep __cpuinitdata;
 static __init int setup_disable_smep(char *arg)
 {
-	disable_smep = 1;
+	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
 
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_SMEP)) {
-		if (unlikely(disable_smep)) {
-			setup_clear_cpu_cap(X86_FEATURE_SMEP);
-			clear_in_cr4(X86_CR4_SMEP);
-		} else
-			set_in_cr4(X86_CR4_SMEP);
-	}
+	if (cpu_has(c, X86_FEATURE_SMEP))
+		set_in_cr4(X86_CR4_SMEP);
 }
 
-static int disable_smap __cpuinitdata;
 static __init int setup_disable_smap(char *arg)
 {
-	disable_smap = 1;
+	setup_clear_cpu_cap(X86_FEATURE_SMAP);
 	return 1;
 }
 __setup("nosmap", setup_disable_smap);
 
-static __cpuinit void setup_smap(struct cpuinfo_x86 *c)
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_SMAP)) {
-		if (unlikely(disable_smap)) {
-			setup_clear_cpu_cap(X86_FEATURE_SMAP);
-			clear_in_cr4(X86_CR4_SMAP);
-		} else {
-			set_in_cr4(X86_CR4_SMAP);
-			/*
-			 * Don't use clac() here since alternatives
-			 * haven't run yet...
-			 */
-			asm volatile(__stringify(__ASM_CLAC) ::: "memory");
-		}
-	}
+	unsigned long eflags;
+
+	/* This should have been cleared long ago */
+	raw_local_save_flags(eflags);
+	BUG_ON(eflags & X86_EFLAGS_AC);
+
+	if (cpu_has(c, X86_FEATURE_SMAP))
+		set_in_cr4(X86_CR4_SMAP);
 }
 
 /*
···
 	c->cpu_index = 0;
 	filter_cpuid_features(c, false);
 
-	setup_smep(c);
-	setup_smap(c);
-
 	if (this_cpu->c_bsp_init)
 		this_cpu->c_bsp_init(c);
 }
···
 		c->phys_proc_id = c->initial_apicid;
 	}
 
-	setup_smep(c);
-
 	get_model_name(c); /* Default name */
 
 	detect_nopl(c);
···
 
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
+
+	/* Set up SMEP/SMAP */
+	setup_smep(c);
+	setup_smap(c);
 
 	/*
 	 * The vendor-specific functions might have changed features.