Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/stackprotector/32: Make the canary into a regular percpu variable

On 32-bit kernels, the stackprotector canary is quite nasty -- it is
stored at %gs:(20), which is nasty because 32-bit kernels use %fs for
percpu storage. It's even nastier because it means that whether %gs
contains userspace state or kernel state while running kernel code
depends on whether stackprotector is enabled (this is
CONFIG_X86_32_LAZY_GS), and this setting radically changes the way
that segment selectors work. Supporting both variants is a
maintenance and testing mess.

Merely rearranging so that percpu and the stack canary
share the same segment would be messy as the 32-bit percpu address
layout isn't currently compatible with putting a variable at a fixed
offset.

Fortunately, GCC 8.1 added options that allow the stack canary to be
accessed as %fs:__stack_chk_guard, effectively turning it into an ordinary
percpu variable. This lets us get rid of all of the code to manage the
stack canary GDT descriptor and the CONFIG_X86_32_LAZY_GS mess.

(That name is special. We could use any symbol we want for the
%fs-relative mode, but for CONFIG_SMP=n, gcc refuses to let us use any
name other than __stack_chk_guard.)

Forcibly disable stackprotector on older compilers that don't support
the new options and turn the stack canary into a percpu variable. The
"lazy GS" approach is now used for all 32-bit configurations.

This also makes load_gs_index() work on 32-bit kernels. On 64-bit kernels,
it loads the GS selector and updates the user GSBASE accordingly. (This
is unchanged.) On 32-bit kernels, it loads the GS selector and updates
GSBASE, which is now always the user base. This means that the overall
effect is the same on 32-bit and 64-bit, which avoids some ifdeffery.

[ bp: Massage commit message. ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/c0ff7dba14041c7e5d1cae5d4df052f03759bef3.1613243844.git.luto@kernel.org

authored by

Andy Lutomirski and committed by
Borislav Petkov
3fb0fdb3 a38fd874

+60 -218
+2 -5
arch/x86/Kconfig
··· 360 360 def_bool y 361 361 depends on X86_64 && SMP 362 362 363 - config X86_32_LAZY_GS 364 - def_bool y 365 - depends on X86_32 && !STACKPROTECTOR 366 - 367 363 config ARCH_SUPPORTS_UPROBES 368 364 def_bool y 369 365 ··· 382 386 default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC)) 383 387 help 384 388 We have to make sure stack protector is unconditionally disabled if 385 - the compiler produces broken code. 389 + the compiler produces broken code or if it does not let us control 390 + the segment on 32-bit kernels. 386 391 387 392 menu "Processor type and features" 388 393
+8
arch/x86/Makefile
··· 79 79 80 80 # temporary until string.h is fixed 81 81 KBUILD_CFLAGS += -ffreestanding 82 + 83 + ifeq ($(CONFIG_STACKPROTECTOR),y) 84 + ifeq ($(CONFIG_SMP),y) 85 + KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard 86 + else 87 + KBUILD_CFLAGS += -mstack-protector-guard=global 88 + endif 89 + endif 82 90 else 83 91 BITS := 64 84 92 UTS_MACHINE := x86_64
+4 -52
arch/x86/entry/entry_32.S
··· 20 20 * 1C(%esp) - %ds 21 21 * 20(%esp) - %es 22 22 * 24(%esp) - %fs 23 - * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS 23 + * 28(%esp) - unused -- was %gs on old stackprotector kernels 24 24 * 2C(%esp) - orig_eax 25 25 * 30(%esp) - %eip 26 26 * 34(%esp) - %cs ··· 56 56 /* 57 57 * User gs save/restore 58 58 * 59 - * %gs is used for userland TLS and kernel only uses it for stack 60 - * canary which is required to be at %gs:20 by gcc. Read the comment 61 - * at the top of stackprotector.h for more info. 62 - * 63 - * Local labels 98 and 99 are used. 59 + * This is leftover junk from CONFIG_X86_32_LAZY_GS. A subsequent patch 60 + * will remove it entirely. 64 61 */ 65 - #ifdef CONFIG_X86_32_LAZY_GS 66 - 67 62 /* unfortunately push/pop can't be no-op */ 68 63 .macro PUSH_GS 69 64 pushl $0 ··· 81 86 .macro SET_KERNEL_GS reg 82 87 .endm 83 88 84 - #else /* CONFIG_X86_32_LAZY_GS */ 85 - 86 - .macro PUSH_GS 87 - pushl %gs 88 - .endm 89 - 90 - .macro POP_GS pop=0 91 - 98: popl %gs 92 - .if \pop <> 0 93 - add $\pop, %esp 94 - .endif 95 - .endm 96 - .macro POP_GS_EX 97 - .pushsection .fixup, "ax" 98 - 99: movl $0, (%esp) 99 - jmp 98b 100 - .popsection 101 - _ASM_EXTABLE(98b, 99b) 102 - .endm 103 - 104 - .macro PTGS_TO_GS 105 - 98: mov PT_GS(%esp), %gs 106 - .endm 107 - .macro PTGS_TO_GS_EX 108 - .pushsection .fixup, "ax" 109 - 99: movl $0, PT_GS(%esp) 110 - jmp 98b 111 - .popsection 112 - _ASM_EXTABLE(98b, 99b) 113 - .endm 114 - 115 - .macro GS_TO_REG reg 116 - movl %gs, \reg 117 - .endm 118 - .macro REG_TO_PTGS reg 119 - movl \reg, PT_GS(%esp) 120 - .endm 121 - .macro SET_KERNEL_GS reg 122 - movl $(__KERNEL_STACK_CANARY), \reg 123 - movl \reg, %gs 124 - .endm 125 - 126 - #endif /* CONFIG_X86_32_LAZY_GS */ 127 89 128 90 /* Unconditionally switch to user cr3 */ 129 91 .macro SWITCH_TO_USER_CR3 scratch_reg:req ··· 731 779 732 780 #ifdef CONFIG_STACKPROTECTOR 733 781 movl TASK_stack_canary(%edx), %ebx 734 - movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset 782 + 
movl %ebx, PER_CPU_VAR(__stack_chk_guard) 735 783 #endif 736 784 737 785 #ifdef CONFIG_RETPOLINE
+4 -11
arch/x86/include/asm/processor.h
··· 439 439 * GCC hardcodes the stack canary as %gs:40. Since the 440 440 * irq_stack is the object at %gs:0, we reserve the bottom 441 441 * 48 bytes of the irq stack for the canary. 442 + * 443 + * Once we are willing to require -mstack-protector-guard-symbol= 444 + * support for x86_64 stackprotector, we can get rid of this. 442 445 */ 443 446 char gs_base[40]; 444 447 unsigned long stack_canary; ··· 463 460 void current_save_fsgs(void); 464 461 #else /* X86_64 */ 465 462 #ifdef CONFIG_STACKPROTECTOR 466 - /* 467 - * Make sure stack canary segment base is cached-aligned: 468 - * "For Intel Atom processors, avoid non zero segment base address 469 - * that is not aligned to cache line boundary at all cost." 470 - * (Optim Ref Manual Assembly/Compiler Coding Rule 15.) 471 - */ 472 - struct stack_canary { 473 - char __pad[20]; /* canary at %gs:20 */ 474 - unsigned long canary; 475 - }; 476 - DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); 463 + DECLARE_PER_CPU(unsigned long, __stack_chk_guard); 477 464 #endif 478 465 DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); 479 466 DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
+4 -1
arch/x86/include/asm/ptrace.h
··· 37 37 unsigned short __esh; 38 38 unsigned short fs; 39 39 unsigned short __fsh; 40 - /* On interrupt, gs and __gsh store the vector number. */ 40 + /* 41 + * On interrupt, gs and __gsh store the vector number. They never 42 + * store gs any more. 43 + */ 41 44 unsigned short gs; 42 45 unsigned short __gsh; 43 46 /* On interrupt, this is the error code. */
+8 -22
arch/x86/include/asm/segment.h
··· 95 95 * 96 96 * 26 - ESPFIX small SS 97 97 * 27 - per-cpu [ offset to per-cpu data area ] 98 - * 28 - stack_canary-20 [ for stack protector ] <=== cacheline #8 98 + * 28 - unused 99 99 * 29 - unused 100 100 * 30 - unused 101 101 * 31 - TSS for double fault handler ··· 118 118 119 119 #define GDT_ENTRY_ESPFIX_SS 26 120 120 #define GDT_ENTRY_PERCPU 27 121 - #define GDT_ENTRY_STACK_CANARY 28 122 121 123 122 #define GDT_ENTRY_DOUBLEFAULT_TSS 31 124 123 ··· 155 156 # define __KERNEL_PERCPU (GDT_ENTRY_PERCPU*8) 156 157 #else 157 158 # define __KERNEL_PERCPU 0 158 - #endif 159 - 160 - #ifdef CONFIG_STACKPROTECTOR 161 - # define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8) 162 - #else 163 - # define __KERNEL_STACK_CANARY 0 164 159 #endif 165 160 166 161 #else /* 64-bit: */ ··· 357 364 asm("mov %%" #seg ",%0":"=r" (value) : : "memory") 358 365 359 366 /* 360 - * x86-32 user GS accessors: 367 + * x86-32 user GS accessors. This is ugly and could do with some cleaning up. 361 368 */ 362 369 #ifdef CONFIG_X86_32 363 - # ifdef CONFIG_X86_32_LAZY_GS 364 - # define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; }) 365 - # define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) 366 - # define task_user_gs(tsk) ((tsk)->thread.gs) 367 - # define lazy_save_gs(v) savesegment(gs, (v)) 368 - # define lazy_load_gs(v) loadsegment(gs, (v)) 369 - # else /* X86_32_LAZY_GS */ 370 - # define get_user_gs(regs) (u16)((regs)->gs) 371 - # define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) 372 - # define task_user_gs(tsk) (task_pt_regs(tsk)->gs) 373 - # define lazy_save_gs(v) do { } while (0) 374 - # define lazy_load_gs(v) do { } while (0) 375 - # endif /* X86_32_LAZY_GS */ 370 + # define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; }) 371 + # define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) 372 + # define task_user_gs(tsk) ((tsk)->thread.gs) 373 + # define lazy_save_gs(v) savesegment(gs, (v)) 374 + # define 
lazy_load_gs(v) loadsegment(gs, (v)) 375 + # define load_gs_index(v) loadsegment(gs, (v)) 376 376 #endif /* X86_32 */ 377 377 378 378 #endif /* !__ASSEMBLY__ */
+16 -63
arch/x86/include/asm/stackprotector.h
··· 5 5 * Stack protector works by putting predefined pattern at the start of 6 6 * the stack frame and verifying that it hasn't been overwritten when 7 7 * returning from the function. The pattern is called stack canary 8 - * and unfortunately gcc requires it to be at a fixed offset from %gs. 9 - * On x86_64, the offset is 40 bytes and on x86_32 20 bytes. x86_64 10 - * and x86_32 use segment registers differently and thus handles this 11 - * requirement differently. 8 + * and unfortunately gcc historically required it to be at a fixed offset 9 + * from the percpu segment base. On x86_64, the offset is 40 bytes. 12 10 * 13 - * On x86_64, %gs is shared by percpu area and stack canary. All 14 - * percpu symbols are zero based and %gs points to the base of percpu 15 - * area. The first occupant of the percpu area is always 16 - * fixed_percpu_data which contains stack_canary at offset 40. Userland 17 - * %gs is always saved and restored on kernel entry and exit using 18 - * swapgs, so stack protector doesn't add any complexity there. 11 + * The same segment is shared by percpu area and stack canary. On 12 + * x86_64, percpu symbols are zero based and %gs (64-bit) points to the 13 + * base of percpu area. The first occupant of the percpu area is always 14 + * fixed_percpu_data which contains stack_canary at the appropriate 15 + * offset. On x86_32, the stack canary is just a regular percpu 16 + * variable. 19 17 * 20 - * On x86_32, it's slightly more complicated. As in x86_64, %gs is 21 - * used for userland TLS. Unfortunately, some processors are much 22 - * slower at loading segment registers with different value when 23 - * entering and leaving the kernel, so the kernel uses %fs for percpu 24 - * area and manages %gs lazily so that %gs is switched only when 25 - * necessary, usually during task switch. 18 + * Putting percpu data in %fs on 32-bit is a minor optimization compared to 19 + * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely 20 + * to load 0 into %fs on exit to usermode, whereas with percpu data in 21 + * %gs, we are likely to load a non-null %gs on return to user mode. 26 22 * 27 - * As gcc requires the stack canary at %gs:20, %gs can't be managed 28 - * lazily if stack protector is enabled, so the kernel saves and 29 - * restores userland %gs on kernel entry and exit. This behavior is 30 - * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in 31 - * system.h to hide the details. 23 + * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector 24 + * support, we can remove some of this complexity. 32 25 */ 33 26 34 27 #ifndef _ASM_STACKPROTECTOR_H
··· 71 86 #ifdef CONFIG_X86_64 72 87 this_cpu_write(fixed_percpu_data.stack_canary, canary); 73 88 #else 74 - this_cpu_write(stack_canary.canary, canary); 89 + this_cpu_write(__stack_chk_guard, canary); 75 90 #endif 76 91 } 77 92 ··· 80 95 #ifdef CONFIG_X86_64 81 96 per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary; 82 97 #else 83 - per_cpu(stack_canary.canary, cpu) = idle->stack_canary; 84 - #endif 85 - } 86 - 87 - static inline void setup_stack_canary_segment(int cpu) 88 - { 89 - #ifdef CONFIG_X86_32 90 - unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); 91 - struct desc_struct *gdt_table = get_cpu_gdt_rw(cpu); 92 - struct desc_struct desc; 93 - 94 - desc = gdt_table[GDT_ENTRY_STACK_CANARY]; 95 - set_desc_base(&desc, canary); 96 - write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); 97 - #endif 98 - } 99 - 100 - static inline void load_stack_canary_segment(void) 101 - { 102 - #ifdef CONFIG_X86_32 103 - asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory"); 98 + per_cpu(__stack_chk_guard, cpu) = idle->stack_canary; 104 99 #endif 105 100 } 106 101 107 102 #else /* STACKPROTECTOR */ 108 103 109 - #define GDT_STACK_CANARY_INIT 110 - 111 104 /* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */ 112 - 113 - static inline void setup_stack_canary_segment(int cpu) 114 - { } 115 105 116 106 static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle) 117 107 { } 118 - 119 - static inline void load_stack_canary_segment(void) 120 - { 121 - #ifdef CONFIG_X86_32 122 - asm volatile ("mov %0, %%gs" : : "r" (0)); 123 - #endif 124 - } 125 108 126 109 #endif /* STACKPROTECTOR */ 127 110 #endif /* _ASM_STACKPROTECTOR_H */
+2 -4
arch/x86/include/asm/suspend_32.h
··· 13 13 /* image of the saved processor state */ 14 14 struct saved_context { 15 15 /* 16 - * On x86_32, all segment registers, with the possible exception of 17 - * gs, are saved at kernel entry in pt_regs. 16 + * On x86_32, all segment registers except gs are saved at kernel 17 + * entry in pt_regs. 18 18 */ 19 - #ifdef CONFIG_X86_32_LAZY_GS 20 19 u16 gs; 21 - #endif 22 20 unsigned long cr0, cr2, cr3, cr4; 23 21 u64 misc_enable; 24 22 bool misc_enable_saved;
-5
arch/x86/kernel/asm-offsets_32.c
··· 53 53 offsetof(struct cpu_entry_area, tss.x86_tss.sp1) - 54 54 offsetofend(struct cpu_entry_area, entry_stack_page.stack)); 55 55 56 - #ifdef CONFIG_STACKPROTECTOR 57 - BLANK(); 58 - OFFSET(stack_canary_offset, stack_canary, canary); 59 - #endif 60 - 61 56 BLANK(); 62 57 DEFINE(EFI_svam, offsetof(efi_runtime_services_t, set_virtual_address_map)); 63 58 }
+2 -3
arch/x86/kernel/cpu/common.c
··· 161 161 162 162 [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), 163 163 [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), 164 - GDT_STACK_CANARY_INIT 165 164 #endif 166 165 } }; 167 166 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); ··· 598 599 __loadsegment_simple(gs, 0); 599 600 wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); 600 601 #endif 601 - load_stack_canary_segment(); 602 602 } 603 603 604 604 #ifdef CONFIG_X86_32 ··· 1794 1796 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); 1795 1797 1796 1798 #ifdef CONFIG_STACKPROTECTOR 1797 - DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); 1799 + DEFINE_PER_CPU(unsigned long, __stack_chk_guard); 1800 + EXPORT_PER_CPU_SYMBOL(__stack_chk_guard); 1798 1801 #endif 1799 1802 1800 1803 #endif /* CONFIG_X86_64 */
+1 -3
arch/x86/kernel/doublefault_32.c
··· 100 100 .ss = __KERNEL_DS, 101 101 .ds = __USER_DS, 102 102 .fs = __KERNEL_PERCPU, 103 - #ifndef CONFIG_X86_32_LAZY_GS 104 - .gs = __KERNEL_STACK_CANARY, 105 - #endif 103 + .gs = 0, 106 104 107 105 .__cr3 = __pa_nodebug(swapper_pg_dir), 108 106 },
+2 -16
arch/x86/kernel/head_32.S
··· 318 318 movl $(__KERNEL_PERCPU), %eax 319 319 movl %eax,%fs # set this cpu's percpu 320 320 321 - movl $(__KERNEL_STACK_CANARY),%eax 322 - movl %eax,%gs 321 + xorl %eax,%eax 322 + movl %eax,%gs # clear possible garbage in %gs 323 323 324 324 xorl %eax,%eax # Clear LDT 325 325 lldt %ax ··· 339 339 */ 340 340 __INIT 341 341 setup_once: 342 - #ifdef CONFIG_STACKPROTECTOR 343 - /* 344 - * Configure the stack canary. The linker can't handle this by 345 - * relocation. Manually set base address in stack canary 346 - * segment descriptor. 347 - */ 348 - movl $gdt_page,%eax 349 - movl $stack_canary,%ecx 350 - movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) 351 - shrl $16, %ecx 352 - movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) 353 - movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) 354 - #endif 355 - 356 342 andl $0,setup_once_ref /* Once is enough, thanks */ 357 343 ret 358 344
-1
arch/x86/kernel/setup_percpu.c
··· 224 224 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); 225 225 per_cpu(cpu_number, cpu) = cpu; 226 226 setup_percpu_segment(cpu); 227 - setup_stack_canary_segment(cpu); 228 227 /* 229 228 * Copy data used in early init routines from the 230 229 * initial arrays to the per cpu data areas. These
+1 -7
arch/x86/kernel/tls.c
··· 164 164 savesegment(fs, sel); 165 165 if (sel == modified_sel) 166 166 loadsegment(fs, sel); 167 + #endif 167 168 168 169 savesegment(gs, sel); 169 170 if (sel == modified_sel) 170 171 load_gs_index(sel); 171 - #endif 172 - 173 - #ifdef CONFIG_X86_32_LAZY_GS 174 - savesegment(gs, sel); 175 - if (sel == modified_sel) 176 - loadsegment(gs, sel); 177 - #endif 178 172 } else { 179 173 #ifdef CONFIG_X86_64 180 174 if (p->thread.fsindex == modified_sel)
-4
arch/x86/lib/insn-eval.c
··· 404 404 case INAT_SEG_REG_FS: 405 405 return (unsigned short)(regs->fs & 0xffff); 406 406 case INAT_SEG_REG_GS: 407 - /* 408 - * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. 409 - * The macro below takes care of both cases. 410 - */ 411 407 return get_user_gs(regs); 412 408 case INAT_SEG_REG_IGNORE: 413 409 default:
-14
arch/x86/platform/pvh/head.S
··· 46 46 47 47 #define PVH_GDT_ENTRY_CS 1 48 48 #define PVH_GDT_ENTRY_DS 2 49 - #define PVH_GDT_ENTRY_CANARY 3 50 49 #define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8) 51 50 #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) 52 - #define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8) 53 51 54 52 SYM_CODE_START_LOCAL(pvh_start_xen) 55 53 cld ··· 109 111 110 112 #else /* CONFIG_X86_64 */ 111 113 112 - /* Set base address in stack canary descriptor. */ 113 - movl $_pa(gdt_start),%eax 114 - movl $_pa(canary),%ecx 115 - movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax) 116 - shrl $16, %ecx 117 - movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax) 118 - movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax) 119 - 120 - mov $PVH_CANARY_SEL,%eax 121 - mov %eax,%gs 122 - 123 114 call mk_early_pgtbl_32 124 115 125 116 mov $_pa(initial_page_table), %eax ··· 152 165 .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */ 153 166 #endif 154 167 .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */ 155 - .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ 156 168 SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end) 157 169 158 170 .balign 16
+1 -5
arch/x86/power/cpu.c
··· 99 99 /* 100 100 * segment registers 101 101 */ 102 - #ifdef CONFIG_X86_32_LAZY_GS 103 102 savesegment(gs, ctxt->gs); 104 - #endif 105 103 #ifdef CONFIG_X86_64 106 - savesegment(gs, ctxt->gs); 107 104 savesegment(fs, ctxt->fs); 108 105 savesegment(ds, ctxt->ds); 109 106 savesegment(es, ctxt->es); ··· 229 232 wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); 230 233 #else 231 234 loadsegment(fs, __KERNEL_PERCPU); 232 - loadsegment(gs, __KERNEL_STACK_CANARY); 233 235 #endif 234 236 235 237 /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */ ··· 251 255 */ 252 256 wrmsrl(MSR_FS_BASE, ctxt->fs_base); 253 257 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); 254 - #elif defined(CONFIG_X86_32_LAZY_GS) 258 + #else 255 259 loadsegment(gs, ctxt->gs); 256 260 #endif 257 261
-1
arch/x86/xen/enlighten_pv.c
··· 1204 1204 pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot; 1205 1205 pv_ops.cpu.load_gdt = xen_load_gdt_boot; 1206 1206 1207 - setup_stack_canary_segment(cpu); 1208 1207 switch_to_new_gdt(cpu); 1209 1208 1210 1209 pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
+5 -1
scripts/gcc-x86_32-has-stack-protector.sh
··· 1 1 #!/bin/sh 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 - echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs" 4 + # This requires GCC 8.1 or better. Specifically, we require 5 + # -mstack-protector-guard-reg, added by 6 + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708 7 + 8 + echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"