Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: Add stackprotector support

Stackprotector support was previously unavailable on s390 because by
default compilers generate code which is not suitable for the kernel:
the canary value is accessed via thread local storage, where the address
of thread local storage is within access registers 0 and 1.

Using those registers also for the kernel would come with a significant
performance impact and more complicated kernel entry/exit code, since
access register contents would have to be exchanged on every kernel entry
and exit.

With the upcoming gcc 16 release new compiler options will become available
which allow generating code suitable for the kernel. [1]

Compiler option -mstack-protector-guard=global instructs gcc to generate
stackprotector code that refers to a global stackprotector canary value via
symbol __stack_chk_guard. Access to this value is guaranteed to occur via
larl and lgrl instructions.

Furthermore, compiler option -mstack-protector-guard-record generates a
section containing all code addresses that reference the canary value.

To allow for per task canary values the instructions which load the address
of __stack_chk_guard are patched so they access a lowcore field instead: a
per task canary value is available within the task_struct of each task, and
is written to the per-cpu lowcore location on each context switch.

Also add sanity checks and a debugging option to be consistent with other
kernel code patching mechanisms.

Full debugging output can be enabled with the following kernel command line
options:

debug_stackprotector
bootdebug
ignore_loglevel
earlyprintk
dyndbg="file stackprotector.c +p"

Example debug output:

stackprot: 0000021e402d4eda: c010005a9ae3 -> c01f00070240

where "<insn address>: <old insn> -> <new insn>".

[1] gcc commit 0cd1f03939d5 ("s390: Support global stack protector")

Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

+269 -4
+4
arch/s390/Kconfig
··· 69 69 Clang versions before 19.1.0 do not support A, 70 70 O, and R inline assembly format flags. 71 71 72 + config CC_HAS_STACKPROTECTOR_GLOBAL 73 + def_bool $(cc-option, -mstack-protector-guard=global -mstack-protector-guard-record) 74 + 72 75 config S390 73 76 def_bool y 74 77 # ··· 248 245 select HAVE_SAMPLE_FTRACE_DIRECT_MULTI 249 246 select HAVE_SETUP_PER_CPU_AREA 250 247 select HAVE_SOFTIRQ_ON_OWN_STACK 248 + select HAVE_STACKPROTECTOR if CC_HAS_STACKPROTECTOR_GLOBAL 251 249 select HAVE_SYSCALL_TRACEPOINTS 252 250 select HAVE_VIRT_CPU_ACCOUNTING 253 251 select HAVE_VIRT_CPU_ACCOUNTING_IDLE
+4
arch/s390/Makefile
··· 89 89 aflags-y += -DCC_USING_EXPOLINE 90 90 endif 91 91 92 + ifeq ($(CONFIG_STACKPROTECTOR),y) 93 + KBUILD_CFLAGS += -mstack-protector-guard=global -mstack-protector-guard-record 94 + endif 95 + 92 96 ifdef CONFIG_FUNCTION_TRACER 93 97 ifeq ($(call cc-option,-mfentry -mnop-mcount),) 94 98 # make use of hotpatch feature if the compiler supports it
+1
arch/s390/boot/Makefile
··· 32 32 obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o 33 33 obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o 34 34 obj-$(CONFIG_KMSAN) += kmsan.o 35 + obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o 35 36 obj-all := $(obj-y) piggy.o syms.o 36 37 37 38 targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
+4
arch/s390/boot/boot.h
··· 28 28 unsigned long invalid_pg_dir_off; 29 29 unsigned long alt_instructions; 30 30 unsigned long alt_instructions_end; 31 + #ifdef CONFIG_STACKPROTECTOR 32 + unsigned long stack_prot_start; 33 + unsigned long stack_prot_end; 34 + #endif 31 35 #ifdef CONFIG_KASAN 32 36 unsigned long kasan_early_shadow_page_off; 33 37 unsigned long kasan_early_shadow_pte_off;
+6
arch/s390/boot/ipl_parm.c
··· 3 3 #include <linux/init.h> 4 4 #include <linux/ctype.h> 5 5 #include <linux/pgtable.h> 6 + #include <asm/arch-stackprotector.h> 6 7 #include <asm/abs_lowcore.h> 7 8 #include <asm/page-states.h> 8 9 #include <asm/machine.h> ··· 294 293 if (!rc && !enabled) 295 294 cmma_flag = 0; 296 295 } 296 + 297 + #ifdef CONFIG_STACKPROTECTOR 298 + if (!strcmp(param, "debug_stackprotector")) 299 + stack_protector_debug = 1; 300 + #endif 297 301 298 302 #if IS_ENABLED(CONFIG_KVM) 299 303 if (!strcmp(param, "prot_virt")) {
+6
arch/s390/boot/stackprotector.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #define boot_fmt(fmt) "stackprot: " fmt 4 + 5 + #include "boot.h" 6 + #include "../kernel/stackprotector.c"
+8
arch/s390/boot/startup.c
··· 20 20 #include <asm/uv.h> 21 21 #include <asm/abs_lowcore.h> 22 22 #include <asm/physmem_info.h> 23 + #include <asm/stacktrace.h> 24 + #include <asm/asm-offsets.h> 25 + #include <asm/arch-stackprotector.h> 23 26 #include "decompressor.h" 24 27 #include "boot.h" 25 28 #include "uv.h" ··· 480 477 vmlinux.invalid_pg_dir_off += offset; 481 478 vmlinux.alt_instructions += offset; 482 479 vmlinux.alt_instructions_end += offset; 480 + #ifdef CONFIG_STACKPROTECTOR 481 + vmlinux.stack_prot_start += offset; 482 + vmlinux.stack_prot_end += offset; 483 + #endif 483 484 #ifdef CONFIG_KASAN 484 485 vmlinux.kasan_early_shadow_page_off += offset; 485 486 vmlinux.kasan_early_shadow_pte_off += offset; ··· 629 622 __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions, 630 623 (struct alt_instr *)_vmlinux_info.alt_instructions_end, 631 624 ALT_CTX_EARLY); 625 + stack_protector_apply_early(text_lma); 632 626 633 627 /* 634 628 * Save KASLR offset for early dumps, before vmcore_info is set.
+25
arch/s390/include/asm/arch-stackprotector.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_S390_ARCH_STACKPROTECTOR_H 4 + #define _ASM_S390_ARCH_STACKPROTECTOR_H 5 + 6 + extern unsigned long __stack_chk_guard; 7 + extern int stack_protector_debug; 8 + 9 + void __stack_protector_apply_early(unsigned long kernel_start); 10 + int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start); 11 + 12 + static inline void stack_protector_apply_early(unsigned long kernel_start) 13 + { 14 + if (IS_ENABLED(CONFIG_STACKPROTECTOR)) 15 + __stack_protector_apply_early(kernel_start); 16 + } 17 + 18 + static inline int stack_protector_apply(unsigned long *start, unsigned long *end) 19 + { 20 + if (IS_ENABLED(CONFIG_STACKPROTECTOR)) 21 + return __stack_protector_apply(start, end, 0); 22 + return 0; 23 + } 24 + 25 + #endif /* _ASM_S390_ARCH_STACKPROTECTOR_H */
+2 -1
arch/s390/include/asm/lowcore.h
··· 100 100 101 101 /* Save areas. */ 102 102 __u64 save_area[8]; /* 0x0200 */ 103 - __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */ 103 + __u64 stack_canary; /* 0x0240 */ 104 + __u8 pad_0x0248[0x0280-0x0248]; /* 0x0248 */ 104 105 __u64 save_area_restart[1]; /* 0x0280 */ 105 106 106 107 __u64 pcpu; /* 0x0288 */
+16
arch/s390/include/asm/stackprotector.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_S390_STACKPROTECTOR_H 4 + #define _ASM_S390_STACKPROTECTOR_H 5 + 6 + #include <linux/sched.h> 7 + #include <asm/current.h> 8 + #include <asm/lowcore.h> 9 + 10 + static __always_inline void boot_init_stack_canary(void) 11 + { 12 + current->stack_canary = get_random_canary(); 13 + get_lowcore()->stack_canary = current->stack_canary; 14 + } 15 + 16 + #endif /* _ASM_S390_STACKPROTECTOR_H */
+1 -1
arch/s390/kernel/Makefile
··· 67 67 obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o 68 68 obj-$(CONFIG_UPROBES) += uprobes.o 69 69 obj-$(CONFIG_JUMP_LABEL) += jump_label.o 70 - 70 + obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o 71 71 obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o 72 72 obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o 73 73 obj-$(CONFIG_CERT_STORE) += cert_store.o
+4
arch/s390/kernel/asm-offsets.c
··· 21 21 OFFSET(__TASK_stack, task_struct, stack); 22 22 OFFSET(__TASK_thread, task_struct, thread); 23 23 OFFSET(__TASK_pid, task_struct, pid); 24 + #ifdef CONFIG_STACKPROTECTOR 25 + OFFSET(__TASK_stack_canary, task_struct, stack_canary); 26 + #endif 24 27 BLANK(); 25 28 /* thread struct offsets */ 26 29 OFFSET(__THREAD_ksp, thread_struct, ksp); ··· 142 139 OFFSET(__LC_CURRENT_PID, lowcore, current_pid); 143 140 OFFSET(__LC_LAST_BREAK, lowcore, last_break); 144 141 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 142 + OFFSET(__LC_STACK_CANARY, lowcore, stack_canary); 145 143 OFFSET(__LC_DUMP_REIPL, lowcore, ipib); 146 144 OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info); 147 145 OFFSET(__LC_OS_INFO, lowcore, os_info);
+6 -2
arch/s390/kernel/entry.S
··· 162 162 stg %r3,__LC_CURRENT(%r13) # store task struct of next 163 163 stg %r15,__LC_KERNEL_STACK(%r13) # store end of kernel stack 164 164 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next 165 - aghi %r3,__TASK_pid 166 - mvc __LC_CURRENT_PID(4,%r13),0(%r3) # store pid of next 165 + aghik %r4,%r3,__TASK_pid 166 + mvc __LC_CURRENT_PID(4,%r13),0(%r4) # store pid of next 167 167 ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40) 168 + #ifdef CONFIG_STACKPROTECTOR 169 + lg %r3,__TASK_stack_canary(%r3) 170 + stg %r3,__LC_STACK_CANARY(%r13) 171 + #endif 168 172 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 169 173 BR_EX %r14 170 174 SYM_FUNC_END(__switch_to_asm)
+9
arch/s390/kernel/module.c
··· 22 22 #include <linux/bug.h> 23 23 #include <linux/memory.h> 24 24 #include <linux/execmem.h> 25 + #include <asm/arch-stackprotector.h> 25 26 #include <asm/alternative.h> 26 27 #include <asm/nospec-branch.h> 27 28 #include <asm/facility.h> 28 29 #include <asm/ftrace.lds.h> 29 30 #include <asm/set_memory.h> 30 31 #include <asm/setup.h> 32 + #include <asm/asm-offsets.h> 31 33 32 34 #if 0 33 35 #define DEBUGP printk ··· 526 524 if (IS_ENABLED(CONFIG_EXPOLINE) && 527 525 (str_has_prefix(secname, ".s390_return"))) 528 526 nospec_revert(aseg, aseg + s->sh_size); 527 + 528 + if (IS_ENABLED(CONFIG_STACKPROTECTOR) && 529 + (str_has_prefix(secname, "__stack_protector_loc"))) { 530 + rc = stack_protector_apply(aseg, aseg + s->sh_size); 531 + if (rc) 532 + break; 533 + } 529 534 530 535 #ifdef CONFIG_FUNCTION_TRACER 531 536 if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
+3
arch/s390/kernel/smp.c
··· 280 280 lc->hardirq_timer = tsk->thread.hardirq_timer; 281 281 lc->softirq_timer = tsk->thread.softirq_timer; 282 282 lc->steal_timer = 0; 283 + #ifdef CONFIG_STACKPROTECTOR 284 + lc->stack_canary = tsk->stack_canary; 285 + #endif 283 286 } 284 287 285 288 static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
+156
arch/s390/kernel/stackprotector.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #ifndef pr_fmt 4 + #define pr_fmt(fmt) "stackprot: " fmt 5 + #endif 6 + 7 + #include <linux/export.h> 8 + #include <linux/uaccess.h> 9 + #include <linux/printk.h> 10 + #include <asm/abs_lowcore.h> 11 + #include <asm/sections.h> 12 + #include <asm/machine.h> 13 + #include <asm/asm-offsets.h> 14 + #include <asm/arch-stackprotector.h> 15 + 16 + #ifdef __DECOMPRESSOR 17 + 18 + #define DEBUGP boot_debug 19 + #define EMERGP boot_emerg 20 + #define PANIC boot_panic 21 + 22 + #else /* __DECOMPRESSOR */ 23 + 24 + #define DEBUGP pr_debug 25 + #define EMERGP pr_emerg 26 + #define PANIC panic 27 + 28 + #endif /* __DECOMPRESSOR */ 29 + 30 + int __bootdata_preserved(stack_protector_debug); 31 + 32 + unsigned long __stack_chk_guard; 33 + EXPORT_SYMBOL(__stack_chk_guard); 34 + 35 + struct insn_ril { 36 + u8 opc1 : 8; 37 + u8 r1 : 4; 38 + u8 opc2 : 4; 39 + u32 imm; 40 + } __packed; 41 + 42 + /* 43 + * Convert a virtual instruction address to a real instruction address. The 44 + * decompressor needs to patch instructions within the kernel image based on 45 + * their virtual addresses, while dynamic address translation is still 46 + * disabled. Therefore a translation from virtual kernel image addresses to 47 + * the corresponding physical addresses is required. 48 + * 49 + * After dynamic address translation is enabled and when the kernel needs to 50 + * patch instructions such a translation is not required since the addresses 51 + * are identical. 
52 + */ 53 + static struct insn_ril *vaddress_to_insn(unsigned long vaddress) 54 + { 55 + #ifdef __DECOMPRESSOR 56 + return (struct insn_ril *)__kernel_pa(vaddress); 57 + #else 58 + return (struct insn_ril *)vaddress; 59 + #endif 60 + } 61 + 62 + static unsigned long insn_to_vaddress(struct insn_ril *insn) 63 + { 64 + #ifdef __DECOMPRESSOR 65 + return (unsigned long)__kernel_va(insn); 66 + #else 67 + return (unsigned long)insn; 68 + #endif 69 + } 70 + 71 + #define INSN_RIL_STRING_SIZE (sizeof(struct insn_ril) * 2 + 1) 72 + 73 + static void insn_ril_to_string(char *str, struct insn_ril *insn) 74 + { 75 + u8 *ptr = (u8 *)insn; 76 + int i; 77 + 78 + for (i = 0; i < sizeof(*insn); i++) 79 + hex_byte_pack(&str[2 * i], ptr[i]); 80 + str[2 * i] = 0; 81 + } 82 + 83 + static void stack_protector_dump(struct insn_ril *old, struct insn_ril *new) 84 + { 85 + char ostr[INSN_RIL_STRING_SIZE]; 86 + char nstr[INSN_RIL_STRING_SIZE]; 87 + 88 + insn_ril_to_string(ostr, old); 89 + insn_ril_to_string(nstr, new); 90 + DEBUGP("%016lx: %s -> %s\n", insn_to_vaddress(old), ostr, nstr); 91 + } 92 + 93 + static int stack_protector_verify(struct insn_ril *insn, unsigned long kernel_start) 94 + { 95 + char istr[INSN_RIL_STRING_SIZE]; 96 + unsigned long vaddress, offset; 97 + 98 + /* larl */ 99 + if (insn->opc1 == 0xc0 && insn->opc2 == 0x0) 100 + return 0; 101 + /* lgrl */ 102 + if (insn->opc1 == 0xc4 && insn->opc2 == 0x8) 103 + return 0; 104 + insn_ril_to_string(istr, insn); 105 + vaddress = insn_to_vaddress(insn); 106 + if (__is_defined(__DECOMPRESSOR)) { 107 + offset = (unsigned long)insn - kernel_start + TEXT_OFFSET; 108 + EMERGP("Unexpected instruction at %016lx/%016lx: %s\n", vaddress, offset, istr); 109 + PANIC("Stackprotector error\n"); 110 + } else { 111 + EMERGP("Unexpected instruction at %016lx: %s\n", vaddress, istr); 112 + } 113 + return -EINVAL; 114 + } 115 + 116 + int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start) 117 + { 118 + 
unsigned long canary, *loc; 119 + struct insn_ril *insn, new; 120 + int rc; 121 + 122 + /* 123 + * Convert LARL/LGRL instructions to LLILF so register R1 contains the 124 + * address of the per-cpu / per-process stack canary: 125 + * 126 + * LARL/LGRL R1,__stack_chk_guard => LLILF R1,__lc_stack_canary 127 + */ 128 + canary = __LC_STACK_CANARY; 129 + if (machine_has_relocated_lowcore()) 130 + canary += LOWCORE_ALT_ADDRESS; 131 + for (loc = start; loc < end; loc++) { 132 + insn = vaddress_to_insn(*loc); 133 + rc = stack_protector_verify(insn, kernel_start); 134 + if (rc) 135 + return rc; 136 + new = *insn; 137 + new.opc1 = 0xc0; 138 + new.opc2 = 0xf; 139 + new.imm = canary; 140 + if (stack_protector_debug) 141 + stack_protector_dump(insn, &new); 142 + s390_kernel_write(insn, &new, sizeof(*insn)); 143 + } 144 + return 0; 145 + } 146 + 147 + #ifdef __DECOMPRESSOR 148 + void __stack_protector_apply_early(unsigned long kernel_start) 149 + { 150 + unsigned long *start, *end; 151 + 152 + start = (unsigned long *)vmlinux.stack_prot_start; 153 + end = (unsigned long *)vmlinux.stack_prot_end; 154 + __stack_protector_apply(start, end, kernel_start); 155 + } 156 + #endif
+1
arch/s390/kernel/vdso64/Makefile
··· 32 32 KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64)) 33 33 KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64)) 34 34 KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables 35 + KBUILD_CFLAGS_64 += -fno-stack-protector 35 36 ldflags-y := -shared -soname=linux-vdso64.so.1 \ 36 37 --hash-style=both --build-id=sha1 -T 37 38
+13
arch/s390/kernel/vmlinux.lds.S
··· 150 150 *(.altinstr_replacement) 151 151 } 152 152 153 + #ifdef CONFIG_STACKPROTECTOR 154 + . = ALIGN(8); 155 + .stack_prot_table : { 156 + __stack_prot_start = .; 157 + KEEP(*(__stack_protector_loc)) 158 + __stack_prot_end = .; 159 + } 160 + #endif 161 + 153 162 /* 154 163 * Table with the patch locations to undo expolines 155 164 */ ··· 266 257 QUAD(invalid_pg_dir) 267 258 QUAD(__alt_instructions) 268 259 QUAD(__alt_instructions_end) 260 + #ifdef CONFIG_STACKPROTECTOR 261 + QUAD(__stack_prot_start) 262 + QUAD(__stack_prot_end) 263 + #endif 269 264 #ifdef CONFIG_KASAN 270 265 QUAD(kasan_early_shadow_page) 271 266 QUAD(kasan_early_shadow_pte)