Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: vdso: Expose sigreturn address on vdso to the kernel

Syscall user redirection requires the signal trampoline code to not be
captured, in order to support returning with a locked selector while
avoiding recursion back into the signal handler. For ia-32, which has
the trampoline in the vDSO, expose the entry points to the kernel, such
that it can avoid dispatching syscalls from that region to userspace.

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20201127193238.821364-2-krisman@collabora.com

Authored by Gabriel Krisman Bertazi and committed by Thomas Gleixner.
c5c87812 01fe185d

+23 lines added in total.
arch/x86/entry/vdso/vdso2c.c (+2)
··· 101 101 {"__kernel_sigreturn", true}, 102 102 {"__kernel_rt_sigreturn", true}, 103 103 {"int80_landing_pad", true}, 104 + {"vdso32_rt_sigreturn_landing_pad", true}, 105 + {"vdso32_sigreturn_landing_pad", true}, 104 106 }; 105 107 106 108 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
arch/x86/entry/vdso/vdso32/sigreturn.S (+2)
··· 18 18 movl $__NR_sigreturn, %eax 19 19 SYSCALL_ENTER_KERNEL 20 20 .LEND_sigreturn: 21 + SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL) 21 22 nop 22 23 .size __kernel_sigreturn,.-.LSTART_sigreturn 23 24 ··· 30 29 movl $__NR_rt_sigreturn, %eax 31 30 SYSCALL_ENTER_KERNEL 32 31 .LEND_rt_sigreturn: 32 + SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL) 33 33 nop 34 34 .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn 35 35 .previous
arch/x86/entry/vdso/vma.c (+15)
}
#endif

/*
 * arch_syscall_is_vdso_sigreturn - report whether the syscall recorded in
 * @regs was issued from one of the sigreturn trampoline "landing pads" of
 * the 32-bit vDSO image.
 *
 * Per the commit message, this lets the kernel avoid redirecting
 * sigreturn/rt_sigreturn issued from the vDSO trampolines back to
 * userspace (which would recurse into the signal handler).
 *
 * Returns true only when the task is in an ia32 syscall, its mm has the
 * 32-bit vDSO image mapped, and regs->ip matches one of the two exported
 * landing-pad symbols (placed immediately after the sigreturn syscall
 * instruction in sigreturn.S, per the companion hunk).
 */
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
	/* A 32-bit vDSO only exists on X86_32 or with IA32 emulation. */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	/* Only the 32-bit vDSO image carries the sigreturn trampolines. */
	if (in_ia32_syscall() && image == &vdso_image_32) {
		/* sym_* offsets are relative to the vDSO mapping base. */
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
arch/x86/include/asm/elf.h (+2)
··· 388 388 compat_arch_setup_additional_pages(bprm, interpreter, \ 389 389 (ex->e_machine == EM_X86_64)) 390 390 391 + extern bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs); 392 + 391 393 /* Do not change the values. See get_align_mask() */ 392 394 enum align_flags { 393 395 ALIGN_VA_32 = BIT(0),
arch/x86/include/asm/vdso.h (+2)
··· 27 27 long sym___kernel_rt_sigreturn; 28 28 long sym___kernel_vsyscall; 29 29 long sym_int80_landing_pad; 30 + long sym_vdso32_sigreturn_landing_pad; 31 + long sym_vdso32_rt_sigreturn_landing_pad; 30 32 }; 31 33 32 34 #ifdef CONFIG_X86_64