···482482#define __ARCH_WANT_SYS_SIGPENDING483483#define __ARCH_WANT_SYS_RT_SIGSUSPEND484484#define __ARCH_WANT_SYS_EXECVE485485+#define __ARCH_WANT_KERNEL_EXECVE485486486487/* "Conditional" syscalls. What we want is487488
+13
arch/alpha/kernel/entry.S
···626626 jmp $31, sys_exit627627.end ret_from_kernel_thread628628629629+ .globl ret_from_kernel_execve630630+ .align 4631631+ .ent ret_from_kernel_execve632632+ret_from_kernel_execve:633633+ mov $16, $sp634634+ /* Avoid the HAE being gratuitously wrong, to avoid restoring it. */635635+ ldq $2, alpha_mv+HAE_CACHE636636+ stq $2, 152($sp) /* HAE */637637+ mov $31, $19 /* to disable syscall restarts */638638+ br $31, ret_to_user639639+640640+.end ret_from_kernel_execve641641+629642630643/*631644 * Special system calls. Most of these are special in that they either
-19
arch/alpha/kernel/process.c
···435435 }436436 return pc;437437}438438-439439-int kernel_execve(const char *path, const char *const argv[], const char *const envp[])440440-{441441- /* Avoid the HAE being gratuitously wrong, which would cause us442442- to do the whole turn off interrupts thing and restore it. */443443- struct pt_regs regs = {.hae = alpha_mv.hae_cache};444444- int err = do_execve(path, argv, envp, &regs);445445- if (!err) {446446- struct pt_regs *p = current_pt_regs();447447- /* copy regs to normal position and off to userland we go... */448448- *p = regs;449449- __asm__ __volatile__ (450450- "mov %0, $sp;"451451- "br $31, ret_from_sys_call"452452- : : "r"(p));453453- }454454- return err;455455-}456456-EXPORT_SYMBOL(kernel_execve);