Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal

Pull generic execve() changes from Al Viro:
"This introduces the generic kernel_thread() and kernel_execve()
functions, and switches x86, arm, alpha, um and s390 over to them."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal: (26 commits)
s390: convert to generic kernel_execve()
s390: switch to generic kernel_thread()
s390: fold kernel_thread_helper() into ret_from_fork()
s390: fold execve_tail() into start_thread(), convert to generic sys_execve()
um: switch to generic kernel_thread()
x86, um/x86: switch to generic sys_execve and kernel_execve
x86: split ret_from_fork
alpha: introduce ret_from_kernel_execve(), switch to generic kernel_execve()
alpha: switch to generic kernel_thread()
alpha: switch to generic sys_execve()
arm: get rid of execve wrapper, switch to generic execve() implementation
arm: optimized current_pt_regs()
arm: introduce ret_from_kernel_execve(), switch to generic kernel_execve()
arm: split ret_from_fork, simplify kernel_thread() [based on patch by rmk]
generic sys_execve()
generic kernel_execve()
new helper: current_pt_regs()
preparation for generic kernel_thread()
um: kill thread->forking
um: let signal_delivered() do SIGTRAP on singlestepping into handler
...

+391 -900
+3
arch/Kconfig
··· 271 271 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 272 272 bool 273 273 274 + config GENERIC_KERNEL_THREAD 275 + bool 276 + 274 277 config HAVE_ARCH_SECCOMP_FILTER 275 278 bool 276 279 help
+1
arch/alpha/Kconfig
··· 20 20 select GENERIC_CMOS_UPDATE 21 21 select GENERIC_STRNCPY_FROM_USER 22 22 select GENERIC_STRNLEN_USER 23 + select GENERIC_KERNEL_THREAD 23 24 help 24 25 The Alpha is a 64-bit general-purpose processor designed and 25 26 marketed by the Digital Equipment Corporation of blessed memory,
+1
arch/alpha/include/asm/Kbuild
··· 10 10 header-y += reg.h 11 11 header-y += regdef.h 12 12 header-y += sysinfo.h 13 + generic-y += exec.h
-6
arch/alpha/include/asm/exec.h
··· 1 - #ifndef __ALPHA_EXEC_H 2 - #define __ALPHA_EXEC_H 3 - 4 - #define arch_align_stack(x) (x) 5 - 6 - #endif /* __ALPHA_EXEC_H */
-3
arch/alpha/include/asm/processor.h
··· 49 49 /* Free all resources held by a thread. */ 50 50 extern void release_thread(struct task_struct *); 51 51 52 - /* Create a kernel thread without removing it from tasklists. */ 53 - extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 54 - 55 52 unsigned long get_wchan(struct task_struct *p); 56 53 57 54 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+2
arch/alpha/include/asm/unistd.h
··· 481 481 #define __ARCH_WANT_SYS_OLDUMOUNT 482 482 #define __ARCH_WANT_SYS_SIGPENDING 483 483 #define __ARCH_WANT_SYS_RT_SIGSUSPEND 484 + #define __ARCH_WANT_SYS_EXECVE 485 + #define __ARCH_WANT_KERNEL_EXECVE 484 486 485 487 /* "Conditional" syscalls. What we want is 486 488
-3
arch/alpha/kernel/alpha_ksyms.c
··· 50 50 EXPORT_SYMBOL(alpha_write_fp_reg); 51 51 EXPORT_SYMBOL(alpha_write_fp_reg_s); 52 52 53 - /* entry.S */ 54 - EXPORT_SYMBOL(kernel_thread); 55 - 56 53 /* Networking helper routines. */ 57 54 EXPORT_SYMBOL(csum_tcpudp_magic); 58 55 EXPORT_SYMBOL(ip_compute_csum);
+18 -51
arch/alpha/kernel/entry.S
··· 609 609 .end ret_from_fork 610 610 611 611 /* 612 - * kernel_thread(fn, arg, clone_flags) 612 + * ... and new kernel threads - here 613 613 */ 614 614 .align 4 615 - .globl kernel_thread 616 - .ent kernel_thread 617 - kernel_thread: 618 - /* We can be called from a module. */ 619 - ldgp $gp, 0($27) 620 - .prologue 1 621 - subq $sp, SP_OFF+6*8, $sp 622 - br $1, 2f /* load start address */ 623 - 624 - /* We've now "returned" from a fake system call. */ 625 - unop 626 - blt $0, 1f /* error? */ 627 - ldi $1, 0x3fff 628 - beq $20, 1f /* parent or child? */ 629 - 630 - bic $sp, $1, $8 /* in child. */ 631 - jsr $26, ($27) 615 + .globl ret_from_kernel_thread 616 + .ent ret_from_kernel_thread 617 + ret_from_kernel_thread: 618 + mov $17, $16 619 + jsr $26, schedule_tail 620 + mov $9, $27 621 + mov $10, $16 622 + jsr $26, ($9) 632 623 ldgp $gp, 0($26) 633 624 mov $0, $16 634 625 mov $31, $26 635 626 jmp $31, sys_exit 627 + .end ret_from_kernel_thread 636 628 637 - 1: ret /* in parent. */ 638 - 639 - .align 4 640 - 2: /* Fake a system call stack frame, as we can't do system calls 641 - from kernel space. Note that we store FN and ARG as they 642 - need to be set up in the child for the call. Also store $8 643 - and $26 for use in the parent. */ 644 - stq $31, SP_OFF($sp) /* ps */ 645 - stq $1, SP_OFF+8($sp) /* pc */ 646 - stq $gp, SP_OFF+16($sp) /* gp */ 647 - stq $16, 136($sp) /* $27; FN for child */ 648 - stq $17, SP_OFF+24($sp) /* $16; ARG for child */ 649 - stq $8, 64($sp) /* $8 */ 650 - stq $26, 128($sp) /* $26 */ 629 + .globl ret_from_kernel_execve 630 + .align 4 631 + .ent ret_from_kernel_execve 632 + ret_from_kernel_execve: 633 + mov $16, $sp 651 634 /* Avoid the HAE being gratuitously wrong, to avoid restoring it. */ 652 635 ldq $2, alpha_mv+HAE_CACHE 653 636 stq $2, 152($sp) /* HAE */ 637 + mov $31, $19 /* to disable syscall restarts */ 638 + br $31, ret_to_user 654 639 655 - /* Shuffle FLAGS to the front; add CLONE_VM. 
*/ 656 - ldi $1, CLONE_VM|CLONE_UNTRACED 657 - or $18, $1, $16 658 - bsr $26, sys_clone 659 - 660 - /* We don't actually care for a3 success widgetry in the kernel. 661 - Not for positive errno values. */ 662 - stq $0, 0($sp) /* $0 */ 663 - br ret_to_kernel 664 - .end kernel_thread 640 + .end ret_from_kernel_execve 665 641 666 642 667 643 /* ··· 719 743 1: br $1, undo_switch_stack 720 744 br ret_from_sys_call 721 745 .end sys_rt_sigreturn 722 - 723 - .align 4 724 - .globl sys_execve 725 - .ent sys_execve 726 - sys_execve: 727 - .prologue 0 728 - mov $sp, $19 729 - jmp $31, do_sys_execve 730 - .end sys_execve 731 746 732 747 .align 4 733 748 .globl alpha_ni_syscall
+19 -58
arch/alpha/kernel/process.c
··· 263 263 264 264 /* 265 265 * Copy an alpha thread.. 266 - * 267 - * Note the "stack_offset" stuff: when returning to kernel mode, we need 268 - * to have some extra stack-space for the kernel stack that still exists 269 - * after the "ret_from_fork". When returning to user mode, we only want 270 - * the space needed by the syscall stack frame (ie "struct pt_regs"). 271 - * Use the passed "regs" pointer to determine how much space we need 272 - * for a kernel fork(). 273 266 */ 274 267 275 268 int 276 269 copy_thread(unsigned long clone_flags, unsigned long usp, 277 - unsigned long unused, 270 + unsigned long arg, 278 271 struct task_struct * p, struct pt_regs * regs) 279 272 { 280 273 extern void ret_from_fork(void); 274 + extern void ret_from_kernel_thread(void); 281 275 282 276 struct thread_info *childti = task_thread_info(p); 283 - struct pt_regs * childregs; 284 - struct switch_stack * childstack, *stack; 285 - unsigned long stack_offset, settls; 277 + struct pt_regs *childregs = task_pt_regs(p); 278 + struct switch_stack *childstack, *stack; 279 + unsigned long settls; 286 280 287 - stack_offset = PAGE_SIZE - sizeof(struct pt_regs); 288 - if (!(regs->ps & 8)) 289 - stack_offset = (PAGE_SIZE-1) & (unsigned long) regs; 290 - childregs = (struct pt_regs *) 291 - (stack_offset + PAGE_SIZE + task_stack_page(p)); 292 - 281 + childstack = ((struct switch_stack *) childregs) - 1; 282 + if (unlikely(!regs)) { 283 + /* kernel thread */ 284 + memset(childstack, 0, 285 + sizeof(struct switch_stack) + sizeof(struct pt_regs)); 286 + childstack->r26 = (unsigned long) ret_from_kernel_thread; 287 + childstack->r9 = usp; /* function */ 288 + childstack->r10 = arg; 289 + childregs->hae = alpha_mv.hae_cache, 290 + childti->pcb.usp = 0; 291 + childti->pcb.ksp = (unsigned long) childstack; 292 + childti->pcb.flags = 1; /* set FEN, clear everything else */ 293 + return 0; 294 + } 293 295 *childregs = *regs; 294 296 settls = regs->r20; 295 297 childregs->r0 = 0; ··· 299 297 
childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ 300 298 regs->r20 = 0; 301 299 stack = ((struct switch_stack *) regs) - 1; 302 - childstack = ((struct switch_stack *) childregs) - 1; 303 300 *childstack = *stack; 304 301 childstack->r26 = (unsigned long) ret_from_fork; 305 302 childti->pcb.usp = usp; ··· 387 386 EXPORT_SYMBOL(dump_elf_task_fp); 388 387 389 388 /* 390 - * sys_execve() executes a new program. 391 - */ 392 - asmlinkage int 393 - do_sys_execve(const char __user *ufilename, 394 - const char __user *const __user *argv, 395 - const char __user *const __user *envp, struct pt_regs *regs) 396 - { 397 - int error; 398 - char *filename; 399 - 400 - filename = getname(ufilename); 401 - error = PTR_ERR(filename); 402 - if (IS_ERR(filename)) 403 - goto out; 404 - error = do_execve(filename, argv, envp, regs); 405 - putname(filename); 406 - out: 407 - return error; 408 - } 409 - 410 - /* 411 389 * Return saved PC of a blocked thread. This assumes the frame 412 390 * pointer is the 6th saved long on the kernel stack and that the 413 391 * saved return address is the first long in the frame. This all ··· 439 459 } 440 460 return pc; 441 461 } 442 - 443 - int kernel_execve(const char *path, const char *const argv[], const char *const envp[]) 444 - { 445 - /* Avoid the HAE being gratuitously wrong, which would cause us 446 - to do the whole turn off interrupts thing and restore it. */ 447 - struct pt_regs regs = {.hae = alpha_mv.hae_cache}; 448 - int err = do_execve(path, argv, envp, &regs); 449 - if (!err) { 450 - struct pt_regs *p = current_pt_regs(); 451 - /* copy regs to normal position and off to userland we go... */ 452 - *p = regs; 453 - __asm__ __volatile__ ( 454 - "mov %0, $sp;" 455 - "br $31, ret_from_sys_call" 456 - : : "r"(p)); 457 - } 458 - return err; 459 - } 460 - EXPORT_SYMBOL(kernel_execve);
+1
arch/arm/Kconfig
··· 52 52 select GENERIC_STRNCPY_FROM_USER 53 53 select GENERIC_STRNLEN_USER 54 54 select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN 55 + select GENERIC_KERNEL_THREAD 55 56 help 56 57 The ARM series is a line of low-power-consumption RISC chip designs 57 58 licensed by ARM Ltd and targeted at embedded applications and
-5
arch/arm/include/asm/processor.h
··· 85 85 #define cpu_relax() barrier() 86 86 #endif 87 87 88 - /* 89 - * Create a new kernel thread 90 - */ 91 - extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 92 - 93 88 #define task_pt_regs(p) \ 94 89 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) 95 90
+5
arch/arm/include/asm/ptrace.h
··· 254 254 return regs->ARM_sp; 255 255 } 256 256 257 + #define current_pt_regs(void) ({ \ 258 + register unsigned long sp asm ("sp"); \ 259 + (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \ 260 + }) 261 + 257 262 #endif /* __KERNEL__ */ 258 263 259 264 #endif /* __ASSEMBLY__ */
-1
arch/arm/include/asm/system.h
··· 2 2 #include <asm/barrier.h> 3 3 #include <asm/compiler.h> 4 4 #include <asm/cmpxchg.h> 5 - #include <asm/exec.h> 6 5 #include <asm/switch_to.h> 7 6 #include <asm/system_info.h> 8 7 #include <asm/system_misc.h>
+2
arch/arm/include/asm/unistd.h
··· 478 478 #define __ARCH_WANT_OLD_READDIR 479 479 #define __ARCH_WANT_SYS_SOCKETCALL 480 480 #endif 481 + #define __ARCH_WANT_SYS_EXECVE 482 + #define __ARCH_WANT_KERNEL_EXECVE 481 483 482 484 /* 483 485 * "Conditional" syscalls
+1 -1
arch/arm/kernel/calls.S
··· 20 20 CALL(sys_creat) 21 21 CALL(sys_link) 22 22 /* 10 */ CALL(sys_unlink) 23 - CALL(sys_execve_wrapper) 23 + CALL(sys_execve) 24 24 CALL(sys_chdir) 25 25 CALL(OBSOLETE(sys_time)) /* used by libc4 */ 26 26 CALL(sys_mknod)
+24 -5
arch/arm/kernel/entry-common.S
··· 91 91 b ret_slow_syscall 92 92 ENDPROC(ret_from_fork) 93 93 94 + ENTRY(ret_from_kernel_thread) 95 + UNWIND(.fnstart) 96 + UNWIND(.cantunwind) 97 + bl schedule_tail 98 + mov r0, r4 99 + adr lr, BSYM(1f) @ kernel threads should not exit 100 + mov pc, r5 101 + 1: bl do_exit 102 + nop 103 + UNWIND(.fnend) 104 + ENDPROC(ret_from_kernel_thread) 105 + 106 + /* 107 + * turn a kernel thread into userland process 108 + * use: ret_from_kernel_execve(struct pt_regs *normal) 109 + */ 110 + ENTRY(ret_from_kernel_execve) 111 + mov why, #0 @ not a syscall 112 + str why, [r0, #S_R0] @ ... and we want 0 in ->ARM_r0 as well 113 + get_thread_info tsk @ thread structure 114 + mov sp, r0 @ stack pointer just under pt_regs 115 + b ret_slow_syscall 116 + ENDPROC(ret_from_kernel_execve) 117 + 94 118 .equ NR_syscalls,0 95 119 #define CALL(x) .equ NR_syscalls,NR_syscalls+1 96 120 #include "calls.S" ··· 540 516 add r0, sp, #S_OFF 541 517 b sys_vfork 542 518 ENDPROC(sys_vfork_wrapper) 543 - 544 - sys_execve_wrapper: 545 - add r3, sp, #S_OFF 546 - b sys_execve 547 - ENDPROC(sys_execve_wrapper) 548 519 549 520 sys_clone_wrapper: 550 521 add ip, sp, #S_OFF
+13 -62
arch/arm/kernel/process.c
··· 373 373 } 374 374 375 375 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 376 + asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); 376 377 377 378 int 378 379 copy_thread(unsigned long clone_flags, unsigned long stack_start, ··· 382 381 struct thread_info *thread = task_thread_info(p); 383 382 struct pt_regs *childregs = task_pt_regs(p); 384 383 385 - *childregs = *regs; 386 - childregs->ARM_r0 = 0; 387 - childregs->ARM_sp = stack_start; 388 - 389 384 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); 385 + 386 + if (likely(regs)) { 387 + *childregs = *regs; 388 + childregs->ARM_r0 = 0; 389 + childregs->ARM_sp = stack_start; 390 + thread->cpu_context.pc = (unsigned long)ret_from_fork; 391 + } else { 392 + thread->cpu_context.r4 = stk_sz; 393 + thread->cpu_context.r5 = stack_start; 394 + thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread; 395 + childregs->ARM_cpsr = SVC_MODE; 396 + } 390 397 thread->cpu_context.sp = (unsigned long)childregs; 391 - thread->cpu_context.pc = (unsigned long)ret_from_fork; 392 398 393 399 clear_ptrace_hw_breakpoint(p); 394 400 ··· 430 422 return used_math != 0; 431 423 } 432 424 EXPORT_SYMBOL(dump_fpu); 433 - 434 - /* 435 - * Shuffle the argument into the correct register before calling the 436 - * thread function. r4 is the thread argument, r5 is the pointer to 437 - * the thread function, and r6 points to the exit function. 438 - */ 439 - extern void kernel_thread_helper(void); 440 - asm( ".pushsection .text\n" 441 - " .align\n" 442 - " .type kernel_thread_helper, #function\n" 443 - "kernel_thread_helper:\n" 444 - #ifdef CONFIG_TRACE_IRQFLAGS 445 - " bl trace_hardirqs_on\n" 446 - #endif 447 - " msr cpsr_c, r7\n" 448 - " mov r0, r4\n" 449 - " mov lr, r6\n" 450 - " mov pc, r5\n" 451 - " .size kernel_thread_helper, . 
- kernel_thread_helper\n" 452 - " .popsection"); 453 - 454 - #ifdef CONFIG_ARM_UNWIND 455 - extern void kernel_thread_exit(long code); 456 - asm( ".pushsection .text\n" 457 - " .align\n" 458 - " .type kernel_thread_exit, #function\n" 459 - "kernel_thread_exit:\n" 460 - " .fnstart\n" 461 - " .cantunwind\n" 462 - " bl do_exit\n" 463 - " nop\n" 464 - " .fnend\n" 465 - " .size kernel_thread_exit, . - kernel_thread_exit\n" 466 - " .popsection"); 467 - #else 468 - #define kernel_thread_exit do_exit 469 - #endif 470 - 471 - /* 472 - * Create a kernel thread. 473 - */ 474 - pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 475 - { 476 - struct pt_regs regs; 477 - 478 - memset(&regs, 0, sizeof(regs)); 479 - 480 - regs.ARM_r4 = (unsigned long)arg; 481 - regs.ARM_r5 = (unsigned long)fn; 482 - regs.ARM_r6 = (unsigned long)kernel_thread_exit; 483 - regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; 484 - regs.ARM_pc = (unsigned long)kernel_thread_helper; 485 - regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT; 486 - 487 - return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); 488 - } 489 - EXPORT_SYMBOL(kernel_thread); 490 425 491 426 unsigned long get_wchan(struct task_struct *p) 492 427 {
-63
arch/arm/kernel/sys_arm.c
··· 59 59 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL); 60 60 } 61 61 62 - /* sys_execve() executes a new program. 63 - * This is called indirectly via a small wrapper 64 - */ 65 - asmlinkage int sys_execve(const char __user *filenamei, 66 - const char __user *const __user *argv, 67 - const char __user *const __user *envp, struct pt_regs *regs) 68 - { 69 - int error; 70 - char * filename; 71 - 72 - filename = getname(filenamei); 73 - error = PTR_ERR(filename); 74 - if (IS_ERR(filename)) 75 - goto out; 76 - error = do_execve(filename, argv, envp, regs); 77 - putname(filename); 78 - out: 79 - return error; 80 - } 81 - 82 - int kernel_execve(const char *filename, 83 - const char *const argv[], 84 - const char *const envp[]) 85 - { 86 - struct pt_regs regs; 87 - int ret; 88 - 89 - memset(&regs, 0, sizeof(struct pt_regs)); 90 - ret = do_execve(filename, 91 - (const char __user *const __user *)argv, 92 - (const char __user *const __user *)envp, &regs); 93 - if (ret < 0) 94 - goto out; 95 - 96 - /* 97 - * Save argc to the register structure for userspace. 98 - */ 99 - regs.ARM_r0 = ret; 100 - 101 - /* 102 - * We were successful. We won't be returning to our caller, but 103 - * instead to user space by manipulating the kernel stack. 104 - */ 105 - asm( "add r0, %0, %1\n\t" 106 - "mov r1, %2\n\t" 107 - "mov r2, %3\n\t" 108 - "bl memmove\n\t" /* copy regs to top of stack */ 109 - "mov r8, #0\n\t" /* not a syscall */ 110 - "mov r9, %0\n\t" /* thread structure */ 111 - "mov sp, r0\n\t" /* reposition stack pointer */ 112 - "b ret_to_user" 113 - : 114 - : "r" (current_thread_info()), 115 - "Ir" (THREAD_START_SP - sizeof(regs)), 116 - "r" (&regs), 117 - "Ir" (sizeof(regs)) 118 - : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory"); 119 - 120 - out: 121 - return ret; 122 - } 123 - EXPORT_SYMBOL(kernel_execve); 124 - 125 62 /* 126 63 * Since loff_t is a 64 bit type we avoid a lot of ABI hassle 127 64 * with a different argument ordering.
+1
arch/avr32/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 3 generic-y += clkdev.h 4 + generic-y += exec.h 4 5 5 6 header-y += cachectl.h
-13
arch/avr32/include/asm/exec.h
··· 1 - /* 2 - * Copyright (C) 2004-2006 Atmel Corporation 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License version 2 as 6 - * published by the Free Software Foundation. 7 - */ 8 - #ifndef __ASM_AVR32_EXEC_H 9 - #define __ASM_AVR32_EXEC_H 10 - 11 - #define arch_align_stack(x) (x) 12 - 13 - #endif /* __ASM_AVR32_EXEC_H */
+1
arch/c6x/include/asm/Kbuild
··· 12 12 generic-y += dma.h 13 13 generic-y += emergency-restart.h 14 14 generic-y += errno.h 15 + generic-y += exec.h 15 16 generic-y += fb.h 16 17 generic-y += fcntl.h 17 18 generic-y += futex.h
-6
arch/c6x/include/asm/exec.h
··· 1 - #ifndef _ASM_C6X_EXEC_H 2 - #define _ASM_C6X_EXEC_H 3 - 4 - #define arch_align_stack(x) (x) 5 - 6 - #endif /* _ASM_C6X_EXEC_H */
+1
arch/cris/include/asm/Kbuild
··· 9 9 header-y += sync_serial.h 10 10 11 11 generic-y += clkdev.h 12 + generic-y += exec.h
-6
arch/cris/include/asm/exec.h
··· 1 - #ifndef __ASM_CRIS_EXEC_H 2 - #define __ASM_CRIS_EXEC_H 3 - 4 - #define arch_align_stack(x) (x) 5 - 6 - #endif /* __ASM_CRIS_EXEC_H */
+1
arch/frv/include/asm/Kbuild
··· 3 3 header-y += registers.h 4 4 header-y += termios.h 5 5 generic-y += clkdev.h 6 + generic-y += exec.h
-17
arch/frv/include/asm/exec.h
··· 1 - /* FR-V CPU executable handling 2 - * 3 - * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. 4 - * Written by David Howells (dhowells@redhat.com) 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the License, or (at your option) any later version. 10 - */ 11 - 12 - #ifndef _ASM_EXEC_H 13 - #define _ASM_EXEC_H 14 - 15 - #define arch_align_stack(x) (x) 16 - 17 - #endif /* _ASM_EXEC_H */
+2 -1
arch/h8300/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 - generic-y += clkdev.h 3 + generic-y += clkdev.h 4 + generic-y += exec.h
-6
arch/h8300/include/asm/exec.h
··· 1 - #ifndef _H8300_EXEC_H 2 - #define _H8300_EXEC_H 3 - 4 - #define arch_align_stack(x) (x) 5 - 6 - #endif /* _H8300_EXEC_H */
-1
arch/hexagon/kernel/syscall.c
··· 87 87 88 88 return retval; 89 89 } 90 - EXPORT_SYMBOL(kernel_execve);
+1
arch/ia64/include/asm/Kbuild
··· 14 14 header-y += ucontext.h 15 15 header-y += ustack.h 16 16 generic-y += clkdev.h 17 + generic-y += exec.h
-14
arch/ia64/include/asm/exec.h
··· 1 - /* 2 - * Process execution defines. 3 - * 4 - * Copyright (C) 1998-2003 Hewlett-Packard Co 5 - * David Mosberger-Tang <davidm@hpl.hp.com> 6 - * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> 7 - * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> 8 - */ 9 - #ifndef _ASM_IA64_EXEC_H 10 - #define _ASM_IA64_EXEC_H 11 - 12 - #define arch_align_stack(x) (x) 13 - 14 - #endif /* _ASM_IA64_EXEC_H */
+2 -1
arch/m32r/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 - generic-y += clkdev.h 3 + generic-y += clkdev.h 4 + generic-y += exec.h
-14
arch/m32r/include/asm/exec.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto 7 - * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> 8 - */ 9 - #ifndef _ASM_M32R_EXEC_H 10 - #define _ASM_M32R_EXEC_H 11 - 12 - #define arch_align_stack(x) (x) 13 - 14 - #endif /* _ASM_M32R_EXEC_H */
+1
arch/m68k/include/asm/Kbuild
··· 7 7 generic-y += device.h 8 8 generic-y += emergency-restart.h 9 9 generic-y += errno.h 10 + generic-y += exec.h 10 11 generic-y += futex.h 11 12 generic-y += ioctl.h 12 13 generic-y += ipcbuf.h
-6
arch/m68k/include/asm/exec.h
··· 1 - #ifndef _M68K_EXEC_H 2 - #define _M68K_EXEC_H 3 - 4 - #define arch_align_stack(x) (x) 5 - 6 - #endif /* _M68K_EXEC_H */
+1
arch/microblaze/include/asm/Kbuild
··· 2 2 3 3 header-y += elf.h 4 4 generic-y += clkdev.h 5 + generic-y += exec.h
-14
arch/microblaze/include/asm/exec.h
··· 1 - /* 2 - * Copyright (C) 2006 Atmark Techno, Inc. 3 - * 4 - * This file is subject to the terms and conditions of the GNU General Public 5 - * License. See the file "COPYING" in the main directory of this archive 6 - * for more details. 7 - */ 8 - 9 - #ifndef _ASM_MICROBLAZE_EXEC_H 10 - #define _ASM_MICROBLAZE_EXEC_H 11 - 12 - #define arch_align_stack(x) (x) 13 - 14 - #endif /* _ASM_MICROBLAZE_EXEC_H */
+1
arch/mn10300/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 3 generic-y += clkdev.h 4 + generic-y += exec.h
-16
arch/mn10300/include/asm/exec.h
··· 1 - /* MN10300 process execution definitions 2 - * 3 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 - * Written by David Howells (dhowells@redhat.com) 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public Licence 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the Licence, or (at your option) any later version. 10 - */ 11 - #ifndef _ASM_EXEC_H 12 - #define _ASM_EXEC_H 13 - 14 - #define arch_align_stack(x) (x) 15 - 16 - #endif /* _ASM_EXEC_H */
+1
arch/parisc/include/asm/Kbuild
··· 3 3 header-y += pdc.h 4 4 generic-y += clkdev.h 5 5 generic-y += word-at-a-time.h 6 + generic-y += exec.h
-6
arch/parisc/include/asm/exec.h
··· 1 - #ifndef __PARISC_EXEC_H 2 - #define __PARISC_EXEC_H 3 - 4 - #define arch_align_stack(x) (x) 5 - 6 - #endif /* __PARISC_EXEC_H */
+1
arch/s390/Kconfig
··· 135 135 select GENERIC_CLOCKEVENTS 136 136 select KTIME_SCALAR if 32BIT 137 137 select HAVE_ARCH_SECCOMP_FILTER 138 + select GENERIC_KERNEL_THREAD 138 139 139 140 config SCHED_OMIT_FRAME_POINTER 140 141 def_bool y
+3 -1
arch/s390/include/asm/processor.h
··· 35 35 extern void s390_adjust_jiffies(void); 36 36 extern const struct seq_operations cpuinfo_op; 37 37 extern int sysctl_ieee_emulation_warnings; 38 + extern void execve_tail(void); 38 39 39 40 /* 40 41 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. ··· 127 126 regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \ 128 127 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 129 128 regs->gprs[15] = new_stackp; \ 129 + execve_tail(); \ 130 130 } while (0) 131 131 132 132 #define start_thread31(regs, new_psw, new_stackp) do { \ ··· 137 135 __tlb_flush_mm(current->mm); \ 138 136 crst_table_downgrade(current->mm, 1UL << 31); \ 139 137 update_mm(current->mm, current); \ 138 + execve_tail(); \ 140 139 } while (0) 141 140 142 141 /* Forward declaration, a strange C thing */ ··· 153 150 154 151 /* Free all resources held by a thread. */ 155 152 extern void release_thread(struct task_struct *); 156 - extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); 157 153 158 154 /* 159 155 * Return saved PC of a blocked thread.
+2
arch/s390/include/asm/unistd.h
··· 417 417 # define __ARCH_WANT_COMPAT_SYS_TIME 418 418 # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND 419 419 # endif 420 + #define __ARCH_WANT_SYS_EXECVE 421 + #define __ARCH_WANT_KERNEL_EXECVE 420 422 421 423 /* 422 424 * "Conditional" syscalls
-26
arch/s390/kernel/compat_linux.c
··· 432 432 return ret; 433 433 } 434 434 435 - /* 436 - * sys32_execve() executes a new program after the asm stub has set 437 - * things up for us. This should basically do what I want it to. 438 - */ 439 - asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv, 440 - compat_uptr_t __user *envp) 441 - { 442 - struct pt_regs *regs = task_pt_regs(current); 443 - char *filename; 444 - long rc; 445 - 446 - filename = getname(name); 447 - rc = PTR_ERR(filename); 448 - if (IS_ERR(filename)) 449 - return rc; 450 - rc = compat_do_execve(filename, argv, envp, regs); 451 - if (rc) 452 - goto out; 453 - current->thread.fp_regs.fpc=0; 454 - asm volatile("sfpc %0,0" : : "d" (0)); 455 - rc = regs->gprs[2]; 456 - out: 457 - putname(filename); 458 - return rc; 459 - } 460 - 461 435 asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf, 462 436 size_t count, u32 poshi, u32 poslo) 463 437 {
-2
arch/s390/kernel/compat_linux.h
··· 125 125 compat_sigset_t __user *oset, size_t sigsetsize); 126 126 long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize); 127 127 long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo); 128 - long sys32_execve(const char __user *name, compat_uptr_t __user *argv, 129 - compat_uptr_t __user *envp); 130 128 long sys32_init_module(void __user *umod, unsigned long len, 131 129 const char __user *uargs); 132 130 long sys32_delete_module(const char __user *name_user, unsigned int flags);
+1 -1
arch/s390/kernel/compat_wrapper.S
··· 1576 1576 llgtr %r2,%r2 # char * 1577 1577 llgtr %r3,%r3 # compat_uptr_t * 1578 1578 llgtr %r4,%r4 # compat_uptr_t * 1579 - jg sys32_execve # branch to system call 1579 + jg compat_sys_execve # branch to system call 1580 1580 1581 1581 ENTRY(sys_fanotify_init_wrapper) 1582 1582 llgfr %r2,%r2 # unsigned int
+21 -30
arch/s390/kernel/entry.S
··· 331 331 l %r12,__LC_THREAD_INFO 332 332 l %r13,__LC_SVC_NEW_PSW+4 333 333 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 334 - jo 0f 335 - st %r15,__PT_R15(%r11) # store stack pointer for new kthread 336 - 0: l %r1,BASED(.Lschedule_tail) 334 + je 1f 335 + l %r1,BASED(.Lschedule_tail) 337 336 basr %r14,%r1 # call schedule_tail 338 337 TRACE_IRQS_ON 339 338 ssm __LC_SVC_NEW_PSW # reenable interrupts 340 339 j sysc_tracenogo 341 340 341 + 1: # it's a kernel thread 342 + st %r15,__PT_R15(%r11) # store stack pointer for new kthread 343 + l %r1,BASED(.Lschedule_tail) 344 + basr %r14,%r1 # call schedule_tail 345 + TRACE_IRQS_ON 346 + ssm __LC_SVC_NEW_PSW # reenable interrupts 347 + lm %r9,%r11,__PT_R9(%r11) # load gprs 348 + ENTRY(kernel_thread_starter) 349 + la %r2,0(%r10) 350 + basr %r14,%r9 351 + la %r2,0 352 + br %r11 # do_exit 353 + 342 354 # 343 355 # kernel_execve function needs to deal with pt_regs that is not 344 356 # at the usual place 345 357 # 346 - ENTRY(kernel_execve) 347 - stm %r12,%r15,48(%r15) 348 - lr %r14,%r15 349 - l %r13,__LC_SVC_NEW_PSW+4 350 - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 351 - st %r14,__SF_BACKCHAIN(%r15) 352 - la %r12,STACK_FRAME_OVERHEAD(%r15) 353 - xc 0(__PT_SIZE,%r12),0(%r12) 354 - l %r1,BASED(.Ldo_execve) 355 - lr %r5,%r12 356 - basr %r14,%r1 # call do_execve 357 - ltr %r2,%r2 358 - je 0f 359 - ahi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) 360 - lm %r12,%r15,48(%r15) 361 - br %r14 362 - # execve succeeded. 363 - 0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 364 - l %r15,__LC_KERNEL_STACK # load ksp 365 - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 366 - la %r11,STACK_FRAME_OVERHEAD(%r15) 367 - mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs 368 - l %r12,__LC_THREAD_INFO 358 + ENTRY(ret_from_kernel_execve) 359 + ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts 360 + lr %r15,%r2 361 + lr %r11,%r2 362 + ahi %r15,-STACK_FRAME_OVERHEAD 369 363 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 364 + l %r12,__LC_THREAD_INFO 370 365 ssm __LC_SVC_NEW_PSW # reenable interrupts 371 - l %r1,BASED(.Lexecve_tail) 372 - basr %r14,%r1 # call execve_tail 373 366 j sysc_return 374 367 375 368 /* ··· 924 931 .Ldo_signal: .long do_signal 925 932 .Ldo_notify_resume: .long do_notify_resume 926 933 .Ldo_per_trap: .long do_per_trap 927 - .Ldo_execve: .long do_execve 928 - .Lexecve_tail: .long execve_tail 929 934 .Ljump_table: .long pgm_check_table 930 935 .Lschedule: .long schedule 931 936 #ifdef CONFIG_PREEMPT
-3
arch/s390/kernel/entry.h
··· 58 58 long sys_clone(unsigned long newsp, unsigned long clone_flags, 59 59 int __user *parent_tidptr, int __user *child_tidptr); 60 60 long sys_vfork(void); 61 - void execve_tail(void); 62 - long sys_execve(const char __user *name, const char __user *const __user *argv, 63 - const char __user *const __user *envp); 64 61 long sys_sigsuspend(int history0, int history1, old_sigset_t mask); 65 62 long sys_sigaction(int sig, const struct old_sigaction __user *act, 66 63 struct old_sigaction __user *oact);
+21 -31
arch/s390/kernel/entry64.S
··· 353 353 la %r11,STACK_FRAME_OVERHEAD(%r15) 354 354 lg %r12,__LC_THREAD_INFO 355 355 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 356 - jo 0f 357 - stg %r15,__PT_R15(%r11) # store stack pointer for new kthread 358 - 0: brasl %r14,schedule_tail 356 + je 1f 357 + brasl %r14,schedule_tail 359 358 TRACE_IRQS_ON 360 359 ssm __LC_SVC_NEW_PSW # reenable interrupts 361 360 j sysc_tracenogo 362 - 363 - # 364 - # kernel_execve function needs to deal with pt_regs that is not 365 - # at the usual place 366 - # 367 - ENTRY(kernel_execve) 368 - stmg %r12,%r15,96(%r15) 369 - lgr %r14,%r15 370 - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 371 - stg %r14,__SF_BACKCHAIN(%r15) 372 - la %r12,STACK_FRAME_OVERHEAD(%r15) 373 - xc 0(__PT_SIZE,%r12),0(%r12) 374 - lgr %r5,%r12 375 - brasl %r14,do_execve 376 - ltgfr %r2,%r2 377 - je 0f 378 - aghi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) 379 - lmg %r12,%r15,96(%r15) 380 - br %r14 381 - # execve succeeded. 382 - 0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 383 - lg %r15,__LC_KERNEL_STACK # load ksp 384 - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 385 - la %r11,STACK_FRAME_OVERHEAD(%r15) 386 - mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs 387 - lg %r12,__LC_THREAD_INFO 388 - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 361 + 1: # it's a kernel thread 362 + stg %r15,__PT_R15(%r11) # store stack pointer for new kthread 363 + brasl %r14,schedule_tail 364 + TRACE_IRQS_ON 389 365 ssm __LC_SVC_NEW_PSW # reenable interrupts 390 - brasl %r14,execve_tail 366 + lmg %r9,%r11,__PT_R9(%r11) # load gprs 367 + ENTRY(kernel_thread_starter) 368 + la %r2,0(%r10) 369 + basr %r14,%r9 370 + la %r2,0 371 + br %r11 # do_exit 372 + 373 + ENTRY(ret_from_kernel_execve) 374 + ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 375 + lgr %r15,%r2 376 + lgr %r11,%r2 377 + aghi %r15,-STACK_FRAME_OVERHEAD 378 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 379 + lg %r12,__LC_THREAD_INFO 380 + ssm __LC_SVC_NEW_PSW # reenable interrupts 391 381 j sysc_return 392 382 393 383 /*
+33 -74
arch/s390/kernel/process.c
··· 100 100 101 101 extern void __kprobes kernel_thread_starter(void); 102 102 103 - asm( 104 - ".section .kprobes.text, \"ax\"\n" 105 - ".global kernel_thread_starter\n" 106 - "kernel_thread_starter:\n" 107 - " la 2,0(10)\n" 108 - " basr 14,9\n" 109 - " la 2,0\n" 110 - " br 11\n" 111 - ".previous\n"); 112 - 113 - int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 114 - { 115 - struct pt_regs regs; 116 - 117 - memset(&regs, 0, sizeof(regs)); 118 - regs.psw.mask = psw_kernel_bits | 119 - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 120 - regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; 121 - regs.gprs[9] = (unsigned long) fn; 122 - regs.gprs[10] = (unsigned long) arg; 123 - regs.gprs[11] = (unsigned long) do_exit; 124 - regs.orig_gpr2 = -1; 125 - 126 - /* Ok, create the new process.. */ 127 - return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 128 - 0, &regs, 0, NULL, NULL); 129 - } 130 - EXPORT_SYMBOL(kernel_thread); 131 - 132 103 /* 133 104 * Free current thread data structures etc.. 134 105 */ ··· 117 146 } 118 147 119 148 int copy_thread(unsigned long clone_flags, unsigned long new_stackp, 120 - unsigned long unused, 149 + unsigned long arg, 121 150 struct task_struct *p, struct pt_regs *regs) 122 151 { 123 152 struct thread_info *ti; ··· 129 158 130 159 frame = container_of(task_pt_regs(p), struct fake_frame, childregs); 131 160 p->thread.ksp = (unsigned long) frame; 132 - /* Store access registers to kernel stack of new process. */ 133 - frame->childregs = *regs; 134 - frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ 135 - frame->childregs.gprs[15] = new_stackp; 136 - frame->sf.back_chain = 0; 161 + /* Save access registers to new thread structure. */ 162 + save_access_regs(&p->thread.acrs[0]); 163 + /* start new process with ar4 pointing to the correct address space */ 164 + p->thread.mm_segment = get_fs(); 165 + /* Don't copy debug registers */ 166 + memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); 167 + memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); 168 + clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 169 + clear_tsk_thread_flag(p, TIF_PER_TRAP); 170 + /* Initialize per thread user and system timer values */ 171 + ti = task_thread_info(p); 172 + ti->user_timer = 0; 173 + ti->system_timer = 0; 137 174 175 + frame->sf.back_chain = 0; 138 176 /* new return point is ret_from_fork */ 139 177 frame->sf.gprs[8] = (unsigned long) ret_from_fork; 140 - 141 178 /* fake return stack for resume(), don't go back to schedule */ 142 179 frame->sf.gprs[9] = (unsigned long) frame; 143 180 144 - /* Save access registers to new thread structure. */ 145 - save_access_regs(&p->thread.acrs[0]); 181 + /* Store access registers to kernel stack of new process. */ 182 + if (unlikely(!regs)) { 183 + /* kernel thread */ 184 + memset(&frame->childregs, 0, sizeof(struct pt_regs)); 185 + frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | 186 + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 187 + frame->childregs.psw.addr = PSW_ADDR_AMODE | 188 + (unsigned long) kernel_thread_starter; 189 + frame->childregs.gprs[9] = new_stackp; /* function */ 190 + frame->childregs.gprs[10] = arg; 191 + frame->childregs.gprs[11] = (unsigned long) do_exit; 192 + frame->childregs.orig_gpr2 = -1; 193 + 194 + return 0; 195 + } 196 + frame->childregs = *regs; 197 + frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ 198 + frame->childregs.gprs[15] = new_stackp; 146 199 147 200 /* Don't copy runtime instrumentation info */ 148 201 p->thread.ri_cb = NULL; ··· 197 202 } 198 203 } 199 204 #endif /* CONFIG_64BIT */ 200 - /* start new process with ar4 pointing to the correct address space */ 201 - p->thread.mm_segment = get_fs(); 202 - /* Don't copy debug registers */ 203 - memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); 204 - memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); 205 - clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 206 - clear_tsk_thread_flag(p, TIF_PER_TRAP); 207 - /* Initialize per thread user and system timer values */ 208 - ti = task_thread_info(p); 209 - ti->user_timer = 0; 210 - ti->system_timer = 0; 211 205 return 0; 212 206 } 213 207 ··· 239 255 current->thread.fp_regs.fpc = 0; 240 256 if (MACHINE_HAS_IEEE) 241 257 asm volatile("sfpc %0,%0" : : "d" (0)); 242 - } 243 - 244 - /* 245 - * sys_execve() executes a new program. 246 - */ 247 - SYSCALL_DEFINE3(execve, const char __user *, name, 248 - const char __user *const __user *, argv, 249 - const char __user *const __user *, envp) 250 - { 251 - struct pt_regs *regs = task_pt_regs(current); 252 - char *filename; 253 - long rc; 254 - 255 - filename = getname(name); 256 - rc = PTR_ERR(filename); 257 - if (IS_ERR(filename)) 258 - return rc; 259 - rc = do_execve(filename, argv, envp, regs); 260 - if (rc) 261 - goto out; 262 - execve_tail(); 263 - rc = regs->gprs[2]; 264 - out: 265 - putname(filename); 266 - return rc; 267 258 } 268 259 269 260 /*
+1
arch/sh/include/asm/Kbuild
··· 7 7 generic-y += div64.h 8 8 generic-y += emergency-restart.h 9 9 generic-y += errno.h 10 + generic-y += exec.h 10 11 generic-y += fcntl.h 11 12 generic-y += ioctl.h 12 13 generic-y += ipcbuf.h
-10
arch/sh/include/asm/exec.h
··· 1 - /* 2 - * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima 3 - * Copyright (C) 2002 Paul Mundt 4 - */ 5 - #ifndef __ASM_SH_EXEC_H 6 - #define __ASM_SH_EXEC_H 7 - 8 - #define arch_align_stack(x) (x) 9 - 10 - #endif /* __ASM_SH_EXEC_H */
+1
arch/sparc/include/asm/Kbuild
··· 3 3 4 4 generic-y += clkdev.h 5 5 generic-y += div64.h 6 + generic-y += exec.h 6 7 generic-y += local64.h 7 8 generic-y += irq_regs.h 8 9 generic-y += local.h
-6
arch/sparc/include/asm/exec.h
··· 1 - #ifndef __SPARC_EXEC_H 2 - #define __SPARC_EXEC_H 3 - 4 - #define arch_align_stack(x) (x) 5 - 6 - #endif /* __SPARC_EXEC_H */
+1
arch/tile/include/asm/Kbuild
··· 13 13 generic-y += div64.h 14 14 generic-y += emergency-restart.h 15 15 generic-y += errno.h 16 + generic-y += exec.h 16 17 generic-y += fb.h 17 18 generic-y += fcntl.h 18 19 generic-y += ioctl.h
-20
arch/tile/include/asm/exec.h
··· 1 - /* 2 - * Copyright 2010 Tilera Corporation. All Rights Reserved. 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License 6 - * as published by the Free Software Foundation, version 2. 7 - * 8 - * This program is distributed in the hope that it will be useful, but 9 - * WITHOUT ANY WARRANTY; without even the implied warranty of 10 - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 - * NON INFRINGEMENT. See the GNU General Public License for 12 - * more details. 13 - */ 14 - 15 - #ifndef _ASM_TILE_EXEC_H 16 - #define _ASM_TILE_EXEC_H 17 - 18 - #define arch_align_stack(x) (x) 19 - 20 - #endif /* _ASM_TILE_EXEC_H */
-2
arch/um/include/asm/processor-generic.h
··· 63 63 { 64 64 } 65 65 66 - extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); 67 - 68 66 extern unsigned long thread_saved_pc(struct task_struct *t); 69 67 70 68 static inline void mm_copy_segments(struct mm_struct *from_mm,
+2 -23
arch/um/kernel/exec.c
··· 16 16 #include <mem_user.h> 17 17 #include <skas.h> 18 18 #include <os.h> 19 - #include "internal.h" 20 19 21 20 void flush_thread(void) 22 21 { ··· 48 49 } 49 50 EXPORT_SYMBOL(start_thread); 50 51 51 - long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env) 52 + void __noreturn ret_from_kernel_execve(struct pt_regs *unused) 52 53 { 53 - long err; 54 - 55 - err = do_execve(file, argv, env, &current->thread.regs); 56 - if (!err) 57 - UML_LONGJMP(current->thread.exec_buf, 1); 58 - return err; 59 - } 60 - 61 - long sys_execve(const char __user *file, const char __user *const __user *argv, 62 - const char __user *const __user *env) 63 - { 64 - long error; 65 - char *filename; 66 - 67 - filename = getname(file); 68 - error = PTR_ERR(filename); 69 - if (IS_ERR(filename)) goto out; 70 - error = do_execve(filename, argv, env, &current->thread.regs); 71 - putname(filename); 72 - out: 73 - return error; 54 + UML_LONGJMP(current->thread.exec_buf, 1); 74 55 }
-1
arch/um/kernel/internal.h
··· 1 - extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
+3 -14
arch/um/kernel/process.c
··· 69 69 return page; 70 70 } 71 71 72 - int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 73 - { 74 - int pid; 75 - 76 - current->thread.request.u.thread.proc = fn; 77 - current->thread.request.u.thread.arg = arg; 78 - pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0, 79 - &current->thread.regs, 0, NULL, NULL); 80 - return pid; 81 - } 82 - EXPORT_SYMBOL(kernel_thread); 83 - 84 72 static inline void set_current(struct task_struct *task) 85 73 { 86 74 cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task) ··· 165 177 } 166 178 167 179 int copy_thread(unsigned long clone_flags, unsigned long sp, 168 - unsigned long stack_top, struct task_struct * p, 180 + unsigned long arg, struct task_struct * p, 169 181 struct pt_regs *regs) 170 182 { 171 183 void (*handler)(void); ··· 186 198 arch_copy_thread(&current->thread.arch, &p->thread.arch); 187 199 } else { 188 200 get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); 189 - p->thread.request.u.thread = current->thread.request.u.thread; 201 + p->thread.request.u.thread.proc = (int (*)(void *))sp; 202 + p->thread.request.u.thread.arg = (void *)arg; 190 203 handler = new_thread_handler; 191 204 } 192 205
-17
arch/um/kernel/syscall.c
··· 13 13 #include <asm/mman.h> 14 14 #include <asm/uaccess.h> 15 15 #include <asm/unistd.h> 16 - #include "internal.h" 17 16 18 17 long sys_fork(void) 19 18 { ··· 48 49 err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); 49 50 out: 50 51 return err; 51 - } 52 - 53 - int kernel_execve(const char *filename, 54 - const char *const argv[], 55 - const char *const envp[]) 56 - { 57 - mm_segment_t fs; 58 - int ret; 59 - 60 - fs = get_fs(); 61 - set_fs(KERNEL_DS); 62 - ret = um_execve(filename, (const char __user *const __user *)argv, 63 - (const char __user *const __user *) envp); 64 - set_fs(fs); 65 - 66 - return ret; 67 52 }
+1
arch/unicore32/include/asm/Kbuild
··· 11 11 generic-y += div64.h 12 12 generic-y += emergency-restart.h 13 13 generic-y += errno.h 14 + generic-y += exec.h 14 15 generic-y += fb.h 15 16 generic-y += fcntl.h 16 17 generic-y += ftrace.h
-15
arch/unicore32/include/asm/exec.h
··· 1 - /* 2 - * Process execution bits for PKUnity SoC and UniCore ISA 3 - * 4 - * Copyright (C) 2001-2012 GUAN Xue-tao 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - #ifndef __UNICORE_EXEC_H__ 11 - #define __UNICORE_EXEC_H__ 12 - 13 - #define arch_align_stack(x) (x) 14 - 15 - #endif /* __UNICORE_EXEC_H__ */
-1
arch/unicore32/kernel/sys.c
··· 104 104 out: 105 105 return ret; 106 106 } 107 - EXPORT_SYMBOL(kernel_execve); 108 107 109 108 /* Note: used by the compat code even in 64-bit Linux. */ 110 109 SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+1
arch/x86/Kconfig
··· 108 108 select GENERIC_STRNLEN_USER 109 109 select HAVE_RCU_USER_QS if X86_64 110 110 select HAVE_IRQ_TIME_ACCOUNTING 111 + select GENERIC_KERNEL_THREAD 111 112 112 113 config INSTRUCTION_DECODER 113 114 def_bool y
+1 -1
arch/x86/ia32/ia32entry.S
··· 465 465 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi 466 466 PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi 467 467 PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx 468 - PTREGSCALL stub32_execve, sys32_execve, %rcx 468 + PTREGSCALL stub32_execve, compat_sys_execve, %rcx 469 469 PTREGSCALL stub32_fork, sys_fork, %rdi 470 470 PTREGSCALL stub32_clone, sys32_clone, %rdx 471 471 PTREGSCALL stub32_vfork, sys_vfork, %rdi
-15
arch/x86/ia32/sys_ia32.c
··· 385 385 return ret; 386 386 } 387 387 388 - asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv, 389 - compat_uptr_t __user *envp, struct pt_regs *regs) 390 - { 391 - long error; 392 - char *filename; 393 - 394 - filename = getname(name); 395 - error = PTR_ERR(filename); 396 - if (IS_ERR(filename)) 397 - return error; 398 - error = compat_do_execve(filename, argv, envp, regs); 399 - putname(filename); 400 - return error; 401 - } 402 - 403 388 asmlinkage long sys32_clone(unsigned int clone_flags, unsigned int newsp, 404 389 struct pt_regs *regs) 405 390 {
-5
arch/x86/include/asm/processor.h
··· 588 588 } mm_segment_t; 589 589 590 590 591 - /* 592 - * create a kernel thread without removing it from tasklists 593 - */ 594 - extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 595 - 596 591 /* Free all resources held by a thread. */ 597 592 extern void release_thread(struct task_struct *); 598 593
-2
arch/x86/include/asm/sys_ia32.h
··· 54 54 asmlinkage long sys32_personality(unsigned long); 55 55 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); 56 56 57 - asmlinkage long sys32_execve(const char __user *, compat_uptr_t __user *, 58 - compat_uptr_t __user *, struct pt_regs *); 59 57 asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); 60 58 61 59 long sys32_lseek(unsigned int, int, unsigned int);
+1 -1
arch/x86/include/asm/syscalls.h
··· 25 25 int sys_vfork(struct pt_regs *); 26 26 long sys_execve(const char __user *, 27 27 const char __user *const __user *, 28 - const char __user *const __user *, struct pt_regs *); 28 + const char __user *const __user *); 29 29 long sys_clone(unsigned long, unsigned long, void __user *, 30 30 void __user *, struct pt_regs *); 31 31
-2
arch/x86/include/asm/thread_info.h
··· 79 79 #define TIF_SIGPENDING 2 /* signal pending */ 80 80 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 81 81 #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 82 - #define TIF_IRET 5 /* force IRET */ 83 82 #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 84 83 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 85 84 #define TIF_SECCOMP 8 /* secure computing */ ··· 104 105 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 105 106 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 106 107 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 107 - #define _TIF_IRET (1 << TIF_IRET) 108 108 #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 109 109 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 110 110 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+2
arch/x86/include/asm/unistd.h
··· 50 50 # define __ARCH_WANT_SYS_TIME 51 51 # define __ARCH_WANT_SYS_UTIME 52 52 # define __ARCH_WANT_SYS_WAITPID 53 + # define __ARCH_WANT_SYS_EXECVE 54 + # define __ARCH_WANT_KERNEL_EXECVE 53 55 54 56 /* 55 57 * "Conditional" syscalls
+1 -1
arch/x86/kernel/Makefile
··· 23 23 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o 24 24 obj-$(CONFIG_IRQ_WORK) += irq_work.o 25 25 obj-y += probe_roms.o 26 - obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o 26 + obj-$(CONFIG_X86_32) += i386_ksyms_32.o 27 27 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o 28 28 obj-y += syscall_$(BITS).o 29 29 obj-$(CONFIG_X86_64) += vsyscall_64.o
+3
arch/x86/kernel/asm-offsets.c
··· 69 69 OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); 70 70 OFFSET(BP_pref_address, boot_params, hdr.pref_address); 71 71 OFFSET(BP_code32_start, boot_params, hdr.code32_start); 72 + 73 + BLANK(); 74 + DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); 72 75 }
+18 -8
arch/x86/kernel/entry_32.S
··· 299 299 CFI_ENDPROC 300 300 END(ret_from_fork) 301 301 302 + ENTRY(ret_from_kernel_execve) 303 + movl %eax, %esp 304 + movl $0,PT_EAX(%esp) 305 + GET_THREAD_INFO(%ebp) 306 + jmp syscall_exit 307 + END(ret_from_kernel_execve) 308 + 302 309 /* 303 310 * Interrupt exit functions should be protected against kprobes 304 311 */ ··· 330 323 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax 331 324 #else 332 325 /* 333 - * We can be coming here from a syscall done in the kernel space, 334 - * e.g. a failed kernel_execve(). 326 + * We can be coming here from child spawned by kernel_thread(). 335 327 */ 336 328 movl PT_CS(%esp), %eax 337 329 andl $SEGMENT_RPL_MASK, %eax ··· 738 732 PTREGSCALL1(iopl) 739 733 PTREGSCALL0(fork) 740 734 PTREGSCALL0(vfork) 741 - PTREGSCALL3(execve) 742 735 PTREGSCALL2(sigaltstack) 743 736 PTREGSCALL0(sigreturn) 744 737 PTREGSCALL0(rt_sigreturn) ··· 1020 1015 */ 1021 1016 .popsection 1022 1017 1023 - ENTRY(kernel_thread_helper) 1024 - pushl $0 # fake return address for unwinder 1018 + ENTRY(ret_from_kernel_thread) 1025 1019 CFI_STARTPROC 1026 - movl %edi,%eax 1027 - call *%esi 1020 + pushl_cfi %eax 1021 + call schedule_tail 1022 + GET_THREAD_INFO(%ebp) 1023 + popl_cfi %eax 1024 + pushl_cfi $0x0202 # Reset kernel eflags 1025 + popfl_cfi 1026 + movl PT_EBP(%esp),%eax 1027 + call *PT_EBX(%esp) 1028 1028 call do_exit 1029 1029 ud2 # padding for call trace 1030 1030 CFI_ENDPROC 1031 - ENDPROC(kernel_thread_helper) 1031 + ENDPROC(ret_from_kernel_thread) 1032 1032 1033 1033 #ifdef CONFIG_XEN 1034 1034 /* Xen doesn't set %esp to be precisely what the normal sysenter
+25 -49
arch/x86/kernel/entry_64.S
··· 554 554 RESTORE_REST 555 555 556 556 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? 557 - jz retint_restore_args 557 + jz 1f 558 558 559 559 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET 560 560 jnz int_ret_from_sys_call 561 561 562 562 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET 563 563 jmp ret_from_sys_call # go to the SYSRET fastpath 564 + 565 + 1: 566 + subq $REST_SKIP, %rsp # move the stack pointer back 567 + CFI_ADJUST_CFA_OFFSET REST_SKIP 568 + movq %rbp, %rdi 569 + call *%rbx 570 + # exit 571 + mov %eax, %edi 572 + call do_exit 573 + ud2 # padding for call trace 564 574 565 575 CFI_ENDPROC 566 576 END(ret_from_fork) ··· 872 862 PARTIAL_FRAME 0 873 863 SAVE_REST 874 864 FIXUP_TOP_OF_STACK %r11 875 - movq %rsp, %rcx 876 865 call sys_execve 877 866 RESTORE_TOP_OF_STACK %r11 878 867 movq %rax,RAX(%rsp) ··· 921 912 PARTIAL_FRAME 0 922 913 SAVE_REST 923 914 FIXUP_TOP_OF_STACK %r11 924 - movq %rsp, %rcx 925 - call sys32_execve 915 + call compat_sys_execve 926 916 RESTORE_TOP_OF_STACK %r11 927 917 movq %rax,RAX(%rsp) 928 918 RESTORE_REST ··· 1326 1318 jmp 2b 1327 1319 .previous 1328 1320 1329 - ENTRY(kernel_thread_helper) 1330 - pushq $0 # fake return address 1331 - CFI_STARTPROC 1332 - /* 1333 - * Here we are in the child and the registers are set as they were 1334 - * at kernel_thread() invocation in the parent. 1335 - */ 1336 - call *%rsi 1337 - # exit 1338 - mov %eax, %edi 1339 - call do_exit 1340 - ud2 # padding for call trace 1341 - CFI_ENDPROC 1342 - END(kernel_thread_helper) 1343 - 1344 - /* 1345 - * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. 
1346 - * 1347 - * C extern interface: 1348 - * extern long execve(const char *name, char **argv, char **envp) 1349 - * 1350 - * asm input arguments: 1351 - * rdi: name, rsi: argv, rdx: envp 1352 - * 1353 - * We want to fallback into: 1354 - * extern long sys_execve(const char *name, char **argv,char **envp, struct pt_regs *regs) 1355 - * 1356 - * do_sys_execve asm fallback arguments: 1357 - * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack 1358 - */ 1359 - ENTRY(kernel_execve) 1360 - CFI_STARTPROC 1361 - FAKE_STACK_FRAME $0 1362 - SAVE_ALL 1363 - movq %rsp,%rcx 1364 - call sys_execve 1365 - movq %rax, RAX(%rsp) 1366 - RESTORE_REST 1367 - testq %rax,%rax 1368 - je int_ret_from_sys_call 1369 - RESTORE_ARGS 1370 - UNFAKE_STACK_FRAME 1371 - ret 1372 - CFI_ENDPROC 1373 - END(kernel_execve) 1321 + ENTRY(ret_from_kernel_execve) 1322 + movq %rdi, %rsp 1323 + movl $0, RAX(%rsp) 1324 + // RESTORE_REST 1325 + movq 0*8(%rsp), %r15 1326 + movq 1*8(%rsp), %r14 1327 + movq 2*8(%rsp), %r13 1328 + movq 3*8(%rsp), %r12 1329 + movq 4*8(%rsp), %rbp 1330 + movq 5*8(%rsp), %rbx 1331 + addq $(6*8), %rsp 1332 + jmp int_ret_from_sys_call 1333 + END(ret_from_kernel_execve) 1374 1334 1375 1335 /* Call softirq on interrupt stack. Interrupts are off. */ 1376 1336 ENTRY(call_softirq)
-65
arch/x86/kernel/process.c
··· 293 293 } 294 294 295 295 /* 296 - * This gets run with %si containing the 297 - * function to call, and %di containing 298 - * the "args". 299 - */ 300 - extern void kernel_thread_helper(void); 301 - 302 - /* 303 - * Create a kernel thread 304 - */ 305 - int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 306 - { 307 - struct pt_regs regs; 308 - 309 - memset(&regs, 0, sizeof(regs)); 310 - 311 - regs.si = (unsigned long) fn; 312 - regs.di = (unsigned long) arg; 313 - 314 - #ifdef CONFIG_X86_32 315 - regs.ds = __USER_DS; 316 - regs.es = __USER_DS; 317 - regs.fs = __KERNEL_PERCPU; 318 - regs.gs = __KERNEL_STACK_CANARY; 319 - #else 320 - regs.ss = __KERNEL_DS; 321 - #endif 322 - 323 - regs.orig_ax = -1; 324 - regs.ip = (unsigned long) kernel_thread_helper; 325 - regs.cs = __KERNEL_CS | get_kernel_rpl(); 326 - regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; 327 - 328 - /* Ok, create the new process.. */ 329 - return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); 330 - } 331 - EXPORT_SYMBOL(kernel_thread); 332 - 333 - /* 334 - * sys_execve() executes a new program. 335 - */ 336 - long sys_execve(const char __user *name, 337 - const char __user *const __user *argv, 338 - const char __user *const __user *envp, struct pt_regs *regs) 339 - { 340 - long error; 341 - char *filename; 342 - 343 - filename = getname(name); 344 - error = PTR_ERR(filename); 345 - if (IS_ERR(filename)) 346 - return error; 347 - error = do_execve(filename, argv, envp, regs); 348 - 349 - #ifdef CONFIG_X86_32 350 - if (error == 0) { 351 - /* Make sure we don't return using sysenter.. */ 352 - set_thread_flag(TIF_IRET); 353 - } 354 - #endif 355 - 356 - putname(filename); 357 - return error; 358 - } 359 - 360 - /* 361 296 * Idle related variables and functions 362 297 */ 363 298 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
+31 -8
arch/x86/kernel/process_32.c
··· 57 57 #include <asm/switch_to.h> 58 58 59 59 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 60 + asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); 60 61 61 62 /* 62 63 * Return saved PC of a blocked thread. ··· 128 127 } 129 128 130 129 int copy_thread(unsigned long clone_flags, unsigned long sp, 131 - unsigned long unused, 130 + unsigned long arg, 132 131 struct task_struct *p, struct pt_regs *regs) 133 132 { 134 - struct pt_regs *childregs; 133 + struct pt_regs *childregs = task_pt_regs(p); 135 134 struct task_struct *tsk; 136 135 int err; 137 - 138 - childregs = task_pt_regs(p); 139 - *childregs = *regs; 140 - childregs->ax = 0; 141 - childregs->sp = sp; 142 136 143 137 p->thread.sp = (unsigned long) childregs; 144 138 p->thread.sp0 = (unsigned long) (childregs+1); 145 139 146 - p->thread.ip = (unsigned long) ret_from_fork; 140 + if (unlikely(!regs)) { 141 + /* kernel thread */ 142 + memset(childregs, 0, sizeof(struct pt_regs)); 143 + p->thread.ip = (unsigned long) ret_from_kernel_thread; 144 + task_user_gs(p) = __KERNEL_STACK_CANARY; 145 + childregs->ds = __USER_DS; 146 + childregs->es = __USER_DS; 147 + childregs->fs = __KERNEL_PERCPU; 148 + childregs->bx = sp; /* function */ 149 + childregs->bp = arg; 150 + childregs->orig_ax = -1; 151 + childregs->cs = __KERNEL_CS | get_kernel_rpl(); 152 + childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; 153 + p->fpu_counter = 0; 154 + p->thread.io_bitmap_ptr = NULL; 155 + memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); 156 + return 0; 157 + } 158 + *childregs = *regs; 159 + childregs->ax = 0; 160 + childregs->sp = sp; 147 161 162 + p->thread.ip = (unsigned long) ret_from_fork; 148 163 task_user_gs(p) = get_user_gs(regs); 149 164 150 165 p->fpu_counter = 0; ··· 207 190 regs->cs = __USER_CS; 208 191 regs->ip = new_ip; 209 192 regs->sp = new_sp; 193 + regs->flags = X86_EFLAGS_IF; 194 + /* 195 + * force it to the iret return path by making it look as if there was 196 + * some work pending. 197 + */ 198 + set_thread_flag(TIF_NOTIFY_RESUME); 210 199 } 211 200 EXPORT_SYMBOL_GPL(start_thread); 212 201
+21 -14
arch/x86/kernel/process_64.c
··· 146 146 } 147 147 148 148 int copy_thread(unsigned long clone_flags, unsigned long sp, 149 - unsigned long unused, 149 + unsigned long arg, 150 150 struct task_struct *p, struct pt_regs *regs) 151 151 { 152 152 int err; 153 153 struct pt_regs *childregs; 154 154 struct task_struct *me = current; 155 155 156 - childregs = ((struct pt_regs *) 157 - (THREAD_SIZE + task_stack_page(p))) - 1; 158 - *childregs = *regs; 159 - 160 - childregs->ax = 0; 161 - if (user_mode(regs)) 162 - childregs->sp = sp; 163 - else 164 - childregs->sp = (unsigned long)childregs; 165 - 156 + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE; 157 + childregs = task_pt_regs(p); 166 158 p->thread.sp = (unsigned long) childregs; 167 - p->thread.sp0 = (unsigned long) (childregs+1); 168 159 p->thread.usersp = me->thread.usersp; 169 - 170 160 set_tsk_thread_flag(p, TIF_FORK); 171 - 172 161 p->fpu_counter = 0; 173 162 p->thread.io_bitmap_ptr = NULL; 174 163 ··· 167 178 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs; 168 179 savesegment(es, p->thread.es); 169 180 savesegment(ds, p->thread.ds); 181 + memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); 182 + 183 + if (unlikely(!regs)) { 184 + /* kernel thread */ 185 + memset(childregs, 0, sizeof(struct pt_regs)); 186 + childregs->sp = (unsigned long)childregs; 187 + childregs->ss = __KERNEL_DS; 188 + childregs->bx = sp; /* function */ 189 + childregs->bp = arg; 190 + childregs->orig_ax = -1; 191 + childregs->cs = __KERNEL_CS | get_kernel_rpl(); 192 + childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; 193 + return 0; 194 + } 195 + *childregs = *regs; 196 + 197 + childregs->ax = 0; 198 + childregs->sp = sp; 170 199 171 200 err = -ENOMEM; 172 201 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-4
arch/x86/kernel/signal.c
··· 840 840 if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) 841 841 fire_user_return_notifiers(); 842 842 843 - #ifdef CONFIG_X86_32 844 - clear_thread_flag(TIF_IRET); 845 - #endif /* CONFIG_X86_32 */ 846 - 847 843 rcu_user_enter(); 848 844 } 849 845
-40
arch/x86/kernel/sys_i386_32.c
··· 1 - /* 2 - * This file contains various random system calls that 3 - * have a non-standard calling sequence on the Linux/i386 4 - * platform. 5 - */ 6 - 7 - #include <linux/errno.h> 8 - #include <linux/sched.h> 9 - #include <linux/mm.h> 10 - #include <linux/fs.h> 11 - #include <linux/smp.h> 12 - #include <linux/sem.h> 13 - #include <linux/msg.h> 14 - #include <linux/shm.h> 15 - #include <linux/stat.h> 16 - #include <linux/syscalls.h> 17 - #include <linux/mman.h> 18 - #include <linux/file.h> 19 - #include <linux/utsname.h> 20 - #include <linux/ipc.h> 21 - 22 - #include <linux/uaccess.h> 23 - #include <linux/unistd.h> 24 - 25 - #include <asm/syscalls.h> 26 - 27 - /* 28 - * Do a system call from kernel instead of calling sys_execve so we 29 - * end up with proper pt_regs. 30 - */ 31 - int kernel_execve(const char *filename, 32 - const char *const argv[], 33 - const char *const envp[]) 34 - { 35 - long __res; 36 - asm volatile ("int $0x80" 37 - : "=a" (__res) 38 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory"); 39 - return __res; 40 - }
+3 -3
arch/x86/kernel/vm86_32.c
··· 561 561 if ((trapno == 3) || (trapno == 1)) { 562 562 KVM86->regs32->ax = VM86_TRAP + (trapno << 8); 563 563 /* setting this flag forces the code in entry_32.S to 564 - call save_v86_state() and change the stack pointer 565 - to KVM86->regs32 */ 566 - set_thread_flag(TIF_IRET); 564 + the path where we call save_v86_state() and change 565 + the stack pointer to KVM86->regs32 */ 566 + set_thread_flag(TIF_NOTIFY_RESUME); 567 567 return 0; 568 568 } 569 569 do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
+1 -1
arch/x86/syscalls/syscall_32.tbl
··· 17 17 8 i386 creat sys_creat 18 18 9 i386 link sys_link 19 19 10 i386 unlink sys_unlink 20 - 11 i386 execve ptregs_execve stub32_execve 20 + 11 i386 execve sys_execve stub32_execve 21 21 12 i386 chdir sys_chdir 22 22 13 i386 time sys_time compat_sys_time 23 23 14 i386 mknod sys_mknod
+1
arch/x86/um/Kconfig
··· 13 13 config UML_X86 14 14 def_bool y 15 15 select GENERIC_FIND_FIRST_BIT 16 + select GENERIC_KERNEL_THREAD 16 17 17 18 config 64BIT 18 19 bool "64-bit kernel" if SUBARCH = "x86"
-1
arch/x86/um/sys_call_table_32.c
··· 25 25 #define old_mmap sys_old_mmap 26 26 27 27 #define ptregs_fork sys_fork 28 - #define ptregs_execve sys_execve 29 28 #define ptregs_iopl sys_iopl 30 29 #define ptregs_vm86old sys_vm86old 31 30 #define ptregs_clone i386_clone
+1
arch/xtensa/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 3 generic-y += clkdev.h 4 + generic-y += exec.h
-14
arch/xtensa/include/asm/exec.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2001 - 2005 Tensilica Inc. 7 - */ 8 - 9 - #ifndef _XTENSA_EXEC_H 10 - #define _XTENSA_EXEC_H 11 - 12 - #define arch_align_stack(x) (x) 13 - 14 - #endif /* _XTENSA_EXEC_H */
-1
fs/binfmt_elf.c
··· 36 36 #include <asm/uaccess.h> 37 37 #include <asm/param.h> 38 38 #include <asm/page.h> 39 - #include <asm/exec.h> 40 39 41 40 #ifndef user_long_t 42 41 #define user_long_t long
-1
fs/binfmt_elf_fdpic.c
··· 39 39 #include <asm/uaccess.h> 40 40 #include <asm/param.h> 41 41 #include <asm/pgalloc.h> 42 - #include <asm/exec.h> 43 42 44 43 typedef char *elf_caddr_t; 45 44
+56 -5
fs/exec.c
··· 59 59 #include <asm/uaccess.h> 60 60 #include <asm/mmu_context.h> 61 61 #include <asm/tlb.h> 62 - #include <asm/exec.h> 63 62 64 63 #include <trace/events/task.h> 65 64 #include "internal.h" ··· 391 392 union { 392 393 const char __user *const __user *native; 393 394 #ifdef CONFIG_COMPAT 394 - compat_uptr_t __user *compat; 395 + const compat_uptr_t __user *compat; 395 396 #endif 396 397 } ptr; 397 398 }; ··· 1573 1574 } 1574 1575 1575 1576 #ifdef CONFIG_COMPAT 1576 - int compat_do_execve(char *filename, 1577 - compat_uptr_t __user *__argv, 1578 - compat_uptr_t __user *__envp, 1577 + int compat_do_execve(const char *filename, 1578 + const compat_uptr_t __user *__argv, 1579 + const compat_uptr_t __user *__envp, 1579 1580 struct pt_regs *regs) 1580 1581 { 1581 1582 struct user_arg_ptr argv = { ··· 1657 1658 { 1658 1659 return __get_dumpable(mm->flags); 1659 1660 } 1661 + 1662 + #ifdef __ARCH_WANT_SYS_EXECVE 1663 + SYSCALL_DEFINE3(execve, 1664 + const char __user *, filename, 1665 + const char __user *const __user *, argv, 1666 + const char __user *const __user *, envp) 1667 + { 1668 + const char *path = getname(filename); 1669 + int error = PTR_ERR(path); 1670 + if (!IS_ERR(path)) { 1671 + error = do_execve(path, argv, envp, current_pt_regs()); 1672 + putname(path); 1673 + } 1674 + return error; 1675 + } 1676 + #ifdef CONFIG_COMPAT 1677 + asmlinkage long compat_sys_execve(const char __user * filename, 1678 + const compat_uptr_t __user * argv, 1679 + const compat_uptr_t __user * envp) 1680 + { 1681 + const char *path = getname(filename); 1682 + int error = PTR_ERR(path); 1683 + if (!IS_ERR(path)) { 1684 + error = compat_do_execve(path, argv, envp, current_pt_regs()); 1685 + putname(path); 1686 + } 1687 + return error; 1688 + } 1689 + #endif 1690 + #endif 1691 + 1692 + #ifdef __ARCH_WANT_KERNEL_EXECVE 1693 + int kernel_execve(const char *filename, 1694 + const char *const argv[], 1695 + const char *const envp[]) 1696 + { 1697 + struct pt_regs *p = current_pt_regs(); 
1698 + int ret; 1699 + 1700 + ret = do_execve(filename, 1701 + (const char __user *const __user *)argv, 1702 + (const char __user *const __user *)envp, p); 1703 + if (ret < 0) 1704 + return ret; 1705 + 1706 + /* 1707 + * We were successful. We won't be returning to our caller, but 1708 + * instead to user space by manipulating the kernel stack. 1709 + */ 1710 + ret_from_kernel_execve(p); 1711 + } 1712 + #endif
+6
include/linux/binfmts.h
··· 19 19 20 20 #ifdef __KERNEL__ 21 21 #include <linux/sched.h> 22 + #include <linux/unistd.h> 23 + #include <asm/exec.h> 22 24 23 25 #define CORENAME_MAX_SIZE 128 24 26 ··· 136 134 extern void install_exec_creds(struct linux_binprm *bprm); 137 135 extern void set_binfmt(struct linux_binfmt *new); 138 136 extern void free_bprm(struct linux_binprm *); 137 + 138 + #ifdef __ARCH_WANT_KERNEL_EXECVE 139 + extern void ret_from_kernel_execve(struct pt_regs *normal) __noreturn; 140 + #endif 139 141 140 142 #endif /* __KERNEL__ */ 141 143 #endif /* _LINUX_BINFMTS_H */
+6 -2
include/linux/compat.h
··· 284 284 const struct compat_iovec __user *vec, 285 285 unsigned long vlen, u32 pos_low, u32 pos_high); 286 286 287 - int compat_do_execve(char *filename, compat_uptr_t __user *argv, 288 - compat_uptr_t __user *envp, struct pt_regs *regs); 287 + int compat_do_execve(const char *filename, const compat_uptr_t __user *argv, 288 + const compat_uptr_t __user *envp, struct pt_regs *regs); 289 + #ifdef __ARCH_WANT_SYS_EXECVE 290 + asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, 291 + const compat_uptr_t __user *envp); 292 + #endif 289 293 290 294 asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, 291 295 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+4
include/linux/ptrace.h
··· 401 401 #define arch_ptrace_stop(code, info) do { } while (0) 402 402 #endif 403 403 404 + #ifndef current_pt_regs 405 + #define current_pt_regs() task_pt_regs(current) 406 + #endif 407 + 404 408 extern int task_current_syscall(struct task_struct *target, long *callno, 405 409 unsigned long args[6], unsigned int maxargs, 406 410 unsigned long *sp, unsigned long *pc);
+3
include/linux/sched.h
··· 2332 2332 const char __user * const __user *, struct pt_regs *); 2333 2333 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); 2334 2334 struct task_struct *fork_idle(int); 2335 + #ifdef CONFIG_GENERIC_KERNEL_THREAD 2336 + extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2337 + #endif 2335 2338 2336 2339 extern void set_task_comm(struct task_struct *tsk, char *from); 2337 2340 extern char *get_task_comm(char *to, struct task_struct *tsk);
+12 -1
kernel/fork.c
··· 1584 1584 * requested, no event is reported; otherwise, report if the event 1585 1585 * for the type of forking is enabled. 1586 1586 */ 1587 - if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) { 1587 + if (!(clone_flags & CLONE_UNTRACED) && likely(user_mode(regs))) { 1588 1588 if (clone_flags & CLONE_VFORK) 1589 1589 trace = PTRACE_EVENT_VFORK; 1590 1590 else if ((clone_flags & CSIGNAL) != SIGCHLD) ··· 1633 1633 } 1634 1634 return nr; 1635 1635 } 1636 + 1637 + #ifdef CONFIG_GENERIC_KERNEL_THREAD 1638 + /* 1639 + * Create a kernel thread. 1640 + */ 1641 + pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 1642 + { 1643 + return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, NULL, 1644 + (unsigned long)arg, NULL, NULL); 1645 + } 1646 + #endif 1636 1647 1637 1648 #ifndef ARCH_MIN_MMSTRUCT_ALIGN 1638 1649 #define ARCH_MIN_MMSTRUCT_ALIGN 0