[PATCH] do_notify_parent_cldstop: remove 'to_self' param

The previous patch changed the call sites of do_notify_parent_cldstop() so that
to_self == (tsk->ptrace & PT_PTRACED) always holds (as it should). The parameter
is therefore redundant and can be removed.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Oleg Nesterov and committed by Linus Torvalds a1d5e21e 883606a7

+11 -21
+11 -21
kernel/signal.c
··· 591 } 592 593 /* forward decl */ 594 - static void do_notify_parent_cldstop(struct task_struct *tsk, 595 - int to_self, 596 - int why); 597 598 /* 599 * Handle magic process-wide effects of stop/continue signals. ··· 641 p->signal->group_stop_count = 0; 642 p->signal->flags = SIGNAL_STOP_CONTINUED; 643 spin_unlock(&p->sighand->siglock); 644 - do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED); 645 spin_lock(&p->sighand->siglock); 646 } 647 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); ··· 682 p->signal->flags = SIGNAL_STOP_CONTINUED; 683 p->signal->group_exit_code = 0; 684 spin_unlock(&p->sighand->siglock); 685 - do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED); 686 spin_lock(&p->sighand->siglock); 687 } else { 688 /* ··· 1517 spin_unlock_irqrestore(&psig->siglock, flags); 1518 } 1519 1520 - static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why) 1521 { 1522 struct siginfo info; 1523 unsigned long flags; 1524 struct task_struct *parent; 1525 struct sighand_struct *sighand; 1526 1527 - if (to_self) 1528 parent = tsk->parent; 1529 else { 1530 tsk = tsk->group_leader; ··· 1599 !(current->ptrace & PT_ATTACHED)) && 1600 (likely(current->parent->signal != current->signal) || 1601 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { 1602 - do_notify_parent_cldstop(current, 1, CLD_TRAPPED); 1603 read_unlock(&tasklist_lock); 1604 schedule(); 1605 } else { ··· 1648 static void 1649 finish_stop(int stop_count) 1650 { 1651 - int to_self; 1652 - 1653 /* 1654 * If there are no other threads in the group, or if there is 1655 * a group stop in progress and we are the last to stop, 1656 * report to the parent. When ptraced, every thread reports itself. 
1657 */ 1658 - if (current->ptrace & PT_PTRACED) 1659 - to_self = 1; 1660 - else if (stop_count == 0) 1661 - to_self = 0; 1662 - else 1663 - goto out; 1664 1665 - read_lock(&tasklist_lock); 1666 - do_notify_parent_cldstop(current, to_self, CLD_STOPPED); 1667 - read_unlock(&tasklist_lock); 1668 - 1669 - out: 1670 schedule(); 1671 /* 1672 * Now we don't run again until continued.
··· 591 } 592 593 /* forward decl */ 594 + static void do_notify_parent_cldstop(struct task_struct *tsk, int why); 595 596 /* 597 * Handle magic process-wide effects of stop/continue signals. ··· 643 p->signal->group_stop_count = 0; 644 p->signal->flags = SIGNAL_STOP_CONTINUED; 645 spin_unlock(&p->sighand->siglock); 646 + do_notify_parent_cldstop(p, CLD_STOPPED); 647 spin_lock(&p->sighand->siglock); 648 } 649 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); ··· 684 p->signal->flags = SIGNAL_STOP_CONTINUED; 685 p->signal->group_exit_code = 0; 686 spin_unlock(&p->sighand->siglock); 687 + do_notify_parent_cldstop(p, CLD_CONTINUED); 688 spin_lock(&p->sighand->siglock); 689 } else { 690 /* ··· 1519 spin_unlock_irqrestore(&psig->siglock, flags); 1520 } 1521 1522 + static void do_notify_parent_cldstop(struct task_struct *tsk, int why) 1523 { 1524 struct siginfo info; 1525 unsigned long flags; 1526 struct task_struct *parent; 1527 struct sighand_struct *sighand; 1528 1529 + if (tsk->ptrace & PT_PTRACED) 1530 parent = tsk->parent; 1531 else { 1532 tsk = tsk->group_leader; ··· 1601 !(current->ptrace & PT_ATTACHED)) && 1602 (likely(current->parent->signal != current->signal) || 1603 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { 1604 + do_notify_parent_cldstop(current, CLD_TRAPPED); 1605 read_unlock(&tasklist_lock); 1606 schedule(); 1607 } else { ··· 1650 static void 1651 finish_stop(int stop_count) 1652 { 1653 /* 1654 * If there are no other threads in the group, or if there is 1655 * a group stop in progress and we are the last to stop, 1656 * report to the parent. When ptraced, every thread reports itself. 1657 */ 1658 + if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { 1659 + read_lock(&tasklist_lock); 1660 + do_notify_parent_cldstop(current, CLD_STOPPED); 1661 + read_unlock(&tasklist_lock); 1662 + } 1663 1664 schedule(); 1665 /* 1666 * Now we don't run again until continued.