Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
        spinlock_t siglock;
        refcount_t count;
        wait_queue_head_t signalfd_wqh;
        struct k_sigaction action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
        int ac_flag;
        long ac_exitcode;
        unsigned long ac_mem;
        u64 ac_utime, ac_stime;
        unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
        u64 expires;
        u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
        atomic64_t utime;
        atomic64_t stime;
        atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
        (struct task_cputime_atomic) {                          \
                .utime = ATOMIC64_INIT(0),                      \
                .stime = ATOMIC64_INIT(0),                      \
                .sum_exec_runtime = ATOMIC64_INIT(0),           \
        }
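
/*
 * Usage sketch (editor's illustration, not part of this header): a static
 * initializer can use INIT_CPUTIME_ATOMIC to zero a thread_group_cputimer,
 * much as the boot-time signal_struct is set up, e.g.:
 *
 *	.cputimer = {
 *		.cputime_atomic = INIT_CPUTIME_ATOMIC,
 *	},
 */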
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic: atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
        struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
        sigset_t signal;
        struct hlist_node node;
};

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
        refcount_t sigcnt;
        atomic_t live;
        int nr_threads;
        int quick_threads;
        struct list_head thread_head;

        wait_queue_head_t wait_chldexit;        /* for wait4() */

        /* current thread group signal load-balancing target: */
        struct task_struct *curr_target;

        /* shared signal handling: */
        struct sigpending shared_pending;

        /* For collecting multiprocess signals during fork */
        struct hlist_head multiprocess;

        /* thread group exit support */
        int group_exit_code;
        /* notify group_exec_task when notify_count is less than or equal to 0 */
        int notify_count;
        struct task_struct *group_exec_task;

        /* thread group stop support, overloads group_exit_code too */
        int group_stop_count;
        unsigned int flags;             /* see SIGNAL_* flags below */

        struct core_state *core_state;  /* coredumping support */

        /*
         * PR_SET_CHILD_SUBREAPER marks a process, like a service
         * manager, to re-parent orphan (double-forking) child processes
         * to this process instead of 'init'. The service manager is
         * able to receive SIGCHLD signals and is able to investigate
         * the process until it calls wait(). All children of this
         * process will inherit a flag if they should look for a
         * child_subreaper process at exit.
         */
        unsigned int is_child_subreaper:1;
        unsigned int has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

        /* POSIX.1b Interval Timers */
        unsigned int timer_create_restore_ids:1;
        atomic_t next_posix_timer_id;
        struct hlist_head posix_timers;
        struct hlist_head ignored_posix_timers;

        /* ITIMER_REAL timer for the process */
        struct hrtimer real_timer;
        ktime_t it_real_incr;

        /*
         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
         * values are defined to 0 and 1 respectively
         */
        struct cpu_itimer it[2];

        /*
         * Thread group totals for process CPU timers.
         * See thread_group_cputimer(), et al, for details.
         */
        struct thread_group_cputimer cputimer;

#endif
        /* Empty if CONFIG_POSIX_TIMERS=n */
        struct posix_cputimers posix_cputimers;

        /* PID/PID hash table linkage. */
        struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif

        struct pid *tty_old_pgrp;

        /* boolean value for session group leader */
        int leader;

        struct tty_struct *tty;         /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
        /*
         * Cumulative resource counters for dead threads in the group,
         * and for reaped dead child processes forked by this group.
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
        seqlock_t stats_lock;
        u64 utime, stime, cutime, cstime;
        u64 gtime;
        u64 cgtime;
        struct prev_cputime prev_cputime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
        unsigned long inblock, oublock, cinblock, coublock;
        unsigned long maxrss, cmaxrss;
        struct task_io_accounting ioac;

        /*
         * Cumulative ns of schedule CPU time of dead threads in the
         * group, not including a zombie group leader. (This only differs
         * from jiffies_to_ns(utime + stime) if sched_clock uses something
         * other than jiffies.)
         */
        unsigned long long sum_sched_runtime;

        /*
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
         * to get both rlim_cur and rlim_max atomically, and either one
         * alone is a single word that can safely be read normally.
         * getrlimit/setrlimit use task_lock(current->group_leader) to
         * protect this instead of the siglock, because they really
         * have no need to disable irqs.
         */
        struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct;      /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
        struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
        unsigned audit_tty;
        struct tty_audit_buf *tty_audit_buf;
#endif

        /*
         * Thread is the potential origin of an oom condition; kill first on
         * oom
         */
        bool oom_flag_origin;
        short oom_score_adj;            /* OOM kill score adjustment */
        short oom_score_adj_min;        /* OOM kill score adjustment min value.
                                         * Only settable by CAP_SYS_RESOURCE. */
        struct mm_struct *oom_mm;       /* recorded mm when the thread group got
                                         * killed by the oom killer */

        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
                                         * (notably, ptrace).
                                         * Deprecated; do not use in new code.
                                         * Use exec_update_lock instead.
                                         */
        struct rw_semaphore exec_update_lock;   /* Held while task_struct is
                                                 * being updated during exec,
                                                 * and may have inconsistent
                                                 * permissions.
                                                 */
} __randomize_layout;

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED     0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED   0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT       0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED      0x00000010
#define SIGNAL_CLD_CONTINUED    0x00000020
#define SIGNAL_CLD_MASK         (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE       0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
                          SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
                                         unsigned int flags)
{
        WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
        sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);

static inline int kernel_dequeue_signal(void)
{
        struct task_struct *task = current;
        kernel_siginfo_t __info;
        enum pid_type __type;
        int ret;

        spin_lock_irq(&task->sighand->siglock);
        ret = dequeue_signal(&task->blocked, &__info, &__type);
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}

static inline void kernel_signal_stop(void)
{
        spin_lock_irq(&current->sighand->siglock);
        if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
                current->jobctl |= JOBCTL_STOPPED;
                set_special_state(TASK_STOPPED);
        }
        spin_unlock_irq(&current->sighand->siglock);

        schedule();
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
                            struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr);
int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int send_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
                          struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
                                const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void clear_notify_signal(void)
{
        clear_thread_flag(TIF_NOTIFY_SIGNAL);
        smp_mb__after_atomic();
}

/*
 * Returns 'true' if kick_process() is needed to force a transition from
 * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
 */
static inline bool __set_notify_signal(struct task_struct *task)
{
        return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
               !wake_up_state(task, TASK_INTERRUPTIBLE);
}

/*
 * Called to break out of interruptible wait loops, and enter the
 * exit_to_user_mode_loop().
 */
static inline void set_notify_signal(struct task_struct *task)
{
        if (__set_notify_signal(task))
                kick_process(task);
}
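
/*
 * Usage sketch (editor's illustration, not part of this header): callers
 * normally reach set_notify_signal() indirectly through TWA_SIGNAL task_work
 * rather than calling it themselves, e.g.:
 *
 *	struct callback_head work;
 *
 *	init_task_work(&work, my_func);		// my_func is a hypothetical callback
 *	if (task_work_add(task, &work, TWA_SIGNAL))
 *		...				// task is already exiting
 */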

static inline int restart_syscall(void)
{
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        return -ERESTARTNOINTR;
}

static inline int task_sigpending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
        /*
         * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
         * behavior in terms of ensuring that we break out of wait loops
         * so that notify signal callbacks can be processed.
         */
        if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
                return 1;
        return task_sigpending(p);
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
        return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
        return task_sigpending(p) && __fatal_signal_pending(p);
}
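
/*
 * Usage sketch (editor's illustration, not part of this header): long-running
 * kernel work commonly polls fatal_signal_pending() so that SIGKILL can
 * interrupt it, e.g.:
 *
 *	for (i = 0; i < nr_items; i++) {	// hypothetical loop
 *		if (fatal_signal_pending(current))
 *			return -EINTR;
 *		process_item(i);		// hypothetical helper
 *	}
 */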

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially with the case where we've got interrupted with
 * a VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
                                        struct pt_regs *regs)
{
        return unlikely((fault_flags & VM_FAULT_RETRY) &&
                        (fatal_signal_pending(current) ||
                         (user_mode(regs) && signal_pending(current))));
}
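
/*
 * Usage sketch (editor's illustration, not part of this header): arch page
 * fault handlers typically check this right after handle_mm_fault(), e.g.:
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(fault, regs)) {
 *		if (!user_mode(regs))
 *			goto no_context;	// label in the arch handler
 *		return;
 *	}
 */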

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
        unsigned int state = 0;
        if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
                t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
                state = TASK_WAKEKILL | __TASK_TRACED;
        }
        signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
        unsigned int state = 0;
        if (resume) {
                t->jobctl &= ~JOBCTL_TRACED;
                state = __TASK_TRACED;
        }
        signal_wake_up_state(t, state);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
        set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
        clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
        return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
        return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
        return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
        current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
        task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
        current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
        return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
        return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
        if (!current->restore_sigmask)
                return false;
        current->restore_sigmask = false;
        return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
        if (test_and_clear_restore_sigmask())
                __set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
        if (interrupted)
                WARN_ON(!signal_pending(current));
        else
                restore_saved_sigmask();
}
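
/*
 * Usage sketch (editor's illustration, not part of this header): syscalls
 * that take a temporary signal mask pair set_user_sigmask() with
 * restore_saved_sigmask_unless(), roughly:
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_wait(...);			// hypothetical stand-in for the wait
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */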

static inline sigset_t *sigmask_to_save(void)
{
        sigset_t *res = &current->blocked;
        if (unlikely(test_restore_sigmask()))
                res = &current->saved_sigmask;
        return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
        return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV   ((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
        return sp >= current->sas_ss_sp &&
                sp - current->sas_ss_sp < current->sas_ss_size;
#else
        return sp > current->sas_ss_sp &&
                sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
        /*
         * If the signal stack is SS_AUTODISARM then, by construction, we
         * can't be on the signal stack unless user code deliberately set
         * SS_AUTODISARM when we were already on it.
         *
         * This improves reliability: if user state gets corrupted such that
         * the stack pointer points very close to the end of the signal stack,
         * then this check will enable the signal to be handled anyway.
         */
        if (current->sas_ss_flags & SS_AUTODISARM)
                return 0;

        return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
        if (!current->sas_ss_size)
                return SS_DISABLE;

        return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
        p->sas_ss_sp = 0;
        p->sas_ss_size = 0;
        p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
        if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
                return current->sas_ss_sp;
#else
                return current->sas_ss_sp + current->sas_ss_size;
#endif
        return sp;
}
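
/*
 * Usage sketch (editor's illustration, not part of this header): arch signal
 * frame setup usually derives the frame base from sigsp(); the frame type and
 * alignment below are hypothetical and arch-specific, e.g.:
 *
 *	unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *	frame = (struct rt_sigframe __user *)round_down(sp - sizeof(*frame), 16);
 */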

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
        list_empty(&init_task.tasks)

#define next_task(p) \
        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
 * otherwise next_thread(t) will never reach g after list_del_rcu(g).
 */
#define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

#define for_other_threads(p, t) \
        for (t = p; (t = next_thread(t)) != p; )

#define __for_each_thread(signal, t) \
        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
                                lockdep_is_held(&tasklist_lock))

#define for_each_thread(p, t) \
        __for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
        for_each_process(p) for_each_thread(p, t)
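
/*
 * Usage sketch (editor's illustration, not part of this header): the
 * iteration must run under rcu_read_lock() or tasklist_lock, and since it
 * is a double loop a plain 'break' only leaves the inner loop, e.g.:
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t) {
 *		if (some_condition(t))		// hypothetical predicate
 *			goto out;		// 'break' would not stop the outer loop
 *	}
 * out:
 *	rcu_read_unlock();
 */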

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;
        if (type == PIDTYPE_PID)
                pid = task_pid(task);
        else
                pid = task->signal->pids[type];
        return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
        return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
        return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
        return p1->signal == p2->signal;
}

/*
 * returns NULL if p is the last thread in the thread group
 */
static inline struct task_struct *__next_thread(struct task_struct *p)
{
        return list_next_or_null_rcu(&p->signal->thread_head,
                                     &p->thread_node,
                                     struct task_struct,
                                     thread_node);
}

static inline struct task_struct *next_thread(struct task_struct *p)
{
        return __next_thread(p) ?: p->group_leader;
}

static inline int thread_group_empty(struct task_struct *p)
{
        return thread_group_leader(p) &&
               list_is_last(&p->thread_node, &p->signal->thread_head);
}

#define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
                                                  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
                                                       unsigned long *flags)
{
        struct sighand_struct *ret;

        ret = __lock_task_sighand(task, flags);
        (void)__cond_lock(&task->sighand->siglock, ret);
        return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
                                       unsigned long *flags)
{
        spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
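
/*
 * Usage sketch (editor's illustration, not part of this header):
 * lock_task_sighand() returns NULL if the sighand is already gone (the task
 * is exiting), so callers check the result before touching signal state:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		...	// task->signal and task->sighand are stable here
 *		unlock_task_sighand(task, &flags);
 *	}
 */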

#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
                                        unsigned int limit)
{
        return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
                                            unsigned int limit)
{
        return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
        return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
        return task_rlimit_max(current, limit);
}
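
/*
 * Usage sketch (editor's illustration, not part of this header): these
 * helpers read a single rlimit word locklessly, e.g. checking the open
 * file limit:
 *
 *	if (nr_open_files >= rlimit(RLIMIT_NOFILE))	// nr_open_files is hypothetical
 *		return -EMFILE;
 */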

#endif /* _LINUX_SCHED_SIGNAL_H */