Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/sched/signal.h at v5.6-rc5 (706 lines, 20 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t		siglock;
	refcount_t		count;
	wait_queue_head_t	signalfd_wqh;
	struct k_sigaction	action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};
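/*
 * Illustrative sketch: task_cputime_atomic exists so that the thread
 * group's CPU counters can be accumulated without locking. A hypothetical
 * reader (example name, not kernel API) can snapshot each field with a
 * plain atomic64 read; each field is individually consistent, though the
 * pair is not a single atomic snapshot.
 */
#if 0
static void example_sample_cputime(struct thread_group_cputimer *ct,
				   u64 *utime, u64 *stime)
{
	/* Lock-free reads of the per-group accumulated times. */
	*utime = atomic64_read(&ct->cputime_atomic.utime);
	*stime = atomic64_read(&ct->cputime_atomic.stime);
}
#endif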
/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head	multiprocess;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably, ptrace) */
} __randomize_layout;
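/*
 * Illustrative sketch: per the NOTE above, signal_struct piggybacks on the
 * shared sighand_struct's siglock. A hypothetical reader of the group exit
 * code could therefore look like this (safe for current, whose sighand
 * cannot vanish under it; for other tasks use lock_task_sighand(), defined
 * later in this header):
 */
#if 0
static int example_read_group_exit_code(void)
{
	int code;

	spin_lock_irq(&current->sighand->siglock);
	code = current->signal->group_exit_code;
	spin_unlock_irq(&current->sighand->siglock);

	return code;
}
#endif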
/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task,
			  sigset_t *mask, kernel_siginfo_t *info);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		set_special_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

#ifdef __ARCH_SI_TRAPNO
# define ___ARCH_SI_TRAPNO(_a1) , _a1
#else
# define ___ARCH_SI_TRAPNO(_a1)
#endif
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif
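/*
 * Illustrative sketch: a kernel thread that has a signal pending (see
 * signal_pending() below) can discard it with kernel_dequeue_signal(),
 * which returns the dequeued signal number. Hypothetical example, loosely
 * modelled on how some drivers consume an unblocked SIGKILL:
 */
#if 0
static void example_drain_signals(void)
{
	while (signal_pending(current)) {
		/* Drops the siginfo; we only look at the signal number. */
		if (kernel_dequeue_signal() == SIGKILL)
			pr_info("example: got SIGKILL, shutting down\n");
	}
}
#endif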
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * Re-evaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

void task_join_group_stop(struct task_struct *task);
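/*
 * Illustrative sketch: the canonical interruptible-wait pattern bails out
 * when signal_pending() becomes true and lets the caller restart the
 * syscall or return an error. Hypothetical example; the wait queue and
 * the *done condition are stand-ins for a real driver's state:
 */
#if 0
static int example_wait_for(wait_queue_head_t *wq, bool *done)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (*done)
			break;
		if (signal_pending(current)) {
			/* A signal interrupted the sleep. */
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(wq, &wait);

	return ret;
}
#endif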
#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!test_thread_flag(TIF_SIGPENDING));
	else
		restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;

	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}
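/*
 * Illustrative sketch: the saved_sigmask machinery backs syscalls in the
 * ppoll()/pselect() family, which atomically swap in a temporary signal
 * mask for the duration of the wait. A simplified, hypothetical caller
 * (do_example_wait() stands in for the real work, and the exact
 * interruption error code varies per syscall):
 */
#if 0
static long example_syscall(const sigset_t __user *sigmask, size_t sigsetsize)
{
	long ret;

	/* Install the caller's temporary mask; the old one is saved. */
	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_example_wait();

	/*
	 * If a signal interrupted us, the arch signal code restores the
	 * saved mask after the handler has run; otherwise restore it now.
	 */
	restore_saved_sigmask_unless(ret == -EINTR);

	return ret;
}
#endif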
static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO	((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader. For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == task_tgid(p);
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
	(thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
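/*
 * Illustrative sketch: lock_task_sighand() is the safe way to take the
 * siglock of a task other than current, because task->sighand can be torn
 * down during exit; it returns NULL in that case. Hypothetical example
 * that counts the threads of a group under the lock, using the
 * for_each_thread() iterator from above:
 */
#if 0
static int example_count_threads(struct task_struct *task)
{
	struct sighand_struct *sighand;
	struct task_struct *t;
	unsigned long flags;
	int n = 0;

	sighand = lock_task_sighand(task, &flags);
	if (!sighand)
		return -ESRCH;	/* task is exiting; no sighand anymore */

	for_each_thread(task, t)
		n++;

	unlock_task_sighand(task, &flags);
	return n;
}
#endif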
static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

#endif /* _LINUX_SCHED_SIGNAL_H */
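/*
 * Illustrative sketch: rlimit() reads the current task's soft limit
 * locklessly, which is safe per the comment on signal_struct::rlim. A
 * hypothetical size check against RLIMIT_MEMLOCK might look like:
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (npages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */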