1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6
7struct sched_param {
8 int sched_priority;
9};
10
11#include <asm/param.h> /* for HZ */
12
13#include <linux/capability.h>
14#include <linux/threads.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/timex.h>
18#include <linux/jiffies.h>
19#include <linux/rbtree.h>
20#include <linux/thread_info.h>
21#include <linux/cpumask.h>
22#include <linux/errno.h>
23#include <linux/nodemask.h>
24#include <linux/mm_types.h>
25
26#include <asm/page.h>
27#include <asm/ptrace.h>
28#include <asm/cputime.h>
29
30#include <linux/smp.h>
31#include <linux/sem.h>
32#include <linux/signal.h>
33#include <linux/compiler.h>
34#include <linux/completion.h>
35#include <linux/pid.h>
36#include <linux/percpu.h>
37#include <linux/topology.h>
38#include <linux/proportions.h>
39#include <linux/seccomp.h>
40#include <linux/rcupdate.h>
41#include <linux/rculist.h>
42#include <linux/rtmutex.h>
43
44#include <linux/time.h>
45#include <linux/param.h>
46#include <linux/resource.h>
47#include <linux/timer.h>
48#include <linux/hrtimer.h>
49#include <linux/task_io_accounting.h>
50#include <linux/latencytop.h>
51#include <linux/cred.h>
52#include <linux/llist.h>
53#include <linux/uidgid.h>
54#include <linux/gfp.h>
55
56#include <asm/processor.h>
57
58struct exec_domain;
59struct futex_pi_state;
60struct robust_list_head;
61struct bio_list;
62struct fs_struct;
63struct perf_event_context;
64struct blk_plug;
65
66/*
67 * List of flags we want to share for kernel threads,
68 * if only because they are not used by them anyway.
69 */
70#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
71
72/*
73 * These are the constants used to fake the fixed-point load-average
74 * counting (see the worked example after CALC_LOAD() below). Some notes:
75 * - 11 bit fractions expand to 22 bits by the multiplies: this gives
76 * a load-average precision of 10 bits integer + 11 bits fractional
77 * - if you want to count load-averages more often, you need more
78 * precision, or rounding will get you. With 2-second counting freq,
79 * the EXP_n values would be 1981, 2034 and 2043 if still using only
80 * 11 bit fractions.
81 */
82extern unsigned long avenrun[]; /* Load averages */
83extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
84
85#define FSHIFT 11 /* nr of bits of precision */
86#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
87#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
88#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
89#define EXP_5 2014 /* 1/exp(5sec/5min) */
90#define EXP_15 2037 /* 1/exp(5sec/15min) */
91
92#define CALC_LOAD(load,exp,n) \
93 load *= exp; \
94 load += n*(FIXED_1-exp); \
95 load >>= FSHIFT;
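/*
 * Worked example (illustrative, not part of the original header): one
 * CALC_LOAD() step of the 1-minute average.  The caller passes the active
 * task count already scaled by FIXED_1, so with a current average of 1.00
 * (load = 2048) and 3 active tasks (n = 3 * 2048 = 6144):
 *
 *	load *= 1884;			// 2048 * EXP_1          = 3858432
 *	load += 6144 * (2048 - 1884);	// + n * (FIXED_1-EXP_1) = 4866048
 *	load >>= 11;			// 2376, i.e. ~1.16 in fixed point
 *
 * The average thus decays exponentially from 1.00 toward 3.00, covering
 * roughly 8% ((FIXED_1-EXP_1)/FIXED_1) of the remaining distance every
 * LOAD_FREQ interval (~5 seconds).
 */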
96
97extern unsigned long total_forks;
98extern int nr_threads;
99DECLARE_PER_CPU(unsigned long, process_counts);
100extern int nr_processes(void);
101extern unsigned long nr_running(void);
102extern unsigned long nr_iowait(void);
103extern unsigned long nr_iowait_cpu(int cpu);
104extern unsigned long this_cpu_load(void);
105
106
107extern void calc_global_load(unsigned long ticks);
108extern void update_cpu_load_nohz(void);
109
110/* Notifier for when a task gets migrated to a new CPU */
111struct task_migration_notifier {
112 struct task_struct *task;
113 int from_cpu;
114 int to_cpu;
115};
116extern void register_task_migration_notifier(struct notifier_block *n);
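/*
 * Usage sketch (illustrative only, not part of the original header): the
 * registered notifier_block's callback receives a pointer to the
 * struct task_migration_notifier above as its data argument.  The callback
 * and notifier_block names below are hypothetical.
 *
 *	static int my_migration_cb(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct task_migration_notifier *tmn = data;
 *
 *		pr_debug("%s migrated %d -> %d\n", tmn->task->comm,
 *			 tmn->from_cpu, tmn->to_cpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_migration_nb = {
 *		.notifier_call	= my_migration_cb,
 *	};
 *
 *	register_task_migration_notifier(&my_migration_nb);
 */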
117
118extern unsigned long get_parent_ip(unsigned long addr);
119
120extern void dump_cpu_task(int cpu);
121
122struct seq_file;
123struct cfs_rq;
124struct task_group;
125#ifdef CONFIG_SCHED_DEBUG
126extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
127extern void proc_sched_set_task(struct task_struct *p);
128extern void
129print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
130#endif
131
132/*
133 * Task state bitmask. NOTE! These bits are also
134 * encoded in fs/proc/array.c: get_task_state().
135 *
136 * We have two separate sets of flags: task->state
137 * is about runnability, while task->exit_state are
138 * about the task exiting. Confusing, but this way
139 * modifying one set can't modify the other one by
140 * mistake.
141 */
142#define TASK_RUNNING 0
143#define TASK_INTERRUPTIBLE 1
144#define TASK_UNINTERRUPTIBLE 2
145#define __TASK_STOPPED 4
146#define __TASK_TRACED 8
147/* in tsk->exit_state */
148#define EXIT_ZOMBIE 16
149#define EXIT_DEAD 32
150/* in tsk->state again */
151#define TASK_DEAD 64
152#define TASK_WAKEKILL 128
153#define TASK_WAKING 256
154#define TASK_PARKED 512
155#define TASK_STATE_MAX 1024
156
157#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
158
159extern char ___assert_task_state[1 - 2*!!(
160 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
161
162/* Convenience macros for the sake of set_task_state */
163#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
164#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
165#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
166
167/* Convenience macros for the sake of wake_up */
168#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
169#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
170
171/* get_task_state() */
172#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
173 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
174 __TASK_TRACED)
175
176#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
177#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
178#define task_is_dead(task) ((task)->exit_state != 0)
179#define task_is_stopped_or_traced(task) \
180 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
181#define task_contributes_to_load(task) \
182 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
183 (task->flags & PF_FROZEN) == 0)
184
185#define __set_task_state(tsk, state_value) \
186 do { (tsk)->state = (state_value); } while (0)
187#define set_task_state(tsk, state_value) \
188 set_mb((tsk)->state, (state_value))
189
190/*
191 * set_current_state() includes a barrier so that the write of current->state
192 * is correctly serialised wrt the caller's subsequent test of whether to
193 * actually sleep:
194 *
195 * set_current_state(TASK_UNINTERRUPTIBLE);
196 * if (do_i_need_to_sleep())
197 * schedule();
198 *
199 * If the caller does not need such serialisation then use __set_current_state()
200 */
201#define __set_current_state(state_value) \
202 do { current->state = (state_value); } while (0)
203#define set_current_state(state_value) \
204 set_mb(current->state, (state_value))
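/*
 * A fuller sketch of the canonical sleep pattern described above
 * (illustrative only; "condition" is a hypothetical wake-up predicate):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The barrier in set_current_state() pairs with the waker, which sets the
 * condition and then calls wake_up(); without it the state store could be
 * reordered past the condition test and the wake-up lost.
 */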
205
206/* Task command name length */
207#define TASK_COMM_LEN 16
208
209#include <linux/spinlock.h>
210
211/*
212 * This serializes "schedule()" and also protects
213 * the run-queue from deletions/modifications (but
214 * _adding_ to the beginning of the run-queue has
215 * a separate lock).
216 */
217extern rwlock_t tasklist_lock;
218extern spinlock_t mmlist_lock;
219
220struct task_struct;
221
222#ifdef CONFIG_PROVE_RCU
223extern int lockdep_tasklist_lock_is_held(void);
224#endif /* #ifdef CONFIG_PROVE_RCU */
225
226extern void sched_init(void);
227extern void sched_init_smp(void);
228extern asmlinkage void schedule_tail(struct task_struct *prev);
229extern void init_idle(struct task_struct *idle, int cpu);
230extern void init_idle_bootup_task(struct task_struct *idle);
231
232extern int runqueue_is_locked(int cpu);
233
234#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
235extern void nohz_balance_enter_idle(int cpu);
236extern void set_cpu_sd_state_idle(void);
237extern int get_nohz_timer_target(void);
238#else
239static inline void nohz_balance_enter_idle(int cpu) { }
240static inline void set_cpu_sd_state_idle(void) { }
241#endif
242
243/*
244 * Only dump TASK_* tasks. (0 for all tasks)
245 */
246extern void show_state_filter(unsigned long state_filter);
247
248static inline void show_state(void)
249{
250 show_state_filter(0);
251}
252
253extern void show_regs(struct pt_regs *);
254
255/*
256 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
257 * task), SP is the stack pointer of the first frame that should be shown in the back
258 * trace (or NULL if the entire call-chain of the task should be shown).
259 */
260extern void show_stack(struct task_struct *task, unsigned long *sp);
261
262void io_schedule(void);
263long io_schedule_timeout(long timeout);
264
265extern void cpu_init (void);
266extern void trap_init(void);
267extern void update_process_times(int user);
268extern void scheduler_tick(void);
269
270extern void sched_show_task(struct task_struct *p);
271
272#ifdef CONFIG_LOCKUP_DETECTOR
273extern void touch_softlockup_watchdog(void);
274extern void touch_softlockup_watchdog_sync(void);
275extern void touch_all_softlockup_watchdogs(void);
276extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
277 void __user *buffer,
278 size_t *lenp, loff_t *ppos);
279extern unsigned int softlockup_panic;
280void lockup_detector_init(void);
281#else
282static inline void touch_softlockup_watchdog(void)
283{
284}
285static inline void touch_softlockup_watchdog_sync(void)
286{
287}
288static inline void touch_all_softlockup_watchdogs(void)
289{
290}
291static inline void lockup_detector_init(void)
292{
293}
294#endif
295
296/* Attach to any functions which should be ignored in wchan output. */
297#define __sched __attribute__((__section__(".sched.text")))
298
299/* Linker adds these: start and end of __sched functions */
300extern char __sched_text_start[], __sched_text_end[];
301
302/* Is this address in the __sched functions? */
303extern int in_sched_functions(unsigned long addr);
304
305#define MAX_SCHEDULE_TIMEOUT LONG_MAX
306extern signed long schedule_timeout(signed long timeout);
307extern signed long schedule_timeout_interruptible(signed long timeout);
308extern signed long schedule_timeout_killable(signed long timeout);
309extern signed long schedule_timeout_uninterruptible(signed long timeout);
310asmlinkage void schedule(void);
311extern void schedule_preempt_disabled(void);
312
313struct nsproxy;
314struct user_namespace;
315
316#ifdef CONFIG_MMU
317extern void arch_pick_mmap_layout(struct mm_struct *mm);
318extern unsigned long
319arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
320 unsigned long, unsigned long);
321extern unsigned long
322arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
323 unsigned long len, unsigned long pgoff,
324 unsigned long flags);
325extern void arch_unmap_area(struct mm_struct *, unsigned long);
326extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
327#else
328static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
329#endif
330
331
332extern void set_dumpable(struct mm_struct *mm, int value);
333extern int get_dumpable(struct mm_struct *mm);
334
335/* mm flags */
336/* dumpable bits */
337#define MMF_DUMPABLE 0 /* core dump is permitted */
338#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
339
340#define MMF_DUMPABLE_BITS 2
341#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
342
343/* coredump filter bits */
344#define MMF_DUMP_ANON_PRIVATE 2
345#define MMF_DUMP_ANON_SHARED 3
346#define MMF_DUMP_MAPPED_PRIVATE 4
347#define MMF_DUMP_MAPPED_SHARED 5
348#define MMF_DUMP_ELF_HEADERS 6
349#define MMF_DUMP_HUGETLB_PRIVATE 7
350#define MMF_DUMP_HUGETLB_SHARED 8
351
352#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
353#define MMF_DUMP_FILTER_BITS 7
354#define MMF_DUMP_FILTER_MASK \
355 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
356#define MMF_DUMP_FILTER_DEFAULT \
357 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
358 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
359
360#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
361# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
362#else
363# define MMF_DUMP_MASK_DEFAULT_ELF 0
364#endif
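/*
 * Worked example (illustrative, not part of the original header): with
 * MMF_DUMP_FILTER_SHIFT == 2, MMF_DUMP_FILTER_DEFAULT sets mm->flags bits
 * 2 (anon private), 3 (anon shared) and 7 (hugetlb private), plus bit 6
 * (ELF headers) when CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is enabled.
 * Shifted down by MMF_DUMP_FILTER_SHIFT, as /proc/<pid>/coredump_filter
 * presents it, that reads back as 0x23 (or 0x33 with ELF headers).
 */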
365 /* leave room for more dump flags */
366#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
367#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
368#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
369
370#define MMF_HAS_UPROBES 19 /* has uprobes */
371#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
372
373#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
374
375struct sighand_struct {
376 atomic_t count;
377 struct k_sigaction action[_NSIG];
378 spinlock_t siglock;
379 wait_queue_head_t signalfd_wqh;
380};
381
382struct pacct_struct {
383 int ac_flag;
384 long ac_exitcode;
385 unsigned long ac_mem;
386 cputime_t ac_utime, ac_stime;
387 unsigned long ac_minflt, ac_majflt;
388};
389
390struct cpu_itimer {
391 cputime_t expires;
392 cputime_t incr;
393 u32 error;
394 u32 incr_error;
395};
396
397/**
398 * struct cputime - snapshot of system and user cputime
399 * @utime: time spent in user mode
400 * @stime: time spent in system mode
401 *
402 * Gathers a generic snapshot of user and system time.
403 */
404struct cputime {
405 cputime_t utime;
406 cputime_t stime;
407};
408
409/**
410 * struct task_cputime - collected CPU time counts
411 * @utime: time spent in user mode, in &cputime_t units
412 * @stime: time spent in kernel mode, in &cputime_t units
413 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
414 *
415 * This is an extension of struct cputime that includes the total runtime
416 * spent by the task from the scheduler point of view.
417 *
418 * As a result, this structure groups together three kinds of CPU time
419 * that are tracked for threads and thread groups. Most things considering
420 * CPU time want to group these counts together and treat all three
421 * of them in parallel.
422 */
423struct task_cputime {
424 cputime_t utime;
425 cputime_t stime;
426 unsigned long long sum_exec_runtime;
427};
428/* Alternate field names when used to cache expirations. */
429#define prof_exp stime
430#define virt_exp utime
431#define sched_exp sum_exec_runtime
432
433#define INIT_CPUTIME \
434 (struct task_cputime) { \
435 .utime = 0, \
436 .stime = 0, \
437 .sum_exec_runtime = 0, \
438 }
439
440/*
441 * Disable preemption until the scheduler is running.
442 * Reset by start_kernel()->sched_init()->init_idle().
443 *
444 * We include PREEMPT_ACTIVE to prevent cond_resched() from working
445 * before the scheduler is active -- see should_resched().
446 */
447#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
448
449/**
450 * struct thread_group_cputimer - thread group interval timer counts
451 * @cputime: thread group interval timers.
452 * @running: non-zero when there are timers running and
453 * @cputime receives updates.
454 * @lock: lock for fields in this struct.
455 *
456 * This structure contains the version of task_cputime, above, that is
457 * used for thread group CPU timer calculations.
458 */
459struct thread_group_cputimer {
460 struct task_cputime cputime;
461 int running;
462 raw_spinlock_t lock;
463};
464
465#include <linux/rwsem.h>
466struct autogroup;
467
468/*
469 * NOTE! "signal_struct" does not have its own
470 * locking, because a shared signal_struct always
471 * implies a shared sighand_struct, so locking
472 * sighand_struct is always a proper superset of
473 * the locking of signal_struct.
474 */
475struct signal_struct {
476 atomic_t sigcnt;
477 atomic_t live;
478 int nr_threads;
479
480 wait_queue_head_t wait_chldexit; /* for wait4() */
481
482 /* current thread group signal load-balancing target: */
483 struct task_struct *curr_target;
484
485 /* shared signal handling: */
486 struct sigpending shared_pending;
487
488 /* thread group exit support */
489 int group_exit_code;
490 /* overloaded:
491 * - notify group_exit_task when ->count is equal to notify_count
492 * - everyone except group_exit_task is stopped during signal delivery
493 * of fatal signals, group_exit_task processes the signal.
494 */
495 int notify_count;
496 struct task_struct *group_exit_task;
497
498 /* thread group stop support, overloads group_exit_code too */
499 int group_stop_count;
500 unsigned int flags; /* see SIGNAL_* flags below */
501
502 /*
503 * PR_SET_CHILD_SUBREAPER marks a process, like a service
504 * manager, to re-parent orphan (double-forking) child processes
505 * to this process instead of 'init'. The service manager is
506 * able to receive SIGCHLD signals and is able to investigate
507 * the process until it calls wait(). All children of this
508 * process will inherit a flag if they should look for a
509 * child_subreaper process at exit.
510 */
511 unsigned int is_child_subreaper:1;
512 unsigned int has_child_subreaper:1;
513
514 /* POSIX.1b Interval Timers */
515 int posix_timer_id;
516 struct list_head posix_timers;
517
518 /* ITIMER_REAL timer for the process */
519 struct hrtimer real_timer;
520 struct pid *leader_pid;
521 ktime_t it_real_incr;
522
523 /*
524 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
525 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
526 * values are defined to 0 and 1 respectively
527 */
528 struct cpu_itimer it[2];
529
530 /*
531 * Thread group totals for process CPU timers.
532 * See thread_group_cputimer(), et al, for details.
533 */
534 struct thread_group_cputimer cputimer;
535
536 /* Earliest-expiration cache. */
537 struct task_cputime cputime_expires;
538
539 struct list_head cpu_timers[3];
540
541 struct pid *tty_old_pgrp;
542
543 /* boolean value for session group leader */
544 int leader;
545
546 struct tty_struct *tty; /* NULL if no tty */
547
548#ifdef CONFIG_SCHED_AUTOGROUP
549 struct autogroup *autogroup;
550#endif
551 /*
552 * Cumulative resource counters for dead threads in the group,
553 * and for reaped dead child processes forked by this group.
554 * Live threads maintain their own counters and add to these
555 * in __exit_signal, except for the group leader.
556 */
557 cputime_t utime, stime, cutime, cstime;
558 cputime_t gtime;
559 cputime_t cgtime;
560#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
561 struct cputime prev_cputime;
562#endif
563 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
564 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
565 unsigned long inblock, oublock, cinblock, coublock;
566 unsigned long maxrss, cmaxrss;
567 struct task_io_accounting ioac;
568
569 /*
570 * Cumulative ns of scheduled CPU time for dead threads in the
571 * group, not including a zombie group leader.  (This only differs
572 * from jiffies_to_ns(utime + stime) if sched_clock uses something
573 * other than jiffies.)
574 */
575 unsigned long long sum_sched_runtime;
576
577 /*
578 * We don't bother to synchronize most readers of this at all,
579 * because there is no reader checking a limit that actually needs
580 * to get both rlim_cur and rlim_max atomically, and either one
581 * alone is a single word that can safely be read normally.
582 * getrlimit/setrlimit use task_lock(current->group_leader) to
583 * protect this instead of the siglock, because they really
584 * have no need to disable irqs.
585 */
586 struct rlimit rlim[RLIM_NLIMITS];
587
588#ifdef CONFIG_BSD_PROCESS_ACCT
589 struct pacct_struct pacct; /* per-process accounting information */
590#endif
591#ifdef CONFIG_TASKSTATS
592 struct taskstats *stats;
593#endif
594#ifdef CONFIG_AUDIT
595 unsigned audit_tty;
596 unsigned audit_tty_log_passwd;
597 struct tty_audit_buf *tty_audit_buf;
598#endif
599#ifdef CONFIG_CGROUPS
600 /*
601 * group_rwsem prevents new tasks from entering the threadgroup and
602 * member tasks from exiting, more specifically, setting of
603 * PF_EXITING. fork and exit paths are protected with this rwsem
604 * using threadgroup_change_begin/end(). Users which require
605 * threadgroup to remain stable should use threadgroup_[un]lock()
606 * which also takes care of exec path. Currently, cgroup is the
607 * only user.
608 */
609 struct rw_semaphore group_rwsem;
610#endif
611
612 oom_flags_t oom_flags;
613 short oom_score_adj; /* OOM kill score adjustment */
614 short oom_score_adj_min; /* OOM kill score adjustment min value.
615 * Only settable by CAP_SYS_RESOURCE. */
616
617 struct mutex cred_guard_mutex; /* guard against foreign influences on
618 * credential calculations
619 * (notably ptrace) */
620};
621
622/*
623 * Bits in flags field of signal_struct.
624 */
625#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
626#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
627#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
628#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
629/*
630 * Pending notifications to parent.
631 */
632#define SIGNAL_CLD_STOPPED 0x00000010
633#define SIGNAL_CLD_CONTINUED 0x00000020
634#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
635
636#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
637
638/* If true, all threads except ->group_exit_task have pending SIGKILL */
639static inline int signal_group_exit(const struct signal_struct *sig)
640{
641 return (sig->flags & SIGNAL_GROUP_EXIT) ||
642 (sig->group_exit_task != NULL);
643}
644
645/*
646 * Some day this will be a full-fledged user tracking system.
647 */
648struct user_struct {
649 atomic_t __count; /* reference count */
650 atomic_t processes; /* How many processes does this user have? */
651 atomic_t files; /* How many open files does this user have? */
652 atomic_t sigpending; /* How many pending signals does this user have? */
653#ifdef CONFIG_INOTIFY_USER
654 atomic_t inotify_watches; /* How many inotify watches does this user have? */
655 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
656#endif
657#ifdef CONFIG_FANOTIFY
658 atomic_t fanotify_listeners;
659#endif
660#ifdef CONFIG_EPOLL
661 atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
662#endif
663#ifdef CONFIG_POSIX_MQUEUE
664 /* protected by mq_lock */
665 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
666#endif
667 unsigned long locked_shm; /* How many pages of mlocked shm ? */
668
669#ifdef CONFIG_KEYS
670 struct key *uid_keyring; /* UID specific keyring */
671 struct key *session_keyring; /* UID's default session keyring */
672#endif
673
674 /* Hash table maintenance information */
675 struct hlist_node uidhash_node;
676 kuid_t uid;
677
678#ifdef CONFIG_PERF_EVENTS
679 atomic_long_t locked_vm;
680#endif
681};
682
683extern int uids_sysfs_init(void);
684
685extern struct user_struct *find_user(kuid_t);
686
687extern struct user_struct root_user;
688#define INIT_USER (&root_user)
689
690
691struct backing_dev_info;
692struct reclaim_state;
693
694#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
695struct sched_info {
696 /* cumulative counters */
697 unsigned long pcount; /* # of times run on this cpu */
698 unsigned long long run_delay; /* time spent waiting on a runqueue */
699
700 /* timestamps */
701 unsigned long long last_arrival,/* when we last ran on a cpu */
702 last_queued; /* when we were last queued to run */
703};
704#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
705
706#ifdef CONFIG_TASK_DELAY_ACCT
707struct task_delay_info {
708 spinlock_t lock;
709 unsigned int flags; /* Private per-task flags */
710
711 /* For each stat XXX, add following, aligned appropriately
712 *
713 * struct timespec XXX_start, XXX_end;
714 * u64 XXX_delay;
715 * u32 XXX_count;
716 *
717 * Atomicity of updates to XXX_delay, XXX_count protected by
718 * single lock above (split into XXX_lock if contention is an issue).
719 */
720
721 /*
722 * XXX_count is incremented on every XXX operation, the delay
723 * associated with the operation is added to XXX_delay.
724 * XXX_delay contains the accumulated delay time in nanoseconds.
725 */
726 struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
727 u64 blkio_delay; /* wait for sync block io completion */
728 u64 swapin_delay; /* wait for swapin block io completion */
729 u32 blkio_count; /* total count of the number of sync block */
730 /* io operations performed */
731 u32 swapin_count; /* total count of the number of swapin block */
732 /* io operations performed */
733
734 struct timespec freepages_start, freepages_end;
735 u64 freepages_delay; /* wait for memory reclaim */
736 u32 freepages_count; /* total count of memory reclaim */
737};
738#endif /* CONFIG_TASK_DELAY_ACCT */
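/*
 * Sketch of the accounting pattern described in the comment above
 * (illustrative only; the real helpers live in kernel/delayacct.c and
 * update the counters under delays->lock):
 *
 *	ktime_get_ts(&delays->blkio_start);
 *	...					// wait for the block I/O
 *	ktime_get_ts(&delays->blkio_end);
 *	delays->blkio_delay += timespec_to_ns(&delays->blkio_end) -
 *			       timespec_to_ns(&delays->blkio_start);
 *	delays->blkio_count++;
 */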
739
740static inline int sched_info_on(void)
741{
742#ifdef CONFIG_SCHEDSTATS
743 return 1;
744#elif defined(CONFIG_TASK_DELAY_ACCT)
745 extern int delayacct_on;
746 return delayacct_on;
747#else
748 return 0;
749#endif
750}
751
752enum cpu_idle_type {
753 CPU_IDLE,
754 CPU_NOT_IDLE,
755 CPU_NEWLY_IDLE,
756 CPU_MAX_IDLE_TYPES
757};
758
759/*
760 * Increase resolution of cpu_power calculations
761 */
762#define SCHED_POWER_SHIFT 10
763#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT)
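/*
 * Example (illustrative): cpu_power is fixed point with SCHED_POWER_SCALE
 * (1024) meaning one fully available CPU, so a sched_group spanning two
 * such CPUs has a power of 2048, while a CPU losing ~25% of its capacity
 * to irq or RT work would report ~768.
 */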
764
765/*
766 * sched-domains (multiprocessor balancing) declarations:
767 */
768#ifdef CONFIG_SMP
769#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
770#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
771#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
772#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
773#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
774#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
775#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
776#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
777#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
778#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
779#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
780#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
781
782extern int __weak arch_sd_sibiling_asym_packing(void);
783
784struct sched_domain_attr {
785 int relax_domain_level;
786};
787
788#define SD_ATTR_INIT (struct sched_domain_attr) { \
789 .relax_domain_level = -1, \
790}
791
792extern int sched_domain_level_max;
793
794struct sched_group;
795
796struct sched_domain {
797 /* These fields must be setup */
798 struct sched_domain *parent; /* top domain must be null terminated */
799 struct sched_domain *child; /* bottom domain must be null terminated */
800 struct sched_group *groups; /* the balancing groups of the domain */
801 unsigned long min_interval; /* Minimum balance interval ms */
802 unsigned long max_interval; /* Maximum balance interval ms */
803 unsigned int busy_factor; /* less balancing by factor if busy */
804 unsigned int imbalance_pct; /* No balance until over watermark */
805 unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
806 unsigned int busy_idx;
807 unsigned int idle_idx;
808 unsigned int newidle_idx;
809 unsigned int wake_idx;
810 unsigned int forkexec_idx;
811 unsigned int smt_gain;
812
813 int nohz_idle; /* NOHZ IDLE status */
814 int flags; /* See SD_* */
815 int level;
816
817 /* Runtime fields. */
818 unsigned long last_balance; /* init to jiffies. units in jiffies */
819 unsigned int balance_interval; /* initialise to 1. units in ms. */
820 unsigned int nr_balance_failed; /* initialise to 0 */
821
822 u64 last_update;
823
824#ifdef CONFIG_SCHEDSTATS
825 /* load_balance() stats */
826 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
827 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
828 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
829 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
830 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
831 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
832 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
833 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
834
835 /* Active load balancing */
836 unsigned int alb_count;
837 unsigned int alb_failed;
838 unsigned int alb_pushed;
839
840 /* SD_BALANCE_EXEC stats */
841 unsigned int sbe_count;
842 unsigned int sbe_balanced;
843 unsigned int sbe_pushed;
844
845 /* SD_BALANCE_FORK stats */
846 unsigned int sbf_count;
847 unsigned int sbf_balanced;
848 unsigned int sbf_pushed;
849
850 /* try_to_wake_up() stats */
851 unsigned int ttwu_wake_remote;
852 unsigned int ttwu_move_affine;
853 unsigned int ttwu_move_balance;
854#endif
855#ifdef CONFIG_SCHED_DEBUG
856 char *name;
857#endif
858 union {
859 void *private; /* used during construction */
860 struct rcu_head rcu; /* used during destruction */
861 };
862
863 unsigned int span_weight;
864 /*
865 * Span of all CPUs in this domain.
866 *
867 * NOTE: this field is variable length. (Allocated dynamically
868 * by attaching extra space to the end of the structure,
869 * depending on how many CPUs the kernel has booted up with)
870 */
871 unsigned long span[0];
872};
873
874static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
875{
876 return to_cpumask(sd->span);
877}
878
879extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
880 struct sched_domain_attr *dattr_new);
881
882/* Allocate an array of sched domains, for partition_sched_domains(). */
883cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
884void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
885
886bool cpus_share_cache(int this_cpu, int that_cpu);
887
888#else /* CONFIG_SMP */
889
890struct sched_domain_attr;
891
892static inline void
893partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
894 struct sched_domain_attr *dattr_new)
895{
896}
897
898static inline bool cpus_share_cache(int this_cpu, int that_cpu)
899{
900 return true;
901}
902
903#endif /* !CONFIG_SMP */
904
905
906struct io_context; /* See blkdev.h */
907
908
909#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
910extern void prefetch_stack(struct task_struct *t);
911#else
912static inline void prefetch_stack(struct task_struct *t) { }
913#endif
914
915struct audit_context; /* See audit.c */
916struct mempolicy;
917struct pipe_inode_info;
918struct uts_namespace;
919
920struct load_weight {
921 unsigned long weight, inv_weight;
922};
923
924struct sched_avg {
925 /*
926 * These sums represent an infinite geometric series and so are bound
927 * above by 1024/(1-y). Thus we only need a u32 to store them for all
928 * choices of y < 1-2^(-32)*1024.
929 */
930 u32 runnable_avg_sum, runnable_avg_period;
931 u64 last_runnable_update;
932 s64 decay_count;
933 unsigned long load_avg_contrib;
934};
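/*
 * Worked example (illustrative): the per-entity tracking uses y^32 = 1/2,
 * i.e. y ~= 0.9786, so the series above is bounded by roughly
 * 1024/(1 - y) ~= 47742 (LOAD_AVG_MAX in kernel/sched/fair.c), which fits
 * comfortably in a u32.
 */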
935
936#ifdef CONFIG_SCHEDSTATS
937struct sched_statistics {
938 u64 wait_start;
939 u64 wait_max;
940 u64 wait_count;
941 u64 wait_sum;
942 u64 iowait_count;
943 u64 iowait_sum;
944
945 u64 sleep_start;
946 u64 sleep_max;
947 s64 sum_sleep_runtime;
948
949 u64 block_start;
950 u64 block_max;
951 u64 exec_max;
952 u64 slice_max;
953
954 u64 nr_migrations_cold;
955 u64 nr_failed_migrations_affine;
956 u64 nr_failed_migrations_running;
957 u64 nr_failed_migrations_hot;
958 u64 nr_forced_migrations;
959
960 u64 nr_wakeups;
961 u64 nr_wakeups_sync;
962 u64 nr_wakeups_migrate;
963 u64 nr_wakeups_local;
964 u64 nr_wakeups_remote;
965 u64 nr_wakeups_affine;
966 u64 nr_wakeups_affine_attempts;
967 u64 nr_wakeups_passive;
968 u64 nr_wakeups_idle;
969};
970#endif
971
972struct sched_entity {
973 struct load_weight load; /* for load-balancing */
974 struct rb_node run_node;
975 struct list_head group_node;
976 unsigned int on_rq;
977
978 u64 exec_start;
979 u64 sum_exec_runtime;
980 u64 vruntime;
981 u64 prev_sum_exec_runtime;
982
983 u64 nr_migrations;
984
985#ifdef CONFIG_SCHEDSTATS
986 struct sched_statistics statistics;
987#endif
988
989#ifdef CONFIG_FAIR_GROUP_SCHED
990 struct sched_entity *parent;
991 /* rq on which this entity is (to be) queued: */
992 struct cfs_rq *cfs_rq;
993 /* rq "owned" by this entity/group: */
994 struct cfs_rq *my_q;
995#endif
996
997/*
998 * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
999 * removed when useful for applications beyond shares distribution (e.g.
1000 * load-balance).
1001 */
1002#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1003 /* Per-entity load-tracking */
1004 struct sched_avg avg;
1005#endif
1006};
1007
1008struct sched_rt_entity {
1009 struct list_head run_list;
1010 unsigned long timeout;
1011 unsigned long watchdog_stamp;
1012 unsigned int time_slice;
1013
1014 struct sched_rt_entity *back;
1015#ifdef CONFIG_RT_GROUP_SCHED
1016 struct sched_rt_entity *parent;
1017 /* rq on which this entity is (to be) queued: */
1018 struct rt_rq *rt_rq;
1019 /* rq "owned" by this entity/group: */
1020 struct rt_rq *my_q;
1021#endif
1022};
1023
1024
1025struct rcu_node;
1026
1027enum perf_event_task_context {
1028 perf_invalid_context = -1,
1029 perf_hw_context = 0,
1030 perf_sw_context,
1031 perf_nr_task_contexts,
1032};
1033
1034struct task_struct {
1035 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
1036 void *stack;
1037 atomic_t usage;
1038 unsigned int flags; /* per process flags, defined below */
1039 unsigned int ptrace;
1040
1041#ifdef CONFIG_SMP
1042 struct llist_node wake_entry;
1043 int on_cpu;
1044#endif
1045 int on_rq;
1046
1047 int prio, static_prio, normal_prio;
1048 unsigned int rt_priority;
1049 const struct sched_class *sched_class;
1050 struct sched_entity se;
1051 struct sched_rt_entity rt;
1052#ifdef CONFIG_CGROUP_SCHED
1053 struct task_group *sched_task_group;
1054#endif
1055
1056#ifdef CONFIG_PREEMPT_NOTIFIERS
1057 /* list of struct preempt_notifier: */
1058 struct hlist_head preempt_notifiers;
1059#endif
1060
1061 /*
1062 * fpu_counter contains the number of consecutive context switches
1063 * during which the FPU is used. If this is over a threshold, the lazy fpu
1064 * saving becomes unlazy to save the trap. This is an unsigned char
1065 * so that after 256 times the counter wraps and the behavior turns
1066 * lazy again; this is to deal with bursty apps that only use the FPU for
1067 * a short time.
1068 */
1069 unsigned char fpu_counter;
1070#ifdef CONFIG_BLK_DEV_IO_TRACE
1071 unsigned int btrace_seq;
1072#endif
1073
1074 unsigned int policy;
1075 int nr_cpus_allowed;
1076 cpumask_t cpus_allowed;
1077
1078#ifdef CONFIG_PREEMPT_RCU
1079 int rcu_read_lock_nesting;
1080 char rcu_read_unlock_special;
1081 struct list_head rcu_node_entry;
1082#endif /* #ifdef CONFIG_PREEMPT_RCU */
1083#ifdef CONFIG_TREE_PREEMPT_RCU
1084 struct rcu_node *rcu_blocked_node;
1085#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1086#ifdef CONFIG_RCU_BOOST
1087 struct rt_mutex *rcu_boost_mutex;
1088#endif /* #ifdef CONFIG_RCU_BOOST */
1089
1090#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1091 struct sched_info sched_info;
1092#endif
1093
1094 struct list_head tasks;
1095#ifdef CONFIG_SMP
1096 struct plist_node pushable_tasks;
1097#endif
1098
1099 struct mm_struct *mm, *active_mm;
1100#ifdef CONFIG_COMPAT_BRK
1101 unsigned brk_randomized:1;
1102#endif
1103#if defined(SPLIT_RSS_COUNTING)
1104 struct task_rss_stat rss_stat;
1105#endif
1106/* task state */
1107 int exit_state;
1108 int exit_code, exit_signal;
1109 int pdeath_signal; /* The signal sent when the parent dies */
1110 unsigned int jobctl; /* JOBCTL_*, siglock protected */
1111
1112 /* Used for emulating ABI behavior of previous Linux versions */
1113 unsigned int personality;
1114
1115 unsigned did_exec:1;
1116 unsigned in_execve:1; /* Tell the LSMs that the process is doing an
1117 * execve */
1118 unsigned in_iowait:1;
1119
1120 /* task may not gain privileges */
1121 unsigned no_new_privs:1;
1122
1123 /* Revert to default priority/policy when forking */
1124 unsigned sched_reset_on_fork:1;
1125 unsigned sched_contributes_to_load:1;
1126
1127 pid_t pid;
1128 pid_t tgid;
1129
1130#ifdef CONFIG_CC_STACKPROTECTOR
1131 /* Canary value for the -fstack-protector gcc feature */
1132 unsigned long stack_canary;
1133#endif
1134 /*
1135 * pointers to (original) parent process, youngest child, younger sibling,
1136 * older sibling, respectively. (p->father can be replaced with
1137 * p->real_parent->pid)
1138 */
1139 struct task_struct __rcu *real_parent; /* real parent process */
1140 struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1141 /*
1142 * children/sibling forms the list of my natural children
1143 */
1144 struct list_head children; /* list of my children */
1145 struct list_head sibling; /* linkage in my parent's children list */
1146 struct task_struct *group_leader; /* threadgroup leader */
1147
1148 /*
1149 * ptraced is the list of tasks this task is using ptrace on.
1150 * This includes both natural children and PTRACE_ATTACH targets.
1151 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1152 */
1153 struct list_head ptraced;
1154 struct list_head ptrace_entry;
1155
1156 /* PID/PID hash table linkage. */
1157 struct pid_link pids[PIDTYPE_MAX];
1158 struct list_head thread_group;
1159
1160 struct completion *vfork_done; /* for vfork() */
1161 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
1162 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
1163
1164 cputime_t utime, stime, utimescaled, stimescaled;
1165 cputime_t gtime;
1166#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1167 struct cputime prev_cputime;
1168#endif
1169#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1170 seqlock_t vtime_seqlock;
1171 unsigned long long vtime_snap;
1172 enum {
1173 VTIME_SLEEPING = 0,
1174 VTIME_USER,
1175 VTIME_SYS,
1176 } vtime_snap_whence;
1177#endif
1178 unsigned long nvcsw, nivcsw; /* context switch counts */
1179 struct timespec start_time; /* monotonic time */
1180 struct timespec real_start_time; /* boot based time */
1181/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1182 unsigned long min_flt, maj_flt;
1183
1184 struct task_cputime cputime_expires;
1185 struct list_head cpu_timers[3];
1186
1187/* process credentials */
1188 const struct cred __rcu *real_cred; /* objective and real subjective task
1189 * credentials (COW) */
1190 const struct cred __rcu *cred; /* effective (overridable) subjective task
1191 * credentials (COW) */
1192 char comm[TASK_COMM_LEN]; /* executable name excluding path
1193 - access with [gs]et_task_comm (which lock
1194 it with task_lock())
1195 - initialized normally by setup_new_exec */
1196/* file system info */
1197 int link_count, total_link_count;
1198#ifdef CONFIG_SYSVIPC
1199/* ipc stuff */
1200 struct sysv_sem sysvsem;
1201#endif
1202#ifdef CONFIG_DETECT_HUNG_TASK
1203/* hung task detection */
1204 unsigned long last_switch_count;
1205#endif
1206/* CPU-specific state of this task */
1207 struct thread_struct thread;
1208/* filesystem information */
1209 struct fs_struct *fs;
1210/* open file information */
1211 struct files_struct *files;
1212/* namespaces */
1213 struct nsproxy *nsproxy;
1214/* signal handlers */
1215 struct signal_struct *signal;
1216 struct sighand_struct *sighand;
1217
1218 sigset_t blocked, real_blocked;
1219 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
1220 struct sigpending pending;
1221
1222 unsigned long sas_ss_sp;
1223 size_t sas_ss_size;
1224 int (*notifier)(void *priv);
1225 void *notifier_data;
1226 sigset_t *notifier_mask;
1227 struct callback_head *task_works;
1228
1229 struct audit_context *audit_context;
1230#ifdef CONFIG_AUDITSYSCALL
1231 kuid_t loginuid;
1232 unsigned int sessionid;
1233#endif
1234 struct seccomp seccomp;
1235
1236/* Thread group tracking */
1237 u32 parent_exec_id;
1238 u32 self_exec_id;
1239/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1240 * mempolicy */
1241 spinlock_t alloc_lock;
1242
1243 /* Protection of the PI data structures: */
1244 raw_spinlock_t pi_lock;
1245
1246#ifdef CONFIG_RT_MUTEXES
1247 /* PI waiters blocked on a rt_mutex held by this task */
1248 struct plist_head pi_waiters;
1249 /* Deadlock detection and priority inheritance handling */
1250 struct rt_mutex_waiter *pi_blocked_on;
1251#endif
1252
1253#ifdef CONFIG_DEBUG_MUTEXES
1254 /* mutex deadlock detection */
1255 struct mutex_waiter *blocked_on;
1256#endif
1257#ifdef CONFIG_TRACE_IRQFLAGS
1258 unsigned int irq_events;
1259 unsigned long hardirq_enable_ip;
1260 unsigned long hardirq_disable_ip;
1261 unsigned int hardirq_enable_event;
1262 unsigned int hardirq_disable_event;
1263 int hardirqs_enabled;
1264 int hardirq_context;
1265 unsigned long softirq_disable_ip;
1266 unsigned long softirq_enable_ip;
1267 unsigned int softirq_disable_event;
1268 unsigned int softirq_enable_event;
1269 int softirqs_enabled;
1270 int softirq_context;
1271#endif
1272#ifdef CONFIG_LOCKDEP
1273# define MAX_LOCK_DEPTH 48UL
1274 u64 curr_chain_key;
1275 int lockdep_depth;
1276 unsigned int lockdep_recursion;
1277 struct held_lock held_locks[MAX_LOCK_DEPTH];
1278 gfp_t lockdep_reclaim_gfp;
1279#endif
1280
1281/* journalling filesystem info */
1282 void *journal_info;
1283
1284/* stacked block device info */
1285 struct bio_list *bio_list;
1286
1287#ifdef CONFIG_BLOCK
1288/* stack plugging */
1289 struct blk_plug *plug;
1290#endif
1291
1292/* VM state */
1293 struct reclaim_state *reclaim_state;
1294
1295 struct backing_dev_info *backing_dev_info;
1296
1297 struct io_context *io_context;
1298
1299 unsigned long ptrace_message;
1300 siginfo_t *last_siginfo; /* For ptrace use. */
1301 struct task_io_accounting ioac;
1302#if defined(CONFIG_TASK_XACCT)
1303 u64 acct_rss_mem1; /* accumulated rss usage */
1304 u64 acct_vm_mem1; /* accumulated virtual memory usage */
1305 cputime_t acct_timexpd; /* stime + utime since last update */
1306#endif
1307#ifdef CONFIG_CPUSETS
1308 nodemask_t mems_allowed; /* Protected by alloc_lock */
1309 seqcount_t mems_allowed_seq; /* Sequence no. to catch updates */
1310 int cpuset_mem_spread_rotor;
1311 int cpuset_slab_spread_rotor;
1312#endif
1313#ifdef CONFIG_CGROUPS
1314 /* Control Group info protected by css_set_lock */
1315 struct css_set __rcu *cgroups;
1316 /* cg_list protected by css_set_lock and tsk->alloc_lock */
1317 struct list_head cg_list;
1318#endif
1319#ifdef CONFIG_FUTEX
1320 struct robust_list_head __user *robust_list;
1321#ifdef CONFIG_COMPAT
1322 struct compat_robust_list_head __user *compat_robust_list;
1323#endif
1324 struct list_head pi_state_list;
1325 struct futex_pi_state *pi_state_cache;
1326#endif
1327#ifdef CONFIG_PERF_EVENTS
1328 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1329 struct mutex perf_event_mutex;
1330 struct list_head perf_event_list;
1331#endif
1332#ifdef CONFIG_NUMA
1333 struct mempolicy *mempolicy; /* Protected by alloc_lock */
1334 short il_next;
1335 short pref_node_fork;
1336#endif
1337#ifdef CONFIG_NUMA_BALANCING
1338 int numa_scan_seq;
1339 int numa_migrate_seq;
1340 unsigned int numa_scan_period;
1341 u64 node_stamp; /* migration stamp */
1342 struct callback_head numa_work;
1343#endif /* CONFIG_NUMA_BALANCING */
1344
1345 struct rcu_head rcu;
1346
1347 /*
1348 * cache last used pipe for splice
1349 */
1350 struct pipe_inode_info *splice_pipe;
1351
1352 struct page_frag task_frag;
1353
1354#ifdef CONFIG_TASK_DELAY_ACCT
1355 struct task_delay_info *delays;
1356#endif
1357#ifdef CONFIG_FAULT_INJECTION
1358 int make_it_fail;
1359#endif
1360 /*
1361 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1362 * balance_dirty_pages() for some dirty throttling pause
1363 */
1364 int nr_dirtied;
1365 int nr_dirtied_pause;
1366 unsigned long dirty_paused_when; /* start of a write-and-pause period */
1367
1368#ifdef CONFIG_LATENCYTOP
1369 int latency_record_count;
1370 struct latency_record latency_record[LT_SAVECOUNT];
1371#endif
1372 /*
1373 * time slack values; these are used to round up poll() and
1374 * select() etc timeout values. These are in nanoseconds.
1375 */
1376 unsigned long timer_slack_ns;
1377 unsigned long default_timer_slack_ns;
1378
1379#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1380 /* Index of current stored address in ret_stack */
1381 int curr_ret_stack;
1382 /* Stack of return addresses for return function tracing */
1383 struct ftrace_ret_stack *ret_stack;
1384 /* time stamp for last schedule */
1385 unsigned long long ftrace_timestamp;
1386 /*
1387 * Number of functions that haven't been traced
1388 * because of depth overrun.
1389 */
1390 atomic_t trace_overrun;
1391 /* Pause for the tracing */
1392 atomic_t tracing_graph_pause;
1393#endif
1394#ifdef CONFIG_TRACING
1395 /* state flags for use by tracers */
1396 unsigned long trace;
1397 /* bitmask and counter of trace recursion */
1398 unsigned long trace_recursion;
1399#endif /* CONFIG_TRACING */
1400#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1401 struct memcg_batch_info {
1402 int do_batch; /* incremented when batch uncharge started */
1403 struct mem_cgroup *memcg; /* target memcg of uncharge */
1404 unsigned long nr_pages; /* uncharged usage */
1405 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1406 } memcg_batch;
1407 unsigned int memcg_kmem_skip_account;
1408#endif
1409#ifdef CONFIG_HAVE_HW_BREAKPOINT
1410 atomic_t ptrace_bp_refcnt;
1411#endif
1412#ifdef CONFIG_UPROBES
1413 struct uprobe_task *utask;
1414#endif
1415#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1416 unsigned int sequential_io;
1417 unsigned int sequential_io_avg;
1418#endif
1419};
1420
1421/* Future-safe accessor for struct task_struct's cpus_allowed. */
1422#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1423
1424#ifdef CONFIG_NUMA_BALANCING
1425extern void task_numa_fault(int node, int pages, bool migrated);
1426extern void set_numabalancing_state(bool enabled);
1427#else
1428static inline void task_numa_fault(int node, int pages, bool migrated)
1429{
1430}
1431static inline void set_numabalancing_state(bool enabled)
1432{
1433}
1434#endif
1435
1436static inline struct pid *task_pid(struct task_struct *task)
1437{
1438 return task->pids[PIDTYPE_PID].pid;
1439}
1440
1441static inline struct pid *task_tgid(struct task_struct *task)
1442{
1443 return task->group_leader->pids[PIDTYPE_PID].pid;
1444}
1445
1446/*
1447 * Without tasklist or rcu lock it is not safe to dereference
1448 * the result of task_pgrp/task_session even if task == current;
1449 * we can race with another thread doing sys_setsid/sys_setpgid.
1450 */
1451static inline struct pid *task_pgrp(struct task_struct *task)
1452{
1453 return task->group_leader->pids[PIDTYPE_PGID].pid;
1454}
1455
1456static inline struct pid *task_session(struct task_struct *task)
1457{
1458 return task->group_leader->pids[PIDTYPE_SID].pid;
1459}
1460
1461struct pid_namespace;
1462
1463/*
1464 * the helpers to get the task's different pids as they are seen
1465 * from various namespaces
1466 *
1467 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
1468 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1469 * current.
1470 * task_xid_nr_ns() : id seen from the ns specified;
1471 *
1472 * set_task_vxid() : assigns a virtual id to a task;
1473 *
1474 * see also pid_nr() etc in include/linux/pid.h
1475 */
1476pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1477 struct pid_namespace *ns);
1478
1479static inline pid_t task_pid_nr(struct task_struct *tsk)
1480{
1481 return tsk->pid;
1482}
1483
1484static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1485 struct pid_namespace *ns)
1486{
1487 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1488}
1489
1490static inline pid_t task_pid_vnr(struct task_struct *tsk)
1491{
1492 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1493}
1494
1495
1496static inline pid_t task_tgid_nr(struct task_struct *tsk)
1497{
1498 return tsk->tgid;
1499}
1500
1501pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1502
1503static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1504{
1505 return pid_vnr(task_tgid(tsk));
1506}
1507
1508
1509static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1510 struct pid_namespace *ns)
1511{
1512 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1513}
1514
1515static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1516{
1517 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1518}
1519
1520
1521static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1522 struct pid_namespace *ns)
1523{
1524 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1525}
1526
1527static inline pid_t task_session_vnr(struct task_struct *tsk)
1528{
1529 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1530}
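/*
 * Usage sketch (illustrative only; "tsk" and "ns" are hypothetical, and
 * the usual RCU/tasklist locking rules apply):
 *
 *	pid_t global = task_pid_nr(tsk);	// as seen from the init ns
 *	pid_t local  = task_pid_vnr(tsk);	// as seen from current's ns
 *	pid_t in_ns  = task_pid_nr_ns(tsk, ns);	// as seen from a given ns
 */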
1531
1532/* obsolete, do not use */
1533static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1534{
1535 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1536}
1537
1538/**
1539 * pid_alive - check that a task structure is not stale
1540 * @p: Task structure to be checked.
1541 *
1542 * Test if a process is not yet dead (at most zombie state).
1543 * If pid_alive fails, then pointers within the task structure
1544 * can be stale and must not be dereferenced.
1545 */
1546static inline int pid_alive(struct task_struct *p)
1547{
1548 return p->pids[PIDTYPE_PID].pid != NULL;
1549}
1550
1551/**
1552 * is_global_init - check if a task structure is init
1553 * @tsk: Task structure to be checked.
1554 *
1555 * Check if a task structure is the first user space task the kernel created.
1556 */
1557static inline int is_global_init(struct task_struct *tsk)
1558{
1559 return tsk->pid == 1;
1560}
1561
1562extern struct pid *cad_pid;
1563
1564extern void free_task(struct task_struct *tsk);
1565#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1566
1567extern void __put_task_struct(struct task_struct *t);
1568
1569static inline void put_task_struct(struct task_struct *t)
1570{
1571 if (atomic_dec_and_test(&t->usage))
1572 __put_task_struct(t);
1573}
1574
1575#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1576extern void task_cputime(struct task_struct *t,
1577 cputime_t *utime, cputime_t *stime);
1578extern void task_cputime_scaled(struct task_struct *t,
1579 cputime_t *utimescaled, cputime_t *stimescaled);
1580extern cputime_t task_gtime(struct task_struct *t);
1581#else
1582static inline void task_cputime(struct task_struct *t,
1583 cputime_t *utime, cputime_t *stime)
1584{
1585 if (utime)
1586 *utime = t->utime;
1587 if (stime)
1588 *stime = t->stime;
1589}
1590
1591static inline void task_cputime_scaled(struct task_struct *t,
1592 cputime_t *utimescaled,
1593 cputime_t *stimescaled)
1594{
1595 if (utimescaled)
1596 *utimescaled = t->utimescaled;
1597 if (stimescaled)
1598 *stimescaled = t->stimescaled;
1599}
1600
1601static inline cputime_t task_gtime(struct task_struct *t)
1602{
1603 return t->gtime;
1604}
1605#endif
1606extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1607extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
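/*
 * Usage sketch (illustrative only; "tsk" is hypothetical): read a task's
 * accumulated CPU time through the accessor rather than ->utime/->stime
 * directly, so the code works whether or not CONFIG_VIRT_CPU_ACCOUNTING_GEN
 * is enabled:
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(tsk, &utime, &stime);
 *	total = utime + stime;			// total CPU time so far
 */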
1608
1609/*
1610 * Per process flags
1611 */
1612#define PF_EXITING 0x00000004 /* getting shut down */
1613#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
1614#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1615#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1616#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
1617#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1618#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1619#define PF_DUMPCORE 0x00000200 /* dumped core */
1620#define PF_SIGNALED 0x00000400 /* killed by a signal */
1621#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1622#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1623#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1624#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
1625#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1626#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1627#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1628#define PF_KSWAPD 0x00040000 /* I am kswapd */
1629#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
1630#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1631#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1632#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1633#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1634#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
1635#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
1636#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1637#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1638#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1639#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1640#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1641
1642/*
1643 * Only the _current_ task can read/write to tsk->flags, but other
1644 * tasks can access tsk->flags in readonly mode for example
1645 * with tsk_used_math (like during threaded core dumping).
1646 * There is however an exception to this rule during ptrace
1647 * or during fork: the ptracer task is allowed to write to the
1648 * child->flags of its traced child (same goes for fork, the parent
1649 * can write to the child->flags), because we're guaranteed the
1650 * child is not running and in turn not changing child->flags
1651 * at the same time the parent does it.
1652 */
1653#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1654#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1655#define clear_used_math() clear_stopped_child_used_math(current)
1656#define set_used_math() set_stopped_child_used_math(current)
1657#define conditional_stopped_child_used_math(condition, child) \
1658 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1659#define conditional_used_math(condition) \
1660 conditional_stopped_child_used_math(condition, current)
1661#define copy_to_stopped_child_used_math(child) \
1662 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1663/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1664#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1665#define used_math() tsk_used_math(current)
1666
1667/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
1668static inline gfp_t memalloc_noio_flags(gfp_t flags)
1669{
1670 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1671 flags &= ~__GFP_IO;
1672 return flags;
1673}
1674
1675static inline unsigned int memalloc_noio_save(void)
1676{
1677 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1678 current->flags |= PF_MEMALLOC_NOIO;
1679 return flags;
1680}
1681
1682static inline void memalloc_noio_restore(unsigned int flags)
1683{
1684 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
1685}
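/*
 * Usage sketch (illustrative only): code that must not recurse into block
 * I/O brackets its allocations with save/restore; the allocator then uses
 * memalloc_noio_flags() so the request behaves as if __GFP_IO were clear:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	buf = kmalloc(size, GFP_KERNEL);	// no I/O will be initiated
 *	...
 *	memalloc_noio_restore(noio_flags);
 */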
1686
1687/*
1688 * task->jobctl flags
1689 */
1690#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
1691
1692#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
1693#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
1694#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
1695#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
1696#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
1697#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
1698#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
1699
1700#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1701#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1702#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
1703#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
1704#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
1705#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
1706#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
1707
1708#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1709#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1710
1711extern bool task_set_jobctl_pending(struct task_struct *task,
1712 unsigned int mask);
1713extern void task_clear_jobctl_trapping(struct task_struct *task);
1714extern void task_clear_jobctl_pending(struct task_struct *task,
1715 unsigned int mask);
1716
1717#ifdef CONFIG_PREEMPT_RCU
1718
1719#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1720#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1721
1722static inline void rcu_copy_process(struct task_struct *p)
1723{
1724 p->rcu_read_lock_nesting = 0;
1725 p->rcu_read_unlock_special = 0;
1726#ifdef CONFIG_TREE_PREEMPT_RCU
1727 p->rcu_blocked_node = NULL;
1728#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1729#ifdef CONFIG_RCU_BOOST
1730 p->rcu_boost_mutex = NULL;
1731#endif /* #ifdef CONFIG_RCU_BOOST */
1732 INIT_LIST_HEAD(&p->rcu_node_entry);
1733}
1734
1735#else
1736
1737static inline void rcu_copy_process(struct task_struct *p)
1738{
1739}
1740
1741#endif
1742
1743static inline void tsk_restore_flags(struct task_struct *task,
1744 unsigned long orig_flags, unsigned long flags)
1745{
1746 task->flags &= ~flags;
1747 task->flags |= orig_flags & flags;
1748}
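
/*
 * Sketch of the intended use: temporarily set a PF_* flag and later put
 * current->flags back exactly as it was, even if the flag happened to be
 * set already on entry.  do_reclaim_sensitive_work() is a placeholder.
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	do_reclaim_sensitive_work();
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */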
1749
1750#ifdef CONFIG_SMP
1751extern void do_set_cpus_allowed(struct task_struct *p,
1752 const struct cpumask *new_mask);
1753
1754extern int set_cpus_allowed_ptr(struct task_struct *p,
1755 const struct cpumask *new_mask);
1756#else
1757static inline void do_set_cpus_allowed(struct task_struct *p,
1758 const struct cpumask *new_mask)
1759{
1760}
1761static inline int set_cpus_allowed_ptr(struct task_struct *p,
1762 const struct cpumask *new_mask)
1763{
1764 if (!cpumask_test_cpu(0, new_mask))
1765 return -EINVAL;
1766 return 0;
1767}
1768#endif
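
/*
 * Sketch: pinning a freshly created kernel thread to one CPU before it
 * runs.  example_thread_fn, data and cpu are placeholders, and error
 * handling is omitted.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(example_thread_fn, data, "example/%d", cpu);
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *		wake_up_process(tsk);
 *	}
 */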
1769
1770#ifdef CONFIG_NO_HZ_COMMON
1771void calc_load_enter_idle(void);
1772void calc_load_exit_idle(void);
1773#else
1774static inline void calc_load_enter_idle(void) { }
1775static inline void calc_load_exit_idle(void) { }
1776#endif /* CONFIG_NO_HZ_COMMON */
1777
1778#ifndef CONFIG_CPUMASK_OFFSTACK
1779static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1780{
1781 return set_cpus_allowed_ptr(p, &new_mask);
1782}
1783#endif
1784
1785/*
1786 * Do not use outside of architecture code which knows its limitations.
1787 *
1788 * sched_clock() has no promise of monotonicity or bounded drift between
1789 * CPUs; using it (which you should not) requires IRQs to be disabled.
1790 *
1791 * Please use one of the three interfaces below.
1792 */
1793extern unsigned long long notrace sched_clock(void);
1794/*
1795 * See the comment in kernel/sched/clock.c
1796 */
1797extern u64 cpu_clock(int cpu);
1798extern u64 local_clock(void);
1799extern u64 sched_clock_cpu(int cpu);
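
/*
 * Sketch: local_clock() is the usual choice for cheap duration
 * measurements; values are in nanoseconds and only comparable on the
 * same CPU.  do_measured_work() is a placeholder.
 *
 *	u64 t0 = local_clock();
 *
 *	do_measured_work();
 *	pr_debug("section took %llu ns\n", local_clock() - t0);
 */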
1800
1801
1802extern void sched_clock_init(void);
1803
1804#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1805static inline void sched_clock_tick(void)
1806{
1807}
1808
1809static inline void sched_clock_idle_sleep_event(void)
1810{
1811}
1812
1813static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1814{
1815}
1816#else
1817/*
1818 * Architectures can set this to 1 if they have specified
1819 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1820 * but then during bootup it turns out that sched_clock()
1821 * is reliable after all:
1822 */
1823extern int sched_clock_stable;
1824
1825extern void sched_clock_tick(void);
1826extern void sched_clock_idle_sleep_event(void);
1827extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1828#endif
1829
1830#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1831/*
1832 * An interface for runtime opt-in to IRQ time accounting based on
1833 * sched_clock.  The explicit opt-in avoids a performance penalty on
1834 * systems with slow sched_clock() implementations.
1835 */
1836extern void enable_sched_clock_irqtime(void);
1837extern void disable_sched_clock_irqtime(void);
1838#else
1839static inline void enable_sched_clock_irqtime(void) {}
1840static inline void disable_sched_clock_irqtime(void) {}
1841#endif
1842
1843extern unsigned long long
1844task_sched_runtime(struct task_struct *task);
1845
1846/* sched_exec is called by processes performing an exec */
1847#ifdef CONFIG_SMP
1848extern void sched_exec(void);
1849#else
1850#define sched_exec() {}
1851#endif
1852
1853extern void sched_clock_idle_sleep_event(void);
1854extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1855
1856#ifdef CONFIG_HOTPLUG_CPU
1857extern void idle_task_exit(void);
1858#else
1859static inline void idle_task_exit(void) {}
1860#endif
1861
1862#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
1863extern void wake_up_nohz_cpu(int cpu);
1864#else
1865static inline void wake_up_nohz_cpu(int cpu) { }
1866#endif
1867
1868#ifdef CONFIG_NO_HZ_FULL
1869extern bool sched_can_stop_tick(void);
1870extern u64 scheduler_tick_max_deferment(void);
1871#else
1872static inline bool sched_can_stop_tick(void) { return false; }
1873#endif
1874
1875#ifdef CONFIG_SCHED_AUTOGROUP
1876extern void sched_autogroup_create_attach(struct task_struct *p);
1877extern void sched_autogroup_detach(struct task_struct *p);
1878extern void sched_autogroup_fork(struct signal_struct *sig);
1879extern void sched_autogroup_exit(struct signal_struct *sig);
1880#ifdef CONFIG_PROC_FS
1881extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
1882extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
1883#endif
1884#else
1885static inline void sched_autogroup_create_attach(struct task_struct *p) { }
1886static inline void sched_autogroup_detach(struct task_struct *p) { }
1887static inline void sched_autogroup_fork(struct signal_struct *sig) { }
1888static inline void sched_autogroup_exit(struct signal_struct *sig) { }
1889#endif
1890
1891extern bool yield_to(struct task_struct *p, bool preempt);
1892extern void set_user_nice(struct task_struct *p, long nice);
1893extern int task_prio(const struct task_struct *p);
1894extern int task_nice(const struct task_struct *p);
1895extern int can_nice(const struct task_struct *p, const int nice);
1896extern int task_curr(const struct task_struct *p);
1897extern int idle_cpu(int cpu);
1898extern int sched_setscheduler(struct task_struct *, int,
1899 const struct sched_param *);
1900extern int sched_setscheduler_nocheck(struct task_struct *, int,
1901 const struct sched_param *);
1902extern struct task_struct *idle_task(int cpu);
1903/**
1904 * is_idle_task - is the specified task an idle task?
1905 * @p: the task in question.
1906 */
1907static inline bool is_idle_task(const struct task_struct *p)
1908{
1909 return p->pid == 0;
1910}
1911extern struct task_struct *curr_task(int cpu);
1912extern void set_curr_task(int cpu, struct task_struct *p);
1913
1914void yield(void);
1915
1916/*
1917 * The default (Linux) execution domain.
1918 */
1919extern struct exec_domain default_exec_domain;
1920
1921union thread_union {
1922 struct thread_info thread_info;
1923 unsigned long stack[THREAD_SIZE/sizeof(long)];
1924};
1925
1926#ifndef __HAVE_ARCH_KSTACK_END
1927static inline int kstack_end(void *addr)
1928{
1929 /* Reliable end of stack detection:
1930	 * Some APM BIOS versions misalign the stack
1931 */
1932 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1933}
1934#endif
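
/*
 * Sketch of how stack dumpers use kstack_end(): walk the stack words and
 * stop at the (possibly misaligned) end.  'sp' is assumed to point
 * somewhere into the thread's stack.
 *
 *	while (!kstack_end(sp)) {
 *		unsigned long addr = *sp++;
 *
 *		if (__kernel_text_address(addr))
 *			print_ip_sym(addr);
 *	}
 */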
1935
1936extern union thread_union init_thread_union;
1937extern struct task_struct init_task;
1938
1939extern struct mm_struct init_mm;
1940
1941extern struct pid_namespace init_pid_ns;
1942
1943/*
1944 * find a task by one of its numerical ids
1945 *
1946 * find_task_by_pid_ns():
1947 * finds a task by its pid in the specified namespace
1948 * find_task_by_vpid():
1949 * finds a task by its virtual pid
1950 *
1951 * see also find_vpid() etc in include/linux/pid.h
1952 */
1953
1954extern struct task_struct *find_task_by_vpid(pid_t nr);
1955extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1956 struct pid_namespace *ns);
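
/*
 * These lookups are normally done under rcu_read_lock(); take a proper
 * reference before dropping it if the task will be used afterwards.
 * Sketch ('pid' is the target's pid as seen from the caller's namespace):
 *
 *	struct task_struct *tsk;
 *
 *	rcu_read_lock();
 *	tsk = find_task_by_vpid(pid);
 *	if (tsk)
 *		get_task_struct(tsk);
 *	rcu_read_unlock();
 *
 * Use tsk afterwards, then drop the reference with put_task_struct(tsk).
 */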
1957
1958extern void __set_special_pids(struct pid *pid);
1959
1960/* per-UID process charging. */
1961extern struct user_struct * alloc_uid(kuid_t);
1962static inline struct user_struct *get_uid(struct user_struct *u)
1963{
1964 atomic_inc(&u->__count);
1965 return u;
1966}
1967extern void free_uid(struct user_struct *);
1968
1969#include <asm/current.h>
1970
1971extern void xtime_update(unsigned long ticks);
1972
1973extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1974extern int wake_up_process(struct task_struct *tsk);
1975extern void wake_up_new_task(struct task_struct *tsk);
1976#ifdef CONFIG_SMP
1977 extern void kick_process(struct task_struct *tsk);
1978#else
1979 static inline void kick_process(struct task_struct *tsk) { }
1980#endif
1981extern void sched_fork(struct task_struct *p);
1982extern void sched_dead(struct task_struct *p);
1983
1984extern void proc_caches_init(void);
1985extern void flush_signals(struct task_struct *);
1986extern void __flush_signals(struct task_struct *);
1987extern void ignore_signals(struct task_struct *);
1988extern void flush_signal_handlers(struct task_struct *, int force_default);
1989extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
1990
1991static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
1992{
1993 unsigned long flags;
1994 int ret;
1995
1996 spin_lock_irqsave(&tsk->sighand->siglock, flags);
1997 ret = dequeue_signal(tsk, mask, info);
1998 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
1999
2000 return ret;
2001}
2002
2003extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2004 sigset_t *mask);
2005extern void unblock_all_signals(void);
2006extern void release_task(struct task_struct * p);
2007extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2008extern int force_sigsegv(int, struct task_struct *);
2009extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2010extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2011extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2012extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2013 const struct cred *, u32);
2014extern int kill_pgrp(struct pid *pid, int sig, int priv);
2015extern int kill_pid(struct pid *pid, int sig, int priv);
2016extern int kill_proc_info(int, struct siginfo *, pid_t);
2017extern __must_check bool do_notify_parent(struct task_struct *, int);
2018extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2019extern void force_sig(int, struct task_struct *);
2020extern int send_sig(int, struct task_struct *, int);
2021extern int zap_other_threads(struct task_struct *p);
2022extern struct sigqueue *sigqueue_alloc(void);
2023extern void sigqueue_free(struct sigqueue *);
2024extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2025extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2026
2027static inline void restore_saved_sigmask(void)
2028{
2029 if (test_and_clear_restore_sigmask())
2030		__set_current_blocked(&current->saved_sigmask);
2031}
2032
2033static inline sigset_t *sigmask_to_save(void)
2034{
2035	sigset_t *res = &current->blocked;
2036	if (unlikely(test_restore_sigmask()))
2037		res = &current->saved_sigmask;
2038 return res;
2039}
2040
2041static inline int kill_cad_pid(int sig, int priv)
2042{
2043 return kill_pid(cad_pid, sig, priv);
2044}
2045
2046/* These can be the second arg to send_sig_info/send_group_sig_info. */
2047#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2048#define SEND_SIG_PRIV ((struct siginfo *) 1)
2049#define SEND_SIG_FORCED ((struct siginfo *) 2)
2050
2051/*
2052 * True if we are on the alternate signal stack.
2053 */
2054static inline int on_sig_stack(unsigned long sp)
2055{
2056#ifdef CONFIG_STACK_GROWSUP
2057 return sp >= current->sas_ss_sp &&
2058 sp - current->sas_ss_sp < current->sas_ss_size;
2059#else
2060 return sp > current->sas_ss_sp &&
2061 sp - current->sas_ss_sp <= current->sas_ss_size;
2062#endif
2063}
2064
2065static inline int sas_ss_flags(unsigned long sp)
2066{
2067 return (current->sas_ss_size == 0 ? SS_DISABLE
2068 : on_sig_stack(sp) ? SS_ONSTACK : 0);
2069}
2070
2071static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2072{
2073 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2074#ifdef CONFIG_STACK_GROWSUP
2075 return current->sas_ss_sp;
2076#else
2077 return current->sas_ss_sp + current->sas_ss_size;
2078#endif
2079 return sp;
2080}
2081
2082/*
2083 * Routines for handling mm_structs
2084 */
2085extern struct mm_struct * mm_alloc(void);
2086
2087/* mmdrop drops the mm and the page tables */
2088extern void __mmdrop(struct mm_struct *);
2089static inline void mmdrop(struct mm_struct * mm)
2090{
2091 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2092 __mmdrop(mm);
2093}
2094
2095/* mmput gets rid of the mappings and all user-space */
2096extern void mmput(struct mm_struct *);
2097/* Grab a reference to a task's mm, if it is not already going away */
2098extern struct mm_struct *get_task_mm(struct task_struct *task);
2099/*
2100 * Grab a reference to a task's mm, if it is not already going away
2101 * and ptrace_may_access with the mode parameter passed to it
2102 * succeeds.
2103 */
2104extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2105/* Remove the current task's stale references to the old mm_struct */
2106extern void mm_release(struct task_struct *, struct mm_struct *);
2107/* Allocate a new mm structure and copy contents from tsk->mm */
2108extern struct mm_struct *dup_mm(struct task_struct *tsk);
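
/*
 * Sketch of the usual reference pattern around another task's mm; 'task'
 * is assumed to be a valid, referenced task_struct:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		query or walk the address space here
 *		mmput(mm);
 *	}
 */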
2109
2110extern int copy_thread(unsigned long, unsigned long, unsigned long,
2111 struct task_struct *);
2112extern void flush_thread(void);
2113extern void exit_thread(void);
2114
2115extern void exit_files(struct task_struct *);
2116extern void __cleanup_sighand(struct sighand_struct *);
2117
2118extern void exit_itimers(struct signal_struct *);
2119extern void flush_itimer_signals(void);
2120
2121extern void do_group_exit(int);
2122
2123extern int allow_signal(int);
2124extern int disallow_signal(int);
2125
2126extern int do_execve(const char *,
2127 const char __user * const __user *,
2128 const char __user * const __user *);
2129extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2130struct task_struct *fork_idle(int);
2131extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2132
2133extern void set_task_comm(struct task_struct *tsk, char *from);
2134extern char *get_task_comm(char *to, struct task_struct *tsk);
2135
2136#ifdef CONFIG_SMP
2137void scheduler_ipi(void);
2138extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2139#else
2140static inline void scheduler_ipi(void) { }
2141static inline unsigned long wait_task_inactive(struct task_struct *p,
2142 long match_state)
2143{
2144 return 1;
2145}
2146#endif
2147
2148#define next_task(p) \
2149 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2150
2151#define for_each_process(p) \
2152 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2153
2154extern bool current_is_single_threaded(void);
2155
2156/*
2157 * Careful: do_each_thread/while_each_thread is a double loop so
2158 * 'break' will not work as expected - use goto instead.
2159 */
2160#define do_each_thread(g, t) \
2161 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2162
2163#define while_each_thread(g, t) \
2164 while ((t = next_thread(t)) != g)
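
/*
 * Sketch: visiting every thread of every process.  tasklist_lock (or RCU,
 * when the caller knows that is sufficient) must be held, and early exit
 * uses a goto as the comment above warns.  wanted() is a placeholder test.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (wanted(t))
 *			goto out;
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */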
2165
2166static inline int get_nr_threads(struct task_struct *tsk)
2167{
2168 return tsk->signal->nr_threads;
2169}
2170
2171static inline bool thread_group_leader(struct task_struct *p)
2172{
2173 return p->exit_signal >= 0;
2174}
2175
2176/* Due to the insanities of de_thread it is possible for a process
2177 * to have the pid of the thread group leader without actually being
2178 * the thread group leader. For iteration through the pids in proc
2179 * all we care about is that we have a task with the appropriate
2180 * pid, we don't actually care if we have the right task.
2181 */
2182static inline int has_group_leader_pid(struct task_struct *p)
2183{
2184 return p->pid == p->tgid;
2185}
2186
2187static inline
2188int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2189{
2190 return p1->tgid == p2->tgid;
2191}
2192
2193static inline struct task_struct *next_thread(const struct task_struct *p)
2194{
2195 return list_entry_rcu(p->thread_group.next,
2196 struct task_struct, thread_group);
2197}
2198
2199static inline int thread_group_empty(struct task_struct *p)
2200{
2201 return list_empty(&p->thread_group);
2202}
2203
2204#define delay_group_leader(p) \
2205 (thread_group_leader(p) && !thread_group_empty(p))
2206
2207/*
2208 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2209 * subscriptions and synchronises with wait4(). Also used in procfs. Also
2210 * pins the final release of task.io_context. Also protects ->cpuset and
2211 * ->cgroup.subsys[]. And ->vfork_done.
2212 *
2213 * Nests both inside and outside of read_lock(&tasklist_lock).
2214 * It must not be nested with write_lock_irq(&tasklist_lock),
2215 * neither inside nor outside.
2216 */
2217static inline void task_lock(struct task_struct *p)
2218{
2219 spin_lock(&p->alloc_lock);
2220}
2221
2222static inline void task_unlock(struct task_struct *p)
2223{
2224 spin_unlock(&p->alloc_lock);
2225}
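
/*
 * Sketch: sampling a field that alloc_lock protects, e.g. another task's
 * ->comm (this is essentially what get_task_comm() does).  'buf' is a
 * caller-provided char array.
 *
 *	task_lock(p);
 *	strncpy(buf, p->comm, sizeof(p->comm));
 *	task_unlock(p);
 */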
2226
2227extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2228 unsigned long *flags);
2229
2230static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2231 unsigned long *flags)
2232{
2233 struct sighand_struct *ret;
2234
2235 ret = __lock_task_sighand(tsk, flags);
2236 (void)__cond_lock(&tsk->sighand->siglock, ret);
2237 return ret;
2238}
2239
2240static inline void unlock_task_sighand(struct task_struct *tsk,
2241 unsigned long *flags)
2242{
2243 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2244}
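
/*
 * Sketch: lock_task_sighand() returns NULL if the task's sighand is
 * already gone (the task has been reaped), so the result must be checked:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		inspect or modify the signal state here
 *		unlock_task_sighand(p, &flags);
 *	}
 */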
2245
2246#ifdef CONFIG_CGROUPS
2247static inline void threadgroup_change_begin(struct task_struct *tsk)
2248{
2249 down_read(&tsk->signal->group_rwsem);
2250}
2251static inline void threadgroup_change_end(struct task_struct *tsk)
2252{
2253 up_read(&tsk->signal->group_rwsem);
2254}
2255
2256/**
2257 * threadgroup_lock - lock threadgroup
2258 * @tsk: member task of the threadgroup to lock
2259 *
2260 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2261 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2262 * change ->group_leader/pid. This is useful for cases where the threadgroup
2263 * needs to stay stable across blockable operations.
2264 *
2265 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2266 * synchronization. While held, no new task will be added to threadgroup
2267 * and no existing live task will have its PF_EXITING set.
2268 *
2269 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2270 * sub-thread becomes a new leader.
2271 */
2272static inline void threadgroup_lock(struct task_struct *tsk)
2273{
2274 down_write(&tsk->signal->group_rwsem);
2275}
2276
2277/**
2278 * threadgroup_unlock - unlock threadgroup
2279 * @tsk: member task of the threadgroup to unlock
2280 *
2281 * Reverse threadgroup_lock().
2282 */
2283static inline void threadgroup_unlock(struct task_struct *tsk)
2284{
2285 up_write(&tsk->signal->group_rwsem);
2286}
2287#else
2288static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2289static inline void threadgroup_change_end(struct task_struct *tsk) {}
2290static inline void threadgroup_lock(struct task_struct *tsk) {}
2291static inline void threadgroup_unlock(struct task_struct *tsk) {}
2292#endif
2293
2294#ifndef __HAVE_THREAD_FUNCTIONS
2295
2296#define task_thread_info(task) ((struct thread_info *)(task)->stack)
2297#define task_stack_page(task) ((task)->stack)
2298
2299static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2300{
2301 *task_thread_info(p) = *task_thread_info(org);
2302 task_thread_info(p)->task = p;
2303}
2304
2305static inline unsigned long *end_of_stack(struct task_struct *p)
2306{
2307 return (unsigned long *)(task_thread_info(p) + 1);
2308}
2309
2310#endif
2311
2312static inline int object_is_on_stack(void *obj)
2313{
2314 void *stack = task_stack_page(current);
2315
2316 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2317}
2318
2319extern void thread_info_cache_init(void);
2320
2321#ifdef CONFIG_DEBUG_STACK_USAGE
2322static inline unsigned long stack_not_used(struct task_struct *p)
2323{
2324 unsigned long *n = end_of_stack(p);
2325
2326 do { /* Skip over canary */
2327 n++;
2328 } while (!*n);
2329
2330 return (unsigned long)n - (unsigned long)end_of_stack(p);
2331}
2332#endif
2333
2334/* set thread flags in other task's structures
2335 * - see asm/thread_info.h for TIF_xxxx flags available
2336 */
2337static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2338{
2339 set_ti_thread_flag(task_thread_info(tsk), flag);
2340}
2341
2342static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2343{
2344 clear_ti_thread_flag(task_thread_info(tsk), flag);
2345}
2346
2347static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2348{
2349 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2350}
2351
2352static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2353{
2354 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2355}
2356
2357static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2358{
2359 return test_ti_thread_flag(task_thread_info(tsk), flag);
2360}
2361
2362static inline void set_tsk_need_resched(struct task_struct *tsk)
2363{
2364 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2365}
2366
2367static inline void clear_tsk_need_resched(struct task_struct *tsk)
2368{
2369 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2370}
2371
2372static inline int test_tsk_need_resched(struct task_struct *tsk)
2373{
2374 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2375}
2376
2377static inline int restart_syscall(void)
2378{
2379 set_tsk_thread_flag(current, TIF_SIGPENDING);
2380 return -ERESTARTNOINTR;
2381}
2382
2383static inline int signal_pending(struct task_struct *p)
2384{
2385 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2386}
2387
2388static inline int __fatal_signal_pending(struct task_struct *p)
2389{
2390 return unlikely(sigismember(&p->pending.signal, SIGKILL));
2391}
2392
2393static inline int fatal_signal_pending(struct task_struct *p)
2394{
2395 return signal_pending(p) && __fatal_signal_pending(p);
2396}
2397
2398static inline int signal_pending_state(long state, struct task_struct *p)
2399{
2400 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2401 return 0;
2402 if (!signal_pending(p))
2403 return 0;
2404
2405 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2406}
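
/*
 * Sketch of the classic interruptible wait built on these tests;
 * 'condition' stands for whatever the caller is waiting on:
 *
 *	int ret = 0;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition) {
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */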
2407
2408static inline int need_resched(void)
2409{
2410 return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2411}
2412
2413/*
2414 * cond_resched() and cond_resched_lock(): latency reduction via
2415 * explicit rescheduling in places that are safe. The return
2416 * value indicates whether a reschedule was actually done.
2417 * cond_resched_lock() will drop the spinlock before scheduling,
2418 * cond_resched_softirq() will enable bhs before scheduling.
2419 */
2420extern int _cond_resched(void);
2421
2422#define cond_resched() ({ \
2423 __might_sleep(__FILE__, __LINE__, 0); \
2424 _cond_resched(); \
2425})
2426
2427extern int __cond_resched_lock(spinlock_t *lock);
2428
2429#ifdef CONFIG_PREEMPT_COUNT
2430#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2431#else
2432#define PREEMPT_LOCK_OFFSET 0
2433#endif
2434
2435#define cond_resched_lock(lock) ({ \
2436 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2437 __cond_resched_lock(lock); \
2438})
2439
2440extern int __cond_resched_softirq(void);
2441
2442#define cond_resched_softirq() ({ \
2443 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2444 __cond_resched_softirq(); \
2445})
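
/*
 * Sketch: sprinkling voluntary preemption points into a long-running
 * loop.  process_one() and the list are placeholders.
 *
 *	list_for_each_entry(item, &head, node) {
 *		process_one(item);
 *		cond_resched();
 *	}
 */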
2446
2447/*
2448 * Does a critical section need to be broken due to another
2449 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2450 * but expresses a general need for low latency.)
2451 */
2452static inline int spin_needbreak(spinlock_t *lock)
2453{
2454#ifdef CONFIG_PREEMPT
2455 return spin_is_contended(lock);
2456#else
2457 return 0;
2458#endif
2459}
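
/*
 * Sketch: breaking a contended critical section by hand; this is roughly
 * what cond_resched_lock() does for the caller.  more_work() and
 * do_one_unit() are placeholders.
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_unit();
 *		if (spin_needbreak(&lock) || need_resched()) {
 *			spin_unlock(&lock);
 *			cond_resched();
 *			spin_lock(&lock);
 *		}
 *	}
 *	spin_unlock(&lock);
 */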
2460
2461/*
2462 * Idle thread specific functions to determine the need_resched
2463 * polling state. We have two versions, one based on TS_POLLING in
2464 * thread_info.status and one based on TIF_POLLING_NRFLAG in
2465 * thread_info.flags
2466 */
2467#ifdef TS_POLLING
2468static inline int tsk_is_polling(struct task_struct *p)
2469{
2470 return task_thread_info(p)->status & TS_POLLING;
2471}
2472static inline void current_set_polling(void)
2473{
2474 current_thread_info()->status |= TS_POLLING;
2475}
2476
2477static inline void current_clr_polling(void)
2478{
2479 current_thread_info()->status &= ~TS_POLLING;
2480 smp_mb__after_clear_bit();
2481}
2482#elif defined(TIF_POLLING_NRFLAG)
2483static inline int tsk_is_polling(struct task_struct *p)
2484{
2485 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2486}
2487static inline void current_set_polling(void)
2488{
2489 set_thread_flag(TIF_POLLING_NRFLAG);
2490}
2491
2492static inline void current_clr_polling(void)
2493{
2494 clear_thread_flag(TIF_POLLING_NRFLAG);
2495}
2496#else
2497static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2498static inline void current_set_polling(void) { }
2499static inline void current_clr_polling(void) { }
2500#endif
2501
2502/*
2503 * Thread group CPU time accounting.
2504 */
2505void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2506void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2507
2508static inline void thread_group_cputime_init(struct signal_struct *sig)
2509{
2510 raw_spin_lock_init(&sig->cputimer.lock);
2511}
2512
2513/*
2514 * Reevaluate whether the task has signals pending delivery.
2515 * Wake the task if so.
2516 * This is required every time the blocked sigset_t changes.
2517 * Callers must hold sighand->siglock.
2518 */
2519extern void recalc_sigpending_and_wake(struct task_struct *t);
2520extern void recalc_sigpending(void);
2521
2522extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2523
2524static inline void signal_wake_up(struct task_struct *t, bool resume)
2525{
2526 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2527}
2528static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2529{
2530 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2531}
2532
2533/*
2534 * Wrappers for p->thread_info->cpu access. No-op on UP.
2535 */
2536#ifdef CONFIG_SMP
2537
2538static inline unsigned int task_cpu(const struct task_struct *p)
2539{
2540 return task_thread_info(p)->cpu;
2541}
2542
2543extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2544
2545#else
2546
2547static inline unsigned int task_cpu(const struct task_struct *p)
2548{
2549 return 0;
2550}
2551
2552static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2553{
2554}
2555
2556#endif /* CONFIG_SMP */
2557
2558extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2559extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2560
2561#ifdef CONFIG_CGROUP_SCHED
2562extern struct task_group root_task_group;
2563#endif /* CONFIG_CGROUP_SCHED */
2564
2565extern int task_can_switch_user(struct user_struct *up,
2566 struct task_struct *tsk);
2567
2568#ifdef CONFIG_TASK_XACCT
2569static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2570{
2571 tsk->ioac.rchar += amt;
2572}
2573
2574static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2575{
2576 tsk->ioac.wchar += amt;
2577}
2578
2579static inline void inc_syscr(struct task_struct *tsk)
2580{
2581 tsk->ioac.syscr++;
2582}
2583
2584static inline void inc_syscw(struct task_struct *tsk)
2585{
2586 tsk->ioac.syscw++;
2587}
2588#else
2589static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2590{
2591}
2592
2593static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2594{
2595}
2596
2597static inline void inc_syscr(struct task_struct *tsk)
2598{
2599}
2600
2601static inline void inc_syscw(struct task_struct *tsk)
2602{
2603}
2604#endif
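
/*
 * Sketch: how a read path accounts a successful transfer of 'ret' bytes
 * (mirroring the pattern in fs/read_write.c):
 *
 *	if (ret > 0)
 *		add_rchar(current, ret);
 *	inc_syscr(current);
 */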
2605
2606#ifndef TASK_SIZE_OF
2607#define TASK_SIZE_OF(tsk) TASK_SIZE
2608#endif
2609
2610#ifdef CONFIG_MM_OWNER
2611extern void mm_update_next_owner(struct mm_struct *mm);
2612extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2613#else
2614static inline void mm_update_next_owner(struct mm_struct *mm)
2615{
2616}
2617
2618static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2619{
2620}
2621#endif /* CONFIG_MM_OWNER */
2622
2623static inline unsigned long task_rlimit(const struct task_struct *tsk,
2624 unsigned int limit)
2625{
2626 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2627}
2628
2629static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2630 unsigned int limit)
2631{
2632 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2633}
2634
2635static inline unsigned long rlimit(unsigned int limit)
2636{
2637 return task_rlimit(current, limit);
2638}
2639
2640static inline unsigned long rlimit_max(unsigned int limit)
2641{
2642 return task_rlimit_max(current, limit);
2643}
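
/*
 * Sketch: the helpers above turn resource-limit checks into one-liners,
 * e.g. charging locked pages against RLIMIT_MEMLOCK; 'locked_pages' is
 * the caller's page count.
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (locked_pages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */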
2644
2645#endif