Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6
7struct sched_param {
8 int sched_priority;
9};
10
11#include <asm/param.h> /* for HZ */
12
13#include <linux/capability.h>
14#include <linux/threads.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/timex.h>
18#include <linux/jiffies.h>
19#include <linux/rbtree.h>
20#include <linux/thread_info.h>
21#include <linux/cpumask.h>
22#include <linux/errno.h>
23#include <linux/nodemask.h>
24#include <linux/mm_types.h>
25
26#include <asm/page.h>
27#include <asm/ptrace.h>
28#include <asm/cputime.h>
29
30#include <linux/smp.h>
31#include <linux/sem.h>
32#include <linux/signal.h>
33#include <linux/compiler.h>
34#include <linux/completion.h>
35#include <linux/pid.h>
36#include <linux/percpu.h>
37#include <linux/topology.h>
38#include <linux/proportions.h>
39#include <linux/seccomp.h>
40#include <linux/rcupdate.h>
41#include <linux/rculist.h>
42#include <linux/rtmutex.h>
43
44#include <linux/time.h>
45#include <linux/param.h>
46#include <linux/resource.h>
47#include <linux/timer.h>
48#include <linux/hrtimer.h>
49#include <linux/task_io_accounting.h>
50#include <linux/latencytop.h>
51#include <linux/cred.h>
52#include <linux/llist.h>
53#include <linux/uidgid.h>
54
55#include <asm/processor.h>
56
57struct exec_domain;
58struct futex_pi_state;
59struct robust_list_head;
60struct bio_list;
61struct fs_struct;
62struct perf_event_context;
63struct blk_plug;
64
65/*
66 * List of flags we want to share for kernel threads,
67 * if only because they are not used by them anyway.
68 */
69#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
70
/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
81extern unsigned long avenrun[]; /* Load averages */
82extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
83
84#define FSHIFT 11 /* nr of bits of precision */
85#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
86#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
87#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
88#define EXP_5 2014 /* 1/exp(5sec/5min) */
89#define EXP_15 2037 /* 1/exp(5sec/15min) */
90
91#define CALC_LOAD(load,exp,n) \
92 load *= exp; \
93 load += n*(FIXED_1-exp); \
94 load >>= FSHIFT;
95
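/*
 * Worked example (illustrative sketch, numbers assumed): with FSHIFT == 11,
 * FIXED_1 == 2048. Folding a sample of 3 runnable tasks (n = 3 * FIXED_1 =
 * 6144) into a 1-minute average that currently reads 1.50 (load = 3072):
 *
 *	CALC_LOAD(load, EXP_1, n);
 *
 * expands to load = (3072 * 1884 + 6144 * (2048 - 1884)) >> 11 = 3318,
 * i.e. roughly 1.62 in fixed-point.
 */
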
96extern unsigned long total_forks;
97extern int nr_threads;
98DECLARE_PER_CPU(unsigned long, process_counts);
99extern int nr_processes(void);
100extern unsigned long nr_running(void);
101extern unsigned long nr_uninterruptible(void);
102extern unsigned long nr_iowait(void);
103extern unsigned long nr_iowait_cpu(int cpu);
104extern unsigned long this_cpu_load(void);
105
106
107extern void calc_global_load(unsigned long ticks);
108extern void update_cpu_load_nohz(void);
109
110/* Notifier for when a task gets migrated to a new CPU */
111struct task_migration_notifier {
112 struct task_struct *task;
113 int from_cpu;
114 int to_cpu;
115};
116extern void register_task_migration_notifier(struct notifier_block *n);
117
118extern unsigned long get_parent_ip(unsigned long addr);
119
120extern void dump_cpu_task(int cpu);
121
122struct seq_file;
123struct cfs_rq;
124struct task_group;
125#ifdef CONFIG_SCHED_DEBUG
126extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
127extern void proc_sched_set_task(struct task_struct *p);
128extern void
129print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
130#else
131static inline void
132proc_sched_show_task(struct task_struct *p, struct seq_file *m)
133{
134}
135static inline void proc_sched_set_task(struct task_struct *p)
136{
137}
138static inline void
139print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
140{
141}
142#endif
143
144/*
145 * Task state bitmask. NOTE! These bits are also
146 * encoded in fs/proc/array.c: get_task_state().
147 *
148 * We have two separate sets of flags: task->state
149 * is about runnability, while task->exit_state are
150 * about the task exiting. Confusing, but this way
151 * modifying one set can't modify the other one by
152 * mistake.
153 */
154#define TASK_RUNNING 0
155#define TASK_INTERRUPTIBLE 1
156#define TASK_UNINTERRUPTIBLE 2
157#define __TASK_STOPPED 4
158#define __TASK_TRACED 8
159/* in tsk->exit_state */
160#define EXIT_ZOMBIE 16
161#define EXIT_DEAD 32
162/* in tsk->state again */
163#define TASK_DEAD 64
164#define TASK_WAKEKILL 128
165#define TASK_WAKING 256
166#define TASK_STATE_MAX 512
167
168#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
169
170extern char ___assert_task_state[1 - 2*!!(
171 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
172
173/* Convenience macros for the sake of set_task_state */
174#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
175#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
176#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
177
178/* Convenience macros for the sake of wake_up */
179#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
180#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
181
182/* get_task_state() */
183#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
184 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
185 __TASK_TRACED)
186
187#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
188#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
189#define task_is_dead(task) ((task)->exit_state != 0)
190#define task_is_stopped_or_traced(task) \
191 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
192#define task_contributes_to_load(task) \
193 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
194 (task->flags & PF_FROZEN) == 0)
195
196#define __set_task_state(tsk, state_value) \
197 do { (tsk)->state = (state_value); } while (0)
198#define set_task_state(tsk, state_value) \
199 set_mb((tsk)->state, (state_value))
200
201/*
202 * set_current_state() includes a barrier so that the write of current->state
203 * is correctly serialised wrt the caller's subsequent test of whether to
204 * actually sleep:
205 *
206 * set_current_state(TASK_UNINTERRUPTIBLE);
207 * if (do_i_need_to_sleep())
208 * schedule();
209 *
210 * If the caller does not need such serialisation then use __set_current_state()
211 */
212#define __set_current_state(state_value) \
213 do { current->state = (state_value); } while (0)
214#define set_current_state(state_value) \
215 set_mb(current->state, (state_value))
216
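/*
 * A typical sleep loop built on the helpers above (illustrative sketch;
 * "condition" stands for whatever the caller is waiting on):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The barrier in set_current_state() orders the state write against the
 * condition test, so a wake_up() from another CPU cannot be missed.
 */
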
217/* Task command name length */
218#define TASK_COMM_LEN 16
219
220#include <linux/spinlock.h>
221
222/*
223 * This serializes "schedule()" and also protects
224 * the run-queue from deletions/modifications (but
225 * _adding_ to the beginning of the run-queue has
226 * a separate lock).
227 */
228extern rwlock_t tasklist_lock;
229extern spinlock_t mmlist_lock;
230
231struct task_struct;
232
233#ifdef CONFIG_PROVE_RCU
234extern int lockdep_tasklist_lock_is_held(void);
235#endif /* #ifdef CONFIG_PROVE_RCU */
236
237extern void sched_init(void);
238extern void sched_init_smp(void);
239extern asmlinkage void schedule_tail(struct task_struct *prev);
240extern void init_idle(struct task_struct *idle, int cpu);
241extern void init_idle_bootup_task(struct task_struct *idle);
242
243extern int runqueue_is_locked(int cpu);
244
245#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
246extern void nohz_balance_enter_idle(int cpu);
247extern void set_cpu_sd_state_idle(void);
248extern int get_nohz_timer_target(void);
249#else
250static inline void nohz_balance_enter_idle(int cpu) { }
251static inline void set_cpu_sd_state_idle(void) { }
252#endif
253
254/*
255 * Only dump TASK_* tasks. (0 for all tasks)
256 */
257extern void show_state_filter(unsigned long state_filter);
258
259static inline void show_state(void)
260{
261 show_state_filter(0);
262}
263
264extern void show_regs(struct pt_regs *);
265
266/*
267 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
268 * task), SP is the stack pointer of the first frame that should be shown in the back
269 * trace (or NULL if the entire call-chain of the task should be shown).
270 */
271extern void show_stack(struct task_struct *task, unsigned long *sp);
272
273void io_schedule(void);
274long io_schedule_timeout(long timeout);
275
276extern void cpu_init (void);
277extern void trap_init(void);
278extern void update_process_times(int user);
279extern void scheduler_tick(void);
280
281extern void sched_show_task(struct task_struct *p);
282
283#ifdef CONFIG_LOCKUP_DETECTOR
284extern void touch_softlockup_watchdog(void);
285extern void touch_softlockup_watchdog_sync(void);
286extern void touch_all_softlockup_watchdogs(void);
287extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
288 void __user *buffer,
289 size_t *lenp, loff_t *ppos);
290extern unsigned int softlockup_panic;
291void lockup_detector_init(void);
292#else
293static inline void touch_softlockup_watchdog(void)
294{
295}
296static inline void touch_softlockup_watchdog_sync(void)
297{
298}
299static inline void touch_all_softlockup_watchdogs(void)
300{
301}
302static inline void lockup_detector_init(void)
303{
304}
305#endif
306
307#ifdef CONFIG_DETECT_HUNG_TASK
308extern unsigned int sysctl_hung_task_panic;
309extern unsigned long sysctl_hung_task_check_count;
310extern unsigned long sysctl_hung_task_timeout_secs;
311extern unsigned long sysctl_hung_task_warnings;
312extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
313 void __user *buffer,
314 size_t *lenp, loff_t *ppos);
315#else
316/* Avoid need for ifdefs elsewhere in the code */
317enum { sysctl_hung_task_timeout_secs = 0 };
318#endif
319
320/* Attach to any functions which should be ignored in wchan output. */
321#define __sched __attribute__((__section__(".sched.text")))
322
323/* Linker adds these: start and end of __sched functions */
324extern char __sched_text_start[], __sched_text_end[];
325
326/* Is this address in the __sched functions? */
327extern int in_sched_functions(unsigned long addr);
328
329#define MAX_SCHEDULE_TIMEOUT LONG_MAX
330extern signed long schedule_timeout(signed long timeout);
331extern signed long schedule_timeout_interruptible(signed long timeout);
332extern signed long schedule_timeout_killable(signed long timeout);
333extern signed long schedule_timeout_uninterruptible(signed long timeout);
334asmlinkage void schedule(void);
335extern void schedule_preempt_disabled(void);
336extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
337
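/*
 * Illustrative sketch: sleeping for up to one second with the helpers above
 * ("remaining" is just a local variable). schedule_timeout() returns the
 * number of jiffies left if the task was woken early, 0 otherwise:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * or, equivalently, remaining = schedule_timeout_interruptible(HZ);
 */
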
338struct nsproxy;
339struct user_namespace;
340
/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this number via sysctl, but there is a
 * catch.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per vma. In ELF the number of sections is stored in an unsigned
 * short, so it must stay below 65535 at coredump time. Because the kernel
 * adds a few informative sections to the program image while generating the
 * coredump, we need some margin. The number of extra sections is currently
 * 1-3 and depends on the arch, so we use "5" as a safe margin here.
 */
353#define MAPCOUNT_ELF_CORE_MARGIN (5)
354#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
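/* With USHRT_MAX == 65535 this yields a default of 65530 map areas. */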
355
356extern int sysctl_max_map_count;
357
358#include <linux/aio.h>
359
360#ifdef CONFIG_MMU
361extern void arch_pick_mmap_layout(struct mm_struct *mm);
362extern unsigned long
363arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
364 unsigned long, unsigned long);
365extern unsigned long
366arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
367 unsigned long len, unsigned long pgoff,
368 unsigned long flags);
369extern void arch_unmap_area(struct mm_struct *, unsigned long);
370extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
371#else
372static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
373#endif
374
375
376extern void set_dumpable(struct mm_struct *mm, int value);
377extern int get_dumpable(struct mm_struct *mm);
378
379/* get/set_dumpable() values */
380#define SUID_DUMPABLE_DISABLED 0
381#define SUID_DUMPABLE_ENABLED 1
382#define SUID_DUMPABLE_SAFE 2
383
384/* mm flags */
385/* dumpable bits */
386#define MMF_DUMPABLE 0 /* core dump is permitted */
387#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
388
389#define MMF_DUMPABLE_BITS 2
390#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
391
392/* coredump filter bits */
393#define MMF_DUMP_ANON_PRIVATE 2
394#define MMF_DUMP_ANON_SHARED 3
395#define MMF_DUMP_MAPPED_PRIVATE 4
396#define MMF_DUMP_MAPPED_SHARED 5
397#define MMF_DUMP_ELF_HEADERS 6
398#define MMF_DUMP_HUGETLB_PRIVATE 7
399#define MMF_DUMP_HUGETLB_SHARED 8
400
401#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
402#define MMF_DUMP_FILTER_BITS 7
403#define MMF_DUMP_FILTER_MASK \
404 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
405#define MMF_DUMP_FILTER_DEFAULT \
406 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
407 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
408
409#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
410# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
411#else
412# define MMF_DUMP_MASK_DEFAULT_ELF 0
413#endif
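/*
 * Worked example (sketch): with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y the
 * default filter is (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) == 0xcc,
 * i.e. anonymous private/shared and private hugetlb mappings are dumped
 * along with ELF headers; without it the default is 0x8c.
 */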
414 /* leave room for more dump flags */
415#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
416#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
417#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
418
419#define MMF_HAS_UPROBES 19 /* has uprobes */
420#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
421
422#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
423
424struct sighand_struct {
425 atomic_t count;
426 struct k_sigaction action[_NSIG];
427 spinlock_t siglock;
428 wait_queue_head_t signalfd_wqh;
429};
430
431struct pacct_struct {
432 int ac_flag;
433 long ac_exitcode;
434 unsigned long ac_mem;
435 cputime_t ac_utime, ac_stime;
436 unsigned long ac_minflt, ac_majflt;
437};
438
439struct cpu_itimer {
440 cputime_t expires;
441 cputime_t incr;
442 u32 error;
443 u32 incr_error;
444};
445
/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
453struct cputime {
454 cputime_t utime;
455 cputime_t stime;
456};
457
458/**
459 * struct task_cputime - collected CPU time counts
460 * @utime: time spent in user mode, in &cputime_t units
461 * @stime: time spent in kernel mode, in &cputime_t units
462 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
463 *
464 * This is an extension of struct cputime that includes the total runtime
465 * spent by the task from the scheduler point of view.
466 *
467 * As a result, this structure groups together three kinds of CPU time
468 * that are tracked for threads and thread groups. Most things considering
469 * CPU time want to group these counts together and treat all three
470 * of them in parallel.
471 */
472struct task_cputime {
473 cputime_t utime;
474 cputime_t stime;
475 unsigned long long sum_exec_runtime;
476};
477/* Alternate field names when used to cache expirations. */
478#define prof_exp stime
479#define virt_exp utime
480#define sched_exp sum_exec_runtime
481
482#define INIT_CPUTIME \
483 (struct task_cputime) { \
484 .utime = 0, \
485 .stime = 0, \
486 .sum_exec_runtime = 0, \
487 }
488
489/*
490 * Disable preemption until the scheduler is running.
491 * Reset by start_kernel()->sched_init()->init_idle().
492 *
493 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
494 * before the scheduler is active -- see should_resched().
495 */
496#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
497
498/**
499 * struct thread_group_cputimer - thread group interval timer counts
500 * @cputime: thread group interval timers.
501 * @running: non-zero when there are timers running and
502 * @cputime receives updates.
503 * @lock: lock for fields in this struct.
504 *
505 * This structure contains the version of task_cputime, above, that is
506 * used for thread group CPU timer calculations.
507 */
508struct thread_group_cputimer {
509 struct task_cputime cputime;
510 int running;
511 raw_spinlock_t lock;
512};
513
514#include <linux/rwsem.h>
515struct autogroup;
516
517/*
518 * NOTE! "signal_struct" does not have its own
519 * locking, because a shared signal_struct always
520 * implies a shared sighand_struct, so locking
521 * sighand_struct is always a proper superset of
522 * the locking of signal_struct.
523 */
524struct signal_struct {
525 atomic_t sigcnt;
526 atomic_t live;
527 int nr_threads;
528
529 wait_queue_head_t wait_chldexit; /* for wait4() */
530
531 /* current thread group signal load-balancing target: */
532 struct task_struct *curr_target;
533
534 /* shared signal handling: */
535 struct sigpending shared_pending;
536
537 /* thread group exit support */
538 int group_exit_code;
539 /* overloaded:
540 * - notify group_exit_task when ->count is equal to notify_count
541 * - everyone except group_exit_task is stopped during signal delivery
542 * of fatal signals, group_exit_task processes the signal.
543 */
544 int notify_count;
545 struct task_struct *group_exit_task;
546
547 /* thread group stop support, overloads group_exit_code too */
548 int group_stop_count;
549 unsigned int flags; /* see SIGNAL_* flags below */
550
551 /*
552 * PR_SET_CHILD_SUBREAPER marks a process, like a service
553 * manager, to re-parent orphan (double-forking) child processes
554 * to this process instead of 'init'. The service manager is
555 * able to receive SIGCHLD signals and is able to investigate
556 * the process until it calls wait(). All children of this
557 * process will inherit a flag if they should look for a
558 * child_subreaper process at exit.
559 */
560 unsigned int is_child_subreaper:1;
561 unsigned int has_child_subreaper:1;
562
563 /* POSIX.1b Interval Timers */
564 struct list_head posix_timers;
565
566 /* ITIMER_REAL timer for the process */
567 struct hrtimer real_timer;
568 struct pid *leader_pid;
569 ktime_t it_real_incr;
570
571 /*
572 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
573 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
574 * values are defined to 0 and 1 respectively
575 */
576 struct cpu_itimer it[2];
577
578 /*
579 * Thread group totals for process CPU timers.
580 * See thread_group_cputimer(), et al, for details.
581 */
582 struct thread_group_cputimer cputimer;
583
584 /* Earliest-expiration cache. */
585 struct task_cputime cputime_expires;
586
587 struct list_head cpu_timers[3];
588
589 struct pid *tty_old_pgrp;
590
591 /* boolean value for session group leader */
592 int leader;
593
594 struct tty_struct *tty; /* NULL if no tty */
595
596#ifdef CONFIG_SCHED_AUTOGROUP
597 struct autogroup *autogroup;
598#endif
599 /*
600 * Cumulative resource counters for dead threads in the group,
601 * and for reaped dead child processes forked by this group.
602 * Live threads maintain their own counters and add to these
603 * in __exit_signal, except for the group leader.
604 */
605 cputime_t utime, stime, cutime, cstime;
606 cputime_t gtime;
607 cputime_t cgtime;
608#ifndef CONFIG_VIRT_CPU_ACCOUNTING
609 struct cputime prev_cputime;
610#endif
611 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
612 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
613 unsigned long inblock, oublock, cinblock, coublock;
614 unsigned long maxrss, cmaxrss;
615 struct task_io_accounting ioac;
616
	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
623 unsigned long long sum_sched_runtime;
624
625 /*
626 * We don't bother to synchronize most readers of this at all,
627 * because there is no reader checking a limit that actually needs
628 * to get both rlim_cur and rlim_max atomically, and either one
629 * alone is a single word that can safely be read normally.
630 * getrlimit/setrlimit use task_lock(current->group_leader) to
631 * protect this instead of the siglock, because they really
632 * have no need to disable irqs.
633 */
634 struct rlimit rlim[RLIM_NLIMITS];
635
636#ifdef CONFIG_BSD_PROCESS_ACCT
637 struct pacct_struct pacct; /* per-process accounting information */
638#endif
639#ifdef CONFIG_TASKSTATS
640 struct taskstats *stats;
641#endif
642#ifdef CONFIG_AUDIT
643 unsigned audit_tty;
644 struct tty_audit_buf *tty_audit_buf;
645#endif
646#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, or more specifically from setting
	 * PF_EXITING. The fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end(). Users that require the
	 * threadgroup to remain stable should use threadgroup_[un]lock(),
	 * which also takes care of the exec path. Currently, cgroup is the
	 * only user.
	 */
656 struct rw_semaphore group_rwsem;
657#endif
658
659 oom_flags_t oom_flags;
660 short oom_score_adj; /* OOM kill score adjustment */
661 short oom_score_adj_min; /* OOM kill score adjustment min value.
662 * Only settable by CAP_SYS_RESOURCE. */
663
664 struct mutex cred_guard_mutex; /* guard against foreign influences on
665 * credential calculations
666 * (notably. ptrace) */
667};
668
669/*
670 * Bits in flags field of signal_struct.
671 */
672#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
673#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
674#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
675/*
676 * Pending notifications to parent.
677 */
678#define SIGNAL_CLD_STOPPED 0x00000010
679#define SIGNAL_CLD_CONTINUED 0x00000020
680#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
681
682#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
683
684/* If true, all threads except ->group_exit_task have pending SIGKILL */
685static inline int signal_group_exit(const struct signal_struct *sig)
686{
687 return (sig->flags & SIGNAL_GROUP_EXIT) ||
688 (sig->group_exit_task != NULL);
689}
690
/*
 * Some day this will be a full-fledged user tracking system.
 */
694struct user_struct {
695 atomic_t __count; /* reference count */
696 atomic_t processes; /* How many processes does this user have? */
697 atomic_t files; /* How many open files does this user have? */
698 atomic_t sigpending; /* How many pending signals does this user have? */
699#ifdef CONFIG_INOTIFY_USER
700 atomic_t inotify_watches; /* How many inotify watches does this user have? */
701 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
702#endif
703#ifdef CONFIG_FANOTIFY
704 atomic_t fanotify_listeners;
705#endif
706#ifdef CONFIG_EPOLL
707 atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
708#endif
709#ifdef CONFIG_POSIX_MQUEUE
710 /* protected by mq_lock */
711 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
712#endif
713 unsigned long locked_shm; /* How many pages of mlocked shm ? */
714
715#ifdef CONFIG_KEYS
716 struct key *uid_keyring; /* UID specific keyring */
717 struct key *session_keyring; /* UID's default session keyring */
718#endif
719
720 /* Hash table maintenance information */
721 struct hlist_node uidhash_node;
722 kuid_t uid;
723
724#ifdef CONFIG_PERF_EVENTS
725 atomic_long_t locked_vm;
726#endif
727};
728
729extern int uids_sysfs_init(void);
730
731extern struct user_struct *find_user(kuid_t);
732
733extern struct user_struct root_user;
734#define INIT_USER (&root_user)
735
736
737struct backing_dev_info;
738struct reclaim_state;
739
740#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
741struct sched_info {
742 /* cumulative counters */
743 unsigned long pcount; /* # of times run on this cpu */
744 unsigned long long run_delay; /* time spent waiting on a runqueue */
745
746 /* timestamps */
747 unsigned long long last_arrival,/* when we last ran on a cpu */
748 last_queued; /* when we were last queued to run */
749};
750#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
751
752#ifdef CONFIG_TASK_DELAY_ACCT
753struct task_delay_info {
754 spinlock_t lock;
755 unsigned int flags; /* Private per-task flags */
756
757 /* For each stat XXX, add following, aligned appropriately
758 *
759 * struct timespec XXX_start, XXX_end;
760 * u64 XXX_delay;
761 * u32 XXX_count;
762 *
763 * Atomicity of updates to XXX_delay, XXX_count protected by
764 * single lock above (split into XXX_lock if contention is an issue).
765 */
766
767 /*
768 * XXX_count is incremented on every XXX operation, the delay
769 * associated with the operation is added to XXX_delay.
770 * XXX_delay contains the accumulated delay time in nanoseconds.
771 */
772 struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
773 u64 blkio_delay; /* wait for sync block io completion */
774 u64 swapin_delay; /* wait for swapin block io completion */
775 u32 blkio_count; /* total count of the number of sync block */
776 /* io operations performed */
777 u32 swapin_count; /* total count of the number of swapin block */
778 /* io operations performed */
779
780 struct timespec freepages_start, freepages_end;
781 u64 freepages_delay; /* wait for memory reclaim */
782 u32 freepages_count; /* total count of memory reclaim */
783};
784#endif /* CONFIG_TASK_DELAY_ACCT */
785
786static inline int sched_info_on(void)
787{
788#ifdef CONFIG_SCHEDSTATS
789 return 1;
790#elif defined(CONFIG_TASK_DELAY_ACCT)
791 extern int delayacct_on;
792 return delayacct_on;
793#else
794 return 0;
795#endif
796}
797
798enum cpu_idle_type {
799 CPU_IDLE,
800 CPU_NOT_IDLE,
801 CPU_NEWLY_IDLE,
802 CPU_MAX_IDLE_TYPES
803};
804
805/*
806 * Increase resolution of nice-level calculations for 64-bit architectures.
807 * The extra resolution improves shares distribution and load balancing of
808 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
809 * hierarchies, especially on larger systems. This is not a user-visible change
810 * and does not change the user-interface for setting shares/weights.
811 *
812 * We increase resolution only if we have enough bits to allow this increased
813 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
814 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
815 * increased costs.
816 */
817#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
818# define SCHED_LOAD_RESOLUTION 10
819# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
820# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
821#else
822# define SCHED_LOAD_RESOLUTION 0
823# define scale_load(w) (w)
824# define scale_load_down(w) (w)
825#endif
826
827#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
828#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
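/*
 * Worked example (sketch): with SCHED_LOAD_RESOLUTION == 10, a nice-0 weight
 * of 1024 is stored as scale_load(1024) == 1048576 and recovered with
 * scale_load_down(); with the resolution at 0 (the current setting above)
 * both macros are the identity.
 */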
829
830/*
831 * Increase resolution of cpu_power calculations
832 */
833#define SCHED_POWER_SHIFT 10
834#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT)
835
836/*
837 * sched-domains (multiprocessor balancing) declarations:
838 */
839#ifdef CONFIG_SMP
840#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
841#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
842#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
843#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
844#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
845#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
846#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
847#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
848#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
849#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
850#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
851#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
852
853extern int __weak arch_sd_sibiling_asym_packing(void);
854
855struct sched_group_power {
856 atomic_t ref;
857 /*
858 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
859 * single CPU.
860 */
861 unsigned int power, power_orig;
862 unsigned long next_update;
863 /*
864 * Number of busy cpus in this group.
865 */
866 atomic_t nr_busy_cpus;
867
868 unsigned long cpumask[0]; /* iteration mask */
869};
870
871struct sched_group {
872 struct sched_group *next; /* Must be a circular list */
873 atomic_t ref;
874
875 unsigned int group_weight;
876 struct sched_group_power *sgp;
877
878 /*
879 * The CPUs this group covers.
880 *
881 * NOTE: this field is variable length. (Allocated dynamically
882 * by attaching extra space to the end of the structure,
883 * depending on how many CPUs the kernel has booted up with)
884 */
885 unsigned long cpumask[0];
886};
887
888static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
889{
890 return to_cpumask(sg->cpumask);
891}
892
893/*
894 * cpumask masking which cpus in the group are allowed to iterate up the domain
895 * tree.
896 */
897static inline struct cpumask *sched_group_mask(struct sched_group *sg)
898{
899 return to_cpumask(sg->sgp->cpumask);
900}
901
902/**
903 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
904 * @group: The group whose first cpu is to be returned.
905 */
906static inline unsigned int group_first_cpu(struct sched_group *group)
907{
908 return cpumask_first(sched_group_cpus(group));
909}
910
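/*
 * Illustrative sketch ("sg" is assumed to be a valid sched_group pointer):
 * walking the CPUs covered by a group.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_group_cpus(sg))
 *		pr_debug("group %p contains cpu %d\n", sg, cpu);
 */
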
911struct sched_domain_attr {
912 int relax_domain_level;
913};
914
915#define SD_ATTR_INIT (struct sched_domain_attr) { \
916 .relax_domain_level = -1, \
917}
918
919extern int sched_domain_level_max;
920
921struct sched_domain {
922 /* These fields must be setup */
923 struct sched_domain *parent; /* top domain must be null terminated */
924 struct sched_domain *child; /* bottom domain must be null terminated */
925 struct sched_group *groups; /* the balancing groups of the domain */
926 unsigned long min_interval; /* Minimum balance interval ms */
927 unsigned long max_interval; /* Maximum balance interval ms */
928 unsigned int busy_factor; /* less balancing by factor if busy */
929 unsigned int imbalance_pct; /* No balance until over watermark */
930 unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
931 unsigned int busy_idx;
932 unsigned int idle_idx;
933 unsigned int newidle_idx;
934 unsigned int wake_idx;
935 unsigned int forkexec_idx;
936 unsigned int smt_gain;
937 int flags; /* See SD_* */
938 int level;
939
940 /* Runtime fields. */
941 unsigned long last_balance; /* init to jiffies. units in jiffies */
942 unsigned int balance_interval; /* initialise to 1. units in ms. */
943 unsigned int nr_balance_failed; /* initialise to 0 */
944
945 u64 last_update;
946
947#ifdef CONFIG_SCHEDSTATS
948 /* load_balance() stats */
949 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
950 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
951 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
952 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
953 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
954 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
955 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
956 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
957
958 /* Active load balancing */
959 unsigned int alb_count;
960 unsigned int alb_failed;
961 unsigned int alb_pushed;
962
963 /* SD_BALANCE_EXEC stats */
964 unsigned int sbe_count;
965 unsigned int sbe_balanced;
966 unsigned int sbe_pushed;
967
968 /* SD_BALANCE_FORK stats */
969 unsigned int sbf_count;
970 unsigned int sbf_balanced;
971 unsigned int sbf_pushed;
972
973 /* try_to_wake_up() stats */
974 unsigned int ttwu_wake_remote;
975 unsigned int ttwu_move_affine;
976 unsigned int ttwu_move_balance;
977#endif
978#ifdef CONFIG_SCHED_DEBUG
979 char *name;
980#endif
981 union {
982 void *private; /* used during construction */
983 struct rcu_head rcu; /* used during destruction */
984 };
985
986 unsigned int span_weight;
987 /*
988 * Span of all CPUs in this domain.
989 *
990 * NOTE: this field is variable length. (Allocated dynamically
991 * by attaching extra space to the end of the structure,
992 * depending on how many CPUs the kernel has booted up with)
993 */
994 unsigned long span[0];
995};
996
997static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
998{
999 return to_cpumask(sd->span);
1000}
1001
1002extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1003 struct sched_domain_attr *dattr_new);
1004
1005/* Allocate an array of sched domains, for partition_sched_domains(). */
1006cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1007void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1008
1009/* Test a flag in parent sched domain */
1010static inline int test_sd_parent(struct sched_domain *sd, int flag)
1011{
1012 if (sd->parent && (sd->parent->flags & flag))
1013 return 1;
1014
1015 return 0;
1016}
1017
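/*
 * Illustrative sketch ("sd" is assumed to start at a CPU's lowest
 * sched_domain): walking up the hierarchy looking for a level that still
 * does load balancing.
 *
 *	while (sd && !(sd->flags & SD_LOAD_BALANCE))
 *		sd = sd->parent;
 */
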
1018unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
1019unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1020
1021bool cpus_share_cache(int this_cpu, int that_cpu);
1022
1023#else /* CONFIG_SMP */
1024
1025struct sched_domain_attr;
1026
1027static inline void
1028partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1029 struct sched_domain_attr *dattr_new)
1030{
1031}
1032
1033static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1034{
1035 return true;
1036}
1037
1038#endif /* !CONFIG_SMP */
1039
1040
1041struct io_context; /* See blkdev.h */
1042
1043
1044#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1045extern void prefetch_stack(struct task_struct *t);
1046#else
1047static inline void prefetch_stack(struct task_struct *t) { }
1048#endif
1049
1050struct audit_context; /* See audit.c */
1051struct mempolicy;
1052struct pipe_inode_info;
1053struct uts_namespace;
1054
1055struct rq;
1056struct sched_domain;
1057
1058/*
1059 * wake flags
1060 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1062#define WF_FORK 0x02 /* child wakeup after fork */
1063#define WF_MIGRATED 0x04 /* internal use, task got migrated */
1064
1065#define ENQUEUE_WAKEUP 1
1066#define ENQUEUE_HEAD 2
1067#ifdef CONFIG_SMP
1068#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
1069#else
1070#define ENQUEUE_WAKING 0
1071#endif
1072
1073#define DEQUEUE_SLEEP 1
1074
1075struct sched_class {
1076 const struct sched_class *next;
1077
1078 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1079 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1080 void (*yield_task) (struct rq *rq);
1081 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1082
1083 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1084
1085 struct task_struct * (*pick_next_task) (struct rq *rq);
1086 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1087
1088#ifdef CONFIG_SMP
1089 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1090 void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
1091
1092 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1093 void (*post_schedule) (struct rq *this_rq);
1094 void (*task_waking) (struct task_struct *task);
1095 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1096
1097 void (*set_cpus_allowed)(struct task_struct *p,
1098 const struct cpumask *newmask);
1099
1100 void (*rq_online)(struct rq *rq);
1101 void (*rq_offline)(struct rq *rq);
1102#endif
1103
1104 void (*set_curr_task) (struct rq *rq);
1105 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1106 void (*task_fork) (struct task_struct *p);
1107
1108 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1109 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1110 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1111 int oldprio);
1112
1113 unsigned int (*get_rr_interval) (struct rq *rq,
1114 struct task_struct *task);
1115
1116#ifdef CONFIG_FAIR_GROUP_SCHED
1117 void (*task_move_group) (struct task_struct *p, int on_rq);
1118#endif
1119};
1120
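/*
 * Illustrative sketch (the demo_* names are hypothetical, not a real class):
 * a scheduling class is wired up by filling in these hooks and chaining it
 * into the class list via ->next.
 *
 *	static const struct sched_class demo_sched_class = {
 *		.next		= &fair_sched_class,
 *		.enqueue_task	= demo_enqueue_task,
 *		.dequeue_task	= demo_dequeue_task,
 *		.pick_next_task	= demo_pick_next_task,
 *		.put_prev_task	= demo_put_prev_task,
 *	};
 */
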
1121struct load_weight {
1122 unsigned long weight, inv_weight;
1123};
1124
1125struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are bound
	 * above by 1024/(1-y). Thus we only need a u32 to store them for all
	 * choices of y < 1-2^(-32)*1024.
	 */
1131 u32 runnable_avg_sum, runnable_avg_period;
1132 u64 last_runnable_update;
1133 s64 decay_count;
1134 unsigned long load_avg_contrib;
1135};
1136
1137#ifdef CONFIG_SCHEDSTATS
1138struct sched_statistics {
1139 u64 wait_start;
1140 u64 wait_max;
1141 u64 wait_count;
1142 u64 wait_sum;
1143 u64 iowait_count;
1144 u64 iowait_sum;
1145
1146 u64 sleep_start;
1147 u64 sleep_max;
1148 s64 sum_sleep_runtime;
1149
1150 u64 block_start;
1151 u64 block_max;
1152 u64 exec_max;
1153 u64 slice_max;
1154
1155 u64 nr_migrations_cold;
1156 u64 nr_failed_migrations_affine;
1157 u64 nr_failed_migrations_running;
1158 u64 nr_failed_migrations_hot;
1159 u64 nr_forced_migrations;
1160
1161 u64 nr_wakeups;
1162 u64 nr_wakeups_sync;
1163 u64 nr_wakeups_migrate;
1164 u64 nr_wakeups_local;
1165 u64 nr_wakeups_remote;
1166 u64 nr_wakeups_affine;
1167 u64 nr_wakeups_affine_attempts;
1168 u64 nr_wakeups_passive;
1169 u64 nr_wakeups_idle;
1170};
1171#endif
1172
1173struct sched_entity {
1174 struct load_weight load; /* for load-balancing */
1175 struct rb_node run_node;
1176 struct list_head group_node;
1177 unsigned int on_rq;
1178
1179 u64 exec_start;
1180 u64 sum_exec_runtime;
1181 u64 vruntime;
1182 u64 prev_sum_exec_runtime;
1183
1184 u64 nr_migrations;
1185
1186#ifdef CONFIG_SCHEDSTATS
1187 struct sched_statistics statistics;
1188#endif
1189
1190#ifdef CONFIG_FAIR_GROUP_SCHED
1191 struct sched_entity *parent;
1192 /* rq on which this entity is (to be) queued: */
1193 struct cfs_rq *cfs_rq;
1194 /* rq "owned" by this entity/group: */
1195 struct cfs_rq *my_q;
1196#endif
/*
 * Load-tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
 * may be removed once load tracking is useful beyond shares distribution
 * (e.g. load balancing).
 */
1202#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1203 /* Per-entity load-tracking */
1204 struct sched_avg avg;
1205#endif
1206};
1207
1208struct sched_rt_entity {
1209 struct list_head run_list;
1210 unsigned long timeout;
1211 unsigned int time_slice;
1212
1213 struct sched_rt_entity *back;
1214#ifdef CONFIG_RT_GROUP_SCHED
1215 struct sched_rt_entity *parent;
1216 /* rq on which this entity is (to be) queued: */
1217 struct rt_rq *rt_rq;
1218 /* rq "owned" by this entity/group: */
1219 struct rt_rq *my_q;
1220#endif
1221};
1222
1223/*
1224 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
1225 * Timeslices get refilled after they expire.
1226 */
1227#define RR_TIMESLICE (100 * HZ / 1000)
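/* E.g. with HZ == 1000 this is 100 jiffies; with HZ == 250 it is 25 jiffies. */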
1228
1229struct rcu_node;
1230
1231enum perf_event_task_context {
1232 perf_invalid_context = -1,
1233 perf_hw_context = 0,
1234 perf_sw_context,
1235 perf_nr_task_contexts,
1236};
1237
1238struct task_struct {
1239 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
1240 void *stack;
1241 atomic_t usage;
1242 unsigned int flags; /* per process flags, defined below */
1243 unsigned int ptrace;
1244
1245#ifdef CONFIG_SMP
1246 struct llist_node wake_entry;
1247 int on_cpu;
1248#endif
1249 int on_rq;
1250
1251 int prio, static_prio, normal_prio;
1252 unsigned int rt_priority;
1253 const struct sched_class *sched_class;
1254 struct sched_entity se;
1255 struct sched_rt_entity rt;
1256#ifdef CONFIG_CGROUP_SCHED
1257 struct task_group *sched_task_group;
1258#endif
1259
1260#ifdef CONFIG_PREEMPT_NOTIFIERS
1261 /* list of struct preempt_notifier: */
1262 struct hlist_head preempt_notifiers;
1263#endif
1264
	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the
	 * lazy FPU saving becomes unlazy to save the trap. This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behaviour turns lazy again; this is to deal with bursty apps that
	 * only use the FPU for a short time.
	 */
1273 unsigned char fpu_counter;
1274#ifdef CONFIG_BLK_DEV_IO_TRACE
1275 unsigned int btrace_seq;
1276#endif
1277
1278 unsigned int policy;
1279 int nr_cpus_allowed;
1280 cpumask_t cpus_allowed;
1281
1282#ifdef CONFIG_PREEMPT_RCU
1283 int rcu_read_lock_nesting;
1284 char rcu_read_unlock_special;
1285 struct list_head rcu_node_entry;
1286#endif /* #ifdef CONFIG_PREEMPT_RCU */
1287#ifdef CONFIG_TREE_PREEMPT_RCU
1288 struct rcu_node *rcu_blocked_node;
1289#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1290#ifdef CONFIG_RCU_BOOST
1291 struct rt_mutex *rcu_boost_mutex;
1292#endif /* #ifdef CONFIG_RCU_BOOST */
1293
1294#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1295 struct sched_info sched_info;
1296#endif
1297
1298 struct list_head tasks;
1299#ifdef CONFIG_SMP
1300 struct plist_node pushable_tasks;
1301#endif
1302
1303 struct mm_struct *mm, *active_mm;
1304#ifdef CONFIG_COMPAT_BRK
1305 unsigned brk_randomized:1;
1306#endif
1307#if defined(SPLIT_RSS_COUNTING)
1308 struct task_rss_stat rss_stat;
1309#endif
1310/* task state */
1311 int exit_state;
1312 int exit_code, exit_signal;
1313 int pdeath_signal; /* The signal sent when the parent dies */
1314 unsigned int jobctl; /* JOBCTL_*, siglock protected */
1315 /* ??? */
1316 unsigned int personality;
1317 unsigned did_exec:1;
1318 unsigned in_execve:1; /* Tell the LSMs that the process is doing an
1319 * execve */
1320 unsigned in_iowait:1;
1321
1322 /* task may not gain privileges */
1323 unsigned no_new_privs:1;
1324
1325 /* Revert to default priority/policy when forking */
1326 unsigned sched_reset_on_fork:1;
1327 unsigned sched_contributes_to_load:1;
1328
1329 pid_t pid;
1330 pid_t tgid;
1331
1332#ifdef CONFIG_CC_STACKPROTECTOR
1333 /* Canary value for the -fstack-protector gcc feature */
1334 unsigned long stack_canary;
1335#endif
1336 /*
1337 * pointers to (original) parent process, youngest child, younger sibling,
1338 * older sibling, respectively. (p->father can be replaced with
1339 * p->real_parent->pid)
1340 */
1341 struct task_struct __rcu *real_parent; /* real parent process */
1342 struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1343 /*
1344 * children/sibling forms the list of my natural children
1345 */
1346 struct list_head children; /* list of my children */
1347 struct list_head sibling; /* linkage in my parent's children list */
1348 struct task_struct *group_leader; /* threadgroup leader */
1349
1350 /*
1351 * ptraced is the list of tasks this task is using ptrace on.
1352 * This includes both natural children and PTRACE_ATTACH targets.
1353 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1354 */
1355 struct list_head ptraced;
1356 struct list_head ptrace_entry;
1357
1358 /* PID/PID hash table linkage. */
1359 struct pid_link pids[PIDTYPE_MAX];
1360 struct list_head thread_group;
1361
1362 struct completion *vfork_done; /* for vfork() */
1363 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
1364 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
1365
1366 cputime_t utime, stime, utimescaled, stimescaled;
1367 cputime_t gtime;
1368#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1369 struct cputime prev_cputime;
1370#endif
1371 unsigned long nvcsw, nivcsw; /* context switch counts */
1372 struct timespec start_time; /* monotonic time */
1373 struct timespec real_start_time; /* boot based time */
1374/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1375 unsigned long min_flt, maj_flt;
1376
1377 struct task_cputime cputime_expires;
1378 struct list_head cpu_timers[3];
1379
1380/* process credentials */
1381 const struct cred __rcu *real_cred; /* objective and real subjective task
1382 * credentials (COW) */
1383 const struct cred __rcu *cred; /* effective (overridable) subjective task
1384 * credentials (COW) */
1385 char comm[TASK_COMM_LEN]; /* executable name excluding path
1386 - access with [gs]et_task_comm (which lock
1387 it with task_lock())
1388 - initialized normally by setup_new_exec */
1389/* file system info */
1390 int link_count, total_link_count;
1391#ifdef CONFIG_SYSVIPC
1392/* ipc stuff */
1393 struct sysv_sem sysvsem;
1394#endif
1395#ifdef CONFIG_DETECT_HUNG_TASK
1396/* hung task detection */
1397 unsigned long last_switch_count;
1398#endif
1399/* CPU-specific state of this task */
1400 struct thread_struct thread;
1401/* filesystem information */
1402 struct fs_struct *fs;
1403/* open file information */
1404 struct files_struct *files;
1405/* namespaces */
1406 struct nsproxy *nsproxy;
1407/* signal handlers */
1408 struct signal_struct *signal;
1409 struct sighand_struct *sighand;
1410
1411 sigset_t blocked, real_blocked;
1412 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
1413 struct sigpending pending;
1414
1415 unsigned long sas_ss_sp;
1416 size_t sas_ss_size;
1417 int (*notifier)(void *priv);
1418 void *notifier_data;
1419 sigset_t *notifier_mask;
1420 struct callback_head *task_works;
1421
1422 struct audit_context *audit_context;
1423#ifdef CONFIG_AUDITSYSCALL
1424 kuid_t loginuid;
1425 unsigned int sessionid;
1426#endif
1427 struct seccomp seccomp;
1428
1429/* Thread group tracking */
1430 u32 parent_exec_id;
1431 u32 self_exec_id;
1432/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1433 * mempolicy */
1434 spinlock_t alloc_lock;
1435
1436 /* Protection of the PI data structures: */
1437 raw_spinlock_t pi_lock;
1438
1439#ifdef CONFIG_RT_MUTEXES
1440 /* PI waiters blocked on a rt_mutex held by this task */
1441 struct plist_head pi_waiters;
1442 /* Deadlock detection and priority inheritance handling */
1443 struct rt_mutex_waiter *pi_blocked_on;
1444#endif
1445
1446#ifdef CONFIG_DEBUG_MUTEXES
1447 /* mutex deadlock detection */
1448 struct mutex_waiter *blocked_on;
1449#endif
1450#ifdef CONFIG_TRACE_IRQFLAGS
1451 unsigned int irq_events;
1452 unsigned long hardirq_enable_ip;
1453 unsigned long hardirq_disable_ip;
1454 unsigned int hardirq_enable_event;
1455 unsigned int hardirq_disable_event;
1456 int hardirqs_enabled;
1457 int hardirq_context;
1458 unsigned long softirq_disable_ip;
1459 unsigned long softirq_enable_ip;
1460 unsigned int softirq_disable_event;
1461 unsigned int softirq_enable_event;
1462 int softirqs_enabled;
1463 int softirq_context;
1464#endif
1465#ifdef CONFIG_LOCKDEP
1466# define MAX_LOCK_DEPTH 48UL
1467 u64 curr_chain_key;
1468 int lockdep_depth;
1469 unsigned int lockdep_recursion;
1470 struct held_lock held_locks[MAX_LOCK_DEPTH];
1471 gfp_t lockdep_reclaim_gfp;
1472#endif
1473
1474/* journalling filesystem info */
1475 void *journal_info;
1476
1477/* stacked block device info */
1478 struct bio_list *bio_list;
1479
1480#ifdef CONFIG_BLOCK
1481/* stack plugging */
1482 struct blk_plug *plug;
1483#endif
1484
1485/* VM state */
1486 struct reclaim_state *reclaim_state;
1487
1488 struct backing_dev_info *backing_dev_info;
1489
1490 struct io_context *io_context;
1491
1492 unsigned long ptrace_message;
1493 siginfo_t *last_siginfo; /* For ptrace use. */
1494 struct task_io_accounting ioac;
1495#if defined(CONFIG_TASK_XACCT)
1496 u64 acct_rss_mem1; /* accumulated rss usage */
1497 u64 acct_vm_mem1; /* accumulated virtual memory usage */
1498 cputime_t acct_timexpd; /* stime + utime since last update */
1499#endif
1500#ifdef CONFIG_CPUSETS
1501 nodemask_t mems_allowed; /* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
1503 int cpuset_mem_spread_rotor;
1504 int cpuset_slab_spread_rotor;
1505#endif
1506#ifdef CONFIG_CGROUPS
1507 /* Control Group info protected by css_set_lock */
1508 struct css_set __rcu *cgroups;
1509 /* cg_list protected by css_set_lock and tsk->alloc_lock */
1510 struct list_head cg_list;
1511#endif
1512#ifdef CONFIG_FUTEX
1513 struct robust_list_head __user *robust_list;
1514#ifdef CONFIG_COMPAT
1515 struct compat_robust_list_head __user *compat_robust_list;
1516#endif
1517 struct list_head pi_state_list;
1518 struct futex_pi_state *pi_state_cache;
1519#endif
1520#ifdef CONFIG_PERF_EVENTS
1521 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1522 struct mutex perf_event_mutex;
1523 struct list_head perf_event_list;
1524#endif
1525#ifdef CONFIG_NUMA
1526 struct mempolicy *mempolicy; /* Protected by alloc_lock */
1527 short il_next;
1528 short pref_node_fork;
1529#endif
1530#ifdef CONFIG_NUMA_BALANCING
1531 int numa_scan_seq;
1532 int numa_migrate_seq;
1533 unsigned int numa_scan_period;
1534 u64 node_stamp; /* migration stamp */
1535 struct callback_head numa_work;
1536#endif /* CONFIG_NUMA_BALANCING */
1537
1538 struct rcu_head rcu;
1539
1540 /*
1541 * cache last used pipe for splice
1542 */
1543 struct pipe_inode_info *splice_pipe;
1544
1545 struct page_frag task_frag;
1546
1547#ifdef CONFIG_TASK_DELAY_ACCT
1548 struct task_delay_info *delays;
1549#endif
1550#ifdef CONFIG_FAULT_INJECTION
1551 int make_it_fail;
1552#endif
1553 /*
1554 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1555 * balance_dirty_pages() for some dirty throttling pause
1556 */
1557 int nr_dirtied;
1558 int nr_dirtied_pause;
1559 unsigned long dirty_paused_when; /* start of a write-and-pause period */
1560
1561#ifdef CONFIG_LATENCYTOP
1562 int latency_record_count;
1563 struct latency_record latency_record[LT_SAVECOUNT];
1564#endif
1565 /*
1566 * time slack values; these are used to round up poll() and
1567 * select() etc timeout values. These are in nanoseconds.
1568 */
1569 unsigned long timer_slack_ns;
1570 unsigned long default_timer_slack_ns;
1571
1572#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1573 /* Index of current stored address in ret_stack */
1574 int curr_ret_stack;
1575 /* Stack of return addresses for return function tracing */
1576 struct ftrace_ret_stack *ret_stack;
1577 /* time stamp for last schedule */
1578 unsigned long long ftrace_timestamp;
1579 /*
1580 * Number of functions that haven't been traced
1581 * because of depth overrun.
1582 */
1583 atomic_t trace_overrun;
1584 /* Pause for the tracing */
1585 atomic_t tracing_graph_pause;
1586#endif
1587#ifdef CONFIG_TRACING
1588 /* state flags for use by tracers */
1589 unsigned long trace;
1590 /* bitmask and counter of trace recursion */
1591 unsigned long trace_recursion;
1592#endif /* CONFIG_TRACING */
1593#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1594 struct memcg_batch_info {
1595 int do_batch; /* incremented when batch uncharge started */
1596 struct mem_cgroup *memcg; /* target memcg of uncharge */
1597 unsigned long nr_pages; /* uncharged usage */
1598 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1599 } memcg_batch;
1600 unsigned int memcg_kmem_skip_account;
1601#endif
1602#ifdef CONFIG_HAVE_HW_BREAKPOINT
1603 atomic_t ptrace_bp_refcnt;
1604#endif
1605#ifdef CONFIG_UPROBES
1606 struct uprobe_task *utask;
1607#endif
1608};
1609
1610/* Future-safe accessor for struct task_struct's cpus_allowed. */
1611#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1612
1613#ifdef CONFIG_NUMA_BALANCING
1614extern void task_numa_fault(int node, int pages, bool migrated);
1615extern void set_numabalancing_state(bool enabled);
1616#else
1617static inline void task_numa_fault(int node, int pages, bool migrated)
1618{
1619}
1620static inline void set_numabalancing_state(bool enabled)
1621{
1622}
1623#endif
1624
1625/*
1626 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1627 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1628 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1629 * values are inverted: lower p->prio value means higher priority.
1630 *
1631 * The MAX_USER_RT_PRIO value allows the actual maximum
1632 * RT priority to be separate from the value exported to
1633 * user-space. This allows kernel threads to set their
1634 * priority to a value higher than any user task. Note:
1635 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1636 */
1637
1638#define MAX_USER_RT_PRIO 100
1639#define MAX_RT_PRIO MAX_USER_RT_PRIO
1640
1641#define MAX_PRIO (MAX_RT_PRIO + 40)
1642#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
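/*
 * With the values above: MAX_RT_PRIO == 100, MAX_PRIO == 140 and
 * DEFAULT_PRIO == 120 (nice 0); nice levels -20..19 map onto the
 * non-RT priority range 100..139.
 */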
1643
1644static inline int rt_prio(int prio)
1645{
1646 if (unlikely(prio < MAX_RT_PRIO))
1647 return 1;
1648 return 0;
1649}
1650
1651static inline int rt_task(struct task_struct *p)
1652{
1653 return rt_prio(p->prio);
1654}
1655
1656static inline struct pid *task_pid(struct task_struct *task)
1657{
1658 return task->pids[PIDTYPE_PID].pid;
1659}
1660
1661static inline struct pid *task_tgid(struct task_struct *task)
1662{
1663 return task->group_leader->pids[PIDTYPE_PID].pid;
1664}
1665
/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
1671static inline struct pid *task_pgrp(struct task_struct *task)
1672{
1673 return task->group_leader->pids[PIDTYPE_PGID].pid;
1674}
1675
1676static inline struct pid *task_session(struct task_struct *task)
1677{
1678 return task->group_leader->pids[PIDTYPE_SID].pid;
1679}
1680
1681struct pid_namespace;
1682
1683/*
1684 * the helpers to get the task's different pids as they are seen
1685 * from various namespaces
1686 *
1687 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
1688 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1689 * current.
1690 * task_xid_nr_ns() : id seen from the ns specified;
1691 *
1692 * set_task_vxid() : assigns a virtual id to a task;
1693 *
1694 * see also pid_nr() etc in include/linux/pid.h
1695 */
1696pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1697 struct pid_namespace *ns);
1698
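/*
 * Illustrative sketch (the numbers are assumed): for a task living in a
 * nested pid namespace, task_pid_nr(p) might return 4321 (its global id)
 * while task_pid_vnr(p) returns 7 (its id as seen inside its own namespace),
 * and task_pid_nr_ns(p, ns) returns whatever id the task has in the given ns.
 */
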
1699static inline pid_t task_pid_nr(struct task_struct *tsk)
1700{
1701 return tsk->pid;
1702}
1703
1704static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1705 struct pid_namespace *ns)
1706{
1707 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1708}
1709
1710static inline pid_t task_pid_vnr(struct task_struct *tsk)
1711{
1712 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1713}
1714
1715
1716static inline pid_t task_tgid_nr(struct task_struct *tsk)
1717{
1718 return tsk->tgid;
1719}
1720
1721pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1722
1723static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1724{
1725 return pid_vnr(task_tgid(tsk));
1726}
1727
1728
1729static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1730 struct pid_namespace *ns)
1731{
1732 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1733}
1734
1735static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1736{
1737 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1738}
1739
1740
1741static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1742 struct pid_namespace *ns)
1743{
1744 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1745}
1746
1747static inline pid_t task_session_vnr(struct task_struct *tsk)
1748{
1749 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1750}
1751
1752/* obsolete, do not use */
1753static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1754{
1755 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1756}
1757
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
1766static inline int pid_alive(struct task_struct *p)
1767{
1768 return p->pids[PIDTYPE_PID].pid != NULL;
1769}
1770
1771/**
1772 * is_global_init - check if a task structure is init
1773 * @tsk: Task structure to be checked.
1774 *
1775 * Check if a task structure is the first user space task the kernel created.
1776 */
1777static inline int is_global_init(struct task_struct *tsk)
1778{
1779 return tsk->pid == 1;
1780}
1781
1782extern struct pid *cad_pid;
1783
1784extern void free_task(struct task_struct *tsk);
1785#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1786
1787extern void __put_task_struct(struct task_struct *t);
1788
1789static inline void put_task_struct(struct task_struct *t)
1790{
1791 if (atomic_dec_and_test(&t->usage))
1792 __put_task_struct(t);
1793}
1794
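/*
 * A sketch of the reference counting pattern (assumption: @p was found
 * under rcu_read_lock() or tasklist_lock, which keeps the task_struct
 * from being freed while we take the reference):
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 *	if (p) {
 *		...sleepable work referring to p...
 *		put_task_struct(p);
 *	}
 */
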
1795extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1796extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1797
1798/*
1799 * Per process flags
1800 */
1801#define PF_EXITING 0x00000004 /* getting shut down */
1802#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
1803#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1804#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1805#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
1806#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1807#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1808#define PF_DUMPCORE 0x00000200 /* dumped core */
1809#define PF_SIGNALED 0x00000400 /* killed by a signal */
1810#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1811#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1812#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1813#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1814#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1815#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1816#define PF_KSWAPD 0x00040000 /* I am kswapd */
1817#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1818#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1819#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1820#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1821#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
1822#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
1823#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
1824#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1825#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1826#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1827#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1828
1829/*
1830 * Only the _current_ task can read/write to tsk->flags, but other
1831 * tasks can access tsk->flags in readonly mode, for example
1832 * with tsk_used_math() (as during threaded core dumping).
1833 * There is, however, an exception to this rule during ptrace
1834 * or during fork: the ptracer task is allowed to write to the
1835 * child->flags of its traced child (same goes for fork, the parent
1836 * can write to the child->flags), because we're guaranteed the
1837 * child is not running and in turn not changing child->flags
1838 * at the same time the parent does it.
1839 */
1840#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1841#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1842#define clear_used_math() clear_stopped_child_used_math(current)
1843#define set_used_math() set_stopped_child_used_math(current)
1844#define conditional_stopped_child_used_math(condition, child) \
1845 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1846#define conditional_used_math(condition) \
1847 conditional_stopped_child_used_math(condition, current)
1848#define copy_to_stopped_child_used_math(child) \
1849 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1850/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1851#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1852#define used_math() tsk_used_math(current)
1853
1854/*
1855 * task->jobctl flags
1856 */
1857#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
1858
1859#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
1860#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
1861#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
1862#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
1863#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
1864#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
1865#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
1866
1867#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1868#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1869#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
1870#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
1871#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
1872#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
1873#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
1874
1875#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1876#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1877
1878extern bool task_set_jobctl_pending(struct task_struct *task,
1879 unsigned int mask);
1880extern void task_clear_jobctl_trapping(struct task_struct *task);
1881extern void task_clear_jobctl_pending(struct task_struct *task,
1882 unsigned int mask);
1883
1884#ifdef CONFIG_PREEMPT_RCU
1885
1886#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1887#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1888
1889static inline void rcu_copy_process(struct task_struct *p)
1890{
1891 p->rcu_read_lock_nesting = 0;
1892 p->rcu_read_unlock_special = 0;
1893#ifdef CONFIG_TREE_PREEMPT_RCU
1894 p->rcu_blocked_node = NULL;
1895#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1896#ifdef CONFIG_RCU_BOOST
1897 p->rcu_boost_mutex = NULL;
1898#endif /* #ifdef CONFIG_RCU_BOOST */
1899 INIT_LIST_HEAD(&p->rcu_node_entry);
1900}
1901
1902#else
1903
1904static inline void rcu_copy_process(struct task_struct *p)
1905{
1906}
1907
1908#endif
1909
1910static inline void tsk_restore_flags(struct task_struct *task,
1911 unsigned long orig_flags, unsigned long flags)
1912{
1913 task->flags &= ~flags;
1914 task->flags |= orig_flags & flags;
1915}
1916
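/*
 * tsk_restore_flags() supports the save/modify/restore pattern used for
 * bits of current->flags; a sketch for PF_MEMALLOC (the work in the middle
 * is a placeholder):
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	...allocate while exempt from memory reclaim throttling...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */
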
1917#ifdef CONFIG_SMP
1918extern void do_set_cpus_allowed(struct task_struct *p,
1919 const struct cpumask *new_mask);
1920
1921extern int set_cpus_allowed_ptr(struct task_struct *p,
1922 const struct cpumask *new_mask);
1923#else
1924static inline void do_set_cpus_allowed(struct task_struct *p,
1925 const struct cpumask *new_mask)
1926{
1927}
1928static inline int set_cpus_allowed_ptr(struct task_struct *p,
1929 const struct cpumask *new_mask)
1930{
1931 if (!cpumask_test_cpu(0, new_mask))
1932 return -EINVAL;
1933 return 0;
1934}
1935#endif
1936
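/*
 * A sketch of restricting a task to one CPU (assumption: @cpu is online
 * and the caller may sleep; cpumask_of() comes from linux/cpumask.h):
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
 *		pr_warn("failed to pin task %d to CPU %d\n",
 *			task_pid_nr(p), cpu);
 */
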
1937#ifdef CONFIG_NO_HZ
1938void calc_load_enter_idle(void);
1939void calc_load_exit_idle(void);
1940#else
1941static inline void calc_load_enter_idle(void) { }
1942static inline void calc_load_exit_idle(void) { }
1943#endif /* CONFIG_NO_HZ */
1944
1945#ifndef CONFIG_CPUMASK_OFFSTACK
1946static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1947{
1948 return set_cpus_allowed_ptr(p, &new_mask);
1949}
1950#endif
1951
1952/*
1953 * Do not use outside of architecture code which knows its limitations.
1954 *
1955 * sched_clock() has no promise of monotonicity or bounded drift between
1956 * CPUs; using it (which you should not) requires disabling IRQs.
1957 *
1958 * Please use one of the three interfaces below.
1959 */
1960extern unsigned long long notrace sched_clock(void);
1961/*
1962 * See the comment in kernel/sched/clock.c
1963 */
1964extern u64 cpu_clock(int cpu);
1965extern u64 local_clock(void);
1966extern u64 sched_clock_cpu(int cpu);
1967
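/*
 * A sketch of timing a short section with local_clock(), which is the
 * interface most callers should prefer (the delta is only meaningful when
 * both reads happen on the same CPU, per the comment in kernel/sched/clock.c):
 *
 *	u64 t0 = local_clock();
 *
 *	...work being measured...
 *	pr_debug("section took %llu ns\n",
 *		 (unsigned long long)(local_clock() - t0));
 */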
1968
1969extern void sched_clock_init(void);
1970
1971#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1972static inline void sched_clock_tick(void)
1973{
1974}
1975
1976static inline void sched_clock_idle_sleep_event(void)
1977{
1978}
1979
1980static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1981{
1982}
1983#else
1984/*
1985 * Architectures can set this to 1 if they have specified
1986 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1987 * but then during bootup it turns out that sched_clock()
1988 * is reliable after all:
1989 */
1990extern int sched_clock_stable;
1991
1992extern void sched_clock_tick(void);
1993extern void sched_clock_idle_sleep_event(void);
1994extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1995#endif
1996
1997#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1998/*
1999 * An interface for runtime opt-in to IRQ time accounting based on sched_clock.
2000 * The explicit opt-in avoids a performance penalty on systems with a
2001 * slow sched_clock.
2002 */
2003extern void enable_sched_clock_irqtime(void);
2004extern void disable_sched_clock_irqtime(void);
2005#else
2006static inline void enable_sched_clock_irqtime(void) {}
2007static inline void disable_sched_clock_irqtime(void) {}
2008#endif
2009
2010extern unsigned long long
2011task_sched_runtime(struct task_struct *task);
2012
2013/* sched_exec is called by processes performing an exec */
2014#ifdef CONFIG_SMP
2015extern void sched_exec(void);
2016#else
2017#define sched_exec() {}
2018#endif
2019
2020extern void sched_clock_idle_sleep_event(void);
2021extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2022
2023#ifdef CONFIG_HOTPLUG_CPU
2024extern void idle_task_exit(void);
2025#else
2026static inline void idle_task_exit(void) {}
2027#endif
2028
2029#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
2030extern void wake_up_idle_cpu(int cpu);
2031#else
2032static inline void wake_up_idle_cpu(int cpu) { }
2033#endif
2034
2035extern unsigned int sysctl_sched_latency;
2036extern unsigned int sysctl_sched_min_granularity;
2037extern unsigned int sysctl_sched_wakeup_granularity;
2038extern unsigned int sysctl_sched_child_runs_first;
2039
2040enum sched_tunable_scaling {
2041 SCHED_TUNABLESCALING_NONE,
2042 SCHED_TUNABLESCALING_LOG,
2043 SCHED_TUNABLESCALING_LINEAR,
2044 SCHED_TUNABLESCALING_END,
2045};
2046extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
2047
2048extern unsigned int sysctl_numa_balancing_scan_delay;
2049extern unsigned int sysctl_numa_balancing_scan_period_min;
2050extern unsigned int sysctl_numa_balancing_scan_period_max;
2051extern unsigned int sysctl_numa_balancing_scan_period_reset;
2052extern unsigned int sysctl_numa_balancing_scan_size;
2053extern unsigned int sysctl_numa_balancing_settle_count;
2054
2055#ifdef CONFIG_SCHED_DEBUG
2056extern unsigned int sysctl_sched_migration_cost;
2057extern unsigned int sysctl_sched_nr_migrate;
2058extern unsigned int sysctl_sched_time_avg;
2059extern unsigned int sysctl_timer_migration;
2060extern unsigned int sysctl_sched_shares_window;
2061
2062int sched_proc_update_handler(struct ctl_table *table, int write,
2063 void __user *buffer, size_t *length,
2064 loff_t *ppos);
2065#endif
2066#ifdef CONFIG_SCHED_DEBUG
2067static inline unsigned int get_sysctl_timer_migration(void)
2068{
2069 return sysctl_timer_migration;
2070}
2071#else
2072static inline unsigned int get_sysctl_timer_migration(void)
2073{
2074 return 1;
2075}
2076#endif
2077extern unsigned int sysctl_sched_rt_period;
2078extern int sysctl_sched_rt_runtime;
2079
2080int sched_rt_handler(struct ctl_table *table, int write,
2081 void __user *buffer, size_t *lenp,
2082 loff_t *ppos);
2083
2084#ifdef CONFIG_SCHED_AUTOGROUP
2085extern unsigned int sysctl_sched_autogroup_enabled;
2086
2087extern void sched_autogroup_create_attach(struct task_struct *p);
2088extern void sched_autogroup_detach(struct task_struct *p);
2089extern void sched_autogroup_fork(struct signal_struct *sig);
2090extern void sched_autogroup_exit(struct signal_struct *sig);
2091#ifdef CONFIG_PROC_FS
2092extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2093extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2094#endif
2095#else
2096static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2097static inline void sched_autogroup_detach(struct task_struct *p) { }
2098static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2099static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2100#endif
2101
2102#ifdef CONFIG_CFS_BANDWIDTH
2103extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2104#endif
2105
2106#ifdef CONFIG_RT_MUTEXES
2107extern int rt_mutex_getprio(struct task_struct *p);
2108extern void rt_mutex_setprio(struct task_struct *p, int prio);
2109extern void rt_mutex_adjust_pi(struct task_struct *p);
2110static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2111{
2112 return tsk->pi_blocked_on != NULL;
2113}
2114#else
2115static inline int rt_mutex_getprio(struct task_struct *p)
2116{
2117 return p->normal_prio;
2118}
2119# define rt_mutex_adjust_pi(p) do { } while (0)
2120static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2121{
2122 return false;
2123}
2124#endif
2125
2126extern bool yield_to(struct task_struct *p, bool preempt);
2127extern void set_user_nice(struct task_struct *p, long nice);
2128extern int task_prio(const struct task_struct *p);
2129extern int task_nice(const struct task_struct *p);
2130extern int can_nice(const struct task_struct *p, const int nice);
2131extern int task_curr(const struct task_struct *p);
2132extern int idle_cpu(int cpu);
2133extern int sched_setscheduler(struct task_struct *, int,
2134 const struct sched_param *);
2135extern int sched_setscheduler_nocheck(struct task_struct *, int,
2136 const struct sched_param *);
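
/*
 * A sketch of giving a kernel thread a realtime policy (assumption: a
 * priority of 1 is acceptable for the thread in question):
 *
 *	struct sched_param param = { .sched_priority = 1 };
 *
 *	sched_setscheduler(tsk, SCHED_FIFO, &param);
 */
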
2137extern struct task_struct *idle_task(int cpu);
2138/**
2139 * is_idle_task - is the specified task an idle task?
2140 * @p: the task in question.
2141 */
2142static inline bool is_idle_task(const struct task_struct *p)
2143{
2144 return p->pid == 0;
2145}
2146extern struct task_struct *curr_task(int cpu);
2147extern void set_curr_task(int cpu, struct task_struct *p);
2148
2149void yield(void);
2150
2151/*
2152 * The default (Linux) execution domain.
2153 */
2154extern struct exec_domain default_exec_domain;
2155
2156union thread_union {
2157 struct thread_info thread_info;
2158 unsigned long stack[THREAD_SIZE/sizeof(long)];
2159};
2160
2161#ifndef __HAVE_ARCH_KSTACK_END
2162static inline int kstack_end(void *addr)
2163{
2164 /* Reliable end of stack detection:
2165	 * Some APM BIOS versions misalign the stack
2166 */
2167 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2168}
2169#endif
2170
2171extern union thread_union init_thread_union;
2172extern struct task_struct init_task;
2173
2174extern struct mm_struct init_mm;
2175
2176extern struct pid_namespace init_pid_ns;
2177
2178/*
2179 * find a task by one of its numerical ids
2180 *
2181 * find_task_by_pid_ns():
2182 * finds a task by its pid in the specified namespace
2183 * find_task_by_vpid():
2184 * finds a task by its virtual pid
2185 *
2186 * see also find_vpid() etc in include/linux/pid.h
2187 */
2188
2189extern struct task_struct *find_task_by_vpid(pid_t nr);
2190extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2191 struct pid_namespace *ns);
2192
2193extern void __set_special_pids(struct pid *pid);
2194
2195/* per-UID process charging. */
2196extern struct user_struct * alloc_uid(kuid_t);
2197static inline struct user_struct *get_uid(struct user_struct *u)
2198{
2199 atomic_inc(&u->__count);
2200 return u;
2201}
2202extern void free_uid(struct user_struct *);
2203
2204#include <asm/current.h>
2205
2206extern void xtime_update(unsigned long ticks);
2207
2208extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2209extern int wake_up_process(struct task_struct *tsk);
2210extern void wake_up_new_task(struct task_struct *tsk);
2211#ifdef CONFIG_SMP
2212 extern void kick_process(struct task_struct *tsk);
2213#else
2214 static inline void kick_process(struct task_struct *tsk) { }
2215#endif
2216extern void sched_fork(struct task_struct *p);
2217extern void sched_dead(struct task_struct *p);
2218
2219extern void proc_caches_init(void);
2220extern void flush_signals(struct task_struct *);
2221extern void __flush_signals(struct task_struct *);
2222extern void ignore_signals(struct task_struct *);
2223extern void flush_signal_handlers(struct task_struct *, int force_default);
2224extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2225
2226static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2227{
2228 unsigned long flags;
2229 int ret;
2230
2231 spin_lock_irqsave(&tsk->sighand->siglock, flags);
2232 ret = dequeue_signal(tsk, mask, info);
2233 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2234
2235 return ret;
2236}
2237
2238extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2239 sigset_t *mask);
2240extern void unblock_all_signals(void);
2241extern void release_task(struct task_struct * p);
2242extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2243extern int force_sigsegv(int, struct task_struct *);
2244extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2245extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2246extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2247extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2248 const struct cred *, u32);
2249extern int kill_pgrp(struct pid *pid, int sig, int priv);
2250extern int kill_pid(struct pid *pid, int sig, int priv);
2251extern int kill_proc_info(int, struct siginfo *, pid_t);
2252extern __must_check bool do_notify_parent(struct task_struct *, int);
2253extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2254extern void force_sig(int, struct task_struct *);
2255extern int send_sig(int, struct task_struct *, int);
2256extern int zap_other_threads(struct task_struct *p);
2257extern struct sigqueue *sigqueue_alloc(void);
2258extern void sigqueue_free(struct sigqueue *);
2259extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2260extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2261extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2262
2263static inline void restore_saved_sigmask(void)
2264{
2265 if (test_and_clear_restore_sigmask())
2266		__set_current_blocked(&current->saved_sigmask);
2267}
2268
2269static inline sigset_t *sigmask_to_save(void)
2270{
2271	sigset_t *res = &current->blocked;
2272 if (unlikely(test_restore_sigmask()))
2273		res = &current->saved_sigmask;
2274 return res;
2275}
2276
2277static inline int kill_cad_pid(int sig, int priv)
2278{
2279 return kill_pid(cad_pid, sig, priv);
2280}
2281
2282/* These can be the second arg to send_sig_info/send_group_sig_info. */
2283#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2284#define SEND_SIG_PRIV ((struct siginfo *) 1)
2285#define SEND_SIG_FORCED ((struct siginfo *) 2)
2286
2287/*
2288 * True if we are on the alternate signal stack.
2289 */
2290static inline int on_sig_stack(unsigned long sp)
2291{
2292#ifdef CONFIG_STACK_GROWSUP
2293 return sp >= current->sas_ss_sp &&
2294 sp - current->sas_ss_sp < current->sas_ss_size;
2295#else
2296 return sp > current->sas_ss_sp &&
2297 sp - current->sas_ss_sp <= current->sas_ss_size;
2298#endif
2299}
2300
2301static inline int sas_ss_flags(unsigned long sp)
2302{
2303 return (current->sas_ss_size == 0 ? SS_DISABLE
2304 : on_sig_stack(sp) ? SS_ONSTACK : 0);
2305}
2306
2307/*
2308 * Routines for handling mm_structs
2309 */
2310extern struct mm_struct * mm_alloc(void);
2311
2312/* mmdrop drops the mm and the page tables */
2313extern void __mmdrop(struct mm_struct *);
2314static inline void mmdrop(struct mm_struct * mm)
2315{
2316 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2317 __mmdrop(mm);
2318}
2319
2320/* mmput gets rid of the mappings and all user-space */
2321extern void mmput(struct mm_struct *);
2322/* Grab a reference to a task's mm, if it is not already going away */
2323extern struct mm_struct *get_task_mm(struct task_struct *task);
2324/*
2325 * Grab a reference to a task's mm, if it is not already going away
2326 * and ptrace_may_access with the mode parameter passed to it
2327 * succeeds.
2328 */
2329extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2330/* Remove the current task's stale references to the old mm_struct */
2331extern void mm_release(struct task_struct *, struct mm_struct *);
2332/* Allocate a new mm structure and copy contents from tsk->mm */
2333extern struct mm_struct *dup_mm(struct task_struct *tsk);
2334
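/*
 * A sketch of borrowing another task's mm for inspection; get_task_mm()
 * returns NULL if the task has no mm or is already past exit_mm():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...inspect mm...
 *		mmput(mm);
 *	}
 */
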
2335extern int copy_thread(unsigned long, unsigned long, unsigned long,
2336 struct task_struct *);
2337extern void flush_thread(void);
2338extern void exit_thread(void);
2339
2340extern void exit_files(struct task_struct *);
2341extern void __cleanup_sighand(struct sighand_struct *);
2342
2343extern void exit_itimers(struct signal_struct *);
2344extern void flush_itimer_signals(void);
2345
2346extern void do_group_exit(int);
2347
2348extern int allow_signal(int);
2349extern int disallow_signal(int);
2350
2351extern int do_execve(const char *,
2352 const char __user * const __user *,
2353 const char __user * const __user *);
2354extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2355struct task_struct *fork_idle(int);
2356extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2357
2358extern void set_task_comm(struct task_struct *tsk, char *from);
2359extern char *get_task_comm(char *to, struct task_struct *tsk);
2360
2361#ifdef CONFIG_SMP
2362void scheduler_ipi(void);
2363extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2364#else
2365static inline void scheduler_ipi(void) { }
2366static inline unsigned long wait_task_inactive(struct task_struct *p,
2367 long match_state)
2368{
2369 return 1;
2370}
2371#endif
2372
2373#define next_task(p) \
2374 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2375
2376#define for_each_process(p) \
2377 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2378
2379extern bool current_is_single_threaded(void);
2380
2381/*
2382 * Careful: do_each_thread/while_each_thread is a double loop so
2383 * 'break' will not work as expected - use goto instead.
2384 */
2385#define do_each_thread(g, t) \
2386 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2387
2388#define while_each_thread(g, t) \
2389 while ((t = next_thread(t)) != g)
2390
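/*
 * A sketch of walking every thread of every process (assumption: the
 * caller holds read_lock(&tasklist_lock); note the goto, per the warning
 * above about 'break'):
 *
 *	struct task_struct *g, *t;
 *
 *	do_each_thread(g, t) {
 *		if (t->mm == mm)
 *			goto found;
 *	} while_each_thread(g, t);
 *	...not found...
 * found:
 *	...
 */
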
2391static inline int get_nr_threads(struct task_struct *tsk)
2392{
2393 return tsk->signal->nr_threads;
2394}
2395
2396static inline bool thread_group_leader(struct task_struct *p)
2397{
2398 return p->exit_signal >= 0;
2399}
2400
2401/* Due to the insanities of de_thread it is possible for a process
2402 * to have the pid of the thread group leader without actually being
2403 * the thread group leader. For iteration through the pids in proc
2404 * all we care about is that we have a task with the appropriate
2405 * pid; we don't actually care if we have the right task.
2406 */
2407static inline int has_group_leader_pid(struct task_struct *p)
2408{
2409 return p->pid == p->tgid;
2410}
2411
2412static inline
2413int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2414{
2415 return p1->tgid == p2->tgid;
2416}
2417
2418static inline struct task_struct *next_thread(const struct task_struct *p)
2419{
2420 return list_entry_rcu(p->thread_group.next,
2421 struct task_struct, thread_group);
2422}
2423
2424static inline int thread_group_empty(struct task_struct *p)
2425{
2426 return list_empty(&p->thread_group);
2427}
2428
2429#define delay_group_leader(p) \
2430 (thread_group_leader(p) && !thread_group_empty(p))
2431
2432/*
2433 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2434 * subscriptions and synchronises with wait4(). Also used in procfs. Also
2435 * pins the final release of task.io_context. Also protects ->cpuset and
2436 * ->cgroup.subsys[]. And ->vfork_done.
2437 *
2438 * Nests both inside and outside of read_lock(&tasklist_lock).
2439 * It must not be nested with write_lock_irq(&tasklist_lock),
2440 * neither inside nor outside.
2441 */
2442static inline void task_lock(struct task_struct *p)
2443{
2444 spin_lock(&p->alloc_lock);
2445}
2446
2447static inline void task_unlock(struct task_struct *p)
2448{
2449 spin_unlock(&p->alloc_lock);
2450}
2451
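/*
 * A sketch of using task_lock() to read a field it protects; this is
 * essentially what get_task_comm() does for ->comm:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(comm, p->comm, sizeof(comm));
 *	task_unlock(p);
 */
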
2452extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2453 unsigned long *flags);
2454
2455static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2456 unsigned long *flags)
2457{
2458 struct sighand_struct *ret;
2459
2460 ret = __lock_task_sighand(tsk, flags);
2461 (void)__cond_lock(&tsk->sighand->siglock, ret);
2462 return ret;
2463}
2464
2465static inline void unlock_task_sighand(struct task_struct *tsk,
2466 unsigned long *flags)
2467{
2468 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2469}
2470
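/*
 * A sketch of the usual pattern; __lock_task_sighand() returns NULL if
 * the task has already released its sighand_struct:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		...tsk->sighand and tsk->signal are stable here...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */
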
2471#ifdef CONFIG_CGROUPS
2472static inline void threadgroup_change_begin(struct task_struct *tsk)
2473{
2474 down_read(&tsk->signal->group_rwsem);
2475}
2476static inline void threadgroup_change_end(struct task_struct *tsk)
2477{
2478 up_read(&tsk->signal->group_rwsem);
2479}
2480
2481/**
2482 * threadgroup_lock - lock threadgroup
2483 * @tsk: member task of the threadgroup to lock
2484 *
2485 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2486 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2487 * perform exec. This is useful for cases where the threadgroup needs to
2488 * stay stable across blockable operations.
2489 *
2490 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2491 * synchronization. While held, no new task will be added to threadgroup
2492 * and no existing live task will have its PF_EXITING set.
2493 *
2494 * During exec, a task goes and puts its thread group through unusual
2495 * changes. After de-threading, exclusive access is assumed to resources
2496 * which are usually shared by tasks in the same group - e.g. sighand may
2497 * be replaced with a new one. Also, the exec'ing task takes over group
2498 * leader role including its pid. Exclude these changes while locked by
2499 * grabbing cred_guard_mutex, which is used to synchronize the exec path.
2500 */
2501static inline void threadgroup_lock(struct task_struct *tsk)
2502{
2503 /*
2504	 * exec uses exit for de-threading, nesting group_rwsem inside
2505 * cred_guard_mutex. Grab cred_guard_mutex first.
2506 */
2507 mutex_lock(&tsk->signal->cred_guard_mutex);
2508 down_write(&tsk->signal->group_rwsem);
2509}
2510
2511/**
2512 * threadgroup_unlock - unlock threadgroup
2513 * @tsk: member task of the threadgroup to unlock
2514 *
2515 * Reverse threadgroup_lock().
2516 */
2517static inline void threadgroup_unlock(struct task_struct *tsk)
2518{
2519 up_write(&tsk->signal->group_rwsem);
2520 mutex_unlock(&tsk->signal->cred_guard_mutex);
2521}
2522#else
2523static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2524static inline void threadgroup_change_end(struct task_struct *tsk) {}
2525static inline void threadgroup_lock(struct task_struct *tsk) {}
2526static inline void threadgroup_unlock(struct task_struct *tsk) {}
2527#endif
2528
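/*
 * A sketch of keeping a thread group stable across a blocking operation
 * (e.g. migrating all of its threads to another cgroup):
 *
 *	threadgroup_lock(tsk);
 *	...no task can enter or leave tsk's thread group here...
 *	threadgroup_unlock(tsk);
 */
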
2529#ifndef __HAVE_THREAD_FUNCTIONS
2530
2531#define task_thread_info(task) ((struct thread_info *)(task)->stack)
2532#define task_stack_page(task) ((task)->stack)
2533
2534static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2535{
2536 *task_thread_info(p) = *task_thread_info(org);
2537 task_thread_info(p)->task = p;
2538}
2539
2540static inline unsigned long *end_of_stack(struct task_struct *p)
2541{
2542 return (unsigned long *)(task_thread_info(p) + 1);
2543}
2544
2545#endif
2546
2547static inline int object_is_on_stack(void *obj)
2548{
2549 void *stack = task_stack_page(current);
2550
2551 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2552}
2553
2554extern void thread_info_cache_init(void);
2555
2556#ifdef CONFIG_DEBUG_STACK_USAGE
2557static inline unsigned long stack_not_used(struct task_struct *p)
2558{
2559 unsigned long *n = end_of_stack(p);
2560
2561 do { /* Skip over canary */
2562 n++;
2563 } while (!*n);
2564
2565 return (unsigned long)n - (unsigned long)end_of_stack(p);
2566}
2567#endif
2568
2569/* Set thread flags in another task's structures
2570 * - see asm/thread_info.h for TIF_xxxx flags available
2571 */
2572static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2573{
2574 set_ti_thread_flag(task_thread_info(tsk), flag);
2575}
2576
2577static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2578{
2579 clear_ti_thread_flag(task_thread_info(tsk), flag);
2580}
2581
2582static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2583{
2584 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2585}
2586
2587static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2588{
2589 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2590}
2591
2592static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2593{
2594 return test_ti_thread_flag(task_thread_info(tsk), flag);
2595}
2596
2597static inline void set_tsk_need_resched(struct task_struct *tsk)
2598{
2599 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2600}
2601
2602static inline void clear_tsk_need_resched(struct task_struct *tsk)
2603{
2604 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2605}
2606
2607static inline int test_tsk_need_resched(struct task_struct *tsk)
2608{
2609 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2610}
2611
2612static inline int restart_syscall(void)
2613{
2614 set_tsk_thread_flag(current, TIF_SIGPENDING);
2615 return -ERESTARTNOINTR;
2616}
2617
2618static inline int signal_pending(struct task_struct *p)
2619{
2620 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2621}
2622
2623static inline int __fatal_signal_pending(struct task_struct *p)
2624{
2625 return unlikely(sigismember(&p->pending.signal, SIGKILL));
2626}
2627
2628static inline int fatal_signal_pending(struct task_struct *p)
2629{
2630 return signal_pending(p) && __fatal_signal_pending(p);
2631}
2632
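/*
 * A sketch of a killable polling wait built from these tests (assumption:
 * done_condition() is a placeholder for whatever the caller waits on):
 *
 *	while (!done_condition()) {
 *		if (fatal_signal_pending(current))
 *			return -EINTR;
 *		schedule_timeout_interruptible(HZ / 10);
 *	}
 */
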
2633static inline int signal_pending_state(long state, struct task_struct *p)
2634{
2635 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2636 return 0;
2637 if (!signal_pending(p))
2638 return 0;
2639
2640 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2641}
2642
2643static inline int need_resched(void)
2644{
2645 return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2646}
2647
2648/*
2649 * cond_resched() and cond_resched_lock(): latency reduction via
2650 * explicit rescheduling in places that are safe. The return
2651 * value indicates whether a reschedule was done in fact.
2652 * cond_resched_lock() will drop the spinlock before scheduling,
2653 * cond_resched_softirq() will enable bhs before scheduling.
2654 */
2655extern int _cond_resched(void);
2656
2657#define cond_resched() ({ \
2658 __might_sleep(__FILE__, __LINE__, 0); \
2659 _cond_resched(); \
2660})
2661
2662extern int __cond_resched_lock(spinlock_t *lock);
2663
2664#ifdef CONFIG_PREEMPT_COUNT
2665#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2666#else
2667#define PREEMPT_LOCK_OFFSET 0
2668#endif
2669
2670#define cond_resched_lock(lock) ({ \
2671 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2672 __cond_resched_lock(lock); \
2673})
2674
2675extern int __cond_resched_softirq(void);
2676
2677#define cond_resched_softirq() ({ \
2678 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2679 __cond_resched_softirq(); \
2680})
2681
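/*
 * A sketch of breaking up a long-running loop in process context so that
 * other tasks get a chance to run (process_item(), items and nr_items are
 * placeholders):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);
 *		cond_resched();
 *	}
 */
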
2682/*
2683 * Does a critical section need to be broken due to another
2684 * task waiting?  (Technically this does not depend on CONFIG_PREEMPT,
2685 * but it reflects a general need for low latency.)
2686 */
2687static inline int spin_needbreak(spinlock_t *lock)
2688{
2689#ifdef CONFIG_PREEMPT
2690 return spin_is_contended(lock);
2691#else
2692 return 0;
2693#endif
2694}
2695
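/*
 * A sketch of combining spin_needbreak() with cond_resched_lock(); because
 * the lock may be dropped and re-taken, each iteration must re-validate
 * the state it relies on (the list-head form below does that naturally;
 * @q is a hypothetical queue):
 *
 *	spin_lock(&q->lock);
 *	while (!list_empty(&q->head)) {
 *		...remove and handle the first entry...
 *		if (spin_needbreak(&q->lock))
 *			cond_resched_lock(&q->lock);
 *	}
 *	spin_unlock(&q->lock);
 */
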
2696/*
2697 * Thread group CPU time accounting.
2698 */
2699void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2700void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2701
2702static inline void thread_group_cputime_init(struct signal_struct *sig)
2703{
2704 raw_spin_lock_init(&sig->cputimer.lock);
2705}
2706
2707/*
2708 * Reevaluate whether the task has signals pending delivery.
2709 * Wake the task if so.
2710 * This is required every time the blocked sigset_t changes.
2711 * Callers must hold sighand->siglock.
2712 */
2713extern void recalc_sigpending_and_wake(struct task_struct *t);
2714extern void recalc_sigpending(void);
2715
2716extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2717
2718/*
2719 * Wrappers for p->thread_info->cpu access. No-op on UP.
2720 */
2721#ifdef CONFIG_SMP
2722
2723static inline unsigned int task_cpu(const struct task_struct *p)
2724{
2725 return task_thread_info(p)->cpu;
2726}
2727
2728extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2729
2730#else
2731
2732static inline unsigned int task_cpu(const struct task_struct *p)
2733{
2734 return 0;
2735}
2736
2737static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2738{
2739}
2740
2741#endif /* CONFIG_SMP */
2742
2743extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2744extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2745
2746extern void normalize_rt_tasks(void);
2747
2748#ifdef CONFIG_CGROUP_SCHED
2749
2750extern struct task_group root_task_group;
2751
2752extern struct task_group *sched_create_group(struct task_group *parent);
2753extern void sched_destroy_group(struct task_group *tg);
2754extern void sched_move_task(struct task_struct *tsk);
2755#ifdef CONFIG_FAIR_GROUP_SCHED
2756extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2757extern unsigned long sched_group_shares(struct task_group *tg);
2758#endif
2759#ifdef CONFIG_RT_GROUP_SCHED
2760extern int sched_group_set_rt_runtime(struct task_group *tg,
2761 long rt_runtime_us);
2762extern long sched_group_rt_runtime(struct task_group *tg);
2763extern int sched_group_set_rt_period(struct task_group *tg,
2764 long rt_period_us);
2765extern long sched_group_rt_period(struct task_group *tg);
2766extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2767#endif
2768#endif /* CONFIG_CGROUP_SCHED */
2769
2770extern int task_can_switch_user(struct user_struct *up,
2771 struct task_struct *tsk);
2772
2773#ifdef CONFIG_TASK_XACCT
2774static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2775{
2776 tsk->ioac.rchar += amt;
2777}
2778
2779static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2780{
2781 tsk->ioac.wchar += amt;
2782}
2783
2784static inline void inc_syscr(struct task_struct *tsk)
2785{
2786 tsk->ioac.syscr++;
2787}
2788
2789static inline void inc_syscw(struct task_struct *tsk)
2790{
2791 tsk->ioac.syscw++;
2792}
2793#else
2794static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2795{
2796}
2797
2798static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2799{
2800}
2801
2802static inline void inc_syscr(struct task_struct *tsk)
2803{
2804}
2805
2806static inline void inc_syscw(struct task_struct *tsk)
2807{
2808}
2809#endif
2810
2811#ifndef TASK_SIZE_OF
2812#define TASK_SIZE_OF(tsk) TASK_SIZE
2813#endif
2814
2815#ifdef CONFIG_MM_OWNER
2816extern void mm_update_next_owner(struct mm_struct *mm);
2817extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2818#else
2819static inline void mm_update_next_owner(struct mm_struct *mm)
2820{
2821}
2822
2823static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2824{
2825}
2826#endif /* CONFIG_MM_OWNER */
2827
2828static inline unsigned long task_rlimit(const struct task_struct *tsk,
2829 unsigned int limit)
2830{
2831 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2832}
2833
2834static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2835 unsigned int limit)
2836{
2837 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2838}
2839
2840static inline unsigned long rlimit(unsigned int limit)
2841{
2842 return task_rlimit(current, limit);
2843}
2844
2845static inline unsigned long rlimit_max(unsigned int limit)
2846{
2847 return task_rlimit_max(current, limit);
2848}
2849
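/*
 * A sketch of checking one of the caller's limits (nr_open_files is a
 * placeholder for whatever count is being enforced):
 *
 *	if (nr_open_files >= rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */
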
2850#endif