#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/path.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
struct fs_struct;
struct bts_context;
struct perf_counter_context;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
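
/*
 * Illustrative sketch (editor's addition, not from the original header):
 * an NPTL-style thread combines the sharing flags above; roughly what
 * glibc's pthread_create() passes to clone():
 *
 *	clone(thread_fn, child_stack,
 *	      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *	      CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *	      CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 *	      arg, &ptid, tls, &ctid);
 */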

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
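
/*
 * Illustrative sketch (editor's addition): applying CALC_LOAD once per
 * LOAD_FREQ interval and decoding the fixed-point result for display;
 * 'nr_active_tasks' is a placeholder for the runnable + uninterruptible
 * count, and the decode helpers mirror fs/proc/loadavg.c:
 *
 *	unsigned long active = nr_active_tasks * FIXED_1;
 *
 *	CALC_LOAD(avenrun[0], EXP_1, active);	(1-minute average)
 *	CALC_LOAD(avenrun[1], EXP_5, active);	(5-minute average)
 *	CALC_LOAD(avenrun[2], EXP_15, active);	(15-minute average)
 *
 *	#define LOAD_INT(x)  ((x) >> FSHIFT)
 *	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 */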

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern void calc_global_load(void);
extern u64 cpu_nr_migrations(int cpu);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

extern unsigned long long time_sync_thresh;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task) \
	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
	 (task->flags & PF_FREEZING) == 0)

#define __set_task_state(tsk, state_value) \
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state().
 */
#define __set_current_state(state_value) \
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
	set_mb(current->state, (state_value))
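
/*
 * Illustrative sketch (editor's addition): the full sleep pattern built
 * on the barrier semantics above, using the prepare_to_wait()/
 * finish_wait() helpers from <linux/wait.h>; 'wq' and 'condition' are
 * placeholders:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */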

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(void);
extern void task_rq_unlock_wait(struct task_struct *p);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
extern int get_nohz_load_balancer(void);
#else
static inline int select_nohz_load_balancer(int cpu)
{
	return 0;
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
				    struct file *filp, void __user *buffer,
				    size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 struct file *filp, void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void __schedule(void);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can overwrite this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, one section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short,
 * so the number of sections must stay below 65535 at coredump time. Because
 * the kernel adds some informative sections to the image of the program when
 * generating the coredump, we need some margin. The number of extra sections
 * is currently 1-3 and depends on the arch; we use "5" as a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;
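
/*
 * Editor's note: with USHORT_MAX == 65535 the default above works out
 * to 65530 vmas per mm; the limit is exposed to userspace as the
 * vm.max_map_count sysctl (/proc/sys/vm/max_map_count).
 */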

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if USE_SPLIT_PTLOCKS
/*
 * The mm counters are not protected by the mm's page_table_lock,
 * so they must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)

#else  /* !USE_SPLIT_PTLOCKS */
/*
 * The mm counters are protected by the mm's page_table_lock,
 * so they can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--

#endif /* !USE_SPLIT_PTLOCKS */

#define get_mm_rss(mm) \
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}
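
/*
 * Illustrative sketch (editor's addition): mm/memory.c bumps these
 * counters on fault paths and snapshots the peak before an unmap
 * shrinks the rss:
 *
 *	inc_mm_counter(mm, anon_rss);		(new anonymous page)
 *
 *	update_hiwater_rss(mm);			(before unmapping)
 *	rss = get_mm_rss(mm);			(file_rss + anon_rss)
 */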

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
#define MMF_DUMPABLE_BITS 2

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}
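
/*
 * Illustrative sketch (editor's addition): INIT_CPUTIME yields a zeroed
 * accumulator for summing per-thread times:
 *
 *	struct task_cputime totals = INIT_CPUTIME;
 *
 *	totals.utime = cputime_add(totals.utime, t->utime);
 *	totals.stime = cputime_add(totals.stime, t->stime);
 *	totals.sum_exec_runtime += t->se.sum_exec_runtime;
 */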

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 *			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	spinlock_t lock;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}
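
/*
 * Illustrative sketch (editor's addition): callers normally sample this
 * under the sighand lock so the answer cannot change mid-check:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	exiting = signal_group_exit(tsk->signal);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */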

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_EPOLL
	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kobject kobj;
	struct delayed_work work;
#endif
#endif

#ifdef CONFIG_PERF_COUNTERS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats */
	unsigned int bkl_count;
#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add the following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count is protected by the
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, and the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */
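
/*
 * Illustrative sketch (editor's addition): the XXX_start/XXX_end
 * pattern described above, for the blkio stat; kernel/delayacct.c
 * implements the real version:
 *
 *	ktime_get_ts(&delays->blkio_start);	(about to block)
 *	... synchronous block I/O completes ...
 *	ktime_get_ts(&delays->blkio_end);
 *	delays->blkio_delay += timespec_to_ns(&delays->blkio_end) -
 *			       timespec_to_ns(&delays->blkio_start);
 *	delays->blkio_count++;
 */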

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/*
 * Increase resolution of nice-level calculations:
 */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		1024	/* Only a single load balancing instance */
#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return 0;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return 0;
}

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU. This is read only (except for setup, hotplug CPU).
	 * Note: never change cpu_power without recomputing its reciprocal.
	 */
	unsigned int __cpu_power;
	/*
	 * reciprocal value of cpu_power to avoid expensive divides
	 * (see include/linux/reciprocal_div.h)
	 */
	u32 reciprocal_cpu_power;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_group' in kernel/sched.c)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}
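
/*
 * Illustrative sketch (editor's addition): because cpumask[] above is a
 * zero-length array, a dynamically allocated group must reserve the
 * mask storage behind the struct:
 *
 *	struct sched_group *sg;
 *
 *	sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
 *	if (sg)
 *		cpumask_copy(sched_group_cpus(sg), cpu_possible_mask);
 */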

enum sched_domain_level {
	SD_LV_NONE = 0,
	SD_LV_SIBLING,
	SD_LV_MC,
	SD_LV_CPU,
	SD_LV_NODE,
	SD_LV_ALLNODES,
	SD_LV_MAX
};

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	int flags;			/* See SD_* */
	enum sched_domain_level level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
				    struct sched_domain_attr *dattr_new);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */

struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sync);

	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	int (*needs_post_schedule) (struct rq *this_rq);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_new) (struct rq *rq, struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
			       int running);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
			     int running);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio, int running);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*moved_group) (struct task_struct *p);
#endif
};
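
/*
 * Illustrative sketch (editor's addition): the core scheduler walks the
 * classes in priority order via ->next; pick_next_task() in
 * kernel/sched.c is essentially:
 *
 *	for (class = sched_class_highest; class; class = class->next) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */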

struct load_weight {
	unsigned long weight, inv_weight;
};

/*
 * CFS stats for a schedulable entity (task, task-group etc.)
 *
 * Current field usage histogram:
 *
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
 *     6 se->load.weight
 */
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			last_wakeup;
	u64			avg_overlap;

	u64			nr_migrations;

	u64			start_runtime;
	u64			avg_wakeup;

#ifdef CONFIG_SCHEDSTATS
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;
	u64			nr_forced2_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

	int lock_depth;		/* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	int oncpu;
#endif
#endif

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * in which the FPU is used. If this is over a threshold, the lazy
	 * FPU saving becomes unlazy to save the trap. This is an unsigned
	 * char so that after 256 times the counter wraps and the behavior
	 * turns lazy again; this is to deal with bursty apps that only use
	 * the FPU for a short time.
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	int rcu_flipctr_idx;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	struct plist_node pushable_tasks;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	pid_t pid;
	pid_t tgid;

	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process */
	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/*
	 * This is the tracer handle for the ptrace BTS extension.
	 * This field actually belongs to the ptracer task.
	 */
	struct bts_context *bts;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	cputime_t prev_utime, prev_stime;
	unsigned long nvcsw, nivcsw;	/* context switch counts */
	struct timespec start_time;		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred *real_cred;	/* objective and real subjective task
					 * credentials (COW) */
	const struct cred *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */

	char comm[TASK_COMM_LEN];	/* executable name excluding path
					   - access with [gs]et_task_comm (which
					     locks it with task_lock())
					   - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	uid_t loginuid;
	unsigned int sessionid;
#endif
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

#ifdef CONFIG_GENERIC_HARDIRQS
	/* IRQ handler threads */
	struct irqaction *irqaction;
#endif

	/* Protection of the PI data structures: */
	spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio *bio_list, **bio_tail;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_COUNTERS
	struct perf_counter_context *perf_counter_ctxp;
	struct mutex perf_counter_mutex;
	struct list_head perf_counter_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

	struct list_head	*scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}
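
/*
 * Illustrative sketch (editor's addition): with the values above, a
 * SCHED_NORMAL task at nice 0 runs at p->prio == DEFAULT_PRIO == 120,
 * and the nice range -20..19 maps onto prio 100..139 (NICE_TO_PRIO()
 * in kernel/sched.c):
 *
 *	prio = MAX_RT_PRIO + nice + 20;
 */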

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
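
/*
 * Illustrative sketch (editor's addition): which helper to use depends
 * on who will see the number, e.g. when reporting about 'task' to a
 * reader in current's pid namespace:
 *
 *	pid_t vpid = task_pid_vnr(task);	(as current sees it)
 *	pid_t gpid = task_pid_nr(task);		(as the init ns sees it)
 */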

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
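
/*
 * Illustrative sketch (editor's addition): the usage count pairs in the
 * obvious way; take a reference before a task pointer escapes a locked
 * region:
 *
 *	get_task_struct(p);
 *	... use p after the lock is dropped ...
 *	put_task_struct(p);	(may free the task_struct)
 */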

extern cputime_t task_utime(struct task_struct *p);
extern cputime_t task_stime(struct task_struct *p);
extern cputime_t task_gtime(struct task_struct *p);

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
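
/*
 * Illustrative sketch (editor's addition): the lazy-FPU convention that
 * PF_USED_MATH encodes, as a hypothetical arch trap handler would apply
 * it (init_fpu_state() is a made-up helper name):
 *
 *	if (!used_math()) {
 *		init_fpu_state(current);	(first FPU use by this task)
 *		set_used_math();
 *	}
 */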
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
        conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
                                const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
                                       const struct cpumask *new_mask)
{
        if (!cpumask_test_cpu(0, new_mask))
                return -EINVAL;
        return 0;
}
#endif
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
        return set_cpus_allowed_ptr(p, &new_mask);
}
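/*
 * Example (editorial sketch, not part of the original header): binding a
 * task to a single CPU with the cpumask-based interface above; cpumask_of()
 * comes from linux/cpumask.h.
 */
#if 0   /* illustration only */
static int bind_to_cpu(struct task_struct *tsk, int cpu)
{
        /* returns 0 on success, -EINVAL if cpu is not allowed */
        return set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}
#endif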
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
extern int sched_clock_stable;
#endif

extern unsigned long long sched_clock(void);

extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
 * clock constructed from sched_clock():
 */
extern unsigned long long cpu_clock(int cpu);

extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
extern void wake_up_idle_cpu(int cpu);
#else
static inline void wake_up_idle_cpu(int cpu) { }
#endif

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_timer_migration;

int sched_nr_latency_handler(struct ctl_table *table, int write,
                struct file *file, void __user *buffer, size_t *length,
                loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_DEBUG
static inline unsigned int get_sysctl_timer_migration(void)
{
        return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
        return 1;
}
#endif
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

int sched_rt_handler(struct ctl_table *table, int write,
                struct file *filp, void __user *buffer, size_t *lenp,
                loff_t *ppos);

extern unsigned int sysctl_sched_compat_yield;

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
        return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)          do { } while (0)
#endif

extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
                                      struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain       default_exec_domain;

union thread_union {
        struct thread_info thread_info;
        unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
        /* Reliable end of stack detection:
         * Some APM bios versions misalign the stack
         */
        return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
                struct pid_namespace *ns);

extern void __set_special_pids(struct pid *pid);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
        atomic_inc(&u->__count);
        return u;
}
extern void free_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);
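/*
 * Example (editorial sketch, not part of the original header): get_uid()
 * and free_uid() pair like get/put.  A structure that caches a user_struct
 * takes a reference at setup and releases it at teardown; current_user()
 * (from linux/cred.h) is assumed here for the current task's user.
 */
#if 0   /* illustration only */
struct my_ctx {                         /* hypothetical example structure */
        struct user_struct *user;
};

static void my_ctx_init(struct my_ctx *ctx)
{
        ctx->user = get_uid(current_user());   /* bumps __count */
}

static void my_ctx_destroy(struct my_ctx *ctx)
{
        free_uid(ctx->user);                   /* drops __count, may free */
}
#endif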
#include <asm/current.h>

extern void do_timer(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk,
                                unsigned long clone_flags);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        ret = dequeue_signal(tsk, mask, info);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

        return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
                              sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
        return kill_pid(cad_pid, sig, priv);
}
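/*
 * Example (editorial sketch, not part of the original header): delivering
 * SIGTERM to a process by pid number via the struct pid interfaces above.
 * find_vpid() comes from include/linux/pid.h and must be called under
 * rcu_read_lock(); priv = 1 marks the signal as kernel-originated.
 */
#if 0   /* illustration only */
static int terminate_by_vpid(pid_t nr)
{
        int ret;

        rcu_read_lock();
        ret = kill_pid(find_vpid(nr), SIGTERM, 1);
        rcu_read_unlock();
        return ret;
}
#endif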
/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV   ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
        return info <= SEND_SIG_FORCED;
}

/* True if we are on the alternate signal stack. */

static inline int on_sig_stack(unsigned long sp)
{
        return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
        return (current->sas_ss_size == 0 ? SS_DISABLE
                : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
                __mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
                        struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_context_switch(struct task_struct *p);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void wait_task_context_switch(struct task_struct *p) {}
static inline unsigned long wait_task_inactive(struct task_struct *p,
                                               long match_state)
{
        return 1;
}
#endif

#define next_task(p) \
        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool is_single_threaded(struct task_struct *);
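/*
 * Example (editorial sketch, not part of the original header): the usual
 * get_task_mm()/mmput() pattern for safely inspecting another task's mm.
 * get_task_mm() returns NULL for kernel threads or when the mm is going
 * away, so the result must always be checked.
 */
#if 0   /* illustration only */
static unsigned long task_total_vm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);
        unsigned long total_vm = 0;

        if (mm) {
                total_vm = mm->total_vm;
                mmput(mm);              /* drop the mm_users reference */
        }
        return total_vm;
}
#endif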
/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
        for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)  (p == p->group_leader)

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
        return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
        return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
        return list_entry_rcu(p->thread_group.next,
                              struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
        return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))

static inline int task_detached(struct task_struct *p)
{
        return p->exit_signal == -1;
}
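/*
 * Example (editorial sketch, not part of the original header): walking
 * every thread in the system under tasklist_lock.  Note the goto: as the
 * comment above do_each_thread warns, 'break' cannot exit the double loop.
 * some_predicate() is a hypothetical test; a real caller would also
 * get_task_struct(found) before dropping the lock.
 */
#if 0   /* illustration only */
static struct task_struct *find_some_thread(void)
{
        struct task_struct *g, *t, *found = NULL;

        read_lock(&tasklist_lock);
        do_each_thread(g, t) {
                if (some_predicate(t)) {
                        found = t;
                        goto out;
                }
        } while_each_thread(g, t);
out:
        read_unlock(&tasklist_lock);
        return found;
}
#endif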
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
                                                        unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
                                                unsigned long *flags)
{
        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)  ((struct thread_info *)(task)->stack)
#define task_stack_page(task)   ((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
        *task_thread_info(p) = *task_thread_info(org);
        task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
        return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
        void *stack = task_stack_page(current);

        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
        unsigned long *n = end_of_stack(p);

        do {    /* Skip over canary */
                n++;
        } while (!*n);

        return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

extern int __fatal_signal_pending(struct task_struct *p);

static inline int fatal_signal_pending(struct task_struct *p)
{
        return signal_pending(p) && __fatal_signal_pending(p);
}
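/*
 * Example (editorial sketch, not part of the original header): a typical
 * interruptible wait loop that backs out when a signal arrives, using the
 * predicates above.  schedule_timeout_interruptible() is declared earlier
 * in this header; the polling interval is arbitrary.
 */
#if 0   /* illustration only */
static int wait_until_done(int *done)
{
        while (!*done) {
                if (fatal_signal_pending(current))
                        return -EINTR;          /* killed: give up now */
                if (signal_pending(current))
                        return -ERESTARTSYS;    /* restartable interruption */
                schedule_timeout_interruptible(HZ / 10);
        }
        return 0;
}
#endif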
static inline int signal_pending_state(long state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

static inline int need_resched(void)
{
        return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was actually done.
 * cond_resched_lock() will drop the spinlock before scheduling;
 * cond_resched_softirq() will enable bottom halves before scheduling.
 */
extern int _cond_resched(void);
#ifdef CONFIG_PREEMPT_BKL
static inline int cond_resched(void)
{
        return 0;
}
#else
static inline int cond_resched(void)
{
        return _cond_resched();
}
#endif
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);
static inline int cond_resched_bkl(void)
{
        return _cond_resched();
}
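/*
 * Example (editorial sketch, not part of the original header): sprinkling
 * cond_resched() into a long-running loop keeps latency down on
 * non-preemptible kernels.  The item type and helper are hypothetical.
 */
#if 0   /* illustration only */
static void churn_many_items(struct my_item *items, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                process_one(&items[i]); /* hypothetical per-item work */
                cond_resched();         /* safe point to reschedule */
        }
}
#endif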
/*
 * Does a critical section need to be broken due to another task waiting?
 * (Technically this does not depend on CONFIG_PREEMPT; it reflects a
 * general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
        return spin_is_contended(lock);
#else
        return 0;
#endif
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
        sig->cputimer.cputime = INIT_CPUTIME;
        spin_lock_init(&sig->cputimer.lock);
        sig->cputimer.running = 0;
}

static inline void thread_group_cputime_free(struct signal_struct *sig)
{
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access.  No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern void arch_pick_mmap_layout(struct mm_struct *mm);

#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
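/*
 * Example (editorial sketch, not part of the original header): building a
 * cpumask and applying it with sched_setaffinity(), the kernel-side
 * counterpart of the sched_setaffinity(2) syscall.  GFP_KERNEL comes from
 * linux/gfp.h; the choice of CPUs 0-1 is arbitrary.
 */
#if 0   /* illustration only */
static long restrict_to_first_two_cpus(pid_t pid)
{
        cpumask_var_t mask;
        long ret = -ENOMEM;

        if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
                cpumask_clear(mask);
                cpumask_set_cpu(0, mask);
                cpumask_set_cpu(1, mask);
                ret = sched_setaffinity(pid, mask);
                free_cpumask_var(mask);
        }
        return ret;
}
#endif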
extern void normalize_rt_tasks(void);

#ifdef CONFIG_GROUP_SCHED

extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
extern void set_tg_uid(struct user_struct *user);
#endif

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
                                      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
                                     long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif

extern int task_can_switch_user(struct user_struct *up,
                                struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
        tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
        tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
        tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
        tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)       TASK_SIZE
#endif

/*
 * Call the function if the target task is executing on a CPU right now:
 */
extern void task_oncpu_function_call(struct task_struct *p,
                                     void (*func) (void *info), void *info);


#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

#endif /* __KERNEL__ */

#endif