#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* Used in tsk->exit_state: */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
#define TASK_NEW 2048
#define TASK_STATE_MAX 4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
                     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
                     __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
                                        (task->flags & PF_FROZEN) == 0 && \
                                        (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value) \
        do { \
                current->task_state_change = _THIS_IP_; \
                current->state = (state_value); \
        } while (0)
#define set_current_state(state_value) \
        do { \
                current->task_state_change = _THIS_IP_; \
                smp_store_mb(current->state, (state_value)); \
        } while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      if (!need_sleep)
 *              break;
 *
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *      need_sleep = false;
 *      wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
#endif

/* Task command name length: */
#define TASK_COMM_LEN 16

extern cpumask_var_t cpu_isolated_map;

extern void scheduler_tick(void);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
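
/*
 * Illustrative sketch only (not part of this header): io_schedule_prepare()
 * and io_schedule_finish() bracket a blocking region so that the sleep is
 * accounted as I/O wait without calling io_schedule() directly, much like
 * mutex_lock_io() wraps a plain mutex_lock().  'some_lock' below is an
 * assumed caller-provided mutex:
 *
 *      int token;
 *
 *      token = io_schedule_prepare();
 *      mutex_lock(&some_lock);
 *      io_schedule_finish(token);
 */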

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        u64 utime;
        u64 stime;
        raw_spinlock_t lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime: time spent in user mode, in nanoseconds
 * @stime: time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
        u64 utime;
        u64 stime;
        unsigned long long sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp utime
#define prof_exp stime
#define sched_exp sum_exec_runtime

struct sched_info {
#ifdef CONFIG_SCHED_INFO
        /* Cumulative counters: */

        /* # of times we have run on this CPU: */
        unsigned long pcount;

        /* Time spent waiting on a runqueue: */
        unsigned long long run_delay;

        /* Timestamps: */

        /* When did we last run on a CPU? */
        unsigned long long last_arrival;

        /* When were we last queued to run? */
        unsigned long long last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

struct load_weight {
        unsigned long weight;
        u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
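
/*
 * Worked example (illustrative only): with SCHED_FIXEDPOINT_SHIFT = 10 the
 * ratios above are expressed against a scale of 1024 (assuming the usual
 * SCHED_CAPACITY_SCALE of 1024). An entity that is running 25% of the time
 * on a CPU at full capacity and full frequency therefore converges towards
 * roughly:
 *
 *   util_avg = 0.25 * SCHED_CAPACITY_SCALE = 0.25 * 1024 = 256
 *
 * and the same entity on a CPU clocked at half its maximum frequency would
 * converge towards about 0.25 * 1024 * 0.5 = 128.
 */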
struct sched_avg {
        u64 last_update_time;
        u64 load_sum;
        u32 util_sum;
        u32 period_contrib;
        unsigned long load_avg;
        unsigned long util_avg;
};

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
        u64 wait_start;
        u64 wait_max;
        u64 wait_count;
        u64 wait_sum;
        u64 iowait_count;
        u64 iowait_sum;

        u64 sleep_start;
        u64 sleep_max;
        s64 sum_sleep_runtime;

        u64 block_start;
        u64 block_max;
        u64 exec_max;
        u64 slice_max;

        u64 nr_migrations_cold;
        u64 nr_failed_migrations_affine;
        u64 nr_failed_migrations_running;
        u64 nr_failed_migrations_hot;
        u64 nr_forced_migrations;

        u64 nr_wakeups;
        u64 nr_wakeups_sync;
        u64 nr_wakeups_migrate;
        u64 nr_wakeups_local;
        u64 nr_wakeups_remote;
        u64 nr_wakeups_affine;
        u64 nr_wakeups_affine_attempts;
        u64 nr_wakeups_passive;
        u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
        /* For load-balancing: */
        struct load_weight load;
        struct rb_node run_node;
        struct list_head group_node;
        unsigned int on_rq;

        u64 exec_start;
        u64 sum_exec_runtime;
        u64 vruntime;
        u64 prev_sum_exec_runtime;

        u64 nr_migrations;

        struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
        int depth;
        struct sched_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct cfs_rq *cfs_rq;
        /* rq "owned" by this entity/group: */
        struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
        /*
         * Per entity load average tracking.
         *
         * Put into separate cache line so it does not
         * collide with read-mostly values above.
         */
        struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned long watchdog_stamp;
        unsigned int time_slice;
        unsigned short on_rq;
        unsigned short on_list;

        struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct rt_rq *rt_rq;
        /* rq "owned" by this entity/group: */
        struct rt_rq *my_q;
#endif
};

struct sched_dl_entity {
        struct rb_node rb_node;

        /*
         * Original scheduling parameters. Copied here from sched_attr
         * during sched_setattr(), they will remain the same until
         * the next sched_setattr().
         */
        u64 dl_runtime;  /* Maximum runtime for each instance */
        u64 dl_deadline; /* Relative deadline of each instance */
        u64 dl_period;   /* Separation of two instances (period) */
        u64 dl_bw;       /* dl_runtime / dl_deadline */

        /*
         * Actual scheduling parameters. Initialized with the values above,
         * they are continuously updated during task execution. Note that
         * the remaining runtime could be < 0 in case we are in overrun.
         */
        s64 runtime;        /* Remaining runtime for this instance */
        u64 deadline;       /* Absolute deadline for this instance */
        unsigned int flags; /* Specifying the scheduler behaviour */

        /*
         * Some bool flags:
         *
         * @dl_throttled tells if we exhausted the runtime. If so, the
         * task has to wait for a replenishment to be performed at the
         * next firing of dl_timer.
         *
         * @dl_boosted tells if we are boosted due to DI (deadline
         * inheritance). If so we are outside the bandwidth enforcement
         * mechanism (but only until we exit the critical section);
         *
         * @dl_yielded tells if task gave up the CPU before consuming
         * all its available runtime during the last job.
         */
        int dl_throttled;
        int dl_boosted;
        int dl_yielded;

        /*
         * Bandwidth enforcement timer. Each -deadline task has its
         * own bandwidth to be enforced, thus we need one timer per task.
         */
        struct hrtimer dl_timer;
};
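
/*
 * Worked example (illustrative only, hypothetical values): a task asking for
 * dl_runtime = 10 ms of budget every dl_period = 100 ms, with
 * dl_deadline = dl_period, reserves dl_runtime / dl_deadline = 10% of a CPU.
 * Such a reservation would be requested with a sched_attr along the lines of
 * the sketch below (all times in nanoseconds), passed to the sched_setattr()
 * declared further down in this header or to the corresponding syscall:
 *
 *      struct sched_attr attr = {
 *              .size           = sizeof(attr),
 *              .sched_policy   = SCHED_DEADLINE,
 *              .sched_runtime  = 10 * 1000 * 1000,
 *              .sched_deadline = 100 * 1000 * 1000,
 *              .sched_period   = 100 * 1000 * 1000,
 *      };
 */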

union rcu_special {
        struct {
                u8 blocked;
                u8 need_qs;
                u8 exp_need_qs;

                /* Otherwise the compiler can store garbage here: */
                u8 pad;
        } b; /* Bits. */
        u32 s; /* Set of bits. */
};

enum perf_event_task_context {
        perf_invalid_context = -1,
        perf_hw_context = 0,
        perf_sw_context,
        perf_nr_task_contexts,
};

struct wake_q_node {
        struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /*
         * For reasons of header soup (see current_thread_info()), this
         * must be the first element of task_struct.
         */
        struct thread_info thread_info;
#endif
        /* -1 unrunnable, 0 runnable, >0 stopped: */
        volatile long state;
        void *stack;
        atomic_t usage;
        /* Per task flags (PF_*), defined further below: */
        unsigned int flags;
        unsigned int ptrace;

#ifdef CONFIG_SMP
        struct llist_node wake_entry;
        int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* Current CPU: */
        unsigned int cpu;
#endif
        unsigned int wakee_flips;
        unsigned long wakee_flip_decay_ts;
        struct task_struct *last_wakee;

        int wake_cpu;
#endif
        int on_rq;

        int prio;
        int static_prio;
        int normal_prio;
        unsigned int rt_priority;

        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
        struct task_group *sched_task_group;
#endif
        struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
        /* List of struct preempt_notifier: */
        struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
#endif

        unsigned int policy;
        int nr_cpus_allowed;
        cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        union rcu_special rcu_read_unlock_special;
        struct list_head rcu_node_entry;
        struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
        unsigned long rcu_tasks_nvcsw;
        bool rcu_tasks_holdout;
        struct list_head rcu_tasks_holdout_list;
        int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

        struct sched_info sched_info;

        struct list_head tasks;
#ifdef CONFIG_SMP
        struct plist_node pushable_tasks;
        struct rb_node pushable_dl_tasks;
#endif

        struct mm_struct *mm;
        struct mm_struct *active_mm;

        /* Per-thread vma caching: */
        struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
        struct task_rss_stat rss_stat;
#endif
        int exit_state;
        int exit_code;
        int exit_signal;
        /* The signal sent when the parent dies: */
        int pdeath_signal;
        /* JOBCTL_*, siglock protected: */
        unsigned long jobctl;

        /* Used for emulating ABI behavior of previous Linux versions: */
        unsigned int personality;

        /* Scheduler bits, serialized by scheduler locks: */
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
        unsigned sched_migrated:1;
        unsigned sched_remote_wakeup:1;
        /* Force alignment to the next boundary: */
        unsigned :0;

        /* Unserialized, strictly 'current' */

        /* Bit to tell LSMs we're in execve(): */
        unsigned in_execve:1;
        unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
        unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
        unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
        unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
        unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
        /* disallow userland-initiated cgroup migration */
        unsigned no_cgroup_migration:1;
#endif

        unsigned long atomic_flags; /* Flags requiring atomic access. */

        struct restart_block restart_block;

        pid_t pid;
        pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector GCC feature: */
        unsigned long stack_canary;
#endif
        /*
         * Pointers to the (original) parent process, youngest child, younger sibling,
         * older sibling, respectively. (p->father can be replaced with
         * p->real_parent->pid)
         */

        /* Real parent process: */
        struct task_struct __rcu *real_parent;

        /* Recipient of SIGCHLD, wait4() reports: */
        struct task_struct __rcu *parent;

        /*
         * Children/sibling form the list of natural children:
         */
        struct list_head children;
        struct list_head sibling;
        struct task_struct *group_leader;

        /*
         * 'ptraced' is the list of tasks this task is using ptrace() on.
         *
         * This includes both natural children and PTRACE_ATTACH targets.
         * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
         */
        struct list_head ptraced;
        struct list_head ptrace_entry;

        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
        struct list_head thread_node;

        struct completion *vfork_done;

        /* CLONE_CHILD_SETTID: */
        int __user *set_child_tid;

        /* CLONE_CHILD_CLEARTID: */
        int __user *clear_child_tid;

        u64 utime;
        u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        u64 utimescaled;
        u64 stimescaled;
#endif
        u64 gtime;
        struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        seqcount_t vtime_seqcount;
        unsigned long long vtime_snap;
        enum {
                /* Task is sleeping or running in a CPU with VTIME inactive: */
                VTIME_INACTIVE = 0,
                /* Task runs in userspace in a CPU with VTIME active: */
                VTIME_USER,
                /* Task runs in kernelspace in a CPU with VTIME active: */
                VTIME_SYS,
        } vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif
        /* Context switch counts: */
        unsigned long nvcsw;
        unsigned long nivcsw;

        /* Monotonic time in nsecs: */
        u64 start_time;

        /* Boot based time in nsecs: */
        u64 real_start_time;

        /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
        unsigned long min_flt;
        unsigned long maj_flt;

#ifdef CONFIG_POSIX_TIMERS
        struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];
#endif

        /* Process credentials: */

        /* Tracer's credentials at attach: */
        const struct cred __rcu *ptracer_cred;

        /* Objective and real subjective task credentials (COW): */
        const struct cred __rcu *real_cred;

        /* Effective (overridable) subjective task credentials (COW): */
        const struct cred __rcu *cred;

        /*
         * executable name, excluding path.
         *
         * - normally initialized by setup_new_exec()
         * - access it with [gs]et_task_comm()
         * - lock it with task_lock()
         */
        char comm[TASK_COMM_LEN];

        struct nameidata *nameidata;

#ifdef CONFIG_SYSVIPC
        struct sysv_sem sysvsem;
        struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
        unsigned long last_switch_count;
#endif
        /* Filesystem information: */
        struct fs_struct *fs;

        /* Open file information: */
        struct files_struct *files;

        /* Namespaces: */
        struct nsproxy *nsproxy;

        /* Signal handlers: */
        struct signal_struct *signal;
        struct sighand_struct *sighand;
        sigset_t blocked;
        sigset_t real_blocked;
        /* Restored if set_restore_sigmask() was used: */
        sigset_t saved_sigmask;
        struct sigpending pending;
        unsigned long sas_ss_sp;
        size_t sas_ss_size;
        unsigned int sas_ss_flags;

        struct callback_head *task_works;

        struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
        kuid_t loginuid;
        unsigned int sessionid;
#endif
        struct seccomp seccomp;

        /* Thread group tracking: */
        u32 parent_exec_id;
        u32 self_exec_id;

        /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
        spinlock_t alloc_lock;

        /* Protection of the PI data structures: */
        raw_spinlock_t pi_lock;

        struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task: */
        struct rb_root pi_waiters;
        struct rb_node *pi_waiters_leftmost;
        /* Deadlock detection and priority inheritance handling: */
        struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        /* Mutex deadlock detection: */
        struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned int irq_events;
        unsigned long hardirq_enable_ip;
        unsigned long hardirq_disable_ip;
        unsigned int hardirq_enable_event;
        unsigned int hardirq_disable_event;
        int hardirqs_enabled;
        int hardirq_context;
        unsigned long softirq_disable_ip;
        unsigned long softirq_enable_ip;
        unsigned int softirq_disable_event;
        unsigned int softirq_enable_event;
        int softirqs_enabled;
        int softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
#endif

#ifdef CONFIG_UBSAN
        unsigned int in_ubsan;
#endif

        /* Journalling filesystem info: */
        void *journal_info;

        /* Stacked block device info: */
        struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
        /* Stack plugging: */
        struct blk_plug *plug;
#endif

        /* VM state: */
        struct reclaim_state *reclaim_state;

        struct backing_dev_info *backing_dev_info;

        struct io_context *io_context;

        /* Ptrace state: */
        unsigned long ptrace_message;
        siginfo_t *last_siginfo;

        struct task_io_accounting ioac;
#ifdef CONFIG_TASK_XACCT
        /* Accumulated RSS usage: */
        u64 acct_rss_mem1;
        /* Accumulated virtual memory usage: */
        u64 acct_vm_mem1;
        /* stime + utime since last update: */
        u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
        /* Protected by ->alloc_lock: */
        nodemask_t mems_allowed;
        /* Sequence number to catch updates: */
        seqcount_t mems_allowed_seq;
        int cpuset_mem_spread_rotor;
        int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock: */
        struct css_set __rcu *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock: */
        struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
        int closid;
#endif
#ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
#endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
        unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
        /* Protected by alloc_lock: */
        struct mempolicy *mempolicy;
        short il_next;
        short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
        int numa_scan_seq;
        unsigned int numa_scan_period;
        unsigned int numa_scan_period_max;
        int numa_preferred_nid;
        unsigned long numa_migrate_retry;
        /* Migration stamp: */
        u64 node_stamp;
        u64 last_task_numa_placement;
        u64 last_sum_exec_runtime;
        struct callback_head numa_work;

        struct list_head numa_entry;
        struct numa_group *numa_group;

        /*
         * numa_faults is an array split into four regions:
         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
         * in this precise order.
         *
         * faults_memory: Exponential decaying average of faults on a per-node
         * basis. Scheduling placement decisions are made based on these
         * counts. The values remain static for the duration of a PTE scan.
         * faults_cpu: Track the nodes the process was running on when a NUMA
         * hinting fault was incurred.
         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
         * during the current scan window. When the scan completes, the counts
         * in faults_memory and faults_cpu decay and these values are copied.
         */
        unsigned long *numa_faults;
        unsigned long total_numa_faults;

        /*
         * numa_faults_locality tracks if faults recorded during the last
         * scan window were remote/local or failed to migrate. The task scan
         * period is adapted based on the locality of the faults with different
         * weights depending on whether they were shared or private faults
         */
        unsigned long numa_faults_locality[3];

        unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

        struct tlbflush_unmap_batch tlb_ubc;

        struct rcu_head rcu;

        /* Cache last used pipe for splice(): */
        struct pipe_inode_info *splice_pipe;

        struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
#endif
        /*
         * When (nr_dirtied >= nr_dirtied_pause), it's time to call
         * balance_dirty_pages() for a dirty throttling pause:
         */
        int nr_dirtied;
        int nr_dirtied_pause;
        /* Start of a write-and-pause period: */
        unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
        int latency_record_count;
        struct latency_record latency_record[LT_SAVECOUNT];
#endif
        /*
         * Time slack values; these are used to round up poll() and
         * select() etc timeout values. These are in nanoseconds.
         */
        u64 timer_slack_ns;
        u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
        unsigned int kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack: */
        int curr_ret_stack;

        /* Stack of return addresses for return function tracing: */
        struct ftrace_ret_stack *ret_stack;

        /* Timestamp for last schedule: */
        unsigned long long ftrace_timestamp;

        /*
         * Number of functions that haven't been traced
         * because of depth overrun:
         */
        atomic_t trace_overrun;

        /* Pause tracing: */
        atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
        /* State flags for use by tracers: */
        unsigned long trace;

        /* Bitmask and counter of trace recursion: */
        unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
        /* Coverage collection mode enabled for this task (0 if disabled): */
        enum kcov_mode kcov_mode;

        /* Size of the kcov_area: */
        unsigned int kcov_size;

        /* Buffer for coverage collection: */
        void *kcov_area;

        /* KCOV descriptor wired with this task or NULL: */
        struct kcov *kcov;
#endif

#ifdef CONFIG_MEMCG
        struct mem_cgroup *memcg_in_oom;
        gfp_t memcg_oom_gfp_mask;
        int memcg_oom_order;

        /* Number of pages to reclaim on returning to userland: */
        unsigned int memcg_nr_pages_over_high;
#endif

#ifdef CONFIG_UPROBES
        struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
        unsigned int sequential_io;
        unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long task_state_change;
#endif
        int pagefault_disabled;
#ifdef CONFIG_MMU
        struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
        struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* A live task holds one reference: */
        atomic_t stack_refcount;
#endif
        /* CPU-specific state of this task: */
        struct thread_struct thread;

        /*
         * WARNING: on x86, 'thread_struct' contains a variable-sized
         * structure. It *MUST* be at the end of 'task_struct'.
         *
         * Do not put anything below here!
         */
};

static inline struct pid *task_pid(struct task_struct *task)
{
        return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_SID].pid;
}
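
/*
 * Illustrative sketch only (not part of this header): per the comment above,
 * a pgrp/session lookup should be wrapped in an RCU read-side section (or be
 * done under the tasklist lock), e.g. to obtain the namespace-local process
 * group id of an assumed task pointer 'tsk':
 *
 *      pid_t pgrp;
 *
 *      rcu_read_lock();
 *      pgrp = pid_vnr(task_pgrp(tsk));
 *      rcu_read_unlock();
 */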

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
        return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
        return tsk->tgid;
}

extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
        return pid_vnr(task_tgid(tsk));
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
        return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
        pid_t pid = 0;

        rcu_read_lock();
        if (pid_alive(tsk))
                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
        rcu_read_unlock();

        return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
        return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
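
/*
 * Illustrative sketch only (not part of this header): for a task running in
 * a PID namespace the _nr and _vnr variants above can return different
 * values, e.g. when logging both the global and the namespace-local id of an
 * assumed task pointer 'tsk':
 *
 *      pr_debug("tgid %d (vtgid %d)\n",
 *               task_tgid_nr(tsk), task_tgid_vnr(tsk));
 */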

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
        return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
        return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* Dumped core */
#define PF_SIGNALED 0x00000400 /* Killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
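
/*
 * Illustrative sketch only (not part of this header): following the rule
 * above, per-process flags are normally saved and restored on 'current'
 * only, e.g. to mark a region where allocations must not recurse into I/O:
 *
 *      unsigned int old_flags = current->flags & PF_MEMALLOC_NOIO;
 *
 *      current->flags |= PF_MEMALLOC_NOIO;
 *      ... allocate ...
 *      current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | old_flags;
 *
 * (the memalloc_noio_save()/memalloc_noio_restore() helpers wrap this
 * pattern).
 */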

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */


#define TASK_PFA_TEST(name, func) \
        static inline bool task_##func(struct task_struct *p) \
        { return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func) \
        static inline void task_set_##func(struct task_struct *p) \
        { set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func) \
        static inline void task_clear_##func(struct task_struct *p) \
        { clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
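
/*
 * Illustrative sketch only (not part of this header): each TASK_PFA_*
 * instantiation above generates an accessor, so the NO_NEW_PRIVS pair
 * expands into task_no_new_privs() and task_set_no_new_privs(), used as
 * (with 'p' an assumed task pointer):
 *
 *      task_set_no_new_privs(current);
 *      if (task_no_new_privs(p))
 *              ...
 */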

static inline void
tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags)
{
        task->flags &= ~flags;
        task->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        if (!cpumask_test_cpu(0, new_mask))
                return -EINVAL;
        return 0;
}
#endif

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
        return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
        return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
        struct thread_info thread_info;
#endif
        unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
        return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
        __set_task_comm(tsk, from, false);
}

extern char *get_task_comm(char *to, struct task_struct *tsk);
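
/*
 * Illustrative sketch only (not part of this header): get_task_comm() takes
 * the task lock and copies into a caller-provided buffer that should be at
 * least TASK_COMM_LEN bytes, e.g. (with 'tsk' an assumed task pointer):
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      get_task_comm(comm, tsk);
 *      pr_debug("task %s\n", comm);
 */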

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
        return 1;
}
#endif

/*
 * Set thread flags in other tasks' structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable BHs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({ \
        ___might_sleep(__FILE__, __LINE__, 0); \
        _cond_resched(); \
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({ \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
        __cond_resched_lock(lock); \
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({ \
        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
        __cond_resched_softirq(); \
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
#endif
}
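
/*
 * Illustrative sketch only (not part of this header): a long-running loop in
 * process context, outside of any spinlock, typically drops in a
 * cond_resched() per iteration so that higher-priority work can run.
 * 'item', 'list' and process() below are assumed caller-provided:
 *
 *      list_for_each_entry(item, &list, node) {
 *              process(item);
 *              cond_resched();
 *      }
 */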

/*
 * Does a critical section need to be broken due to another
 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
 * but reflects a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
        return spin_is_contended(lock);
#else
        return 0;
#endif
}

static __always_inline bool need_resched(void)
{
        return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
        return p->cpu;
#else
        return task_thread_info(p)->cpu;
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies, provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif

#endif