/* include/linux/sched/task.h, as of Linux v4.13 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>

struct task_struct;
struct rusage;
union thread_union;

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

extern union thread_union init_thread_union;
extern struct task_struct init_task;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);

extern void proc_caches_init(void);

extern void release_task(struct task_struct * p);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int *, int, struct rusage *);

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
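
/*
 * Usage sketch (editorial addition, not part of the original header):
 * a caller that wants to keep using a task pointer after its RCU
 * read-side critical section ends must pin the task first.
 * get_task_struct() bumps ->usage; put_task_struct() drops it and calls
 * __put_task_struct() on the final reference:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(pid);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 *	... use p ...
 *
 *	if (p)
 *		put_task_struct(p);
 *
 * find_task_by_vpid() is the usual PID lookup helper here; any other
 * source of a task pointer that is only valid under RCU works the same
 * way.
 */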

struct task_struct *task_rcu_dereference(struct task_struct **ptask);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

#endif /* _LINUX_SCHED_TASK_H */
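
/*
 * Usage sketch (editorial addition, not part of the original header):
 * task_lock() is what makes reads of the fields listed above consistent.
 * For example, to snapshot ->comm, which can change under the reader:
 *
 *	char buf[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(buf, p->comm, sizeof(buf));
 *	task_unlock(p);
 *
 * The kernel wraps this exact pattern in get_task_comm() (fs/exec.c).
 */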