/* include/linux/sched/task.h, as of Linux v6.17 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

struct task_struct;
struct rusage;
union thread_union;
struct css_set;

/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL

struct kernel_clone_args {
	u64 flags;
	int __user *pidfd;
	int __user *child_tid;
	int __user *parent_tid;
	const char *name;
	int exit_signal;
	u32 kthread:1;
	u32 io_thread:1;
	u32 user_worker:1;
	u32 no_files:1;
	unsigned long stack;
	unsigned long stack_size;
	unsigned long tls;
	pid_t *set_tid;
	/* Number of elements in *set_tid */
	size_t set_tid_size;
	int cgroup;
	int idle;
	int (*fn)(void *);
	void *fn_arg;
	struct cgroup *cgrp;
	struct css_set *cset;
	unsigned int kill_seq;
};

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern int lockdep_tasklist_lock_is_held(void);

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_cancel_fork(struct task_struct *p);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);
void __noreturn make_task_dead(int signr);

extern void mm_cache_init(void);
extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct * p);

extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);

extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern __noreturn void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct task_struct *);

extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *copy_process(struct pid *pid, int trace, int node,
				 struct kernel_clone_args *args);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
			   unsigned long flags);
extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
extern void sched_exec(void);

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
	refcount_inc(&t->usage);
	return t;
}

static inline struct task_struct *tryget_task_struct(struct task_struct *t)
{
	return refcount_inc_not_zero(&t->usage) ? t : NULL;
}
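
/*
 * Usage sketch (editor's illustration, not part of the upstream header):
 * the typical pin/unpin pattern built from the helpers above and the
 * put_task_struct() defined below. Here @p stands for any task pointer
 * the caller can already safely dereference.
 *
 *	struct task_struct *t = tryget_task_struct(p);
 *
 *	if (t) {
 *		// t->usage is elevated: the task_struct stays allocated
 *		// even after the task exits, until put_task_struct(t)
 *		put_task_struct(t);
 *	}
 *
 * get_task_struct() is the unconditional variant for callers that already
 * hold a reference, so the count cannot legitimately be zero.
 */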

extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);

static inline void put_task_struct(struct task_struct *t)
{
	if (!refcount_dec_and_test(&t->usage))
		return;

	/*
	 * Under PREEMPT_RT, we can't call __put_task_struct
	 * in atomic context because it will indirectly
	 * acquire sleeping locks. The same is true if the
	 * current process has a mutex enqueued (blocked on
	 * a PI chain).
	 *
	 * In !RT, it is always safe to call __put_task_struct().
	 * Though, in order to simplify the code, resort to the
	 * deferred call too.
	 *
	 * call_rcu() will schedule __put_task_struct_rcu_cb()
	 * to be called in process context.
	 *
	 * __put_task_struct() is called when
	 * refcount_dec_and_test(&t->usage) succeeds.
	 *
	 * This means that it can't "conflict" with
	 * put_task_struct_rcu_user() which abuses ->rcu the same
	 * way; rcu_users has a reference so task->usage can't be
	 * zero after rcu_users 1 -> 0 transition.
	 *
	 * delayed_free_task() also uses ->rcu, but it is only called
	 * when it fails to fork a process. Therefore, there is no
	 * way it can conflict with __put_task_struct().
	 */
	call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}

DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
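
/*
 * Usage sketch (editor's illustration, not part of the upstream header):
 * DEFINE_FREE(put_task, ...) above registers a cleanup "class" for
 * <linux/cleanup.h>, so a task reference is dropped automatically when
 * the annotated pointer goes out of scope. find_some_task() is a made-up
 * stand-in for any function returning a counted reference or NULL.
 *
 *	struct task_struct *t __free(put_task) = find_some_task(pid);
 *
 *	if (!t)
 *		return -ESRCH;
 *	// use t; put_task_struct(t) runs on every return path from here
 */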

static inline void put_task_struct_many(struct task_struct *t, int nr)
{
	if (refcount_sub_and_test(nr, &t->usage))
		__put_task_struct(t);
}

void put_task_struct_rcu_user(struct task_struct *task);

/* Free all architecture-specific resources held by a thread. */
void release_thread(struct task_struct *dead_task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = 0;
	/* Handle dynamically sized thread_struct. */
	*size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))

#endif /* _LINUX_SCHED_TASK_H */
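
/*
 * Usage sketch (editor's illustration, not part of the upstream header):
 * DEFINE_GUARD(task_lock, ...) above wires task_lock()/task_unlock() into
 * the scope-based guard() helper from <linux/cleanup.h>, so p->alloc_lock
 * is released automatically when the enclosing scope is left:
 *
 *	guard(task_lock)(p);
 *	// ->fs, ->files, ->mm, ->comm etc. of p are stable here;
 *	// task_unlock(p) runs automatically at end of scope
 */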