/* include/linux/init_task.h, at v2.6.34-rc2 */
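/*
 * This header collects the static initializers used to assemble the very
 * first kernel task ("swapper", PID 0) at boot: its signal and sighand
 * structures, its struct pid and pid links, its per-process CPU timer
 * lists, and the task_struct itself (via INIT_TASK below).
 */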
#ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H

#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <net/net_namespace.h>

extern struct files_struct init_files;
extern struct fs_struct init_fs;

#define INIT_SIGNALS(sig) { \
	.count = ATOMIC_INIT(1), \
	.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit), \
	.shared_pending = { \
		.list = LIST_HEAD_INIT(sig.shared_pending.list), \
		.signal = {{0}}}, \
	.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
	.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
	.rlim = INIT_RLIMITS, \
	.cputimer = { \
		.cputime = INIT_CPUTIME, \
		.running = 0, \
		.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
	}, \
}

extern struct nsproxy init_nsproxy;

#define INIT_SIGHAND(sighand) { \
	.count = ATOMIC_INIT(1), \
	.action = { { { .sa_handler = NULL, } }, }, \
	.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
	.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
}

extern struct group_info init_groups;

#define INIT_STRUCT_PID { \
	.count = ATOMIC_INIT(1), \
	.tasks = { \
		{ .first = &init_task.pids[PIDTYPE_PID].node }, \
		{ .first = &init_task.pids[PIDTYPE_PGID].node }, \
		{ .first = &init_task.pids[PIDTYPE_SID].node }, \
	}, \
	.rcu = RCU_HEAD_INIT, \
	.level = 0, \
	.numbers = { { \
		.nr = 0, \
		.ns = &init_pid_ns, \
		.pid_chain = { .next = NULL, .pprev = NULL }, \
	}, } \
}

#define INIT_PID_LINK(type) \
{ \
	.node = { \
		.next = NULL, \
		.pprev = &init_struct_pid.tasks[type].first, \
	}, \
	.pid = &init_struct_pid, \
}

#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
	.loginuid = -1, \
	.sessionid = -1,
#else
#define INIT_IDS
#endif

/*
 * Because of the reduced scope of CAP_SETPCAP when filesystem
 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
 * be available in the default configuration.
 */
# define CAP_INIT_BSET CAP_FULL_SET

#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
	.rcu_read_lock_nesting = 0, \
	.rcu_read_unlock_special = 0, \
	.rcu_blocked_node = NULL, \
	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif

extern struct cred init_cred;

#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
	.perf_event_mutex = \
		__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif

/*
 *  INIT_TASK is used to set up the first task table, touch at
 * your own risk!. Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK(tsk) \
{ \
	.state = 0, \
	.stack = &init_thread_info, \
	.usage = ATOMIC_INIT(2), \
	.flags = PF_KTHREAD, \
	.lock_depth = -1, \
	.prio = MAX_PRIO-20, \
	.static_prio = MAX_PRIO-20, \
	.normal_prio = MAX_PRIO-20, \
	.policy = SCHED_NORMAL, \
	.cpus_allowed = CPU_MASK_ALL, \
	.mm = NULL, \
	.active_mm = &init_mm, \
	.se = { \
		.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
	}, \
	.rt = { \
		.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
		.time_slice = HZ, \
		.nr_cpus_allowed = NR_CPUS, \
	}, \
	.tasks = LIST_HEAD_INIT(tsk.tasks), \
	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
	.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
	.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
	.real_parent = &tsk, \
	.parent = &tsk, \
	.children = LIST_HEAD_INIT(tsk.children), \
	.sibling = LIST_HEAD_INIT(tsk.sibling), \
	.group_leader = &tsk, \
	.real_cred = &init_cred, \
	.cred = &init_cred, \
	.cred_guard_mutex = \
		__MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
	.comm = "swapper", \
	.thread = INIT_THREAD, \
	.fs = &init_fs, \
	.files = &init_files, \
	.signal = &init_signals, \
	.sighand = &init_sighand, \
	.nsproxy = &init_nsproxy, \
	.pending = { \
		.list = LIST_HEAD_INIT(tsk.pending.list), \
		.signal = {{0}}}, \
	.blocked = {{0}}, \
	.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
	.journal_info = NULL, \
	.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
	.fs_excl = ATOMIC_INIT(0), \
	.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
	.timer_slack_ns = 50000, /* 50 usec default slack */ \
	.pids = { \
		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID), \
		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID), \
	}, \
	.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
	INIT_IDS \
	INIT_PERF_EVENTS(tsk) \
	INIT_TRACE_IRQFLAGS \
	INIT_LOCKDEP \
	INIT_FTRACE_GRAPH \
	INIT_TRACE_RECURSION \
	INIT_TASK_RCU_PREEMPT(tsk) \
}
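/*
 * Usage sketch (not part of this header): around v2.6.34 each architecture
 * defines the actual objects in its own init_task.c, roughly as below. The
 * exact file and the INIT_THREAD_INFO/init_thread_union names are arch-level
 * details assumed here for illustration (e.g. x86):
 *
 *	static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 *	static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 *
 *	union thread_union init_thread_union __init_task_data =
 *		{ INIT_THREAD_INFO(init_task) };
 *
 *	struct task_struct init_task = INIT_TASK(init_task);
 *
 * Passing the variable's own name into each macro lets the initializer
 * refer back to it, so &tsk, tsk.tasks, etc. above resolve to init_task.
 */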

#define INIT_CPU_TIMERS(cpu_timers) \
{ \
	LIST_HEAD_INIT(cpu_timers[0]), \
	LIST_HEAD_INIT(cpu_timers[1]), \
	LIST_HEAD_INIT(cpu_timers[2]), \
}

/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data.init_task")))

#endif