[PATCH] sched: cleanup, remove task_t, convert to struct task_struct

cleanup: remove task_t and convert all of its uses to struct task_struct. I
introduced the typedef for the scheduler long ago and it was a mistake.

The conversion was mostly scripted; the result was reviewed, and any
secondary whitespace and style fallout was fixed up by hand.
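
The pattern is the same everywhere: the old alias disappears from the
declaration and the structure tag is spelled out. A minimal before/after
sketch of the kind of declaration the script rewrote (illustrative only,
not copied verbatim from any one hunk below):

    /* Before: the scheduler-era alias hid the struct keyword. */
    typedef struct task_struct task_t;

    extern void set_user_nice(task_t *p, long nice);
    extern int  task_nice(const task_t *p);

    /* After: the alias is gone and callers name the type directly. */
    extern void set_user_nice(struct task_struct *p, long nice);
    extern int  task_nice(const struct task_struct *p);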

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar, committed by Linus Torvalds (36c8b586, 48f24c4d)

+203 -187
+1 -1
arch/alpha/kernel/process.c
··· 474 */ 475 476 unsigned long 477 - thread_saved_pc(task_t *t) 478 { 479 unsigned long base = (unsigned long)task_stack_page(t); 480 unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
··· 474 */ 475 476 unsigned long 477 + thread_saved_pc(struct task_struct *t) 478 { 479 unsigned long base = (unsigned long)task_stack_page(t); 480 unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
+5 -5
arch/ia64/kernel/mca.c
··· 678 */ 679 680 static void 681 - ia64_mca_modify_comm(const task_t *previous_current) 682 { 683 char *p, comm[sizeof(current->comm)]; 684 if (previous_current->pid) ··· 709 * that we can do backtrace on the MCA/INIT handler code itself. 710 */ 711 712 - static task_t * 713 ia64_mca_modify_original_stack(struct pt_regs *regs, 714 const struct switch_stack *sw, 715 struct ia64_sal_os_state *sos, ··· 719 ia64_va va; 720 extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ 721 const pal_min_state_area_t *ms = sos->pal_min_state; 722 - task_t *previous_current; 723 struct pt_regs *old_regs; 724 struct switch_stack *old_sw; 725 unsigned size = sizeof(struct pt_regs) + ··· 1023 pal_processor_state_info_t *psp = (pal_processor_state_info_t *) 1024 &sos->proc_state_param; 1025 int recover, cpu = smp_processor_id(); 1026 - task_t *previous_current; 1027 struct ia64_mca_notify_die nd = 1028 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1029 ··· 1352 { 1353 static atomic_t slaves; 1354 static atomic_t monarchs; 1355 - task_t *previous_current; 1356 int cpu = smp_processor_id(); 1357 struct ia64_mca_notify_die nd = 1358 { .sos = sos, .monarch_cpu = &monarch_cpu };
··· 678 */ 679 680 static void 681 + ia64_mca_modify_comm(const struct task_struct *previous_current) 682 { 683 char *p, comm[sizeof(current->comm)]; 684 if (previous_current->pid) ··· 709 * that we can do backtrace on the MCA/INIT handler code itself. 710 */ 711 712 + static struct task_struct * 713 ia64_mca_modify_original_stack(struct pt_regs *regs, 714 const struct switch_stack *sw, 715 struct ia64_sal_os_state *sos, ··· 719 ia64_va va; 720 extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ 721 const pal_min_state_area_t *ms = sos->pal_min_state; 722 + struct task_struct *previous_current; 723 struct pt_regs *old_regs; 724 struct switch_stack *old_sw; 725 unsigned size = sizeof(struct pt_regs) + ··· 1023 pal_processor_state_info_t *psp = (pal_processor_state_info_t *) 1024 &sos->proc_state_param; 1025 int recover, cpu = smp_processor_id(); 1026 + struct task_struct *previous_current; 1027 struct ia64_mca_notify_die nd = 1028 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1029 ··· 1352 { 1353 static atomic_t slaves; 1354 static atomic_t monarchs; 1355 + struct task_struct *previous_current; 1356 int cpu = smp_processor_id(); 1357 struct ia64_mca_notify_die nd = 1358 { .sos = sos, .monarch_cpu = &monarch_cpu };
+1 -1
arch/ia64/kernel/smpboot.c
··· 124 extern void start_ap (void); 125 extern unsigned long ia64_iobase; 126 127 - task_t *task_for_booting_cpu; 128 129 /* 130 * State for each CPU
··· 124 extern void start_ap (void); 125 extern unsigned long ia64_iobase; 126 127 + struct task_struct *task_for_booting_cpu; 128 129 /* 130 * State for each CPU
+1 -1
arch/mips/kernel/entry.S
··· 65 #endif 66 67 FEXPORT(ret_from_fork) 68 - jal schedule_tail # a0 = task_t *prev 69 70 FEXPORT(syscall_exit) 71 local_irq_disable # make sure need_resched and
··· 65 #endif 66 67 FEXPORT(ret_from_fork) 68 + jal schedule_tail # a0 = struct task_struct *prev 69 70 FEXPORT(syscall_exit) 71 local_irq_disable # make sure need_resched and
+3 -3
arch/mips/kernel/mips-mt.c
··· 47 * used in sys_sched_set/getaffinity() in kernel/sched.c, so 48 * cloned here. 49 */ 50 - static inline task_t *find_process_by_pid(pid_t pid) 51 { 52 return pid ? find_task_by_pid(pid) : current; 53 } ··· 62 cpumask_t new_mask; 63 cpumask_t effective_mask; 64 int retval; 65 - task_t *p; 66 67 if (len < sizeof(new_mask)) 68 return -EINVAL; ··· 127 unsigned int real_len; 128 cpumask_t mask; 129 int retval; 130 - task_t *p; 131 132 real_len = sizeof(mask); 133 if (len < real_len)
··· 47 * used in sys_sched_set/getaffinity() in kernel/sched.c, so 48 * cloned here. 49 */ 50 + static inline struct task_struct *find_process_by_pid(pid_t pid) 51 { 52 return pid ? find_task_by_pid(pid) : current; 53 } ··· 62 cpumask_t new_mask; 63 cpumask_t effective_mask; 64 int retval; 65 + struct task_struct *p; 66 67 if (len < sizeof(new_mask)) 68 return -EINVAL; ··· 127 unsigned int real_len; 128 cpumask_t mask; 129 int retval; 130 + struct task_struct *p; 131 132 real_len = sizeof(mask); 133 if (len < real_len)
+1 -1
arch/um/kernel/tt/process_kern.c
··· 119 panic("read failed in suspend_new_thread, err = %d", -err); 120 } 121 122 - void schedule_tail(task_t *prev); 123 124 static void new_thread_handler(int sig) 125 {
··· 119 panic("read failed in suspend_new_thread, err = %d", -err); 120 } 121 122 + void schedule_tail(struct task_struct *prev); 123 124 static void new_thread_handler(int sig) 125 {
+1 -1
drivers/char/tty_io.c
··· 2336 2337 static int tiocsctty(struct tty_struct *tty, int arg) 2338 { 2339 - task_t *p; 2340 2341 if (current->signal->leader && 2342 (current->signal->session == tty->session))
··· 2336 2337 static int tiocsctty(struct tty_struct *tty, int arg) 2338 { 2339 + struct task_struct *p; 2340 2341 if (current->signal->leader && 2342 (current->signal->session == tty->session))
+2 -2
fs/eventpoll.c
··· 120 */ 121 struct wake_task_node { 122 struct list_head llink; 123 - task_t *task; 124 wait_queue_head_t *wq; 125 }; 126 ··· 413 { 414 int wake_nests = 0; 415 unsigned long flags; 416 - task_t *this_task = current; 417 struct list_head *lsthead = &psw->wake_task_list, *lnk; 418 struct wake_task_node *tncur; 419 struct wake_task_node tnode;
··· 120 */ 121 struct wake_task_node { 122 struct list_head llink; 123 + struct task_struct *task; 124 wait_queue_head_t *wq; 125 }; 126 ··· 413 { 414 int wake_nests = 0; 415 unsigned long flags; 416 + struct task_struct *this_task = current; 417 struct list_head *lsthead = &psw->wake_task_list, *lnk; 418 struct wake_task_node *tncur; 419 struct wake_task_node tnode;
+1 -1
include/asm-ia64/thread_info.h
··· 68 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) 69 70 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 71 - #define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) 72 #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) 73 74 #endif /* !__ASSEMBLY */
··· 68 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) 69 70 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 71 + #define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) 72 #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) 73 74 #endif /* !__ASSEMBLY */
+1 -1
include/asm-m32r/system.h
··· 18 * switch_to(prev, next) should switch from task `prev' to `next' 19 * `prev' will never be the same as `next'. 20 * 21 - * `next' and `prev' should be task_t, but it isn't always defined 22 */ 23 24 #define switch_to(prev, next, last) do { \
··· 18 * switch_to(prev, next) should switch from task `prev' to `next' 19 * `prev' will never be the same as `next'. 20 * 21 + * `next' and `prev' should be struct task_struct, but it isn't always defined 22 */ 23 24 #define switch_to(prev, next, last) do { \
+1 -1
include/asm-sh/system.h
··· 12 */ 13 14 #define switch_to(prev, next, last) do { \ 15 - task_t *__last; \ 16 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ 17 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ 18 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
··· 12 */ 13 14 #define switch_to(prev, next, last) do { \ 15 + struct task_struct *__last; \ 16 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ 17 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ 18 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
+28 -27
include/linux/sched.h
··· 184 extern rwlock_t tasklist_lock; 185 extern spinlock_t mmlist_lock; 186 187 - typedef struct task_struct task_t; 188 189 extern void sched_init(void); 190 extern void sched_init_smp(void); 191 - extern void init_idle(task_t *idle, int cpu); 192 193 extern cpumask_t nohz_cpu_mask; 194 ··· 383 wait_queue_head_t wait_chldexit; /* for wait4() */ 384 385 /* current thread group signal load-balancing target: */ 386 - task_t *curr_target; 387 388 /* shared signal handling: */ 389 struct sigpending shared_pending; ··· 699 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) 700 701 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 702 - extern void prefetch_stack(struct task_struct*); 703 #else 704 static inline void prefetch_stack(struct task_struct *t) { } 705 #endif ··· 1031 #define used_math() tsk_used_math(current) 1032 1033 #ifdef CONFIG_SMP 1034 - extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); 1035 #else 1036 - static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) 1037 { 1038 if (!cpu_isset(0, new_mask)) 1039 return -EINVAL; ··· 1042 #endif 1043 1044 extern unsigned long long sched_clock(void); 1045 - extern unsigned long long current_sched_time(const task_t *current_task); 1046 1047 /* sched_exec is called by processes performing an exec */ 1048 #ifdef CONFIG_SMP ··· 1061 extern void sched_idle_next(void); 1062 1063 #ifdef CONFIG_RT_MUTEXES 1064 - extern int rt_mutex_getprio(task_t *p); 1065 - extern void rt_mutex_setprio(task_t *p, int prio); 1066 - extern void rt_mutex_adjust_pi(task_t *p); 1067 #else 1068 - static inline int rt_mutex_getprio(task_t *p) 1069 { 1070 return p->normal_prio; 1071 } 1072 # define rt_mutex_adjust_pi(p) do { } while (0) 1073 #endif 1074 1075 - extern void set_user_nice(task_t *p, long nice); 1076 - extern int task_prio(const task_t *p); 1077 - extern int task_nice(const task_t *p); 1078 - extern int can_nice(const task_t *p, const int nice); 1079 - extern int task_curr(const task_t *p); 1080 extern int idle_cpu(int cpu); 1081 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 1082 - extern task_t *idle_task(int cpu); 1083 - extern task_t *curr_task(int cpu); 1084 - extern void set_curr_task(int cpu, task_t *p); 1085 1086 void yield(void); 1087 ··· 1138 #else 1139 static inline void kick_process(struct task_struct *tsk) { } 1140 #endif 1141 - extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); 1142 - extern void FASTCALL(sched_exit(task_t * p)); 1143 1144 extern int in_group_p(gid_t); 1145 extern int in_egroup_p(gid_t); ··· 1244 extern void daemonize(const char *, ...); 1245 extern int allow_signal(int); 1246 extern int disallow_signal(int); 1247 - extern task_t *child_reaper; 1248 1249 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); 1250 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); 1251 - task_t *fork_idle(int); 1252 1253 extern void set_task_comm(struct task_struct *tsk, char *from); 1254 extern void get_task_comm(char *to, struct task_struct *tsk); 1255 1256 #ifdef CONFIG_SMP 1257 - extern void wait_task_inactive(task_t * p); 1258 #else 1259 #define wait_task_inactive(p) do { } while (0) 1260 #endif ··· 1280 /* de_thread depends on thread_group_leader not being a pid based check */ 1281 #define thread_group_leader(p) (p == p->group_leader) 1282 1283 - static inline task_t *next_thread(const task_t *p) 1284 { 1285 return list_entry(rcu_dereference(p->thread_group.next), 1286 - 
task_t, thread_group); 1287 } 1288 1289 - static inline int thread_group_empty(task_t *p) 1290 { 1291 return list_empty(&p->thread_group); 1292 }
··· 184 extern rwlock_t tasklist_lock; 185 extern spinlock_t mmlist_lock; 186 187 + struct task_struct; 188 189 extern void sched_init(void); 190 extern void sched_init_smp(void); 191 + extern void init_idle(struct task_struct *idle, int cpu); 192 193 extern cpumask_t nohz_cpu_mask; 194 ··· 383 wait_queue_head_t wait_chldexit; /* for wait4() */ 384 385 /* current thread group signal load-balancing target: */ 386 + struct task_struct *curr_target; 387 388 /* shared signal handling: */ 389 struct sigpending shared_pending; ··· 699 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) 700 701 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 702 + extern void prefetch_stack(struct task_struct *t); 703 #else 704 static inline void prefetch_stack(struct task_struct *t) { } 705 #endif ··· 1031 #define used_math() tsk_used_math(current) 1032 1033 #ifdef CONFIG_SMP 1034 + extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask); 1035 #else 1036 + static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 1037 { 1038 if (!cpu_isset(0, new_mask)) 1039 return -EINVAL; ··· 1042 #endif 1043 1044 extern unsigned long long sched_clock(void); 1045 + extern unsigned long long 1046 + current_sched_time(const struct task_struct *current_task); 1047 1048 /* sched_exec is called by processes performing an exec */ 1049 #ifdef CONFIG_SMP ··· 1060 extern void sched_idle_next(void); 1061 1062 #ifdef CONFIG_RT_MUTEXES 1063 + extern int rt_mutex_getprio(struct task_struct *p); 1064 + extern void rt_mutex_setprio(struct task_struct *p, int prio); 1065 + extern void rt_mutex_adjust_pi(struct task_struct *p); 1066 #else 1067 + static inline int rt_mutex_getprio(struct task_struct *p) 1068 { 1069 return p->normal_prio; 1070 } 1071 # define rt_mutex_adjust_pi(p) do { } while (0) 1072 #endif 1073 1074 + extern void set_user_nice(struct task_struct *p, long nice); 1075 + extern int task_prio(const struct task_struct *p); 1076 + extern int task_nice(const struct task_struct *p); 1077 + extern int can_nice(const struct task_struct *p, const int nice); 1078 + extern int task_curr(const struct task_struct *p); 1079 extern int idle_cpu(int cpu); 1080 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 1081 + extern struct task_struct *idle_task(int cpu); 1082 + extern struct task_struct *curr_task(int cpu); 1083 + extern void set_curr_task(int cpu, struct task_struct *p); 1084 1085 void yield(void); 1086 ··· 1137 #else 1138 static inline void kick_process(struct task_struct *tsk) { } 1139 #endif 1140 + extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags)); 1141 + extern void FASTCALL(sched_exit(struct task_struct * p)); 1142 1143 extern int in_group_p(gid_t); 1144 extern int in_egroup_p(gid_t); ··· 1243 extern void daemonize(const char *, ...); 1244 extern int allow_signal(int); 1245 extern int disallow_signal(int); 1246 + extern struct task_struct *child_reaper; 1247 1248 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); 1249 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); 1250 + struct task_struct *fork_idle(int); 1251 1252 extern void set_task_comm(struct task_struct *tsk, char *from); 1253 extern void get_task_comm(char *to, struct task_struct *tsk); 1254 1255 #ifdef CONFIG_SMP 1256 + extern void wait_task_inactive(struct task_struct * p); 1257 #else 1258 #define wait_task_inactive(p) do { } while (0) 1259 #endif ··· 1279 /* de_thread 
depends on thread_group_leader not being a pid based check */ 1280 #define thread_group_leader(p) (p == p->group_leader) 1281 1282 + static inline struct task_struct *next_thread(const struct task_struct *p) 1283 { 1284 return list_entry(rcu_dereference(p->thread_group.next), 1285 + struct task_struct, thread_group); 1286 } 1287 1288 + static inline int thread_group_empty(struct task_struct *p) 1289 { 1290 return list_empty(&p->thread_group); 1291 }
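
One detail in the include/linux/sched.h hunk above is worth noting: the
removed typedef doubled as the first mention of the type, so it is replaced
by a bare forward declaration rather than being dropped outright. An
incomplete type is all the header needs where it only forms pointers; a
small illustration of that rule, reusing prototypes that appear in the hunk:

    /* A forward declaration introduces an incomplete type. */
    struct task_struct;

    /* Pointers to an incomplete type can be declared, passed and
     * returned without the full definition being visible yet: */
    extern void init_idle(struct task_struct *idle, int cpu);
    extern struct task_struct *idle_task(int cpu);

    /* Dereferencing members, by contrast, needs the complete
     * struct task_struct definition, which sched.h supplies further down. */
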
+4 -4
kernel/capability.c
··· 46 int ret = 0; 47 pid_t pid; 48 __u32 version; 49 - task_t *target; 50 struct __user_cap_data_struct data; 51 52 if (get_user(version, &header->version)) ··· 96 kernel_cap_t *inheritable, 97 kernel_cap_t *permitted) 98 { 99 - task_t *g, *target; 100 int ret = -EPERM; 101 int found = 0; 102 ··· 128 kernel_cap_t *inheritable, 129 kernel_cap_t *permitted) 130 { 131 - task_t *g, *target; 132 int ret = -EPERM; 133 int found = 0; 134 ··· 172 { 173 kernel_cap_t inheritable, permitted, effective; 174 __u32 version; 175 - task_t *target; 176 int ret; 177 pid_t pid; 178
··· 46 int ret = 0; 47 pid_t pid; 48 __u32 version; 49 + struct task_struct *target; 50 struct __user_cap_data_struct data; 51 52 if (get_user(version, &header->version)) ··· 96 kernel_cap_t *inheritable, 97 kernel_cap_t *permitted) 98 { 99 + struct task_struct *g, *target; 100 int ret = -EPERM; 101 int found = 0; 102 ··· 128 kernel_cap_t *inheritable, 129 kernel_cap_t *permitted) 130 { 131 + struct task_struct *g, *target; 132 int ret = -EPERM; 133 int found = 0; 134 ··· 172 { 173 kernel_cap_t inheritable, permitted, effective; 174 __u32 version; 175 + struct task_struct *target; 176 int ret; 177 pid_t pid; 178
+19 -16
kernel/exit.c
··· 134 135 void release_task(struct task_struct * p) 136 { 137 int zap_leader; 138 - task_t *leader; 139 repeat: 140 atomic_dec(&p->user->processes); 141 write_lock_irq(&tasklist_lock); ··· 209 * 210 * "I ask you, have you ever known what it is to be an orphan?" 211 */ 212 - static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task) 213 { 214 struct task_struct *p; 215 int ret = 1; ··· 582 mmput(mm); 583 } 584 585 - static inline void choose_new_parent(task_t *p, task_t *reaper) 586 { 587 /* 588 * Make sure we're not reparenting to ourselves and that ··· 593 p->real_parent = reaper; 594 } 595 596 - static void reparent_thread(task_t *p, task_t *father, int traced) 597 { 598 /* We don't want people slaying init. */ 599 if (p->exit_signal != -1) ··· 658 * group, and if no such member exists, give it to 659 * the global child reaper process (ie "init") 660 */ 661 - static void forget_original_parent(struct task_struct * father, 662 - struct list_head *to_release) 663 { 664 struct task_struct *p, *reaper = father; 665 struct list_head *_p, *_n; ··· 682 */ 683 list_for_each_safe(_p, _n, &father->children) { 684 int ptrace; 685 - p = list_entry(_p,struct task_struct,sibling); 686 687 ptrace = p->ptrace; 688 ··· 711 list_add(&p->ptrace_list, to_release); 712 } 713 list_for_each_safe(_p, _n, &father->ptrace_children) { 714 - p = list_entry(_p,struct task_struct,ptrace_list); 715 choose_new_parent(p, reaper); 716 reparent_thread(p, father, 1); 717 } ··· 831 832 list_for_each_safe(_p, _n, &ptrace_dead) { 833 list_del_init(_p); 834 - t = list_entry(_p,struct task_struct,ptrace_list); 835 release_task(t); 836 } 837 ··· 1012 do_group_exit((error_code & 0xff) << 8); 1013 } 1014 1015 - static int eligible_child(pid_t pid, int options, task_t *p) 1016 { 1017 if (pid > 0) { 1018 if (p->pid != pid) ··· 1053 return 1; 1054 } 1055 1056 - static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid, 1057 int why, int status, 1058 struct siginfo __user *infop, 1059 struct rusage __user *rusagep) 1060 { 1061 int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0; 1062 put_task_struct(p); 1063 if (!retval) 1064 retval = put_user(SIGCHLD, &infop->si_signo); ··· 1084 * the lock and this task is uninteresting. If we return nonzero, we have 1085 * released the lock and the system call should return. 1086 */ 1087 - static int wait_task_zombie(task_t *p, int noreap, 1088 struct siginfo __user *infop, 1089 int __user *stat_addr, struct rusage __user *ru) 1090 { ··· 1246 * the lock and this task is uninteresting. If we return nonzero, we have 1247 * released the lock and the system call should return. 1248 */ 1249 - static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap, 1250 - struct siginfo __user *infop, 1251 int __user *stat_addr, struct rusage __user *ru) 1252 { 1253 int retval, exit_code; ··· 1361 * the lock and this task is uninteresting. If we return nonzero, we have 1362 * released the lock and the system call should return. 1363 */ 1364 - static int wait_task_continued(task_t *p, int noreap, 1365 struct siginfo __user *infop, 1366 int __user *stat_addr, struct rusage __user *ru) 1367 { ··· 1447 int ret; 1448 1449 list_for_each(_p,&tsk->children) { 1450 - p = list_entry(_p,struct task_struct,sibling); 1451 1452 ret = eligible_child(pid, options, p); 1453 if (!ret)
··· 134 135 void release_task(struct task_struct * p) 136 { 137 + struct task_struct *leader; 138 int zap_leader; 139 repeat: 140 atomic_dec(&p->user->processes); 141 write_lock_irq(&tasklist_lock); ··· 209 * 210 * "I ask you, have you ever known what it is to be an orphan?" 211 */ 212 + static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task) 213 { 214 struct task_struct *p; 215 int ret = 1; ··· 582 mmput(mm); 583 } 584 585 + static inline void 586 + choose_new_parent(struct task_struct *p, struct task_struct *reaper) 587 { 588 /* 589 * Make sure we're not reparenting to ourselves and that ··· 592 p->real_parent = reaper; 593 } 594 595 + static void 596 + reparent_thread(struct task_struct *p, struct task_struct *father, int traced) 597 { 598 /* We don't want people slaying init. */ 599 if (p->exit_signal != -1) ··· 656 * group, and if no such member exists, give it to 657 * the global child reaper process (ie "init") 658 */ 659 + static void 660 + forget_original_parent(struct task_struct *father, struct list_head *to_release) 661 { 662 struct task_struct *p, *reaper = father; 663 struct list_head *_p, *_n; ··· 680 */ 681 list_for_each_safe(_p, _n, &father->children) { 682 int ptrace; 683 + p = list_entry(_p, struct task_struct, sibling); 684 685 ptrace = p->ptrace; 686 ··· 709 list_add(&p->ptrace_list, to_release); 710 } 711 list_for_each_safe(_p, _n, &father->ptrace_children) { 712 + p = list_entry(_p, struct task_struct, ptrace_list); 713 choose_new_parent(p, reaper); 714 reparent_thread(p, father, 1); 715 } ··· 829 830 list_for_each_safe(_p, _n, &ptrace_dead) { 831 list_del_init(_p); 832 + t = list_entry(_p, struct task_struct, ptrace_list); 833 release_task(t); 834 } 835 ··· 1010 do_group_exit((error_code & 0xff) << 8); 1011 } 1012 1013 + static int eligible_child(pid_t pid, int options, struct task_struct *p) 1014 { 1015 if (pid > 0) { 1016 if (p->pid != pid) ··· 1051 return 1; 1052 } 1053 1054 + static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, 1055 int why, int status, 1056 struct siginfo __user *infop, 1057 struct rusage __user *rusagep) 1058 { 1059 int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0; 1060 + 1061 put_task_struct(p); 1062 if (!retval) 1063 retval = put_user(SIGCHLD, &infop->si_signo); ··· 1081 * the lock and this task is uninteresting. If we return nonzero, we have 1082 * released the lock and the system call should return. 1083 */ 1084 + static int wait_task_zombie(struct task_struct *p, int noreap, 1085 struct siginfo __user *infop, 1086 int __user *stat_addr, struct rusage __user *ru) 1087 { ··· 1243 * the lock and this task is uninteresting. If we return nonzero, we have 1244 * released the lock and the system call should return. 1245 */ 1246 + static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, 1247 + int noreap, struct siginfo __user *infop, 1248 int __user *stat_addr, struct rusage __user *ru) 1249 { 1250 int retval, exit_code; ··· 1358 * the lock and this task is uninteresting. If we return nonzero, we have 1359 * released the lock and the system call should return. 1360 */ 1361 + static int wait_task_continued(struct task_struct *p, int noreap, 1362 struct siginfo __user *infop, 1363 int __user *stat_addr, struct rusage __user *ru) 1364 { ··· 1444 int ret; 1445 1446 list_for_each(_p,&tsk->children) { 1447 + p = list_entry(_p, struct task_struct, sibling); 1448 1449 ret = eligible_child(pid, options, p); 1450 if (!ret)
+9 -9
kernel/fork.c
··· 933 * parts of the process environment (as per the clone 934 * flags). The actual kick-off is left to the caller. 935 */ 936 - static task_t *copy_process(unsigned long clone_flags, 937 - unsigned long stack_start, 938 - struct pt_regs *regs, 939 - unsigned long stack_size, 940 - int __user *parent_tidptr, 941 - int __user *child_tidptr, 942 - int pid) 943 { 944 int retval; 945 struct task_struct *p = NULL; ··· 1294 return regs; 1295 } 1296 1297 - task_t * __devinit fork_idle(int cpu) 1298 { 1299 - task_t *task; 1300 struct pt_regs regs; 1301 1302 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
··· 933 * parts of the process environment (as per the clone 934 * flags). The actual kick-off is left to the caller. 935 */ 936 + static struct task_struct *copy_process(unsigned long clone_flags, 937 + unsigned long stack_start, 938 + struct pt_regs *regs, 939 + unsigned long stack_size, 940 + int __user *parent_tidptr, 941 + int __user *child_tidptr, 942 + int pid) 943 { 944 int retval; 945 struct task_struct *p = NULL; ··· 1294 return regs; 1295 } 1296 1297 + struct task_struct * __devinit fork_idle(int cpu) 1298 { 1299 + struct task_struct *task; 1300 struct pt_regs regs; 1301 1302 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
+1 -1
kernel/hrtimer.c
··· 669 return HRTIMER_NORESTART; 670 } 671 672 - void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task) 673 { 674 sl->timer.function = hrtimer_wakeup; 675 sl->task = task;
··· 669 return HRTIMER_NORESTART; 670 } 671 672 + void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) 673 { 674 sl->timer.function = hrtimer_wakeup; 675 sl->task = task;
+3 -3
kernel/pid.c
··· 218 return NULL; 219 } 220 221 - int fastcall attach_pid(task_t *task, enum pid_type type, int nr) 222 { 223 struct pid_link *link; 224 struct pid *pid; ··· 233 return 0; 234 } 235 236 - void fastcall detach_pid(task_t *task, enum pid_type type) 237 { 238 struct pid_link *link; 239 struct pid *pid; ··· 267 /* 268 * Must be called under rcu_read_lock() or with tasklist_lock read-held. 269 */ 270 - task_t *find_task_by_pid_type(int type, int nr) 271 { 272 return pid_task(find_pid(nr), type); 273 }
··· 218 return NULL; 219 } 220 221 + int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr) 222 { 223 struct pid_link *link; 224 struct pid *pid; ··· 233 return 0; 234 } 235 236 + void fastcall detach_pid(struct task_struct *task, enum pid_type type) 237 { 238 struct pid_link *link; 239 struct pid *pid; ··· 267 /* 268 * Must be called under rcu_read_lock() or with tasklist_lock read-held. 269 */ 270 + struct task_struct *find_task_by_pid_type(int type, int nr) 271 { 272 return pid_task(find_pid(nr), type); 273 }
+3 -3
kernel/ptrace.c
··· 28 * 29 * Must be called with the tasklist lock write-held. 30 */ 31 - void __ptrace_link(task_t *child, task_t *new_parent) 32 { 33 BUG_ON(!list_empty(&child->ptrace_list)); 34 if (child->parent == new_parent) ··· 46 * TASK_TRACED, resume it now. 47 * Requires that irqs be disabled. 48 */ 49 - void ptrace_untrace(task_t *child) 50 { 51 spin_lock(&child->sighand->siglock); 52 if (child->state == TASK_TRACED) { ··· 65 * 66 * Must be called with the tasklist lock write-held. 67 */ 68 - void __ptrace_unlink(task_t *child) 69 { 70 BUG_ON(!child->ptrace); 71
··· 28 * 29 * Must be called with the tasklist lock write-held. 30 */ 31 + void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) 32 { 33 BUG_ON(!list_empty(&child->ptrace_list)); 34 if (child->parent == new_parent) ··· 46 * TASK_TRACED, resume it now. 47 * Requires that irqs be disabled. 48 */ 49 + void ptrace_untrace(struct task_struct *child) 50 { 51 spin_lock(&child->sighand->siglock); 52 if (child->state == TASK_TRACED) { ··· 65 * 66 * Must be called with the tasklist lock write-held. 67 */ 68 + void __ptrace_unlink(struct task_struct *child) 69 { 70 BUG_ON(!child->ptrace); 71
+3 -2
kernel/rtmutex-debug.c
··· 96 rt_trace_on = 0; 97 } 98 99 - static void printk_task(task_t *p) 100 { 101 if (p) 102 printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); ··· 231 lock->name = name; 232 } 233 234 - void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task) 235 { 236 } 237
··· 96 rt_trace_on = 0; 97 } 98 99 + static void printk_task(struct task_struct *p) 100 { 101 if (p) 102 printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); ··· 231 lock->name = name; 232 } 233 234 + void 235 + rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) 236 { 237 } 238
+2 -2
kernel/rtmutex-tester.c
··· 33 }; 34 35 static struct test_thread_data thread_data[MAX_RT_TEST_THREADS]; 36 - static task_t *threads[MAX_RT_TEST_THREADS]; 37 static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES]; 38 39 enum test_opcodes { ··· 361 static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) 362 { 363 struct test_thread_data *td; 364 char *curr = buf; 365 - task_t *tsk; 366 int i; 367 368 td = container_of(dev, struct test_thread_data, sysdev);
··· 33 }; 34 35 static struct test_thread_data thread_data[MAX_RT_TEST_THREADS]; 36 + static struct task_struct *threads[MAX_RT_TEST_THREADS]; 37 static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES]; 38 39 enum test_opcodes { ··· 361 static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) 362 { 363 struct test_thread_data *td; 364 + struct task_struct *tsk; 365 char *curr = buf; 366 int i; 367 368 td = container_of(dev, struct test_thread_data, sysdev);
+6 -5
kernel/rtmutex.c
··· 157 * Decreases task's usage by one - may thus free the task. 158 * Returns 0 or -EDEADLK. 159 */ 160 - static int rt_mutex_adjust_prio_chain(task_t *task, 161 int deadlock_detect, 162 struct rt_mutex *orig_lock, 163 struct rt_mutex_waiter *orig_waiter, ··· 282 spin_unlock_irqrestore(&task->pi_lock, flags); 283 out_put_task: 284 put_task_struct(task); 285 return ret; 286 } 287 ··· 404 struct rt_mutex_waiter *waiter, 405 int detect_deadlock) 406 { 407 struct rt_mutex_waiter *top_waiter = waiter; 408 - task_t *owner = rt_mutex_owner(lock); 409 - int boost = 0, res; 410 unsigned long flags; 411 412 spin_lock_irqsave(&current->pi_lock, flags); 413 __rt_mutex_adjust_prio(current); ··· 528 struct rt_mutex_waiter *waiter) 529 { 530 int first = (waiter == rt_mutex_top_waiter(lock)); 531 - int boost = 0; 532 - task_t *owner = rt_mutex_owner(lock); 533 unsigned long flags; 534 535 spin_lock_irqsave(&current->pi_lock, flags); 536 plist_del(&waiter->list_entry, &lock->wait_list);
··· 157 * Decreases task's usage by one - may thus free the task. 158 * Returns 0 or -EDEADLK. 159 */ 160 + static int rt_mutex_adjust_prio_chain(struct task_struct *task, 161 int deadlock_detect, 162 struct rt_mutex *orig_lock, 163 struct rt_mutex_waiter *orig_waiter, ··· 282 spin_unlock_irqrestore(&task->pi_lock, flags); 283 out_put_task: 284 put_task_struct(task); 285 + 286 return ret; 287 } 288 ··· 403 struct rt_mutex_waiter *waiter, 404 int detect_deadlock) 405 { 406 + struct task_struct *owner = rt_mutex_owner(lock); 407 struct rt_mutex_waiter *top_waiter = waiter; 408 unsigned long flags; 409 + int boost = 0, res; 410 411 spin_lock_irqsave(&current->pi_lock, flags); 412 __rt_mutex_adjust_prio(current); ··· 527 struct rt_mutex_waiter *waiter) 528 { 529 int first = (waiter == rt_mutex_top_waiter(lock)); 530 + struct task_struct *owner = rt_mutex_owner(lock); 531 unsigned long flags; 532 + int boost = 0; 533 534 spin_lock_irqsave(&current->pi_lock, flags); 535 plist_del(&waiter->list_entry, &lock->wait_list);
+101 -91
kernel/sched.c
··· 179 return SCALE_PRIO(DEF_TIMESLICE, static_prio); 180 } 181 182 - static inline unsigned int task_timeslice(task_t *p) 183 { 184 return static_prio_timeslice(p->static_prio); 185 } ··· 227 228 unsigned long expired_timestamp; 229 unsigned long long timestamp_last_tick; 230 - task_t *curr, *idle; 231 struct mm_struct *prev_mm; 232 prio_array_t *active, *expired, arrays[2]; 233 int best_expired_prio; ··· 240 int active_balance; 241 int push_cpu; 242 243 - task_t *migration_thread; 244 struct list_head migration_queue; 245 #endif 246 ··· 291 #endif 292 293 #ifndef __ARCH_WANT_UNLOCKED_CTXSW 294 - static inline int task_running(runqueue_t *rq, task_t *p) 295 { 296 return rq->curr == p; 297 } 298 299 - static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) 300 { 301 } 302 303 - static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) 304 { 305 #ifdef CONFIG_DEBUG_SPINLOCK 306 /* this is a valid case when another task releases the spinlock */ ··· 317 } 318 319 #else /* __ARCH_WANT_UNLOCKED_CTXSW */ 320 - static inline int task_running(runqueue_t *rq, task_t *p) 321 { 322 #ifdef CONFIG_SMP 323 return p->oncpu; ··· 326 #endif 327 } 328 329 - static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) 330 { 331 #ifdef CONFIG_SMP 332 /* ··· 343 #endif 344 } 345 346 - static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) 347 { 348 #ifdef CONFIG_SMP 349 /* ··· 364 * __task_rq_lock - lock the runqueue a given task resides on. 365 * Must be called interrupts disabled. 366 */ 367 - static inline runqueue_t *__task_rq_lock(task_t *p) 368 __acquires(rq->lock) 369 { 370 struct runqueue *rq; ··· 384 * interrupts. Note the ordering: we can safely lookup the task_rq without 385 * explicitly disabling preemption. 386 */ 387 - static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) 388 __acquires(rq->lock) 389 { 390 struct runqueue *rq; ··· 541 * long it was from the *first* time it was queued to the time that it 542 * finally hit a cpu. 543 */ 544 - static inline void sched_info_dequeued(task_t *t) 545 { 546 t->sched_info.last_queued = 0; 547 } ··· 551 * long it was waiting to run. We also note when it began so that we 552 * can keep stats on how long its timeslice is. 553 */ 554 - static void sched_info_arrive(task_t *t) 555 { 556 unsigned long now = jiffies, diff = 0; 557 struct runqueue *rq = task_rq(t); ··· 585 * the timestamp if it is already not set. It's assumed that 586 * sched_info_dequeued() will clear that stamp when appropriate. 587 */ 588 - static inline void sched_info_queued(task_t *t) 589 { 590 if (!t->sched_info.last_queued) 591 t->sched_info.last_queued = jiffies; ··· 595 * Called when a process ceases being the active-running process, either 596 * voluntarily or involuntarily. Now we can calculate how long we ran. 597 */ 598 - static inline void sched_info_depart(task_t *t) 599 { 600 struct runqueue *rq = task_rq(t); 601 unsigned long diff = jiffies - t->sched_info.last_arrival; ··· 611 * their time slice. (This may also be called when switching to or from 612 * the idle task.) We are only called when prev != next. 613 */ 614 - static inline void sched_info_switch(task_t *prev, task_t *next) 615 { 616 struct runqueue *rq = task_rq(prev); 617 ··· 684 * Both properties are important to certain workloads. 
685 */ 686 687 - static inline int __normal_prio(task_t *p) 688 { 689 int bonus, prio; 690 ··· 720 #define RTPRIO_TO_LOAD_WEIGHT(rp) \ 721 (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) 722 723 - static void set_load_weight(task_t *p) 724 { 725 if (has_rt_policy(p)) { 726 #ifdef CONFIG_SMP ··· 738 p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); 739 } 740 741 - static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p) 742 { 743 rq->raw_weighted_load += p->load_weight; 744 } 745 746 - static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p) 747 { 748 rq->raw_weighted_load -= p->load_weight; 749 } 750 751 - static inline void inc_nr_running(task_t *p, runqueue_t *rq) 752 { 753 rq->nr_running++; 754 inc_raw_weighted_load(rq, p); 755 } 756 757 - static inline void dec_nr_running(task_t *p, runqueue_t *rq) 758 { 759 rq->nr_running--; 760 dec_raw_weighted_load(rq, p); ··· 769 * setprio syscalls, and whenever the interactivity 770 * estimator recalculates. 771 */ 772 - static inline int normal_prio(task_t *p) 773 { 774 int prio; 775 ··· 787 * interactivity modifiers. Will be RT if the task got 788 * RT-boosted. If not then it returns p->normal_prio. 789 */ 790 - static int effective_prio(task_t *p) 791 { 792 p->normal_prio = normal_prio(p); 793 /* ··· 803 /* 804 * __activate_task - move a task to the runqueue. 805 */ 806 - static void __activate_task(task_t *p, runqueue_t *rq) 807 { 808 prio_array_t *target = rq->active; 809 ··· 816 /* 817 * __activate_idle_task - move idle task to the _front_ of runqueue. 818 */ 819 - static inline void __activate_idle_task(task_t *p, runqueue_t *rq) 820 { 821 enqueue_task_head(p, rq->active); 822 inc_nr_running(p, rq); ··· 826 * Recalculate p->normal_prio and p->prio after having slept, 827 * updating the sleep-average too: 828 */ 829 - static int recalc_task_prio(task_t *p, unsigned long long now) 830 { 831 /* Caller must always ensure 'now >= p->timestamp' */ 832 unsigned long sleep_time = now - p->timestamp; ··· 898 * Update all the scheduling statistics stuff. (sleep average 899 * calculation, priority modifiers, etc.) 900 */ 901 - static void activate_task(task_t *p, runqueue_t *rq, int local) 902 { 903 unsigned long long now; 904 ··· 965 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 966 #endif 967 968 - static void resched_task(task_t *p) 969 { 970 int cpu; 971 ··· 986 smp_send_reschedule(cpu); 987 } 988 #else 989 - static inline void resched_task(task_t *p) 990 { 991 assert_spin_locked(&task_rq(p)->lock); 992 set_tsk_need_resched(p); ··· 997 * task_curr - is this task currently executing on a CPU? 998 * @p: the task in question. 999 */ 1000 - inline int task_curr(const task_t *p) 1001 { 1002 return cpu_curr(task_cpu(p)) == p; 1003 } ··· 1012 typedef struct { 1013 struct list_head list; 1014 1015 - task_t *task; 1016 int dest_cpu; 1017 1018 struct completion done; ··· 1022 * The task's runqueue lock must be held. 1023 * Returns true if you have to wait for migration thread. 1024 */ 1025 - static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) 1026 { 1027 runqueue_t *rq = task_rq(p); 1028 ··· 1053 * smp_call_function() if an IPI is sent by the same process we are 1054 * waiting to become inactive. 1055 */ 1056 - void wait_task_inactive(task_t *p) 1057 { 1058 unsigned long flags; 1059 runqueue_t *rq; ··· 1087 * to another CPU then no harm is done and the purpose has been 1088 * achieved as well. 
1089 */ 1090 - void kick_process(task_t *p) 1091 { 1092 int cpu; 1093 ··· 1290 * Returns the CPU we should wake onto. 1291 */ 1292 #if defined(ARCH_HAS_SCHED_WAKE_IDLE) 1293 - static int wake_idle(int cpu, task_t *p) 1294 { 1295 cpumask_t tmp; 1296 struct sched_domain *sd; ··· 1313 return cpu; 1314 } 1315 #else 1316 - static inline int wake_idle(int cpu, task_t *p) 1317 { 1318 return cpu; 1319 } ··· 1333 * 1334 * returns failure only if the task is already active. 1335 */ 1336 - static int try_to_wake_up(task_t *p, unsigned int state, int sync) 1337 { 1338 int cpu, this_cpu, success = 0; 1339 unsigned long flags; ··· 1491 return success; 1492 } 1493 1494 - int fastcall wake_up_process(task_t *p) 1495 { 1496 return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | 1497 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); 1498 } 1499 EXPORT_SYMBOL(wake_up_process); 1500 1501 - int fastcall wake_up_state(task_t *p, unsigned int state) 1502 { 1503 return try_to_wake_up(p, state, 0); 1504 } ··· 1507 * Perform scheduler related setup for a newly forked process p. 1508 * p is forked by current. 1509 */ 1510 - void fastcall sched_fork(task_t *p, int clone_flags) 1511 { 1512 int cpu = get_cpu(); 1513 ··· 1575 * that must be done for every newly created context, then puts the task 1576 * on the runqueue and wakes it. 1577 */ 1578 - void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) 1579 { 1580 unsigned long flags; 1581 int this_cpu, cpu; ··· 1659 * artificially, because any timeslice recovered here 1660 * was given away by the parent in the first place.) 1661 */ 1662 - void fastcall sched_exit(task_t *p) 1663 { 1664 unsigned long flags; 1665 runqueue_t *rq; ··· 1693 * prepare_task_switch sets up locking and calls architecture specific 1694 * hooks. 1695 */ 1696 - static inline void prepare_task_switch(runqueue_t *rq, task_t *next) 1697 { 1698 prepare_lock_switch(rq, next); 1699 prepare_arch_switch(next); ··· 1714 * with the lock held can cause deadlocks; see schedule() for 1715 * details.) 1716 */ 1717 - static inline void finish_task_switch(runqueue_t *rq, task_t *prev) 1718 __releases(rq->lock) 1719 { 1720 struct mm_struct *mm = rq->prev_mm; ··· 1752 * schedule_tail - first thing a freshly forked thread must call. 1753 * @prev: the thread we just switched away from. 1754 */ 1755 - asmlinkage void schedule_tail(task_t *prev) 1756 __releases(rq->lock) 1757 { 1758 runqueue_t *rq = this_rq(); ··· 1769 * context_switch - switch to the new MM and the new 1770 * thread's register state. 1771 */ 1772 - static inline 1773 - task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next) 1774 { 1775 struct mm_struct *mm = next->mm; 1776 struct mm_struct *oldmm = prev->active_mm; ··· 1942 * allow dest_cpu, which will force the cpu onto dest_cpu. Then 1943 * the cpu_allowed mask is restored. 1944 */ 1945 - static void sched_migrate_task(task_t *p, int dest_cpu) 1946 { 1947 migration_req_t req; 1948 runqueue_t *rq; ··· 1957 if (migrate_task(p, dest_cpu, &req)) { 1958 /* Need to wait for migration thread (might exit: take ref). */ 1959 struct task_struct *mt = rq->migration_thread; 1960 get_task_struct(mt); 1961 task_rq_unlock(rq, &flags); 1962 wake_up_process(mt); 1963 put_task_struct(mt); 1964 wait_for_completion(&req.done); 1965 return; 1966 } 1967 out: ··· 1987 * pull_task - move a task from a remote runqueue to the local runqueue. 1988 * Both runqueues must be locked. 
1989 */ 1990 - static 1991 - void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, 1992 - runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) 1993 { 1994 dequeue_task(p, src_array); 1995 dec_nr_running(p, src_rq); ··· 2010 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 2011 */ 2012 static 2013 - int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, 2014 struct sched_domain *sd, enum idle_type idle, 2015 int *all_pinned) 2016 { ··· 2059 best_prio_seen, skip_for_load; 2060 prio_array_t *array, *dst_array; 2061 struct list_head *head, *curr; 2062 long rem_load_move; 2063 - task_t *tmp; 2064 2065 if (max_nr_move == 0 || max_load_move == 0) 2066 goto out; ··· 2112 head = array->queue + idx; 2113 curr = head->prev; 2114 skip_queue: 2115 - tmp = list_entry(curr, task_t, run_list); 2116 2117 curr = curr->prev; 2118 ··· 2826 * Bank in p->sched_time the ns elapsed since the last tick or switch. 2827 */ 2828 static inline void 2829 - update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) 2830 { 2831 p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); 2832 } ··· 2835 * Return current->sched_time plus any more ns on the sched_clock 2836 * that have not yet been banked. 2837 */ 2838 - unsigned long long current_sched_time(const task_t *p) 2839 { 2840 unsigned long long ns; 2841 unsigned long flags; ··· 2952 void scheduler_tick(void) 2953 { 2954 unsigned long long now = sched_clock(); 2955 int cpu = smp_processor_id(); 2956 runqueue_t *rq = this_rq(); 2957 - task_t *p = current; 2958 2959 update_cpu_clock(p, rq, now); 2960 ··· 3086 * utilize, if another task runs on a sibling. This models the 3087 * slowdown effect of other tasks running on siblings: 3088 */ 3089 - static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) 3090 { 3091 return p->time_slice * (100 - sd->per_cpu_gain) / 100; 3092 } ··· 3098 * acquire their lock. As we only trylock the normal locking order does not 3099 * need to be obeyed. 3100 */ 3101 - static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) 3102 { 3103 struct sched_domain *tmp, *sd = NULL; 3104 int ret = 0, i; ··· 3119 return 0; 3120 3121 for_each_cpu_mask(i, sd->span) { 3122 runqueue_t *smt_rq; 3123 - task_t *smt_curr; 3124 3125 if (i == this_cpu) 3126 continue; ··· 3166 { 3167 } 3168 static inline int 3169 - dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) 3170 { 3171 return 0; 3172 } ··· 3220 */ 3221 asmlinkage void __sched schedule(void) 3222 { 3223 struct list_head *queue; 3224 unsigned long long now; 3225 unsigned long run_time; 3226 int cpu, idx, new_prio; 3227 - task_t *prev, *next; 3228 prio_array_t *array; 3229 long *switch_count; 3230 runqueue_t *rq; ··· 3317 3318 idx = sched_find_first_bit(array->bitmap); 3319 queue = array->queue + idx; 3320 - next = list_entry(queue->next, task_t, run_list); 3321 3322 if (!rt_task(next) && interactive_sleep(next->sleep_type)) { 3323 unsigned long long delta = now - next->timestamp; ··· 3785 * 3786 * Used by the rt_mutex code to implement priority inheritance logic. 
3787 */ 3788 - void rt_mutex_setprio(task_t *p, int prio) 3789 { 3790 unsigned long flags; 3791 prio_array_t *array; ··· 3826 3827 #endif 3828 3829 - void set_user_nice(task_t *p, long nice) 3830 { 3831 int old_prio, delta; 3832 unsigned long flags; ··· 3882 * @p: task 3883 * @nice: nice value 3884 */ 3885 - int can_nice(const task_t *p, const int nice) 3886 { 3887 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3888 int nice_rlim = 20 - nice; ··· 3941 * RT tasks are offset by -200. Normal tasks are centered 3942 * around 0, value goes from -16 to +15. 3943 */ 3944 - int task_prio(const task_t *p) 3945 { 3946 return p->prio - MAX_RT_PRIO; 3947 } ··· 3950 * task_nice - return the nice value of a given task. 3951 * @p: the task in question. 3952 */ 3953 - int task_nice(const task_t *p) 3954 { 3955 return TASK_NICE(p); 3956 } ··· 3969 * idle_task - return the idle task for a given cpu. 3970 * @cpu: the processor in question. 3971 */ 3972 - task_t *idle_task(int cpu) 3973 { 3974 return cpu_rq(cpu)->idle; 3975 } ··· 3978 * find_process_by_pid - find a process with a matching PID value. 3979 * @pid: the pid in question. 3980 */ 3981 - static inline task_t *find_process_by_pid(pid_t pid) 3982 { 3983 return pid ? find_task_by_pid(pid) : current; 3984 } ··· 4112 static int 4113 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4114 { 4115 - int retval; 4116 struct sched_param lparam; 4117 struct task_struct *p; 4118 4119 if (!param || pid < 0) 4120 return -EINVAL; ··· 4130 read_unlock_irq(&tasklist_lock); 4131 retval = sched_setscheduler(p, policy, &lparam); 4132 put_task_struct(p); 4133 return retval; 4134 } 4135 ··· 4166 */ 4167 asmlinkage long sys_sched_getscheduler(pid_t pid) 4168 { 4169 int retval = -EINVAL; 4170 - task_t *p; 4171 4172 if (pid < 0) 4173 goto out_nounlock; ··· 4194 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) 4195 { 4196 struct sched_param lp; 4197 int retval = -EINVAL; 4198 - task_t *p; 4199 4200 if (!param || pid < 0) 4201 goto out_nounlock; ··· 4228 4229 long sched_setaffinity(pid_t pid, cpumask_t new_mask) 4230 { 4231 - task_t *p; 4232 - int retval; 4233 cpumask_t cpus_allowed; 4234 4235 lock_cpu_hotplug(); 4236 read_lock(&tasklist_lock); ··· 4316 4317 long sched_getaffinity(pid_t pid, cpumask_t *mask) 4318 { 4319 int retval; 4320 - task_t *p; 4321 4322 lock_cpu_hotplug(); 4323 read_lock(&tasklist_lock); ··· 4602 asmlinkage 4603 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) 4604 { 4605 int retval = -EINVAL; 4606 struct timespec t; 4607 - task_t *p; 4608 4609 if (pid < 0) 4610 goto out_nounlock; ··· 4651 return list_entry(p->sibling.next,struct task_struct,sibling); 4652 } 4653 4654 - static void show_task(task_t *p) 4655 { 4656 - task_t *relative; 4657 - unsigned state; 4658 unsigned long free = 0; 4659 - static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; 4660 4661 printk("%-13.13s ", p->comm); 4662 state = p->state ? __ffs(p->state) + 1 : 0; ··· 4708 4709 void show_state(void) 4710 { 4711 - task_t *g, *p; 4712 4713 #if (BITS_PER_LONG == 32) 4714 printk("\n" ··· 4741 * NOTE: this function does not set the idle thread's NEED_RESCHED 4742 * flag, to make booting more robust. 4743 */ 4744 - void __devinit init_idle(task_t *idle, int cpu) 4745 { 4746 runqueue_t *rq = cpu_rq(cpu); 4747 unsigned long flags; ··· 4804 * task must not exit() & deallocate itself prematurely. The 4805 * call is not atomic; no spinlocks may be held. 
4806 */ 4807 - int set_cpus_allowed(task_t *p, cpumask_t new_mask) 4808 { 4809 unsigned long flags; 4810 migration_req_t req; ··· 5072 mmdrop(mm); 5073 } 5074 5075 - static void migrate_dead(unsigned int dead_cpu, task_t *p) 5076 { 5077 struct runqueue *rq = cpu_rq(dead_cpu); 5078 ··· 5107 struct list_head *list = &rq->arrays[arr].queue[i]; 5108 5109 while (!list_empty(list)) 5110 - migrate_dead(dead_cpu, 5111 - list_entry(list->next, task_t, 5112 - run_list)); 5113 } 5114 } 5115 } ··· 6811 * 6812 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6813 */ 6814 - task_t *curr_task(int cpu) 6815 { 6816 return cpu_curr(cpu); 6817 } ··· 6831 * 6832 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6833 */ 6834 - void set_curr_task(int cpu, task_t *p) 6835 { 6836 cpu_curr(cpu) = p; 6837 }
··· 179 return SCALE_PRIO(DEF_TIMESLICE, static_prio); 180 } 181 182 + static inline unsigned int task_timeslice(struct task_struct *p) 183 { 184 return static_prio_timeslice(p->static_prio); 185 } ··· 227 228 unsigned long expired_timestamp; 229 unsigned long long timestamp_last_tick; 230 + struct task_struct *curr, *idle; 231 struct mm_struct *prev_mm; 232 prio_array_t *active, *expired, arrays[2]; 233 int best_expired_prio; ··· 240 int active_balance; 241 int push_cpu; 242 243 + struct task_struct *migration_thread; 244 struct list_head migration_queue; 245 #endif 246 ··· 291 #endif 292 293 #ifndef __ARCH_WANT_UNLOCKED_CTXSW 294 + static inline int task_running(runqueue_t *rq, struct task_struct *p) 295 { 296 return rq->curr == p; 297 } 298 299 + static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) 300 { 301 } 302 303 + static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) 304 { 305 #ifdef CONFIG_DEBUG_SPINLOCK 306 /* this is a valid case when another task releases the spinlock */ ··· 317 } 318 319 #else /* __ARCH_WANT_UNLOCKED_CTXSW */ 320 + static inline int task_running(runqueue_t *rq, struct task_struct *p) 321 { 322 #ifdef CONFIG_SMP 323 return p->oncpu; ··· 326 #endif 327 } 328 329 + static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) 330 { 331 #ifdef CONFIG_SMP 332 /* ··· 343 #endif 344 } 345 346 + static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) 347 { 348 #ifdef CONFIG_SMP 349 /* ··· 364 * __task_rq_lock - lock the runqueue a given task resides on. 365 * Must be called interrupts disabled. 366 */ 367 + static inline runqueue_t *__task_rq_lock(struct task_struct *p) 368 __acquires(rq->lock) 369 { 370 struct runqueue *rq; ··· 384 * interrupts. Note the ordering: we can safely lookup the task_rq without 385 * explicitly disabling preemption. 386 */ 387 + static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags) 388 __acquires(rq->lock) 389 { 390 struct runqueue *rq; ··· 541 * long it was from the *first* time it was queued to the time that it 542 * finally hit a cpu. 543 */ 544 + static inline void sched_info_dequeued(struct task_struct *t) 545 { 546 t->sched_info.last_queued = 0; 547 } ··· 551 * long it was waiting to run. We also note when it began so that we 552 * can keep stats on how long its timeslice is. 553 */ 554 + static void sched_info_arrive(struct task_struct *t) 555 { 556 unsigned long now = jiffies, diff = 0; 557 struct runqueue *rq = task_rq(t); ··· 585 * the timestamp if it is already not set. It's assumed that 586 * sched_info_dequeued() will clear that stamp when appropriate. 587 */ 588 + static inline void sched_info_queued(struct task_struct *t) 589 { 590 if (!t->sched_info.last_queued) 591 t->sched_info.last_queued = jiffies; ··· 595 * Called when a process ceases being the active-running process, either 596 * voluntarily or involuntarily. Now we can calculate how long we ran. 597 */ 598 + static inline void sched_info_depart(struct task_struct *t) 599 { 600 struct runqueue *rq = task_rq(t); 601 unsigned long diff = jiffies - t->sched_info.last_arrival; ··· 611 * their time slice. (This may also be called when switching to or from 612 * the idle task.) We are only called when prev != next. 613 */ 614 + static inline void 615 + sched_info_switch(struct task_struct *prev, struct task_struct *next) 616 { 617 struct runqueue *rq = task_rq(prev); 618 ··· 683 * Both properties are important to certain workloads. 
684 */ 685 686 + static inline int __normal_prio(struct task_struct *p) 687 { 688 int bonus, prio; 689 ··· 719 #define RTPRIO_TO_LOAD_WEIGHT(rp) \ 720 (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) 721 722 + static void set_load_weight(struct task_struct *p) 723 { 724 if (has_rt_policy(p)) { 725 #ifdef CONFIG_SMP ··· 737 p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); 738 } 739 740 + static inline void 741 + inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) 742 { 743 rq->raw_weighted_load += p->load_weight; 744 } 745 746 + static inline void 747 + dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) 748 { 749 rq->raw_weighted_load -= p->load_weight; 750 } 751 752 + static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq) 753 { 754 rq->nr_running++; 755 inc_raw_weighted_load(rq, p); 756 } 757 758 + static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq) 759 { 760 rq->nr_running--; 761 dec_raw_weighted_load(rq, p); ··· 766 * setprio syscalls, and whenever the interactivity 767 * estimator recalculates. 768 */ 769 + static inline int normal_prio(struct task_struct *p) 770 { 771 int prio; 772 ··· 784 * interactivity modifiers. Will be RT if the task got 785 * RT-boosted. If not then it returns p->normal_prio. 786 */ 787 + static int effective_prio(struct task_struct *p) 788 { 789 p->normal_prio = normal_prio(p); 790 /* ··· 800 /* 801 * __activate_task - move a task to the runqueue. 802 */ 803 + static void __activate_task(struct task_struct *p, runqueue_t *rq) 804 { 805 prio_array_t *target = rq->active; 806 ··· 813 /* 814 * __activate_idle_task - move idle task to the _front_ of runqueue. 815 */ 816 + static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq) 817 { 818 enqueue_task_head(p, rq->active); 819 inc_nr_running(p, rq); ··· 823 * Recalculate p->normal_prio and p->prio after having slept, 824 * updating the sleep-average too: 825 */ 826 + static int recalc_task_prio(struct task_struct *p, unsigned long long now) 827 { 828 /* Caller must always ensure 'now >= p->timestamp' */ 829 unsigned long sleep_time = now - p->timestamp; ··· 895 * Update all the scheduling statistics stuff. (sleep average 896 * calculation, priority modifiers, etc.) 897 */ 898 + static void activate_task(struct task_struct *p, runqueue_t *rq, int local) 899 { 900 unsigned long long now; 901 ··· 962 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 963 #endif 964 965 + static void resched_task(struct task_struct *p) 966 { 967 int cpu; 968 ··· 983 smp_send_reschedule(cpu); 984 } 985 #else 986 + static inline void resched_task(struct task_struct *p) 987 { 988 assert_spin_locked(&task_rq(p)->lock); 989 set_tsk_need_resched(p); ··· 994 * task_curr - is this task currently executing on a CPU? 995 * @p: the task in question. 996 */ 997 + inline int task_curr(const struct task_struct *p) 998 { 999 return cpu_curr(task_cpu(p)) == p; 1000 } ··· 1009 typedef struct { 1010 struct list_head list; 1011 1012 + struct task_struct *task; 1013 int dest_cpu; 1014 1015 struct completion done; ··· 1019 * The task's runqueue lock must be held. 1020 * Returns true if you have to wait for migration thread. 1021 */ 1022 + static int 1023 + migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req) 1024 { 1025 runqueue_t *rq = task_rq(p); 1026 ··· 1049 * smp_call_function() if an IPI is sent by the same process we are 1050 * waiting to become inactive. 
1051 */ 1052 + void wait_task_inactive(struct task_struct *p) 1053 { 1054 unsigned long flags; 1055 runqueue_t *rq; ··· 1083 * to another CPU then no harm is done and the purpose has been 1084 * achieved as well. 1085 */ 1086 + void kick_process(struct task_struct *p) 1087 { 1088 int cpu; 1089 ··· 1286 * Returns the CPU we should wake onto. 1287 */ 1288 #if defined(ARCH_HAS_SCHED_WAKE_IDLE) 1289 + static int wake_idle(int cpu, struct task_struct *p) 1290 { 1291 cpumask_t tmp; 1292 struct sched_domain *sd; ··· 1309 return cpu; 1310 } 1311 #else 1312 + static inline int wake_idle(int cpu, struct task_struct *p) 1313 { 1314 return cpu; 1315 } ··· 1329 * 1330 * returns failure only if the task is already active. 1331 */ 1332 + static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) 1333 { 1334 int cpu, this_cpu, success = 0; 1335 unsigned long flags; ··· 1487 return success; 1488 } 1489 1490 + int fastcall wake_up_process(struct task_struct *p) 1491 { 1492 return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | 1493 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); 1494 } 1495 EXPORT_SYMBOL(wake_up_process); 1496 1497 + int fastcall wake_up_state(struct task_struct *p, unsigned int state) 1498 { 1499 return try_to_wake_up(p, state, 0); 1500 } ··· 1503 * Perform scheduler related setup for a newly forked process p. 1504 * p is forked by current. 1505 */ 1506 + void fastcall sched_fork(struct task_struct *p, int clone_flags) 1507 { 1508 int cpu = get_cpu(); 1509 ··· 1571 * that must be done for every newly created context, then puts the task 1572 * on the runqueue and wakes it. 1573 */ 1574 + void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) 1575 { 1576 unsigned long flags; 1577 int this_cpu, cpu; ··· 1655 * artificially, because any timeslice recovered here 1656 * was given away by the parent in the first place.) 1657 */ 1658 + void fastcall sched_exit(struct task_struct *p) 1659 { 1660 unsigned long flags; 1661 runqueue_t *rq; ··· 1689 * prepare_task_switch sets up locking and calls architecture specific 1690 * hooks. 1691 */ 1692 + static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) 1693 { 1694 prepare_lock_switch(rq, next); 1695 prepare_arch_switch(next); ··· 1710 * with the lock held can cause deadlocks; see schedule() for 1711 * details.) 1712 */ 1713 + static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) 1714 __releases(rq->lock) 1715 { 1716 struct mm_struct *mm = rq->prev_mm; ··· 1748 * schedule_tail - first thing a freshly forked thread must call. 1749 * @prev: the thread we just switched away from. 1750 */ 1751 + asmlinkage void schedule_tail(struct task_struct *prev) 1752 __releases(rq->lock) 1753 { 1754 runqueue_t *rq = this_rq(); ··· 1765 * context_switch - switch to the new MM and the new 1766 * thread's register state. 1767 */ 1768 + static inline struct task_struct * 1769 + context_switch(runqueue_t *rq, struct task_struct *prev, 1770 + struct task_struct *next) 1771 { 1772 struct mm_struct *mm = next->mm; 1773 struct mm_struct *oldmm = prev->active_mm; ··· 1937 * allow dest_cpu, which will force the cpu onto dest_cpu. Then 1938 * the cpu_allowed mask is restored. 1939 */ 1940 + static void sched_migrate_task(struct task_struct *p, int dest_cpu) 1941 { 1942 migration_req_t req; 1943 runqueue_t *rq; ··· 1952 if (migrate_task(p, dest_cpu, &req)) { 1953 /* Need to wait for migration thread (might exit: take ref). 
*/ 1954 struct task_struct *mt = rq->migration_thread; 1955 + 1956 get_task_struct(mt); 1957 task_rq_unlock(rq, &flags); 1958 wake_up_process(mt); 1959 put_task_struct(mt); 1960 wait_for_completion(&req.done); 1961 + 1962 return; 1963 } 1964 out: ··· 1980 * pull_task - move a task from a remote runqueue to the local runqueue. 1981 * Both runqueues must be locked. 1982 */ 1983 + static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, 1984 + struct task_struct *p, runqueue_t *this_rq, 1985 + prio_array_t *this_array, int this_cpu) 1986 { 1987 dequeue_task(p, src_array); 1988 dec_nr_running(p, src_rq); ··· 2003 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 2004 */ 2005 static 2006 + int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, 2007 struct sched_domain *sd, enum idle_type idle, 2008 int *all_pinned) 2009 { ··· 2052 best_prio_seen, skip_for_load; 2053 prio_array_t *array, *dst_array; 2054 struct list_head *head, *curr; 2055 + struct task_struct *tmp; 2056 long rem_load_move; 2057 2058 if (max_nr_move == 0 || max_load_move == 0) 2059 goto out; ··· 2105 head = array->queue + idx; 2106 curr = head->prev; 2107 skip_queue: 2108 + tmp = list_entry(curr, struct task_struct, run_list); 2109 2110 curr = curr->prev; 2111 ··· 2819 * Bank in p->sched_time the ns elapsed since the last tick or switch. 2820 */ 2821 static inline void 2822 + update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now) 2823 { 2824 p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); 2825 } ··· 2828 * Return current->sched_time plus any more ns on the sched_clock 2829 * that have not yet been banked. 2830 */ 2831 + unsigned long long current_sched_time(const struct task_struct *p) 2832 { 2833 unsigned long long ns; 2834 unsigned long flags; ··· 2945 void scheduler_tick(void) 2946 { 2947 unsigned long long now = sched_clock(); 2948 + struct task_struct *p = current; 2949 int cpu = smp_processor_id(); 2950 runqueue_t *rq = this_rq(); 2951 2952 update_cpu_clock(p, rq, now); 2953 ··· 3079 * utilize, if another task runs on a sibling. This models the 3080 * slowdown effect of other tasks running on siblings: 3081 */ 3082 + static inline unsigned long 3083 + smt_slice(struct task_struct *p, struct sched_domain *sd) 3084 { 3085 return p->time_slice * (100 - sd->per_cpu_gain) / 100; 3086 } ··· 3090 * acquire their lock. As we only trylock the normal locking order does not 3091 * need to be obeyed. 
3092 */ 3093 + static int 3094 + dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) 3095 { 3096 struct sched_domain *tmp, *sd = NULL; 3097 int ret = 0, i; ··· 3110 return 0; 3111 3112 for_each_cpu_mask(i, sd->span) { 3113 + struct task_struct *smt_curr; 3114 runqueue_t *smt_rq; 3115 3116 if (i == this_cpu) 3117 continue; ··· 3157 { 3158 } 3159 static inline int 3160 + dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) 3161 { 3162 return 0; 3163 } ··· 3211 */ 3212 asmlinkage void __sched schedule(void) 3213 { 3214 + struct task_struct *prev, *next; 3215 struct list_head *queue; 3216 unsigned long long now; 3217 unsigned long run_time; 3218 int cpu, idx, new_prio; 3219 prio_array_t *array; 3220 long *switch_count; 3221 runqueue_t *rq; ··· 3308 3309 idx = sched_find_first_bit(array->bitmap); 3310 queue = array->queue + idx; 3311 + next = list_entry(queue->next, struct task_struct, run_list); 3312 3313 if (!rt_task(next) && interactive_sleep(next->sleep_type)) { 3314 unsigned long long delta = now - next->timestamp; ··· 3776 * 3777 * Used by the rt_mutex code to implement priority inheritance logic. 3778 */ 3779 + void rt_mutex_setprio(struct task_struct *p, int prio) 3780 { 3781 unsigned long flags; 3782 prio_array_t *array; ··· 3817 3818 #endif 3819 3820 + void set_user_nice(struct task_struct *p, long nice) 3821 { 3822 int old_prio, delta; 3823 unsigned long flags; ··· 3873 * @p: task 3874 * @nice: nice value 3875 */ 3876 + int can_nice(const struct task_struct *p, const int nice) 3877 { 3878 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3879 int nice_rlim = 20 - nice; ··· 3932 * RT tasks are offset by -200. Normal tasks are centered 3933 * around 0, value goes from -16 to +15. 3934 */ 3935 + int task_prio(const struct task_struct *p) 3936 { 3937 return p->prio - MAX_RT_PRIO; 3938 } ··· 3941 * task_nice - return the nice value of a given task. 3942 * @p: the task in question. 3943 */ 3944 + int task_nice(const struct task_struct *p) 3945 { 3946 return TASK_NICE(p); 3947 } ··· 3960 * idle_task - return the idle task for a given cpu. 3961 * @cpu: the processor in question. 3962 */ 3963 + struct task_struct *idle_task(int cpu) 3964 { 3965 return cpu_rq(cpu)->idle; 3966 } ··· 3969 * find_process_by_pid - find a process with a matching PID value. 3970 * @pid: the pid in question. 3971 */ 3972 + static inline struct task_struct *find_process_by_pid(pid_t pid) 3973 { 3974 return pid ? 
find_task_by_pid(pid) : current; 3975 } ··· 4103 static int 4104 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4105 { 4106 struct sched_param lparam; 4107 struct task_struct *p; 4108 + int retval; 4109 4110 if (!param || pid < 0) 4111 return -EINVAL; ··· 4121 read_unlock_irq(&tasklist_lock); 4122 retval = sched_setscheduler(p, policy, &lparam); 4123 put_task_struct(p); 4124 + 4125 return retval; 4126 } 4127 ··· 4156 */ 4157 asmlinkage long sys_sched_getscheduler(pid_t pid) 4158 { 4159 + struct task_struct *p; 4160 int retval = -EINVAL; 4161 4162 if (pid < 0) 4163 goto out_nounlock; ··· 4184 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) 4185 { 4186 struct sched_param lp; 4187 + struct task_struct *p; 4188 int retval = -EINVAL; 4189 4190 if (!param || pid < 0) 4191 goto out_nounlock; ··· 4218 4219 long sched_setaffinity(pid_t pid, cpumask_t new_mask) 4220 { 4221 cpumask_t cpus_allowed; 4222 + struct task_struct *p; 4223 + int retval; 4224 4225 lock_cpu_hotplug(); 4226 read_lock(&tasklist_lock); ··· 4306 4307 long sched_getaffinity(pid_t pid, cpumask_t *mask) 4308 { 4309 + struct task_struct *p; 4310 int retval; 4311 4312 lock_cpu_hotplug(); 4313 read_lock(&tasklist_lock); ··· 4592 asmlinkage 4593 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) 4594 { 4595 + struct task_struct *p; 4596 int retval = -EINVAL; 4597 struct timespec t; 4598 4599 if (pid < 0) 4600 goto out_nounlock; ··· 4641 return list_entry(p->sibling.next,struct task_struct,sibling); 4642 } 4643 4644 + static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; 4645 + 4646 + static void show_task(struct task_struct *p) 4647 { 4648 + struct task_struct *relative; 4649 unsigned long free = 0; 4650 + unsigned state; 4651 4652 printk("%-13.13s ", p->comm); 4653 state = p->state ? __ffs(p->state) + 1 : 0; ··· 4697 4698 void show_state(void) 4699 { 4700 + struct task_struct *g, *p; 4701 4702 #if (BITS_PER_LONG == 32) 4703 printk("\n" ··· 4730 * NOTE: this function does not set the idle thread's NEED_RESCHED 4731 * flag, to make booting more robust. 4732 */ 4733 + void __devinit init_idle(struct task_struct *idle, int cpu) 4734 { 4735 runqueue_t *rq = cpu_rq(cpu); 4736 unsigned long flags; ··· 4793 * task must not exit() & deallocate itself prematurely. The 4794 * call is not atomic; no spinlocks may be held. 4795 */ 4796 + int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 4797 { 4798 unsigned long flags; 4799 migration_req_t req; ··· 5061 mmdrop(mm); 5062 } 5063 5064 + static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) 5065 { 5066 struct runqueue *rq = cpu_rq(dead_cpu); 5067 ··· 5096 struct list_head *list = &rq->arrays[arr].queue[i]; 5097 5098 while (!list_empty(list)) 5099 + migrate_dead(dead_cpu, list_entry(list->next, 5100 + struct task_struct, run_list)); 5101 } 5102 } 5103 } ··· 6801 * 6802 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6803 */ 6804 + struct task_struct *curr_task(int cpu) 6805 { 6806 return cpu_curr(cpu); 6807 } ··· 6821 * 6822 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6823 */ 6824 + void set_curr_task(int cpu, struct task_struct *p) 6825 { 6826 cpu_curr(cpu) = p; 6827 }
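The kernel/sched.c hunk above is a type-spelling change only: every prototype and local variable now names the struct directly, and no function body changes behaviour. A stand-alone sketch of the same substitution — the names demo_task, demo_prio_old and demo_prio_new are invented for illustration, not kernel symbols:

	#include <stdio.h>

	struct demo_task { int prio; };
	typedef struct demo_task demo_task_t;	/* the kind of alias being retired */

	/* old spelling, through the typedef */
	static int demo_prio_old(const demo_task_t *p) { return p->prio; }

	/* new spelling, struct type written out; identical code is generated */
	static int demo_prio_new(const struct demo_task *p) { return p->prio; }

	int main(void)
	{
		struct demo_task t = { .prio = 120 };
		printf("%d %d\n", demo_prio_old(&t), demo_prio_new(&t));
		return 0;
	}

A typedef is only a second name for the same type, so either spelling compiles to the same object code; the struct tag form simply makes the underlying type visible at every use site.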
+1 -1
kernel/timer.c
··· 1368 1369 static void process_timeout(unsigned long __data) 1370 { 1371 - wake_up_process((task_t *)__data); 1372 } 1373 1374 /**
··· 1368 1369 static void process_timeout(unsigned long __data) 1370 { 1371 + wake_up_process((struct task_struct *)__data); 1372 } 1373 1374 /**
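The timer callback above receives its argument as an unsigned long and converts it back to a task pointer, so only the spelling of the cast changes. A hedged userspace sketch of the same pointer-through-integer round trip — demo_task and demo_timeout are invented names, and it assumes (as the timer API here does) that unsigned long is wide enough to hold a pointer:

	#include <stdio.h>

	struct demo_task { const char *comm; };

	/* Callback takes its context as an unsigned long, like process_timeout() above. */
	static void demo_timeout(unsigned long data)
	{
		struct demo_task *p = (struct demo_task *)data;

		printf("waking %s\n", p->comm);
	}

	int main(void)
	{
		struct demo_task t = { .comm = "demo" };

		demo_timeout((unsigned long)&t);	/* pointer smuggled through the integer argument */
		return 0;
	}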
+1 -1
kernel/workqueue.c
··· 51 wait_queue_head_t work_done; 52 53 struct workqueue_struct *wq; 54 - task_t *thread; 55 56 int run_depth; /* Detect run_workqueue() recursion depth */ 57 } ____cacheline_aligned;
··· 51 wait_queue_head_t work_done; 52 53 struct workqueue_struct *wq; 54 + struct task_struct *thread; 55 56 int run_depth; /* Detect run_workqueue() recursion depth */ 57 } ____cacheline_aligned;
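The struct member above only stores a pointer, so the spelled-out struct tag needs nothing more than a forward declaration in scope — no typedef and no full definition. A minimal sketch with invented names (demo_thread, demo_queue):

	#include <stddef.h>

	struct demo_thread;			/* incomplete type, standing in for struct task_struct */

	struct demo_queue {
		struct demo_thread *thread;	/* a pointer member never needs the full definition */
		int run_depth;
	};

	int main(void)
	{
		struct demo_queue cwq = { .thread = NULL, .run_depth = 0 };

		return cwq.run_depth;
	}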
+4 -4
mm/oom_kill.c
··· 225 * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that 226 * we select a process with CAP_SYS_RAW_IO set). 227 */ 228 - static void __oom_kill_task(task_t *p, const char *message) 229 { 230 if (p->pid == 1) { 231 WARN_ON(1); ··· 255 force_sig(SIGKILL, p); 256 } 257 258 - static int oom_kill_task(task_t *p, const char *message) 259 { 260 struct mm_struct *mm; 261 - task_t * g, * q; 262 263 mm = p->mm; 264 ··· 316 */ 317 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) 318 { 319 - task_t *p; 320 unsigned long points = 0; 321 322 if (printk_ratelimit()) {
··· 225 * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that 226 * we select a process with CAP_SYS_RAW_IO set). 227 */ 228 + static void __oom_kill_task(struct task_struct *p, const char *message) 229 { 230 if (p->pid == 1) { 231 WARN_ON(1); ··· 255 force_sig(SIGKILL, p); 256 } 257 258 + static int oom_kill_task(struct task_struct *p, const char *message) 259 { 260 struct mm_struct *mm; 261 + struct task_struct *g, *q; 262 263 mm = p->mm; 264 ··· 316 */ 317 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) 318 { 319 + struct task_struct *p; 320 unsigned long points = 0; 321 322 if (printk_ratelimit()) {
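One small C point made visible by the new spelling in mm/oom_kill.c: in a multi-variable declaration the '*' binds to each declarator, not to the type, so every pointer name carries its own asterisk. A tiny illustration with a made-up type:

	struct demo_task { int pid; };

	int main(void)
	{
		struct demo_task t = { .pid = 1 };
		struct demo_task *g, *q;	/* '*' written per declarator: both g and q are pointers */

		g = &t;
		q = &t;
		return (g->pid == q->pid) ? 0 : 1;
	}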