[PATCH] sched: cleanup, convert sched.c-internal typedefs to struct

convert:

- runqueue_t to 'struct rq'
- prio_array_t to 'struct prio_array'
- migration_req_t to 'struct migration_req'
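
The pattern behind all three conversions, shown here as a minimal
self-contained sketch (the field list is reduced to a stand-in, and the
dequeue_task_old/dequeue_task_new names are illustrative only, not code
taken from the patch itself):

/*
 * Old style: the type is only reachable through a _t typedef.
 */
typedef struct prio_array {
	unsigned int nr_active;		/* stand-in for the real fields */
} prio_array_t;

static inline void dequeue_task_old(prio_array_t *array)
{
	array->nr_active--;
}

/*
 * New style: the typedef is gone and every use site spells out
 * 'struct prio_array' (likewise 'struct rq' and 'struct migration_req').
 */
static inline void dequeue_task_new(struct prio_array *array)
{
	array->nr_active--;
}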

I was the one who added these, but they are against the kernel coding
style and were also used inconsistently in places. So get rid of them
all at once, now that we are flushing the scheduler patch-queue anyway.

The conversion was mostly scripted; the result was reviewed, and any
secondary whitespace and style impact was fixed up by hand.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar and committed by Linus Torvalds (70b97a7f 36c8b586)

2 files changed: +128 -127
include/linux/sched.h (+3 -2)
··· 534 extern struct user_struct root_user; 535 #define INIT_USER (&root_user) 536 537 - typedef struct prio_array prio_array_t; 538 struct backing_dev_info; 539 struct reclaim_state; 540 ··· 714 SLEEP_INTERRUPTED, 715 }; 716 717 struct task_struct { 718 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 719 struct thread_info *thread_info; ··· 733 int load_weight; /* for niceness load balancing purposes */ 734 int prio, static_prio, normal_prio; 735 struct list_head run_list; 736 - prio_array_t *array; 737 738 unsigned short ioprio; 739 unsigned int btrace_seq;
··· 534 extern struct user_struct root_user; 535 #define INIT_USER (&root_user) 536 537 struct backing_dev_info; 538 struct reclaim_state; 539 ··· 715 SLEEP_INTERRUPTED, 716 }; 717 718 + struct prio_array; 719 + 720 struct task_struct { 721 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 722 struct thread_info *thread_info; ··· 732 int load_weight; /* for niceness load balancing purposes */ 733 int prio, static_prio, normal_prio; 734 struct list_head run_list; 735 + struct prio_array *array; 736 737 unsigned short ioprio; 738 unsigned int btrace_seq;
kernel/sched.c (+125 -125)
··· 188 * These are the runqueue data structures: 189 */ 190 191 - typedef struct runqueue runqueue_t; 192 - 193 struct prio_array { 194 unsigned int nr_active; 195 DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ ··· 201 * (such as the load balancing or the thread migration code), lock 202 * acquire operations must be ordered by ascending &runqueue. 203 */ 204 - struct runqueue { 205 spinlock_t lock; 206 207 /* ··· 227 unsigned long long timestamp_last_tick; 228 struct task_struct *curr, *idle; 229 struct mm_struct *prev_mm; 230 - prio_array_t *active, *expired, arrays[2]; 231 int best_expired_prio; 232 atomic_t nr_iowait; 233 ··· 264 struct lock_class_key rq_lock_key; 265 }; 266 267 - static DEFINE_PER_CPU(struct runqueue, runqueues); 268 269 /* 270 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ··· 289 #endif 290 291 #ifndef __ARCH_WANT_UNLOCKED_CTXSW 292 - static inline int task_running(runqueue_t *rq, struct task_struct *p) 293 { 294 return rq->curr == p; 295 } 296 297 - static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) 298 { 299 } 300 301 - static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) 302 { 303 #ifdef CONFIG_DEBUG_SPINLOCK 304 /* this is a valid case when another task releases the spinlock */ ··· 315 } 316 317 #else /* __ARCH_WANT_UNLOCKED_CTXSW */ 318 - static inline int task_running(runqueue_t *rq, struct task_struct *p) 319 { 320 #ifdef CONFIG_SMP 321 return p->oncpu; ··· 324 #endif 325 } 326 327 - static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) 328 { 329 #ifdef CONFIG_SMP 330 /* ··· 341 #endif 342 } 343 344 - static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) 345 { 346 #ifdef CONFIG_SMP 347 /* ··· 362 * __task_rq_lock - lock the runqueue a given task resides on. 363 * Must be called interrupts disabled. 364 */ 365 - static inline runqueue_t *__task_rq_lock(struct task_struct *p) 366 __acquires(rq->lock) 367 { 368 - struct runqueue *rq; 369 370 repeat_lock_task: 371 rq = task_rq(p); ··· 382 * interrupts. Note the ordering: we can safely lookup the task_rq without 383 * explicitly disabling preemption. 384 */ 385 - static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags) 386 __acquires(rq->lock) 387 { 388 - struct runqueue *rq; 389 390 repeat_lock_task: 391 local_irq_save(*flags); ··· 398 return rq; 399 } 400 401 - static inline void __task_rq_unlock(runqueue_t *rq) 402 __releases(rq->lock) 403 { 404 spin_unlock(&rq->lock); 405 } 406 407 - static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) 408 __releases(rq->lock) 409 { 410 spin_unlock_irqrestore(&rq->lock, *flags); ··· 424 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); 425 seq_printf(seq, "timestamp %lu\n", jiffies); 426 for_each_online_cpu(cpu) { 427 - runqueue_t *rq = cpu_rq(cpu); 428 #ifdef CONFIG_SMP 429 struct sched_domain *sd; 430 int dcnt = 0; ··· 511 /* 512 * rq_lock - lock a given runqueue and disable interrupts. 
513 */ 514 - static inline runqueue_t *this_rq_lock(void) 515 __acquires(rq->lock) 516 { 517 - runqueue_t *rq; 518 519 local_irq_disable(); 520 rq = this_rq(); ··· 552 static void sched_info_arrive(struct task_struct *t) 553 { 554 unsigned long now = jiffies, diff = 0; 555 - struct runqueue *rq = task_rq(t); 556 557 if (t->sched_info.last_queued) 558 diff = now - t->sched_info.last_queued; ··· 595 */ 596 static inline void sched_info_depart(struct task_struct *t) 597 { 598 - struct runqueue *rq = task_rq(t); 599 unsigned long diff = jiffies - t->sched_info.last_arrival; 600 601 t->sched_info.cpu_time += diff; ··· 612 static inline void 613 sched_info_switch(struct task_struct *prev, struct task_struct *next) 614 { 615 - struct runqueue *rq = task_rq(prev); 616 617 /* 618 * prev now departs the cpu. It's not interesting to record ··· 633 /* 634 * Adding/removing a task to/from a priority array: 635 */ 636 - static void dequeue_task(struct task_struct *p, prio_array_t *array) 637 { 638 array->nr_active--; 639 list_del(&p->run_list); ··· 641 __clear_bit(p->prio, array->bitmap); 642 } 643 644 - static void enqueue_task(struct task_struct *p, prio_array_t *array) 645 { 646 sched_info_queued(p); 647 list_add_tail(&p->run_list, array->queue + p->prio); ··· 654 * Put task to the end of the run list without the overhead of dequeue 655 * followed by enqueue. 656 */ 657 - static void requeue_task(struct task_struct *p, prio_array_t *array) 658 { 659 list_move_tail(&p->run_list, array->queue + p->prio); 660 } 661 662 - static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) 663 { 664 list_add(&p->run_list, array->queue + p->prio); 665 __set_bit(p->prio, array->bitmap); ··· 738 } 739 740 static inline void 741 - inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) 742 { 743 rq->raw_weighted_load += p->load_weight; 744 } 745 746 static inline void 747 - dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) 748 { 749 rq->raw_weighted_load -= p->load_weight; 750 } 751 752 - static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq) 753 { 754 rq->nr_running++; 755 inc_raw_weighted_load(rq, p); 756 } 757 758 - static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq) 759 { 760 rq->nr_running--; 761 dec_raw_weighted_load(rq, p); ··· 802 /* 803 * __activate_task - move a task to the runqueue. 804 */ 805 - static void __activate_task(struct task_struct *p, runqueue_t *rq) 806 { 807 - prio_array_t *target = rq->active; 808 809 if (batch_task(p)) 810 target = rq->expired; ··· 815 /* 816 * __activate_idle_task - move idle task to the _front_ of runqueue. 817 */ 818 - static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq) 819 { 820 enqueue_task_head(p, rq->active); 821 inc_nr_running(p, rq); ··· 897 * Update all the scheduling statistics stuff. (sleep average 898 * calculation, priority modifiers, etc.) 899 */ 900 - static void activate_task(struct task_struct *p, runqueue_t *rq, int local) 901 { 902 unsigned long long now; 903 ··· 905 #ifdef CONFIG_SMP 906 if (!local) { 907 /* Compensate for drifting sched_clock */ 908 - runqueue_t *this_rq = this_rq(); 909 now = (now - this_rq->timestamp_last_tick) 910 + rq->timestamp_last_tick; 911 } ··· 944 /* 945 * deactivate_task - remove a task from the runqueue. 
946 */ 947 - static void deactivate_task(struct task_struct *p, runqueue_t *rq) 948 { 949 dec_nr_running(p, rq); 950 dequeue_task(p, p->array); ··· 1008 } 1009 1010 #ifdef CONFIG_SMP 1011 - typedef struct { 1012 struct list_head list; 1013 1014 struct task_struct *task; 1015 int dest_cpu; 1016 1017 struct completion done; 1018 - } migration_req_t; 1019 1020 /* 1021 * The task's runqueue lock must be held. 1022 * Returns true if you have to wait for migration thread. 1023 */ 1024 static int 1025 - migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req) 1026 { 1027 - runqueue_t *rq = task_rq(p); 1028 1029 /* 1030 * If the task is not on a runqueue (and not running), then ··· 1055 void wait_task_inactive(struct task_struct *p) 1056 { 1057 unsigned long flags; 1058 - runqueue_t *rq; 1059 int preempted; 1060 1061 repeat: ··· 1106 */ 1107 static inline unsigned long source_load(int cpu, int type) 1108 { 1109 - runqueue_t *rq = cpu_rq(cpu); 1110 1111 if (type == 0) 1112 return rq->raw_weighted_load; ··· 1120 */ 1121 static inline unsigned long target_load(int cpu, int type) 1122 { 1123 - runqueue_t *rq = cpu_rq(cpu); 1124 1125 if (type == 0) 1126 return rq->raw_weighted_load; ··· 1133 */ 1134 static inline unsigned long cpu_avg_load_per_task(int cpu) 1135 { 1136 - runqueue_t *rq = cpu_rq(cpu); 1137 unsigned long n = rq->nr_running; 1138 1139 return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE; ··· 1337 int cpu, this_cpu, success = 0; 1338 unsigned long flags; 1339 long old_state; 1340 - runqueue_t *rq; 1341 #ifdef CONFIG_SMP 1342 - unsigned long load, this_load; 1343 struct sched_domain *sd, *this_sd = NULL; 1344 int new_cpu; 1345 #endif 1346 ··· 1576 */ 1577 void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) 1578 { 1579 unsigned long flags; 1580 int this_cpu, cpu; 1581 - runqueue_t *rq, *this_rq; 1582 1583 rq = task_rq_lock(p, &flags); 1584 BUG_ON(p->state != TASK_RUNNING); ··· 1661 void fastcall sched_exit(struct task_struct *p) 1662 { 1663 unsigned long flags; 1664 - runqueue_t *rq; 1665 1666 /* 1667 * If the child was a (relative-) CPU hog then decrease ··· 1692 * prepare_task_switch sets up locking and calls architecture specific 1693 * hooks. 1694 */ 1695 - static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) 1696 { 1697 prepare_lock_switch(rq, next); 1698 prepare_arch_switch(next); ··· 1713 * with the lock held can cause deadlocks; see schedule() for 1714 * details.) 1715 */ 1716 - static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) 1717 __releases(rq->lock) 1718 { 1719 struct mm_struct *mm = rq->prev_mm; ··· 1754 asmlinkage void schedule_tail(struct task_struct *prev) 1755 __releases(rq->lock) 1756 { 1757 - runqueue_t *rq = this_rq(); 1758 finish_task_switch(rq, prev); 1759 #ifdef __ARCH_WANT_UNLOCKED_CTXSW 1760 /* In this case, finish_task_switch does not reenable preemption */ ··· 1770 * thread's register state. 1771 */ 1772 static inline struct task_struct * 1773 - context_switch(runqueue_t *rq, struct task_struct *prev, 1774 struct task_struct *next) 1775 { 1776 struct mm_struct *mm = next->mm; ··· 1883 * Note this does not disable interrupts like task_rq_lock, 1884 * you need to do so manually before calling. 1885 */ 1886 - static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) 1887 __acquires(rq1->lock) 1888 __acquires(rq2->lock) 1889 { ··· 1907 * Note this does not restore interrupts like task_rq_unlock, 1908 * you need to do so manually after calling. 
1909 */ 1910 - static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) 1911 __releases(rq1->lock) 1912 __releases(rq2->lock) 1913 { ··· 1921 /* 1922 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1923 */ 1924 - static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) 1925 __releases(this_rq->lock) 1926 __acquires(busiest->lock) 1927 __acquires(this_rq->lock) ··· 1944 */ 1945 static void sched_migrate_task(struct task_struct *p, int dest_cpu) 1946 { 1947 - migration_req_t req; 1948 - runqueue_t *rq; 1949 unsigned long flags; 1950 1951 rq = task_rq_lock(p, &flags); 1952 if (!cpu_isset(dest_cpu, p->cpus_allowed) ··· 1987 * pull_task - move a task from a remote runqueue to the local runqueue. 1988 * Both runqueues must be locked. 1989 */ 1990 - static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, 1991 - struct task_struct *p, runqueue_t *this_rq, 1992 - prio_array_t *this_array, int this_cpu) 1993 { 1994 dequeue_task(p, src_array); 1995 dec_nr_running(p, src_rq); ··· 2010 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 2011 */ 2012 static 2013 - int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, 2014 struct sched_domain *sd, enum idle_type idle, 2015 int *all_pinned) 2016 { ··· 2050 * 2051 * Called with both runqueues locked. 2052 */ 2053 - static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, 2054 unsigned long max_nr_move, unsigned long max_load_move, 2055 struct sched_domain *sd, enum idle_type idle, 2056 int *all_pinned) 2057 { 2058 int idx, pulled = 0, pinned = 0, this_best_prio, best_prio, 2059 best_prio_seen, skip_for_load; 2060 - prio_array_t *array, *dst_array; 2061 struct list_head *head, *curr; 2062 struct task_struct *tmp; 2063 long rem_load_move; ··· 2212 sum_weighted_load = sum_nr_running = avg_load = 0; 2213 2214 for_each_cpu_mask(i, group->cpumask) { 2215 - runqueue_t *rq = cpu_rq(i); 2216 2217 if (*sd_idle && !idle_cpu(i)) 2218 *sd_idle = 0; ··· 2428 /* 2429 * find_busiest_queue - find the busiest runqueue among the cpus in group. 2430 */ 2431 - static runqueue_t * 2432 find_busiest_queue(struct sched_group *group, enum idle_type idle, 2433 unsigned long imbalance) 2434 { 2435 - runqueue_t *busiest = NULL, *rq; 2436 unsigned long max_load = 0; 2437 int i; 2438 ··· 2468 * 2469 * Called with this_rq unlocked. 2470 */ 2471 - static int load_balance(int this_cpu, runqueue_t *this_rq, 2472 struct sched_domain *sd, enum idle_type idle) 2473 { 2474 int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; 2475 struct sched_group *group; 2476 unsigned long imbalance; 2477 - runqueue_t *busiest; 2478 2479 if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && 2480 !sched_smt_power_savings) ··· 2596 * this_rq is locked. 2597 */ 2598 static int 2599 - load_balance_newidle(int this_cpu, runqueue_t *this_rq, struct sched_domain *sd) 2600 { 2601 struct sched_group *group; 2602 - runqueue_t *busiest = NULL; 2603 unsigned long imbalance; 2604 int nr_moved = 0; 2605 int sd_idle = 0; ··· 2657 * idle_balance is called by schedule() if this_cpu is about to become 2658 * idle. Attempts to pull tasks from other CPUs. 2659 */ 2660 - static void idle_balance(int this_cpu, runqueue_t *this_rq) 2661 { 2662 struct sched_domain *sd; 2663 ··· 2678 * 2679 * Called with busiest_rq locked. 
2680 */ 2681 - static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) 2682 { 2683 - struct sched_domain *sd; 2684 - runqueue_t *target_rq; 2685 int target_cpu = busiest_rq->push_cpu; 2686 2687 /* Is there any task to move? */ 2688 if (busiest_rq->nr_running <= 1) ··· 2736 } 2737 2738 static void 2739 - rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle) 2740 { 2741 unsigned long this_load, interval, j = cpu_offset(this_cpu); 2742 struct sched_domain *sd; ··· 2790 /* 2791 * on UP we do not need to balance between CPUs: 2792 */ 2793 - static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) 2794 { 2795 } 2796 - static inline void idle_balance(int cpu, runqueue_t *rq) 2797 { 2798 } 2799 #endif 2800 2801 - static inline int wake_priority_sleeper(runqueue_t *rq) 2802 { 2803 int ret = 0; 2804 ··· 2826 * Bank in p->sched_time the ns elapsed since the last tick or switch. 2827 */ 2828 static inline void 2829 - update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now) 2830 { 2831 p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); 2832 } ··· 2858 * increasing number of running tasks. We also ignore the interactivity 2859 * if a better static_prio task has expired: 2860 */ 2861 - static inline int expired_starving(runqueue_t *rq) 2862 { 2863 if (rq->curr->static_prio > rq->best_expired_prio) 2864 return 1; ··· 2900 cputime_t cputime) 2901 { 2902 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2903 - runqueue_t *rq = this_rq(); 2904 cputime64_t tmp; 2905 2906 p->stime = cputime_add(p->stime, cputime); ··· 2930 { 2931 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2932 cputime64_t tmp = cputime_to_cputime64(steal); 2933 - runqueue_t *rq = this_rq(); 2934 2935 if (p == rq->idle) { 2936 p->stime = cputime_add(p->stime, steal); ··· 2954 unsigned long long now = sched_clock(); 2955 struct task_struct *p = current; 2956 int cpu = smp_processor_id(); 2957 - runqueue_t *rq = this_rq(); 2958 2959 update_cpu_clock(p, rq, now); 2960 ··· 3043 } 3044 3045 #ifdef CONFIG_SCHED_SMT 3046 - static inline void wakeup_busy_runqueue(runqueue_t *rq) 3047 { 3048 /* If an SMT runqueue is sleeping due to priority reasons wake it up */ 3049 if (rq->curr == rq->idle && rq->nr_running) ··· 3069 return; 3070 3071 for_each_cpu_mask(i, sd->span) { 3072 - runqueue_t *smt_rq = cpu_rq(i); 3073 3074 if (i == this_cpu) 3075 continue; ··· 3099 * need to be obeyed. 3100 */ 3101 static int 3102 - dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) 3103 { 3104 struct sched_domain *tmp, *sd = NULL; 3105 int ret = 0, i; ··· 3120 3121 for_each_cpu_mask(i, sd->span) { 3122 struct task_struct *smt_curr; 3123 - runqueue_t *smt_rq; 3124 3125 if (i == this_cpu) 3126 continue; ··· 3166 { 3167 } 3168 static inline int 3169 - dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) 3170 { 3171 return 0; 3172 } ··· 3221 asmlinkage void __sched schedule(void) 3222 { 3223 struct task_struct *prev, *next; 3224 struct list_head *queue; 3225 unsigned long long now; 3226 unsigned long run_time; 3227 int cpu, idx, new_prio; 3228 - prio_array_t *array; 3229 long *switch_count; 3230 - runqueue_t *rq; 3231 3232 /* 3233 * Test if we are atomic. 
Since do_exit() needs to call into ··· 3787 */ 3788 void rt_mutex_setprio(struct task_struct *p, int prio) 3789 { 3790 unsigned long flags; 3791 - prio_array_t *array; 3792 - runqueue_t *rq; 3793 int oldprio; 3794 3795 BUG_ON(prio < 0 || prio > MAX_PRIO); ··· 3828 3829 void set_user_nice(struct task_struct *p, long nice) 3830 { 3831 int old_prio, delta; 3832 unsigned long flags; 3833 - prio_array_t *array; 3834 - runqueue_t *rq; 3835 3836 if (TASK_NICE(p) == nice || nice < -20 || nice > 19) 3837 return; ··· 4012 struct sched_param *param) 4013 { 4014 int retval, oldprio, oldpolicy = -1; 4015 - prio_array_t *array; 4016 unsigned long flags; 4017 - runqueue_t *rq; 4018 4019 /* may grab non-irq protected spin_locks */ 4020 BUG_ON(in_interrupt()); ··· 4376 */ 4377 asmlinkage long sys_sched_yield(void) 4378 { 4379 - runqueue_t *rq = this_rq_lock(); 4380 - prio_array_t *array = current->array; 4381 - prio_array_t *target = rq->expired; 4382 4383 schedstat_inc(rq, yld_cnt); 4384 /* ··· 4524 */ 4525 void __sched io_schedule(void) 4526 { 4527 - struct runqueue *rq = &__raw_get_cpu_var(runqueues); 4528 4529 atomic_inc(&rq->nr_iowait); 4530 schedule(); ··· 4534 4535 long __sched io_schedule_timeout(long timeout) 4536 { 4537 - struct runqueue *rq = &__raw_get_cpu_var(runqueues); 4538 long ret; 4539 4540 atomic_inc(&rq->nr_iowait); ··· 4742 */ 4743 void __devinit init_idle(struct task_struct *idle, int cpu) 4744 { 4745 - runqueue_t *rq = cpu_rq(cpu); 4746 unsigned long flags; 4747 4748 idle->timestamp = sched_clock(); ··· 4781 /* 4782 * This is how migration works: 4783 * 4784 - * 1) we queue a migration_req_t structure in the source CPU's 4785 * runqueue and wake up that CPU's migration thread. 4786 * 2) we down() the locked semaphore => thread blocks. 4787 * 3) migration thread wakes up (implicitly it forces the migrated ··· 4805 */ 4806 int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 4807 { 4808 unsigned long flags; 4809 - migration_req_t req; 4810 - runqueue_t *rq; 4811 int ret = 0; 4812 4813 rq = task_rq_lock(p, &flags); ··· 4849 */ 4850 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 4851 { 4852 - runqueue_t *rq_dest, *rq_src; 4853 int ret = 0; 4854 4855 if (unlikely(cpu_is_offline(dest_cpu))) ··· 4895 static int migration_thread(void *data) 4896 { 4897 int cpu = (long)data; 4898 - runqueue_t *rq; 4899 4900 rq = cpu_rq(cpu); 4901 BUG_ON(rq->migration_thread != current); 4902 4903 set_current_state(TASK_INTERRUPTIBLE); 4904 while (!kthread_should_stop()) { 4905 struct list_head *head; 4906 - migration_req_t *req; 4907 4908 try_to_freeze(); 4909 ··· 4927 set_current_state(TASK_INTERRUPTIBLE); 4928 continue; 4929 } 4930 - req = list_entry(head->next, migration_req_t, list); 4931 list_del_init(head->next); 4932 4933 spin_unlock(&rq->lock); ··· 4954 /* Figure out where task on dead CPU should go, use force if neccessary. */ 4955 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 4956 { 4957 - runqueue_t *rq; 4958 unsigned long flags; 4959 - int dest_cpu; 4960 cpumask_t mask; 4961 4962 restart: 4963 /* On same node? */ ··· 4997 * their home CPUs. 
So we just add the counter to another CPU's counter, 4998 * to keep the global sum constant after CPU-down: 4999 */ 5000 - static void migrate_nr_uninterruptible(runqueue_t *rq_src) 5001 { 5002 - runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); 5003 unsigned long flags; 5004 5005 local_irq_save(flags); ··· 5035 void sched_idle_next(void) 5036 { 5037 int this_cpu = smp_processor_id(); 5038 - runqueue_t *rq = cpu_rq(this_cpu); 5039 struct task_struct *p = rq->idle; 5040 unsigned long flags; 5041 ··· 5073 5074 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) 5075 { 5076 - struct runqueue *rq = cpu_rq(dead_cpu); 5077 5078 /* Must be exiting, otherwise would be on tasklist. */ 5079 BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); ··· 5098 /* release_task() removes task from tasklist, so we won't find dead tasks. */ 5099 static void migrate_dead_tasks(unsigned int dead_cpu) 5100 { 5101 - struct runqueue *rq = cpu_rq(dead_cpu); 5102 unsigned int arr, i; 5103 5104 for (arr = 0; arr < 2; arr++) { ··· 5122 { 5123 struct task_struct *p; 5124 int cpu = (long)hcpu; 5125 - struct runqueue *rq; 5126 unsigned long flags; 5127 5128 switch (action) { 5129 case CPU_UP_PREPARE: ··· 5175 * the requestors. */ 5176 spin_lock_irq(&rq->lock); 5177 while (!list_empty(&rq->migration_queue)) { 5178 - migration_req_t *req; 5179 req = list_entry(rq->migration_queue.next, 5180 - migration_req_t, list); 5181 list_del_init(&req->list); 5182 complete(&req->done); 5183 } ··· 5361 */ 5362 static void cpu_attach_domain(struct sched_domain *sd, int cpu) 5363 { 5364 - runqueue_t *rq = cpu_rq(cpu); 5365 struct sched_domain *tmp; 5366 5367 /* Remove the sched domains which do not contribute to scheduling. */ ··· 6690 int i, j, k; 6691 6692 for_each_possible_cpu(i) { 6693 - prio_array_t *array; 6694 - runqueue_t *rq; 6695 6696 rq = cpu_rq(i); 6697 spin_lock_init(&rq->lock); ··· 6764 #ifdef CONFIG_MAGIC_SYSRQ 6765 void normalize_rt_tasks(void) 6766 { 6767 struct task_struct *p; 6768 - prio_array_t *array; 6769 unsigned long flags; 6770 - runqueue_t *rq; 6771 6772 read_lock_irq(&tasklist_lock); 6773 for_each_process(p) {
··· 188 * These are the runqueue data structures: 189 */ 190 191 struct prio_array { 192 unsigned int nr_active; 193 DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ ··· 203 * (such as the load balancing or the thread migration code), lock 204 * acquire operations must be ordered by ascending &runqueue. 205 */ 206 + struct rq { 207 spinlock_t lock; 208 209 /* ··· 229 unsigned long long timestamp_last_tick; 230 struct task_struct *curr, *idle; 231 struct mm_struct *prev_mm; 232 + struct prio_array *active, *expired, arrays[2]; 233 int best_expired_prio; 234 atomic_t nr_iowait; 235 ··· 266 struct lock_class_key rq_lock_key; 267 }; 268 269 + static DEFINE_PER_CPU(struct rq, runqueues); 270 271 /* 272 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ··· 291 #endif 292 293 #ifndef __ARCH_WANT_UNLOCKED_CTXSW 294 + static inline int task_running(struct rq *rq, struct task_struct *p) 295 { 296 return rq->curr == p; 297 } 298 299 + static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) 300 { 301 } 302 303 + static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) 304 { 305 #ifdef CONFIG_DEBUG_SPINLOCK 306 /* this is a valid case when another task releases the spinlock */ ··· 317 } 318 319 #else /* __ARCH_WANT_UNLOCKED_CTXSW */ 320 + static inline int task_running(struct rq *rq, struct task_struct *p) 321 { 322 #ifdef CONFIG_SMP 323 return p->oncpu; ··· 326 #endif 327 } 328 329 + static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) 330 { 331 #ifdef CONFIG_SMP 332 /* ··· 343 #endif 344 } 345 346 + static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) 347 { 348 #ifdef CONFIG_SMP 349 /* ··· 364 * __task_rq_lock - lock the runqueue a given task resides on. 365 * Must be called interrupts disabled. 366 */ 367 + static inline struct rq *__task_rq_lock(struct task_struct *p) 368 __acquires(rq->lock) 369 { 370 + struct rq *rq; 371 372 repeat_lock_task: 373 rq = task_rq(p); ··· 384 * interrupts. Note the ordering: we can safely lookup the task_rq without 385 * explicitly disabling preemption. 386 */ 387 + static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) 388 __acquires(rq->lock) 389 { 390 + struct rq *rq; 391 392 repeat_lock_task: 393 local_irq_save(*flags); ··· 400 return rq; 401 } 402 403 + static inline void __task_rq_unlock(struct rq *rq) 404 __releases(rq->lock) 405 { 406 spin_unlock(&rq->lock); 407 } 408 409 + static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 410 __releases(rq->lock) 411 { 412 spin_unlock_irqrestore(&rq->lock, *flags); ··· 426 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); 427 seq_printf(seq, "timestamp %lu\n", jiffies); 428 for_each_online_cpu(cpu) { 429 + struct rq *rq = cpu_rq(cpu); 430 #ifdef CONFIG_SMP 431 struct sched_domain *sd; 432 int dcnt = 0; ··· 513 /* 514 * rq_lock - lock a given runqueue and disable interrupts. 
515 */ 516 + static inline struct rq *this_rq_lock(void) 517 __acquires(rq->lock) 518 { 519 + struct rq *rq; 520 521 local_irq_disable(); 522 rq = this_rq(); ··· 554 static void sched_info_arrive(struct task_struct *t) 555 { 556 unsigned long now = jiffies, diff = 0; 557 + struct rq *rq = task_rq(t); 558 559 if (t->sched_info.last_queued) 560 diff = now - t->sched_info.last_queued; ··· 597 */ 598 static inline void sched_info_depart(struct task_struct *t) 599 { 600 + struct rq *rq = task_rq(t); 601 unsigned long diff = jiffies - t->sched_info.last_arrival; 602 603 t->sched_info.cpu_time += diff; ··· 614 static inline void 615 sched_info_switch(struct task_struct *prev, struct task_struct *next) 616 { 617 + struct rq *rq = task_rq(prev); 618 619 /* 620 * prev now departs the cpu. It's not interesting to record ··· 635 /* 636 * Adding/removing a task to/from a priority array: 637 */ 638 + static void dequeue_task(struct task_struct *p, struct prio_array *array) 639 { 640 array->nr_active--; 641 list_del(&p->run_list); ··· 643 __clear_bit(p->prio, array->bitmap); 644 } 645 646 + static void enqueue_task(struct task_struct *p, struct prio_array *array) 647 { 648 sched_info_queued(p); 649 list_add_tail(&p->run_list, array->queue + p->prio); ··· 656 * Put task to the end of the run list without the overhead of dequeue 657 * followed by enqueue. 658 */ 659 + static void requeue_task(struct task_struct *p, struct prio_array *array) 660 { 661 list_move_tail(&p->run_list, array->queue + p->prio); 662 } 663 664 + static inline void 665 + enqueue_task_head(struct task_struct *p, struct prio_array *array) 666 { 667 list_add(&p->run_list, array->queue + p->prio); 668 __set_bit(p->prio, array->bitmap); ··· 739 } 740 741 static inline void 742 + inc_raw_weighted_load(struct rq *rq, const struct task_struct *p) 743 { 744 rq->raw_weighted_load += p->load_weight; 745 } 746 747 static inline void 748 + dec_raw_weighted_load(struct rq *rq, const struct task_struct *p) 749 { 750 rq->raw_weighted_load -= p->load_weight; 751 } 752 753 + static inline void inc_nr_running(struct task_struct *p, struct rq *rq) 754 { 755 rq->nr_running++; 756 inc_raw_weighted_load(rq, p); 757 } 758 759 + static inline void dec_nr_running(struct task_struct *p, struct rq *rq) 760 { 761 rq->nr_running--; 762 dec_raw_weighted_load(rq, p); ··· 803 /* 804 * __activate_task - move a task to the runqueue. 805 */ 806 + static void __activate_task(struct task_struct *p, struct rq *rq) 807 { 808 + struct prio_array *target = rq->active; 809 810 if (batch_task(p)) 811 target = rq->expired; ··· 816 /* 817 * __activate_idle_task - move idle task to the _front_ of runqueue. 818 */ 819 + static inline void __activate_idle_task(struct task_struct *p, struct rq *rq) 820 { 821 enqueue_task_head(p, rq->active); 822 inc_nr_running(p, rq); ··· 898 * Update all the scheduling statistics stuff. (sleep average 899 * calculation, priority modifiers, etc.) 900 */ 901 + static void activate_task(struct task_struct *p, struct rq *rq, int local) 902 { 903 unsigned long long now; 904 ··· 906 #ifdef CONFIG_SMP 907 if (!local) { 908 /* Compensate for drifting sched_clock */ 909 + struct rq *this_rq = this_rq(); 910 now = (now - this_rq->timestamp_last_tick) 911 + rq->timestamp_last_tick; 912 } ··· 945 /* 946 * deactivate_task - remove a task from the runqueue. 
947 */ 948 + static void deactivate_task(struct task_struct *p, struct rq *rq) 949 { 950 dec_nr_running(p, rq); 951 dequeue_task(p, p->array); ··· 1009 } 1010 1011 #ifdef CONFIG_SMP 1012 + struct migration_req { 1013 struct list_head list; 1014 1015 struct task_struct *task; 1016 int dest_cpu; 1017 1018 struct completion done; 1019 + }; 1020 1021 /* 1022 * The task's runqueue lock must be held. 1023 * Returns true if you have to wait for migration thread. 1024 */ 1025 static int 1026 + migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) 1027 { 1028 + struct rq *rq = task_rq(p); 1029 1030 /* 1031 * If the task is not on a runqueue (and not running), then ··· 1056 void wait_task_inactive(struct task_struct *p) 1057 { 1058 unsigned long flags; 1059 + struct rq *rq; 1060 int preempted; 1061 1062 repeat: ··· 1107 */ 1108 static inline unsigned long source_load(int cpu, int type) 1109 { 1110 + struct rq *rq = cpu_rq(cpu); 1111 1112 if (type == 0) 1113 return rq->raw_weighted_load; ··· 1121 */ 1122 static inline unsigned long target_load(int cpu, int type) 1123 { 1124 + struct rq *rq = cpu_rq(cpu); 1125 1126 if (type == 0) 1127 return rq->raw_weighted_load; ··· 1134 */ 1135 static inline unsigned long cpu_avg_load_per_task(int cpu) 1136 { 1137 + struct rq *rq = cpu_rq(cpu); 1138 unsigned long n = rq->nr_running; 1139 1140 return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE; ··· 1338 int cpu, this_cpu, success = 0; 1339 unsigned long flags; 1340 long old_state; 1341 + struct rq *rq; 1342 #ifdef CONFIG_SMP 1343 struct sched_domain *sd, *this_sd = NULL; 1344 + unsigned long load, this_load; 1345 int new_cpu; 1346 #endif 1347 ··· 1577 */ 1578 void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) 1579 { 1580 + struct rq *rq, *this_rq; 1581 unsigned long flags; 1582 int this_cpu, cpu; 1583 1584 rq = task_rq_lock(p, &flags); 1585 BUG_ON(p->state != TASK_RUNNING); ··· 1662 void fastcall sched_exit(struct task_struct *p) 1663 { 1664 unsigned long flags; 1665 + struct rq *rq; 1666 1667 /* 1668 * If the child was a (relative-) CPU hog then decrease ··· 1693 * prepare_task_switch sets up locking and calls architecture specific 1694 * hooks. 1695 */ 1696 + static inline void prepare_task_switch(struct rq *rq, struct task_struct *next) 1697 { 1698 prepare_lock_switch(rq, next); 1699 prepare_arch_switch(next); ··· 1714 * with the lock held can cause deadlocks; see schedule() for 1715 * details.) 1716 */ 1717 + static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) 1718 __releases(rq->lock) 1719 { 1720 struct mm_struct *mm = rq->prev_mm; ··· 1755 asmlinkage void schedule_tail(struct task_struct *prev) 1756 __releases(rq->lock) 1757 { 1758 + struct rq *rq = this_rq(); 1759 + 1760 finish_task_switch(rq, prev); 1761 #ifdef __ARCH_WANT_UNLOCKED_CTXSW 1762 /* In this case, finish_task_switch does not reenable preemption */ ··· 1770 * thread's register state. 1771 */ 1772 static inline struct task_struct * 1773 + context_switch(struct rq *rq, struct task_struct *prev, 1774 struct task_struct *next) 1775 { 1776 struct mm_struct *mm = next->mm; ··· 1883 * Note this does not disable interrupts like task_rq_lock, 1884 * you need to do so manually before calling. 1885 */ 1886 + static void double_rq_lock(struct rq *rq1, struct rq *rq2) 1887 __acquires(rq1->lock) 1888 __acquires(rq2->lock) 1889 { ··· 1907 * Note this does not restore interrupts like task_rq_unlock, 1908 * you need to do so manually after calling. 
1909 */ 1910 + static void double_rq_unlock(struct rq *rq1, struct rq *rq2) 1911 __releases(rq1->lock) 1912 __releases(rq2->lock) 1913 { ··· 1921 /* 1922 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1923 */ 1924 + static void double_lock_balance(struct rq *this_rq, struct rq *busiest) 1925 __releases(this_rq->lock) 1926 __acquires(busiest->lock) 1927 __acquires(this_rq->lock) ··· 1944 */ 1945 static void sched_migrate_task(struct task_struct *p, int dest_cpu) 1946 { 1947 + struct migration_req req; 1948 unsigned long flags; 1949 + struct rq *rq; 1950 1951 rq = task_rq_lock(p, &flags); 1952 if (!cpu_isset(dest_cpu, p->cpus_allowed) ··· 1987 * pull_task - move a task from a remote runqueue to the local runqueue. 1988 * Both runqueues must be locked. 1989 */ 1990 + static void pull_task(struct rq *src_rq, struct prio_array *src_array, 1991 + struct task_struct *p, struct rq *this_rq, 1992 + struct prio_array *this_array, int this_cpu) 1993 { 1994 dequeue_task(p, src_array); 1995 dec_nr_running(p, src_rq); ··· 2010 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 2011 */ 2012 static 2013 + int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, 2014 struct sched_domain *sd, enum idle_type idle, 2015 int *all_pinned) 2016 { ··· 2050 * 2051 * Called with both runqueues locked. 2052 */ 2053 + static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, 2054 unsigned long max_nr_move, unsigned long max_load_move, 2055 struct sched_domain *sd, enum idle_type idle, 2056 int *all_pinned) 2057 { 2058 int idx, pulled = 0, pinned = 0, this_best_prio, best_prio, 2059 best_prio_seen, skip_for_load; 2060 + struct prio_array *array, *dst_array; 2061 struct list_head *head, *curr; 2062 struct task_struct *tmp; 2063 long rem_load_move; ··· 2212 sum_weighted_load = sum_nr_running = avg_load = 0; 2213 2214 for_each_cpu_mask(i, group->cpumask) { 2215 + struct rq *rq = cpu_rq(i); 2216 2217 if (*sd_idle && !idle_cpu(i)) 2218 *sd_idle = 0; ··· 2428 /* 2429 * find_busiest_queue - find the busiest runqueue among the cpus in group. 2430 */ 2431 + static struct rq * 2432 find_busiest_queue(struct sched_group *group, enum idle_type idle, 2433 unsigned long imbalance) 2434 { 2435 + struct rq *busiest = NULL, *rq; 2436 unsigned long max_load = 0; 2437 int i; 2438 ··· 2468 * 2469 * Called with this_rq unlocked. 2470 */ 2471 + static int load_balance(int this_cpu, struct rq *this_rq, 2472 struct sched_domain *sd, enum idle_type idle) 2473 { 2474 int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; 2475 struct sched_group *group; 2476 unsigned long imbalance; 2477 + struct rq *busiest; 2478 2479 if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && 2480 !sched_smt_power_savings) ··· 2596 * this_rq is locked. 2597 */ 2598 static int 2599 + load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) 2600 { 2601 struct sched_group *group; 2602 + struct rq *busiest = NULL; 2603 unsigned long imbalance; 2604 int nr_moved = 0; 2605 int sd_idle = 0; ··· 2657 * idle_balance is called by schedule() if this_cpu is about to become 2658 * idle. Attempts to pull tasks from other CPUs. 2659 */ 2660 + static void idle_balance(int this_cpu, struct rq *this_rq) 2661 { 2662 struct sched_domain *sd; 2663 ··· 2678 * 2679 * Called with busiest_rq locked. 
2680 */ 2681 + static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) 2682 { 2683 int target_cpu = busiest_rq->push_cpu; 2684 + struct sched_domain *sd; 2685 + struct rq *target_rq; 2686 2687 /* Is there any task to move? */ 2688 if (busiest_rq->nr_running <= 1) ··· 2736 } 2737 2738 static void 2739 + rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle) 2740 { 2741 unsigned long this_load, interval, j = cpu_offset(this_cpu); 2742 struct sched_domain *sd; ··· 2790 /* 2791 * on UP we do not need to balance between CPUs: 2792 */ 2793 + static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle) 2794 { 2795 } 2796 + static inline void idle_balance(int cpu, struct rq *rq) 2797 { 2798 } 2799 #endif 2800 2801 + static inline int wake_priority_sleeper(struct rq *rq) 2802 { 2803 int ret = 0; 2804 ··· 2826 * Bank in p->sched_time the ns elapsed since the last tick or switch. 2827 */ 2828 static inline void 2829 + update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now) 2830 { 2831 p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); 2832 } ··· 2858 * increasing number of running tasks. We also ignore the interactivity 2859 * if a better static_prio task has expired: 2860 */ 2861 + static inline int expired_starving(struct rq *rq) 2862 { 2863 if (rq->curr->static_prio > rq->best_expired_prio) 2864 return 1; ··· 2900 cputime_t cputime) 2901 { 2902 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2903 + struct rq *rq = this_rq(); 2904 cputime64_t tmp; 2905 2906 p->stime = cputime_add(p->stime, cputime); ··· 2930 { 2931 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2932 cputime64_t tmp = cputime_to_cputime64(steal); 2933 + struct rq *rq = this_rq(); 2934 2935 if (p == rq->idle) { 2936 p->stime = cputime_add(p->stime, steal); ··· 2954 unsigned long long now = sched_clock(); 2955 struct task_struct *p = current; 2956 int cpu = smp_processor_id(); 2957 + struct rq *rq = cpu_rq(cpu); 2958 2959 update_cpu_clock(p, rq, now); 2960 ··· 3043 } 3044 3045 #ifdef CONFIG_SCHED_SMT 3046 + static inline void wakeup_busy_runqueue(struct rq *rq) 3047 { 3048 /* If an SMT runqueue is sleeping due to priority reasons wake it up */ 3049 if (rq->curr == rq->idle && rq->nr_running) ··· 3069 return; 3070 3071 for_each_cpu_mask(i, sd->span) { 3072 + struct rq *smt_rq = cpu_rq(i); 3073 3074 if (i == this_cpu) 3075 continue; ··· 3099 * need to be obeyed. 3100 */ 3101 static int 3102 + dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) 3103 { 3104 struct sched_domain *tmp, *sd = NULL; 3105 int ret = 0, i; ··· 3120 3121 for_each_cpu_mask(i, sd->span) { 3122 struct task_struct *smt_curr; 3123 + struct rq *smt_rq; 3124 3125 if (i == this_cpu) 3126 continue; ··· 3166 { 3167 } 3168 static inline int 3169 + dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) 3170 { 3171 return 0; 3172 } ··· 3221 asmlinkage void __sched schedule(void) 3222 { 3223 struct task_struct *prev, *next; 3224 + struct prio_array *array; 3225 struct list_head *queue; 3226 unsigned long long now; 3227 unsigned long run_time; 3228 int cpu, idx, new_prio; 3229 long *switch_count; 3230 + struct rq *rq; 3231 3232 /* 3233 * Test if we are atomic. 
Since do_exit() needs to call into ··· 3787 */ 3788 void rt_mutex_setprio(struct task_struct *p, int prio) 3789 { 3790 + struct prio_array *array; 3791 unsigned long flags; 3792 + struct rq *rq; 3793 int oldprio; 3794 3795 BUG_ON(prio < 0 || prio > MAX_PRIO); ··· 3828 3829 void set_user_nice(struct task_struct *p, long nice) 3830 { 3831 + struct prio_array *array; 3832 int old_prio, delta; 3833 unsigned long flags; 3834 + struct rq *rq; 3835 3836 if (TASK_NICE(p) == nice || nice < -20 || nice > 19) 3837 return; ··· 4012 struct sched_param *param) 4013 { 4014 int retval, oldprio, oldpolicy = -1; 4015 + struct prio_array *array; 4016 unsigned long flags; 4017 + struct rq *rq; 4018 4019 /* may grab non-irq protected spin_locks */ 4020 BUG_ON(in_interrupt()); ··· 4376 */ 4377 asmlinkage long sys_sched_yield(void) 4378 { 4379 + struct rq *rq = this_rq_lock(); 4380 + struct prio_array *array = current->array, *target = rq->expired; 4381 4382 schedstat_inc(rq, yld_cnt); 4383 /* ··· 4525 */ 4526 void __sched io_schedule(void) 4527 { 4528 + struct rq *rq = &__raw_get_cpu_var(runqueues); 4529 4530 atomic_inc(&rq->nr_iowait); 4531 schedule(); ··· 4535 4536 long __sched io_schedule_timeout(long timeout) 4537 { 4538 + struct rq *rq = &__raw_get_cpu_var(runqueues); 4539 long ret; 4540 4541 atomic_inc(&rq->nr_iowait); ··· 4743 */ 4744 void __devinit init_idle(struct task_struct *idle, int cpu) 4745 { 4746 + struct rq *rq = cpu_rq(cpu); 4747 unsigned long flags; 4748 4749 idle->timestamp = sched_clock(); ··· 4782 /* 4783 * This is how migration works: 4784 * 4785 + * 1) we queue a struct migration_req structure in the source CPU's 4786 * runqueue and wake up that CPU's migration thread. 4787 * 2) we down() the locked semaphore => thread blocks. 4788 * 3) migration thread wakes up (implicitly it forces the migrated ··· 4806 */ 4807 int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 4808 { 4809 + struct migration_req req; 4810 unsigned long flags; 4811 + struct rq *rq; 4812 int ret = 0; 4813 4814 rq = task_rq_lock(p, &flags); ··· 4850 */ 4851 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 4852 { 4853 + struct rq *rq_dest, *rq_src; 4854 int ret = 0; 4855 4856 if (unlikely(cpu_is_offline(dest_cpu))) ··· 4896 static int migration_thread(void *data) 4897 { 4898 int cpu = (long)data; 4899 + struct rq *rq; 4900 4901 rq = cpu_rq(cpu); 4902 BUG_ON(rq->migration_thread != current); 4903 4904 set_current_state(TASK_INTERRUPTIBLE); 4905 while (!kthread_should_stop()) { 4906 + struct migration_req *req; 4907 struct list_head *head; 4908 4909 try_to_freeze(); 4910 ··· 4928 set_current_state(TASK_INTERRUPTIBLE); 4929 continue; 4930 } 4931 + req = list_entry(head->next, struct migration_req, list); 4932 list_del_init(head->next); 4933 4934 spin_unlock(&rq->lock); ··· 4955 /* Figure out where task on dead CPU should go, use force if neccessary. */ 4956 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 4957 { 4958 unsigned long flags; 4959 cpumask_t mask; 4960 + struct rq *rq; 4961 + int dest_cpu; 4962 4963 restart: 4964 /* On same node? */ ··· 4998 * their home CPUs. 
So we just add the counter to another CPU's counter, 4999 * to keep the global sum constant after CPU-down: 5000 */ 5001 + static void migrate_nr_uninterruptible(struct rq *rq_src) 5002 { 5003 + struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); 5004 unsigned long flags; 5005 5006 local_irq_save(flags); ··· 5036 void sched_idle_next(void) 5037 { 5038 int this_cpu = smp_processor_id(); 5039 + struct rq *rq = cpu_rq(this_cpu); 5040 struct task_struct *p = rq->idle; 5041 unsigned long flags; 5042 ··· 5074 5075 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) 5076 { 5077 + struct rq *rq = cpu_rq(dead_cpu); 5078 5079 /* Must be exiting, otherwise would be on tasklist. */ 5080 BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); ··· 5099 /* release_task() removes task from tasklist, so we won't find dead tasks. */ 5100 static void migrate_dead_tasks(unsigned int dead_cpu) 5101 { 5102 + struct rq *rq = cpu_rq(dead_cpu); 5103 unsigned int arr, i; 5104 5105 for (arr = 0; arr < 2; arr++) { ··· 5123 { 5124 struct task_struct *p; 5125 int cpu = (long)hcpu; 5126 unsigned long flags; 5127 + struct rq *rq; 5128 5129 switch (action) { 5130 case CPU_UP_PREPARE: ··· 5176 * the requestors. */ 5177 spin_lock_irq(&rq->lock); 5178 while (!list_empty(&rq->migration_queue)) { 5179 + struct migration_req *req; 5180 + 5181 req = list_entry(rq->migration_queue.next, 5182 + struct migration_req, list); 5183 list_del_init(&req->list); 5184 complete(&req->done); 5185 } ··· 5361 */ 5362 static void cpu_attach_domain(struct sched_domain *sd, int cpu) 5363 { 5364 + struct rq *rq = cpu_rq(cpu); 5365 struct sched_domain *tmp; 5366 5367 /* Remove the sched domains which do not contribute to scheduling. */ ··· 6690 int i, j, k; 6691 6692 for_each_possible_cpu(i) { 6693 + struct prio_array *array; 6694 + struct rq *rq; 6695 6696 rq = cpu_rq(i); 6697 spin_lock_init(&rq->lock); ··· 6764 #ifdef CONFIG_MAGIC_SYSRQ 6765 void normalize_rt_tasks(void) 6766 { 6767 + struct prio_array *array; 6768 struct task_struct *p; 6769 unsigned long flags; 6770 + struct rq *rq; 6771 6772 read_lock_irq(&tasklist_lock); 6773 for_each_process(p) {