[PATCH] sched: cleanup, convert sched.c-internal typedefs to struct

convert:

- runqueue_t to 'struct rq'
- prio_array_t to 'struct prio_array'
- migration_req_t to 'struct migration_req'

I was the one who added these, but they are against the kernel coding style
and were also used inconsistently in places. So get rid of them all at once,
now that we are flushing the scheduler patch-queue anyway.

The conversion was mostly scripted; the result was reviewed, and any
secondary whitespace and style impact was fixed up by hand.
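
In shorthand, each conversion just drops the '_t' typedef layer and spells out
the struct tag at the use sites. A minimal, self-contained before/after sketch
of the pattern (the two fields are borrowed from the real struct rq in the diff
below; the helper functions are made up purely for illustration):

  /* old style: the struct is hidden behind a _t typedef */
  typedef struct runqueue {
          unsigned long nr_running;
          unsigned long raw_weighted_load;
  } runqueue_t;

  unsigned long old_weighted_load(runqueue_t *rq)
  {
          return rq->raw_weighted_load;
  }

  /* new style: the struct tag is written out at every use site */
  struct rq {
          unsigned long nr_running;
          unsigned long raw_weighted_load;
  };

  unsigned long new_weighted_load(struct rq *rq)
  {
          return rq->raw_weighted_load;
  }

The migration_req_t case is slightly different: the old code typedef'd an
anonymous struct, so giving it a proper tag (struct migration_req) is also
what makes it possible to forward-declare the type where only a pointer is
needed.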

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar and committed by Linus Torvalds (70b97a7f, 36c8b586)

2 files changed, 128 insertions(+), 127 deletions(-)
include/linux/sched.h: +3 -2
@@ -534,7 +534,6 @@
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
-typedef struct prio_array prio_array_t;
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -714,6 +715,8 @@
        SLEEP_INTERRUPTED,
 };
 
+struct prio_array;
+
 struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        struct thread_info *thread_info;
@@ -733,7 +732,7 @@
        int load_weight;        /* for niceness load balancing purposes */
        int prio, static_prio, normal_prio;
        struct list_head run_list;
-       prio_array_t *array;
+       struct prio_array *array;
 
        unsigned short ioprio;
        unsigned int btrace_seq;
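
The hunk above also shows why a bare forward declaration is all sched.h needs
now: task_struct only stores a pointer to the prio_array, and C accepts a
pointer to an incomplete type, so the full definition can stay private to
kernel/sched.c. A small sketch of that pattern (struct task_example and its
fields are hypothetical stand-ins, not kernel code):

  /* "header" side: only a pointer is stored, an incomplete type is enough */
  struct prio_array;                      /* forward declaration, no definition */

  struct task_example {                   /* hypothetical stand-in for task_struct */
          int prio;
          struct prio_array *array;       /* legal: pointer to an incomplete type */
  };

  /* "sched.c" side: the real definition stays private to that one file */
  struct prio_array {
          unsigned int nr_active;
          unsigned long bitmap[4];        /* placeholder, not the real layout */
  };

  /* dereferencing needs the complete type, so it only happens in sched.c */
  unsigned int task_array_active(struct task_example *t)
  {
          return t->array ? t->array->nr_active : 0;
  }

Before this patch, sched.h achieved the same decoupling by carrying the typedef
itself ("typedef struct prio_array prio_array_t;"); the plain forward
declaration does the job without exporting an extra type name.
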
kernel/sched.c: +125 -125
··· 188 188 * These are the runqueue data structures: 189 189 */ 190 190 191 - typedef struct runqueue runqueue_t; 192 - 193 191 struct prio_array { 194 192 unsigned int nr_active; 195 193 DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ ··· 201 203 * (such as the load balancing or the thread migration code), lock 202 204 * acquire operations must be ordered by ascending &runqueue. 203 205 */ 204 - struct runqueue { 206 + struct rq { 205 207 spinlock_t lock; 206 208 207 209 /* ··· 227 229 unsigned long long timestamp_last_tick; 228 230 struct task_struct *curr, *idle; 229 231 struct mm_struct *prev_mm; 230 - prio_array_t *active, *expired, arrays[2]; 232 + struct prio_array *active, *expired, arrays[2]; 231 233 int best_expired_prio; 232 234 atomic_t nr_iowait; 233 235 ··· 264 266 struct lock_class_key rq_lock_key; 265 267 }; 266 268 267 - static DEFINE_PER_CPU(struct runqueue, runqueues); 269 + static DEFINE_PER_CPU(struct rq, runqueues); 268 270 269 271 /* 270 272 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ··· 289 291 #endif 290 292 291 293 #ifndef __ARCH_WANT_UNLOCKED_CTXSW 292 - static inline int task_running(runqueue_t *rq, struct task_struct *p) 294 + static inline int task_running(struct rq *rq, struct task_struct *p) 293 295 { 294 296 return rq->curr == p; 295 297 } 296 298 297 - static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) 299 + static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) 298 300 { 299 301 } 300 302 301 - static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) 303 + static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) 302 304 { 303 305 #ifdef CONFIG_DEBUG_SPINLOCK 304 306 /* this is a valid case when another task releases the spinlock */ ··· 315 317 } 316 318 317 319 #else /* __ARCH_WANT_UNLOCKED_CTXSW */ 318 - static inline int task_running(runqueue_t *rq, struct task_struct *p) 320 + static inline int task_running(struct rq *rq, struct task_struct *p) 319 321 { 320 322 #ifdef CONFIG_SMP 321 323 return p->oncpu; ··· 324 326 #endif 325 327 } 326 328 327 - static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) 329 + static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) 328 330 { 329 331 #ifdef CONFIG_SMP 330 332 /* ··· 341 343 #endif 342 344 } 343 345 344 - static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) 346 + static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) 345 347 { 346 348 #ifdef CONFIG_SMP 347 349 /* ··· 362 364 * __task_rq_lock - lock the runqueue a given task resides on. 363 365 * Must be called interrupts disabled. 364 366 */ 365 - static inline runqueue_t *__task_rq_lock(struct task_struct *p) 367 + static inline struct rq *__task_rq_lock(struct task_struct *p) 366 368 __acquires(rq->lock) 367 369 { 368 - struct runqueue *rq; 370 + struct rq *rq; 369 371 370 372 repeat_lock_task: 371 373 rq = task_rq(p); ··· 382 384 * interrupts. Note the ordering: we can safely lookup the task_rq without 383 385 * explicitly disabling preemption. 
384 386 */ 385 - static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags) 387 + static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) 386 388 __acquires(rq->lock) 387 389 { 388 - struct runqueue *rq; 390 + struct rq *rq; 389 391 390 392 repeat_lock_task: 391 393 local_irq_save(*flags); ··· 398 400 return rq; 399 401 } 400 402 401 - static inline void __task_rq_unlock(runqueue_t *rq) 403 + static inline void __task_rq_unlock(struct rq *rq) 402 404 __releases(rq->lock) 403 405 { 404 406 spin_unlock(&rq->lock); 405 407 } 406 408 407 - static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) 409 + static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 408 410 __releases(rq->lock) 409 411 { 410 412 spin_unlock_irqrestore(&rq->lock, *flags); ··· 424 426 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); 425 427 seq_printf(seq, "timestamp %lu\n", jiffies); 426 428 for_each_online_cpu(cpu) { 427 - runqueue_t *rq = cpu_rq(cpu); 429 + struct rq *rq = cpu_rq(cpu); 428 430 #ifdef CONFIG_SMP 429 431 struct sched_domain *sd; 430 432 int dcnt = 0; ··· 511 513 /* 512 514 * rq_lock - lock a given runqueue and disable interrupts. 513 515 */ 514 - static inline runqueue_t *this_rq_lock(void) 516 + static inline struct rq *this_rq_lock(void) 515 517 __acquires(rq->lock) 516 518 { 517 - runqueue_t *rq; 519 + struct rq *rq; 518 520 519 521 local_irq_disable(); 520 522 rq = this_rq(); ··· 552 554 static void sched_info_arrive(struct task_struct *t) 553 555 { 554 556 unsigned long now = jiffies, diff = 0; 555 - struct runqueue *rq = task_rq(t); 557 + struct rq *rq = task_rq(t); 556 558 557 559 if (t->sched_info.last_queued) 558 560 diff = now - t->sched_info.last_queued; ··· 595 597 */ 596 598 static inline void sched_info_depart(struct task_struct *t) 597 599 { 598 - struct runqueue *rq = task_rq(t); 600 + struct rq *rq = task_rq(t); 599 601 unsigned long diff = jiffies - t->sched_info.last_arrival; 600 602 601 603 t->sched_info.cpu_time += diff; ··· 612 614 static inline void 613 615 sched_info_switch(struct task_struct *prev, struct task_struct *next) 614 616 { 615 - struct runqueue *rq = task_rq(prev); 617 + struct rq *rq = task_rq(prev); 616 618 617 619 /* 618 620 * prev now departs the cpu. It's not interesting to record ··· 633 635 /* 634 636 * Adding/removing a task to/from a priority array: 635 637 */ 636 - static void dequeue_task(struct task_struct *p, prio_array_t *array) 638 + static void dequeue_task(struct task_struct *p, struct prio_array *array) 637 639 { 638 640 array->nr_active--; 639 641 list_del(&p->run_list); ··· 641 643 __clear_bit(p->prio, array->bitmap); 642 644 } 643 645 644 - static void enqueue_task(struct task_struct *p, prio_array_t *array) 646 + static void enqueue_task(struct task_struct *p, struct prio_array *array) 645 647 { 646 648 sched_info_queued(p); 647 649 list_add_tail(&p->run_list, array->queue + p->prio); ··· 654 656 * Put task to the end of the run list without the overhead of dequeue 655 657 * followed by enqueue. 
656 658 */ 657 - static void requeue_task(struct task_struct *p, prio_array_t *array) 659 + static void requeue_task(struct task_struct *p, struct prio_array *array) 658 660 { 659 661 list_move_tail(&p->run_list, array->queue + p->prio); 660 662 } 661 663 662 - static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) 664 + static inline void 665 + enqueue_task_head(struct task_struct *p, struct prio_array *array) 663 666 { 664 667 list_add(&p->run_list, array->queue + p->prio); 665 668 __set_bit(p->prio, array->bitmap); ··· 738 739 } 739 740 740 741 static inline void 741 - inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) 742 + inc_raw_weighted_load(struct rq *rq, const struct task_struct *p) 742 743 { 743 744 rq->raw_weighted_load += p->load_weight; 744 745 } 745 746 746 747 static inline void 747 - dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) 748 + dec_raw_weighted_load(struct rq *rq, const struct task_struct *p) 748 749 { 749 750 rq->raw_weighted_load -= p->load_weight; 750 751 } 751 752 752 - static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq) 753 + static inline void inc_nr_running(struct task_struct *p, struct rq *rq) 753 754 { 754 755 rq->nr_running++; 755 756 inc_raw_weighted_load(rq, p); 756 757 } 757 758 758 - static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq) 759 + static inline void dec_nr_running(struct task_struct *p, struct rq *rq) 759 760 { 760 761 rq->nr_running--; 761 762 dec_raw_weighted_load(rq, p); ··· 802 803 /* 803 804 * __activate_task - move a task to the runqueue. 804 805 */ 805 - static void __activate_task(struct task_struct *p, runqueue_t *rq) 806 + static void __activate_task(struct task_struct *p, struct rq *rq) 806 807 { 807 - prio_array_t *target = rq->active; 808 + struct prio_array *target = rq->active; 808 809 809 810 if (batch_task(p)) 810 811 target = rq->expired; ··· 815 816 /* 816 817 * __activate_idle_task - move idle task to the _front_ of runqueue. 817 818 */ 818 - static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq) 819 + static inline void __activate_idle_task(struct task_struct *p, struct rq *rq) 819 820 { 820 821 enqueue_task_head(p, rq->active); 821 822 inc_nr_running(p, rq); ··· 897 898 * Update all the scheduling statistics stuff. (sleep average 898 899 * calculation, priority modifiers, etc.) 899 900 */ 900 - static void activate_task(struct task_struct *p, runqueue_t *rq, int local) 901 + static void activate_task(struct task_struct *p, struct rq *rq, int local) 901 902 { 902 903 unsigned long long now; 903 904 ··· 905 906 #ifdef CONFIG_SMP 906 907 if (!local) { 907 908 /* Compensate for drifting sched_clock */ 908 - runqueue_t *this_rq = this_rq(); 909 + struct rq *this_rq = this_rq(); 909 910 now = (now - this_rq->timestamp_last_tick) 910 911 + rq->timestamp_last_tick; 911 912 } ··· 944 945 /* 945 946 * deactivate_task - remove a task from the runqueue. 
946 947 */ 947 - static void deactivate_task(struct task_struct *p, runqueue_t *rq) 948 + static void deactivate_task(struct task_struct *p, struct rq *rq) 948 949 { 949 950 dec_nr_running(p, rq); 950 951 dequeue_task(p, p->array); ··· 1008 1009 } 1009 1010 1010 1011 #ifdef CONFIG_SMP 1011 - typedef struct { 1012 + struct migration_req { 1012 1013 struct list_head list; 1013 1014 1014 1015 struct task_struct *task; 1015 1016 int dest_cpu; 1016 1017 1017 1018 struct completion done; 1018 - } migration_req_t; 1019 + }; 1019 1020 1020 1021 /* 1021 1022 * The task's runqueue lock must be held. 1022 1023 * Returns true if you have to wait for migration thread. 1023 1024 */ 1024 1025 static int 1025 - migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req) 1026 + migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) 1026 1027 { 1027 - runqueue_t *rq = task_rq(p); 1028 + struct rq *rq = task_rq(p); 1028 1029 1029 1030 /* 1030 1031 * If the task is not on a runqueue (and not running), then ··· 1055 1056 void wait_task_inactive(struct task_struct *p) 1056 1057 { 1057 1058 unsigned long flags; 1058 - runqueue_t *rq; 1059 + struct rq *rq; 1059 1060 int preempted; 1060 1061 1061 1062 repeat: ··· 1106 1107 */ 1107 1108 static inline unsigned long source_load(int cpu, int type) 1108 1109 { 1109 - runqueue_t *rq = cpu_rq(cpu); 1110 + struct rq *rq = cpu_rq(cpu); 1110 1111 1111 1112 if (type == 0) 1112 1113 return rq->raw_weighted_load; ··· 1120 1121 */ 1121 1122 static inline unsigned long target_load(int cpu, int type) 1122 1123 { 1123 - runqueue_t *rq = cpu_rq(cpu); 1124 + struct rq *rq = cpu_rq(cpu); 1124 1125 1125 1126 if (type == 0) 1126 1127 return rq->raw_weighted_load; ··· 1133 1134 */ 1134 1135 static inline unsigned long cpu_avg_load_per_task(int cpu) 1135 1136 { 1136 - runqueue_t *rq = cpu_rq(cpu); 1137 + struct rq *rq = cpu_rq(cpu); 1137 1138 unsigned long n = rq->nr_running; 1138 1139 1139 1140 return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE; ··· 1337 1338 int cpu, this_cpu, success = 0; 1338 1339 unsigned long flags; 1339 1340 long old_state; 1340 - runqueue_t *rq; 1341 + struct rq *rq; 1341 1342 #ifdef CONFIG_SMP 1342 - unsigned long load, this_load; 1343 1343 struct sched_domain *sd, *this_sd = NULL; 1344 + unsigned long load, this_load; 1344 1345 int new_cpu; 1345 1346 #endif 1346 1347 ··· 1576 1577 */ 1577 1578 void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) 1578 1579 { 1580 + struct rq *rq, *this_rq; 1579 1581 unsigned long flags; 1580 1582 int this_cpu, cpu; 1581 - runqueue_t *rq, *this_rq; 1582 1583 1583 1584 rq = task_rq_lock(p, &flags); 1584 1585 BUG_ON(p->state != TASK_RUNNING); ··· 1661 1662 void fastcall sched_exit(struct task_struct *p) 1662 1663 { 1663 1664 unsigned long flags; 1664 - runqueue_t *rq; 1665 + struct rq *rq; 1665 1666 1666 1667 /* 1667 1668 * If the child was a (relative-) CPU hog then decrease ··· 1692 1693 * prepare_task_switch sets up locking and calls architecture specific 1693 1694 * hooks. 1694 1695 */ 1695 - static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) 1696 + static inline void prepare_task_switch(struct rq *rq, struct task_struct *next) 1696 1697 { 1697 1698 prepare_lock_switch(rq, next); 1698 1699 prepare_arch_switch(next); ··· 1713 1714 * with the lock held can cause deadlocks; see schedule() for 1714 1715 * details.) 
1715 1716 */ 1716 - static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) 1717 + static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) 1717 1718 __releases(rq->lock) 1718 1719 { 1719 1720 struct mm_struct *mm = rq->prev_mm; ··· 1754 1755 asmlinkage void schedule_tail(struct task_struct *prev) 1755 1756 __releases(rq->lock) 1756 1757 { 1757 - runqueue_t *rq = this_rq(); 1758 + struct rq *rq = this_rq(); 1759 + 1758 1760 finish_task_switch(rq, prev); 1759 1761 #ifdef __ARCH_WANT_UNLOCKED_CTXSW 1760 1762 /* In this case, finish_task_switch does not reenable preemption */ ··· 1770 1770 * thread's register state. 1771 1771 */ 1772 1772 static inline struct task_struct * 1773 - context_switch(runqueue_t *rq, struct task_struct *prev, 1773 + context_switch(struct rq *rq, struct task_struct *prev, 1774 1774 struct task_struct *next) 1775 1775 { 1776 1776 struct mm_struct *mm = next->mm; ··· 1883 1883 * Note this does not disable interrupts like task_rq_lock, 1884 1884 * you need to do so manually before calling. 1885 1885 */ 1886 - static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) 1886 + static void double_rq_lock(struct rq *rq1, struct rq *rq2) 1887 1887 __acquires(rq1->lock) 1888 1888 __acquires(rq2->lock) 1889 1889 { ··· 1907 1907 * Note this does not restore interrupts like task_rq_unlock, 1908 1908 * you need to do so manually after calling. 1909 1909 */ 1910 - static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) 1910 + static void double_rq_unlock(struct rq *rq1, struct rq *rq2) 1911 1911 __releases(rq1->lock) 1912 1912 __releases(rq2->lock) 1913 1913 { ··· 1921 1921 /* 1922 1922 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1923 1923 */ 1924 - static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) 1924 + static void double_lock_balance(struct rq *this_rq, struct rq *busiest) 1925 1925 __releases(this_rq->lock) 1926 1926 __acquires(busiest->lock) 1927 1927 __acquires(this_rq->lock) ··· 1944 1944 */ 1945 1945 static void sched_migrate_task(struct task_struct *p, int dest_cpu) 1946 1946 { 1947 - migration_req_t req; 1948 - runqueue_t *rq; 1947 + struct migration_req req; 1949 1948 unsigned long flags; 1949 + struct rq *rq; 1950 1950 1951 1951 rq = task_rq_lock(p, &flags); 1952 1952 if (!cpu_isset(dest_cpu, p->cpus_allowed) ··· 1987 1987 * pull_task - move a task from a remote runqueue to the local runqueue. 1988 1988 * Both runqueues must be locked. 1989 1989 */ 1990 - static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, 1991 - struct task_struct *p, runqueue_t *this_rq, 1992 - prio_array_t *this_array, int this_cpu) 1990 + static void pull_task(struct rq *src_rq, struct prio_array *src_array, 1991 + struct task_struct *p, struct rq *this_rq, 1992 + struct prio_array *this_array, int this_cpu) 1993 1993 { 1994 1994 dequeue_task(p, src_array); 1995 1995 dec_nr_running(p, src_rq); ··· 2010 2010 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 2011 2011 */ 2012 2012 static 2013 - int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, 2013 + int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, 2014 2014 struct sched_domain *sd, enum idle_type idle, 2015 2015 int *all_pinned) 2016 2016 { ··· 2050 2050 * 2051 2051 * Called with both runqueues locked. 
2052 2052 */ 2053 - static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, 2053 + static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, 2054 2054 unsigned long max_nr_move, unsigned long max_load_move, 2055 2055 struct sched_domain *sd, enum idle_type idle, 2056 2056 int *all_pinned) 2057 2057 { 2058 2058 int idx, pulled = 0, pinned = 0, this_best_prio, best_prio, 2059 2059 best_prio_seen, skip_for_load; 2060 - prio_array_t *array, *dst_array; 2060 + struct prio_array *array, *dst_array; 2061 2061 struct list_head *head, *curr; 2062 2062 struct task_struct *tmp; 2063 2063 long rem_load_move; ··· 2212 2212 sum_weighted_load = sum_nr_running = avg_load = 0; 2213 2213 2214 2214 for_each_cpu_mask(i, group->cpumask) { 2215 - runqueue_t *rq = cpu_rq(i); 2215 + struct rq *rq = cpu_rq(i); 2216 2216 2217 2217 if (*sd_idle && !idle_cpu(i)) 2218 2218 *sd_idle = 0; ··· 2428 2428 /* 2429 2429 * find_busiest_queue - find the busiest runqueue among the cpus in group. 2430 2430 */ 2431 - static runqueue_t * 2431 + static struct rq * 2432 2432 find_busiest_queue(struct sched_group *group, enum idle_type idle, 2433 2433 unsigned long imbalance) 2434 2434 { 2435 - runqueue_t *busiest = NULL, *rq; 2435 + struct rq *busiest = NULL, *rq; 2436 2436 unsigned long max_load = 0; 2437 2437 int i; 2438 2438 ··· 2468 2468 * 2469 2469 * Called with this_rq unlocked. 2470 2470 */ 2471 - static int load_balance(int this_cpu, runqueue_t *this_rq, 2471 + static int load_balance(int this_cpu, struct rq *this_rq, 2472 2472 struct sched_domain *sd, enum idle_type idle) 2473 2473 { 2474 2474 int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; 2475 2475 struct sched_group *group; 2476 2476 unsigned long imbalance; 2477 - runqueue_t *busiest; 2477 + struct rq *busiest; 2478 2478 2479 2479 if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && 2480 2480 !sched_smt_power_savings) ··· 2596 2596 * this_rq is locked. 2597 2597 */ 2598 2598 static int 2599 - load_balance_newidle(int this_cpu, runqueue_t *this_rq, struct sched_domain *sd) 2599 + load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) 2600 2600 { 2601 2601 struct sched_group *group; 2602 - runqueue_t *busiest = NULL; 2602 + struct rq *busiest = NULL; 2603 2603 unsigned long imbalance; 2604 2604 int nr_moved = 0; 2605 2605 int sd_idle = 0; ··· 2657 2657 * idle_balance is called by schedule() if this_cpu is about to become 2658 2658 * idle. Attempts to pull tasks from other CPUs. 2659 2659 */ 2660 - static void idle_balance(int this_cpu, runqueue_t *this_rq) 2660 + static void idle_balance(int this_cpu, struct rq *this_rq) 2661 2661 { 2662 2662 struct sched_domain *sd; 2663 2663 ··· 2678 2678 * 2679 2679 * Called with busiest_rq locked. 2680 2680 */ 2681 - static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) 2681 + static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) 2682 2682 { 2683 - struct sched_domain *sd; 2684 - runqueue_t *target_rq; 2685 2683 int target_cpu = busiest_rq->push_cpu; 2684 + struct sched_domain *sd; 2685 + struct rq *target_rq; 2686 2686 2687 2687 /* Is there any task to move? 
*/ 2688 2688 if (busiest_rq->nr_running <= 1) ··· 2736 2736 } 2737 2737 2738 2738 static void 2739 - rebalance_tick(int this_cpu, runqueue_t *this_rq, enum idle_type idle) 2739 + rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle) 2740 2740 { 2741 2741 unsigned long this_load, interval, j = cpu_offset(this_cpu); 2742 2742 struct sched_domain *sd; ··· 2790 2790 /* 2791 2791 * on UP we do not need to balance between CPUs: 2792 2792 */ 2793 - static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) 2793 + static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle) 2794 2794 { 2795 2795 } 2796 - static inline void idle_balance(int cpu, runqueue_t *rq) 2796 + static inline void idle_balance(int cpu, struct rq *rq) 2797 2797 { 2798 2798 } 2799 2799 #endif 2800 2800 2801 - static inline int wake_priority_sleeper(runqueue_t *rq) 2801 + static inline int wake_priority_sleeper(struct rq *rq) 2802 2802 { 2803 2803 int ret = 0; 2804 2804 ··· 2826 2826 * Bank in p->sched_time the ns elapsed since the last tick or switch. 2827 2827 */ 2828 2828 static inline void 2829 - update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now) 2829 + update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now) 2830 2830 { 2831 2831 p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); 2832 2832 } ··· 2858 2858 * increasing number of running tasks. We also ignore the interactivity 2859 2859 * if a better static_prio task has expired: 2860 2860 */ 2861 - static inline int expired_starving(runqueue_t *rq) 2861 + static inline int expired_starving(struct rq *rq) 2862 2862 { 2863 2863 if (rq->curr->static_prio > rq->best_expired_prio) 2864 2864 return 1; ··· 2900 2900 cputime_t cputime) 2901 2901 { 2902 2902 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2903 - runqueue_t *rq = this_rq(); 2903 + struct rq *rq = this_rq(); 2904 2904 cputime64_t tmp; 2905 2905 2906 2906 p->stime = cputime_add(p->stime, cputime); ··· 2930 2930 { 2931 2931 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2932 2932 cputime64_t tmp = cputime_to_cputime64(steal); 2933 - runqueue_t *rq = this_rq(); 2933 + struct rq *rq = this_rq(); 2934 2934 2935 2935 if (p == rq->idle) { 2936 2936 p->stime = cputime_add(p->stime, steal); ··· 2954 2954 unsigned long long now = sched_clock(); 2955 2955 struct task_struct *p = current; 2956 2956 int cpu = smp_processor_id(); 2957 - runqueue_t *rq = this_rq(); 2957 + struct rq *rq = cpu_rq(cpu); 2958 2958 2959 2959 update_cpu_clock(p, rq, now); 2960 2960 ··· 3043 3043 } 3044 3044 3045 3045 #ifdef CONFIG_SCHED_SMT 3046 - static inline void wakeup_busy_runqueue(runqueue_t *rq) 3046 + static inline void wakeup_busy_runqueue(struct rq *rq) 3047 3047 { 3048 3048 /* If an SMT runqueue is sleeping due to priority reasons wake it up */ 3049 3049 if (rq->curr == rq->idle && rq->nr_running) ··· 3069 3069 return; 3070 3070 3071 3071 for_each_cpu_mask(i, sd->span) { 3072 - runqueue_t *smt_rq = cpu_rq(i); 3072 + struct rq *smt_rq = cpu_rq(i); 3073 3073 3074 3074 if (i == this_cpu) 3075 3075 continue; ··· 3099 3099 * need to be obeyed. 
3100 3100 */ 3101 3101 static int 3102 - dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) 3102 + dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) 3103 3103 { 3104 3104 struct sched_domain *tmp, *sd = NULL; 3105 3105 int ret = 0, i; ··· 3120 3120 3121 3121 for_each_cpu_mask(i, sd->span) { 3122 3122 struct task_struct *smt_curr; 3123 - runqueue_t *smt_rq; 3123 + struct rq *smt_rq; 3124 3124 3125 3125 if (i == this_cpu) 3126 3126 continue; ··· 3166 3166 { 3167 3167 } 3168 3168 static inline int 3169 - dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) 3169 + dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p) 3170 3170 { 3171 3171 return 0; 3172 3172 } ··· 3221 3221 asmlinkage void __sched schedule(void) 3222 3222 { 3223 3223 struct task_struct *prev, *next; 3224 + struct prio_array *array; 3224 3225 struct list_head *queue; 3225 3226 unsigned long long now; 3226 3227 unsigned long run_time; 3227 3228 int cpu, idx, new_prio; 3228 - prio_array_t *array; 3229 3229 long *switch_count; 3230 - runqueue_t *rq; 3230 + struct rq *rq; 3231 3231 3232 3232 /* 3233 3233 * Test if we are atomic. Since do_exit() needs to call into ··· 3787 3787 */ 3788 3788 void rt_mutex_setprio(struct task_struct *p, int prio) 3789 3789 { 3790 + struct prio_array *array; 3790 3791 unsigned long flags; 3791 - prio_array_t *array; 3792 - runqueue_t *rq; 3792 + struct rq *rq; 3793 3793 int oldprio; 3794 3794 3795 3795 BUG_ON(prio < 0 || prio > MAX_PRIO); ··· 3828 3828 3829 3829 void set_user_nice(struct task_struct *p, long nice) 3830 3830 { 3831 + struct prio_array *array; 3831 3832 int old_prio, delta; 3832 3833 unsigned long flags; 3833 - prio_array_t *array; 3834 - runqueue_t *rq; 3834 + struct rq *rq; 3835 3835 3836 3836 if (TASK_NICE(p) == nice || nice < -20 || nice > 19) 3837 3837 return; ··· 4012 4012 struct sched_param *param) 4013 4013 { 4014 4014 int retval, oldprio, oldpolicy = -1; 4015 - prio_array_t *array; 4015 + struct prio_array *array; 4016 4016 unsigned long flags; 4017 - runqueue_t *rq; 4017 + struct rq *rq; 4018 4018 4019 4019 /* may grab non-irq protected spin_locks */ 4020 4020 BUG_ON(in_interrupt()); ··· 4376 4376 */ 4377 4377 asmlinkage long sys_sched_yield(void) 4378 4378 { 4379 - runqueue_t *rq = this_rq_lock(); 4380 - prio_array_t *array = current->array; 4381 - prio_array_t *target = rq->expired; 4379 + struct rq *rq = this_rq_lock(); 4380 + struct prio_array *array = current->array, *target = rq->expired; 4382 4381 4383 4382 schedstat_inc(rq, yld_cnt); 4384 4383 /* ··· 4524 4525 */ 4525 4526 void __sched io_schedule(void) 4526 4527 { 4527 - struct runqueue *rq = &__raw_get_cpu_var(runqueues); 4528 + struct rq *rq = &__raw_get_cpu_var(runqueues); 4528 4529 4529 4530 atomic_inc(&rq->nr_iowait); 4530 4531 schedule(); ··· 4534 4535 4535 4536 long __sched io_schedule_timeout(long timeout) 4536 4537 { 4537 - struct runqueue *rq = &__raw_get_cpu_var(runqueues); 4538 + struct rq *rq = &__raw_get_cpu_var(runqueues); 4538 4539 long ret; 4539 4540 4540 4541 atomic_inc(&rq->nr_iowait); ··· 4742 4743 */ 4743 4744 void __devinit init_idle(struct task_struct *idle, int cpu) 4744 4745 { 4745 - runqueue_t *rq = cpu_rq(cpu); 4746 + struct rq *rq = cpu_rq(cpu); 4746 4747 unsigned long flags; 4747 4748 4748 4749 idle->timestamp = sched_clock(); ··· 4781 4782 /* 4782 4783 * This is how migration works: 4783 4784 * 4784 - * 1) we queue a migration_req_t structure in the source CPU's 4785 + * 1) we queue a struct 
migration_req structure in the source CPU's 4785 4786 * runqueue and wake up that CPU's migration thread. 4786 4787 * 2) we down() the locked semaphore => thread blocks. 4787 4788 * 3) migration thread wakes up (implicitly it forces the migrated ··· 4805 4806 */ 4806 4807 int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) 4807 4808 { 4809 + struct migration_req req; 4808 4810 unsigned long flags; 4809 - migration_req_t req; 4810 - runqueue_t *rq; 4811 + struct rq *rq; 4811 4812 int ret = 0; 4812 4813 4813 4814 rq = task_rq_lock(p, &flags); ··· 4849 4850 */ 4850 4851 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 4851 4852 { 4852 - runqueue_t *rq_dest, *rq_src; 4853 + struct rq *rq_dest, *rq_src; 4853 4854 int ret = 0; 4854 4855 4855 4856 if (unlikely(cpu_is_offline(dest_cpu))) ··· 4895 4896 static int migration_thread(void *data) 4896 4897 { 4897 4898 int cpu = (long)data; 4898 - runqueue_t *rq; 4899 + struct rq *rq; 4899 4900 4900 4901 rq = cpu_rq(cpu); 4901 4902 BUG_ON(rq->migration_thread != current); 4902 4903 4903 4904 set_current_state(TASK_INTERRUPTIBLE); 4904 4905 while (!kthread_should_stop()) { 4906 + struct migration_req *req; 4905 4907 struct list_head *head; 4906 - migration_req_t *req; 4907 4908 4908 4909 try_to_freeze(); 4909 4910 ··· 4927 4928 set_current_state(TASK_INTERRUPTIBLE); 4928 4929 continue; 4929 4930 } 4930 - req = list_entry(head->next, migration_req_t, list); 4931 + req = list_entry(head->next, struct migration_req, list); 4931 4932 list_del_init(head->next); 4932 4933 4933 4934 spin_unlock(&rq->lock); ··· 4954 4955 /* Figure out where task on dead CPU should go, use force if neccessary. */ 4955 4956 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 4956 4957 { 4957 - runqueue_t *rq; 4958 4958 unsigned long flags; 4959 - int dest_cpu; 4960 4959 cpumask_t mask; 4960 + struct rq *rq; 4961 + int dest_cpu; 4961 4962 4962 4963 restart: 4963 4964 /* On same node? */ ··· 4997 4998 * their home CPUs. So we just add the counter to another CPU's counter, 4998 4999 * to keep the global sum constant after CPU-down: 4999 5000 */ 5000 - static void migrate_nr_uninterruptible(runqueue_t *rq_src) 5001 + static void migrate_nr_uninterruptible(struct rq *rq_src) 5001 5002 { 5002 - runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); 5003 + struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); 5003 5004 unsigned long flags; 5004 5005 5005 5006 local_irq_save(flags); ··· 5035 5036 void sched_idle_next(void) 5036 5037 { 5037 5038 int this_cpu = smp_processor_id(); 5038 - runqueue_t *rq = cpu_rq(this_cpu); 5039 + struct rq *rq = cpu_rq(this_cpu); 5039 5040 struct task_struct *p = rq->idle; 5040 5041 unsigned long flags; 5041 5042 ··· 5073 5074 5074 5075 static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) 5075 5076 { 5076 - struct runqueue *rq = cpu_rq(dead_cpu); 5077 + struct rq *rq = cpu_rq(dead_cpu); 5077 5078 5078 5079 /* Must be exiting, otherwise would be on tasklist. */ 5079 5080 BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); ··· 5098 5099 /* release_task() removes task from tasklist, so we won't find dead tasks. 
*/ 5099 5100 static void migrate_dead_tasks(unsigned int dead_cpu) 5100 5101 { 5101 - struct runqueue *rq = cpu_rq(dead_cpu); 5102 + struct rq *rq = cpu_rq(dead_cpu); 5102 5103 unsigned int arr, i; 5103 5104 5104 5105 for (arr = 0; arr < 2; arr++) { ··· 5122 5123 { 5123 5124 struct task_struct *p; 5124 5125 int cpu = (long)hcpu; 5125 - struct runqueue *rq; 5126 5126 unsigned long flags; 5127 + struct rq *rq; 5127 5128 5128 5129 switch (action) { 5129 5130 case CPU_UP_PREPARE: ··· 5175 5176 * the requestors. */ 5176 5177 spin_lock_irq(&rq->lock); 5177 5178 while (!list_empty(&rq->migration_queue)) { 5178 - migration_req_t *req; 5179 + struct migration_req *req; 5180 + 5179 5181 req = list_entry(rq->migration_queue.next, 5180 - migration_req_t, list); 5182 + struct migration_req, list); 5181 5183 list_del_init(&req->list); 5182 5184 complete(&req->done); 5183 5185 } ··· 5361 5361 */ 5362 5362 static void cpu_attach_domain(struct sched_domain *sd, int cpu) 5363 5363 { 5364 - runqueue_t *rq = cpu_rq(cpu); 5364 + struct rq *rq = cpu_rq(cpu); 5365 5365 struct sched_domain *tmp; 5366 5366 5367 5367 /* Remove the sched domains which do not contribute to scheduling. */ ··· 6690 6690 int i, j, k; 6691 6691 6692 6692 for_each_possible_cpu(i) { 6693 - prio_array_t *array; 6694 - runqueue_t *rq; 6693 + struct prio_array *array; 6694 + struct rq *rq; 6695 6695 6696 6696 rq = cpu_rq(i); 6697 6697 spin_lock_init(&rq->lock); ··· 6764 6764 #ifdef CONFIG_MAGIC_SYSRQ 6765 6765 void normalize_rt_tasks(void) 6766 6766 { 6767 + struct prio_array *array; 6767 6768 struct task_struct *p; 6768 - prio_array_t *array; 6769 6769 unsigned long flags; 6770 - runqueue_t *rq; 6770 + struct rq *rq; 6771 6771 6772 6772 read_lock_irq(&tasklist_lock); 6773 6773 for_each_process(p) {