Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched debug: dont print kernel address in /proc/sched_debug
sched: fix typo in the FAIR_GROUP_SCHED branch
sched: improve rq-clock overflow logic

3 files changed, 17 insertions(+), 7 deletions(-)
kernel/sched.c (+13 -2)
···
 	unsigned int clock_warps, clock_overflows;
 	unsigned int clock_unstable_events;
+	u64 tick_timestamp;

 	atomic_t nr_iowait;
···
 	/*
 	 * Catch too large forward jumps too:
 	 */
-	if (unlikely(delta > 2*TICK_NSEC)) {
-		clock++;
+	if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
+		if (clock < rq->tick_timestamp + TICK_NSEC)
+			clock = rq->tick_timestamp + TICK_NSEC;
+		else
+			clock++;
 		rq->clock_overflows++;
 	} else {
 		if (unlikely(delta > rq->clock_max_delta))
···
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
+	u64 next_tick = rq->tick_timestamp + TICK_NSEC;

 	spin_lock(&rq->lock);
 	__update_rq_clock(rq);
+	/*
+	 * Let rq->clock advance by at least TICK_NSEC:
+	 */
+	if (unlikely(rq->clock < next_tick))
+		rq->clock = next_tick;
+	rq->tick_timestamp = rq->clock;
 	update_cpu_load(rq);
 	if (curr != rq->idle) /* FIXME: needed? */
 		curr->sched_class->task_tick(rq, curr);
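For readers skimming the rq-clock change above: __update_rq_clock() now refuses to let one sched_clock() delta carry rq->clock more than a single tick past the last scheduler tick, and scheduler_tick() guarantees at least one tick of forward progress before recording the new tick_timestamp. Below is a minimal, self-contained user-space sketch of that clamping rule; the TICK_NSEC value, struct rq_sketch, and both helpers are simplified stand-ins for illustration, not the kernel's definitions (the real else-branch also tracks clock_max_delta, omitted here).

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL    /* hypothetical 1 ms tick, illustration only */

/* Simplified stand-in for the rq fields the patch touches. */
struct rq_sketch {
        uint64_t clock;            /* per-runqueue clock, in ns */
        uint64_t tick_timestamp;   /* rq->clock at the last scheduler tick */
        unsigned int clock_overflows;
};

/* Apply one clock update under the patched overflow rule. */
static void update_rq_clock_sketch(struct rq_sketch *rq, uint64_t delta)
{
        uint64_t clock = rq->clock;

        if (clock + delta > rq->tick_timestamp + TICK_NSEC) {
                /*
                 * Too large a forward jump: clamp to one tick past the
                 * last tick, or nudge by 1 ns if already beyond that.
                 */
                if (clock < rq->tick_timestamp + TICK_NSEC)
                        clock = rq->tick_timestamp + TICK_NSEC;
                else
                        clock++;
                rq->clock_overflows++;
        } else {
                clock += delta;
        }
        rq->clock = clock;
}

/*
 * Per-tick bookkeeping, mirroring the scheduler_tick() hunk: ensure the
 * clock advanced by at least TICK_NSEC, then record the tick timestamp.
 */
static void scheduler_tick_sketch(struct rq_sketch *rq, uint64_t delta)
{
        uint64_t next_tick = rq->tick_timestamp + TICK_NSEC;

        update_rq_clock_sketch(rq, delta);
        if (rq->clock < next_tick)
                rq->clock = next_tick;
        rq->tick_timestamp = rq->clock;
}

int main(void)
{
        struct rq_sketch rq = { 0, 0, 0 };

        /* A wild 10-tick jump is clamped to one tick of progress: */
        scheduler_tick_sketch(&rq, 10 * TICK_NSEC);
        printf("clock=%llu overflows=%u\n",
               (unsigned long long)rq.clock, rq.clock_overflows);

        /* A sane sub-tick delta passes the overflow check; the tick
         * handler then rounds the clock up to the full tick: */
        scheduler_tick_sketch(&rq, TICK_NSEC / 2);
        printf("clock=%llu overflows=%u\n",
               (unsigned long long)rq.clock, rq.clock_overflows);
        return 0;
}

The net effect is that between two ticks the clock can drift at most one tick ahead, so a misbehaving sched_clock() cannot launch rq->clock far into the future.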
kernel/sched_debug.c (+1 -1)
···
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
-	SEQ_printf(m, "\ncfs_rq %p\n", cfs_rq);
+	SEQ_printf(m, "\ncfs_rq\n");

 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
kernel/sched_fair.c (+3 -4)
···
 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		struct cfs_rq *this_cfs_rq;
-		long imbalances;
+		long imbalance;
 		unsigned long maxload;

 		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

-		imbalance = busy_cfs_rq->load.weight -
-				this_cfs_rq->load.weight;
+		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
 		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
 		if (imbalance <= 0)
 			continue;
···
 		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
 #else
-#define maxload rem_load_move
+# define maxload rem_load_move
 #endif
 		/* pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators