Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched debug: dont print kernel address in /proc/sched_debug
sched: fix typo in the FAIR_GROUP_SCHED branch
sched: improve rq-clock overflow logic

3 files changed, 17 insertions(+), 7 deletions(-)

kernel/sched.c (+13 -2)

···
 
 	unsigned int clock_warps, clock_overflows;
 	unsigned int clock_unstable_events;
+	u64 tick_timestamp;
 
 	atomic_t nr_iowait;
 
···
 		/*
 		 * Catch too large forward jumps too:
 		 */
-		if (unlikely(delta > 2*TICK_NSEC)) {
-			clock++;
+		if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
+			if (clock < rq->tick_timestamp + TICK_NSEC)
+				clock = rq->tick_timestamp + TICK_NSEC;
+			else
+				clock++;
 			rq->clock_overflows++;
 		} else {
 			if (unlikely(delta > rq->clock_max_delta))
···
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
+	u64 next_tick = rq->tick_timestamp + TICK_NSEC;
 
 	spin_lock(&rq->lock);
 	__update_rq_clock(rq);
+	/*
+	 * Let rq->clock advance by at least TICK_NSEC:
+	 */
+	if (unlikely(rq->clock < next_tick))
+		rq->clock = next_tick;
+	rq->tick_timestamp = rq->clock;
 	update_cpu_load(rq);
 	if (curr != rq->idle) /* FIXME: needed? */
 		curr->sched_class->task_tick(rq, curr);
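
For readers skimming the diff, the kernel/sched.c change above can be read as the following self-contained userspace sketch. This is an illustration only: struct rq is mocked down to the fields the patch touches, sched_clock() is a stub, raw_clock and the HZ value are invented for the example; only TICK_NSEC, tick_timestamp and the clamping rules are taken from the diff.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ           250                    /* arbitrary for the example */
#define TICK_NSEC    (NSEC_PER_SEC / HZ)

/* Mock rq: only the fields the patch touches. */
struct rq {
        uint64_t clock, prev_clock_raw;
        uint64_t tick_timestamp;            /* field added by the patch */
        unsigned int clock_warps, clock_overflows;
};

/* Stub for the raw (possibly unstable) clock source. */
static uint64_t raw_clock;
static uint64_t sched_clock(void) { return raw_clock; }

static void __update_rq_clock(struct rq *rq)
{
        uint64_t now = sched_clock();
        int64_t delta = (int64_t)(now - rq->prev_clock_raw);
        uint64_t clock = rq->clock;

        if (delta < 0) {
                /* Clock warped backwards: ignore the raw value. */
                clock++;
                rq->clock_warps++;
        } else if (clock + delta > rq->tick_timestamp + TICK_NSEC) {
                /*
                 * Catch too large forward jumps: never let rq->clock get
                 * more than one tick ahead of the last tick timestamp.
                 */
                if (clock < rq->tick_timestamp + TICK_NSEC)
                        clock = rq->tick_timestamp + TICK_NSEC;
                else
                        clock++;
                rq->clock_overflows++;
        } else {
                clock += delta;
        }

        rq->prev_clock_raw = now;
        rq->clock = clock;
}

static void scheduler_tick(struct rq *rq)
{
        uint64_t next_tick = rq->tick_timestamp + TICK_NSEC;

        __update_rq_clock(rq);
        /* Let rq->clock advance by at least TICK_NSEC per tick: */
        if (rq->clock < next_tick)
                rq->clock = next_tick;
        rq->tick_timestamp = rq->clock;
}

int main(void)
{
        struct rq rq = { 0 };

        raw_clock = 10 * TICK_NSEC;         /* simulate a huge forward jump */
        scheduler_tick(&rq);
        printf("clock=%llu overflows=%u\n",
               (unsigned long long)rq.clock, rq.clock_overflows);
        return 0;
}

The net effect is a two-sided bound: __update_rq_clock() no longer lets a bad clock source push rq->clock much more than one TICK_NSEC past the last tick timestamp, while scheduler_tick() guarantees it advances by at least TICK_NSEC per tick before refreshing tick_timestamp.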

kernel/sched_debug.c (+1 -1)

···
 
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
-	SEQ_printf(m, "\ncfs_rq %p\n", cfs_rq);
+	SEQ_printf(m, "\ncfs_rq\n");
 
 #define P(x) \
 	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))

kernel/sched_fair.c (+3 -4)

···
 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		struct cfs_rq *this_cfs_rq;
-		long imbalances;
+		long imbalance;
 		unsigned long maxload;
 
 		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
-		imbalance = busy_cfs_rq->load.weight -
-				this_cfs_rq->load.weight;
+		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
 		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
 		if (imbalance <= 0)
 			continue;
···
 
 		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
 #else
-#define maxload rem_load_move
+# define maxload rem_load_move
 #endif
 		/* pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators