sched: debug: fix cfs_rq->wait_runtime accounting

the cfs_rq->wait_runtime debug/statistics counter was not being
maintained properly - fix this by accounting each entity's wait_runtime
into the counter at enqueue/dequeue time, in __enqueue_entity() and
__dequeue_entity(), and by removing the scattered updates elsewhere.
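
(illustration only, not part of the patch: a minimal stand-alone model
of the enqueue/dequeue pairing the fix introduces - the counter gets an
entity's wait_runtime added when the entity is queued and subtracted
again when it is dequeued. the names below are made up for the example,
and it deliberately ignores wait_runtime changing while an entity is
queued.)

#include <assert.h>

/* stand-ins for sched_entity and cfs_rq - just the one field we care about */
struct se_model     { long long wait_runtime; };
struct cfs_rq_model { long long wait_runtime; };

/* what __enqueue_entity() now does via schedstat_add(): account the entity */
static void model_enqueue(struct cfs_rq_model *cfs_rq, struct se_model *se)
{
        cfs_rq->wait_runtime += se->wait_runtime;
}

/* what __dequeue_entity() now does: un-account it again */
static void model_dequeue(struct cfs_rq_model *cfs_rq, struct se_model *se)
{
        cfs_rq->wait_runtime -= se->wait_runtime;
}

int main(void)
{
        struct cfs_rq_model rq = { 0 };
        struct se_model a = { -5000 }, b = { 1200 };

        model_enqueue(&rq, &a);
        model_enqueue(&rq, &b);
        /* the counter matches the sum over the queued entities */
        assert(rq.wait_runtime == a.wait_runtime + b.wait_runtime);

        model_dequeue(&rq, &a);
        assert(rq.wait_runtime == b.wait_runtime);
        return 0;
}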

this also removes some code:

   text    data     bss     dec     hex filename
  13420     228    1204   14852    3a04 sched.o.before
  13404     228    1204   14836    39f4 sched.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>

 kernel/sched.c      |  1 -
 kernel/sched_fair.c | 10 +++++-----
 2 files changed, 5 insertions(+), 6 deletions(-)

--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -858,7 +858,6 @@
 
 static void set_load_weight(struct task_struct *p)
 {
-        task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
         p->se.wait_runtime = 0;
 
         if (task_has_rt_policy(p)) {
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -194,6 +194,8 @@
         update_load_add(&cfs_rq->load, se->load.weight);
         cfs_rq->nr_running++;
         se->on_rq = 1;
+
+        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static inline void
@@ -205,6 +207,8 @@
         update_load_sub(&cfs_rq->load, se->load.weight);
         cfs_rq->nr_running--;
         se->on_rq = 0;
+
+        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -574,7 +578,6 @@
 
         prev_runtime = se->wait_runtime;
         __add_wait_runtime(cfs_rq, se, delta_fair);
-        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
         delta_fair = se->wait_runtime - prev_runtime;
 
         /*
@@ -662,7 +665,6 @@
                 if (tsk->state & TASK_UNINTERRUPTIBLE)
                         se->block_start = rq_of(cfs_rq)->clock;
         }
-        cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
         }
         __dequeue_entity(cfs_rq, se);
@@ -1121,10 +1123,8 @@
          * The statistical average of wait_runtime is about
          * -granularity/2, so initialize the task with that:
          */
-        if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+        if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
                 se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
-                schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
-        }
 
         __enqueue_entity(cfs_rq, se);
 }