Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Ingo writes:
"scheduler fixes:

Two fixes: a CFS-throttling bug fix, and an interactivity fix."

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Fix the min_vruntime update logic in dequeue_entity()
sched/fair: Fix throttle_list starvation with low CFS quota

2 files changed, 22 insertions(+), 4 deletions(-)
kernel/sched/fair.c (+20 -4)
···
	 * put back on, and if we advance min_vruntime, we'll be placed back
	 * further than we started -- ie. we'll be penalized.
	 */
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
		update_min_vruntime(cfs_rq);
}
···
	/*
	 * Add to the _head_ of the list, so that an already-started
-	 * distribute_cfs_runtime will not see us
+	 * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
+	 * not running add to the tail so that later runqueues don't get starved.
	 */
-	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	if (cfs_b->distribute_running)
+		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	else
+		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);

	/*
	 * If we're the first throttled task, make sure the bandwidth
···
	 * in us over-using our runtime if it is all used during this loop, but
	 * only by limited amounts in that extreme case.
	 */
-	while (throttled && cfs_b->runtime > 0) {
+	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
		runtime = cfs_b->runtime;
+		cfs_b->distribute_running = 1;
		raw_spin_unlock(&cfs_b->lock);
		/* we can't nest cfs_b->lock while distributing bandwidth */
		runtime = distribute_cfs_runtime(cfs_b, runtime,
						 runtime_expires);
		raw_spin_lock(&cfs_b->lock);

+		cfs_b->distribute_running = 0;
		throttled = !list_empty(&cfs_b->throttled_cfs_rq);

		cfs_b->runtime -= min(runtime, cfs_b->runtime);
···
	/* confirm we're still not at a refresh boundary */
	raw_spin_lock(&cfs_b->lock);
+	if (cfs_b->distribute_running) {
+		raw_spin_unlock(&cfs_b->lock);
+		return;
+	}
+
	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
		raw_spin_unlock(&cfs_b->lock);
		return;
···
		runtime = cfs_b->runtime;

	expires = cfs_b->runtime_expires;
+	if (runtime)
+		cfs_b->distribute_running = 1;
+
	raw_spin_unlock(&cfs_b->lock);

	if (!runtime)
···
	raw_spin_lock(&cfs_b->lock);
	if (expires == cfs_b->runtime_expires)
		cfs_b->runtime -= min(runtime, cfs_b->runtime);
+	cfs_b->distribute_running = 0;
	raw_spin_unlock(&cfs_b->lock);
}
···
	cfs_b->period_timer.function = sched_cfs_period_timer;
	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cfs_b->slack_timer.function = sched_cfs_slack_timer;
+	cfs_b->distribute_running = 0;
}

static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
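The first hunk is the interactivity fix: after it, update_min_vruntime() is skipped only for DEQUEUE_SAVE without DEQUEUE_MOVE, i.e. an entity that is dequeued merely to be put straight back on the same cfs_rq. A standalone truth table of the corrected condition may help; the DEQUEUE_* values below are assumptions modelled on kernel/sched/sched.h, and the snippet is an illustration, not kernel code:

/*
 * Truth table for the corrected dequeue_entity() condition.
 * DEQUEUE_* values are assumptions; this is not kernel code.
 */
#include <stdio.h>

#define DEQUEUE_SAVE 0x02
#define DEQUEUE_MOVE 0x04

int main(void)
{
	unsigned int cases[] = { 0, DEQUEUE_SAVE, DEQUEUE_MOVE,
				 DEQUEUE_SAVE | DEQUEUE_MOVE };

	for (int i = 0; i < 4; i++) {
		unsigned int flags = cases[i];
		/*
		 * Skip the update only for SAVE without MOVE, i.e. an entity
		 * that is about to be put straight back on the same cfs_rq.
		 */
		int update = (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;

		printf("flags=0x%02x -> update_min_vruntime: %s\n",
		       flags, update ? "yes" : "no");
	}
	return 0;
}

The remaining hunks are the starvation fix: a runqueue that throttles while distribute_cfs_runtime() has dropped cfs_b->lock still goes to the head of throttled_cfs_rq, so the in-flight pass does not see it, but otherwise it now goes to the tail, so runqueues throttled earlier are refilled first. A minimal user-space sketch of that insertion policy, using a plain doubly linked list and a bool in place of the RCU list and struct cfs_bandwidth (names and types here are illustrative, not the kernel API):

/*
 * Standalone sketch of the head-vs-tail insertion policy.
 * Not kernel code: a plain list and a bool stand in for the RCU
 * list primitives and the real struct cfs_bandwidth.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq_node {
	const char *name;
	struct rq_node *prev, *next;
};

struct bandwidth {
	struct rq_node throttled;  /* list head of throttled runqueues */
	bool distribute_running;   /* distribution pass has dropped the lock? */
};

static void list_init(struct rq_node *head)
{
	head->prev = head->next = head;
}

static void list_add_head(struct rq_node *n, struct rq_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_add_tail(struct rq_node *n, struct rq_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/*
 * Mirrors the patched throttle path: hide from an in-flight distribution
 * pass by going to the head; otherwise queue at the tail so earlier
 * arrivals are refilled first and cannot be starved by us.
 */
static void throttle(struct bandwidth *b, struct rq_node *rq)
{
	if (b->distribute_running)
		list_add_head(rq, &b->throttled);
	else
		list_add_tail(rq, &b->throttled);
}

int main(void)
{
	struct bandwidth b = { .distribute_running = false };
	struct rq_node a = { .name = "rq-a" };
	struct rq_node c = { .name = "rq-c" };
	struct rq_node d = { .name = "rq-d" };

	list_init(&b.throttled);
	throttle(&b, &a);            /* no distribution running: tail */
	b.distribute_running = true; /* distribution pass drops the lock */
	throttle(&b, &c);            /* raced with distribution: head */
	b.distribute_running = false;
	throttle(&b, &d);            /* back to tail */

	for (struct rq_node *n = b.throttled.next; n != &b.throttled; n = n->next)
		printf("%s\n", n->name);
	return 0;
}

Running it prints rq-c, rq-a, rq-d: the runqueue that raced with the distribution pass is hidden at the head, while the others keep their arrival order at the tail.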
kernel/sched/sched.h (+2)
···
	int nr_periods;
	int nr_throttled;
	u64 throttled_time;
+
+	bool distribute_running;
#endif
};
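The new flag is only read and written under cfs_b->lock; it marks the window in which that lock is dropped to call distribute_cfs_runtime(), and the slack-timer side bails out early when it sees the flag set. A rough user-space model of that path, with a pthread mutex standing in for cfs_b->lock and a made-up distribute() helper; the runtime-expiry bookkeeping of the real code is omitted, so treat this as a sketch of the locking pattern only:

/*
 * Sketch of the distribute_running lifecycle (not the kernel code):
 * the flag is only touched with the lock held, and covers the span
 * where the lock is dropped to hand out runtime.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bandwidth {
	pthread_mutex_t lock;
	bool distribute_running;
	long runtime;
};

/* Stand-in for distribute_cfs_runtime(): called without the lock held. */
static long distribute(long runtime)
{
	return runtime / 2; /* pretend half of it was handed out */
}

/*
 * Models the slack-timer path: skip entirely if a distribution pass
 * (e.g. from the period timer) is already in flight.
 */
static void slack_distribute(struct bandwidth *b)
{
	long runtime;

	pthread_mutex_lock(&b->lock);
	if (b->distribute_running) {
		pthread_mutex_unlock(&b->lock);
		return;
	}
	runtime = b->runtime;
	if (runtime)
		b->distribute_running = true;
	pthread_mutex_unlock(&b->lock);

	if (!runtime)
		return;

	runtime = distribute(runtime); /* lock is not held here */

	pthread_mutex_lock(&b->lock);
	/* The real code also checks that the runtime period has not
	 * expired in the meantime; that bookkeeping is omitted here. */
	if (runtime > b->runtime)
		runtime = b->runtime;
	b->runtime -= runtime;
	b->distribute_running = false;
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct bandwidth b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.runtime = 100,
	};

	slack_distribute(&b);
	printf("runtime left: %ld, running: %d\n", b.runtime, b.distribute_running);
	return 0;
}

The period-timer path in the fair.c hunks above follows the same pattern around its distribution loop: set the flag before dropping the lock, clear it after re-taking it.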