kernel/sched/fair.c (+20, -4)
···
 	 * put back on, and if we advance min_vruntime, we'll be placed back
 	 * further than we started -- ie. we'll be penalized.
 	 */
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 		update_min_vruntime(cfs_rq);
 }
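
Note: the corrected condition skips update_min_vruntime() only for a DEQUEUE_SAVE without DEQUEUE_MOVE, i.e. when the entity will be put straight back in place; every other dequeue still advances min_vruntime. A minimal userspace sketch of the mask test (the flag values mirror the kernel's definitions but are reproduced here purely for illustration):

#include <stdio.h>

#define DEQUEUE_SAVE	0x02	/* illustrative values, mirroring the kernel flags */
#define DEQUEUE_MOVE	0x04

int main(void)
{
	int cases[] = { 0, DEQUEUE_SAVE, DEQUEUE_SAVE | DEQUEUE_MOVE };

	for (int i = 0; i < 3; i++) {
		int flags = cases[i];
		int update = (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;

		printf("flags=%#x -> update_min_vruntime: %s\n",
		       flags, update ? "yes" : "no");
	}
	return 0;
}

Only the bare-SAVE case skips the update; a plain dequeue and a SAVE|MOVE dequeue both advance min_vruntime.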
···
 
 	/*
 	 * Add to the _head_ of the list, so that an already-started
-	 * distribute_cfs_runtime will not see us
+	 * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
+	 * not running, add to the tail so that later runqueues don't get starved.
 	 */
-	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	if (cfs_b->distribute_running)
+		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	else
+		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 
 	/*
 	 * If we're the first throttled task, make sure the bandwidth
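
Note: this hunk is the core of the starvation fix. Head insertion is only needed while distribute_cfs_runtime() is walking the list, so that a cfs_rq which re-throttles mid-walk is not handed runtime twice in the same pass; when no distribution is running, tail insertion keeps the list in FIFO order so cfs_rqs throttled later are not perpetually pushed behind re-throttled ones. A small userspace sketch of the insertion policy, using <sys/queue.h> in place of the kernel's RCU list helpers (the TAILQ model and all names here are illustrative only):

#include <stdio.h>
#include <sys/queue.h>

struct cfs_rq_model {
	int id;
	TAILQ_ENTRY(cfs_rq_model) link;
};

TAILQ_HEAD(throttled_head, cfs_rq_model);

static void throttle(struct throttled_head *list, struct cfs_rq_model *rq,
		     int distribute_running)
{
	if (distribute_running)
		TAILQ_INSERT_HEAD(list, rq, link);	/* hide from an in-flight walker */
	else
		TAILQ_INSERT_TAIL(list, rq, link);	/* preserve FIFO fairness */
}

int main(void)
{
	struct throttled_head list = TAILQ_HEAD_INITIALIZER(list);
	struct cfs_rq_model a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct cfs_rq_model *pos;

	throttle(&list, &a, 0);		/* no distribution running: tail */
	throttle(&list, &b, 0);
	throttle(&list, &c, 1);		/* re-throttled during a distribution: head */

	TAILQ_FOREACH(pos, &list, link)
		printf("cfs_rq %d\n", pos->id);	/* prints 3, 1, 2 */
	return 0;
}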
···
 	 * in us over-using our runtime if it is all used during this loop, but
 	 * only by limited amounts in that extreme case.
 	 */
-	while (throttled && cfs_b->runtime > 0) {
+	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
+		cfs_b->distribute_running = 1;
 		raw_spin_unlock(&cfs_b->lock);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		runtime = distribute_cfs_runtime(cfs_b, runtime,
 						 runtime_expires);
 		raw_spin_lock(&cfs_b->lock);
 
+		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
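
Note: the period-timer path now claims cfs_b->distribute_running under cfs_b->lock before dropping the lock to call distribute_cfs_runtime(), and clears it once the lock is retaken, so the slack timer (below) can tell that a distribution is already in flight. A minimal sketch of this claim-a-flag-then-drop-the-lock pattern, modelled with a pthread mutex (distribute() is a stand-in, not a kernel function):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int distribute_running;

static void distribute(void)
{
	/* stand-in for distribute_cfs_runtime(): long-running work, lock not held */
}

static void period_timer_pass(void)
{
	pthread_mutex_lock(&lock);
	if (!distribute_running) {
		distribute_running = 1;		/* claim the distribution */
		pthread_mutex_unlock(&lock);

		distribute();			/* runs with the lock dropped */

		pthread_mutex_lock(&lock);
		distribute_running = 0;		/* release the claim */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	period_timer_pass();
	puts("one distribution pass completed");
	return 0;
}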
···
 
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock(&cfs_b->lock);
+	if (cfs_b->distribute_running) {
+		raw_spin_unlock(&cfs_b->lock);
+		return;
+	}
+
 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
 		raw_spin_unlock(&cfs_b->lock);
 		return;
···
 		runtime = cfs_b->runtime;
 
 	expires = cfs_b->runtime_expires;
+	if (runtime)
+		cfs_b->distribute_running = 1;
+
 	raw_spin_unlock(&cfs_b->lock);
 
 	if (!runtime)
···
 	raw_spin_lock(&cfs_b->lock);
 	if (expires == cfs_b->runtime_expires)
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
+	cfs_b->distribute_running = 0;
 	raw_spin_unlock(&cfs_b->lock);
 }
 
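Note: the three hunks above apply the same protocol to the slack-timer side: bail out early if a distribution is already running, set distribute_running before dropping the lock when there is slack runtime to hand out, and clear it after the accounting under the retaken lock. A condensed sketch of how the path reads with the hunks applied (the elided middle of the real do_sched_cfs_slack_timer(), which decides how much slack runtime is available, is paraphrased rather than quoted):

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->distribute_running) {	/* a distribution is already in flight */
		raw_spin_unlock(&cfs_b->lock);
		return;
	}
	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
		raw_spin_unlock(&cfs_b->lock);
		return;
	}
	/* ... compute how much slack runtime can be handed out ... */
	expires = cfs_b->runtime_expires;
	if (runtime)
		cfs_b->distribute_running = 1;	/* claim before dropping the lock */
	raw_spin_unlock(&cfs_b->lock);

	if (!runtime)
		return;

	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);

	raw_spin_lock(&cfs_b->lock);
	if (expires == cfs_b->runtime_expires)
		cfs_b->runtime -= min(runtime, cfs_b->runtime);
	cfs_b->distribute_running = 0;		/* let the next distributor run */
	raw_spin_unlock(&cfs_b->lock);
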
···
 	cfs_b->period_timer.function = sched_cfs_period_timer;
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
+	cfs_b->distribute_running = 0;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
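
Note: all of the hunks above assume a distribute_running flag exists in struct cfs_bandwidth; its declaration lives in kernel/sched/sched.h and is not part of this excerpt. An illustrative sketch of the field (placement and surrounding members are approximate, not the exact upstream layout):

struct cfs_bandwidth {
	raw_spinlock_t		lock;
	/* ... quota/period/runtime bookkeeping ... */
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;
	bool			distribute_running;	/* a distribute_cfs_runtime() call is in flight */
	/* ... statistics ... */
};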