Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
"Misc fixes:

- a deadline scheduler related bug fix which triggered a kernel
warning

- an RT_RUNTIME_SHARE fix

- a stop_machine preemption fix

- a potential NULL dereference fix in sched_domain_debug_one()"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/rt: Restore rt_runtime after disabling RT_RUNTIME_SHARE
sched/deadline: Update rq_clock of later_rq when pushing a task
stop_machine: Disable preemption after queueing stopper threads
sched/topology: Check variable group before dereferencing it

+19 -3
+7 -1
kernel/sched/deadline.c
@@ -2090,8 +2090,14 @@
 	sub_rq_bw(&next_task->dl, &rq->dl);
 	set_task_cpu(next_task, later_rq->cpu);
 	add_rq_bw(&next_task->dl, &later_rq->dl);
+
+	/*
+	 * Update the later_rq clock here, because the clock is used
+	 * by the cpufreq_update_util() inside __add_running_bw().
+	 */
+	update_rq_clock(later_rq);
 	add_running_bw(&next_task->dl, &later_rq->dl);
-	activate_task(later_rq, next_task, 0);
+	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
 	ret = 1;
 
 	resched_curr(later_rq);
+2
kernel/sched/rt.c
@@ -836,6 +836,8 @@
 	 * can be time-consuming. Try to avoid it when possible.
 	 */
 	raw_spin_lock(&rt_rq->rt_runtime_lock);
+	if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+		rt_rq->rt_runtime = rt_b->rt_runtime;
 	skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
 	raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	if (skip)
+1 -1
kernel/sched/topology.c
@@ -47,7 +47,7 @@
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
 	}
-	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
+	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
 	}
 
+9 -1
kernel/stop_machine.c
@@ -260,6 +260,15 @@
 	err = 0;
 	__cpu_stop_queue_work(stopper1, work1, &wakeq);
 	__cpu_stop_queue_work(stopper2, work2, &wakeq);
+	/*
+	 * The waking up of stopper threads has to happen
+	 * in the same scheduling context as the queueing.
+	 * Otherwise, there is a possibility of one of the
+	 * above stoppers being woken up by another CPU,
+	 * and preempting us. This will cause us to not
+	 * wake up the other stopper forever.
+	 */
+	preempt_disable();
 unlock:
 	raw_spin_unlock(&stopper2->lock);
 	raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@
 	}
 
 	if (!err) {
-		preempt_disable();
 		wake_up_q(&wakeq);
 		preempt_enable();
 	}