Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched, doc: Update sched-design-CFS.txt
  sched: Remove unused 'rq' variable and cpu_rq() call from alloc_fair_sched_group()
  sched.h: Fix a typo ("its")
  sched: Fix yield_to kernel-doc

 5 files changed, 4 insertions(+), 14 deletions(-)

Documentation/scheduler/sched-design-CFS.txt (+1 -6)
@@ -164,7 +164,7 @@
 It puts the scheduling entity (task) into the red-black tree and
 increments the nr_running variable.
 
- - dequeue_tree(...)
+ - dequeue_task(...)
 
 When a task is no longer runnable, this function is called to keep the
 corresponding scheduling entity out of the red-black tree. It decrements
@@ -195,8 +195,3 @@
 This function is mostly called from time tick functions; it might lead to
 process switch. This drives the running preemption.
 
- - task_new(...)
-
- The core scheduler gives the scheduling module an opportunity to manage new
- task startup. The CFS scheduling module uses it for group scheduling, while
- the scheduling module for a real-time task does not use it.
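For orientation (an annotation, not part of the diff): the hooks this document
describes are function pointers in struct sched_class, one instance per
scheduling policy. A trimmed sketch of the relevant fields, abridged from
kernels of this era; exact signatures vary between versions:

	struct rq;			/* per-CPU runqueue, kernel/sched.c */
	struct task_struct;

	/* abridged sketch of the per-policy hook table */
	struct sched_class {
		const struct sched_class *next;

		/* task becomes runnable / stops being runnable */
		void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
		void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);

		/* periodic tick; this is what drives tick preemption */
		void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);

		/* new-task setup; this replaced the old .task_new hook,
		 * which is why the task_new section is deleted above */
		void (*task_fork)(struct task_struct *p);
	};

The dequeue_tree -> dequeue_task fix in the first hunk brings the document in
line with the actual member name.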
include/linux/sched.h (+1 -1)
@@ -517,7 +517,7 @@
 struct autogroup;
 
 /*
- * NOTE! "signal_struct" does not have it's own
+ * NOTE! "signal_struct" does not have its own
  * locking, because a shared signal_struct always
  * implies a shared sighand_struct, so locking
  * sighand_struct is always a proper superset of
kernel/sched.c (+2 -3)
@@ -5473,6 +5473,8 @@
  * yield_to - yield the current processor to another thread in
  * your thread group, or accelerate that thread toward the
  * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
  *
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
@@ -8451,7 +8449,6 @@
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
-	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8463,8 +8462,6 @@
 	tg->shares = NICE_0_LOAD;
 
 	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
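A note on the first hunk: scripts/kernel-doc expects an @name: line for every
parameter of a documented function and warns when one is missing, which is
what the two added lines fix. Put together with the function's signature as
merged in this series, the repaired comment block reads (body omitted):

	/**
	 * yield_to - yield the current processor to another thread in
	 * your thread group, or accelerate that thread toward the
	 * processor it's on.
	 * @p: target task
	 * @preempt: whether task preemption is allowed or not
	 *
	 * It's the caller's job to ensure that the target task struct
	 * can't go away on us before we can do any checks.
	 */
	bool __sched yield_to(struct task_struct *p, bool preempt)

The remaining two hunks are pure dead-code removal: alloc_fair_sched_group()
assigned rq = cpu_rq(i) but never used the result.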
kernel/sched_idletask.c (-2)
@@ -94,6 +94,4 @@
 
 	.prio_changed = prio_changed_idle,
 	.switched_to = switched_to_idle,
-
-	/* no .task_new for idle tasks */
 };
kernel/sched_stoptask.c (-2)
@@ -102,6 +102,4 @@
 
 	.prio_changed = prio_changed_stop,
 	.switched_to = switched_to_stop,
-
-	/* no .task_new for stop tasks */
 };
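Why dropping the two placeholder comments (here and in sched_idletask.c above)
loses nothing: the .task_new hook they referred to no longer exists in
sched_class, and in a C designated initializer any field left unnamed is
implicitly zero/NULL anyway (C99 6.7.8), so an absent hook needs no marker.
A minimal standalone sketch with hypothetical types, not the kernel's:

	/* hypothetical hook table for illustration only */
	struct demo_class {
		void (*task_new)(void);		/* pretend the hook still existed */
		void (*prio_changed)(void);
	};

	static void prio_changed_demo(void) { }

	static const struct demo_class demo_ops = {
		.prio_changed = prio_changed_demo,
		/* .task_new left unnamed: implicitly NULL, no comment needed */
	};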