Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: do not hurt SCHED_BATCH on wakeup
sched: touch softlockup watchdog after idling
sched: sysctl, proc_dointvec_minmax() expects int values for
sched: mark rwsem functions as __sched for wchan/profiling
sched: fix crash on ia64, introduce task_current()

 5 files changed, 21 insertions(+), 15 deletions(-)

kernel/rwsem.c: +3 -2

···
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/rwsem.h>
 
···
 /*
  * lock for reading
  */
-void down_read(struct rw_semaphore *sem)
+void __sched down_read(struct rw_semaphore *sem)
 {
 	might_sleep();
 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
···
 /*
  * lock for writing
  */
-void down_write(struct rw_semaphore *sem)
+void __sched down_write(struct rw_semaphore *sem)
 {
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
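
Note on __sched (this applies to kernel/rwsem.c here and to lib/rwsem.c further down): the annotation moves the function into the .sched.text section, which wchan and profiling skip, so a task blocked in down_read()/down_write() reports its real caller rather than the rwsem primitive itself. A minimal sketch, assuming the usual definition in include/linux/sched.h for this era of the tree (exact comment and whitespace may differ):

    /* Attach to any functions which should be ignored in wchan output. */
    #define __sched	__attribute__((__section__(".sched.text")))

    /* get_wchan() implementations use this to skip scheduler internals: */
    extern int in_sched_functions(unsigned long addr);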

kernel/sched.c: +12 -6

···
 # define finish_arch_switch(prev) do { } while (0)
 #endif
 
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+	return rq->curr == p;
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
-	return rq->curr == p;
+	return task_current(rq, p);
 }
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
···
 #ifdef CONFIG_SMP
 	return p->oncpu;
 #else
-	return rq->curr == p;
+	return task_current(rq, p);
 #endif
 }
···
 	struct rq *rq = cpu_rq(smp_processor_id());
 	u64 now = sched_clock();
 
+	touch_softlockup_watchdog();
 	rq->idle_clock += delta_ns;
 	/*
 	 * Override the previous timestamp and ignore all
···
 	rq = task_rq_lock(p, &flags);
 	ns = p->se.sum_exec_runtime;
-	if (rq->curr == p) {
+	if (task_current(rq, p)) {
 		update_rq_clock(rq);
 		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
···
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
+	running = task_current(rq, p);
 	if (on_rq) {
 		dequeue_task(rq, p, 0);
 		if (running)
···
 	}
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
+	running = task_current(rq, p);
 	if (on_rq) {
 		deactivate_task(rq, p, 0);
 		if (running)
···
 	update_rq_clock(rq);
 
-	running = task_running(rq, tsk);
+	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
 	if (on_rq) {
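
The new task_current() helper always answers "is p the task this runqueue considers current?" (rq->curr == p). task_running() is not equivalent everywhere: on architectures that define __ARCH_WANT_UNLOCKED_CTXSW, ia64 among them, it tests p->oncpu, which can disagree with rq->curr while a context switch is still in flight, and using it for the dequeue/requeue decisions above is what crashed on ia64. A condensed sketch of the two helpers after this change (the real code keeps them in separate #ifndef/#else blocks, as in the hunks above):

    static inline int task_current(struct rq *rq, struct task_struct *p)
    {
    	return rq->curr == p;
    }

    static inline int task_running(struct rq *rq, struct task_struct *p)
    {
    #if defined(__ARCH_WANT_UNLOCKED_CTXSW) && defined(CONFIG_SMP)
    	return p->oncpu;	/* may lag or lead rq->curr during a switch */
    #else
    	return task_current(rq, p);
    #endif
    }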

kernel/sched_fair.c: +1 -2

···
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
-				task_of(se)->policy != SCHED_BATCH)
+		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
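
With the policy check gone, SCHED_BATCH tasks receive the same NEW_FAIR_SLEEPERS wakeup credit as SCHED_NORMAL tasks instead of always being placed at min_vruntime. A condensed reading of the wakeup path of place_entity() after this change (the initial-placement branches of the function are omitted here):

    vruntime = cfs_rq->min_vruntime;
    if (!initial) {
    	/* sleeps upto a single latency don't count. */
    	if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
    		vruntime -= sysctl_sched_latency;
    	/* ensure we never gain time by being placed backwards. */
    	vruntime = max_vruntime(se->vruntime, vruntime);
    }
    se->vruntime = vruntime;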

kernel/sysctl.c: +4 -4

···
 };
 
 #ifdef CONFIG_SCHED_DEBUG
-static unsigned long min_sched_granularity_ns = 100000;	/* 100 usecs */
-static unsigned long max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
-static unsigned long min_wakeup_granularity_ns;		/* 0 usecs */
-static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC;	/* 1 second */
+static int min_sched_granularity_ns = 100000;		/* 100 usecs */
+static int max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
+static int min_wakeup_granularity_ns;			/* 0 usecs */
+static int max_wakeup_granularity_ns = NSEC_PER_SEC;	/* 1 second */
 #endif
 
 static struct ctl_table kern_table[] = {
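
proc_dointvec_minmax() reads the ->extra1/->extra2 bounds through int pointers, so declaring the limits as unsigned long made the clamp read the wrong width: it happens to work on little-endian machines but reads zero on 64-bit big-endian ones, hence the switch to int. A sketch of the kind of kern_table entry that consumes these limits, assuming the 2.6.24-era ctl_table layout (field values are illustrative, not copied from the file):

    {
    	.ctl_name	= CTL_UNNUMBERED,
    	.procname	= "sched_wakeup_granularity_ns",
    	.data		= &sysctl_sched_wakeup_granularity,
    	.maxlen		= sizeof(unsigned int),
    	.mode		= 0644,
    	.proc_handler	= &proc_dointvec_minmax,
    	.strategy	= &sysctl_intvec,
    	.extra1		= &min_wakeup_granularity_ns,	/* int, per the change above */
    	.extra2		= &max_wakeup_granularity_ns,	/* int, per the change above */
    },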

lib/rwsem.c: +1 -1

···
 /*
  * wait for a lock to be granted
  */
-static struct rw_semaphore *
+static struct rw_semaphore __sched *
 rwsem_down_failed_common(struct rw_semaphore *sem,
 			 struct rwsem_waiter *waiter, signed long adjustment)
 {