Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: do not hurt SCHED_BATCH on wakeup
sched: touch softlockup watchdog after idling
sched: sysctl, proc_dointvec_minmax() expects int values for
sched: mark rwsem functions as __sched for wchan/profiling
sched: fix crash on ia64, introduce task_current()

+21 -15
+3 -2
kernel/rwsem.c
···
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/rwsem.h>
 
···
 /*
  * lock for reading
  */
-void down_read(struct rw_semaphore *sem)
+void __sched down_read(struct rw_semaphore *sem)
 {
         might_sleep();
         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
···
 /*
  * lock for writing
  */
-void down_write(struct rw_semaphore *sem)
+void __sched down_write(struct rw_semaphore *sem)
 {
         might_sleep();
         rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
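For reference, a simplified sketch of the mechanism the __sched annotation relies on (condensed from include/linux/sched.h and kernel/sched.c of this era, with details trimmed): the attribute places the function in the .sched.text section, and the wchan/profiling code skips return addresses that fall inside that section, so a task blocked in down_read()/down_write() reports its real caller as the wait channel rather than the locking primitive itself.

/* Attach to functions that should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))

/* Linker-provided bounds of the .sched.text section. */
extern char __sched_text_start[], __sched_text_end[];

/* Simplified: wchan/profiling treat anything inside .sched.text as
 * scheduler glue and keep unwinding past it. */
int in_sched_functions(unsigned long addr)
{
        return addr >= (unsigned long)__sched_text_start &&
               addr <  (unsigned long)__sched_text_end;
}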
+12 -6
kernel/sched.c
···
 # define finish_arch_switch(prev) do { } while (0)
 #endif
 
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+        return rq->curr == p;
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
-        return rq->curr == p;
+        return task_current(rq, p);
 }
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
···
 #ifdef CONFIG_SMP
         return p->oncpu;
 #else
-        return rq->curr == p;
+        return task_current(rq, p);
 #endif
 }
 
···
         struct rq *rq = cpu_rq(smp_processor_id());
         u64 now = sched_clock();
 
+        touch_softlockup_watchdog();
         rq->idle_clock += delta_ns;
         /*
          * Override the previous timestamp and ignore all
···
 
         rq = task_rq_lock(p, &flags);
         ns = p->se.sum_exec_runtime;
-        if (rq->curr == p) {
+        if (task_current(rq, p)) {
                 update_rq_clock(rq);
                 delta_exec = rq->clock - p->se.exec_start;
                 if ((s64)delta_exec > 0)
···
 
         oldprio = p->prio;
         on_rq = p->se.on_rq;
-        running = task_running(rq, p);
+        running = task_current(rq, p);
         if (on_rq) {
                 dequeue_task(rq, p, 0);
                 if (running)
···
         }
         update_rq_clock(rq);
         on_rq = p->se.on_rq;
-        running = task_running(rq, p);
+        running = task_current(rq, p);
         if (on_rq) {
                 deactivate_task(rq, p, 0);
                 if (running)
···
 
         update_rq_clock(rq);
 
-        running = task_running(rq, tsk);
+        running = task_current(rq, tsk);
         on_rq = tsk->se.on_rq;
 
         if (on_rq) {
+1 -2
kernel/sched_fair.c
···
 
         if (!initial) {
                 /* sleeps upto a single latency don't count. */
-                if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
-                                task_of(se)->policy != SCHED_BATCH)
+                if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
                         vruntime -= sysctl_sched_latency;
 
                 /* ensure we never gain time by being placed backwards. */
+4 -4
kernel/sysctl.c
···
 };
 
 #ifdef CONFIG_SCHED_DEBUG
-static unsigned long min_sched_granularity_ns = 100000;        /* 100 usecs */
-static unsigned long max_sched_granularity_ns = NSEC_PER_SEC;  /* 1 second */
-static unsigned long min_wakeup_granularity_ns;                /* 0 usecs */
-static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_sched_granularity_ns = 100000;                  /* 100 usecs */
+static int max_sched_granularity_ns = NSEC_PER_SEC;            /* 1 second */
+static int min_wakeup_granularity_ns;                          /* 0 usecs */
+static int max_wakeup_granularity_ns = NSEC_PER_SEC;           /* 1 second */
 #endif
 
 static struct ctl_table kern_table[] = {
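For context, a minimal sketch of why these bounds must be plain int (all names in the entry below are placeholders, not copied from kernel/sysctl.c): proc_dointvec_minmax() dereferences ctl_table.extra1/extra2 as int *, so limits declared as unsigned long get read at the wrong width on 64-bit kernels (most visibly big-endian ones), which is what the hunk above corrects.

/* Hypothetical entry of the shape these limits feed into.  The handler
 * reads ->extra1/->extra2 through int pointers, so the min/max variables
 * must be int, exactly as in the hunk above. */
static int example_min_ns = 100000;             /* 100 usecs */
static int example_max_ns = NSEC_PER_SEC;       /* 1 second */

static struct ctl_table example_table[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sched_example_granularity_ns",
                .data           = &sysctl_sched_example_granularity,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
                .extra1         = &example_min_ns,      /* int *, not unsigned long * */
                .extra2         = &example_max_ns,      /* int *, not unsigned long * */
        },
        { .ctl_name = 0 }
};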
+1 -1
lib/rwsem.c
···
 /*
  * wait for a lock to be granted
  */
-static struct rw_semaphore *
+static struct rw_semaphore __sched *
 rwsem_down_failed_common(struct rw_semaphore *sem,
                          struct rwsem_waiter *waiter, signed long adjustment)
 {