Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, tsc, sched: Recompute cyc2ns_offset's during resume from sleep states
sched: Fix rq->clock synchronization when migrating tasks

+44
+2
arch/x86/include/asm/tsc.h
··· 59 extern void check_tsc_sync_target(void); 60 61 extern int notsc_setup(char *); 62 63 #endif /* _ASM_X86_TSC_H */
··· 59 extern void check_tsc_sync_target(void); 60 61 extern int notsc_setup(char *); 62 + extern void save_sched_clock_state(void); 63 + extern void restore_sched_clock_state(void); 64 65 #endif /* _ASM_X86_TSC_H */
+38
arch/x86/kernel/tsc.c
··· 626 local_irq_restore(flags); 627 } 628 629 #ifdef CONFIG_CPU_FREQ 630 631 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
··· 626 local_irq_restore(flags); 627 } 628 629 + static unsigned long long cyc2ns_suspend; 630 + 631 + void save_sched_clock_state(void) 632 + { 633 + if (!sched_clock_stable) 634 + return; 635 + 636 + cyc2ns_suspend = sched_clock(); 637 + } 638 + 639 + /* 640 + * Even on processors with invariant TSC, TSC gets reset in some of the 641 + * ACPI system sleep states. And in some systems the BIOS seems to reinit TSC to 642 + * arbitrary value (still sync'd across cpu's) during resume from such sleep 643 + * states. To cope with this, recompute the cyc2ns_offset for each cpu so 644 + * that sched_clock() continues from the point where it was left off during 645 + * suspend. 646 + */ 647 + void restore_sched_clock_state(void) 648 + { 649 + unsigned long long offset; 650 + unsigned long flags; 651 + int cpu; 652 + 653 + if (!sched_clock_stable) 654 + return; 655 + 656 + local_irq_save(flags); 657 + 658 + get_cpu_var(cyc2ns_offset) = 0; 659 + offset = cyc2ns_suspend - sched_clock(); 660 + 661 + for_each_possible_cpu(cpu) 662 + per_cpu(cyc2ns_offset, cpu) = offset; 663 + 664 + local_irq_restore(flags); 665 + } 666 + 667 #ifdef CONFIG_CPU_FREQ 668 669 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
+2
arch/x86/power/cpu.c
··· 113 void save_processor_state(void) 114 { 115 __save_processor_state(&saved_context); 116 } 117 #ifdef CONFIG_X86_32 118 EXPORT_SYMBOL(save_processor_state); ··· 230 void restore_processor_state(void) 231 { 232 __restore_processor_state(&saved_context); 233 } 234 #ifdef CONFIG_X86_32 235 EXPORT_SYMBOL(restore_processor_state);
··· 113 void save_processor_state(void) 114 { 115 __save_processor_state(&saved_context); 116 + save_sched_clock_state(); 117 } 118 #ifdef CONFIG_X86_32 119 EXPORT_SYMBOL(save_processor_state); ··· 229 void restore_processor_state(void) 230 { 231 __restore_processor_state(&saved_context); 232 + restore_sched_clock_state(); 233 } 234 #ifdef CONFIG_X86_32 235 EXPORT_SYMBOL(restore_processor_state);
+2
kernel/sched_fair.c
··· 3752 3753 raw_spin_lock_irqsave(&rq->lock, flags); 3754 3755 if (unlikely(task_cpu(p) != this_cpu)) 3756 __set_task_cpu(p, this_cpu); 3757
··· 3752 3753 raw_spin_lock_irqsave(&rq->lock, flags); 3754 3755 + update_rq_clock(rq); 3756 + 3757 if (unlikely(task_cpu(p) != this_cpu)) 3758 __set_task_cpu(p, this_cpu); 3759