Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: Fix out of scope variable access in sched_slice()
sched: Hide runqueues from direct refer at source code level
sched: Remove unneeded __ref tag
sched, x86: Fix cpufreq + sched_clock() TSC scaling

+18 -9
+5 -1
arch/x86/include/asm/timer.h
@@ -45,12 +45,16 @@
  */

 DECLARE_PER_CPU(unsigned long, cyc2ns);
+DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);

 #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

 static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 {
-	return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR;
+	int cpu = smp_processor_id();
+	unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
+	ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+	return ns;
 }

 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+6 -2
arch/x86/kernel/tsc.c
@@ -590,22 +590,26 @@
  */

 DEFINE_PER_CPU(unsigned long, cyc2ns);
+DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);

 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
-	unsigned long long tsc_now, ns_now;
+	unsigned long long tsc_now, ns_now, *offset;
 	unsigned long flags, *scale;

 	local_irq_save(flags);
 	sched_clock_idle_sleep_event();

 	scale = &per_cpu(cyc2ns, cpu);
+	offset = &per_cpu(cyc2ns_offset, cpu);

 	rdtscll(tsc_now);
 	ns_now = __cycles_2_ns(tsc_now);

-	if (cpu_khz)
+	if (cpu_khz) {
 		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+		*offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
+	}

 	sched_clock_idle_wakeup_event(0);
 	local_irq_restore(flags);
+1 -1
kernel/sched.c
@@ -7822,7 +7822,7 @@
 	free_rootdomain(old_rd);
 }

-static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
 {
 	gfp_t gfp = GFP_KERNEL;

+1 -1
kernel/sched_cpupri.c
@@ -152,7 +152,7 @@
  *
  * Returns: -ENOMEM if memory fails.
  */
-int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
+int cpupri_init(struct cpupri *cp, bool bootmem)
 {
 	gfp_t gfp = GFP_KERNEL;
 	int i;
+3 -3
kernel/sched_debug.c
@@ -162,7 +162,7 @@
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
 		spread, rq0_min_vruntime, spread0;
-	struct rq *rq = &per_cpu(runqueues, cpu);
+	struct rq *rq = cpu_rq(cpu);
 	struct sched_entity *last;
 	unsigned long flags;

@@ -191,7 +191,7 @@
 	if (last)
 		max_vruntime = last->vruntime;
 	min_vruntime = cfs_rq->min_vruntime;
-	rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
+	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
 	spin_unlock_irqrestore(&rq->lock, flags);
 	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
 		SPLIT_NS(MIN_vruntime));

@@ -248,7 +248,7 @@

 static void print_cpu(struct seq_file *m, int cpu)
 {
-	struct rq *rq = &per_cpu(runqueues, cpu);
+	struct rq *rq = cpu_rq(cpu);

 #ifdef CONFIG_X86
 	{
+2 -1
kernel/sched_fair.c
@@ -430,12 +430,13 @@

 	for_each_sched_entity(se) {
 		struct load_weight *load;
+		struct load_weight lw;

 		cfs_rq = cfs_rq_of(se);
 		load = &cfs_rq->load;

 		if (unlikely(!se->on_rq)) {
-			struct load_weight lw = cfs_rq->load;
+			lw = cfs_rq->load;

 			update_load_add(&lw, se->load.weight);
 			load = &lw;