Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
"A single fix for a cputime accounting regression which got introduced
in the 4.11 cycle"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/cputime: Fix ksoftirqd cputime accounting regression

+23 -13
+16 -11
kernel/sched/cputime.c
··· 34 sched_clock_irqtime = 0; 35 } 36 37 /* 38 * Called before incrementing preempt_count on {soft,}irq_enter 39 * and before decrementing preempt_count on {soft,}irq_exit. ··· 53 void irqtime_account_irq(struct task_struct *curr) 54 { 55 struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime); 56 - u64 *cpustat = kcpustat_this_cpu->cpustat; 57 s64 delta; 58 int cpu; 59 ··· 63 delta = sched_clock_cpu(cpu) - irqtime->irq_start_time; 64 irqtime->irq_start_time += delta; 65 66 - u64_stats_update_begin(&irqtime->sync); 67 /* 68 * We do not account for softirq time from ksoftirqd here. 69 * We want to continue accounting softirq time to ksoftirqd thread 70 * in that case, so as not to confuse scheduler with a special task 71 * that do not consume any time, but still wants to run. 72 */ 73 - if (hardirq_count()) { 74 - cpustat[CPUTIME_IRQ] += delta; 75 - irqtime->tick_delta += delta; 76 - } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) { 77 - cpustat[CPUTIME_SOFTIRQ] += delta; 78 - irqtime->tick_delta += delta; 79 - } 80 - 81 - u64_stats_update_end(&irqtime->sync); 82 } 83 EXPORT_SYMBOL_GPL(irqtime_account_irq); 84
··· 34 sched_clock_irqtime = 0; 35 } 36 37 + static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, 38 + enum cpu_usage_stat idx) 39 + { 40 + u64 *cpustat = kcpustat_this_cpu->cpustat; 41 + 42 + u64_stats_update_begin(&irqtime->sync); 43 + cpustat[idx] += delta; 44 + irqtime->total += delta; 45 + irqtime->tick_delta += delta; 46 + u64_stats_update_end(&irqtime->sync); 47 + } 48 + 49 /* 50 * Called before incrementing preempt_count on {soft,}irq_enter 51 * and before decrementing preempt_count on {soft,}irq_exit. ··· 41 void irqtime_account_irq(struct task_struct *curr) 42 { 43 struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime); 44 s64 delta; 45 int cpu; 46 ··· 52 delta = sched_clock_cpu(cpu) - irqtime->irq_start_time; 53 irqtime->irq_start_time += delta; 54 55 /* 56 * We do not account for softirq time from ksoftirqd here. 57 * We want to continue accounting softirq time to the ksoftirqd thread 58 * in that case, so as not to confuse the scheduler with a special task 59 * that does not consume any time, but still wants to run. 60 */ 61 + if (hardirq_count()) 62 + irqtime_account_delta(irqtime, delta, CPUTIME_IRQ); 63 + else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) 64 + irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ); 65 } 66 EXPORT_SYMBOL_GPL(irqtime_account_irq); 67
+7 -2
kernel/sched/sched.h
··· 1869 1870 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1871 struct irqtime { 1872 u64 tick_delta; 1873 u64 irq_start_time; 1874 struct u64_stats_sync sync; ··· 1877 1878 DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 1879 1880 static inline u64 irq_time_read(int cpu) 1881 { 1882 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 1883 - u64 *cpustat = kcpustat_cpu(cpu).cpustat; 1884 unsigned int seq; 1885 u64 total; 1886 1887 do { 1888 seq = __u64_stats_fetch_begin(&irqtime->sync); 1889 - total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ]; 1890 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 1891 1892 return total;
··· 1869 1870 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1871 struct irqtime { 1872 + u64 total; 1873 u64 tick_delta; 1874 u64 irq_start_time; 1875 struct u64_stats_sync sync; ··· 1876 1877 DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 1878 1879 + /* 1880 + * Returns the irqtime minus the softirq time computed by ksoftirqd. 1881 + * Otherwise ksoftirqd's sum_exec_runtime has its own runtime subtracted 1882 + * and never moves forward. 1883 + */ 1884 static inline u64 irq_time_read(int cpu) 1885 { 1886 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 1887 unsigned int seq; 1888 u64 total; 1889 1890 do { 1891 seq = __u64_stats_fetch_begin(&irqtime->sync); 1892 + total = irqtime->total; 1893 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 1894 1895 return total;