Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
"A single fix for a cputime accounting regression which got introduced
in the 4.11 cycle"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/cputime: Fix ksoftirqd cputime accounting regression
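
The regression is easiest to see from the hunks below: irq_time_read() used to
rebuild its value from kcpustat (CPUTIME_IRQ + CPUTIME_SOFTIRQ), and since the
4.11 irqtime rework that sum also picks up the softirq time that the tick
charges to ksoftirqd itself. The scheduler discounts the irq_time_read()
snapshot from task execution time, so ksoftirqd effectively had its own
runtime subtracted from itself and its sum_exec_runtime barely advanced, which
is what the new comment in sched.h describes. As a rough user-space
illustration of that arithmetic (made-up numbers, not kernel code):

/*
 * Hypothetical sketch: if the per-CPU irq-time snapshot includes softirq
 * time that was charged to ksoftirqd itself, almost nothing is left to
 * credit to ksoftirqd's own runtime.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t wall_delta = 1000000;	/* ns ksoftirqd actually ran          */
	uint64_t irq_buggy  = 950000;	/* snapshot including its own softirq */
	uint64_t irq_fixed  = 50000;	/* snapshot excluding ksoftirqd time  */

	printf("credited to ksoftirqd (buggy): %llu ns\n",
	       (unsigned long long)(wall_delta - irq_buggy));
	printf("credited to ksoftirqd (fixed): %llu ns\n",
	       (unsigned long long)(wall_delta - irq_fixed));
	return 0;
}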

Changed files: +23 -13

kernel/sched/cputime.c  (+16 -11)
···
 	sched_clock_irqtime = 0;
 }
 
+static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
+				  enum cpu_usage_stat idx)
+{
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+	u64_stats_update_begin(&irqtime->sync);
+	cpustat[idx] += delta;
+	irqtime->total += delta;
+	irqtime->tick_delta += delta;
+	u64_stats_update_end(&irqtime->sync);
+}
+
 /*
  * Called before incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
···
 void irqtime_account_irq(struct task_struct *curr)
 {
 	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
-	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	s64 delta;
 	int cpu;
···
 	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
 	irqtime->irq_start_time += delta;
 
-	u64_stats_update_begin(&irqtime->sync);
 	/*
 	 * We do not account for softirq time from ksoftirqd here.
 	 * We want to continue accounting softirq time to ksoftirqd thread
 	 * in that case, so as not to confuse scheduler with a special task
 	 * that do not consume any time, but still wants to run.
 	 */
-	if (hardirq_count()) {
-		cpustat[CPUTIME_IRQ] += delta;
-		irqtime->tick_delta += delta;
-	} else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
-		cpustat[CPUTIME_SOFTIRQ] += delta;
-		irqtime->tick_delta += delta;
-	}
-
-	u64_stats_update_end(&irqtime->sync);
+	if (hardirq_count())
+		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
+	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
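
The new irqtime_account_delta() helper folds the kcpustat update and the
irqtime->total / irqtime->tick_delta accumulation into a single
u64_stats_update_begin()/u64_stats_update_end() section, so a concurrent
irq_time_read() sees a consistent snapshot. For readers unfamiliar with the
u64_stats seqcount scheme, here is a rough user-space analogue built on C11
atomics; it is only a sketch of the idea (all names are made up), not the
kernel's u64_stats_sync implementation, which only needs the sequence counter
where 64-bit loads and stores are not atomic.

#include <stdatomic.h>
#include <stdint.h>

/* Single writer assumed; data fields are atomics so the lockless reader
 * is well-defined. */
struct irqtime_sketch {
	atomic_uint seq;		/* even: stable, odd: update in flight */
	_Atomic uint64_t total;
	_Atomic uint64_t tick_delta;
};

static void irqtime_add_sketch(struct irqtime_sketch *it, uint64_t delta)
{
	atomic_fetch_add_explicit(&it->seq, 1, memory_order_acq_rel);	/* begin */
	atomic_fetch_add_explicit(&it->total, delta, memory_order_relaxed);
	atomic_fetch_add_explicit(&it->tick_delta, delta, memory_order_relaxed);
	atomic_fetch_add_explicit(&it->seq, 1, memory_order_release);	/* end */
}

static uint64_t irqtime_total_sketch(struct irqtime_sketch *it)
{
	unsigned int seq;
	uint64_t total;

	do {
		seq   = atomic_load_explicit(&it->seq, memory_order_acquire);
		total = atomic_load_explicit(&it->total, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
	} while ((seq & 1) ||
		 seq != atomic_load_explicit(&it->seq, memory_order_relaxed));

	return total;
}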
kernel/sched/sched.h  (+7 -2)
···
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 struct irqtime {
+	u64			total;
 	u64			tick_delta;
 	u64			irq_start_time;
 	struct u64_stats_sync	sync;
···
 
 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
 
+/*
+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
+ * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime
+ * and never move forward.
+ */
 static inline u64 irq_time_read(int cpu)
 {
 	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-	u64 *cpustat = kcpustat_cpu(cpu).cpustat;
 	unsigned int seq;
 	u64 total;
 
 	do {
 		seq = __u64_stats_fetch_begin(&irqtime->sync);
-		total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
+		total = irqtime->total;
 	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 
 	return total;
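
With the struct field in place, irq_time_read() now returns the dedicated
irqtime->total accumulator instead of re-deriving the value from kcpustat,
which is exactly what keeps ksoftirqd's tick-accounted softirq time out of the
snapshot. To show why that matters, here is a simplified, self-contained
sketch of a consumer in the spirit of the scheduler's runqueue-clock update;
struct, field and function names are illustrative, not the actual kernel code.

#include <stdint.h>

/* Illustrative stand-in for a runqueue. */
struct rq_sketch {
	uint64_t prev_irq_time;		/* last irq-time snapshot seen    */
	uint64_t clock_task;		/* time actually charged to tasks */
};

/*
 * 'irq_time' plays the role of an irq_time_read(cpu) snapshot and 'delta'
 * the raw sched_clock delta being added to the runqueue clock.  Whatever
 * went to hard/soft IRQ handling is discounted so it is not charged to
 * the currently running task.
 */
static void update_clock_task_sketch(struct rq_sketch *rq,
				     uint64_t irq_time, int64_t delta)
{
	int64_t irq_delta = (int64_t)(irq_time - rq->prev_irq_time);

	/* Never discount more than the wall-clock delta being added. */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	rq->clock_task    += delta - irq_delta;
}

With the old irq_time_read(), ksoftirqd's own softirq time showed up in
irq_delta here, so almost nothing was left in delta - irq_delta and its
sum_exec_runtime stalled; with irqtime->total it no longer does.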