Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched: loadavg: consolidate LOAD_INT, LOAD_FRAC, CALC_LOAD

There are several definitions of those functions/macros in places that
mess with fixed-point load averages. Provide an official version.

[akpm@linux-foundation.org: fix missed conversion in block/blk-iolatency.c]
Link: http://lkml.kernel.org/r/20180828172258.3185-5-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Daniel Drake <drake@endlessm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Weiner <jweiner@fb.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Enderborg <peter.enderborg@sony.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Johannes Weiner and committed by Linus Torvalds
commit 8508cf3f (parent b1d29ba8)

+27 -46
+1 -1
arch/powerpc/platforms/cell/cpufreq_spudemand.c
··· 49 49 cpu = info->policy->cpu; 50 50 busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus); 51 51 52 - CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1); 52 + info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1); 53 53 pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n", 54 54 cpu, busy_spus, info->busy_spus); 55 55
+3 -6
arch/powerpc/platforms/cell/spufs/sched.c
··· 987 987 unsigned long active_tasks; /* fixed-point */ 988 988 989 989 active_tasks = count_active_contexts() * FIXED_1; 990 - CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks); 991 - CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks); 992 - CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks); 990 + spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks); 991 + spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks); 992 + spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks); 993 993 } 994 994 995 995 static void spusched_wake(struct timer_list *unused) ··· 1070 1070 atomic_inc(&cbe_spu_info[node].busy_spus); 1071 1071 } 1072 1072 } 1073 - 1074 - #define LOAD_INT(x) ((x) >> FSHIFT) 1075 - #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) 1076 1073 1077 1074 static int show_spu_loadavg(struct seq_file *s, void *private) 1078 1075 {
-4
arch/s390/appldata/appldata_os.c
··· 25 25 26 26 #include "appldata.h" 27 27 28 - 29 - #define LOAD_INT(x) ((x) >> FSHIFT) 30 - #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) 31 - 32 28 /* 33 29 * OS data 34 30 *
+5 -3
block/blk-iolatency.c
··· 153 153 #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC 154 154 /* 155 155 * These are the constants used to fake the fixed-point moving average 156 - * calculation just like load average. The call to CALC_LOAD folds 156 + * calculation just like load average. The call to calc_load() folds 157 157 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling 158 158 * window size is bucketed to try to approximately calculate average 159 159 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows ··· 248 248 return; 249 249 250 250 /* 251 - * CALC_LOAD takes in a number stored in fixed point representation. 251 + * calc_load() takes in a number stored in fixed point representation. 252 252 * Because we are using this for IO time in ns, the values stored 253 253 * are significantly larger than the FIXED_1 denominator (2048). 254 254 * Therefore, rounding errors in the calculation are negligible and ··· 257 257 exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1, 258 258 div64_u64(iolat->cur_win_nsec, 259 259 BLKIOLATENCY_EXP_BUCKET_SIZE)); 260 - CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat->rqs.mean); 260 + iolat->lat_avg = calc_load(iolat->lat_avg, 261 + iolatency_exp_factors[exp_idx], 262 + stat->rqs.mean); 261 263 } 262 264 263 265 static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
-4
drivers/cpuidle/governors/menu.c
··· 130 130 int interval_ptr; 131 131 }; 132 132 133 - 134 - #define LOAD_INT(x) ((x) >> FSHIFT) 135 - #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) 136 - 137 133 static inline int get_loadavg(unsigned long load) 138 134 { 139 135 return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
-3
fs/proc/loadavg.c
··· 10 10 #include <linux/seqlock.h> 11 11 #include <linux/time.h> 12 12 13 - #define LOAD_INT(x) ((x) >> FSHIFT) 14 - #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) 15 - 16 13 static int loadavg_proc_show(struct seq_file *m, void *v) 17 14 { 18 15 unsigned long avnrun[3];
+17 -4
include/linux/sched/loadavg.h
··· 22 22 #define EXP_5 2014 /* 1/exp(5sec/5min) */ 23 23 #define EXP_15 2037 /* 1/exp(5sec/15min) */ 24 24 25 - #define CALC_LOAD(load,exp,n) \ 26 - load *= exp; \ 27 - load += n*(FIXED_1-exp); \ 28 - load >>= FSHIFT; 25 + /* 26 + * a1 = a0 * e + a * (1 - e) 27 + */ 28 + static inline unsigned long 29 + calc_load(unsigned long load, unsigned long exp, unsigned long active) 30 + { 31 + unsigned long newload; 32 + 33 + newload = load * exp + active * (FIXED_1 - exp); 34 + if (active >= load) 35 + newload += FIXED_1-1; 36 + 37 + return newload / FIXED_1; 38 + } 39 + 40 + #define LOAD_INT(x) ((x) >> FSHIFT) 41 + #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) 29 42 30 43 extern void calc_global_load(unsigned long ticks); 31 44
+1 -6
kernel/debug/kdb/kdb_main.c
··· 2556 2556 } 2557 2557 kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60); 2558 2558 2559 - /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */ 2560 - 2561 - #define LOAD_INT(x) ((x) >> FSHIFT) 2562 - #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) 2563 2559 kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n", 2564 2560 LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), 2565 2561 LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), 2566 2562 LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2])); 2567 - #undef LOAD_INT 2568 - #undef LOAD_FRAC 2563 + 2569 2564 /* Display in kilobytes */ 2570 2565 #define K(x) ((x) << (PAGE_SHIFT - 10)) 2571 2566 kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
-15
kernel/sched/loadavg.c
··· 91 91 return delta; 92 92 } 93 93 94 - /* 95 - * a1 = a0 * e + a * (1 - e) 96 - */ 97 - static unsigned long 98 - calc_load(unsigned long load, unsigned long exp, unsigned long active) 99 - { 100 - unsigned long newload; 101 - 102 - newload = load * exp + active * (FIXED_1 - exp); 103 - if (active >= load) 104 - newload += FIXED_1-1; 105 - 106 - return newload / FIXED_1; 107 - } 108 - 109 94 #ifdef CONFIG_NO_HZ_COMMON 110 95 /* 111 96 * Handle NO_HZ for the global load-average.