block/blk-stat.c at v4.20
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		blk_rq_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}
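
The exported callback interface above (blk_stat_alloc_callback(), blk_stat_add_callback(), blk_stat_remove_callback(), blk_stat_free_callback(), plus the blk_stat_activate_msecs() helper from blk-stat.h) is driven by in-tree users such as blk-wbt and the Kyber I/O scheduler. The following is a minimal, hypothetical sketch of that usage pattern rather than code from the tree: the example_* names and the 100 ms sampling window are illustrative, and the bucket function simply splits requests by data direction, roughly as those users do.

/*
 * Sketch of a blk_stat callback consumer (illustrative, not in-tree code).
 * Buckets completed requests by data direction and periodically logs the
 * aggregated window that blk_stat_timer_fn() hands to the consumer.
 */
#include <linux/kernel.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"

/*
 * Runs from the callback's timer: by the time this is called,
 * blk_stat_timer_fn() has already folded the per-cpu buckets into
 * cb->stat[]. The timer is not re-armed automatically, so the consumer
 * re-arms it to open the next sampling window.
 */
static void example_timer_fn(struct blk_stat_callback *cb)
{
	unsigned int dir;

	for (dir = 0; dir < cb->buckets; dir++)
		pr_debug("dir %u: samples=%llu mean=%llu ns min=%llu max=%llu\n",
			 dir,
			 (unsigned long long)cb->stat[dir].nr_samples,
			 (unsigned long long)cb->stat[dir].mean,
			 (unsigned long long)cb->stat[dir].min,
			 (unsigned long long)cb->stat[dir].max);

	/* Re-arm to collect the next window (interval chosen arbitrarily). */
	blk_stat_activate_msecs(cb, 100);
}

/* Map a request to a bucket index; a negative return drops the sample. */
static int example_bucket_fn(const struct request *rq)
{
	return rq_data_dir(rq);		/* 0 = READ, 1 = WRITE */
}

/* Attach a two-bucket callback to a queue and start the first window. */
static struct blk_stat_callback *example_setup(struct request_queue *q)
{
	struct blk_stat_callback *cb;

	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
				     2, q);
	if (!cb)
		return NULL;

	/* Hooks into the RCU list and sets QUEUE_FLAG_STATS on the queue. */
	blk_stat_add_callback(q, cb);

	/* Samples are only collected while the timer is pending. */
	blk_stat_activate_msecs(cb, 100);
	return cb;
}

/* Teardown: stop the timer, unhook, then free after an RCU grace period. */
static void example_teardown(struct request_queue *q,
			     struct blk_stat_callback *cb)
{
	blk_stat_remove_callback(q, cb);
	blk_stat_free_callback(cb);
}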