blk-cgroup: Add unaccounted time to timeslice_used.

There are two kinds of time that tasks are not charged for: the first
seek and the extra time slice used over the allocated timeslice. Both
of these are exported as a new unaccounted_time stat.

I think it would be good to have this reported in 'time' as well, but
that is probably a separate discussion.

Signed-off-by: Justin TerAvest <teravest@google.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

authored by

Justin TerAvest and committed by
Jens Axboe
167400d3 1f940bdf

+41 -14
+15 -1
block/blk-cgroup.c
··· 371 } 372 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats); 373 374 - void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time) 375 { 376 unsigned long flags; 377 378 spin_lock_irqsave(&blkg->stats_lock, flags); 379 blkg->stats.time += time; 380 spin_unlock_irqrestore(&blkg->stats_lock, flags); 381 } 382 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used); ··· 605 if (type == BLKIO_STAT_SECTORS) 606 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, 607 blkg->stats.sectors, cb, dev); 608 #ifdef CONFIG_DEBUG_BLK_CGROUP 609 if (type == BLKIO_STAT_AVG_QUEUE_SIZE) { 610 uint64_t sum = blkg->stats.avg_queue_size_sum; ··· 1111 case BLKIO_PROP_sectors: 1112 return blkio_read_blkg_stats(blkcg, cft, cb, 1113 BLKIO_STAT_SECTORS, 0); 1114 case BLKIO_PROP_io_service_bytes: 1115 return blkio_read_blkg_stats(blkcg, cft, cb, 1116 BLKIO_STAT_SERVICE_BYTES, 1); ··· 1267 .name = "sectors", 1268 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, 1269 BLKIO_PROP_sectors), 1270 .read_map = blkiocg_file_read_map, 1271 }, 1272 {
··· 371 } 372 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats); 373 374 + void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time, 375 + unsigned long unaccounted_time) 376 { 377 unsigned long flags; 378 379 spin_lock_irqsave(&blkg->stats_lock, flags); 380 blkg->stats.time += time; 381 + blkg->stats.unaccounted_time += unaccounted_time; 382 spin_unlock_irqrestore(&blkg->stats_lock, flags); 383 } 384 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used); ··· 603 if (type == BLKIO_STAT_SECTORS) 604 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, 605 blkg->stats.sectors, cb, dev); 606 + if (type == BLKIO_STAT_UNACCOUNTED_TIME) 607 + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, 608 + blkg->stats.unaccounted_time, cb, dev); 609 #ifdef CONFIG_DEBUG_BLK_CGROUP 610 if (type == BLKIO_STAT_AVG_QUEUE_SIZE) { 611 uint64_t sum = blkg->stats.avg_queue_size_sum; ··· 1106 case BLKIO_PROP_sectors: 1107 return blkio_read_blkg_stats(blkcg, cft, cb, 1108 BLKIO_STAT_SECTORS, 0); 1109 + case BLKIO_PROP_unaccounted_time: 1110 + return blkio_read_blkg_stats(blkcg, cft, cb, 1111 + BLKIO_STAT_UNACCOUNTED_TIME, 0); 1112 case BLKIO_PROP_io_service_bytes: 1113 return blkio_read_blkg_stats(blkcg, cft, cb, 1114 BLKIO_STAT_SERVICE_BYTES, 1); ··· 1259 .name = "sectors", 1260 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, 1261 BLKIO_PROP_sectors), 1262 + .read_map = blkiocg_file_read_map, 1263 + }, 1264 + { 1265 + .name = "unaccounted_time", 1266 + .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, 1267 + BLKIO_PROP_unaccounted_time), 1268 .read_map = blkiocg_file_read_map, 1269 }, 1270 {
+10 -2
block/blk-cgroup.h
··· 49 /* All the single valued stats go below this */ 50 BLKIO_STAT_TIME, 51 BLKIO_STAT_SECTORS, 52 #ifdef CONFIG_DEBUG_BLK_CGROUP 53 BLKIO_STAT_AVG_QUEUE_SIZE, 54 BLKIO_STAT_IDLE_TIME, ··· 83 BLKIO_PROP_io_serviced, 84 BLKIO_PROP_time, 85 BLKIO_PROP_sectors, 86 BLKIO_PROP_io_service_time, 87 BLKIO_PROP_io_wait_time, 88 BLKIO_PROP_io_merged, ··· 117 /* total disk time and nr sectors dispatched by this group */ 118 uint64_t time; 119 uint64_t sectors; 120 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL]; 121 #ifdef CONFIG_DEBUG_BLK_CGROUP 122 /* Sum of number of IOs queued across all samples */ ··· 298 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, 299 void *key); 300 void blkiocg_update_timeslice_used(struct blkio_group *blkg, 301 - unsigned long time); 302 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes, 303 bool direction, bool sync); 304 void blkiocg_update_completion_stats(struct blkio_group *blkg, ··· 325 static inline struct blkio_group * 326 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; } 327 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg, 328 - unsigned long time) {} 329 static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg, 330 uint64_t bytes, bool direction, bool sync) {} 331 static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
··· 49 /* All the single valued stats go below this */ 50 BLKIO_STAT_TIME, 51 BLKIO_STAT_SECTORS, 52 + /* Time not charged to this cgroup */ 53 + BLKIO_STAT_UNACCOUNTED_TIME, 54 #ifdef CONFIG_DEBUG_BLK_CGROUP 55 BLKIO_STAT_AVG_QUEUE_SIZE, 56 BLKIO_STAT_IDLE_TIME, ··· 81 BLKIO_PROP_io_serviced, 82 BLKIO_PROP_time, 83 BLKIO_PROP_sectors, 84 + BLKIO_PROP_unaccounted_time, 85 BLKIO_PROP_io_service_time, 86 BLKIO_PROP_io_wait_time, 87 BLKIO_PROP_io_merged, ··· 114 /* total disk time and nr sectors dispatched by this group */ 115 uint64_t time; 116 uint64_t sectors; 117 + /* Time not charged to this cgroup */ 118 + uint64_t unaccounted_time; 119 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL]; 120 #ifdef CONFIG_DEBUG_BLK_CGROUP 121 /* Sum of number of IOs queued across all samples */ ··· 293 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, 294 void *key); 295 void blkiocg_update_timeslice_used(struct blkio_group *blkg, 296 + unsigned long time, 297 + unsigned long unaccounted_time); 298 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes, 299 bool direction, bool sync); 300 void blkiocg_update_completion_stats(struct blkio_group *blkg, ··· 319 static inline struct blkio_group * 320 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; } 321 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg, 322 + unsigned long time, 323 + unsigned long unaccounted_time) 324 + {} 325 static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg, 326 uint64_t bytes, bool direction, bool sync) {} 327 static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
+13 -8
block/cfq-iosched.c
··· 899 cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1); 900 } 901 902 - static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) 903 { 904 unsigned int slice_used; 905 ··· 919 1); 920 } else { 921 slice_used = jiffies - cfqq->slice_start; 922 - if (slice_used > cfqq->allocated_slice) 923 slice_used = cfqq->allocated_slice; 924 } 925 926 return slice_used; ··· 935 struct cfq_queue *cfqq) 936 { 937 struct cfq_rb_root *st = &cfqd->grp_service_tree; 938 - unsigned int used_sl, charge; 939 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) 940 - cfqg->service_tree_idle.count; 941 942 BUG_ON(nr_sync < 0); 943 - used_sl = charge = cfq_cfqq_slice_usage(cfqq); 944 945 if (iops_mode(cfqd)) 946 charge = cfqq->slice_dispatch; ··· 966 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" 967 " sect=%u", used_sl, cfqq->slice_dispatch, charge, 968 iops_mode(cfqd), cfqq->nr_sectors); 969 - cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); 970 cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 971 } 972 ··· 3303 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 3304 3305 cfq_service_tree_add(cfqd, cfqq, 1); 3306 - 3307 - cfqq->slice_end = 0; 3308 - cfq_mark_cfqq_slice_new(cfqq); 3309 } 3310 3311 /*
··· 899 cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1); 900 } 901 902 + static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq, 903 + unsigned int *unaccounted_time) 904 { 905 unsigned int slice_used; 906 ··· 918 1); 919 } else { 920 slice_used = jiffies - cfqq->slice_start; 921 + if (slice_used > cfqq->allocated_slice) { 922 + *unaccounted_time = slice_used - cfqq->allocated_slice; 923 slice_used = cfqq->allocated_slice; 924 + } 925 + if (time_after(cfqq->slice_start, cfqq->dispatch_start)) 926 + *unaccounted_time += cfqq->slice_start - 927 + cfqq->dispatch_start; 928 } 929 930 return slice_used; ··· 929 struct cfq_queue *cfqq) 930 { 931 struct cfq_rb_root *st = &cfqd->grp_service_tree; 932 + unsigned int used_sl, charge, unaccounted_sl = 0; 933 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) 934 - cfqg->service_tree_idle.count; 935 936 BUG_ON(nr_sync < 0); 937 + used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); 938 939 if (iops_mode(cfqd)) 940 charge = cfqq->slice_dispatch; ··· 960 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" 961 " sect=%u", used_sl, cfqq->slice_dispatch, charge, 962 iops_mode(cfqd), cfqq->nr_sectors); 963 + cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, 964 + unaccounted_sl); 965 cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 966 } 967 ··· 3296 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 3297 3298 cfq_service_tree_add(cfqd, cfqq, 1); 3299 + __cfq_set_active_queue(cfqd, cfqq); 3300 } 3301 3302 /*
+3 -3
block/cfq.h
··· 16 } 17 18 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, 19 - unsigned long time) 20 { 21 - blkiocg_update_timeslice_used(blkg, time); 22 } 23 24 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) ··· 85 unsigned long dequeue) {} 86 87 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, 88 - unsigned long time) {} 89 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {} 90 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, 91 bool direction, bool sync) {}
··· 16 } 17 18 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, 19 + unsigned long time, unsigned long unaccounted_time) 20 { 21 + blkiocg_update_timeslice_used(blkg, time, unaccounted_time); 22 } 23 24 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) ··· 85 unsigned long dequeue) {} 86 87 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, 88 + unsigned long time, unsigned long unaccounted_time) {} 89 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {} 90 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, 91 bool direction, bool sync) {}