blk-cgroup: Add unaccounted time to timeslice_used.

There are two kinds of time that tasks are not charged for: the first
seek and the extra time slice used over the allocated timeslice. Both
of these are exported as a new unaccounted_time stat.

I think it would be good to have this reported in 'time' as well, but
that is probably a separate discussion.

Signed-off-by: Justin TerAvest <teravest@google.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

Authored by Justin TerAvest, committed by Jens Axboe.
Commit: 167400d3 (parent: 1f940bdf)

Total: 41 insertions(+), 14 deletions(-)
block/blk-cgroup.c (+15 -1)

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ ... @@
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
 
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
+				unsigned long unaccounted_time)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&blkg->stats_lock, flags);
 	blkg->stats.time += time;
+	blkg->stats.unaccounted_time += unaccounted_time;
 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
@@ ... @@
 	if (type == BLKIO_STAT_SECTORS)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
 					blkg->stats.sectors, cb, dev);
+	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+					blkg->stats.unaccounted_time, cb, dev);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
 		uint64_t sum = blkg->stats.avg_queue_size_sum;
@@ ... @@
 	case BLKIO_PROP_sectors:
 		return blkio_read_blkg_stats(blkcg, cft, cb,
 						BLKIO_STAT_SECTORS, 0);
+	case BLKIO_PROP_unaccounted_time:
+		return blkio_read_blkg_stats(blkcg, cft, cb,
+						BLKIO_STAT_UNACCOUNTED_TIME, 0);
 	case BLKIO_PROP_io_service_bytes:
 		return blkio_read_blkg_stats(blkcg, cft, cb,
 						BLKIO_STAT_SERVICE_BYTES, 1);
@@ ... @@
 		.name = "sectors",
 		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
 				BLKIO_PROP_sectors),
+		.read_map = blkiocg_file_read_map,
+	},
+	{
+		.name = "unaccounted_time",
+		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
+				BLKIO_PROP_unaccounted_time),
 		.read_map = blkiocg_file_read_map,
 	},
 	{
block/blk-cgroup.h (+10 -2)

--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ ... @@
 	/* All the single valued stats go below this */
 	BLKIO_STAT_TIME,
 	BLKIO_STAT_SECTORS,
+	/* Time not charged to this cgroup */
+	BLKIO_STAT_UNACCOUNTED_TIME,
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	BLKIO_STAT_AVG_QUEUE_SIZE,
 	BLKIO_STAT_IDLE_TIME,
@@ ... @@
 	BLKIO_PROP_io_serviced,
 	BLKIO_PROP_time,
 	BLKIO_PROP_sectors,
+	BLKIO_PROP_unaccounted_time,
 	BLKIO_PROP_io_service_time,
 	BLKIO_PROP_io_wait_time,
 	BLKIO_PROP_io_merged,
@@ ... @@
 	/* total disk time and nr sectors dispatched by this group */
 	uint64_t time;
 	uint64_t sectors;
+	/* Time not charged to this cgroup */
+	uint64_t unaccounted_time;
 	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	/* Sum of number of IOs queued across all samples */
@@ ... @@
 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
 						void *key);
 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-				unsigned long time);
+				unsigned long time,
+				unsigned long unaccounted_time);
 void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
 				bool direction, bool sync);
 void blkiocg_update_completion_stats(struct blkio_group *blkg,
@@ ... @@
 static inline struct blkio_group *
 blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
 static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-				unsigned long time) {}
+				unsigned long time,
+				unsigned long unaccounted_time)
+{}
 static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 				uint64_t bytes, bool direction, bool sync) {}
 static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
block/cfq-iosched.c (+13 -8)

--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ ... @@
 	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
-static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+					unsigned int *unaccounted_time)
 {
 	unsigned int slice_used;
 
@@ ... @@
 			1);
 	} else {
 		slice_used = jiffies - cfqq->slice_start;
-		if (slice_used > cfqq->allocated_slice)
+		if (slice_used > cfqq->allocated_slice) {
+			*unaccounted_time = slice_used - cfqq->allocated_slice;
 			slice_used = cfqq->allocated_slice;
+		}
+		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+			*unaccounted_time += cfqq->slice_start -
+					cfqq->dispatch_start;
 	}
 
 	return slice_used;
@@ ... @@
 			struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
-	unsigned int used_sl, charge;
+	unsigned int used_sl, charge, unaccounted_sl = 0;
 	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
 			- cfqg->service_tree_idle.count;
 
 	BUG_ON(nr_sync < 0);
-	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
+	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
 
 	if (iops_mode(cfqd))
 		charge = cfqq->slice_dispatch;
@@ ... @@
 	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
 			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
 			iops_mode(cfqd), cfqq->nr_sectors);
-	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
+					unaccounted_sl);
 	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
@@ ... @@
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 
 	cfq_service_tree_add(cfqd, cfqq, 1);
-
-	cfqq->slice_end = 0;
-	cfq_mark_cfqq_slice_new(cfqq);
+	__cfq_set_active_queue(cfqd, cfqq);
 }
 
 /*
block/cfq.h (+3 -3)

--- a/block/cfq.h
+++ b/block/cfq.h
@@ ... @@
 }
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			unsigned long time)
+			unsigned long time, unsigned long unaccounted_time)
 {
-	blkiocg_update_timeslice_used(blkg, time);
+	blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
 }
 
 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
@@ ... @@
 			unsigned long dequeue) {}
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-		unsigned long time) {}
+		unsigned long time, unsigned long unaccounted_time) {}
 static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 		bool direction, bool sync) {}