Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: simplify I/O stat accounting

This simplifies I/O stat accounting switching code and separates it
completely from I/O scheduler switch code.

Requests are accounted according to the state of their request queue
at the time of the request allocation. There is no longer any need to
flush the request queue when switching the I/O accounting state.

Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

authored by

Jerome Marchand and committed by
Jens Axboe
42dad764 097102c2

+12 -13
+4 -2
block/blk-core.c
··· 643 643 } 644 644 645 645 static struct request * 646 - blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask) 646 + blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) 647 647 { 648 648 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 649 649 ··· 652 652 653 653 blk_rq_init(q, rq); 654 654 655 - rq->cmd_flags = rw | REQ_ALLOCED; 655 + rq->cmd_flags = flags | REQ_ALLOCED; 656 656 657 657 if (priv) { 658 658 if (unlikely(elv_set_request(q, rq, gfp_mask))) { ··· 792 792 if (priv) 793 793 rl->elvpriv++; 794 794 795 + if (blk_queue_io_stat(q)) 796 + rw_flags |= REQ_IO_STAT; 795 797 spin_unlock_irq(q->queue_lock); 796 798 797 799 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+4 -1
block/blk-merge.c
··· 402 402 403 403 elv_merge_requests(q, req, next); 404 404 405 - blk_account_io_merge(req); 405 + /* 406 + * 'next' is going away, so update stats accordingly 407 + */ 408 + blk_account_io_merge(next); 406 409 407 410 req->ioprio = ioprio_best(req->ioprio, next->ioprio); 408 411 if (blk_rq_cpu_valid(next))
-4
block/blk-sysfs.c
··· 209 209 ssize_t ret = queue_var_store(&stats, page, count); 210 210 211 211 spin_lock_irq(q->queue_lock); 212 - elv_quiesce_start(q); 213 - 214 212 if (stats) 215 213 queue_flag_set(QUEUE_FLAG_IO_STAT, q); 216 214 else 217 215 queue_flag_clear(QUEUE_FLAG_IO_STAT, q); 218 - 219 - elv_quiesce_end(q); 220 216 spin_unlock_irq(q->queue_lock); 221 217 222 218 return ret;
+1 -6
block/blk.h
··· 114 114 115 115 static inline int blk_do_io_stat(struct request *rq) 116 116 { 117 - struct gendisk *disk = rq->rq_disk; 118 - 119 - if (!disk || !disk->queue) 120 - return 0; 121 - 122 - return blk_queue_io_stat(disk->queue) && (rq->cmd_flags & REQ_ELVPRIV); 117 + return rq->rq_disk && blk_rq_io_stat(rq); 123 118 } 124 119 125 120 #endif
+3
include/linux/blkdev.h
··· 118 118 __REQ_COPY_USER, /* contains copies of user pages */ 119 119 __REQ_INTEGRITY, /* integrity metadata has been remapped */ 120 120 __REQ_NOIDLE, /* Don't anticipate more IO after this one */ 121 + __REQ_IO_STAT, /* account I/O stat */ 121 122 __REQ_NR_BITS, /* stops here */ 122 123 }; 123 124 ··· 146 145 #define REQ_COPY_USER (1 << __REQ_COPY_USER) 147 146 #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) 148 147 #define REQ_NOIDLE (1 << __REQ_NOIDLE) 148 + #define REQ_IO_STAT (1 << __REQ_IO_STAT) 149 149 150 150 #define BLK_MAX_CDB 16 151 151 ··· 600 598 blk_failfast_transport(rq) || \ 601 599 blk_failfast_driver(rq)) 602 600 #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 601 + #define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT) 603 602 604 603 #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 605 604