Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: Clean up special command handling logic

Remove special-casing of non-rw fs style requests (discard). The nomerge
flags are consolidated in blk_types.h, and rq_mergeable() and
bio_mergeable() have been modified to use them.

bio_is_rw() is used in place of bio_has_data() in a few places. This is
done to distinguish true reads and writes from other fs type requests
that carry a payload (e.g. write same).

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Martin K. Petersen and committed by
Jens Axboe
e2a60da7 d41570b7

+46 -49
+6 -7
block/blk-core.c
··· 1657 1657 goto end_io; 1658 1658 } 1659 1659 1660 - if (unlikely(!(bio->bi_rw & REQ_DISCARD) && 1661 - nr_sectors > queue_max_hw_sectors(q))) { 1660 + if (likely(bio_is_rw(bio) && 1661 + nr_sectors > queue_max_hw_sectors(q))) { 1662 1662 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1663 1663 bdevname(bio->bi_bdev, b), 1664 1664 bio_sectors(bio), ··· 1699 1699 1700 1700 if ((bio->bi_rw & REQ_DISCARD) && 1701 1701 (!blk_queue_discard(q) || 1702 - ((bio->bi_rw & REQ_SECURE) && 1703 - !blk_queue_secdiscard(q)))) { 1702 + ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) { 1704 1703 err = -EOPNOTSUPP; 1705 1704 goto end_io; 1706 1705 } ··· 1817 1818 * If it's a regular read/write or a barrier with data attached, 1818 1819 * go through the normal accounting stuff before submission. 1819 1820 */ 1820 - if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { 1821 + if (bio_has_data(bio)) { 1821 1822 if (rw & WRITE) { 1822 1823 count_vm_events(PGPGOUT, count); 1823 1824 } else { ··· 1863 1864 */ 1864 1865 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1865 1866 { 1866 - if (rq->cmd_flags & REQ_DISCARD) 1867 + if (!rq_mergeable(rq)) 1867 1868 return 0; 1868 1869 1869 1870 if (blk_rq_sectors(rq) > queue_max_sectors(q) || ··· 2337 2338 req->buffer = bio_data(req->bio); 2338 2339 2339 2340 /* update sector only for requests with clear definition of sector */ 2340 - if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2341 + if (req->cmd_type == REQ_TYPE_FS) 2341 2342 req->__sector += total_bytes >> 9; 2342 2343 2343 2344 /* mixed attributes always follow the first bio */
+1 -21
block/blk-merge.c
··· 418 418 return 0; 419 419 420 420 /* 421 - * Don't merge file system requests and discard requests 422 - */ 423 - if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD)) 424 - return 0; 425 - 426 - /* 427 - * Don't merge discard requests and secure discard requests 428 - */ 429 - if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE)) 430 - return 0; 431 - 432 - /* 433 421 * not contiguous 434 422 */ 435 423 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) ··· 509 521 510 522 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) 511 523 { 512 - if (!rq_mergeable(rq)) 513 - return false; 514 - 515 - /* don't merge file system requests and discard requests */ 516 - if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD)) 517 - return false; 518 - 519 - /* don't merge discard requests and secure discard requests */ 520 - if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE)) 524 + if (!rq_mergeable(rq) || !bio_mergeable(bio)) 521 525 return false; 522 526 523 527 /* different data direction or already started, don't merge */
+2 -3
block/blk.h
··· 171 171 * 172 172 * a) it's attached to a gendisk, and 173 173 * b) the queue had IO stats enabled when this request was started, and 174 - * c) it's a file system request or a discard request 174 + * c) it's a file system request 175 175 */ 176 176 static inline int blk_do_io_stat(struct request *rq) 177 177 { 178 178 return rq->rq_disk && 179 179 (rq->cmd_flags & REQ_IO_STAT) && 180 - (rq->cmd_type == REQ_TYPE_FS || 181 - (rq->cmd_flags & REQ_DISCARD)); 180 + (rq->cmd_type == REQ_TYPE_FS); 182 181 } 183 182 184 183 /*
+2 -4
block/elevator.c
··· 562 562 563 563 if (rq->cmd_flags & REQ_SOFTBARRIER) { 564 564 /* barriers are scheduling boundary, update end_sector */ 565 - if (rq->cmd_type == REQ_TYPE_FS || 566 - (rq->cmd_flags & REQ_DISCARD)) { 565 + if (rq->cmd_type == REQ_TYPE_FS) { 567 566 q->end_sector = rq_end_sector(rq); 568 567 q->boundary_rq = rq; 569 568 } ··· 604 605 if (elv_attempt_insert_merge(q, rq)) 605 606 break; 606 607 case ELEVATOR_INSERT_SORT: 607 - BUG_ON(rq->cmd_type != REQ_TYPE_FS && 608 - !(rq->cmd_flags & REQ_DISCARD)); 608 + BUG_ON(rq->cmd_type != REQ_TYPE_FS); 609 609 rq->cmd_flags |= REQ_SORTED; 610 610 q->nr_sorted++; 611 611 if (rq_mergeable(rq)) {
+21 -2
include/linux/bio.h
··· 386 386 /* 387 387 * Check whether this bio carries any data or not. A NULL bio is allowed. 388 388 */ 389 - static inline int bio_has_data(struct bio *bio) 389 + static inline bool bio_has_data(struct bio *bio) 390 390 { 391 - return bio && bio->bi_io_vec != NULL; 391 + if (bio && bio->bi_vcnt) 392 + return true; 393 + 394 + return false; 395 + } 396 + 397 + static inline bool bio_is_rw(struct bio *bio) 398 + { 399 + if (!bio_has_data(bio)) 400 + return false; 401 + 402 + return true; 403 + } 404 + 405 + static inline bool bio_mergeable(struct bio *bio) 406 + { 407 + if (bio->bi_rw & REQ_NOMERGE_FLAGS) 408 + return false; 409 + 410 + return true; 392 411 } 393 412 394 413 /*
+4
include/linux/blk_types.h
··· 194 194 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) 195 195 #define REQ_CLONE_MASK REQ_COMMON_MASK 196 196 197 + /* This mask is used for both bio and request merge checking */ 198 + #define REQ_NOMERGE_FLAGS \ 199 + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) 200 + 197 201 #define REQ_RAHEAD (1 << __REQ_RAHEAD) 198 202 #define REQ_THROTTLED (1 << __REQ_THROTTLED) 199 203
+10 -12
include/linux/blkdev.h
··· 540 540 541 541 #define blk_account_rq(rq) \ 542 542 (((rq)->cmd_flags & REQ_STARTED) && \ 543 - ((rq)->cmd_type == REQ_TYPE_FS || \ 544 - ((rq)->cmd_flags & REQ_DISCARD))) 543 + ((rq)->cmd_type == REQ_TYPE_FS)) 545 544 546 545 #define blk_pm_request(rq) \ 547 546 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ ··· 594 595 rl->flags &= ~flag; 595 596 } 596 597 598 + static inline bool rq_mergeable(struct request *rq) 599 + { 600 + if (rq->cmd_type != REQ_TYPE_FS) 601 + return false; 597 602 598 - /* 599 - * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may 600 - * it already be started by driver. 601 - */ 602 - #define RQ_NOMERGE_FLAGS \ 603 - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD) 604 - #define rq_mergeable(rq) \ 605 - (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 606 - (((rq)->cmd_flags & REQ_DISCARD) || \ 607 - (rq)->cmd_type == REQ_TYPE_FS)) 603 + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 604 + return false; 605 + 606 + return true; 607 + } 608 608 609 609 /* 610 610 * q->prep_rq_fn return values