Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: Consolidate command flag and queue limit checks for merges

- blk_check_merge_flags() verifies that cmd_flags / bi_rw are
compatible. This function is called for both req-req and req-bio
merging.

- blk_rq_get_max_sectors() and blk_queue_get_max_sectors() can be used
to query the maximum sector count for a given request or queue. The
calls will return the right value from the queue limits given the
type of command (RW, discard, write same, etc.).

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Martin K. Petersen and committed by Jens Axboe
f31dc1cd e2a60da7

+44 -20
+1 -2
block/blk-core.c
··· 1866 1866 if (!rq_mergeable(rq)) 1867 1867 return 0; 1868 1868 1869 - if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1870 - blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { 1869 + if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { 1871 1870 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1872 1871 return -EIO; 1873 1872 }
+12 -18
block/blk-merge.c
··· 275 275 int ll_back_merge_fn(struct request_queue *q, struct request *req, 276 276 struct bio *bio) 277 277 { 278 - unsigned short max_sectors; 279 - 280 - if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) 281 - max_sectors = queue_max_hw_sectors(q); 282 - else 283 - max_sectors = queue_max_sectors(q); 284 - 285 - if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { 278 + if (blk_rq_sectors(req) + bio_sectors(bio) > 279 + blk_rq_get_max_sectors(req)) { 286 280 req->cmd_flags |= REQ_NOMERGE; 287 281 if (req == q->last_merge) 288 282 q->last_merge = NULL; ··· 293 299 int ll_front_merge_fn(struct request_queue *q, struct request *req, 294 300 struct bio *bio) 295 301 { 296 - unsigned short max_sectors; 297 - 298 - if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) 299 - max_sectors = queue_max_hw_sectors(q); 300 - else 301 - max_sectors = queue_max_sectors(q); 302 - 303 - 304 - if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { 302 + if (blk_rq_sectors(req) + bio_sectors(bio) > 303 + blk_rq_get_max_sectors(req)) { 305 304 req->cmd_flags |= REQ_NOMERGE; 306 305 if (req == q->last_merge) 307 306 q->last_merge = NULL; ··· 325 338 /* 326 339 * Will it become too large? 
327 340 */ 328 - if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q)) 341 + if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > 342 + blk_rq_get_max_sectors(req)) 329 343 return 0; 330 344 331 345 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; ··· 403 415 struct request *next) 404 416 { 405 417 if (!rq_mergeable(req) || !rq_mergeable(next)) 418 + return 0; 419 + 420 + if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags)) 406 421 return 0; 407 422 408 423 /* ··· 501 510 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) 502 511 { 503 512 if (!rq_mergeable(rq) || !bio_mergeable(bio)) 513 + return false; 514 + 515 + if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) 504 516 return false; 505 517 506 518 /* different data direction or already started, don't merge */
+31
include/linux/blkdev.h
··· 605 605 return true; 606 606 } 607 607 608 + static inline bool blk_check_merge_flags(unsigned int flags1, 609 + unsigned int flags2) 610 + { 611 + if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) 612 + return false; 613 + 614 + if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) 615 + return false; 616 + 617 + return true; 618 + } 619 + 608 620 /* 609 621 * q->prep_rq_fn return values 610 622 */ ··· 810 798 static inline unsigned int blk_rq_cur_sectors(const struct request *rq) 811 799 { 812 800 return blk_rq_cur_bytes(rq) >> 9; 801 + } 802 + 803 + static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, 804 + unsigned int cmd_flags) 805 + { 806 + if (unlikely(cmd_flags & REQ_DISCARD)) 807 + return q->limits.max_discard_sectors; 808 + 809 + return q->limits.max_sectors; 810 + } 811 + 812 + static inline unsigned int blk_rq_get_max_sectors(struct request *rq) 813 + { 814 + struct request_queue *q = rq->q; 815 + 816 + if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) 817 + return q->limits.max_hw_sectors; 818 + 819 + return blk_queue_get_max_sectors(q, rq->cmd_flags); 813 820 } 814 821 815 822 /*