Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: move a few merge helpers out of <linux/blkdev.h>

These are block-layer internal helpers, so move them to block/blk.h and
block/blk-merge.c. Also update a comment a bit to use better grammar.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20210920123328.1399408-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig and committed by Jens Axboe
badf7f64 b81e0c23

+62 -64
+24
block/blk-merge.c
··· 558 558 return queue_max_segments(rq->q); 559 559 } 560 560 561 + static inline unsigned int blk_rq_get_max_sectors(struct request *rq, 562 + sector_t offset) 563 + { 564 + struct request_queue *q = rq->q; 565 + 566 + if (blk_rq_is_passthrough(rq)) 567 + return q->limits.max_hw_sectors; 568 + 569 + if (!q->limits.chunk_sectors || 570 + req_op(rq) == REQ_OP_DISCARD || 571 + req_op(rq) == REQ_OP_SECURE_ERASE) 572 + return blk_queue_get_max_sectors(q, req_op(rq)); 573 + 574 + return min(blk_max_size_offset(q, offset, 0), 575 + blk_queue_get_max_sectors(q, req_op(rq))); 576 + } 577 + 561 578 static inline int ll_new_hw_segment(struct request *req, struct bio *bio, 562 579 unsigned int nr_phys_segs) 563 580 { ··· 733 716 return ELEVATOR_BACK_MERGE; 734 717 735 718 return ELEVATOR_NO_MERGE; 719 + } 720 + 721 + static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) 722 + { 723 + if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b)) 724 + return true; 725 + return false; 736 726 } 737 727 738 728 /*
+38
block/blk.h
··· 96 96 return __bvec_gap_to_prev(q, bprv, offset); 97 97 } 98 98 99 + static inline bool rq_mergeable(struct request *rq) 100 + { 101 + if (blk_rq_is_passthrough(rq)) 102 + return false; 103 + 104 + if (req_op(rq) == REQ_OP_FLUSH) 105 + return false; 106 + 107 + if (req_op(rq) == REQ_OP_WRITE_ZEROES) 108 + return false; 109 + 110 + if (req_op(rq) == REQ_OP_ZONE_APPEND) 111 + return false; 112 + 113 + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 114 + return false; 115 + if (rq->rq_flags & RQF_NOMERGE_FLAGS) 116 + return false; 117 + 118 + return true; 119 + } 120 + 121 + /* 122 + * There are two different ways to handle DISCARD merges: 123 + * 1) If max_discard_segments > 1, the driver treats every bio as a range and 124 + * send the bios to controller together. The ranges don't need to be 125 + * contiguous. 126 + * 2) Otherwise, the request will be normal read/write requests. The ranges 127 + * need to be contiguous. 128 + */ 129 + static inline bool blk_discard_mergable(struct request *req) 130 + { 131 + if (req_op(req) == REQ_OP_DISCARD && 132 + queue_max_discard_segments(req->q) > 1) 133 + return true; 134 + return false; 135 + } 136 + 99 137 #ifdef CONFIG_BLK_DEV_INTEGRITY 100 138 void blk_flush_integrity(void); 101 139 bool __bio_integrity_endio(struct bio *);
-64
include/linux/blkdev.h
··· 745 745 return op_is_sync(rq->cmd_flags); 746 746 } 747 747 748 - static inline bool rq_mergeable(struct request *rq) 749 - { 750 - if (blk_rq_is_passthrough(rq)) 751 - return false; 752 - 753 - if (req_op(rq) == REQ_OP_FLUSH) 754 - return false; 755 - 756 - if (req_op(rq) == REQ_OP_WRITE_ZEROES) 757 - return false; 758 - 759 - if (req_op(rq) == REQ_OP_ZONE_APPEND) 760 - return false; 761 - 762 - if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 763 - return false; 764 - if (rq->rq_flags & RQF_NOMERGE_FLAGS) 765 - return false; 766 - 767 - return true; 768 - } 769 - 770 - static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) 771 - { 772 - if (bio_page(a) == bio_page(b) && 773 - bio_offset(a) == bio_offset(b)) 774 - return true; 775 - 776 - return false; 777 - } 778 - 779 748 static inline unsigned int blk_queue_depth(struct request_queue *q) 780 749 { 781 750 if (q->queue_depth) ··· 997 1028 chunk_sectors -= sector_div(offset, chunk_sectors); 998 1029 999 1030 return min(q->limits.max_sectors, chunk_sectors); 1000 - } 1001 - 1002 - static inline unsigned int blk_rq_get_max_sectors(struct request *rq, 1003 - sector_t offset) 1004 - { 1005 - struct request_queue *q = rq->q; 1006 - 1007 - if (blk_rq_is_passthrough(rq)) 1008 - return q->limits.max_hw_sectors; 1009 - 1010 - if (!q->limits.chunk_sectors || 1011 - req_op(rq) == REQ_OP_DISCARD || 1012 - req_op(rq) == REQ_OP_SECURE_ERASE) 1013 - return blk_queue_get_max_sectors(q, req_op(rq)); 1014 - 1015 - return min(blk_max_size_offset(q, offset, 0), 1016 - blk_queue_get_max_sectors(q, req_op(rq))); 1017 1031 } 1018 1032 1019 1033 static inline unsigned int blk_rq_count_bios(struct request *rq) ··· 1440 1488 1441 1489 /* Turn it back into bytes, gaah */ 1442 1490 return offset << SECTOR_SHIFT; 1443 - } 1444 - 1445 - /* 1446 - * Two cases of handling DISCARD merge: 1447 - * If max_discard_segments > 1, the driver takes every bio 1448 - * as a range and send them to controller together. The ranges 1449 - * needn't to be contiguous. 1450 - * Otherwise, the bios/requests will be handled as same as 1451 - * others which should be contiguous. 1452 - */ 1453 - static inline bool blk_discard_mergable(struct request *req) 1454 - { 1455 - if (req_op(req) == REQ_OP_DISCARD && 1456 - queue_max_discard_segments(req->q) > 1) 1457 - return true; 1458 - return false. 1459 1491 } 1460 1492 1461 1493 static inline int bdev_discard_alignment(struct block_device *bdev)