
block: Kill bio_segments()/bi_vcnt usage

When we start sharing biovecs, keeping bi_vcnt accurate for splits is
going to be error-prone - and unnecessary, if we refactor some code.

So bio_segments() has to go - but most of the existing users only need
to know whether the bio has multiple segments, which is easier to
answer - add a bio_multiple_segments() for them.
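
Concretely, the new helper doesn't count segments at all - it just
checks whether the bytes remaining in the iterator extend past the
current bvec (this is the definition added to include/linux/bio.h
below):

	#define bio_multiple_segments(bio) \
		((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)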

(Two of the current uses of bio_segments() are going to go away in a
couple patches, but the current implementation of bio_segments() is
unsafe as soon as we start doing driver conversions for immutable
biovecs - so implement a dumb version for bisectability; it'll go away
in a couple patches.)
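
For reference, the interim bio_segments() added below just walks the
iterator and counts - O(segments) instead of the old O(1) arithmetic,
which is acceptable for the few remaining callers:

	static inline unsigned bio_segments(struct bio *bio)
	{
		unsigned segs = 0;
		struct bio_vec bv;
		struct bvec_iter iter;

		bio_for_each_segment(bv, bio, iter)
			segs++;

		return segs;
	}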

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Neil Brown <neilb@suse.de>
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>

+93 -86
+3 -4
drivers/block/ps3disk.c
···
 
 	rq_for_each_segment(bvec, req, iter) {
 		unsigned long flags;
-		dev_dbg(&dev->sbd.core,
-			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
-			__func__, __LINE__, i, bio_segments(iter.bio),
-			bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
+		dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+			__func__, __LINE__, i, bio_sectors(iter.bio),
+			iter.bio->bi_iter.bi_sector);
 
 		size = bvec.bv_len;
 		buf = bvec_kmap_irq(&bvec, &flags);
+22 -29
drivers/md/bcache/io.c
···
 	if (bio->bi_iter.bi_idx) {
 		struct bio_vec bv;
 		struct bvec_iter iter;
-		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
+		unsigned segs = bio_segments(bio);
+		struct bio *clone = bio_alloc(GFP_NOIO, segs);
 
 		bio_for_each_segment(bv, bio, iter)
 			clone->bi_io_vec[clone->bi_vcnt++] = bv;
···
 		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 		clone->bi_bdev = bio->bi_bdev;
 		clone->bi_rw = bio->bi_rw;
-		clone->bi_vcnt = bio_segments(bio);
+		clone->bi_vcnt = segs;
 		clone->bi_iter.bi_size = bio->bi_iter.bi_size;
 
 		clone->bi_private = bio;
···
 
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-	unsigned ret = bio_sectors(bio);
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-				      queue_max_segments(q));
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned ret = 0, seg = 0;
 
 	if (bio->bi_rw & REQ_DISCARD)
-		return min(ret, q->limits.max_discard_sectors);
+		return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-	if (bio_segments(bio) > max_segments ||
-	    q->merge_bvec_fn) {
-		struct bio_vec bv;
-		struct bvec_iter iter;
-		unsigned seg = 0;
+	bio_for_each_segment(bv, bio, iter) {
+		struct bvec_merge_data bvm = {
+			.bi_bdev = bio->bi_bdev,
+			.bi_sector = bio->bi_iter.bi_sector,
+			.bi_size = ret << 9,
+			.bi_rw = bio->bi_rw,
+		};
 
-		ret = 0;
+		if (seg == min_t(unsigned, BIO_MAX_PAGES,
+				 queue_max_segments(q)))
+			break;
 
-		bio_for_each_segment(bv, bio, iter) {
-			struct bvec_merge_data bvm = {
-				.bi_bdev = bio->bi_bdev,
-				.bi_sector = bio->bi_iter.bi_sector,
-				.bi_size = ret << 9,
-				.bi_rw = bio->bi_rw,
-			};
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			break;
 
-			if (seg == max_segments)
-				break;
-
-			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-				break;
-
-			seg++;
-			ret += bv.bv_len >> 9;
-		}
+		seg++;
+		ret += bv.bv_len >> 9;
 	}
 
 	ret = min(ret, queue_max_sectors(q));
+1 -1
drivers/md/raid0.c
···
 		sector_t sector = bio->bi_iter.bi_sector;
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if (bio_segments(bio) > 1)
+		if (bio_multiple_segments(bio))
 			goto bad_map;
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
+1 -1
drivers/md/raid10.c
···
 		    || conf->prev.near_copies < conf->prev.raid_disks))) {
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if (bio_segments(bio) > 1)
+		if (bio_multiple_segments(bio))
 			goto bad_map;
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
+4 -4
drivers/message/fusion/mptsas.c
···
 	}
 
 	/* do we need to support multiple segments? */
-	if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-		       ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
-		       bio_segments(rsp->bio), blk_rq_bytes(rsp));
+	if (bio_multiple_segments(req->bio) ||
+	    bio_multiple_segments(rsp->bio)) {
+		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+		       ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
 		return -EINVAL;
 	}
 
+4 -4
drivers/scsi/libsas/sas_expander.c
···
 	}
 
 	/* do we need to support multiple segments? */
-	if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-		printk("%s: multiple segments req %u %u, rsp %u %u\n",
-		       __func__, bio_segments(req->bio), blk_rq_bytes(req),
-		       bio_segments(rsp->bio), blk_rq_bytes(rsp));
+	if (bio_multiple_segments(req->bio) ||
+	    bio_multiple_segments(rsp->bio)) {
+		printk("%s: multiple segments req %u, rsp %u\n",
+		       __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
 		return -EINVAL;
 	}
 
+5 -5
drivers/scsi/mpt2sas/mpt2sas_transport.c
···
 	ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
 	/* Check if the request is split across multiple segments */
-	if (bio_segments(req->bio) > 1) {
+	if (bio_multiple_segments(req->bio)) {
 		u32 offset = 0;
 
 		/* Allocate memory and copy the request */
···
 
 	/* Check if the response needs to be populated across
	 * multiple segments */
-	if (bio_segments(rsp->bio) > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
 		    &pci_dma_in);
 		if (!pci_addr_in) {
···
 	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	if (bio_segments(req->bio) > 1) {
+	if (bio_multiple_segments(req->bio)) {
 		ioc->base_add_sg_single(psge, sgl_flags |
 		    (blk_rq_bytes(req) - 4), pci_dma_out);
 	} else {
···
 	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
 	    MPI2_SGE_FLAGS_END_OF_LIST);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	if (bio_segments(rsp->bio) > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		ioc->base_add_sg_single(psge, sgl_flags |
 		    (blk_rq_bytes(rsp) + 4), pci_dma_in);
 	} else {
···
 	    le16_to_cpu(mpi_reply->ResponseDataLength);
 	/* check if the resp needs to be copied from the allocated
	 * pci mem */
-	if (bio_segments(rsp->bio) > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		u32 offset = 0;
 		u32 bytes_to_copy =
 		    le16_to_cpu(mpi_reply->ResponseDataLength);
+4 -4
drivers/scsi/mpt3sas/mpt3sas_transport.c
···
 	ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
 	/* Check if the request is split across multiple segments */
-	if (req->bio->bi_vcnt > 1) {
+	if (bio_multiple_segments(req->bio)) {
 		u32 offset = 0;
 
 		/* Allocate memory and copy the request */
···
 
 	/* Check if the response needs to be populated across
	 * multiple segments */
-	if (rsp->bio->bi_vcnt > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
 		    &pci_dma_in);
 		if (!pci_addr_in) {
···
 	mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
 	psge = &mpi_request->SGL;
 
-	if (req->bio->bi_vcnt > 1)
+	if (bio_multiple_segments(req->bio))
 		ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
 		    pci_dma_in, (blk_rq_bytes(rsp) + 4));
 	else
···
 
 	/* check if the resp needs to be copied from the allocated
	 * pci mem */
-	if (rsp->bio->bi_vcnt > 1) {
+	if (bio_multiple_segments(rsp->bio)) {
 		u32 offset = 0;
 		u32 bytes_to_copy =
 		    le16_to_cpu(mpi_reply->ResponseDataLength);
+1 -1
fs/bio.c
···
 	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
 			  bi->bi_iter.bi_sector + first_sectors);
 
-	BUG_ON(bio_segments(bi) > 1);
+	BUG_ON(bio_multiple_segments(bi));
 	atomic_set(&bp->cnt, 3);
 	bp->error = 0;
 	bp->bio1 = *bi;
+48 -33
include/linux/bio.h
···
 #define bio_offset(bio)	bio_iter_offset((bio), (bio)->bi_iter)
 #define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
 
-#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_iter.bi_idx)
+#define bio_multiple_segments(bio) \
+	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
 #define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
 #define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
 
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+	if (bio &&
+	    bio->bi_iter.bi_size &&
+	    !(bio->bi_rw & REQ_DISCARD))
+		return true;
+
+	return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+	if (!bio_has_data(bio))
+		return false;
+
+	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+		return false;
+
+	return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+		return false;
+
+	return true;
+}
+
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
-	if (bio->bi_vcnt)
+	if (bio_has_data(bio))
 		return bio_iovec(bio).bv_len;
 	else /* dataless requests such as discard */
 		return bio->bi_iter.bi_size;
···
 
 static inline void *bio_data(struct bio *bio)
 {
-	if (bio->bi_vcnt)
+	if (bio_has_data(bio))
 		return page_address(bio_page(bio)) + bio_offset(bio);
 
 	return NULL;
···
 	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
 
 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+	unsigned segs = 0;
+	struct bio_vec bv;
+	struct bvec_iter iter;
+
+	bio_for_each_segment(bv, bio, iter)
+		segs++;
+
+	return segs;
+}
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
···
 #define bio_kmap_irq(bio, flags) \
 	__bio_kmap_irq((bio), (bio)->bi_iter.bi_idx, (flags))
 #define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)
-
-/*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
-	if (bio && bio->bi_vcnt)
-		return true;
-
-	return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-	if (!bio_has_data(bio))
-		return false;
-
-	if (bio->bi_rw & REQ_WRITE_SAME)
-		return false;
-
-	return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
-	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
-		return false;
-
-	return true;
-}
 
 /*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
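
Usage note: the driver conversion pattern above is uniform - reject or
special-case multi-segment bios up front. A minimal sketch (the
function name is hypothetical, not part of this patch):

	/* Hypothetical helper showing the conversion pattern: fail SMP
	 * requests whose req/rsp bios span more than one segment, as the
	 * mptsas and libsas changes above do. */
	static int smp_check_single_segment(struct request *req,
					    struct request *rsp)
	{
		if (bio_multiple_segments(req->bio) ||
		    bio_multiple_segments(rsp->bio))
			return -EINVAL;

		return 0;
	}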