Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: Convert drivers to immutable biovecs

Now that we've got a mechanism for immutable biovecs —
bi_iter.bi_bvec_done — we need to convert drivers to use primitives that
respect it instead of using the bvec array directly.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: NeilBrown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: dm-devel@redhat.com

+88 -118
+23 -27
drivers/block/umem.c
··· 108 108 * have been written 109 109 */ 110 110 struct bio *bio, *currentbio, **biotail; 111 - int current_idx; 112 - sector_t current_sector; 111 + struct bvec_iter current_iter; 113 112 114 113 struct request_queue *queue; 115 114 ··· 117 118 struct mm_dma_desc *desc; 118 119 int cnt, headcnt; 119 120 struct bio *bio, **biotail; 120 - int idx; 121 + struct bvec_iter iter; 121 122 } mm_pages[2]; 122 123 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) 123 124 ··· 343 344 dma_addr_t dma_handle; 344 345 int offset; 345 346 struct bio *bio; 346 - struct bio_vec *vec; 347 - int idx; 347 + struct bio_vec vec; 348 348 int rw; 349 - int len; 350 349 351 350 bio = card->currentbio; 352 351 if (!bio && card->bio) { 353 352 card->currentbio = card->bio; 354 - card->current_idx = card->bio->bi_iter.bi_idx; 355 - card->current_sector = card->bio->bi_iter.bi_sector; 353 + card->current_iter = card->bio->bi_iter; 356 354 card->bio = card->bio->bi_next; 357 355 if (card->bio == NULL) 358 356 card->biotail = &card->bio; ··· 358 362 } 359 363 if (!bio) 360 364 return 0; 361 - idx = card->current_idx; 362 365 363 366 rw = bio_rw(bio); 364 367 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) 365 368 return 0; 366 369 367 - vec = bio_iovec_idx(bio, idx); 368 - len = vec->bv_len; 370 + vec = bio_iter_iovec(bio, card->current_iter); 371 + 369 372 dma_handle = pci_map_page(card->dev, 370 - vec->bv_page, 371 - vec->bv_offset, 372 - len, 373 + vec.bv_page, 374 + vec.bv_offset, 375 + vec.bv_len, 373 376 (rw == READ) ? 
374 377 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); 375 378 ··· 376 381 desc = &p->desc[p->cnt]; 377 382 p->cnt++; 378 383 if (p->bio == NULL) 379 - p->idx = idx; 384 + p->iter = card->current_iter; 380 385 if ((p->biotail) != &bio->bi_next) { 381 386 *(p->biotail) = bio; 382 387 p->biotail = &(bio->bi_next); ··· 386 391 desc->data_dma_handle = dma_handle; 387 392 388 393 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); 389 - desc->local_addr = cpu_to_le64(card->current_sector << 9); 390 - desc->transfer_size = cpu_to_le32(len); 394 + desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9); 395 + desc->transfer_size = cpu_to_le32(vec.bv_len); 391 396 offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); 392 397 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); 393 398 desc->zero1 = desc->zero2 = 0; ··· 402 407 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); 403 408 desc->sem_control_bits = desc->control_bits; 404 409 405 - card->current_sector += (len >> 9); 406 - idx++; 407 - card->current_idx = idx; 408 - if (idx >= bio->bi_vcnt) 410 + 411 + bio_advance_iter(bio, &card->current_iter, vec.bv_len); 412 + if (!card->current_iter.bi_size) 409 413 card->currentbio = NULL; 410 414 411 415 return 1; ··· 433 439 struct mm_dma_desc *desc = &page->desc[page->headcnt]; 434 440 int control = le32_to_cpu(desc->sem_control_bits); 435 441 int last = 0; 436 - int idx; 442 + struct bio_vec vec; 437 443 438 444 if (!(control & DMASCR_DMA_COMPLETE)) { 439 445 control = dma_status; 440 446 last = 1; 441 447 } 448 + 442 449 page->headcnt++; 443 - idx = page->idx; 444 - page->idx++; 445 - if (page->idx >= bio->bi_vcnt) { 450 + vec = bio_iter_iovec(bio, page->iter); 451 + bio_advance_iter(bio, &page->iter, vec.bv_len); 452 + 453 + if (!page->iter.bi_size) { 446 454 page->bio = bio->bi_next; 447 455 if (page->bio) 448 - page->idx = page->bio->bi_iter.bi_idx; 456 + page->iter = page->bio->bi_iter; 449 457 } 450 458 451 459 
pci_unmap_page(card->dev, desc->data_dma_handle, 452 - bio_iovec_idx(bio, idx)->bv_len, 460 + vec.bv_len, 453 461 (control & DMASCR_TRANSFER_READ) ? 454 462 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 455 463 if (control & DMASCR_HARD_ERROR) {
+18 -31
drivers/md/dm-crypt.c
··· 39 39 struct completion restart; 40 40 struct bio *bio_in; 41 41 struct bio *bio_out; 42 - unsigned int offset_in; 43 - unsigned int offset_out; 44 - unsigned int idx_in; 45 - unsigned int idx_out; 42 + struct bvec_iter iter_in; 43 + struct bvec_iter iter_out; 46 44 sector_t cc_sector; 47 45 atomic_t cc_pending; 48 46 }; ··· 824 826 { 825 827 ctx->bio_in = bio_in; 826 828 ctx->bio_out = bio_out; 827 - ctx->offset_in = 0; 828 - ctx->offset_out = 0; 829 - ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0; 830 - ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0; 829 + if (bio_in) 830 + ctx->iter_in = bio_in->bi_iter; 831 + if (bio_out) 832 + ctx->iter_out = bio_out->bi_iter; 831 833 ctx->cc_sector = sector + cc->iv_offset; 832 834 init_completion(&ctx->restart); 833 835 } ··· 855 857 struct convert_context *ctx, 856 858 struct ablkcipher_request *req) 857 859 { 858 - struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); 859 - struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); 860 + struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); 861 + struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); 860 862 struct dm_crypt_request *dmreq; 861 863 u8 *iv; 862 864 int r; ··· 867 869 dmreq->iv_sector = ctx->cc_sector; 868 870 dmreq->ctx = ctx; 869 871 sg_init_table(&dmreq->sg_in, 1); 870 - sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, 871 - bv_in->bv_offset + ctx->offset_in); 872 + sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, 873 + bv_in.bv_offset); 872 874 873 875 sg_init_table(&dmreq->sg_out, 1); 874 - sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, 875 - bv_out->bv_offset + ctx->offset_out); 876 + sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, 877 + bv_out.bv_offset); 876 878 877 - ctx->offset_in += 1 << SECTOR_SHIFT; 878 - if (ctx->offset_in >= bv_in->bv_len) { 879 - ctx->offset_in = 0; 880 - ctx->idx_in++; 881 - } 882 - 883 - ctx->offset_out += 1 
<< SECTOR_SHIFT; 884 - if (ctx->offset_out >= bv_out->bv_len) { 885 - ctx->offset_out = 0; 886 - ctx->idx_out++; 887 - } 879 + bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); 880 + bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); 888 881 889 882 if (cc->iv_gen_ops) { 890 883 r = cc->iv_gen_ops->generator(cc, iv, dmreq); ··· 926 937 927 938 atomic_set(&ctx->cc_pending, 1); 928 939 929 - while(ctx->idx_in < ctx->bio_in->bi_vcnt && 930 - ctx->idx_out < ctx->bio_out->bi_vcnt) { 940 + while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { 931 941 932 942 crypt_alloc_req(cc, ctx); 933 943 ··· 1195 1207 } 1196 1208 1197 1209 /* crypt_convert should have filled the clone bio */ 1198 - BUG_ON(io->ctx.idx_out < clone->bi_vcnt); 1210 + BUG_ON(io->ctx.iter_out.bi_size); 1199 1211 1200 1212 clone->bi_iter.bi_sector = cc->start + io->sector; 1201 1213 ··· 1234 1246 } 1235 1247 1236 1248 io->ctx.bio_out = clone; 1237 - io->ctx.idx_out = 0; 1249 + io->ctx.iter_out = clone->bi_iter; 1238 1250 1239 1251 remaining -= clone->bi_iter.bi_size; 1240 1252 sector += bio_sectors(clone); ··· 1278 1290 crypt_inc_pending(new_io); 1279 1291 crypt_convert_init(cc, &new_io->ctx, NULL, 1280 1292 io->base_bio, sector); 1281 - new_io->ctx.idx_in = io->ctx.idx_in; 1282 - new_io->ctx.offset_in = io->ctx.offset_in; 1293 + new_io->ctx.iter_in = io->ctx.iter_in; 1283 1294 1284 1295 /* 1285 1296 * Fragments after the first use the base_io
+17 -14
drivers/md/dm-io.c
··· 201 201 /* 202 202 * Functions for getting the pages from a bvec. 203 203 */ 204 - static void bvec_get_page(struct dpages *dp, 204 + static void bio_get_page(struct dpages *dp, 205 205 struct page **p, unsigned long *len, unsigned *offset) 206 206 { 207 - struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; 208 - *p = bvec->bv_page; 209 - *len = bvec->bv_len; 210 - *offset = bvec->bv_offset; 207 + struct bio *bio = dp->context_ptr; 208 + struct bio_vec bvec = bio_iovec(bio); 209 + *p = bvec.bv_page; 210 + *len = bvec.bv_len; 211 + *offset = bvec.bv_offset; 211 212 } 212 213 213 - static void bvec_next_page(struct dpages *dp) 214 + static void bio_next_page(struct dpages *dp) 214 215 { 215 - struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; 216 - dp->context_ptr = bvec + 1; 216 + struct bio *bio = dp->context_ptr; 217 + struct bio_vec bvec = bio_iovec(bio); 218 + 219 + bio_advance(bio, bvec.bv_len); 217 220 } 218 221 219 - static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) 222 + static void bio_dp_init(struct dpages *dp, struct bio *bio) 220 223 { 221 - dp->get_page = bvec_get_page; 222 - dp->next_page = bvec_next_page; 223 - dp->context_ptr = bvec; 224 + dp->get_page = bio_get_page; 225 + dp->next_page = bio_next_page; 226 + dp->context_ptr = bio; 224 227 } 225 228 226 229 /* ··· 460 457 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); 461 458 break; 462 459 463 - case DM_IO_BVEC: 464 - bvec_dp_init(dp, io_req->mem.ptr.bvec); 460 + case DM_IO_BIO: 461 + bio_dp_init(dp, io_req->mem.ptr.bio); 465 462 break; 466 463 467 464 case DM_IO_VMA:
+4 -4
drivers/md/dm-raid1.c
··· 526 526 struct dm_io_region io; 527 527 struct dm_io_request io_req = { 528 528 .bi_rw = READ, 529 - .mem.type = DM_IO_BVEC, 530 - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, 529 + .mem.type = DM_IO_BIO, 530 + .mem.ptr.bio = bio, 531 531 .notify.fn = read_callback, 532 532 .notify.context = bio, 533 533 .client = m->ms->io_client, ··· 629 629 struct mirror *m; 630 630 struct dm_io_request io_req = { 631 631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), 632 - .mem.type = DM_IO_BVEC, 633 - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, 632 + .mem.type = DM_IO_BIO, 633 + .mem.ptr.bio = bio, 634 634 .notify.fn = write_callback, 635 635 .notify.context = bio, 636 636 .client = ms->io_client,
+13 -37
drivers/md/dm-verity.c
··· 73 73 sector_t block; 74 74 unsigned n_blocks; 75 75 76 - /* saved bio vector */ 77 - struct bio_vec *io_vec; 78 - unsigned io_vec_size; 76 + struct bvec_iter iter; 79 77 80 78 struct work_struct work; 81 - 82 - /* A space for short vectors; longer vectors are allocated separately. */ 83 - struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE]; 84 79 85 80 /* 86 81 * Three variably-size fields follow this struct: ··· 279 284 static int verity_verify_io(struct dm_verity_io *io) 280 285 { 281 286 struct dm_verity *v = io->v; 287 + struct bio *bio = dm_bio_from_per_bio_data(io, 288 + v->ti->per_bio_data_size); 282 289 unsigned b; 283 290 int i; 284 - unsigned vector = 0, offset = 0; 285 291 286 292 for (b = 0; b < io->n_blocks; b++) { 287 293 struct shash_desc *desc; ··· 332 336 } 333 337 334 338 todo = 1 << v->data_dev_block_bits; 335 - do { 336 - struct bio_vec *bv; 339 + while (io->iter.bi_size) { 337 340 u8 *page; 338 - unsigned len; 341 + struct bio_vec bv = bio_iter_iovec(bio, io->iter); 339 342 340 - BUG_ON(vector >= io->io_vec_size); 341 - bv = &io->io_vec[vector]; 342 - page = kmap_atomic(bv->bv_page); 343 - len = bv->bv_len - offset; 344 - if (likely(len >= todo)) 345 - len = todo; 346 - r = crypto_shash_update(desc, 347 - page + bv->bv_offset + offset, len); 343 + page = kmap_atomic(bv.bv_page); 344 + r = crypto_shash_update(desc, page + bv.bv_offset, 345 + bv.bv_len); 348 346 kunmap_atomic(page); 347 + 349 348 if (r < 0) { 350 349 DMERR("crypto_shash_update failed: %d", r); 351 350 return r; 352 351 } 353 - offset += len; 354 - if (likely(offset == bv->bv_len)) { 355 - offset = 0; 356 - vector++; 357 - } 358 - todo -= len; 359 - } while (todo); 352 + 353 + bio_advance_iter(bio, &io->iter, bv.bv_len); 354 + } 360 355 361 356 if (!v->version) { 362 357 r = crypto_shash_update(desc, v->salt, v->salt_size); ··· 370 383 return -EIO; 371 384 } 372 385 } 373 - BUG_ON(vector != io->io_vec_size); 374 - BUG_ON(offset); 375 386 376 387 return 0; 377 388 } ··· 384 
399 385 400 bio->bi_end_io = io->orig_bi_end_io; 386 401 bio->bi_private = io->orig_bi_private; 387 - 388 - if (io->io_vec != io->io_vec_inline) 389 - mempool_free(io->io_vec, v->vec_mempool); 390 402 391 403 bio_endio(bio, error); 392 404 } ··· 501 519 502 520 bio->bi_end_io = verity_end_io; 503 521 bio->bi_private = io; 504 - io->io_vec_size = bio_segments(bio); 505 - if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE) 506 - io->io_vec = io->io_vec_inline; 507 - else 508 - io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO); 509 - memcpy(io->io_vec, __bio_iovec(bio), 510 - io->io_vec_size * sizeof(struct bio_vec)); 522 + io->iter = bio->bi_iter; 511 523 512 524 verity_submit_prefetch(v, io); 513 525
+11 -3
fs/bio.c
··· 525 525 */ 526 526 void __bio_clone(struct bio *bio, struct bio *bio_src) 527 527 { 528 - memcpy(bio->bi_io_vec, bio_src->bi_io_vec, 529 - bio_src->bi_max_vecs * sizeof(struct bio_vec)); 528 + if (bio_is_rw(bio_src)) { 529 + struct bio_vec bv; 530 + struct bvec_iter iter; 531 + 532 + bio_for_each_segment(bv, bio_src, iter) 533 + bio->bi_io_vec[bio->bi_vcnt++] = bv; 534 + } else if (bio_has_data(bio_src)) { 535 + memcpy(bio->bi_io_vec, bio_src->bi_io_vec, 536 + bio_src->bi_max_vecs * sizeof(struct bio_vec)); 537 + bio->bi_vcnt = bio_src->bi_vcnt; 538 + } 530 539 531 540 /* 532 541 * most users will be overriding ->bi_bdev with a new target, ··· 544 535 bio->bi_bdev = bio_src->bi_bdev; 545 536 bio->bi_flags |= 1 << BIO_CLONED; 546 537 bio->bi_rw = bio_src->bi_rw; 547 - bio->bi_vcnt = bio_src->bi_vcnt; 548 538 bio->bi_iter = bio_src->bi_iter; 549 539 } 550 540 EXPORT_SYMBOL(__bio_clone);
+2 -2
include/linux/dm-io.h
··· 29 29 30 30 enum dm_io_mem_type { 31 31 DM_IO_PAGE_LIST,/* Page list */ 32 - DM_IO_BVEC, /* Bio vector */ 32 + DM_IO_BIO, /* Bio vector */ 33 33 DM_IO_VMA, /* Virtual memory area */ 34 34 DM_IO_KMEM, /* Kernel memory */ 35 35 }; ··· 41 41 42 42 union { 43 43 struct page_list *pl; 44 - struct bio_vec *bvec; 44 + struct bio *bio; 45 45 void *vma; 46 46 void *addr; 47 47 } ptr;