Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm bufio: add sector start offset to dm-bufio interface

Introduce dm_bufio_set_sector_offset() interface to allow setting a
sector offset for a dm-bufio client. This is a prereq for the DM
integrity target.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Mikulas Patocka and committed by Mike Snitzer
400a0bef 9b4b5a79

+39 -19
+32 -19
drivers/md/dm-bufio.c
··· 110 110 struct rb_root buffer_tree; 111 111 wait_queue_head_t free_buffer_wait; 112 112 113 + sector_t start; 114 + 113 115 int async_write_error; 114 116 115 117 struct list_head client_list; ··· 559 557 b->bio.bi_end_io(&b->bio); 560 558 } 561 559 562 - static void use_dmio(struct dm_buffer *b, int rw, sector_t block, 563 - bio_end_io_t *end_io) 560 + static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, 561 + unsigned n_sectors, bio_end_io_t *end_io) 564 562 { 565 563 int r; 566 564 struct dm_io_request io_req = { ··· 572 570 }; 573 571 struct dm_io_region region = { 574 572 .bdev = b->c->bdev, 575 - .sector = block << b->c->sectors_per_block_bits, 576 - .count = b->c->block_size >> SECTOR_SHIFT, 573 + .sector = sector, 574 + .count = n_sectors, 577 575 }; 578 576 579 577 if (b->data_mode != DATA_MODE_VMALLOC) { ··· 608 606 end_fn(bio); 609 607 } 610 608 611 - static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, 612 - bio_end_io_t *end_io) 609 + static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, 610 + unsigned n_sectors, bio_end_io_t *end_io) 613 611 { 614 612 char *ptr; 615 613 int len; 616 614 617 615 bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); 618 - b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; 616 + b->bio.bi_iter.bi_sector = sector; 619 617 b->bio.bi_bdev = b->c->bdev; 620 618 b->bio.bi_end_io = inline_endio; 621 619 /* ··· 630 628 * If len < PAGE_SIZE the buffer doesn't cross page boundary. 631 629 */ 632 630 ptr = b->data; 633 - len = b->c->block_size; 631 + len = n_sectors << SECTOR_SHIFT; 634 632 635 633 if (len >= PAGE_SIZE) 636 634 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1)); ··· 642 640 len < PAGE_SIZE ? 
len : PAGE_SIZE, 643 641 offset_in_page(ptr))) { 644 642 BUG_ON(b->c->block_size <= PAGE_SIZE); 645 - use_dmio(b, rw, block, end_io); 643 + use_dmio(b, rw, sector, n_sectors, end_io); 646 644 return; 647 645 } 648 646 ··· 653 651 submit_bio(&b->bio); 654 652 } 655 653 656 - static void submit_io(struct dm_buffer *b, int rw, sector_t block, 657 - bio_end_io_t *end_io) 654 + static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io) 658 655 { 656 + unsigned n_sectors; 657 + sector_t sector; 658 + 659 659 if (rw == WRITE && b->c->write_callback) 660 660 b->c->write_callback(b); 661 661 662 - if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE && 662 + sector = (b->block << b->c->sectors_per_block_bits) + b->c->start; 663 + n_sectors = 1 << b->c->sectors_per_block_bits; 664 + 665 + if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) && 663 666 b->data_mode != DATA_MODE_VMALLOC) 664 - use_inline_bio(b, rw, block, end_io); 667 + use_inline_bio(b, rw, sector, n_sectors, end_io); 665 668 else 666 - use_dmio(b, rw, block, end_io); 669 + use_dmio(b, rw, sector, n_sectors, end_io); 667 670 } 668 671 669 672 /*---------------------------------------------------------------- ··· 720 713 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); 721 714 722 715 if (!write_list) 723 - submit_io(b, WRITE, b->block, write_endio); 716 + submit_io(b, WRITE, write_endio); 724 717 else 725 718 list_add_tail(&b->write_list, write_list); 726 719 } ··· 733 726 struct dm_buffer *b = 734 727 list_entry(write_list->next, struct dm_buffer, write_list); 735 728 list_del(&b->write_list); 736 - submit_io(b, WRITE, b->block, write_endio); 729 + submit_io(b, WRITE, write_endio); 737 730 cond_resched(); 738 731 } 739 732 blk_finish_plug(&plug); ··· 1101 1094 return NULL; 1102 1095 1103 1096 if (need_submit) 1104 - submit_io(b, READ, b->block, read_endio); 1097 + submit_io(b, READ, read_endio); 1105 1098 1106 1099 wait_on_bit_io(&b->state, B_READING, 
TASK_UNINTERRUPTIBLE); 1107 1100 ··· 1171 1164 dm_bufio_unlock(c); 1172 1165 1173 1166 if (need_submit) 1174 - submit_io(b, READ, b->block, read_endio); 1167 + submit_io(b, READ, read_endio); 1175 1168 dm_bufio_release(b); 1176 1169 1177 1170 cond_resched(); ··· 1412 1405 old_block = b->block; 1413 1406 __unlink_buffer(b); 1414 1407 __link_buffer(b, new_block, b->list_mode); 1415 - submit_io(b, WRITE, new_block, write_endio); 1408 + submit_io(b, WRITE, write_endio); 1416 1409 wait_on_bit_io(&b->state, B_WRITING, 1417 1410 TASK_UNINTERRUPTIBLE); 1418 1411 __unlink_buffer(b); ··· 1768 1761 kfree(c); 1769 1762 } 1770 1763 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); 1764 + 1765 + void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) 1766 + { 1767 + c->start = start; 1768 + } 1769 + EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); 1771 1770 1772 1771 static unsigned get_max_age_hz(void) 1773 1772 {
+7
drivers/md/dm-bufio.h
··· 32 32 void dm_bufio_client_destroy(struct dm_bufio_client *c); 33 33 34 34 /* 35 + * Set the sector range. 36 + * When this function is called, there must be no I/O in progress on the bufio 37 + * client. 38 + */ 39 + void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); 40 + 41 + /* 35 42 * WARNING: to avoid deadlocks, these conditions are observed: 36 43 * 37 44 * - At most one thread can hold at most "reserved_buffers" simultaneously.