Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: target: iblock: split T10 PI SGL across command bios

When T10 PI is enabled on a backing device for the iblock backstore, the PI
SGL for the entire command is attached to the first bio only. This works fine
if the command is covered by a single bio, but can result in ref tag errors in
the client for the other bios in a multi-bio command, e.g.

[ 47.631236] sda: ref tag error at location 2048 (rcvd 0)
[ 47.637658] sda: ref tag error at location 4096 (rcvd 0)
[ 47.644228] sda: ref tag error at location 6144 (rcvd 0)

The command will be split into multiple bios if the number of data SG elements
exceeds BIO_MAX_PAGES (see iblock_get_bio()).

The bios may later be split again in the block layer on the host after
iblock_submit_bios(), depending on the queue limits of the backing device.
The block and SCSI layers will pass through the whole PI SGL down to the LLDD
however that first bio is split up, but the LLDD may only use the portion that
corresponds to the data length (depends on the LLDD, tested with scsi_debug).

Split the PI SGL across the bios in the command, so each bio's
bio_integrity_payload contains the protection information for the data in the
bio. Use an sg_mapping_iter to keep track of where we are in the PI SGL, so we
know where to start with the next bio.

Signed-off-by: Greg Edwards <gedwards@ddn.com>
Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Greg Edwards and committed by
Martin K. Petersen
fed564f6 2b08adff

+37 -17
+37 -17
drivers/target/target_core_iblock.c
··· 635 635 } 636 636 637 637 static int 638 - iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) 638 + iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio, 639 + struct sg_mapping_iter *miter) 639 640 { 640 641 struct se_device *dev = cmd->se_dev; 641 642 struct blk_integrity *bi; 642 643 struct bio_integrity_payload *bip; 643 644 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 644 - struct scatterlist *sg; 645 - int i, rc; 645 + int rc; 646 + size_t resid, len; 646 647 647 648 bi = bdev_get_integrity(ib_dev->ibd_bd); 648 649 if (!bi) { ··· 651 650 return -ENODEV; 652 651 } 653 652 654 - bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); 653 + bip = bio_integrity_alloc(bio, GFP_NOIO, 654 + min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES)); 655 655 if (IS_ERR(bip)) { 656 656 pr_err("Unable to allocate bio_integrity_payload\n"); 657 657 return PTR_ERR(bip); 658 658 } 659 659 660 - bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) * 661 - dev->prot_length; 662 - bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; 660 + bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); 661 + bip_set_seed(bip, bio->bi_iter.bi_sector); 663 662 664 663 pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, 665 664 (unsigned long long)bip->bip_iter.bi_sector); 666 665 667 - for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { 666 + resid = bip->bip_iter.bi_size; 667 + while (resid > 0 && sg_miter_next(miter)) { 668 668 669 - rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, 670 - sg->offset); 671 - if (rc != sg->length) { 669 + len = min_t(size_t, miter->length, resid); 670 + rc = bio_integrity_add_page(bio, miter->page, len, 671 + offset_in_page(miter->addr)); 672 + if (rc != len) { 672 673 pr_err("bio_integrity_add_page() failed; %d\n", rc); 674 + sg_miter_stop(miter); 673 675 return -ENOMEM; 674 676 } 675 677 676 - pr_debug("Added bio integrity page: %p length: %d offset; %d\n", 677 - sg_page(sg), 
sg->length, sg->offset); 678 + pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n", 679 + miter->page, len, offset_in_page(miter->addr)); 680 + 681 + resid -= len; 682 + if (len < miter->length) 683 + miter->consumed -= miter->length - len; 678 684 } 685 + sg_miter_stop(miter); 679 686 680 687 return 0; 681 688 } ··· 695 686 struct se_device *dev = cmd->se_dev; 696 687 sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); 697 688 struct iblock_req *ibr; 698 - struct bio *bio, *bio_start; 689 + struct bio *bio; 699 690 struct bio_list list; 700 691 struct scatterlist *sg; 701 692 u32 sg_num = sgl_nents; 702 693 unsigned bio_cnt; 703 - int i, op, op_flags = 0; 694 + int i, rc, op, op_flags = 0; 695 + struct sg_mapping_iter prot_miter; 704 696 705 697 if (data_direction == DMA_TO_DEVICE) { 706 698 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); ··· 736 726 if (!bio) 737 727 goto fail_free_ibr; 738 728 739 - bio_start = bio; 740 729 bio_list_init(&list); 741 730 bio_list_add(&list, bio); 742 731 743 732 refcount_set(&ibr->pending, 2); 744 733 bio_cnt = 1; 734 + 735 + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) 736 + sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents, 737 + op == REQ_OP_READ ? SG_MITER_FROM_SG : 738 + SG_MITER_TO_SG); 745 739 746 740 for_each_sg(sgl, sg, sgl_nents, i) { 747 741 /* ··· 755 741 */ 756 742 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 757 743 != sg->length) { 744 + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { 745 + rc = iblock_alloc_bip(cmd, bio, &prot_miter); 746 + if (rc) 747 + goto fail_put_bios; 748 + } 749 + 758 750 if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { 759 751 iblock_submit_bios(&list); 760 752 bio_cnt = 0; ··· 782 762 } 783 763 784 764 if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { 785 - int rc = iblock_alloc_bip(cmd, bio_start); 765 + rc = iblock_alloc_bip(cmd, bio, &prot_miter); 786 766 if (rc) 787 767 goto fail_put_bios; 788 768 }