Merge branch 'for-4.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fix from David Sterba:
"We have one more fixup that stems from the blk_status_t conversion
that did not quite cover everything.

The normal cases were not affected because the code is 0, but any
error and retries could mix up new and old values"

* 'for-4.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
Btrfs: fix blk_status_t/errno confusion
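The mix-up is a plain C type hazard: blk_status_t is a u8 holding the BLK_STS_* codes (BLK_STS_OK is 0, BLK_STS_IOERR is 10 in v4.13's blk_types.h), while the old convention used negative errnos, so the two only agree on "success is 0". Below is a minimal userland sketch of the failure mode, not kernel code; the demo_* helpers are made-up stand-ins for the btrfs paths touched by this patch, and in the kernel the real boundary conversion is done by errno_to_blk_status()/blk_status_to_errno():

/* Minimal userland sketch; the type and constants mirror v4.13 blk_types.h. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

typedef uint8_t blk_status_t;           /* kernel: typedef u8 __bitwise blk_status_t */
#define BLK_STS_OK      ((blk_status_t)0)
#define BLK_STS_IOERR   ((blk_status_t)10)

/* Buggy pattern before the fix: declared blk_status_t, but returns an errno. */
static blk_status_t demo_submit_broken(int fail)
{
        if (fail)
                return -EIO;    /* -5 truncated to u8 == 251, matches no BLK_STS_* code */
        return 0;               /* success only works because both conventions use 0 */
}

/* Fixed pattern: convert once at the boundary, then stay in blk_status_t. */
static blk_status_t demo_errno_to_blk_status(int err)
{
        return err ? BLK_STS_IOERR : BLK_STS_OK;  /* crude stand-in for errno_to_blk_status() */
}

int main(void)
{
        blk_status_t s = demo_submit_broken(1);

        /* Any retry logic comparing against BLK_STS_IOERR never matches. */
        printf("broken: got %d, expected BLK_STS_IOERR == %d\n", s, BLK_STS_IOERR);
        printf("fixed:  got %d\n", demo_errno_to_blk_status(-EIO));
        return 0;
}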

 fs/btrfs/disk-io.c |  4 ++--
 fs/btrfs/inode.c   | 70 +++++++++++++++++++++++++--------------------
 fs/btrfs/raid56.c  | 34 +++++++++++++++----------------
 fs/btrfs/volumes.c | 10 +++++-----
 fs/btrfs/volumes.h |  6 +++---
 5 files changed, 64 insertions(+), 60 deletions(-)

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3516,7 +3516,7 @@
         struct bio *bio = device->flush_bio;
 
         if (!device->flush_bio_sent)
-                return 0;
+                return BLK_STS_OK;
 
         device->flush_bio_sent = 0;
         wait_for_completion_io(&device->flush_wait);
@@ -3563,7 +3563,7 @@
                         continue;
 
                 write_dev_flush(dev);
-                dev->last_flush_error = 0;
+                dev->last_flush_error = BLK_STS_OK;
         }
 
         /* wait for all the barriers */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7924,11 +7924,12 @@
         return ret;
 }
 
-static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
-                                        int mirror_num)
+static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
+                                                 struct bio *bio,
+                                                 int mirror_num)
 {
         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-        int ret;
+        blk_status_t ret;
 
         BUG_ON(bio_op(bio) == REQ_OP_WRITE);
 
@@ -7981,10 +7980,10 @@
         return 1;
 }
 
-static int dio_read_error(struct inode *inode, struct bio *failed_bio,
-                        struct page *page, unsigned int pgoff,
-                        u64 start, u64 end, int failed_mirror,
-                        bio_end_io_t *repair_endio, void *repair_arg)
+static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
+                struct page *page, unsigned int pgoff,
+                u64 start, u64 end, int failed_mirror,
+                bio_end_io_t *repair_endio, void *repair_arg)
 {
         struct io_failure_record *failrec;
         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -7994,18 +7993,19 @@
         int read_mode = 0;
         int segs;
         int ret;
+        blk_status_t status;
 
         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
 
         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
         if (ret)
-                return ret;
+                return errno_to_blk_status(ret);
 
         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
                                          failed_mirror);
         if (!ret) {
                 free_io_failure(failure_tree, io_tree, failrec);
-                return -EIO;
+                return BLK_STS_IOERR;
         }
 
         segs = bio_segments(failed_bio);
@@ -8024,13 +8022,13 @@
                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
                     read_mode, failrec->this_mirror, failrec->in_validation);
 
-        ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
-        if (ret) {
+        status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
+        if (status) {
                 free_io_failure(failure_tree, io_tree, failrec);
                 bio_put(bio);
         }
 
-        return ret;
+        return status;
 }
 
 struct btrfs_retry_complete {
@@ -8067,8 +8065,8 @@
         bio_put(bio);
 }
 
-static int __btrfs_correct_data_nocsum(struct inode *inode,
-                                       struct btrfs_io_bio *io_bio)
+static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
+                                                struct btrfs_io_bio *io_bio)
 {
         struct btrfs_fs_info *fs_info;
         struct bio_vec bvec;
@@ -8078,8 +8076,8 @@
         unsigned int pgoff;
         u32 sectorsize;
         int nr_sectors;
-        int ret;
-        int err = 0;
+        blk_status_t ret;
+        blk_status_t err = BLK_STS_OK;
 
         fs_info = BTRFS_I(inode)->root->fs_info;
         sectorsize = fs_info->sectorsize;
@@ -8185,11 +8183,12 @@
         int csum_pos;
         bool uptodate = (err == 0);
         int ret;
+        blk_status_t status;
 
         fs_info = BTRFS_I(inode)->root->fs_info;
         sectorsize = fs_info->sectorsize;
 
-        err = 0;
+        err = BLK_STS_OK;
         start = io_bio->logical;
         done.inode = inode;
         io_bio->bio.bi_iter = io_bio->iter;
@@ -8212,12 +8209,12 @@
                 done.start = start;
                 init_completion(&done.done);
 
-                ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
-                                pgoff, start, start + sectorsize - 1,
-                                io_bio->mirror_num,
-                                btrfs_retry_endio, &done);
-                if (ret) {
-                        err = errno_to_blk_status(ret);
+                status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
+                                        pgoff, start, start + sectorsize - 1,
+                                        io_bio->mirror_num, btrfs_retry_endio,
+                                        &done);
+                if (status) {
+                        err = status;
                         goto next;
                 }
 
@@ -8253,7 +8250,7 @@
                 if (unlikely(err))
                         return __btrfs_correct_data_nocsum(inode, io_bio);
                 else
-                        return 0;
+                        return BLK_STS_OK;
         } else {
                 return __btrfs_subio_endio_read(inode, io_bio, err);
         }
@@ -8426,9 +8423,9 @@
         return 0;
 }
 
-static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
-                                         u64 file_offset, int skip_sum,
-                                         int async_submit)
+static inline blk_status_t
+__btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
+                       int skip_sum, int async_submit)
 {
         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
         struct btrfs_dio_private *dip = bio->bi_private;
@@ -8491,6 +8488,7 @@
         int clone_offset = 0;
         int clone_len;
         int ret;
+        blk_status_t status;
 
         map_length = orig_bio->bi_iter.bi_size;
         submit_len = map_length;
@@ -8541,9 +8537,9 @@
                  */
                 atomic_inc(&dip->pending_bios);
 
-                ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
-                                             async_submit);
-                if (ret) {
+                status = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
+                                                async_submit);
+                if (status) {
                         bio_put(bio);
                         atomic_dec(&dip->pending_bios);
                         goto out_err;
@@ -8561,9 +8557,9 @@
         } while (submit_len > 0);
 
 submit:
-        ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
-                                     async_submit);
-        if (!ret)
+        status = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
+                                        async_submit);
+        if (!status)
                 return 0;
 
         bio_put(bio);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -905,7 +905,7 @@
         if (!atomic_dec_and_test(&rbio->stripes_pending))
                 return;
 
-        err = 0;
+        err = BLK_STS_OK;
 
         /* OK, we have read all the stripes we need to. */
         max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
@@ -1324,7 +1324,7 @@
         return;
 
 cleanup:
-        rbio_orig_end_io(rbio, -EIO);
+        rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 /*
@@ -1475,7 +1475,7 @@
 
 cleanup:
 
-        rbio_orig_end_io(rbio, -EIO);
+        rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
@@ -1579,7 +1579,7 @@
         return 0;
 
 cleanup:
-        rbio_orig_end_io(rbio, -EIO);
+        rbio_orig_end_io(rbio, BLK_STS_IOERR);
         return -EIO;
 
 finish:
@@ -1795,12 +1795,12 @@
         void **pointers;
         int faila = -1, failb = -1;
         struct page *page;
-        int err;
+        blk_status_t err;
         int i;
 
         pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
         if (!pointers) {
-                err = -ENOMEM;
+                err = BLK_STS_RESOURCE;
                 goto cleanup_io;
         }
 
@@ -1856,7 +1856,7 @@
                                          * a bad data or Q stripe.
                                          * TODO, we should redo the xor here.
                                          */
-                                        err = -EIO;
+                                        err = BLK_STS_IOERR;
                                         goto cleanup;
                                 }
                                 /*
@@ -1882,7 +1882,7 @@
                         if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
                                 if (rbio->bbio->raid_map[faila] ==
                                     RAID5_P_STRIPE) {
-                                        err = -EIO;
+                                        err = BLK_STS_IOERR;
                                         goto cleanup;
                                 }
                                 /*
@@ -1954,21 +1954,21 @@
                 }
         }
 
-        err = 0;
+        err = BLK_STS_OK;
 cleanup:
         kfree(pointers);
 
 cleanup_io:
         if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
-                if (err == 0)
+                if (err == BLK_STS_OK)
                         cache_rbio_pages(rbio);
                 else
                         clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 
                 rbio_orig_end_io(rbio, err);
         } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
                 rbio_orig_end_io(rbio, err);
-        } else if (err == 0) {
+        } else if (err == BLK_STS_OK) {
                 rbio->faila = -1;
                 rbio->failb = -1;
 
@@ -2005,7 +2005,7 @@
                 return;
 
         if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
-                rbio_orig_end_io(rbio, -EIO);
+                rbio_orig_end_io(rbio, BLK_STS_IOERR);
         else
                 __raid_recover_end_io(rbio);
 }
@@ -2104,7 +2104,7 @@
 cleanup:
         if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
             rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
-                rbio_orig_end_io(rbio, -EIO);
+                rbio_orig_end_io(rbio, BLK_STS_IOERR);
         return -EIO;
 }
 
@@ -2431,7 +2431,7 @@
         nr_data = bio_list_size(&bio_list);
         if (!nr_data) {
                 /* Every parity is right */
-                rbio_orig_end_io(rbio, 0);
+                rbio_orig_end_io(rbio, BLK_STS_OK);
                 return;
         }
 
@@ -2451,7 +2451,7 @@
         return;
 
 cleanup:
-        rbio_orig_end_io(rbio, -EIO);
+        rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
@@ -2519,7 +2519,7 @@
         return;
 
 cleanup:
-        rbio_orig_end_io(rbio, -EIO);
+        rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 /*
@@ -2633,7 +2633,7 @@
         return;
 
 cleanup:
-        rbio_orig_end_io(rbio, -EIO);
+        rbio_orig_end_io(rbio, BLK_STS_IOERR);
         return;
 
 finish:
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6212,8 +6212,8 @@
         }
 }
 
-int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
-                  int mirror_num, int async_submit)
+blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+                           int mirror_num, int async_submit)
 {
         struct btrfs_device *dev;
         struct bio *first_bio = bio;
@@ -6233,7 +6233,7 @@
                                 &map_length, &bbio, mirror_num, 1);
         if (ret) {
                 btrfs_bio_counter_dec(fs_info);
-                return ret;
+                return errno_to_blk_status(ret);
         }
 
         total_devs = bbio->num_stripes;
@@ -6256,7 +6256,7 @@
                 }
 
                 btrfs_bio_counter_dec(fs_info);
-                return ret;
+                return errno_to_blk_status(ret);
         }
 
         if (map_length < length) {
@@ -6283,7 +6283,7 @@
                                   dev_nr, async_submit);
         }
         btrfs_bio_counter_dec(fs_info);
-        return 0;
+        return BLK_STS_OK;
 }
 
 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -74,7 +74,7 @@
         int missing;
         int can_discard;
         int is_tgtdev_for_dev_replace;
-        int last_flush_error;
+        blk_status_t last_flush_error;
         int flush_bio_sent;
 
 #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
@@ -416,8 +416,8 @@
                       struct btrfs_fs_info *fs_info, u64 type);
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
-                  int mirror_num, int async_submit);
+blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+                           int mirror_num, int async_submit);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                        fmode_t flags, void *holder);
 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
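For context on the prototype change just above: with btrfs_map_bio() returning blk_status_t, a caller can propagate the result straight into bio->bi_status and complete the bio, with no errno conversion in between. A rough sketch of that caller-side idiom (a fragment for illustration, not the exact btrfs caller code):

        blk_status_t ret;

        ret = btrfs_map_bio(fs_info, bio, mirror_num, async_submit);
        if (ret) {
                /* ret is already a block layer status code, store and complete */
                bio->bi_status = ret;
                bio_endio(bio);
        }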