Merge branch 'for-4.13-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
"We've identified and fixed a silent corruption (introduced by code in
the first pull), a fixup after the blk_status_t merge and two fixes to
incremental send that Filipe has been hunting for some time"

* 'for-4.13-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
Btrfs: fix unexpected return value of bio_readpage_error
btrfs: btrfs_create_repair_bio never fails, skip error handling
btrfs: cloned bios must not be iterated by bio_for_each_segment_all
Btrfs: fix write corruption due to bio cloning on raid5/6
Btrfs: incremental send, fix invalid memory access
Btrfs: incremental send, fix invalid path for link commands
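
A note on the common pattern in the bio fixes below: bio_for_each_segment_all()
walks bio->bi_io_vec directly from index 0 and is therefore only valid for a bio
that owns its biovec array; a cloned bio shares its parent's array and describes
its own window solely through bi_iter, so it must be iterated with
bio_for_each_segment(). A minimal sketch of the two styles, assuming a 4.13-era
kernel (the patches use btrfs's ASSERT(); the sketch uses the generic WARN_ON(),
and SetPageUptodate() merely stands in for the real per-page work):

#include <linux/bio.h>
#include <linux/pagemap.h>

/* Only for bios that own their biovec array, never for clones. */
static void walk_owned_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	WARN_ON(bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i)
		SetPageUptodate(bvec->bv_page);
}

/* Safe for any bio, including clones: iterates via a copy of bi_iter. */
static void walk_any_bio(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter)
		SetPageUptodate(bvec.bv_page);
}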

+88 -57
+1
fs/btrfs/compression.c
···
 	 * we have verified the checksum already, set page
 	 * checked so the end_io handlers know about it
 	 */
+	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, cb->orig_bio, i)
 		SetPageChecked(bvec->bv_page);
+1
fs/btrfs/disk-io.c
···
 	struct btrfs_root *root;
 	int i, ret = 0;

+	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, i) {
 		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
 		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
+9 -10
fs/btrfs/extent_io.c
···
 	return 0;
 }

-int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
 			   struct io_failure_record *failrec, int failed_mirror)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
···
 		btrfs_debug(fs_info,
 			"Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
 			num_copies, failrec->this_mirror, failed_mirror);
-		return 0;
+		return false;
 	}

 	/*
···
 		btrfs_debug(fs_info,
 			"Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
 			num_copies, failrec->this_mirror, failed_mirror);
-		return 0;
+		return false;
 	}

-	return 1;
+	return true;
 }

···
 	if (ret)
 		return ret;

-	ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
-	if (!ret) {
+	if (!btrfs_check_repairable(inode, failed_bio, failrec,
+				    failed_mirror)) {
 		free_io_failure(failure_tree, tree, failrec);
 		return -EIO;
 	}
···
 				      start - page_offset(page),
 				      (int)phy_offset, failed_bio->bi_end_io,
 				      NULL);
-	if (!bio) {
-		free_io_failure(failure_tree, tree, failrec);
-		return -EIO;
-	}
 	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);

 	btrfs_debug(btrfs_sb(inode->i_sb),
···
 	u64 end;
 	int i;

+	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
···
 	int ret;
 	int i;

+	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
···
 	struct extent_buffer *eb;
 	int i, done;

+	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
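
Two observations, explaining why the error handling dropped above (and the
matching hunk in inode.c below) is safe to remove: btrfs_check_repairable() is a
pure predicate and now says so in its return type, and btrfs_create_repair_bio()
gets its bio from a bioset-backed allocation, which cannot return NULL when the
gfp mask allows direct reclaim. A rough model of the allocation side, assuming
GFP_NOFS and the btrfs_bioset pointer that extent_io.c already uses;
repair_bio_alloc() is a made-up name, not the tree's helper:

/* In the tree, btrfs_bioset is local to extent_io.c; declared here only
 * to keep the sketch self-contained. */
extern struct bio_set *btrfs_bioset;

/* bio_alloc_bioset() with a bio_set and a gfp mask that allows direct
 * reclaim (GFP_NOFS does) never returns NULL; it waits on the bioset's
 * mempool until a bio becomes available. */
static struct bio *repair_bio_alloc(unsigned int nr_vecs)
{
	return bio_alloc_bioset(GFP_NOFS, nr_vecs, btrfs_bioset);
}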
+2 -2
fs/btrfs/extent_io.h
···
 		     u64 end);
 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
 				struct io_failure_record **failrec_ret);
-int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
-			   struct io_failure_record *failrec, int fail_mirror);
+bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+			    struct io_failure_record *failrec, int fail_mirror);
 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 				    struct io_failure_record *failrec,
 				    struct page *page, int pg_offset, int icsum,
+2 -4
fs/btrfs/inode.c
···
 	isector >>= inode->i_sb->s_blocksize_bits;
 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 				pgoff, isector, repair_endio, repair_arg);
-	if (!bio) {
-		free_io_failure(failure_tree, io_tree, failrec);
-		return -EIO;
-	}
 	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);

 	btrfs_debug(BTRFS_I(inode)->root->fs_info,
···
 	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));

 	done->uptodate = 1;
+	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, i)
 		clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
 				 io_tree, done->start, bvec->bv_page,
···
 	io_tree = &BTRFS_I(inode)->io_tree;
 	failure_tree = &BTRFS_I(inode)->io_failure_tree;

+	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, i) {
 		ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
 					     bvec->bv_offset, done->start,
+18 -8
fs/btrfs/raid56.c
···
 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 {
 	struct bio *bio;
-	struct bio_vec *bvec;
 	u64 start;
 	unsigned long stripe_offset;
 	unsigned long page_index;
-	int i;

 	spin_lock_irq(&rbio->bio_list_lock);
 	bio_list_for_each(bio, &rbio->bio_list) {
+		struct bio_vec bvec;
+		struct bvec_iter iter;
+		int i = 0;
+
 		start = (u64)bio->bi_iter.bi_sector << 9;
 		stripe_offset = start - rbio->bbio->raid_map[0];
 		page_index = stripe_offset >> PAGE_SHIFT;

-		bio_for_each_segment_all(bvec, bio, i)
-			rbio->bio_pages[page_index + i] = bvec->bv_page;
+		if (bio_flagged(bio, BIO_CLONED))
+			bio->bi_iter = btrfs_io_bio(bio)->iter;
+
+		bio_for_each_segment(bvec, bio, iter) {
+			rbio->bio_pages[page_index + i] = bvec.bv_page;
+			i++;
+		}
 	}
 	spin_unlock_irq(&rbio->bio_list_lock);
 }
···
  */
 static void set_bio_pages_uptodate(struct bio *bio)
 {
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;

-	bio_for_each_segment_all(bvec, bio, i)
-		SetPageUptodate(bvec->bv_page);
+	if (bio_flagged(bio, BIO_CLONED))
+		bio->bi_iter = btrfs_io_bio(bio)->iter;
+
+	bio_for_each_segment(bvec, bio, iter)
+		SetPageUptodate(bvec.bv_page);
 }

 /*
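
The raid56.c hunks also reset bi_iter before iterating because an end_io handler
cannot rely on the iterator of a completed bio: the driver advances bi_iter while
finishing the request. The cloning side of this series therefore saves the
iterator in struct btrfs_io_bio at clone time, and the end_io paths above restore
it. A sketch of the saving side, assuming btrfs_bioset, GFP_NOFS and the
btrfs_io_bio() helper from volumes.h; this approximates what btrfs_bio_clone()
does and is not a verbatim copy:

/* Sketch (not the tree's btrfs_bio_clone()): clone a bio for a lower layer
 * and remember the clone's starting iterator so end_io code can rewind to
 * it after the driver has consumed bi_iter. */
static struct bio *clone_and_save_iter(struct bio *orig)
{
	struct bio *new;

	/* backed by a bioset, so this does not fail with GFP_NOFS */
	new = bio_clone_fast(orig, GFP_NOFS, btrfs_bioset);
	btrfs_io_bio(new)->iter = new->bi_iter;
	return new;
}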
+55 -33
fs/btrfs/send.c
···
  */
 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
 			      const char *name, int name_len,
-			      u64 *who_ino, u64 *who_gen)
+			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
 {
 	int ret = 0;
 	u64 gen;
···
 	if (other_inode > sctx->send_progress ||
 	    is_waiting_for_move(sctx, other_inode)) {
 		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
-				who_gen, NULL, NULL, NULL, NULL);
+				who_gen, who_mode, NULL, NULL, NULL);
 		if (ret < 0)
 			goto out;

···
 	return ret;
 }

+static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
+{
+	int ret;
+	struct fs_path *new_path;
+
+	/*
+	 * Our reference's name member points to its full_path member string, so
+	 * we use here a new path.
+	 */
+	new_path = fs_path_alloc();
+	if (!new_path)
+		return -ENOMEM;
+
+	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
+	if (ret < 0) {
+		fs_path_free(new_path);
+		return ret;
+	}
+	ret = fs_path_add(new_path, ref->name, ref->name_len);
+	if (ret < 0) {
+		fs_path_free(new_path);
+		return ret;
+	}
+
+	fs_path_free(ref->full_path);
+	set_ref_path(ref, new_path);
+
+	return 0;
+}
+
 /*
  * This does all the move/link/unlink/rmdir magic.
  */
···
 	struct fs_path *valid_path = NULL;
 	u64 ow_inode = 0;
 	u64 ow_gen;
+	u64 ow_mode;
 	int did_overwrite = 0;
 	int is_orphan = 0;
 	u64 last_dir_ino_rm = 0;
 	bool can_rename = true;
+	bool orphanized_dir = false;
 	bool orphanized_ancestor = false;

 	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
···
 		 */
 		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
 				cur->name, cur->name_len,
-				&ow_inode, &ow_gen);
+				&ow_inode, &ow_gen, &ow_mode);
 		if (ret < 0)
 			goto out;
 		if (ret) {
···
 						cur->full_path);
 			if (ret < 0)
 				goto out;
+			if (S_ISDIR(ow_mode))
+				orphanized_dir = true;

 			/*
 			 * If ow_inode has its rename operation delayed
···
 				if (ret < 0)
 					goto out;
 			} else {
+				/*
+				 * We might have previously orphanized an inode
+				 * which is an ancestor of our current inode,
+				 * so our reference's full path, which was
+				 * computed before any such orphanizations, must
+				 * be updated.
+				 */
+				if (orphanized_dir) {
+					ret = update_ref_path(sctx, cur);
+					if (ret < 0)
+						goto out;
+				}
 				ret = send_link(sctx, cur->full_path,
 						valid_path);
 				if (ret < 0)
···
 			 * ancestor inode.
 			 */
 			if (orphanized_ancestor) {
-				struct fs_path *new_path;
-
-				/*
-				 * Our reference's name member points to
-				 * its full_path member string, so we
-				 * use here a new path.
-				 */
-				new_path = fs_path_alloc();
-				if (!new_path) {
-					ret = -ENOMEM;
+				ret = update_ref_path(sctx, cur);
+				if (ret < 0)
 					goto out;
-				}
-				ret = get_cur_path(sctx, cur->dir,
-						cur->dir_gen,
-						new_path);
-				if (ret < 0) {
-					fs_path_free(new_path);
-					goto out;
-				}
-				ret = fs_path_add(new_path,
-						cur->name,
-						cur->name_len);
-				if (ret < 0) {
-					fs_path_free(new_path);
-					goto out;
-				}
-				fs_path_free(cur->full_path);
-				set_ref_path(cur, new_path);
 			}
 			ret = send_unlink(sctx, cur->full_path);
 			if (ret < 0)
···
 			goto out;
 		}

-		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
 		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
 			right_len = btrfs_file_extent_inline_len(eb, slot, ei);
 			right_len = PAGE_ALIGN(right_len);
 		} else {
 			right_len = btrfs_file_extent_num_bytes(eb, ei);
 		}
-		right_offset = btrfs_file_extent_offset(eb, ei);
-		right_gen = btrfs_file_extent_generation(eb, ei);

 		/*
 		 * Are we at extent 8? If yes, we know the extent is changed.
···
 			ret = 0;
 			goto out;
 		}
+
+		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+		right_offset = btrfs_file_extent_offset(eb, ei);
+		right_gen = btrfs_file_extent_generation(eb, ei);

 		left_offset_fixed = left_offset;
 		if (key.offset < ekey->offset) {
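
The last two send.c hunks are the "invalid memory access" fix: for an inline
extent, the fixed part of a btrfs_file_extent_item ends right after its type byte
and the inline data follows, so the disk_bytenr/offset/generation fields are
simply not present, and reading them can run past the item in the leaf. Moving
those reads below the inline-extent bail-out avoids that. For reference, the
on-disk layout is roughly this (field comments abridged from ctree.h of this
era):

struct btrfs_file_extent_item {
	__le64 generation;
	__le64 ram_bytes;
	u8 compression;
	u8 encryption;
	__le16 other_encoding;
	u8 type;
	/*
	 * For BTRFS_FILE_EXTENT_INLINE the data starts here and the fields
	 * below are absent, so helpers such as
	 * btrfs_file_extent_disk_bytenr() must not be used on it.
	 */
	__le64 disk_bytenr;
	__le64 disk_num_bytes;
	__le64 offset;
	__le64 num_bytes;
} __attribute__ ((__packed__));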