Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Btrfs: fix broken free space cache after the system crashed

When we mounted the filesystem after the crash, we got the following
message:
BTRFS error (device xxx): block group xxxx has wrong amount of free space
BTRFS error (device xxx): failed to load free space cache for block group xxx

It is because we didn't update the metadata of the allocated space (in the extent
tree) until the file data was written to disk. During this time, there was
no information about the allocated space in either the extent tree or the
free space cache. When we wrote out the free space cache at this time (commit
transaction), those spaces were lost. In fact, only the free space that is
used to store the file data had this problem; the others didn't, because
their metadata is updated in the same transaction context.

There are several methods that could fix the above problem:
- track the allocated space, and write it out when we write out the free
space cache
- account the size of the allocated space that is used to store the file
data, if the size is not zero, don't write out the free space cache.

The first one is complex and may degrade performance.
This patch chooses the second method: we use a per-block-group variable to
account for the size of that allocated space. Besides that, we also introduce
a per-block-group read-write semaphore to avoid the race between
the allocation and the free space cache write-out.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>

authored by

Miao Xie and committed by
Chris Mason
e570fd27 5349d6c3

+186 -44
+11 -2
fs/btrfs/ctree.h
··· 1259 1259 spinlock_t lock; 1260 1260 u64 pinned; 1261 1261 u64 reserved; 1262 + u64 delalloc_bytes; 1262 1263 u64 bytes_super; 1263 1264 u64 flags; 1264 1265 u64 sectorsize; 1265 1266 u64 cache_generation; 1267 + 1268 + /* 1269 + * It is just used for the delayed data space allocation because 1270 + * only the data space allocation and the relative metadata update 1271 + * can be done cross the transaction. 1272 + */ 1273 + struct rw_semaphore data_rwsem; 1266 1274 1267 1275 /* for raid56, this is a full stripe, without parity */ 1268 1276 unsigned long full_stripe_len; ··· 3324 3316 struct btrfs_key *ins); 3325 3317 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 3326 3318 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 3327 - struct btrfs_key *ins, int is_data); 3319 + struct btrfs_key *ins, int is_data, int delalloc); 3328 3320 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3329 3321 struct extent_buffer *buf, int full_backref, int no_quota); 3330 3322 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, ··· 3338 3330 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 3339 3331 u64 owner, u64 offset, int no_quota); 3340 3332 3341 - int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); 3333 + int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len, 3334 + int delalloc); 3342 3335 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 3343 3336 u64 start, u64 len); 3344 3337 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+112 -31
fs/btrfs/extent-tree.c
··· 105 105 static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 106 106 int dump_block_groups); 107 107 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 108 - u64 num_bytes, int reserve); 108 + u64 num_bytes, int reserve, 109 + int delalloc); 109 110 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 110 111 u64 num_bytes); 111 112 int btrfs_pin_extent(struct btrfs_root *root, ··· 3261 3260 3262 3261 spin_lock(&block_group->lock); 3263 3262 if (block_group->cached != BTRFS_CACHE_FINISHED || 3264 - !btrfs_test_opt(root, SPACE_CACHE)) { 3263 + !btrfs_test_opt(root, SPACE_CACHE) || 3264 + block_group->delalloc_bytes) { 3265 3265 /* 3266 3266 * don't bother trying to write stuff out _if_ 3267 3267 * a) we're not cached, ··· 5615 5613 * @cache: The cache we are manipulating 5616 5614 * @num_bytes: The number of bytes in question 5617 5615 * @reserve: One of the reservation enums 5616 + * @delalloc: The blocks are allocated for the delalloc write 5618 5617 * 5619 5618 * This is called by the allocator when it reserves space, or by somebody who is 5620 5619 * freeing space that was never actually used on disk. For example if you ··· 5634 5631 * succeeds. 
5635 5632 */ 5636 5633 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 5637 - u64 num_bytes, int reserve) 5634 + u64 num_bytes, int reserve, int delalloc) 5638 5635 { 5639 5636 struct btrfs_space_info *space_info = cache->space_info; 5640 5637 int ret = 0; ··· 5653 5650 num_bytes, 0); 5654 5651 space_info->bytes_may_use -= num_bytes; 5655 5652 } 5653 + 5654 + if (delalloc) 5655 + cache->delalloc_bytes += num_bytes; 5656 5656 } 5657 5657 } else { 5658 5658 if (cache->ro) 5659 5659 space_info->bytes_readonly += num_bytes; 5660 5660 cache->reserved -= num_bytes; 5661 5661 space_info->bytes_reserved -= num_bytes; 5662 + 5663 + if (delalloc) 5664 + cache->delalloc_bytes -= num_bytes; 5662 5665 } 5663 5666 spin_unlock(&cache->lock); 5664 5667 spin_unlock(&space_info->lock); ··· 6215 6206 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 6216 6207 6217 6208 btrfs_add_free_space(cache, buf->start, buf->len); 6218 - btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE); 6209 + btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0); 6219 6210 trace_btrfs_reserved_extent_free(root, buf->start, buf->len); 6220 6211 pin = 0; 6221 6212 } ··· 6374 6365 LOOP_NO_EMPTY_SIZE = 3, 6375 6366 }; 6376 6367 6368 + static inline void 6369 + btrfs_lock_block_group(struct btrfs_block_group_cache *cache, 6370 + int delalloc) 6371 + { 6372 + if (delalloc) 6373 + down_read(&cache->data_rwsem); 6374 + } 6375 + 6376 + static inline void 6377 + btrfs_grab_block_group(struct btrfs_block_group_cache *cache, 6378 + int delalloc) 6379 + { 6380 + btrfs_get_block_group(cache); 6381 + if (delalloc) 6382 + down_read(&cache->data_rwsem); 6383 + } 6384 + 6385 + static struct btrfs_block_group_cache * 6386 + btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, 6387 + struct btrfs_free_cluster *cluster, 6388 + int delalloc) 6389 + { 6390 + struct btrfs_block_group_cache *used_bg; 6391 + bool locked = false; 6392 + again: 6393 + 
spin_lock(&cluster->refill_lock); 6394 + if (locked) { 6395 + if (used_bg == cluster->block_group) 6396 + return used_bg; 6397 + 6398 + up_read(&used_bg->data_rwsem); 6399 + btrfs_put_block_group(used_bg); 6400 + } 6401 + 6402 + used_bg = cluster->block_group; 6403 + if (!used_bg) 6404 + return NULL; 6405 + 6406 + if (used_bg == block_group) 6407 + return used_bg; 6408 + 6409 + btrfs_get_block_group(used_bg); 6410 + 6411 + if (!delalloc) 6412 + return used_bg; 6413 + 6414 + if (down_read_trylock(&used_bg->data_rwsem)) 6415 + return used_bg; 6416 + 6417 + spin_unlock(&cluster->refill_lock); 6418 + down_read(&used_bg->data_rwsem); 6419 + locked = true; 6420 + goto again; 6421 + } 6422 + 6423 + static inline void 6424 + btrfs_release_block_group(struct btrfs_block_group_cache *cache, 6425 + int delalloc) 6426 + { 6427 + if (delalloc) 6428 + up_read(&cache->data_rwsem); 6429 + btrfs_put_block_group(cache); 6430 + } 6431 + 6377 6432 /* 6378 6433 * walks the btree of allocated extents and find a hole of a given size. 
6379 6434 * The key ins is changed to record the hole: ··· 6452 6379 static noinline int find_free_extent(struct btrfs_root *orig_root, 6453 6380 u64 num_bytes, u64 empty_size, 6454 6381 u64 hint_byte, struct btrfs_key *ins, 6455 - u64 flags) 6382 + u64 flags, int delalloc) 6456 6383 { 6457 6384 int ret = 0; 6458 6385 struct btrfs_root *root = orig_root->fs_info->extent_root; ··· 6540 6467 up_read(&space_info->groups_sem); 6541 6468 } else { 6542 6469 index = get_block_group_index(block_group); 6470 + btrfs_lock_block_group(block_group, delalloc); 6543 6471 goto have_block_group; 6544 6472 } 6545 6473 } else if (block_group) { ··· 6555 6481 u64 offset; 6556 6482 int cached; 6557 6483 6558 - btrfs_get_block_group(block_group); 6484 + btrfs_grab_block_group(block_group, delalloc); 6559 6485 search_start = block_group->key.objectid; 6560 6486 6561 6487 /* ··· 6603 6529 * the refill lock keeps out other 6604 6530 * people trying to start a new cluster 6605 6531 */ 6606 - spin_lock(&last_ptr->refill_lock); 6607 - used_block_group = last_ptr->block_group; 6608 - if (used_block_group != block_group && 6609 - (!used_block_group || 6610 - used_block_group->ro || 6611 - !block_group_bits(used_block_group, flags))) 6532 + used_block_group = btrfs_lock_cluster(block_group, 6533 + last_ptr, 6534 + delalloc); 6535 + if (!used_block_group) 6612 6536 goto refill_cluster; 6613 6537 6614 - if (used_block_group != block_group) 6615 - btrfs_get_block_group(used_block_group); 6538 + if (used_block_group != block_group && 6539 + (used_block_group->ro || 6540 + !block_group_bits(used_block_group, flags))) 6541 + goto release_cluster; 6616 6542 6617 6543 offset = btrfs_alloc_from_cluster(used_block_group, 6618 6544 last_ptr, ··· 6626 6552 used_block_group, 6627 6553 search_start, num_bytes); 6628 6554 if (used_block_group != block_group) { 6629 - btrfs_put_block_group(block_group); 6555 + btrfs_release_block_group(block_group, 6556 + delalloc); 6630 6557 block_group = used_block_group; 
6631 6558 } 6632 6559 goto checks; 6633 6560 } 6634 6561 6635 6562 WARN_ON(last_ptr->block_group != used_block_group); 6636 - if (used_block_group != block_group) 6637 - btrfs_put_block_group(used_block_group); 6638 - refill_cluster: 6563 + release_cluster: 6639 6564 /* If we are on LOOP_NO_EMPTY_SIZE, we can't 6640 6565 * set up a new clusters, so lets just skip it 6641 6566 * and let the allocator find whatever block ··· 6651 6578 * succeeding in the unclustered 6652 6579 * allocation. */ 6653 6580 if (loop >= LOOP_NO_EMPTY_SIZE && 6654 - last_ptr->block_group != block_group) { 6581 + used_block_group != block_group) { 6655 6582 spin_unlock(&last_ptr->refill_lock); 6583 + btrfs_release_block_group(used_block_group, 6584 + delalloc); 6656 6585 goto unclustered_alloc; 6657 6586 } 6658 6587 ··· 6664 6589 */ 6665 6590 btrfs_return_cluster_to_free_space(NULL, last_ptr); 6666 6591 6592 + if (used_block_group != block_group) 6593 + btrfs_release_block_group(used_block_group, 6594 + delalloc); 6595 + refill_cluster: 6667 6596 if (loop >= LOOP_NO_EMPTY_SIZE) { 6668 6597 spin_unlock(&last_ptr->refill_lock); 6669 6598 goto unclustered_alloc; ··· 6775 6696 BUG_ON(offset > search_start); 6776 6697 6777 6698 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 6778 - alloc_type); 6699 + alloc_type, delalloc); 6779 6700 if (ret == -EAGAIN) { 6780 6701 btrfs_add_free_space(block_group, offset, num_bytes); 6781 6702 goto loop; ··· 6787 6708 6788 6709 trace_btrfs_reserve_extent(orig_root, block_group, 6789 6710 search_start, num_bytes); 6790 - btrfs_put_block_group(block_group); 6711 + btrfs_release_block_group(block_group, delalloc); 6791 6712 break; 6792 6713 loop: 6793 6714 failed_cluster_refill = false; 6794 6715 failed_alloc = false; 6795 6716 BUG_ON(index != get_block_group_index(block_group)); 6796 - btrfs_put_block_group(block_group); 6717 + btrfs_release_block_group(block_group, delalloc); 6797 6718 } 6798 6719 up_read(&space_info->groups_sem); 6799 6720 ··· 6906 
6827 int btrfs_reserve_extent(struct btrfs_root *root, 6907 6828 u64 num_bytes, u64 min_alloc_size, 6908 6829 u64 empty_size, u64 hint_byte, 6909 - struct btrfs_key *ins, int is_data) 6830 + struct btrfs_key *ins, int is_data, int delalloc) 6910 6831 { 6911 6832 bool final_tried = false; 6912 6833 u64 flags; ··· 6916 6837 again: 6917 6838 WARN_ON(num_bytes < root->sectorsize); 6918 6839 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins, 6919 - flags); 6840 + flags, delalloc); 6920 6841 6921 6842 if (ret == -ENOSPC) { 6922 6843 if (!final_tried && ins->offset) { ··· 6941 6862 } 6942 6863 6943 6864 static int __btrfs_free_reserved_extent(struct btrfs_root *root, 6944 - u64 start, u64 len, int pin) 6865 + u64 start, u64 len, 6866 + int pin, int delalloc) 6945 6867 { 6946 6868 struct btrfs_block_group_cache *cache; 6947 6869 int ret = 0; ··· 6961 6881 pin_down_extent(root, cache, start, len, 1); 6962 6882 else { 6963 6883 btrfs_add_free_space(cache, start, len); 6964 - btrfs_update_reserved_bytes(cache, len, RESERVE_FREE); 6884 + btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc); 6965 6885 } 6966 6886 btrfs_put_block_group(cache); 6967 6887 ··· 6971 6891 } 6972 6892 6973 6893 int btrfs_free_reserved_extent(struct btrfs_root *root, 6974 - u64 start, u64 len) 6894 + u64 start, u64 len, int delalloc) 6975 6895 { 6976 - return __btrfs_free_reserved_extent(root, start, len, 0); 6896 + return __btrfs_free_reserved_extent(root, start, len, 0, delalloc); 6977 6897 } 6978 6898 6979 6899 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 6980 6900 u64 start, u64 len) 6981 6901 { 6982 - return __btrfs_free_reserved_extent(root, start, len, 1); 6902 + return __btrfs_free_reserved_extent(root, start, len, 1, 0); 6983 6903 } 6984 6904 6985 6905 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, ··· 7194 7114 return -EINVAL; 7195 7115 7196 7116 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 7197 - 
RESERVE_ALLOC_NO_ACCOUNT); 7117 + RESERVE_ALLOC_NO_ACCOUNT, 0); 7198 7118 BUG_ON(ret); /* logic error */ 7199 7119 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 7200 7120 0, owner, offset, ins, 1); ··· 7336 7256 return ERR_CAST(block_rsv); 7337 7257 7338 7258 ret = btrfs_reserve_extent(root, blocksize, blocksize, 7339 - empty_size, hint, &ins, 0); 7259 + empty_size, hint, &ins, 0, 0); 7340 7260 if (ret) { 7341 7261 unuse_block_rsv(root->fs_info, block_rsv, blocksize); 7342 7262 return ERR_PTR(ret); ··· 8739 8659 start); 8740 8660 atomic_set(&cache->count, 1); 8741 8661 spin_lock_init(&cache->lock); 8662 + init_rwsem(&cache->data_rwsem); 8742 8663 INIT_LIST_HEAD(&cache->list); 8743 8664 INIT_LIST_HEAD(&cache->cluster_list); 8744 8665 INIT_LIST_HEAD(&cache->new_bg_list);
+33
fs/btrfs/free-space-cache.c
··· 680 680 generation = btrfs_free_space_generation(leaf, header); 681 681 btrfs_release_path(path); 682 682 683 + if (!BTRFS_I(inode)->generation) { 684 + btrfs_info(root->fs_info, 685 + "The free space cache file (%llu) is invalid. skip it\n", 686 + offset); 687 + return 0; 688 + } 689 + 683 690 if (BTRFS_I(inode)->generation != generation) { 684 691 btrfs_err(root->fs_info, 685 692 "free space inode generation (%llu) " ··· 1114 1107 if (ret) 1115 1108 return -1; 1116 1109 1110 + if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { 1111 + down_write(&block_group->data_rwsem); 1112 + spin_lock(&block_group->lock); 1113 + if (block_group->delalloc_bytes) { 1114 + block_group->disk_cache_state = BTRFS_DC_WRITTEN; 1115 + spin_unlock(&block_group->lock); 1116 + up_write(&block_group->data_rwsem); 1117 + BTRFS_I(inode)->generation = 0; 1118 + ret = 0; 1119 + goto out; 1120 + } 1121 + spin_unlock(&block_group->lock); 1122 + } 1123 + 1117 1124 /* Lock all pages first so we can lock the extent safely. 
*/ 1118 1125 io_ctl_prepare_pages(&io_ctl, inode, 0); 1119 1126 ··· 1166 1145 if (ret) 1167 1146 goto out_nospc; 1168 1147 1148 + if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) 1149 + up_write(&block_group->data_rwsem); 1169 1150 /* 1170 1151 * Release the pages and unlock the extent, we will flush 1171 1152 * them out later ··· 1196 1173 1197 1174 out_nospc: 1198 1175 cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list); 1176 + 1177 + if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) 1178 + up_write(&block_group->data_rwsem); 1179 + 1199 1180 goto out; 1200 1181 } 1201 1182 ··· 1216 1189 1217 1190 spin_lock(&block_group->lock); 1218 1191 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { 1192 + spin_unlock(&block_group->lock); 1193 + return 0; 1194 + } 1195 + 1196 + if (block_group->delalloc_bytes) { 1197 + block_group->disk_cache_state = BTRFS_DC_WRITTEN; 1219 1198 spin_unlock(&block_group->lock); 1220 1199 return 0; 1221 1200 }
+30 -11
fs/btrfs/inode.c
··· 693 693 ret = btrfs_reserve_extent(root, 694 694 async_extent->compressed_size, 695 695 async_extent->compressed_size, 696 - 0, alloc_hint, &ins, 1); 696 + 0, alloc_hint, &ins, 1, 1); 697 697 if (ret) { 698 698 int i; 699 699 ··· 794 794 out: 795 795 return ret; 796 796 out_free_reserve: 797 - btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 797 + btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 798 798 out_free: 799 799 extent_clear_unlock_delalloc(inode, async_extent->start, 800 800 async_extent->start + ··· 917 917 cur_alloc_size = disk_num_bytes; 918 918 ret = btrfs_reserve_extent(root, cur_alloc_size, 919 919 root->sectorsize, 0, alloc_hint, 920 - &ins, 1); 920 + &ins, 1, 1); 921 921 if (ret < 0) 922 922 goto out_unlock; 923 923 ··· 995 995 return ret; 996 996 997 997 out_reserve: 998 - btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 998 + btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 999 999 out_unlock: 1000 1000 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1001 1001 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | ··· 2599 2599 return NULL; 2600 2600 } 2601 2601 2602 + static void btrfs_release_delalloc_bytes(struct btrfs_root *root, 2603 + u64 start, u64 len) 2604 + { 2605 + struct btrfs_block_group_cache *cache; 2606 + 2607 + cache = btrfs_lookup_block_group(root->fs_info, start); 2608 + ASSERT(cache); 2609 + 2610 + spin_lock(&cache->lock); 2611 + cache->delalloc_bytes -= len; 2612 + spin_unlock(&cache->lock); 2613 + 2614 + btrfs_put_block_group(cache); 2615 + } 2616 + 2602 2617 /* as ordered data IO finishes, this gets called so we can finish 2603 2618 * an ordered extent if the range of bytes in the file it covers are 2604 2619 * fully written. 
··· 2713 2698 logical_len, logical_len, 2714 2699 compress_type, 0, 0, 2715 2700 BTRFS_FILE_EXTENT_REG); 2701 + if (!ret) 2702 + btrfs_release_delalloc_bytes(root, 2703 + ordered_extent->start, 2704 + ordered_extent->disk_len); 2716 2705 } 2717 2706 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 2718 2707 ordered_extent->file_offset, ordered_extent->len, ··· 2769 2750 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 2770 2751 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) 2771 2752 btrfs_free_reserved_extent(root, ordered_extent->start, 2772 - ordered_extent->disk_len); 2753 + ordered_extent->disk_len, 1); 2773 2754 } 2774 2755 2775 2756 ··· 6554 6535 6555 6536 alloc_hint = get_extent_allocation_hint(inode, start, len); 6556 6537 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0, 6557 - alloc_hint, &ins, 1); 6538 + alloc_hint, &ins, 1, 1); 6558 6539 if (ret) 6559 6540 return ERR_PTR(ret); 6560 6541 6561 6542 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, 6562 6543 ins.offset, ins.offset, ins.offset, 0); 6563 6544 if (IS_ERR(em)) { 6564 - btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 6545 + btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 6565 6546 return em; 6566 6547 } 6567 6548 6568 6549 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, 6569 6550 ins.offset, ins.offset, 0); 6570 6551 if (ret) { 6571 - btrfs_free_reserved_extent(root, ins.objectid, ins.offset); 6552 + btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 6572 6553 free_extent_map(em); 6573 6554 return ERR_PTR(ret); 6574 6555 } ··· 7456 7437 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 7457 7438 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 7458 7439 btrfs_free_reserved_extent(root, ordered->start, 7459 - ordered->disk_len); 7440 + ordered->disk_len, 1); 7460 7441 btrfs_put_ordered_extent(ordered); 7461 7442 btrfs_put_ordered_extent(ordered); 7462 7443 } ··· 8838 8819 cur_bytes = 
min(num_bytes, 256ULL * 1024 * 1024); 8839 8820 cur_bytes = max(cur_bytes, min_size); 8840 8821 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0, 8841 - *alloc_hint, &ins, 1); 8822 + *alloc_hint, &ins, 1, 0); 8842 8823 if (ret) { 8843 8824 if (own_trans) 8844 8825 btrfs_end_transaction(trans, root); ··· 8852 8833 BTRFS_FILE_EXTENT_PREALLOC); 8853 8834 if (ret) { 8854 8835 btrfs_free_reserved_extent(root, ins.objectid, 8855 - ins.offset); 8836 + ins.offset, 0); 8856 8837 btrfs_abort_transaction(trans, root, ret); 8857 8838 if (own_trans) 8858 8839 btrfs_end_transaction(trans, root);