Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: get rid of block group caching progress logic

struct btrfs_caching_ctl::progress and struct
btrfs_block_group::last_byte_to_unpin were previously needed to ensure
that unpin_extent_range() didn't return a range to the free space cache
before the caching thread had a chance to cache that range. However, the
commit "btrfs: fix space cache corruption and potential double
allocations" made it so that we always synchronously cache the block
group at the time that we pin the extent, so this machinery is no longer
necessary.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: David Sterba <dsterba@suse.com>

Authored by Omar Sandoval; committed by David Sterba.

Commit: 48ff7083 (parent: 9ed0a72e)

+2 -72
-13
fs/btrfs/block-group.c
··· 593 593 594 594 if (need_resched() || 595 595 rwsem_is_contended(&fs_info->commit_root_sem)) { 596 - if (wakeup) 597 - caching_ctl->progress = last; 598 596 btrfs_release_path(path); 599 597 up_read(&fs_info->commit_root_sem); 600 598 mutex_unlock(&caching_ctl->mutex); ··· 616 618 key.objectid = last; 617 619 key.offset = 0; 618 620 key.type = BTRFS_EXTENT_ITEM_KEY; 619 - 620 - if (wakeup) 621 - caching_ctl->progress = last; 622 621 btrfs_release_path(path); 623 622 goto next; 624 623 } ··· 650 655 651 656 total_found += add_new_free_space(block_group, last, 652 657 block_group->start + block_group->length); 653 - caching_ctl->progress = (u64)-1; 654 658 655 659 out: 656 660 btrfs_free_path(path); ··· 719 725 } 720 726 #endif 721 727 722 - caching_ctl->progress = (u64)-1; 723 - 724 728 up_read(&fs_info->commit_root_sem); 725 729 btrfs_free_excluded_extents(block_group); 726 730 mutex_unlock(&caching_ctl->mutex); ··· 747 755 mutex_init(&caching_ctl->mutex); 748 756 init_waitqueue_head(&caching_ctl->wait); 749 757 caching_ctl->block_group = cache; 750 - caching_ctl->progress = cache->start; 751 758 refcount_set(&caching_ctl->count, 2); 752 759 btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); 753 760 ··· 2066 2075 /* Should not have any excluded extents. Just in case, though. 
*/ 2067 2076 btrfs_free_excluded_extents(cache); 2068 2077 } else if (cache->length == cache->used) { 2069 - cache->last_byte_to_unpin = (u64)-1; 2070 2078 cache->cached = BTRFS_CACHE_FINISHED; 2071 2079 btrfs_free_excluded_extents(cache); 2072 2080 } else if (cache->used == 0) { 2073 - cache->last_byte_to_unpin = (u64)-1; 2074 2081 cache->cached = BTRFS_CACHE_FINISHED; 2075 2082 add_new_free_space(cache, cache->start, 2076 2083 cache->start + cache->length); ··· 2124 2135 /* Fill dummy cache as FULL */ 2125 2136 bg->length = em->len; 2126 2137 bg->flags = map->type; 2127 - bg->last_byte_to_unpin = (u64)-1; 2128 2138 bg->cached = BTRFS_CACHE_FINISHED; 2129 2139 bg->used = em->len; 2130 2140 bg->flags = map->type; ··· 2469 2481 set_free_space_tree_thresholds(cache); 2470 2482 cache->used = bytes_used; 2471 2483 cache->flags = type; 2472 - cache->last_byte_to_unpin = (u64)-1; 2473 2484 cache->cached = BTRFS_CACHE_FINISHED; 2474 2485 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2475 2486
-2
fs/btrfs/block-group.h
··· 63 63 wait_queue_head_t wait; 64 64 struct btrfs_work work; 65 65 struct btrfs_block_group *block_group; 66 - u64 progress; 67 66 refcount_t count; 68 67 }; 69 68 ··· 114 115 /* Cache tracking stuff */ 115 116 int cached; 116 117 struct btrfs_caching_control *caching_ctl; 117 - u64 last_byte_to_unpin; 118 118 119 119 struct btrfs_space_info *space_info; 120 120
+2 -7
fs/btrfs/extent-tree.c
··· 2686 2686 len = cache->start + cache->length - start; 2687 2687 len = min(len, end + 1 - start); 2688 2688 2689 - down_read(&fs_info->commit_root_sem); 2690 - if (start < cache->last_byte_to_unpin && return_free_space) { 2691 - u64 add_len = min(len, cache->last_byte_to_unpin - start); 2692 - 2693 - btrfs_add_free_space(cache, start, add_len); 2694 - } 2695 - up_read(&fs_info->commit_root_sem); 2689 + if (return_free_space) 2690 + btrfs_add_free_space(cache, start, len); 2696 2691 2697 2692 start += len; 2698 2693 total_unpinned += len;
-8
fs/btrfs/free-space-tree.c
··· 1453 1453 ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY); 1454 1454 ASSERT(key.objectid < end && key.objectid + key.offset <= end); 1455 1455 1456 - caching_ctl->progress = key.objectid; 1457 - 1458 1456 offset = key.objectid; 1459 1457 while (offset < key.objectid + key.offset) { 1460 1458 bit = free_space_test_bit(block_group, path, offset); ··· 1487 1489 ret = -EIO; 1488 1490 goto out; 1489 1491 } 1490 - 1491 - caching_ctl->progress = (u64)-1; 1492 1492 1493 1493 ret = 0; 1494 1494 out: ··· 1527 1531 ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY); 1528 1532 ASSERT(key.objectid < end && key.objectid + key.offset <= end); 1529 1533 1530 - caching_ctl->progress = key.objectid; 1531 - 1532 1534 total_found += add_new_free_space(block_group, key.objectid, 1533 1535 key.objectid + key.offset); 1534 1536 if (total_found > CACHING_CTL_WAKE_UP) { ··· 1545 1551 ret = -EIO; 1546 1552 goto out; 1547 1553 } 1548 - 1549 - caching_ctl->progress = (u64)-1; 1550 1554 1551 1555 ret = 0; 1552 1556 out:
-41
fs/btrfs/transaction.c
··· 161 161 struct btrfs_transaction *cur_trans = trans->transaction; 162 162 struct btrfs_fs_info *fs_info = trans->fs_info; 163 163 struct btrfs_root *root, *tmp; 164 - struct btrfs_caching_control *caching_ctl, *next; 165 164 166 165 /* 167 166 * At this point no one can be using this transaction to modify any tree ··· 195 196 } 196 197 spin_unlock(&cur_trans->dropped_roots_lock); 197 198 198 - /* 199 - * We have to update the last_byte_to_unpin under the commit_root_sem, 200 - * at the same time we swap out the commit roots. 201 - * 202 - * This is because we must have a real view of the last spot the caching 203 - * kthreads were while caching. Consider the following views of the 204 - * extent tree for a block group 205 - * 206 - * commit root 207 - * +----+----+----+----+----+----+----+ 208 - * |\\\\| |\\\\|\\\\| |\\\\|\\\\| 209 - * +----+----+----+----+----+----+----+ 210 - * 0 1 2 3 4 5 6 7 211 - * 212 - * new commit root 213 - * +----+----+----+----+----+----+----+ 214 - * | | | |\\\\| | |\\\\| 215 - * +----+----+----+----+----+----+----+ 216 - * 0 1 2 3 4 5 6 7 217 - * 218 - * If the cache_ctl->progress was at 3, then we are only allowed to 219 - * unpin [0,1) and [2,3], because the caching thread has already 220 - * processed those extents. We are not allowed to unpin [5,6), because 221 - * the caching thread will re-start it's search from 3, and thus find 222 - * the hole from [4,6) to add to the free space cache. 
223 - */ 224 - write_lock(&fs_info->block_group_cache_lock); 225 - list_for_each_entry_safe(caching_ctl, next, 226 - &fs_info->caching_block_groups, list) { 227 - struct btrfs_block_group *cache = caching_ctl->block_group; 228 - 229 - if (btrfs_block_group_done(cache)) { 230 - cache->last_byte_to_unpin = (u64)-1; 231 - list_del_init(&caching_ctl->list); 232 - btrfs_put_caching_control(caching_ctl); 233 - } else { 234 - cache->last_byte_to_unpin = caching_ctl->progress; 235 - } 236 - } 237 - write_unlock(&fs_info->block_group_cache_lock); 238 199 up_write(&fs_info->commit_root_sem); 239 200 } 240 201
-1
fs/btrfs/zoned.c
··· 1566 1566 free = cache->zone_capacity - cache->alloc_offset; 1567 1567 1568 1568 /* We only need ->free_space in ALLOC_SEQ block groups */ 1569 - cache->last_byte_to_unpin = (u64)-1; 1570 1569 cache->cached = BTRFS_CACHE_FINISHED; 1571 1570 cache->free_space_ctl->free_space = free; 1572 1571 cache->zone_unusable = unusable;