Merge tag 'for-6.11-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull more btrfs fixes from David Sterba:
"A more fixes. We got reports that shrinker added in 6.10 still causes
latency spikes and the fixes don't handle all corner cases. Due to
summer holidays we're taking a shortcut to disable it for release
builds and will fix it in the near future.

- only enable the extent map shrinker for DEBUG builds, a temporary
  quick fix to avoid latency spikes in regular builds

- update target inode's ctime on unlink, mandated by POSIX

- properly take lock to read/update block group's zoned variables

- add counted_by() annotations"

* tag 'for-6.11-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
btrfs: only enable extent map shrinker for DEBUG builds
btrfs: zoned: properly take lock to read/update block group's zoned variables
btrfs: tree-checker: add dev extent item checks
btrfs: update target inode's ctime on unlink
btrfs: send: annotate struct name_cache_entry with __counted_by()

+8 -6
fs/btrfs/free-space-cache.c
···
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	int bg_reclaim_threshold = 0;
-	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	bool initial;
 	u64 reclaimable_unusable;
 
-	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+	spin_lock(&block_group->lock);
 
+	initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 	if (!initial)
 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
 
-	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
 	else if (initial)
···
 		to_free = offset + size - block_group->alloc_offset;
 	to_unusable = size - to_free;
 
+	spin_lock(&ctl->tree_lock);
 	ctl->free_space += to_free;
+	spin_unlock(&ctl->tree_lock);
 	/*
 	 * If the block group is read-only, we should account freed space into
 	 * bytes_readonly.
···
 		block_group->zone_unusable += to_unusable;
 		WARN_ON(block_group->zone_unusable > block_group->length);
 	}
-	spin_unlock(&ctl->tree_lock);
 	if (!used) {
-		spin_lock(&block_group->lock);
 		block_group->alloc_offset -= size;
-		spin_unlock(&block_group->lock);
 	}
 
 	reclaimable_unusable = block_group->zone_unusable -
···
 	    mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) {
 		btrfs_mark_bg_to_reclaim(block_group);
 	}
+
+	spin_unlock(&block_group->lock);
 
 	return 0;
 }
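
The net effect of the reordering above is that all of the zoned accounting
fields (length, alloc_offset, zone_capacity, zone_unusable) are now read and
updated under block_group->lock, with ctl->tree_lock taken only briefly,
nested inside it, around the free_space update. Condensed, the resulting
locking structure looks roughly like this (a sketch of the shape of the
function after the patch, not the literal code):

    spin_lock(&block_group->lock);
    /* compute initial, to_free and to_unusable from block group fields */
    spin_lock(&ctl->tree_lock);
    ctl->free_space += to_free;
    spin_unlock(&ctl->tree_lock);
    /* update zone_unusable/alloc_offset, maybe mark the group for reclaim */
    spin_unlock(&block_group->lock);
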
+1
fs/btrfs/inode.c
···
 
 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
 	inode_inc_iversion(&inode->vfs_inode);
+	inode_set_ctime_current(&inode->vfs_inode);
 	inode_inc_iversion(&dir->vfs_inode);
 	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
 	ret = btrfs_update_inode(trans, dir);
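
POSIX requires that unlink() update st_ctime of the file whose link count
changes, not only the timestamps of the containing directory, which is what
the added inode_set_ctime_current() call restores. A minimal userspace
sketch to observe the behaviour (illustrative only; file names and the
sleep are made up, not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            struct stat st;

            close(open("a", O_CREAT | O_WRONLY, 0644));
            link("a", "b");         /* "a" now has two links */
            stat("a", &st);
            printf("before: %lld.%09ld\n",
                   (long long)st.st_ctim.tv_sec, st.st_ctim.tv_nsec);
            sleep(1);               /* let the clock tick so a change is visible */
            unlink("b");            /* must bump ctime of "a" */
            stat("a", &st);
            printf("after:  %lld.%09ld\n",
                   (long long)st.st_ctim.tv_sec, st.st_ctim.tv_nsec);
            return 0;
    }
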
+1 -1
fs/btrfs/send.c
···
 	int ret;
 	int need_later_update;
 	int name_len;
-	char name[];
+	char name[] __counted_by(name_len);
 };
 
 /* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
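
For context, __counted_by() ties a flexible array member to the struct field
that stores its element count, so FORTIFY_SOURCE and UBSAN bounds checking
know the runtime size of name[]. The general usage pattern looks like the
sketch below (identifiers are illustrative, not the btrfs code); the counter
must be assigned before the array is written:

    /* needs <linux/overflow.h> for struct_size() and <linux/slab.h> for kmalloc() */
    struct blob {
            int len;                        /* number of bytes in data[] */
            char data[] __counted_by(len);
    };

    static struct blob *blob_alloc(const char *src, int n)
    {
            struct blob *b = kmalloc(struct_size(b, data, n), GFP_KERNEL);

            if (!b)
                    return NULL;
            b->len = n;                     /* set the counter first ... */
            memcpy(b->data, src, n);        /* ... then in-bounds accesses pass the checks */
            return b;
    }
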
+7 -1
fs/btrfs/super.c
···
 
 	trace_btrfs_extent_map_shrinker_count(fs_info, nr);
 
-	return nr;
+	/*
+	 * Only report the real number for DEBUG builds, as there are reports of
+	 * serious performance degradation caused by too frequent shrinks.
+	 */
+	if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
+		return nr;
+	return 0;
 }
 
 static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
+69
fs/btrfs/tree-checker.c
···
 	return 0;
 }
 
+static int check_dev_extent_item(const struct extent_buffer *leaf,
+				 const struct btrfs_key *key,
+				 int slot,
+				 struct btrfs_key *prev_key)
+{
+	struct btrfs_dev_extent *de;
+	const u32 sectorsize = leaf->fs_info->sectorsize;
+
+	de = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+	/* Basic fixed member checks. */
+	if (unlikely(btrfs_dev_extent_chunk_tree(leaf, de) !=
+		     BTRFS_CHUNK_TREE_OBJECTID)) {
+		generic_err(leaf, slot,
+			    "invalid dev extent chunk tree id, has %llu expect %llu",
+			    btrfs_dev_extent_chunk_tree(leaf, de),
+			    BTRFS_CHUNK_TREE_OBJECTID);
+		return -EUCLEAN;
+	}
+	if (unlikely(btrfs_dev_extent_chunk_objectid(leaf, de) !=
+		     BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
+		generic_err(leaf, slot,
+			    "invalid dev extent chunk objectid, has %llu expect %llu",
+			    btrfs_dev_extent_chunk_objectid(leaf, de),
+			    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+		return -EUCLEAN;
+	}
+	/* Alignment check. */
+	if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
+		generic_err(leaf, slot,
+			    "invalid dev extent key.offset, has %llu not aligned to %u",
+			    key->offset, sectorsize);
+		return -EUCLEAN;
+	}
+	if (unlikely(!IS_ALIGNED(btrfs_dev_extent_chunk_offset(leaf, de),
+				 sectorsize))) {
+		generic_err(leaf, slot,
+			    "invalid dev extent chunk offset, has %llu not aligned to %u",
+			    btrfs_dev_extent_chunk_objectid(leaf, de),
+			    sectorsize);
+		return -EUCLEAN;
+	}
+	if (unlikely(!IS_ALIGNED(btrfs_dev_extent_length(leaf, de),
+				 sectorsize))) {
+		generic_err(leaf, slot,
+			    "invalid dev extent length, has %llu not aligned to %u",
+			    btrfs_dev_extent_length(leaf, de), sectorsize);
+		return -EUCLEAN;
+	}
+	/* Overlap check with previous dev extent. */
+	if (slot && prev_key->objectid == key->objectid &&
+	    prev_key->type == key->type) {
+		struct btrfs_dev_extent *prev_de;
+		u64 prev_len;
+
+		prev_de = btrfs_item_ptr(leaf, slot - 1, struct btrfs_dev_extent);
+		prev_len = btrfs_dev_extent_length(leaf, prev_de);
+		if (unlikely(prev_key->offset + prev_len > key->offset)) {
+			generic_err(leaf, slot,
+				    "dev extent overlap, prev offset %llu len %llu current offset %llu",
+				    prev_key->objectid, prev_len, key->offset);
+			return -EUCLEAN;
+		}
+	}
+	return 0;
+}
+
 /*
  * Common point to switch the item-specific validation.
  */
···
 		break;
 	case BTRFS_DEV_ITEM_KEY:
 		ret = check_dev_item(leaf, key, slot);
+		break;
+	case BTRFS_DEV_EXTENT_KEY:
+		ret = check_dev_extent_item(leaf, key, slot, prev_key);
 		break;
 	case BTRFS_INODE_ITEM_KEY:
 		ret = check_inode_item(leaf, key, slot);