Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: constify more pointer parameters

Continue adding const to parameters. This is for clarity and minor
addition to safety. There are some minor effects, in the assembly code
and .ko measured on release config.

Signed-off-by: David Sterba <dsterba@suse.com>

+84 -86
+3 -3
fs/btrfs/backref.c
··· 219 219 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1 220 220 * indicates a 'higher' block. 221 221 */ 222 - static int prelim_ref_compare(struct prelim_ref *ref1, 223 - struct prelim_ref *ref2) 222 + static int prelim_ref_compare(const struct prelim_ref *ref1, 223 + const struct prelim_ref *ref2) 224 224 { 225 225 if (ref1->level < ref2->level) 226 226 return -1; ··· 251 251 } 252 252 253 253 static void update_share_count(struct share_check *sc, int oldcount, 254 - int newcount, struct prelim_ref *newref) 254 + int newcount, const struct prelim_ref *newref) 255 255 { 256 256 if ((!sc) || (oldcount == 0 && newcount < 1)) 257 257 return;
+17 -17
fs/btrfs/block-group.c
··· 23 23 #include "extent-tree.h" 24 24 25 25 #ifdef CONFIG_BTRFS_DEBUG 26 - int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group) 26 + int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group) 27 27 { 28 28 struct btrfs_fs_info *fs_info = block_group->fs_info; 29 29 ··· 40 40 * 41 41 * Should be called with balance_lock held 42 42 */ 43 - static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) 43 + static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags) 44 44 { 45 - struct btrfs_balance_control *bctl = fs_info->balance_ctl; 45 + const struct btrfs_balance_control *bctl = fs_info->balance_ctl; 46 46 u64 target = 0; 47 47 48 48 if (!bctl) ··· 1415 1415 } 1416 1416 1417 1417 static bool clean_pinned_extents(struct btrfs_trans_handle *trans, 1418 - struct btrfs_block_group *bg) 1418 + const struct btrfs_block_group *bg) 1419 1419 { 1420 - struct btrfs_fs_info *fs_info = bg->fs_info; 1420 + struct btrfs_fs_info *fs_info = trans->fs_info; 1421 1421 struct btrfs_transaction *prev_trans = NULL; 1422 1422 const u64 start = bg->start; 1423 1423 const u64 end = start + bg->length - 1; ··· 1756 1756 return bg1->used > bg2->used; 1757 1757 } 1758 1758 1759 - static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) 1759 + static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info) 1760 1760 { 1761 1761 if (btrfs_is_zoned(fs_info)) 1762 1762 return btrfs_zoned_should_reclaim(fs_info); 1763 1763 return true; 1764 1764 } 1765 1765 1766 - static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed) 1766 + static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed) 1767 1767 { 1768 1768 const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info); 1769 1769 u64 thresh_bytes = mult_perc(bg->length, thresh_pct); ··· 2006 2006 spin_unlock(&fs_info->unused_bgs_lock); 2007 2007 } 2008 2008 2009 - static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 2010 - struct btrfs_path *path) 2009 + static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key, 2010 + const struct btrfs_path *path) 2011 2011 { 2012 2012 struct btrfs_chunk_map *map; 2013 2013 struct btrfs_block_group_item bg; ··· 2055 2055 2056 2056 static int find_first_block_group(struct btrfs_fs_info *fs_info, 2057 2057 struct btrfs_path *path, 2058 - struct btrfs_key *key) 2058 + const struct btrfs_key *key) 2059 2059 { 2060 2060 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2061 2061 int ret; ··· 2640 2640 } 2641 2641 2642 2642 static int insert_dev_extent(struct btrfs_trans_handle *trans, 2643 - struct btrfs_device *device, u64 chunk_offset, 2644 - u64 start, u64 num_bytes) 2643 + const struct btrfs_device *device, u64 chunk_offset, 2644 + u64 start, u64 num_bytes) 2645 2645 { 2646 2646 struct btrfs_fs_info *fs_info = device->fs_info; 2647 2647 struct btrfs_root *root = fs_info->dev_root; ··· 2817 2817 * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2818 2818 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 
2819 2819 */ 2820 - static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) 2820 + static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset) 2821 2821 { 2822 2822 u64 div = SZ_1G; 2823 2823 u64 index; ··· 3842 3842 } 3843 3843 } 3844 3844 3845 - static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 3846 - struct btrfs_space_info *sinfo, int force) 3845 + static int should_alloc_chunk(const struct btrfs_fs_info *fs_info, 3846 + const struct btrfs_space_info *sinfo, int force) 3847 3847 { 3848 3848 u64 bytes_used = btrfs_space_info_used(sinfo, false); 3849 3849 u64 thresh; ··· 4218 4218 return ret; 4219 4219 } 4220 4220 4221 - static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 4221 + static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type) 4222 4222 { 4223 4223 u64 num_dev; 4224 4224 ··· 4622 4622 return 0; 4623 4623 } 4624 4624 4625 - bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg) 4625 + bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg) 4626 4626 { 4627 4627 if (btrfs_is_zoned(bg->fs_info)) 4628 4628 return false;
+5 -6
fs/btrfs/block-group.h
··· 266 266 u64 reclaim_mark; 267 267 }; 268 268 269 - static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group) 269 + static inline u64 btrfs_block_group_end(const struct btrfs_block_group *block_group) 270 270 { 271 271 return (block_group->start + block_group->length); 272 272 } ··· 278 278 return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0); 279 279 } 280 280 281 - static inline bool btrfs_is_block_group_data_only( 282 - struct btrfs_block_group *block_group) 281 + static inline bool btrfs_is_block_group_data_only(const struct btrfs_block_group *block_group) 283 282 { 284 283 /* 285 284 * In mixed mode the fragmentation is expected to be high, lowering the ··· 289 290 } 290 291 291 292 #ifdef CONFIG_BTRFS_DEBUG 292 - int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group); 293 + int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group); 293 294 #endif 294 295 295 296 struct btrfs_block_group *btrfs_lookup_first_block_group( ··· 369 370 return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 370 371 } 371 372 372 - static inline int btrfs_block_group_done(struct btrfs_block_group *cache) 373 + static inline int btrfs_block_group_done(const struct btrfs_block_group *cache) 373 374 { 374 375 smp_mb(); 375 376 return cache->cached == BTRFS_CACHE_FINISHED || ··· 386 387 int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, 387 388 enum btrfs_block_group_size_class size_class, 388 389 bool force_wrong_size_class); 389 - bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg); 390 + bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg); 390 391 391 392 #endif /* BTRFS_BLOCK_GROUP_H */
+1 -1
fs/btrfs/block-rsv.c
··· 553 553 return ERR_PTR(ret); 554 554 } 555 555 556 - int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, 556 + int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info, 557 557 struct btrfs_block_rsv *rsv) 558 558 { 559 559 u64 needed_bytes;
+1 -1
fs/btrfs/block-rsv.h
··· 89 89 struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans, 90 90 struct btrfs_root *root, 91 91 u32 blocksize); 92 - int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, 92 + int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info, 93 93 struct btrfs_block_rsv *rsv); 94 94 static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info, 95 95 struct btrfs_block_rsv *block_rsv,
+9 -9
fs/btrfs/ctree.c
··· 2564 2564 * 2565 2565 */ 2566 2566 static void fixup_low_keys(struct btrfs_trans_handle *trans, 2567 - struct btrfs_path *path, 2568 - struct btrfs_disk_key *key, int level) 2567 + const struct btrfs_path *path, 2568 + const struct btrfs_disk_key *key, int level) 2569 2569 { 2570 2570 int i; 2571 2571 struct extent_buffer *t; ··· 2594 2594 * that the new key won't break the order 2595 2595 */ 2596 2596 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2597 - struct btrfs_path *path, 2597 + const struct btrfs_path *path, 2598 2598 const struct btrfs_key *new_key) 2599 2599 { 2600 2600 struct btrfs_fs_info *fs_info = trans->fs_info; ··· 2660 2660 * is correct, we only need to bother the last key of @left and the first 2661 2661 * key of @right. 2662 2662 */ 2663 - static bool check_sibling_keys(struct extent_buffer *left, 2664 - struct extent_buffer *right) 2663 + static bool check_sibling_keys(const struct extent_buffer *left, 2664 + const struct extent_buffer *right) 2665 2665 { 2666 2666 struct btrfs_key left_last; 2667 2667 struct btrfs_key right_first; ··· 2928 2928 * blocknr is the block the key points to. 2929 2929 */ 2930 2930 static int insert_ptr(struct btrfs_trans_handle *trans, 2931 - struct btrfs_path *path, 2932 - struct btrfs_disk_key *key, u64 bytenr, 2931 + const struct btrfs_path *path, 2932 + const struct btrfs_disk_key *key, u64 bytenr, 2933 2933 int slot, int level) 2934 2934 { 2935 2935 struct extent_buffer *lower; ··· 4019 4019 * the front. 4020 4020 */ 4021 4021 void btrfs_truncate_item(struct btrfs_trans_handle *trans, 4022 - struct btrfs_path *path, u32 new_size, int from_end) 4022 + const struct btrfs_path *path, u32 new_size, int from_end) 4023 4023 { 4024 4024 int slot; 4025 4025 struct extent_buffer *leaf; ··· 4111 4111 * make the item pointed to by the path bigger, data_size is the added size. 
4112 4112 */ 4113 4113 void btrfs_extend_item(struct btrfs_trans_handle *trans, 4114 - struct btrfs_path *path, u32 data_size) 4114 + const struct btrfs_path *path, u32 data_size) 4115 4115 { 4116 4116 int slot; 4117 4117 struct extent_buffer *leaf;
+3 -3
fs/btrfs/ctree.h
··· 538 538 int btrfs_previous_extent_item(struct btrfs_root *root, 539 539 struct btrfs_path *path, u64 min_objectid); 540 540 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 541 - struct btrfs_path *path, 541 + const struct btrfs_path *path, 542 542 const struct btrfs_key *new_key); 543 543 struct extent_buffer *btrfs_root_node(struct btrfs_root *root); 544 544 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, ··· 572 572 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 573 573 struct btrfs_path *path, int level, int slot); 574 574 void btrfs_extend_item(struct btrfs_trans_handle *trans, 575 - struct btrfs_path *path, u32 data_size); 575 + const struct btrfs_path *path, u32 data_size); 576 576 void btrfs_truncate_item(struct btrfs_trans_handle *trans, 577 - struct btrfs_path *path, u32 new_size, int from_end); 577 + const struct btrfs_path *path, u32 new_size, int from_end); 578 578 int btrfs_split_item(struct btrfs_trans_handle *trans, 579 579 struct btrfs_root *root, 580 580 struct btrfs_path *path,
+2 -2
fs/btrfs/discard.c
··· 68 68 }; 69 69 70 70 static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl, 71 - struct btrfs_block_group *block_group) 71 + const struct btrfs_block_group *block_group) 72 72 { 73 73 return &discard_ctl->discard_list[block_group->discard_index]; 74 74 } ··· 80 80 * 81 81 * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set. 82 82 */ 83 - static bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl) 83 + static bool btrfs_run_discard_work(const struct btrfs_discard_ctl *discard_ctl) 84 84 { 85 85 struct btrfs_fs_info *fs_info = container_of(discard_ctl, 86 86 struct btrfs_fs_info,
+2 -2
fs/btrfs/file-item.c
··· 151 151 * Calculate the total size needed to allocate for an ordered sum structure 152 152 * spanning @bytes in the file. 153 153 */ 154 - static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes) 154 + static int btrfs_ordered_sum_size(const struct btrfs_fs_info *fs_info, unsigned long bytes) 155 155 { 156 156 return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes); 157 157 } ··· 1272 1272 1273 1273 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, 1274 1274 const struct btrfs_path *path, 1275 - struct btrfs_file_extent_item *fi, 1275 + const struct btrfs_file_extent_item *fi, 1276 1276 struct extent_map *em) 1277 1277 { 1278 1278 struct btrfs_fs_info *fs_info = inode->root->fs_info;
+1 -1
fs/btrfs/file-item.h
··· 74 74 unsigned long *csum_bitmap); 75 75 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, 76 76 const struct btrfs_path *path, 77 - struct btrfs_file_extent_item *fi, 77 + const struct btrfs_file_extent_item *fi, 78 78 struct extent_map *em); 79 79 int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, 80 80 u64 len);
+5 -5
fs/btrfs/inode-item.c
··· 14 14 #include "extent-tree.h" 15 15 #include "file-item.h" 16 16 17 - struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, 17 + struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf, 18 18 int slot, 19 19 const struct fscrypt_str *name) 20 20 { ··· 42 42 } 43 43 44 44 struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( 45 - struct extent_buffer *leaf, int slot, u64 ref_objectid, 45 + const struct extent_buffer *leaf, int slot, u64 ref_objectid, 46 46 const struct fscrypt_str *name) 47 47 { 48 48 struct btrfs_inode_extref *extref; ··· 423 423 return ret; 424 424 } 425 425 426 - static inline void btrfs_trace_truncate(struct btrfs_inode *inode, 427 - struct extent_buffer *leaf, 428 - struct btrfs_file_extent_item *fi, 426 + static inline void btrfs_trace_truncate(const struct btrfs_inode *inode, 427 + const struct extent_buffer *leaf, 428 + const struct btrfs_file_extent_item *fi, 429 429 u64 offset, int extent_type, int slot) 430 430 { 431 431 if (!inode)
+2 -2
fs/btrfs/inode-item.h
··· 109 109 u64 inode_objectid, u64 ref_objectid, int ins_len, 110 110 int cow); 111 111 112 - struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, 112 + struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf, 113 113 int slot, 114 114 const struct fscrypt_str *name); 115 115 struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( 116 - struct extent_buffer *leaf, int slot, u64 ref_objectid, 116 + const struct extent_buffer *leaf, int slot, u64 ref_objectid, 117 117 const struct fscrypt_str *name); 118 118 119 119 #endif
+12 -13
fs/btrfs/space-info.c
··· 163 163 * thing with or without extra unallocated space. 164 164 */ 165 165 166 - u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, 166 + u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, 167 167 bool may_use_included) 168 168 { 169 169 ASSERT(s_info); ··· 368 368 } 369 369 370 370 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, 371 - struct btrfs_space_info *space_info, 371 + const struct btrfs_space_info *space_info, 372 372 enum btrfs_reserve_flush_enum flush) 373 373 { 374 374 u64 profile; ··· 437 437 } 438 438 439 439 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 440 - struct btrfs_space_info *space_info, u64 bytes, 440 + const struct btrfs_space_info *space_info, u64 bytes, 441 441 enum btrfs_reserve_flush_enum flush) 442 442 { 443 443 u64 avail; ··· 542 542 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); 543 543 } 544 544 545 - static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, 546 - struct btrfs_space_info *info) 545 + static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info, 546 + const struct btrfs_space_info *info) 547 547 { 548 548 const char *flag_str = space_info_flag_to_str(info); 549 549 lockdep_assert_held(&info->lock); ··· 844 844 return; 845 845 } 846 846 847 - static inline u64 848 - btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 849 - struct btrfs_space_info *space_info) 847 + static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, 848 + const struct btrfs_space_info *space_info) 850 849 { 851 850 u64 used; 852 851 u64 avail; ··· 870 871 } 871 872 872 873 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, 873 - struct btrfs_space_info *space_info) 874 + const struct btrfs_space_info *space_info) 874 875 { 875 876 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); 876 877 u64 ordered, delalloc; ··· 1942 1943 * Typically with 10 block groups as the target, the discrete values this comes 1943 1944 * out to are 0, 10, 20, ... , 80, 90, and 99. 1944 1945 */ 1945 - static int calc_dynamic_reclaim_threshold(struct btrfs_space_info *space_info) 1946 + static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info) 1946 1947 { 1947 1948 struct btrfs_fs_info *fs_info = space_info->fs_info; 1948 1949 u64 unalloc = atomic64_read(&fs_info->free_chunk_space); ··· 1961 1962 return calc_pct_ratio(want, target); 1962 1963 } 1963 1964 1964 - int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info) 1965 + int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info) 1965 1966 { 1966 1967 lockdep_assert_held(&space_info->lock); ··· 1984 1985 return unalloc < data_chunk_size; 1985 1986 } 1986 1987 1987 - static void do_reclaim_sweep(struct btrfs_fs_info *fs_info, 1988 + static void do_reclaim_sweep(const struct btrfs_fs_info *fs_info, 1988 1989 struct btrfs_space_info *space_info, int raid) 1989 1990 { 1990 1991 struct btrfs_block_group *bg; ··· 2072 2073 return ret; 2073 2074 } 2074 2075 2075 - void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info) 2076 + void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info) 2076 2077 { 2077 2078 int raid; 2078 2079 struct btrfs_space_info *space_info;
+5 -5
fs/btrfs/space-info.h
··· 217 217 wait_queue_head_t wait; 218 218 }; 219 219 220 - static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 220 + static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info) 221 221 { 222 222 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && 223 223 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); ··· 258 258 u64 chunk_size); 259 259 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, 260 260 u64 flags); 261 - u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, 261 + u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, 262 262 bool may_use_included); 263 263 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 264 264 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, ··· 271 271 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, 272 272 struct btrfs_space_info *space_info); 273 273 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, 274 - struct btrfs_space_info *space_info, u64 bytes, 274 + const struct btrfs_space_info *space_info, u64 bytes, 275 275 enum btrfs_reserve_flush_enum flush); 276 276 277 277 static inline void btrfs_space_info_free_bytes_may_use( ··· 293 293 void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes); 294 294 void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready); 295 295 bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info); 296 - int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info); 297 - void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info); 296 + int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info); 297 + void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info); 298 298 299 299 #endif /* BTRFS_SPACE_INFO_H */
+7 -7
fs/btrfs/tree-mod-log.c
··· 170 170 * this until all tree mod log insertions are recorded in the rb tree and then 171 171 * write unlock fs_info::tree_mod_log_lock. 172 172 */ 173 - static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) 173 + static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, const struct extent_buffer *eb) 174 174 { 175 175 if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) 176 176 return true; ··· 188 188 189 189 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */ 190 190 static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info, 191 - struct extent_buffer *eb) 191 + const struct extent_buffer *eb) 192 192 { 193 193 if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) 194 194 return false; ··· 198 198 return true; 199 199 } 200 200 201 - static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb, 201 + static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb, 202 202 int slot, 203 203 enum btrfs_mod_log_op op) 204 204 { ··· 221 221 return tm; 222 222 } 223 223 224 - int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, 224 + int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot, 225 225 enum btrfs_mod_log_op op) 226 226 { 227 227 struct tree_mod_elem *tm; ··· 258 258 return ret; 259 259 } 260 260 261 - static struct tree_mod_elem *tree_mod_log_alloc_move(struct extent_buffer *eb, 261 + static struct tree_mod_elem *tree_mod_log_alloc_move(const struct extent_buffer *eb, 262 262 int dst_slot, int src_slot, 263 263 int nr_items) 264 264 { ··· 278 278 return tm; 279 279 } 280 280 281 - int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb, 281 + int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb, 282 282 int dst_slot, int src_slot, 283 283 int nr_items) 284 284 { ··· 535 535 } 536 536 537 537 int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst, 538 - struct extent_buffer *src, 538 + const struct extent_buffer *src, 539 539 unsigned long dst_offset, 540 540 unsigned long src_offset, 541 541 int nr_items)
+3 -3
fs/btrfs/tree-mod-log.h
··· 37 37 int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root, 38 38 struct extent_buffer *new_root, 39 39 bool log_removal); 40 - int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot, 40 + int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot, 41 41 enum btrfs_mod_log_op op); 42 42 int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb); 43 43 struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info, ··· 47 47 struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq); 48 48 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); 49 49 int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst, 50 - struct extent_buffer *src, 50 + const struct extent_buffer *src, 51 51 unsigned long dst_offset, 52 52 unsigned long src_offset, 53 53 int nr_items); 54 - int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb, 54 + int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb, 55 55 int dst_slot, int src_slot, 56 56 int nr_items); 57 57 u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info);
+1 -1
fs/btrfs/zoned.c
··· 2459 2459 mutex_unlock(&fs_devices->device_list_mutex); 2460 2460 } 2461 2461 2462 - bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) 2462 + bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) 2463 2463 { 2464 2464 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 2465 2465 struct btrfs_device *device;
+2 -2
fs/btrfs/zoned.h
··· 89 89 struct extent_buffer *eb); 90 90 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg); 91 91 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info); 92 - bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info); 92 + bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info); 93 93 void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical, 94 94 u64 length); 95 95 int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info); ··· 242 242 243 243 static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { } 244 244 245 - static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info) 245 + static inline bool btrfs_zoned_should_reclaim(const struct btrfs_fs_info *fs_info) 246 246 { 247 247 return false; 248 248 }
+3 -3
include/trace/events/btrfs.h
··· 1825 1825 1826 1826 TRACE_EVENT(qgroup_update_reserve, 1827 1827 1828 - TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup, 1828 + TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup, 1829 1829 s64 diff, int type), 1830 1830 1831 1831 TP_ARGS(fs_info, qgroup, diff, type), ··· 1851 1851 1852 1852 TRACE_EVENT(qgroup_meta_reserve, 1853 1853 1854 - TP_PROTO(struct btrfs_root *root, s64 diff, int type), 1854 + TP_PROTO(const struct btrfs_root *root, s64 diff, int type), 1855 1855 1856 1856 TP_ARGS(root, diff, type), 1857 1857 ··· 1874 1874 1875 1875 TRACE_EVENT(qgroup_meta_convert, 1876 1876 1877 - TP_PROTO(struct btrfs_root *root, s64 diff), 1877 + TP_PROTO(const struct btrfs_root *root, s64 diff), 1878 1878 1879 1879 TP_ARGS(root, diff), 1880 1880