Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: make static code static & remove dead code

Big patch, but all it does is add statics to functions which
are in fact static, then remove the associated dead-code fallout.

removed functions:

btrfs_iref_to_path()
__btrfs_lookup_delayed_deletion_item()
__btrfs_search_delayed_insertion_item()
__btrfs_search_delayed_deletion_item()
find_eb_for_page()
btrfs_find_block_group()
range_straddles_pages()
extent_range_uptodate()
btrfs_file_extent_length()
btrfs_scrub_cancel_devid()

btrfs_print_tree() is left because it is used for debugging.
btrfs_start_transaction_lflush() and btrfs_reada_detach() are
left for symmetry.

ulist.c functions are left, another patch will take care of those.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>

Authored by Eric Sandeen and committed by Josef Bacik (commit 48a3b636, tree 634554dc).

+135 -392
+14 -26
fs/btrfs/backref.c
··· 1189 1189 return ret; 1190 1190 } 1191 1191 1192 + /* 1193 + * this iterates to turn a name (from iref/extref) into a full filesystem path. 1194 + * Elements of the path are separated by '/' and the path is guaranteed to be 1195 + * 0-terminated. the path is only given within the current file system. 1196 + * Therefore, it never starts with a '/'. the caller is responsible to provide 1197 + * "size" bytes in "dest". the dest buffer will be filled backwards. finally, 1198 + * the start point of the resulting string is returned. this pointer is within 1199 + * dest, normally. 1200 + * in case the path buffer would overflow, the pointer is decremented further 1201 + * as if output was written to the buffer, though no more output is actually 1202 + * generated. that way, the caller can determine how much space would be 1203 + * required for the path to fit into the buffer. in that case, the returned 1204 + * value will be smaller than dest. callers must check this! 1205 + */ 1192 1206 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, 1193 1207 u32 name_len, unsigned long name_off, 1194 1208 struct extent_buffer *eb_in, u64 parent, ··· 1269 1255 return ERR_PTR(ret); 1270 1256 1271 1257 return dest + bytes_left; 1272 - } 1273 - 1274 - /* 1275 - * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements 1276 - * of the path are separated by '/' and the path is guaranteed to be 1277 - * 0-terminated. the path is only given within the current file system. 1278 - * Therefore, it never starts with a '/'. the caller is responsible to provide 1279 - * "size" bytes in "dest". the dest buffer will be filled backwards. finally, 1280 - * the start point of the resulting string is returned. this pointer is within 1281 - * dest, normally. 1282 - * in case the path buffer would overflow, the pointer is decremented further 1283 - * as if output was written to the buffer, though no more output is actually 1284 - * generated. 
that way, the caller can determine how much space would be 1285 - * required for the path to fit into the buffer. in that case, the returned 1286 - * value will be smaller than dest. callers must check this! 1287 - */ 1288 - char *btrfs_iref_to_path(struct btrfs_root *fs_root, 1289 - struct btrfs_path *path, 1290 - struct btrfs_inode_ref *iref, 1291 - struct extent_buffer *eb_in, u64 parent, 1292 - char *dest, u32 size) 1293 - { 1294 - return btrfs_ref_to_path(fs_root, path, 1295 - btrfs_inode_ref_name_len(eb_in, iref), 1296 - (unsigned long)(iref + 1), 1297 - eb_in, parent, dest, size); 1298 1258 } 1299 1259 1300 1260 /*
-3
fs/btrfs/backref.h
··· 59 59 int btrfs_find_all_roots(struct btrfs_trans_handle *trans, 60 60 struct btrfs_fs_info *fs_info, u64 bytenr, 61 61 u64 time_seq, struct ulist **roots); 62 - char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, 63 - struct btrfs_inode_ref *iref, struct extent_buffer *eb, 64 - u64 parent, char *dest, u32 size); 65 62 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, 66 63 u32 name_len, unsigned long name_off, 67 64 struct extent_buffer *eb_in, u64 parent,
+8 -3
fs/btrfs/compression.c
··· 82 82 u32 sums; 83 83 }; 84 84 85 + static int btrfs_decompress_biovec(int type, struct page **pages_in, 86 + u64 disk_start, struct bio_vec *bvec, 87 + int vcnt, size_t srclen); 88 + 85 89 static inline int compressed_bio_size(struct btrfs_root *root, 86 90 unsigned long disk_size) 87 91 { ··· 742 738 static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES]; 743 739 static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]; 744 740 745 - struct btrfs_compress_op *btrfs_compress_op[] = { 741 + static struct btrfs_compress_op *btrfs_compress_op[] = { 746 742 &btrfs_zlib_compress, 747 743 &btrfs_lzo_compress, 748 744 }; ··· 913 909 * be contiguous. They all correspond to the range of bytes covered by 914 910 * the compressed extent. 915 911 */ 916 - int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, 917 - struct bio_vec *bvec, int vcnt, size_t srclen) 912 + static int btrfs_decompress_biovec(int type, struct page **pages_in, 913 + u64 disk_start, struct bio_vec *bvec, 914 + int vcnt, size_t srclen) 918 915 { 919 916 struct list_head *workspace; 920 917 int ret;
-2
fs/btrfs/compression.h
··· 30 30 unsigned long *total_in, 31 31 unsigned long *total_out, 32 32 unsigned long max_out); 33 - int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, 34 - struct bio_vec *bvec, int vcnt, size_t srclen); 35 33 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, 36 34 unsigned long start_byte, size_t srclen, size_t destlen); 37 35 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
+2 -7
fs/btrfs/ctree.c
··· 41 41 int level, int slot); 42 42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, 43 43 struct extent_buffer *eb); 44 - struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr, 45 - u32 blocksize, u64 parent_transid, 46 - u64 time_seq); 47 - struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root, 48 - u64 bytenr, u32 blocksize, 49 - u64 time_seq); 44 + static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 50 45 51 46 struct btrfs_path *btrfs_alloc_path(void) 52 47 { ··· 203 208 * tree until you end up with a lock on the root. A locked buffer 204 209 * is returned, with a reference held. 205 210 */ 206 - struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) 211 + static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) 207 212 { 208 213 struct extent_buffer *eb; 209 214
-22
fs/btrfs/ctree.h
··· 3044 3044 struct btrfs_fs_info *info, 3045 3045 u64 bytenr); 3046 3046 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3047 - u64 btrfs_find_block_group(struct btrfs_root *root, 3048 - u64 search_start, u64 search_hint, int owner); 3049 3047 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, 3050 3048 struct btrfs_root *root, u32 blocksize, 3051 3049 u64 parent, u64 root_objectid, ··· 3053 3055 struct btrfs_root *root, 3054 3056 struct extent_buffer *buf, 3055 3057 u64 parent, int last_ref); 3056 - struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, 3057 - struct btrfs_root *root, 3058 - u64 bytenr, u32 blocksize, 3059 - int level); 3060 3058 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 3061 3059 struct btrfs_root *root, 3062 3060 u64 root_objectid, u64 owner, ··· 3105 3111 struct btrfs_root *root, u64 group_start); 3106 3112 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, 3107 3113 struct btrfs_root *root); 3108 - u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); 3109 3114 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); 3110 3115 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 3111 3116 ··· 3293 3300 { 3294 3301 return btrfs_next_old_item(root, p, 0); 3295 3302 } 3296 - int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 3297 3303 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 3298 3304 int __must_check btrfs_drop_snapshot(struct btrfs_root *root, 3299 3305 struct btrfs_block_rsv *block_rsv, ··· 3387 3395 btrfs_search_dir_index_item(struct btrfs_root *root, 3388 3396 struct btrfs_path *path, u64 dirid, 3389 3397 const char *name, int name_len); 3390 - struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, 3391 - struct btrfs_path *path, 3392 - const char *name, int name_len); 3393 3398 int btrfs_delete_one_dir_name(struct 
btrfs_trans_handle *trans, 3394 3399 struct btrfs_root *root, 3395 3400 struct btrfs_path *path, ··· 3464 3475 struct btrfs_root *root, 3465 3476 struct btrfs_path *path, u64 objectid, 3466 3477 u64 bytenr, int mod); 3467 - u64 btrfs_file_extent_length(struct btrfs_path *path); 3468 3478 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 3469 3479 struct btrfs_root *root, 3470 3480 struct btrfs_ordered_sum *sums); 3471 3481 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, 3472 3482 struct bio *bio, u64 file_start, int contig); 3473 - struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, 3474 - struct btrfs_root *root, 3475 - struct btrfs_path *path, 3476 - u64 bytenr, int cow); 3477 3483 int btrfs_csum_truncate(struct btrfs_trans_handle *trans, 3478 3484 struct btrfs_root *root, struct btrfs_path *path, 3479 3485 u64 isize); ··· 3530 3546 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); 3531 3547 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 3532 3548 struct extent_state **cached_state); 3533 - int btrfs_writepages(struct address_space *mapping, 3534 - struct writeback_control *wbc); 3535 3549 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 3536 3550 struct btrfs_root *new_root, u64 new_dirid); 3537 3551 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, ··· 3539 3557 int btrfs_readpage(struct file *file, struct page *page); 3540 3558 void btrfs_evict_inode(struct inode *inode); 3541 3559 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); 3542 - int btrfs_dirty_inode(struct inode *inode); 3543 3560 struct inode *btrfs_alloc_inode(struct super_block *sb); 3544 3561 void btrfs_destroy_inode(struct inode *inode); 3545 3562 int btrfs_drop_inode(struct inode *inode); ··· 3556 3575 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 3557 3576 struct btrfs_root *root, struct inode *inode); 3558 
3577 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); 3559 - int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); 3560 3578 int btrfs_orphan_cleanup(struct btrfs_root *root); 3561 3579 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 3562 3580 struct btrfs_root *root); ··· 3606 3626 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, 3607 3627 struct inode *inode, u64 start, u64 end); 3608 3628 int btrfs_release_file(struct inode *inode, struct file *file); 3609 - void btrfs_drop_pages(struct page **pages, size_t num_pages); 3610 3629 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, 3611 3630 struct page **pages, size_t num_pages, 3612 3631 loff_t pos, size_t write_bytes, ··· 3781 3802 int btrfs_scrub_cancel(struct btrfs_fs_info *info); 3782 3803 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info, 3783 3804 struct btrfs_device *dev); 3784 - int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); 3785 3805 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, 3786 3806 struct btrfs_scrub_progress *progress); 3787 3807
+8 -47
fs/btrfs/delayed-inode.c
··· 202 202 spin_unlock(&root->lock); 203 203 } 204 204 205 - struct btrfs_delayed_node *btrfs_first_delayed_node( 205 + static struct btrfs_delayed_node *btrfs_first_delayed_node( 206 206 struct btrfs_delayed_root *delayed_root) 207 207 { 208 208 struct list_head *p; ··· 221 221 return node; 222 222 } 223 223 224 - struct btrfs_delayed_node *btrfs_next_delayed_node( 224 + static struct btrfs_delayed_node *btrfs_next_delayed_node( 225 225 struct btrfs_delayed_node *node) 226 226 { 227 227 struct btrfs_delayed_root *delayed_root; ··· 282 282 __btrfs_release_delayed_node(node, 0); 283 283 } 284 284 285 - struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( 285 + static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( 286 286 struct btrfs_delayed_root *delayed_root) 287 287 { 288 288 struct list_head *p; ··· 308 308 __btrfs_release_delayed_node(node, 1); 309 309 } 310 310 311 - struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) 311 + static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) 312 312 { 313 313 struct btrfs_delayed_item *item; 314 314 item = kmalloc(sizeof(*item) + data_len, GFP_NOFS); ··· 383 383 return NULL; 384 384 } 385 385 386 - struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item( 386 + static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item( 387 387 struct btrfs_delayed_node *delayed_node, 388 388 struct btrfs_key *key) 389 389 { ··· 391 391 392 392 item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, 393 393 NULL, NULL); 394 - return item; 395 - } 396 - 397 - struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item( 398 - struct btrfs_delayed_node *delayed_node, 399 - struct btrfs_key *key) 400 - { 401 - struct btrfs_delayed_item *item; 402 - 403 - item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, 404 - NULL, NULL); 405 - return item; 406 - } 407 - 408 - struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item( 409 - struct 
btrfs_delayed_node *delayed_node, 410 - struct btrfs_key *key) 411 - { 412 - struct btrfs_delayed_item *item, *next; 413 - 414 - item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, 415 - NULL, &next); 416 - if (!item) 417 - item = next; 418 - 419 - return item; 420 - } 421 - 422 - struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item( 423 - struct btrfs_delayed_node *delayed_node, 424 - struct btrfs_key *key) 425 - { 426 - struct btrfs_delayed_item *item, *next; 427 - 428 - item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, 429 - NULL, &next); 430 - if (!item) 431 - item = next; 432 - 433 394 return item; 434 395 } 435 396 ··· 496 535 } 497 536 } 498 537 499 - struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( 538 + static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( 500 539 struct btrfs_delayed_node *delayed_node) 501 540 { 502 541 struct rb_node *p; ··· 509 548 return item; 510 549 } 511 550 512 - struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( 551 + static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( 513 552 struct btrfs_delayed_node *delayed_node) 514 553 { 515 554 struct rb_node *p; ··· 522 561 return item; 523 562 } 524 563 525 - struct btrfs_delayed_item *__btrfs_next_delayed_item( 564 + static struct btrfs_delayed_item *__btrfs_next_delayed_item( 526 565 struct btrfs_delayed_item *item) 527 566 { 528 567 struct rb_node *p;
+5 -1
fs/btrfs/dir-item.c
··· 21 21 #include "hash.h" 22 22 #include "transaction.h" 23 23 24 + static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, 25 + struct btrfs_path *path, 26 + const char *name, int name_len); 27 + 24 28 /* 25 29 * insert a name into a directory, doing overflow properly if there is a hash 26 30 * collision. data_size indicates how big the item inserted should be. On ··· 383 379 * this walks through all the entries in a dir item and finds one 384 380 * for a specific name. 385 381 */ 386 - struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, 382 + static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, 387 383 struct btrfs_path *path, 388 384 const char *name, int name_len) 389 385 {
+5 -38
fs/btrfs/disk-io.c
··· 70 70 int mark); 71 71 static int btrfs_destroy_pinned_extent(struct btrfs_root *root, 72 72 struct extent_io_tree *pinned_extents); 73 + static int btrfs_cleanup_transaction(struct btrfs_root *root); 74 + static void btrfs_error_commit_super(struct btrfs_root *root); 73 75 74 76 /* 75 77 * end_io_wq structs are used to do processing in task context when an IO is ··· 531 529 } 532 530 533 531 return 0; 534 - } 535 - 536 - struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree, 537 - struct page *page, int max_walk) 538 - { 539 - struct extent_buffer *eb; 540 - u64 start = page_offset(page); 541 - u64 target = start; 542 - u64 min_start; 543 - 544 - if (start < max_walk) 545 - min_start = 0; 546 - else 547 - min_start = start - max_walk; 548 - 549 - while (start >= min_start) { 550 - eb = find_extent_buffer(tree, start, 0); 551 - if (eb) { 552 - /* 553 - * we found an extent buffer and it contains our page 554 - * horray! 555 - */ 556 - if (eb->start <= target && 557 - eb->start + eb->len > target) 558 - return eb; 559 - 560 - /* we found an extent buffer that wasn't for us */ 561 - free_extent_buffer(eb); 562 - return NULL; 563 - } 564 - if (start == 0) 565 - break; 566 - start -= PAGE_CACHE_SIZE; 567 - } 568 - return NULL; 569 532 } 570 533 571 534 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, ··· 3212 3245 return num_tolerated_disk_barrier_failures; 3213 3246 } 3214 3247 3215 - int write_all_supers(struct btrfs_root *root, int max_mirrors) 3248 + static int write_all_supers(struct btrfs_root *root, int max_mirrors) 3216 3249 { 3217 3250 struct list_head *head; 3218 3251 struct btrfs_device *dev; ··· 3578 3611 return 0; 3579 3612 } 3580 3613 3581 - void btrfs_error_commit_super(struct btrfs_root *root) 3614 + static void btrfs_error_commit_super(struct btrfs_root *root) 3582 3615 { 3583 3616 mutex_lock(&root->fs_info->cleaner_mutex); 3584 3617 btrfs_run_delayed_iputs(root); ··· 3846 3879 */ 3847 3880 } 3848 3881 3849 
- int btrfs_cleanup_transaction(struct btrfs_root *root) 3882 + static int btrfs_cleanup_transaction(struct btrfs_root *root) 3850 3883 { 3851 3884 struct btrfs_transaction *t; 3852 3885 LIST_HEAD(list);
-2
fs/btrfs/disk-io.h
··· 61 61 struct btrfs_root *root, int max_mirrors); 62 62 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); 63 63 int btrfs_commit_super(struct btrfs_root *root); 64 - void btrfs_error_commit_super(struct btrfs_root *root); 65 64 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, 66 65 u64 bytenr, u32 blocksize); 67 66 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, ··· 92 93 struct btrfs_fs_info *fs_info); 93 94 int btrfs_add_log_tree(struct btrfs_trans_handle *trans, 94 95 struct btrfs_root *root); 95 - int btrfs_cleanup_transaction(struct btrfs_root *root); 96 96 void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans, 97 97 struct btrfs_root *root); 98 98 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+8 -56
fs/btrfs/extent-tree.c
··· 105 105 u64 num_bytes, int reserve); 106 106 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 107 107 u64 num_bytes); 108 + int btrfs_pin_extent(struct btrfs_root *root, 109 + u64 bytenr, u64 num_bytes, int reserved); 108 110 109 111 static noinline int 110 112 block_group_cache_done(struct btrfs_block_group_cache *cache) ··· 684 682 list_for_each_entry_rcu(found, head, list) 685 683 found->full = 0; 686 684 rcu_read_unlock(); 687 - } 688 - 689 - u64 btrfs_find_block_group(struct btrfs_root *root, 690 - u64 search_start, u64 search_hint, int owner) 691 - { 692 - struct btrfs_block_group_cache *cache; 693 - u64 used; 694 - u64 last = max(search_hint, search_start); 695 - u64 group_start = 0; 696 - int full_search = 0; 697 - int factor = 9; 698 - int wrapped = 0; 699 - again: 700 - while (1) { 701 - cache = btrfs_lookup_first_block_group(root->fs_info, last); 702 - if (!cache) 703 - break; 704 - 705 - spin_lock(&cache->lock); 706 - last = cache->key.objectid + cache->key.offset; 707 - used = btrfs_block_group_used(&cache->item); 708 - 709 - if ((full_search || !cache->ro) && 710 - block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) { 711 - if (used + cache->pinned + cache->reserved < 712 - div_factor(cache->key.offset, factor)) { 713 - group_start = cache->key.objectid; 714 - spin_unlock(&cache->lock); 715 - btrfs_put_block_group(cache); 716 - goto found; 717 - } 718 - } 719 - spin_unlock(&cache->lock); 720 - btrfs_put_block_group(cache); 721 - cond_resched(); 722 - } 723 - if (!wrapped) { 724 - last = search_start; 725 - wrapped = 1; 726 - goto again; 727 - } 728 - if (!full_search && factor < 10) { 729 - last = search_start; 730 - full_search = 1; 731 - factor = 10; 732 - goto again; 733 - } 734 - found: 735 - return group_start; 736 685 } 737 686 738 687 /* simple helper to search for an existing extent at a given offset */ ··· 3406 3453 * progress (either running or paused) picks the target profile (if it's 3407 3454 * already available), 
otherwise falls back to plain reducing. 3408 3455 */ 3409 - u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3456 + static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3410 3457 { 3411 3458 /* 3412 3459 * we add in the count of missing devices because we want ··· 3880 3927 return 0; 3881 3928 } 3882 3929 3883 - void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root, 3884 - unsigned long nr_pages) 3930 + static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root, 3931 + unsigned long nr_pages) 3885 3932 { 3886 3933 struct super_block *sb = root->fs_info->sb; 3887 3934 int started; ··· 6605 6652 return ret; 6606 6653 } 6607 6654 6608 - struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, 6609 - struct btrfs_root *root, 6610 - u64 bytenr, u32 blocksize, 6611 - int level) 6655 + static struct extent_buffer * 6656 + btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, 6657 + u64 bytenr, u32 blocksize, int level) 6612 6658 { 6613 6659 struct extent_buffer *buf; 6614 6660
+11 -50
fs/btrfs/extent_io.c
··· 477 477 return prealloc; 478 478 } 479 479 480 - void extent_io_tree_panic(struct extent_io_tree *tree, int err) 480 + static void extent_io_tree_panic(struct extent_io_tree *tree, int err) 481 481 { 482 482 btrfs_panic(tree_fs_info(tree), err, "Locking error: " 483 483 "Extent tree was modified by another " ··· 658 658 * The range [start, end] is inclusive. 659 659 * The tree lock is taken by this function 660 660 */ 661 - void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) 661 + static void wait_extent_bit(struct extent_io_tree *tree, u64 start, 662 + u64 end, int bits) 662 663 { 663 664 struct extent_state *state; 664 665 struct rb_node *node; ··· 1328 1327 * return it. tree->lock must be held. NULL will returned if 1329 1328 * nothing was found after 'start' 1330 1329 */ 1331 - struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, 1332 - u64 start, int bits) 1330 + static struct extent_state * 1331 + find_first_extent_bit_state(struct extent_io_tree *tree, 1332 + u64 start, int bits) 1333 1333 { 1334 1334 struct rb_node *node; 1335 1335 struct extent_state *state; ··· 2670 2668 return ret; 2671 2669 } 2672 2670 2673 - void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page) 2671 + static void attach_extent_buffer_page(struct extent_buffer *eb, 2672 + struct page *page) 2674 2673 { 2675 2674 if (!PagePrivate(page)) { 2676 2675 SetPagePrivate(page); ··· 3789 3786 * are locked or under IO and drops the related state bits if it is safe 3790 3787 * to drop the page. 
3791 3788 */ 3792 - int try_release_extent_state(struct extent_map_tree *map, 3793 - struct extent_io_tree *tree, struct page *page, 3794 - gfp_t mask) 3789 + static int try_release_extent_state(struct extent_map_tree *map, 3790 + struct extent_io_tree *tree, 3791 + struct page *page, gfp_t mask) 3795 3792 { 3796 3793 u64 start = page_offset(page); 3797 3794 u64 end = start + PAGE_CACHE_SIZE - 1; ··· 4574 4571 return was_dirty; 4575 4572 } 4576 4573 4577 - static int range_straddles_pages(u64 start, u64 len) 4578 - { 4579 - if (len < PAGE_CACHE_SIZE) 4580 - return 1; 4581 - if (start & (PAGE_CACHE_SIZE - 1)) 4582 - return 1; 4583 - if ((start + len) & (PAGE_CACHE_SIZE - 1)) 4584 - return 1; 4585 - return 0; 4586 - } 4587 - 4588 4574 int clear_extent_buffer_uptodate(struct extent_buffer *eb) 4589 4575 { 4590 4576 unsigned long i; ··· 4603 4611 SetPageUptodate(page); 4604 4612 } 4605 4613 return 0; 4606 - } 4607 - 4608 - int extent_range_uptodate(struct extent_io_tree *tree, 4609 - u64 start, u64 end) 4610 - { 4611 - struct page *page; 4612 - int ret; 4613 - int pg_uptodate = 1; 4614 - int uptodate; 4615 - unsigned long index; 4616 - 4617 - if (range_straddles_pages(start, end - start + 1)) { 4618 - ret = test_range_bit(tree, start, end, 4619 - EXTENT_UPTODATE, 1, NULL); 4620 - if (ret) 4621 - return 1; 4622 - } 4623 - while (start <= end) { 4624 - index = start >> PAGE_CACHE_SHIFT; 4625 - page = find_get_page(tree->mapping, index); 4626 - if (!page) 4627 - return 1; 4628 - uptodate = PageUptodate(page); 4629 - page_cache_release(page); 4630 - if (!uptodate) { 4631 - pg_uptodate = 0; 4632 - break; 4633 - } 4634 - start += PAGE_CACHE_SIZE; 4635 - } 4636 - return pg_uptodate; 4637 4614 } 4638 4615 4639 4616 int extent_buffer_uptodate(struct extent_buffer *eb)
-8
fs/btrfs/extent_io.h
··· 190 190 struct extent_io_tree *tree, struct page *page, 191 191 gfp_t mask); 192 192 int try_release_extent_buffer(struct page *page, gfp_t mask); 193 - int try_release_extent_state(struct extent_map_tree *map, 194 - struct extent_io_tree *tree, struct page *page, 195 - gfp_t mask); 196 193 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end); 197 194 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 198 195 int bits, struct extent_state **cached); ··· 239 242 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 240 243 u64 *start_ret, u64 *end_ret, int bits, 241 244 struct extent_state **cached_state); 242 - struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, 243 - u64 start, int bits); 244 245 int extent_invalidatepage(struct extent_io_tree *tree, 245 246 struct page *page, unsigned long offset); 246 247 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, ··· 317 322 unsigned long src_offset, unsigned long len); 318 323 void memset_extent_buffer(struct extent_buffer *eb, char c, 319 324 unsigned long start, unsigned long len); 320 - void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); 321 325 void clear_extent_buffer_dirty(struct extent_buffer *eb); 322 326 int set_extent_buffer_dirty(struct extent_buffer *eb); 323 327 int set_extent_buffer_uptodate(struct extent_buffer *eb); ··· 326 332 unsigned long min_len, char **map, 327 333 unsigned long *map_start, 328 334 unsigned long *map_len); 329 - int extent_range_uptodate(struct extent_io_tree *tree, 330 - u64 start, u64 end); 331 335 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); 332 336 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); 333 337 int extent_clear_unlock_delalloc(struct inode *inode,
+3 -2
fs/btrfs/extent_map.c
··· 345 345 return start + len; 346 346 } 347 347 348 - struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree, 349 - u64 start, u64 len, int strict) 348 + static struct extent_map * 349 + __lookup_extent_mapping(struct extent_map_tree *tree, 350 + u64 start, u64 len, int strict) 350 351 { 351 352 struct extent_map *em; 352 353 struct rb_node *rb_node;
+5 -25
fs/btrfs/file-item.c
··· 83 83 return ret; 84 84 } 85 85 86 - struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, 87 - struct btrfs_root *root, 88 - struct btrfs_path *path, 89 - u64 bytenr, int cow) 86 + static struct btrfs_csum_item * 87 + btrfs_lookup_csum(struct btrfs_trans_handle *trans, 88 + struct btrfs_root *root, 89 + struct btrfs_path *path, 90 + u64 bytenr, int cow) 90 91 { 91 92 int ret; 92 93 struct btrfs_key file_key; ··· 151 150 btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); 152 151 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow); 153 152 return ret; 154 - } 155 - 156 - u64 btrfs_file_extent_length(struct btrfs_path *path) 157 - { 158 - int extent_type; 159 - struct btrfs_file_extent_item *fi; 160 - u64 len; 161 - 162 - fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 163 - struct btrfs_file_extent_item); 164 - extent_type = btrfs_file_extent_type(path->nodes[0], fi); 165 - 166 - if (extent_type == BTRFS_FILE_EXTENT_REG || 167 - extent_type == BTRFS_FILE_EXTENT_PREALLOC) 168 - len = btrfs_file_extent_num_bytes(path->nodes[0], fi); 169 - else if (extent_type == BTRFS_FILE_EXTENT_INLINE) 170 - len = btrfs_file_extent_inline_len(path->nodes[0], fi); 171 - else 172 - BUG(); 173 - 174 - return len; 175 153 } 176 154 177 155 static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
+6 -6
fs/btrfs/file.c
··· 192 192 * the same inode in the tree, we will merge them together (by 193 193 * __btrfs_add_inode_defrag()) and free the one that we want to requeue. 194 194 */ 195 - void btrfs_requeue_inode_defrag(struct inode *inode, 196 - struct inode_defrag *defrag) 195 + static void btrfs_requeue_inode_defrag(struct inode *inode, 196 + struct inode_defrag *defrag) 197 197 { 198 198 struct btrfs_root *root = BTRFS_I(inode)->root; 199 199 int ret; ··· 473 473 /* 474 474 * unlocks pages after btrfs_file_write is done with them 475 475 */ 476 - void btrfs_drop_pages(struct page **pages, size_t num_pages) 476 + static void btrfs_drop_pages(struct page **pages, size_t num_pages) 477 477 { 478 478 size_t i; 479 479 for (i = 0; i < num_pages; i++) { ··· 497 497 * doing real data extents, marking pages dirty and delalloc as required. 498 498 */ 499 499 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, 500 - struct page **pages, size_t num_pages, 501 - loff_t pos, size_t write_bytes, 502 - struct extent_state **cached) 500 + struct page **pages, size_t num_pages, 501 + loff_t pos, size_t write_bytes, 502 + struct extent_state **cached) 503 503 { 504 504 int err = 0; 505 505 int i;
+14 -12
fs/btrfs/free-space-cache.c
··· 120 120 return inode; 121 121 } 122 122 123 - int __create_free_space_inode(struct btrfs_root *root, 124 - struct btrfs_trans_handle *trans, 125 - struct btrfs_path *path, u64 ino, u64 offset) 123 + static int __create_free_space_inode(struct btrfs_root *root, 124 + struct btrfs_trans_handle *trans, 125 + struct btrfs_path *path, 126 + u64 ino, u64 offset) 126 127 { 127 128 struct btrfs_key key; 128 129 struct btrfs_disk_key disk_key; ··· 626 625 spin_unlock(&ctl->tree_lock); 627 626 } 628 627 629 - int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, 630 - struct btrfs_free_space_ctl *ctl, 631 - struct btrfs_path *path, u64 offset) 628 + static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, 629 + struct btrfs_free_space_ctl *ctl, 630 + struct btrfs_path *path, u64 offset) 632 631 { 633 632 struct btrfs_free_space_header *header; 634 633 struct extent_buffer *leaf; ··· 869 868 * on mount. This will return 0 if it was successfull in writing the cache out, 870 869 * and -1 if it was not. 871 870 */ 872 - int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, 873 - struct btrfs_free_space_ctl *ctl, 874 - struct btrfs_block_group_cache *block_group, 875 - struct btrfs_trans_handle *trans, 876 - struct btrfs_path *path, u64 offset) 871 + static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, 872 + struct btrfs_free_space_ctl *ctl, 873 + struct btrfs_block_group_cache *block_group, 874 + struct btrfs_trans_handle *trans, 875 + struct btrfs_path *path, u64 offset) 877 876 { 878 877 struct btrfs_free_space_header *header; 879 878 struct extent_buffer *leaf; ··· 2068 2067 return 0; 2069 2068 } 2070 2069 2071 - void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) 2070 + static void __btrfs_remove_free_space_cache_locked( 2071 + struct btrfs_free_space_ctl *ctl) 2072 2072 { 2073 2073 struct btrfs_free_space *info; 2074 2074 struct rb_node *node;
+5 -4
fs/btrfs/inode-item.c
··· 183 183 return -ENOENT; 184 184 } 185 185 186 - int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, 187 - struct btrfs_root *root, 188 - const char *name, int name_len, 189 - u64 inode_objectid, u64 ref_objectid, u64 *index) 186 + static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, 187 + struct btrfs_root *root, 188 + const char *name, int name_len, 189 + u64 inode_objectid, u64 ref_objectid, 190 + u64 *index) 190 191 { 191 192 struct btrfs_path *path; 192 193 struct btrfs_key key;
+7 -4
fs/btrfs/inode.c
··· 103 103 u64 orig_block_len, u64 ram_bytes, 104 104 int type); 105 105 106 + static int btrfs_dirty_inode(struct inode *inode); 107 + 106 108 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 107 109 struct inode *inode, struct inode *dir, 108 110 const struct qstr *qstr) ··· 3026 3024 * We have done the truncate/delete so we can go ahead and remove the orphan 3027 3025 * item for this particular inode. 3028 3026 */ 3029 - int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) 3027 + static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3028 + struct inode *inode) 3030 3029 { 3031 3030 struct btrfs_root *root = BTRFS_I(inode)->root; 3032 3031 int delete_item = 0; ··· 5345 5342 * FIXME, needs more benchmarking...there are no reasons other than performance 5346 5343 * to keep or drop this code. 5347 5344 */ 5348 - int btrfs_dirty_inode(struct inode *inode) 5345 + static int btrfs_dirty_inode(struct inode *inode) 5349 5346 { 5350 5347 struct btrfs_root *root = BTRFS_I(inode)->root; 5351 5348 struct btrfs_trans_handle *trans; ··· 7440 7437 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 7441 7438 } 7442 7439 7443 - int btrfs_writepages(struct address_space *mapping, 7444 - struct writeback_control *wbc) 7440 + static int btrfs_writepages(struct address_space *mapping, 7441 + struct writeback_control *wbc) 7445 7442 { 7446 7443 struct extent_io_tree *tree; 7447 7444
+1 -1
fs/btrfs/ioctl.c
··· 3010 3010 } 3011 3011 } 3012 3012 3013 - long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) 3013 + static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) 3014 3014 { 3015 3015 struct btrfs_ioctl_space_args space_args; 3016 3016 struct btrfs_ioctl_space_info space;
+2 -2
fs/btrfs/locking.c
··· 24 24 #include "extent_io.h" 25 25 #include "locking.h" 26 26 27 - void btrfs_assert_tree_read_locked(struct extent_buffer *eb); 27 + static void btrfs_assert_tree_read_locked(struct extent_buffer *eb); 28 28 29 29 /* 30 30 * if we currently have a spinning reader or writer lock ··· 264 264 BUG_ON(!atomic_read(&eb->write_locks)); 265 265 } 266 266 267 - void btrfs_assert_tree_read_locked(struct extent_buffer *eb) 267 + static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) 268 268 { 269 269 BUG_ON(!atomic_read(&eb->read_locks)); 270 270 }
+1 -1
fs/btrfs/print-tree.h
··· 19 19 #ifndef __PRINT_TREE_ 20 20 #define __PRINT_TREE_ 21 21 void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l); 22 - void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *t); 22 + void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c); 23 23 #endif
+7 -7
fs/btrfs/raid56.c
··· 410 410 /* 411 411 * remove everything in the cache 412 412 */ 413 - void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) 413 + static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) 414 414 { 415 415 struct btrfs_stripe_hash_table *table; 416 416 unsigned long flags; ··· 1010 1010 * this will try to merge into existing bios if possible, and returns 1011 1011 * zero if all went well. 1012 1012 */ 1013 - int rbio_add_io_page(struct btrfs_raid_bio *rbio, 1014 - struct bio_list *bio_list, 1015 - struct page *page, 1016 - int stripe_nr, 1017 - unsigned long page_index, 1018 - unsigned long bio_max_len) 1013 + static int rbio_add_io_page(struct btrfs_raid_bio *rbio, 1014 + struct bio_list *bio_list, 1015 + struct page *page, 1016 + int stripe_nr, 1017 + unsigned long page_index, 1018 + unsigned long bio_max_len) 1019 1019 { 1020 1020 struct bio *last = bio_list->tail; 1021 1021 u64 last_end = 0;
+1 -2
fs/btrfs/relocation.c
··· 326 326 return NULL; 327 327 } 328 328 329 - void backref_tree_panic(struct rb_node *rb_node, int errno, 330 - u64 bytenr) 329 + static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr) 331 330 { 332 331 333 332 struct btrfs_fs_info *fs_info = NULL;
-22
fs/btrfs/scrub.c
··· 3012 3012 return 0; 3013 3013 } 3014 3014 3015 - int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid) 3016 - { 3017 - struct btrfs_fs_info *fs_info = root->fs_info; 3018 - struct btrfs_device *dev; 3019 - int ret; 3020 - 3021 - /* 3022 - * we have to hold the device_list_mutex here so the device 3023 - * does not go away in cancel_dev. FIXME: find a better solution 3024 - */ 3025 - mutex_lock(&fs_info->fs_devices->device_list_mutex); 3026 - dev = btrfs_find_device(fs_info, devid, NULL, NULL); 3027 - if (!dev) { 3028 - mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3029 - return -ENODEV; 3030 - } 3031 - ret = btrfs_scrub_cancel_dev(fs_info, dev); 3032 - mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3033 - 3034 - return ret; 3035 - } 3036 - 3037 3015 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, 3038 3016 struct btrfs_scrub_progress *progress) 3039 3017 {
+1 -1
fs/btrfs/send.c
··· 387 387 return path; 388 388 } 389 389 390 - int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off) 390 + static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off) 391 391 { 392 392 int ret; 393 393 mm_segment_t old_fs;
-1
fs/btrfs/send.h
··· 131 131 132 132 #ifdef __KERNEL__ 133 133 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg); 134 - int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off); 135 134 #endif
+1 -1
fs/btrfs/transaction.c
··· 34 34 35 35 #define BTRFS_ROOT_TRANS_TAG 0 36 36 37 - void put_transaction(struct btrfs_transaction *transaction) 37 + static void put_transaction(struct btrfs_transaction *transaction) 38 38 { 39 39 WARN_ON(atomic_read(&transaction->use_count) == 0); 40 40 if (atomic_dec_and_test(&transaction->use_count)) {
-1
fs/btrfs/transaction.h
··· 146 146 struct extent_io_tree *dirty_pages, int mark); 147 147 int btrfs_transaction_blocked(struct btrfs_fs_info *info); 148 148 int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 149 - void put_transaction(struct btrfs_transaction *transaction); 150 149 #endif
+3 -3
fs/btrfs/tree-log.c
··· 3839 3839 * only logging is done of any parent directories that are older than 3840 3840 * the last committed transaction 3841 3841 */ 3842 - int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 3843 - struct btrfs_root *root, struct inode *inode, 3844 - struct dentry *parent, int exists_only) 3842 + static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 3843 + struct btrfs_root *root, struct inode *inode, 3844 + struct dentry *parent, int exists_only) 3845 3845 { 3846 3846 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; 3847 3847 struct super_block *sb;
-3
fs/btrfs/tree-log.h
··· 40 40 struct inode *inode, u64 dirid); 41 41 void btrfs_end_log_trans(struct btrfs_root *root); 42 42 int btrfs_pin_log_trans(struct btrfs_root *root); 43 - int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 44 - struct btrfs_root *root, struct inode *inode, 45 - struct dentry *parent, int exists_only); 46 43 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, 47 44 struct inode *dir, struct inode *inode, 48 45 int for_rename);
+15 -14
fs/btrfs/volumes.c
··· 46 46 struct btrfs_device *device); 47 47 static int btrfs_relocate_sys_chunks(struct btrfs_root *root); 48 48 static void __btrfs_reset_dev_stats(struct btrfs_device *dev); 49 + static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); 49 50 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); 50 51 51 52 static DEFINE_MUTEX(uuid_mutex); ··· 1200 1199 return ret; 1201 1200 } 1202 1201 1203 - int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, 1204 - struct btrfs_device *device, 1205 - u64 chunk_tree, u64 chunk_objectid, 1206 - u64 chunk_offset, u64 start, u64 num_bytes) 1202 + static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, 1203 + struct btrfs_device *device, 1204 + u64 chunk_tree, u64 chunk_objectid, 1205 + u64 chunk_offset, u64 start, u64 num_bytes) 1207 1206 { 1208 1207 int ret; 1209 1208 struct btrfs_path *path; ··· 1330 1329 * the device information is stored in the chunk root 1331 1330 * the btrfs_device struct should be fully filled in 1332 1331 */ 1333 - int btrfs_add_device(struct btrfs_trans_handle *trans, 1334 - struct btrfs_root *root, 1335 - struct btrfs_device *device) 1332 + static int btrfs_add_device(struct btrfs_trans_handle *trans, 1333 + struct btrfs_root *root, 1334 + struct btrfs_device *device) 1336 1335 { 1337 1336 int ret; 1338 1337 struct btrfs_path *path; ··· 1711 1710 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 1712 1711 } 1713 1712 1714 - int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path, 1715 - struct btrfs_device **device) 1713 + static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path, 1714 + struct btrfs_device **device) 1716 1715 { 1717 1716 int ret = 0; 1718 1717 struct btrfs_super_block *disk_super; ··· 3608 3607 return 0; 3609 3608 } 3610 3609 3611 - struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 3610 + static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { 3612 3611 [BTRFS_RAID_RAID10] = { 3613 3612 .sub_stripes = 2, 3614 3613 .dev_stripes = 1, ··· 5121 5120 * This will add one bio to the pending list for a device and make sure 5122 5121 * the work struct is scheduled. 5123 5122 */ 5124 - noinline void btrfs_schedule_bio(struct btrfs_root *root, 5125 - struct btrfs_device *device, 5126 - int rw, struct bio *bio) 5123 + static noinline void btrfs_schedule_bio(struct btrfs_root *root, 5124 + struct btrfs_device *device, 5125 + int rw, struct bio *bio) 5127 5126 { 5128 5127 int should_queue = 1; 5129 5128 struct btrfs_pending_bios *pending_bios; ··· 5941 5940 btrfs_dev_stat_print_on_error(dev); 5942 5941 } 5943 5942 5944 - void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 5943 + static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) 5945 5944 { 5946 5945 if (!dev->dev_stats_valid) 5947 5946 return;
-13
fs/btrfs/volumes.h
··· 254 254 #define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \ 255 255 (sizeof(struct btrfs_bio_stripe) * (n))) 256 256 257 - int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, 258 - struct btrfs_device *device, 259 - u64 chunk_tree, u64 chunk_objectid, 260 - u64 chunk_offset, u64 start, u64 num_bytes); 261 257 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, 262 258 u64 logical, u64 *length, 263 259 struct btrfs_bio **bbio_ret, int mirror_num); ··· 278 282 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root, 279 283 char *device_path, 280 284 struct btrfs_device **device); 281 - int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path, 282 - struct btrfs_device **device); 283 - int btrfs_add_device(struct btrfs_trans_handle *trans, 284 - struct btrfs_root *root, 285 - struct btrfs_device *device); 286 285 int btrfs_rm_device(struct btrfs_root *root, char *device_path); 287 286 void btrfs_cleanup_fs_uuids(void); 288 287 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len); ··· 298 307 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); 299 308 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 300 309 u64 *start, u64 *max_avail); 301 - void btrfs_dev_stat_print_on_error(struct btrfs_device *device); 302 310 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); 303 311 int btrfs_get_dev_stats(struct btrfs_root *root, 304 312 struct btrfs_ioctl_get_dev_stats *stats); ··· 311 321 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, 312 322 struct btrfs_device *tgtdev); 313 323 int btrfs_scratch_superblock(struct btrfs_device *device); 314 - void btrfs_schedule_bio(struct btrfs_root *root, 315 - struct btrfs_device *device, 316 - int rw, struct bio *bio); 317 324 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, 318 325 u64 logical, u64 len, int mirror_num); 319 326 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+2 -2
fs/btrfs/xattr.c
··· 406 406 XATTR_REPLACE); 407 407 } 408 408 409 - int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, 410 - void *fs_info) 409 + static int btrfs_initxattrs(struct inode *inode, 410 + const struct xattr *xattr_array, void *fs_info) 411 411 { 412 412 const struct xattr *xattr; 413 413 struct btrfs_trans_handle *trans = fs_info;