Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  btrfs: fix inconsonant inode information
  Btrfs: make sure to update total_bitmaps when freeing cache V3
  Btrfs: fix type mismatch in find_free_extent()
  Btrfs: make sure to record the transid in new inodes

+112 -43
+92 -36
fs/btrfs/delayed-inode.c
···
 	return root->fs_info->delayed_root;
 }
 
+static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
+{
+	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+	struct btrfs_root *root = btrfs_inode->root;
+	u64 ino = btrfs_ino(inode);
+	struct btrfs_delayed_node *node;
+
+	node = ACCESS_ONCE(btrfs_inode->delayed_node);
+	if (node) {
+		atomic_inc(&node->refs);
+		return node;
+	}
+
+	spin_lock(&root->inode_lock);
+	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+	if (node) {
+		if (btrfs_inode->delayed_node) {
+			atomic_inc(&node->refs);	/* can be accessed */
+			BUG_ON(btrfs_inode->delayed_node != node);
+			spin_unlock(&root->inode_lock);
+			return node;
+		}
+		btrfs_inode->delayed_node = node;
+		atomic_inc(&node->refs);	/* can be accessed */
+		atomic_inc(&node->refs);	/* cached in the inode */
+		spin_unlock(&root->inode_lock);
+		return node;
+	}
+	spin_unlock(&root->inode_lock);
+
+	return NULL;
+}
+
 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 							struct inode *inode)
 {
···
 	int ret;
 
 again:
-	node = ACCESS_ONCE(btrfs_inode->delayed_node);
-	if (node) {
-		atomic_inc(&node->refs);	/* can be accessed */
+	node = btrfs_get_delayed_node(inode);
+	if (node)
 		return node;
-	}
-
-	spin_lock(&root->inode_lock);
-	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
-	if (node) {
-		if (btrfs_inode->delayed_node) {
-			spin_unlock(&root->inode_lock);
-			goto again;
-		}
-		btrfs_inode->delayed_node = node;
-		atomic_inc(&node->refs);	/* can be accessed */
-		atomic_inc(&node->refs);	/* cached in the inode */
-		spin_unlock(&root->inode_lock);
-		return node;
-	}
-	spin_unlock(&root->inode_lock);
 
 	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
 	if (!node)
···
 	next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 
 	return next;
-}
-
-static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
-							struct inode *inode)
-{
-	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
-	struct btrfs_delayed_node *delayed_node;
-
-	delayed_node = btrfs_inode->delayed_node;
-	if (delayed_node)
-		atomic_inc(&delayed_node->refs);
-
-	return delayed_node;
 }
 
 static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
···
 
 int btrfs_inode_delayed_dir_index_count(struct inode *inode)
 {
-	struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
-	int ret = 0;
+	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
 
 	if (!delayed_node)
 		return -ENOENT;
···
 	 * a new directory index is added into the delayed node and index_cnt
 	 * is updated now. So we needn't lock the delayed node.
 	 */
-	if (!delayed_node->index_cnt)
+	if (!delayed_node->index_cnt) {
+		btrfs_release_delayed_node(delayed_node);
 		return -EINVAL;
+	}
 
 	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
-	return ret;
+	btrfs_release_delayed_node(delayed_node);
+	return 0;
 }
 
 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
···
 					     inode->i_ctime.tv_sec);
 	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
 				      inode->i_ctime.tv_nsec);
+}
+
+int btrfs_fill_inode(struct inode *inode, u32 *rdev)
+{
+	struct btrfs_delayed_node *delayed_node;
+	struct btrfs_inode_item *inode_item;
+	struct btrfs_timespec *tspec;
+
+	delayed_node = btrfs_get_delayed_node(inode);
+	if (!delayed_node)
+		return -ENOENT;
+
+	mutex_lock(&delayed_node->mutex);
+	if (!delayed_node->inode_dirty) {
+		mutex_unlock(&delayed_node->mutex);
+		btrfs_release_delayed_node(delayed_node);
+		return -ENOENT;
+	}
+
+	inode_item = &delayed_node->inode_item;
+
+	inode->i_uid = btrfs_stack_inode_uid(inode_item);
+	inode->i_gid = btrfs_stack_inode_gid(inode_item);
+	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+	inode->i_mode = btrfs_stack_inode_mode(inode_item);
+	inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
+	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
+	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+	inode->i_rdev = 0;
+	*rdev = btrfs_stack_inode_rdev(inode_item);
+	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
+
+	tspec = btrfs_inode_atime(inode_item);
+	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
+	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+	tspec = btrfs_inode_mtime(inode_item);
+	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
+	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+	tspec = btrfs_inode_ctime(inode_item);
+	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
+	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+	inode->i_generation = BTRFS_I(inode)->generation;
+	BTRFS_I(inode)->index_cnt = (u64)-1;
+
+	mutex_unlock(&delayed_node->mutex);
+	btrfs_release_delayed_node(delayed_node);
+	return 0;
 }
 
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
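The delayed-inode change above hinges on a reference-counting contract: every successful btrfs_get_delayed_node() takes a reference that the caller must drop with btrfs_release_delayed_node() on every exit path, which is exactly what the btrfs_inode_delayed_dir_index_count() and btrfs_fill_inode() hunks now do. The standalone C sketch below models that get/use/release discipline with simplified, made-up types and helpers (delayed_node, get_node, put_node, read_index_cnt); it illustrates the pattern only and is not the kernel code itself.

/*
 * Minimal userspace sketch of the reference-counting discipline used by the
 * delayed-node code above: a lookup takes a reference, and every exit path
 * must release it. All names here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct delayed_node {
	int refs;			/* the kernel uses atomic_t; plain int here */
	unsigned long long index_cnt;
};

static struct delayed_node *get_node(struct delayed_node *cached)
{
	if (!cached)
		return NULL;		/* no delayed node: caller sees "not found" */
	cached->refs++;			/* reference handed to the caller */
	return cached;
}

static void put_node(struct delayed_node *node)
{
	if (--node->refs == 0)
		free(node);		/* last reference frees the node */
}

/* Mirrors the shape of btrfs_inode_delayed_dir_index_count(): release on every path. */
static int read_index_cnt(struct delayed_node *cached, unsigned long long *out)
{
	struct delayed_node *node = get_node(cached);

	if (!node)
		return -1;		/* -ENOENT in the kernel code */
	if (!node->index_cnt) {
		put_node(node);		/* the leak the hunk above fixes */
		return -2;		/* -EINVAL in the kernel code */
	}
	*out = node->index_cnt;
	put_node(node);
	return 0;
}

int main(void)
{
	struct delayed_node *n = calloc(1, sizeof(*n));
	unsigned long long cnt;

	n->refs = 1;			/* reference held by the "inode" cache */
	n->index_cnt = 42;
	if (read_index_cnt(n, &cnt) == 0)
		printf("index_cnt = %llu\n", cnt);
	put_node(n);			/* drop the cache reference */
	return 0;
}

Forgetting the put_node() on the early-return path is the same class of reference leak the -EINVAL branch in the hunk above used to have.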
+1
fs/btrfs/delayed-inode.h
···
 
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root, struct inode *inode);
+int btrfs_fill_inode(struct inode *inode, u32 *rdev);
 
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
+2 -2
fs/btrfs/extent-tree.c
···
 				     u64 num_bytes, u64 empty_size,
 				     u64 search_start, u64 search_end,
 				     u64 hint_byte, struct btrfs_key *ins,
-				     int data)
+				     u64 data)
 {
 	int ret = 0;
 	struct btrfs_root *root = orig_root->fs_info->extent_root;
···
 
 	space_info = __find_space_info(root->fs_info, data);
 	if (!space_info) {
-		printk(KERN_ERR "No space info for %d\n", data);
+		printk(KERN_ERR "No space info for %llu\n", data);
 		return -ENOSPC;
 	}
 
+6 -3
fs/btrfs/free-space-cache.c
···
 
 	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
 		info = rb_entry(node, struct btrfs_free_space, offset_index);
-		unlink_free_space(ctl, info);
-		kfree(info->bitmap);
-		kmem_cache_free(btrfs_free_space_cachep, info);
+		if (!info->bitmap) {
+			unlink_free_space(ctl, info);
+			kmem_cache_free(btrfs_free_space_cachep, info);
+		} else {
+			free_bitmap(ctl, info);
+		}
 		if (need_resched()) {
 			spin_unlock(&ctl->tree_lock);
 			cond_resched();
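The free-space-cache hunk works because bitmap entries carry shared bookkeeping (the total_bitmaps count named in the commit title) that a plain unlink-and-kfree bypasses, so they are now torn down through free_bitmap() instead. Below is a standalone sketch of the same idea with invented names (struct cache, free_bitmap_entry, cache_drain): when two kinds of entries live in one structure, each is freed through the helper that keeps the shared counters consistent.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cache with two entry kinds, loosely modelled on the
 * free-space cache: plain extents and bitmaps, plus a shared counter. */
struct entry {
	struct entry *next;
	unsigned char *bitmap;		/* NULL for a plain extent entry */
};

struct cache {
	struct entry *head;
	int total_bitmaps;		/* must stay in sync with the list */
};

static void unlink_entry(struct cache *c, struct entry *e)
{
	c->head = e->next;		/* simplified: e is always the head here */
}

/* Tear down a bitmap entry and keep the counter honest. */
static void free_bitmap_entry(struct cache *c, struct entry *e)
{
	unlink_entry(c, e);
	free(e->bitmap);
	c->total_bitmaps--;
	free(e);
}

static void cache_drain(struct cache *c)
{
	while (c->head) {
		struct entry *e = c->head;

		if (!e->bitmap) {	/* plain extent entry */
			unlink_entry(c, e);
			free(e);
		} else {		/* bitmap entry */
			free_bitmap_entry(c, e);
		}
	}
}

int main(void)
{
	struct cache c = { .head = NULL, .total_bitmaps = 0 };
	struct entry *bm = calloc(1, sizeof(*bm));

	bm->bitmap = calloc(1, 32);
	bm->next = c.head;
	c.head = bm;
	c.total_bitmaps++;

	cache_drain(&c);
	printf("total_bitmaps after drain: %d\n", c.total_bitmaps);	/* 0 */
	return 0;
}

Draining through the dedicated helper leaves total_bitmaps at zero, whereas freeing the bitmap directly would leave the counter stale.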
+11 -2
fs/btrfs/inode.c
···
 	int maybe_acls;
 	u32 rdev;
 	int ret;
+	bool filled = false;
+
+	ret = btrfs_fill_inode(inode, &rdev);
+	if (!ret)
+		filled = true;
 
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
···
 		goto make_bad;
 
 	leaf = path->nodes[0];
+
+	if (filled)
+		goto cache_acl;
+
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
 	if (!leaf->map_token)
···
 
 	BTRFS_I(inode)->index_cnt = (u64)-1;
 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
+cache_acl:
 	/*
 	 * try to precache a NULL acl entry for files that don't have
 	 * any xattrs or acls
···
 	}
 
 	btrfs_free_path(path);
-	inode_item = NULL;
 
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFREG:
···
 	inode_tree_add(inode);
 
 	trace_btrfs_inode_new(inode);
+	btrfs_set_inode_last_trans(trans, inode);
 
 	return inode;
 fail: