Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
Btrfs: fix possible panic on unmount
Btrfs: deal with NULL acl sent to btrfs_set_acl
Btrfs: fix regression in orphan cleanup
Btrfs: Fix race in btrfs_mark_extent_written
Btrfs, fix memory leaks in error paths
Btrfs: align offsets for btrfs_ordered_update_i_size
btrfs: fix missing last-entry in readdir(3)

+125 -41
+7 -5
fs/btrfs/acl.c
···
112 112 	switch (type) {
113 113 	case ACL_TYPE_ACCESS:
114 114 		mode = inode->i_mode;
115     -		ret = posix_acl_equiv_mode(acl, &mode);
116     -		if (ret < 0)
117     -			return ret;
118     -		ret = 0;
119     -		inode->i_mode = mode;
120 115 		name = POSIX_ACL_XATTR_ACCESS;
    116 +		if (acl) {
    117 +			ret = posix_acl_equiv_mode(acl, &mode);
    118 +			if (ret < 0)
    119 +				return ret;
    120 +			inode->i_mode = mode;
    121 +		}
    122 +		ret = 0;
121 123 		break;
122 124 	case ACL_TYPE_DEFAULT:
123 125 		if (!S_ISDIR(inode->i_mode))
+19 -13
fs/btrfs/extent-tree.c
···
  83   83 	return (cache->flags & bits) == bits;
  84   84 }
  85   85 
       86 +void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
       87 +{
       88 +	atomic_inc(&cache->count);
       89 +}
       90 +
       91 +void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
       92 +{
       93 +	if (atomic_dec_and_test(&cache->count))
       94 +		kfree(cache);
       95 +}
       96 +
  86   97 /*
  87   98  * this adds the block group to the fs_info rb tree for the block group
  88   99  * cache
···
 167  156 		}
 168  157 	}
 169  158 	if (ret)
 170       -		atomic_inc(&ret->count);
      159 +		btrfs_get_block_group(ret);
 171  160 	spin_unlock(&info->block_group_cache_lock);
 172  161 
 173  162 	return ret;
···
 418  407 
 419  408 	put_caching_control(caching_ctl);
 420  409 	atomic_dec(&block_group->space_info->caching_threads);
      410 +	btrfs_put_block_group(block_group);
      411 +
 421  412 	return 0;
 422  413 }
 423  414 
···
 460  447 	up_write(&fs_info->extent_commit_sem);
 461  448 
 462  449 	atomic_inc(&cache->space_info->caching_threads);
      450 +	btrfs_get_block_group(cache);
 463  451 
 464  452 	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
 465  453 			  cache->key.objectid);
···
 498  484 	cache = block_group_cache_tree_search(info, bytenr, 1);
 499  485 
 500  486 	return cache;
 501       -}
 502       -
 503       -void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
 504       -{
 505       -	if (atomic_dec_and_test(&cache->count))
 506       -		kfree(cache);
 507  487 }
 508  488 
 509  489 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
···
2590 2582 	if (node) {
2591 2583 		cache = rb_entry(node, struct btrfs_block_group_cache,
2592 2584 				 cache_node);
2593      -		atomic_inc(&cache->count);
     2585 +		btrfs_get_block_group(cache);
2594 2586 	} else
2595 2587 		cache = NULL;
2596 2588 	spin_unlock(&root->fs_info->block_group_cache_lock);
···
4235 4227 		u64 offset;
4236 4228 		int cached;
4237 4229 
4238      -		atomic_inc(&block_group->count);
     4230 +		btrfs_get_block_group(block_group);
4239 4231 		search_start = block_group->key.objectid;
4240 4232 
4241 4233 have_block_group:
···
4323 4315 
4324 4316 				btrfs_put_block_group(block_group);
4325 4317 				block_group = last_ptr->block_group;
4326      -				atomic_inc(&block_group->count);
     4318 +				btrfs_get_block_group(block_group);
4327 4319 				spin_unlock(&last_ptr->lock);
4328 4320 				spin_unlock(&last_ptr->refill_lock);
4329 4321 
···
7403 7395 			wait_block_group_cache_done(block_group);
7404 7396 
7405 7397 		btrfs_remove_free_space_cache(block_group);
7406      -
7407      -		WARN_ON(atomic_read(&block_group->count) != 1);
7408      -		kfree(block_group);
     7398 +		btrfs_put_block_group(block_group);
7409 7399 
7410 7400 		spin_lock(&info->block_group_cache_lock);
7411 7401 	}
+80 -20
fs/btrfs/file.c
···
506 506 }
507 507 
508 508 static int extent_mergeable(struct extent_buffer *leaf, int slot,
509     -			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
    509 +			    u64 objectid, u64 bytenr, u64 orig_offset,
    510 +			    u64 *start, u64 *end)
510 511 {
511 512 	struct btrfs_file_extent_item *fi;
512 513 	struct btrfs_key key;
···
523 522 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
524 523 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
525 524 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
    525 +	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
526 526 	    btrfs_file_extent_compression(leaf, fi) ||
527 527 	    btrfs_file_extent_encryption(leaf, fi) ||
528 528 	    btrfs_file_extent_other_encoding(leaf, fi))
···
563 561 	u64 split;
564 562 	int del_nr = 0;
565 563 	int del_slot = 0;
    564 +	int recow;
566 565 	int ret;
567 566 
568 567 	btrfs_drop_extent_cache(inode, start, end - 1, 0);
···
571 568 	path = btrfs_alloc_path();
572 569 	BUG_ON(!path);
573 570 again:
    571 +	recow = 0;
574 572 	split = start;
575 573 	key.objectid = inode->i_ino;
576 574 	key.type = BTRFS_EXTENT_DATA_KEY;
···
595 591 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
596 592 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
597 593 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
    594 +	memcpy(&new_key, &key, sizeof(new_key));
    595 +
    596 +	if (start == key.offset && end < extent_end) {
    597 +		other_start = 0;
    598 +		other_end = start;
    599 +		if (extent_mergeable(leaf, path->slots[0] - 1,
    600 +				     inode->i_ino, bytenr, orig_offset,
    601 +				     &other_start, &other_end)) {
    602 +			new_key.offset = end;
    603 +			btrfs_set_item_key_safe(trans, root, path, &new_key);
    604 +			fi = btrfs_item_ptr(leaf, path->slots[0],
    605 +					    struct btrfs_file_extent_item);
    606 +			btrfs_set_file_extent_num_bytes(leaf, fi,
    607 +							extent_end - end);
    608 +			btrfs_set_file_extent_offset(leaf, fi,
    609 +						     end - orig_offset);
    610 +			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
    611 +					    struct btrfs_file_extent_item);
    612 +			btrfs_set_file_extent_num_bytes(leaf, fi,
    613 +							end - other_start);
    614 +			btrfs_mark_buffer_dirty(leaf);
    615 +			goto out;
    616 +		}
    617 +	}
    618 +
    619 +	if (start > key.offset && end == extent_end) {
    620 +		other_start = end;
    621 +		other_end = 0;
    622 +		if (extent_mergeable(leaf, path->slots[0] + 1,
    623 +				     inode->i_ino, bytenr, orig_offset,
    624 +				     &other_start, &other_end)) {
    625 +			fi = btrfs_item_ptr(leaf, path->slots[0],
    626 +					    struct btrfs_file_extent_item);
    627 +			btrfs_set_file_extent_num_bytes(leaf, fi,
    628 +							start - key.offset);
    629 +			path->slots[0]++;
    630 +			new_key.offset = start;
    631 +			btrfs_set_item_key_safe(trans, root, path, &new_key);
    632 +
    633 +			fi = btrfs_item_ptr(leaf, path->slots[0],
    634 +					    struct btrfs_file_extent_item);
    635 +			btrfs_set_file_extent_num_bytes(leaf, fi,
    636 +							other_end - start);
    637 +			btrfs_set_file_extent_offset(leaf, fi,
    638 +						     start - orig_offset);
    639 +			btrfs_mark_buffer_dirty(leaf);
    640 +			goto out;
    641 +		}
    642 +	}
598 643 
599 644 	while (start > key.offset || end < extent_end) {
600 645 		if (key.offset == start)
601 646 			split = end;
602 647 
603     -		memcpy(&new_key, &key, sizeof(new_key));
604 648 		new_key.offset = split;
605 649 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
606 650 		if (ret == -EAGAIN) {
···
683 631 			path->slots[0]--;
684 632 			extent_end = end;
685 633 		}
    634 +		recow = 1;
686 635 	}
687     -
688     -	fi = btrfs_item_ptr(leaf, path->slots[0],
689     -			    struct btrfs_file_extent_item);
690 636 
691 637 	other_start = end;
692 638 	other_end = 0;
693     -	if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
694     -			     bytenr, &other_start, &other_end)) {
    639 +	if (extent_mergeable(leaf, path->slots[0] + 1,
    640 +			     inode->i_ino, bytenr, orig_offset,
    641 +			     &other_start, &other_end)) {
    642 +		if (recow) {
    643 +			btrfs_release_path(root, path);
    644 +			goto again;
    645 +		}
695 646 		extent_end = other_end;
696 647 		del_slot = path->slots[0] + 1;
697 648 		del_nr++;
···
705 650 	}
706 651 	other_start = 0;
707 652 	other_end = start;
708     -	if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
709     -			     bytenr, &other_start, &other_end)) {
    653 +	if (extent_mergeable(leaf, path->slots[0] - 1,
    654 +			     inode->i_ino, bytenr, orig_offset,
    655 +			     &other_start, &other_end)) {
    656 +		if (recow) {
    657 +			btrfs_release_path(root, path);
    658 +			goto again;
    659 +		}
710 660 		key.offset = other_start;
711 661 		del_slot = path->slots[0];
712 662 		del_nr++;
···
720 660 					inode->i_ino, orig_offset);
721 661 		BUG_ON(ret);
722 662 	}
    663 +	fi = btrfs_item_ptr(leaf, path->slots[0],
    664 +			    struct btrfs_file_extent_item);
723 665 	if (del_nr == 0) {
724 666 		btrfs_set_file_extent_type(leaf, fi,
725 667 					   BTRFS_FILE_EXTENT_REG);
726 668 		btrfs_mark_buffer_dirty(leaf);
727     -		goto out;
    669 +	} else {
    670 +		btrfs_set_file_extent_type(leaf, fi,
    671 +					   BTRFS_FILE_EXTENT_REG);
    672 +		btrfs_set_file_extent_num_bytes(leaf, fi,
    673 +						extent_end - key.offset);
    674 +		btrfs_mark_buffer_dirty(leaf);
    675 +
    676 +		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
    677 +		BUG_ON(ret);
728 678 	}
729     -
730     -	fi = btrfs_item_ptr(leaf, del_slot - 1,
731     -			    struct btrfs_file_extent_item);
732     -	btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
733     -	btrfs_set_file_extent_num_bytes(leaf, fi,
734     -					extent_end - key.offset);
735     -	btrfs_mark_buffer_dirty(leaf);
736     -
737     -	ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
738     -	BUG_ON(ret);
739 679 out:
740 680 	btrfs_free_path(path);
741 681 	return 0;
+11 -1
fs/btrfs/inode.c
···
3796 3796 
3797 3797 	if (location.type == BTRFS_INODE_ITEM_KEY) {
3798 3798 		inode = btrfs_iget(dir->i_sb, &location, root);
     3799 +		if (unlikely(root->clean_orphans) &&
     3800 +		    !(inode->i_sb->s_flags & MS_RDONLY)) {
     3801 +			down_read(&root->fs_info->cleanup_work_sem);
     3802 +			btrfs_orphan_cleanup(root);
     3803 +			up_read(&root->fs_info->cleanup_work_sem);
     3804 +		}
3799 3805 		return inode;
3800 3806 	}
3801 3807 
···
4001 3995 
4002 3996 	/* Reached end of directory/root. Bump pos past the last item. */
4003 3997 	if (key_type == BTRFS_DIR_INDEX_KEY)
4004      -		filp->f_pos = INT_LIMIT(off_t);
     3998 +		/*
     3999 +		 * 32-bit glibc will use getdents64, but then strtol -
     4000 +		 * so the last number we can serve is this.
     4001 +		 */
     4002 +		filp->f_pos = 0x7fffffff;
4005 4003 	else
4006 4004 		filp->f_pos++;
4007 4005 nopos:
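
Side note on the readdir cap above: per the in-code comment, 32-bit glibc parses the getdents64 offset back through strtol(), so the largest offset that survives the round trip is 0x7fffffff; the old INT_LIMIT(off_t) value could not be represented. A minimal user-space sketch of that strtol() limit, assuming a build with a 32-bit long:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* 2^63 - 1: an offset in the old INT_LIMIT(off_t) range */
            const char *old_pos = "9223372036854775807";
            long parsed;

            errno = 0;
            parsed = strtol(old_pos, NULL, 10);

            /* With a 32-bit long, strtol() saturates at LONG_MAX (0x7fffffff)
             * and sets ERANGE, so the original offset is lost. */
            printf("parsed=%ld errno=%s\n", parsed,
                   errno == ERANGE ? "ERANGE" : "0");
            return 0;
    }
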
+2
fs/btrfs/ordered-data.c
···
626 626 
627 627 	if (ordered)
628 628 		offset = entry_end(ordered);
    629 +	else
    630 +		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
629 631 
630 632 	mutex_lock(&tree->mutex);
631 633 	disk_i_size = BTRFS_I(inode)->disk_i_size;
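
For context on the ALIGN() call above: it rounds the offset up to the next multiple of the sector size, matching the commit's goal of aligned offsets for btrfs_ordered_update_i_size. A minimal sketch of the same power-of-two round-up that the kernel's ALIGN() macro performs:

    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    static unsigned long long align_up(unsigned long long x, unsigned long long a)
    {
            return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            /* with a 4096-byte sectorsize, offset 5000 rounds up to 8192 */
            printf("%llu\n", align_up(5000, 4096));
            return 0;
    }
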
+3 -1
fs/btrfs/relocation.c
···
3281 3281 		return -ENOMEM;
3282 3282 
3283 3283 	path = btrfs_alloc_path();
3284      -	if (!path)
     3284 +	if (!path) {
     3285 +		kfree(cluster);
3285 3286 		return -ENOMEM;
     3287 +	}
3286 3288 
3287 3289 	rc->extents_found = 0;
3288 3290 	rc->extents_skipped = 0;
+3 -1
fs/btrfs/volumes.c
···
2649 2649 	em = lookup_extent_mapping(em_tree, logical, *length);
2650 2650 	read_unlock(&em_tree->lock);
2651 2651 
2652      -	if (!em && unplug_page)
     2652 +	if (!em && unplug_page) {
     2653 +		kfree(multi);
2653 2654 		return 0;
     2655 +	}
2654 2656 
2655 2657 	if (!em) {
2656 2658 		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
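
General note on the two leak fixes above (relocation.c and volumes.c): both add a kfree() before an early return that previously skipped cleanup. The same effect is often achieved in kernel code with a goto-based unwind; a hypothetical sketch of that idiom, with names invented for illustration:

    #include <stdlib.h>

    /* Hypothetical sketch of the goto-unwind idiom: each failure jumps to a
     * label that frees everything allocated so far, so no early return leaks. */
    static int do_setup(void **out_a, void **out_b)
    {
            void *a = malloc(64);
            void *b;

            if (!a)
                    return -1;

            b = malloc(64);
            if (!b)
                    goto err_free_a;

            *out_a = a;
            *out_b = b;
            return 0;

    err_free_a:
            free(a);
            return -1;
    }
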