Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
Btrfs: fix balancing oops when invalidate_inode_pages2 returns EBUSY
Btrfs: correct error-handling zlib error handling
Btrfs: remove superfluous NULL pointer check in btrfs_rename()
Btrfs: make sure the async caching thread advances the key
Btrfs: fix btrfs_remove_from_free_space corner case

5 files changed, +92 -20
fs/btrfs/extent-tree.c | +17 -4
···
 
 	atomic_inc(&block_group->space_info->caching_threads);
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
-again:
-	/* need to make sure the commit_root doesn't disappear */
-	down_read(&fs_info->extent_commit_sem);
-
 	/*
 	 * We don't want to deadlock with somebody trying to allocate a new
 	 * extent for the extent root while also trying to search the extent
···
 	key.objectid = last;
 	key.offset = 0;
 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+again:
+	/* need to make sure the commit_root doesn't disappear */
+	down_read(&fs_info->extent_commit_sem);
+
 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto err;
···
 
 		if (need_resched() ||
 		    btrfs_transaction_in_commit(fs_info)) {
+			leaf = path->nodes[0];
+
+			/* this shouldn't happen, but if the
+			 * leaf is empty just move on.
+			 */
+			if (btrfs_header_nritems(leaf) == 0)
+				break;
+			/*
+			 * we need to copy the key out so that
+			 * we are sure the next search advances
+			 * us forward in the btree.
+			 */
+			btrfs_item_key_to_cpu(leaf, &key, 0);
 			btrfs_release_path(fs_info->extent_root, path);
 			up_read(&fs_info->extent_commit_sem);
 			schedule_timeout(1);
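
The fix has two parts: the `again:` label moves below the key setup, and before the async caching thread drops its path and extent_commit_sem it copies the first key of the current leaf into `key`, so the next btrfs_search_slot() resumes where the scan stopped instead of restarting from `last`. Below is a minimal userspace sketch of that "save the cursor before dropping the search position" pattern; the sorted array, search_slot() and cursor are made-up stand-ins for the extent btree machinery, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* index of the first key >= wanted, or n if none (btrfs_search_slot-ish) */
static size_t search_slot(const uint64_t *keys, size_t n, uint64_t wanted)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (keys[i] >= wanted)
			break;
	return i;
}

int main(void)
{
	uint64_t keys[] = { 10, 20, 30, 40, 50, 60, 70, 80 };
	size_t n = sizeof(keys) / sizeof(keys[0]);
	uint64_t cursor = 0;	/* the saved search key */
	int processed = 0;

	while (1) {
		size_t slot = search_slot(keys, n, cursor);
		int dropped = 0;

		/* walk forward from the slot the search gave us */
		while (slot < n) {
			printf("processing key %llu\n",
			       (unsigned long long)keys[slot]);
			processed++;

			/*
			 * Every other key, pretend we must drop the search
			 * position (the kernel releases the path and
			 * extent_commit_sem on need_resched() or when a
			 * transaction is committing).  Saving the key we
			 * reached *before* dropping is what keeps the next
			 * search_slot() moving forward instead of
			 * restarting from the beginning.
			 */
			if (processed % 2 == 0) {
				cursor = keys[slot] + 1;
				dropped = 1;
				break;
			}
			slot++;
		}

		if (!dropped)
			break;	/* walked off the end: scan complete */
	}
	printf("processed %d keys\n", processed);
	return 0;
}
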
fs/btrfs/free-space-cache.c | +64 -9
···
 			      u64 *offset, u64 *bytes)
 {
 	u64 end;
+	u64 search_start, search_bytes;
+	int ret;
 
 again:
 	end = bitmap_info->offset +
 		(u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+
+	/*
+	 * XXX - this can go away after a few releases.
+	 *
+	 * since the only user of btrfs_remove_free_space is the tree logging
+	 * stuff, and the only way to test that is under crash conditions, we
+	 * want to have this debug stuff here just in case somethings not
+	 * working. Search the bitmap for the space we are trying to use to
+	 * make sure its actually there. If its not there then we need to stop
+	 * because something has gone wrong.
+	 */
+	search_start = *offset;
+	search_bytes = *bytes;
+	ret = search_bitmap(block_group, bitmap_info, &search_start,
+			    &search_bytes);
+	BUG_ON(ret < 0 || search_start != *offset);
 
 	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
 		bitmap_clear_bits(block_group, bitmap_info, *offset,
···
 	}
 
 	if (*bytes) {
+		struct rb_node *next = rb_next(&bitmap_info->offset_index);
 		if (!bitmap_info->bytes) {
 			unlink_free_space(block_group, bitmap_info);
 			kfree(bitmap_info->bitmap);
···
 			recalculate_thresholds(block_group);
 		}
 
-		bitmap_info = tree_search_offset(block_group,
-						 offset_to_bitmap(block_group,
-								  *offset),
-						 1, 0);
-		if (!bitmap_info)
+		/*
+		 * no entry after this bitmap, but we still have bytes to
+		 * remove, so something has gone wrong.
+		 */
+		if (!next)
 			return -EINVAL;
 
+		bitmap_info = rb_entry(next, struct btrfs_free_space,
+				       offset_index);
+
+		/*
+		 * if the next entry isn't a bitmap we need to return to let the
+		 * extent stuff do its work.
+		 */
 		if (!bitmap_info->bitmap)
+			return -EAGAIN;
+
+		/*
+		 * Ok the next item is a bitmap, but it may not actually hold
+		 * the information for the rest of this free space stuff, so
+		 * look for it, and if we don't find it return so we can try
+		 * everything over again.
+		 */
+		search_start = *offset;
+		search_bytes = *bytes;
+		ret = search_bitmap(block_group, bitmap_info, &search_start,
+				    &search_bytes);
+		if (ret < 0 || search_start != *offset)
 			return -EAGAIN;
 
 		goto again;
···
 again:
 	info = tree_search_offset(block_group, offset, 0, 0);
 	if (!info) {
-		WARN_ON(1);
-		goto out_lock;
+		/*
+		 * oops didn't find an extent that matched the space we wanted
+		 * to remove, look for a bitmap instead
+		 */
+		info = tree_search_offset(block_group,
+					  offset_to_bitmap(block_group, offset),
+					  1, 0);
+		if (!info) {
+			WARN_ON(1);
+			goto out_lock;
+		}
 	}
 
 	if (info->bytes < bytes && rb_next(&info->offset_index)) {
···
 	if (cluster->block_group != block_group)
 		goto out;
 
-	entry = tree_search_offset(block_group, search_start, 0, 0);
-
+	/*
+	 * search_start is the beginning of the bitmap, but at some point it may
+	 * be a good idea to point to the actual start of the free area in the
+	 * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
+	 * to 1 to make sure we get the bitmap entry
+	 */
+	entry = tree_search_offset(block_group,
+				   offset_to_bitmap(block_group, search_start),
+				   1, 0);
 	if (!entry || !entry->bitmap)
 		goto out;
 
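
Two ideas drive this fix: take the neighbouring entry with rb_next() *before* the exhausted bitmap entry can be unlinked and freed, rather than re-searching the tree by offset afterwards, and use search_bitmap() to verify the range being removed is really present, returning -EAGAIN when the next entry turns out not to be a bitmap. The self-contained sketch below only mirrors the first idea; the sorted linked list and the helpers remove_range()/mkentry() are invented for illustration and are not the real free-space cache.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct entry {
	uint64_t offset;	/* start of this free range */
	uint64_t bytes;		/* length of this free range */
	struct entry *next;	/* next range in offset order */
};

static struct entry *mkentry(uint64_t offset, uint64_t bytes,
			     struct entry *next)
{
	struct entry *e = malloc(sizeof(*e));

	assert(e);
	e->offset = offset;
	e->bytes = bytes;
	e->next = next;
	return e;
}

/*
 * Remove [offset, offset + bytes) from the free list; the range may span
 * several consecutive entries.  For simplicity the sketch assumes the range
 * starts exactly on an entry boundary.  The point mirrored from the kernel
 * fix: the successor ("next", rb_next() in the real code) is taken before
 * the current entry can be unlinked and freed.
 */
static int remove_range(struct entry **link, uint64_t offset, uint64_t bytes)
{
	struct entry *cur;

	while (bytes && (cur = *link) != NULL) {
		struct entry *next = cur->next;	/* saved before any free() */

		if (cur->offset > offset)
			break;			/* gap: range isn't all free */

		if (cur->offset == offset) {
			uint64_t chunk = bytes < cur->bytes ? bytes : cur->bytes;

			cur->offset += chunk;
			cur->bytes -= chunk;
			offset += chunk;
			bytes -= chunk;

			if (!cur->bytes) {
				*link = next;	/* unlink the emptied entry */
				free(cur);
				continue;	/* carry on with "next" */
			}
		}
		link = &cur->next;
	}
	return bytes ? -1 : 0;	/* leftover bytes: range wasn't all free */
}

int main(void)
{
	/* three contiguous free ranges: [0,4K) [4K,8K) [8K,12K) */
	struct entry *head = mkentry(0, 4096,
				     mkentry(4096, 4096,
					     mkentry(8192, 4096, NULL)));
	struct entry *e;

	/* take 6K at 4K: empties the middle entry and trims the last one */
	if (remove_range(&head, 4096, 6144))
		printf("range was not entirely free\n");

	for (e = head; e; e = e->next)
		printf("free: offset=%llu bytes=%llu\n",
		       (unsigned long long)e->offset,
		       (unsigned long long)e->bytes);
	return 0;
}
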
fs/btrfs/inode.c | +1 -2
···
 	 * and the replacement file is large. Start IO on it now so
 	 * we don't add too much work to the end of the transaction
 	 */
-	if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
-	    new_inode->i_size &&
+	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
 		filemap_flush(old_inode->i_mapping);
 
fs/btrfs/relocation.c | +7 -2
···
 	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
 
 	/* make sure the dirty trick played by the caller work */
-	ret = invalidate_inode_pages2_range(inode->i_mapping,
-					    first_index, last_index);
+	while (1) {
+		ret = invalidate_inode_pages2_range(inode->i_mapping,
+						    first_index, last_index);
+		if (ret != -EBUSY)
+			break;
+		schedule_timeout(HZ/10);
+	}
 	if (ret)
 		goto out_unlock;
 
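
invalidate_inode_pages2_range() can return -EBUSY when some pages cannot be invalidated yet; the old code treated that as a hard failure, while the fix sleeps briefly and retries, still bailing out on any other error. A tiny userspace sketch of the same retry shape, assuming a made-up try_invalidate() in place of the kernel call and nanosleep() in place of schedule_timeout(HZ/10):

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int attempts;

/*
 * Stand-in for invalidate_inode_pages2_range(): reports -EBUSY while the
 * pages are still busy, then succeeds.
 */
static int try_invalidate(void)
{
	return ++attempts < 3 ? -EBUSY : 0;
}

int main(void)
{
	struct timespec backoff = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	int ret;

	while (1) {
		ret = try_invalidate();
		if (ret != -EBUSY)
			break;	/* success, or an error worth reporting */
		/* roughly what schedule_timeout(HZ/10) does in the patch */
		nanosleep(&backoff, NULL);
	}

	if (ret)
		fprintf(stderr, "invalidate failed: %d\n", ret);
	else
		printf("succeeded after %d attempts\n", attempts);
	return ret ? 1 : 0;
}
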
fs/btrfs/zlib.c | +3 -3
···
 	*total_in = 0;
 
 	workspace = find_zlib_workspace();
-	if (!workspace)
+	if (IS_ERR(workspace))
 		return -1;
 
 	if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
···
 	char *kaddr;
 
 	workspace = find_zlib_workspace();
-	if (!workspace)
+	if (IS_ERR(workspace))
 		return -ENOMEM;
 
 	data_in = kmap(pages_in[page_in_index]);
···
 		return -ENOMEM;
 
 	workspace = find_zlib_workspace();
-	if (!workspace)
+	if (IS_ERR(workspace))
 		return -ENOMEM;
 
 	workspace->inf_strm.next_in = data_in;
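
find_zlib_workspace() reports failure through the kernel's ERR_PTR() convention rather than by returning NULL, so the old `if (!workspace)` tests could never fire and an error pointer could slip through to the zlib setup below. The sketch that follows is a userspace re-implementation of the ERR_PTR/IS_ERR/PTR_ERR helpers (the real ones live in include/linux/err.h) plus a made-up find_workspace(); it only illustrates why a NULL check misses an error-encoded pointer.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/*
 * Userspace sketch of the kernel convention: errors are encoded as pointer
 * values in the last page of the address space, so they are non-NULL and a
 * "!ptr" check misses them.
 */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for find_zlib_workspace() failing to allocate */
static void *find_workspace(void)
{
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *workspace = find_workspace();

	if (!workspace)
		printf("NULL check: looks fine (this is the bug)\n");

	if (IS_ERR(workspace))
		printf("IS_ERR check: failed with %ld\n", PTR_ERR(workspace));

	return 0;
}
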