Merge tag 'for-5.8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
"Highlights:

- speed up dead root detection during orphan cleanup, e.g. when there
are many deleted subvolumes waiting to be cleaned, the trees are
now looked up in a radix tree instead of an O(N^2) search

- snapshot creation with inherited qgroup will mark the qgroup
inconsistent, requires a rescan

- send will emit file capabilities after chown; this produces a
stream that does not need postprocessing to set the capabilities
again

- direct io ported to iomap infrastructure, cleaned up and simplified
code, notably removing last use of struct buffer_head in btrfs code

Core changes:

- factor out backreference iteration, to be used by ordinary
backreferences and relocation code

- improved global block reserve utilization
* better logic to serialize requests
* increased maximum available for unlink
* improved handling on large pages (64K)

- direct io cleanups and fixes
* simplify layering, where cloned bios were unnecessarily created
for some cases
* error handling fixes (submit, endio)
* remove repair worker thread, used to avoid deadlocks during
repair

- refactored block group reading code, preparatory work for new type
of block group storage that should improve mount time on large
filesystems

Cleanups:

- cleaned up (and slightly sped up) set/get helpers for metadata data
structure members

- root bit REF_COWS got renamed to SHAREABLE to reflect that the
blocks of the tree get shared either among subvolumes or with the
relocation trees

Fixes:

- when subvolume deletion fails due to ENOSPC, the filesystem is not
turned read-only

- device scan deals with devices from other filesystems that changed
ownership due to overwrite (mkfs)

- fix a race between scrub and block group removal/allocation

- fix long-standing bug of a runaway balance operation printing the
same line to the syslog, caused by a stale status bit on a reloc
tree that prevented progress

- fix corrupt log due to concurrent fsync of inodes with shared
extents

- fix space underflow for NODATACOW and buffered writes when it for
some reason needs to fall back to COW mode"

* tag 'for-5.8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (133 commits)
btrfs: fix space_info bytes_may_use underflow during space cache writeout
btrfs: fix space_info bytes_may_use underflow after nocow buffered write
btrfs: fix wrong file range cleanup after an error filling dealloc range
btrfs: remove redundant local variable in read_block_for_search
btrfs: open code key_search
btrfs: split btrfs_direct_IO to read and write part
btrfs: remove BTRFS_INODE_READDIO_NEED_LOCK
fs: remove dio_end_io()
btrfs: switch to iomap_dio_rw() for dio
iomap: remove lockdep_assert_held()
iomap: add a filesystem hook for direct I/O bio submission
fs: export generic_file_buffered_read()
btrfs: turn space cache writeout failure messages into debug messages
btrfs: include error on messages about failure to write space/inode caches
btrfs: remove useless 'fail_unlock' label from btrfs_csum_file_blocks()
btrfs: do not ignore error from btrfs_next_leaf() when inserting checksums
btrfs: make checksum item extension more efficient
btrfs: fix corrupt log due to concurrent fsync of inodes with shared extents
btrfs: unexport btrfs_compress_set_level()
btrfs: simplify iget helpers
...
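The iomap switch referenced above ("btrfs: switch to iomap_dio_rw() for dio",
"iomap: add a filesystem hook for direct I/O bio submission") boils down to
the filesystem letting iomap build the bios and supplying a submit_io hook to
submit them itself. A minimal sketch against the 5.8-era iomap API follows;
the example_* names are illustrative placeholders, not the literal
fs/btrfs/inode.c symbols:

	/* Sketch: wiring direct I/O through iomap as of 5.8. */
	static const struct iomap_ops example_iomap_ops = {
		/* ->iomap_begin/->iomap_end would map file ranges here */
	};

	static blk_qc_t example_submit_direct(struct inode *inode,
					      struct iomap *iomap,
					      struct bio *bio,
					      loff_t file_offset)
	{
		/* A real filesystem adds checksumming/repair hooks here */
		return submit_bio(bio);
	}

	static const struct iomap_dio_ops example_dio_ops = {
		.submit_io	= example_submit_direct, /* hook added by this series */
	};

	static ssize_t example_direct_rw(struct kiocb *iocb, struct iov_iter *iter)
	{
		/* The last argument requests synchronous completion when true */
		return iomap_dio_rw(iocb, iter, &example_iomap_ops,
				    &example_dio_ops, is_sync_kiocb(iocb));
	}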

+3224 -3045
+1
.clang-format
@@
   - 'ax25_uid_for_each'
   - '__bio_for_each_bvec'
   - 'bio_for_each_bvec'
+  - 'bio_for_each_bvec_all'
   - 'bio_for_each_integrity_vec'
   - '__bio_for_each_segment'
   - 'bio_for_each_segment'
+2
Documentation/block/biovecs.rst
@@
 ::

 	bio_for_each_segment_all()
+	bio_for_each_bvec_all()
 	bio_first_bvec_all()
 	bio_first_page_all()
 	bio_last_bvec_all()
@@
 bio_vec' will contain a multi-page IO vector during the iteration::

 	bio_for_each_bvec()
+	bio_for_each_bvec_all()
 	rq_for_each_bvec()
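As a quick illustration of the new helper documented above (a hedged sketch,
not code from the series): unlike bio_for_each_bvec(), which advances an
iterator copy, the _all variant walks the bio's own bvec table directly, so
it may only be used by the bio's owner:

	/* Sketch: sum the bytes covered by each multi-page segment of a bio
	 * we own. bio_for_each_bvec_all() yields pointers into the bio's
	 * bvec table, so no iterator state is involved. */
	static unsigned int example_bvec_bytes(struct bio *bio)
	{
		struct bio_vec *bvec;
		unsigned int total = 0;
		int i;

		bio_for_each_bvec_all(bvec, bio, i)
			total += bvec->bv_len;	/* one bvec may span pages */
		return total;
	}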
+1
fs/btrfs/Kconfig
@@
 	select LZO_DECOMPRESS
 	select ZSTD_COMPRESS
 	select ZSTD_DECOMPRESS
+	select FS_IOMAP
 	select RAID6_PQ
 	select XOR_BLOCKS
 	select SRCU
+831 -6
fs/btrfs/backref.c
@@
 #include "transaction.h"
 #include "delayed-ref.h"
 #include "locking.h"
+#include "misc.h"

 /* Just an arbitrary number so we can be sure this happened */
 #define BACKREF_FOUND_SHARED 6
@@
 			  const u64 *extent_item_pos, bool ignore_offset)
 {
 	struct btrfs_root *root;
-	struct btrfs_key root_key;
 	struct extent_buffer *eb;
 	int ret = 0;
 	int root_level;
 	int level = ref->level;
 	struct btrfs_key search_key = ref->key_for_search;

-	root_key.objectid = ref->root_id;
-	root_key.type = BTRFS_ROOT_ITEM_KEY;
-	root_key.offset = (u64)-1;
-
-	root = btrfs_get_fs_root(fs_info, &root_key, false);
+	root = btrfs_get_fs_root(fs_info, ref->root_id, false);
 	if (IS_ERR(root)) {
 		ret = PTR_ERR(root);
 		goto out_free;
@@
 		return;
 	kvfree(ipath->fspath);
 	kfree(ipath);
+}
+
+struct btrfs_backref_iter *btrfs_backref_iter_alloc(
+		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
+{
+	struct btrfs_backref_iter *ret;
+
+	ret = kzalloc(sizeof(*ret), gfp_flag);
+	if (!ret)
+		return NULL;
+
+	ret->path = btrfs_alloc_path();
+	if (!ret->path) {
+		kfree(ret);
+		return NULL;
+	}
+
+	/* Current backref iterator only supports iteration in commit root */
+	ret->path->search_commit_root = 1;
+	ret->path->skip_locking = 1;
+	ret->fs_info = fs_info;
+
+	return ret;
+}
+
+int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
+{
+	struct btrfs_fs_info *fs_info = iter->fs_info;
+	struct btrfs_path *path = iter->path;
+	struct btrfs_extent_item *ei;
+	struct btrfs_key key;
+	int ret;
+
+	key.objectid = bytenr;
+	key.type = BTRFS_METADATA_ITEM_KEY;
+	key.offset = (u64)-1;
+	iter->bytenr = bytenr;
+
+	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+	if (ret < 0)
+		return ret;
+	if (ret == 0) {
+		ret = -EUCLEAN;
+		goto release;
+	}
+	if (path->slots[0] == 0) {
+		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+		ret = -EUCLEAN;
+		goto release;
+	}
+	path->slots[0]--;
+
+	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
+	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
+		ret = -ENOENT;
+		goto release;
+	}
+	memcpy(&iter->cur_key, &key, sizeof(key));
+	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
+						    path->slots[0]);
+	iter->end_ptr = (u32)(iter->item_ptr +
+			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
+	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+			    struct btrfs_extent_item);
+
+	/*
+	 * Only support iteration on tree backref yet.
+	 *
+	 * This is an extra precaution for non skinny-metadata, where
+	 * EXTENT_ITEM is also used for tree blocks, that we can only use
+	 * extent flags to determine if it's a tree block.
+	 */
+	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
+		ret = -ENOTSUPP;
+		goto release;
+	}
+	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
+
+	/* If there is no inline backref, go search for keyed backref */
+	if (iter->cur_ptr >= iter->end_ptr) {
+		ret = btrfs_next_item(fs_info->extent_root, path);
+
+		/* No inline nor keyed ref */
+		if (ret > 0) {
+			ret = -ENOENT;
+			goto release;
+		}
+		if (ret < 0)
+			goto release;
+
+		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
+				      path->slots[0]);
+		if (iter->cur_key.objectid != bytenr ||
+		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
+		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
+			ret = -ENOENT;
+			goto release;
+		}
+		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
+							   path->slots[0]);
+		iter->item_ptr = iter->cur_ptr;
+		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
+				      path->nodes[0], path->slots[0]));
+	}
+
+	return 0;
+release:
+	btrfs_backref_iter_release(iter);
+	return ret;
+}
+
+/*
+ * Go to the next backref item of current bytenr, can be either inlined or
+ * keyed.
+ *
+ * Caller needs to check whether it's inline ref or not by iter->cur_key.
+ *
+ * Return 0 if we get next backref without problem.
+ * Return >0 if there is no extra backref for this bytenr.
+ * Return <0 if there is something wrong happened.
+ */
+int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
+{
+	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
+	struct btrfs_path *path = iter->path;
+	struct btrfs_extent_inline_ref *iref;
+	int ret;
+	u32 size;
+
+	if (btrfs_backref_iter_is_inline_ref(iter)) {
+		/* We're still inside the inline refs */
+		ASSERT(iter->cur_ptr < iter->end_ptr);
+
+		if (btrfs_backref_has_tree_block_info(iter)) {
+			/* First tree block info */
+			size = sizeof(struct btrfs_tree_block_info);
+		} else {
+			/* Use inline ref type to determine the size */
+			int type;
+
+			iref = (struct btrfs_extent_inline_ref *)
+				((unsigned long)iter->cur_ptr);
+			type = btrfs_extent_inline_ref_type(eb, iref);
+
+			size = btrfs_extent_inline_ref_size(type);
+		}
+		iter->cur_ptr += size;
+		if (iter->cur_ptr < iter->end_ptr)
+			return 0;
+
+		/* All inline items iterated, fall through */
+	}
+
+	/* We're at keyed items, there is no inline item, go to the next one */
+	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
+	if (ret)
+		return ret;
+
+	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
+	if (iter->cur_key.objectid != iter->bytenr ||
+	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
+	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
+		return 1;
+	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
+						    path->slots[0]);
+	iter->cur_ptr = iter->item_ptr;
+	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
+						      path->slots[0]);
+	return 0;
+}
+
+void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
+			      struct btrfs_backref_cache *cache, int is_reloc)
+{
+	int i;
+
+	cache->rb_root = RB_ROOT;
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
+		INIT_LIST_HEAD(&cache->pending[i]);
+	INIT_LIST_HEAD(&cache->changed);
+	INIT_LIST_HEAD(&cache->detached);
+	INIT_LIST_HEAD(&cache->leaves);
+	INIT_LIST_HEAD(&cache->pending_edge);
+	INIT_LIST_HEAD(&cache->useless_node);
+	cache->fs_info = fs_info;
+	cache->is_reloc = is_reloc;
+}
+
+struct btrfs_backref_node *btrfs_backref_alloc_node(
+		struct btrfs_backref_cache *cache, u64 bytenr, int level)
+{
+	struct btrfs_backref_node *node;
+
+	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
+	node = kzalloc(sizeof(*node), GFP_NOFS);
+	if (!node)
+		return node;
+
+	INIT_LIST_HEAD(&node->list);
+	INIT_LIST_HEAD(&node->upper);
+	INIT_LIST_HEAD(&node->lower);
+	RB_CLEAR_NODE(&node->rb_node);
+	cache->nr_nodes++;
+	node->level = level;
+	node->bytenr = bytenr;
+
+	return node;
+}
+
+struct btrfs_backref_edge *btrfs_backref_alloc_edge(
+		struct btrfs_backref_cache *cache)
+{
+	struct btrfs_backref_edge *edge;
+
+	edge = kzalloc(sizeof(*edge), GFP_NOFS);
+	if (edge)
+		cache->nr_edges++;
+	return edge;
+}
+
+/*
+ * Drop the backref node from cache, also cleaning up all its
+ * upper edges and any uncached nodes in the path.
+ *
+ * This cleanup happens bottom up, thus the node should either
+ * be the lowest node in the cache or a detached node.
+ */
+void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
+				struct btrfs_backref_node *node)
+{
+	struct btrfs_backref_node *upper;
+	struct btrfs_backref_edge *edge;
+
+	if (!node)
+		return;
+
+	BUG_ON(!node->lowest && !node->detached);
+	while (!list_empty(&node->upper)) {
+		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
+				  list[LOWER]);
+		upper = edge->node[UPPER];
+		list_del(&edge->list[LOWER]);
+		list_del(&edge->list[UPPER]);
+		btrfs_backref_free_edge(cache, edge);
+
+		if (RB_EMPTY_NODE(&upper->rb_node)) {
+			BUG_ON(!list_empty(&node->upper));
+			btrfs_backref_drop_node(cache, node);
+			node = upper;
+			node->lowest = 1;
+			continue;
+		}
+		/*
+		 * Add the node to leaf node list if no other child block
+		 * cached.
+		 */
+		if (list_empty(&upper->lower)) {
+			list_add_tail(&upper->lower, &cache->leaves);
+			upper->lowest = 1;
+		}
+	}
+
+	btrfs_backref_drop_node(cache, node);
+}
+
+/*
+ * Release all nodes/edges from current cache
+ */
+void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
+{
+	struct btrfs_backref_node *node;
+	int i;
+
+	while (!list_empty(&cache->detached)) {
+		node = list_entry(cache->detached.next,
+				  struct btrfs_backref_node, list);
+		btrfs_backref_cleanup_node(cache, node);
+	}
+
+	while (!list_empty(&cache->leaves)) {
+		node = list_entry(cache->leaves.next,
+				  struct btrfs_backref_node, lower);
+		btrfs_backref_cleanup_node(cache, node);
+	}
+
+	cache->last_trans = 0;
+
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
+		ASSERT(list_empty(&cache->pending[i]));
+	ASSERT(list_empty(&cache->pending_edge));
+	ASSERT(list_empty(&cache->useless_node));
+	ASSERT(list_empty(&cache->changed));
+	ASSERT(list_empty(&cache->detached));
+	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
+	ASSERT(!cache->nr_nodes);
+	ASSERT(!cache->nr_edges);
+}
+
+/*
+ * Handle direct tree backref
+ *
+ * Direct tree backref means, the backref item shows its parent bytenr
+ * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
+ *
+ * @ref_key:	The converted backref key.
+ *		For keyed backref, it's the item key.
+ *		For inlined backref, objectid is the bytenr,
+ *		type is btrfs_inline_ref_type, offset is
+ *		btrfs_inline_ref_offset.
+ */
+static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
+				      struct btrfs_key *ref_key,
+				      struct btrfs_backref_node *cur)
+{
+	struct btrfs_backref_edge *edge;
+	struct btrfs_backref_node *upper;
+	struct rb_node *rb_node;
+
+	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
+
+	/* Only reloc root uses backref pointing to itself */
+	if (ref_key->objectid == ref_key->offset) {
+		struct btrfs_root *root;
+
+		cur->is_reloc_root = 1;
+		/* Only reloc backref cache cares about a specific root */
+		if (cache->is_reloc) {
+			root = find_reloc_root(cache->fs_info, cur->bytenr);
+			if (WARN_ON(!root))
+				return -ENOENT;
+			cur->root = root;
+		} else {
+			/*
+			 * For generic purpose backref cache, reloc root node
+			 * is useless.
+			 */
+			list_add(&cur->list, &cache->useless_node);
+		}
+		return 0;
+	}
+
+	edge = btrfs_backref_alloc_edge(cache);
+	if (!edge)
+		return -ENOMEM;
+
+	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
+	if (!rb_node) {
+		/* Parent node not yet cached */
+		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
+						 cur->level + 1);
+		if (!upper) {
+			btrfs_backref_free_edge(cache, edge);
+			return -ENOMEM;
+		}
+
+		/*
+		 * Backrefs for the upper level block isn't cached, add the
+		 * block to pending list
+		 */
+		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
+	} else {
+		/* Parent node already cached */
+		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
+		ASSERT(upper->checked);
+		INIT_LIST_HEAD(&edge->list[UPPER]);
+	}
+	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
+	return 0;
+}
+
+/*
+ * Handle indirect tree backref
+ *
+ * Indirect tree backref means, we only know which tree the node belongs to.
+ * We still need to do a tree search to find out the parents. This is for
+ * TREE_BLOCK_REF backref (keyed or inlined).
+ *
+ * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
+ * @tree_key:	The first key of this tree block.
+ * @path:	A clean (released) path, to avoid allocating a path every
+ *		time the function gets called.
+ */
+static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+					struct btrfs_path *path,
+					struct btrfs_key *ref_key,
+					struct btrfs_key *tree_key,
+					struct btrfs_backref_node *cur)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct btrfs_backref_node *upper;
+	struct btrfs_backref_node *lower;
+	struct btrfs_backref_edge *edge;
+	struct extent_buffer *eb;
+	struct btrfs_root *root;
+	struct rb_node *rb_node;
+	int level;
+	bool need_check = true;
+	int ret;
+
+	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
+	if (IS_ERR(root))
+		return PTR_ERR(root);
+	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+		cur->cowonly = 1;
+
+	if (btrfs_root_level(&root->root_item) == cur->level) {
+		/* Tree root */
+		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
+		/*
+		 * For reloc backref cache, we may ignore reloc root. But for
+		 * general purpose backref cache, we can't rely on
+		 * btrfs_should_ignore_reloc_root() as it may conflict with
+		 * current running relocation and lead to missing root.
+		 *
+		 * For general purpose backref cache, reloc root detection is
+		 * completely relying on direct backref (key->offset is parent
+		 * bytenr), thus only do such check for reloc cache.
+		 */
+		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
+			btrfs_put_root(root);
+			list_add(&cur->list, &cache->useless_node);
+		} else {
+			cur->root = root;
+		}
+		return 0;
+	}
+
+	level = cur->level + 1;
+
+	/* Search the tree to find parent blocks referring to the block */
+	path->search_commit_root = 1;
+	path->skip_locking = 1;
+	path->lowest_level = level;
+	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
+	path->lowest_level = 0;
+	if (ret < 0) {
+		btrfs_put_root(root);
+		return ret;
+	}
+	if (ret > 0 && path->slots[level] > 0)
+		path->slots[level]--;
+
+	eb = path->nodes[level];
+	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
+		btrfs_err(fs_info,
+"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
+			  cur->bytenr, level - 1, root->root_key.objectid,
+			  tree_key->objectid, tree_key->type, tree_key->offset);
+		btrfs_put_root(root);
+		ret = -ENOENT;
+		goto out;
+	}
+	lower = cur;
+
+	/* Add all nodes and edges in the path */
+	for (; level < BTRFS_MAX_LEVEL; level++) {
+		if (!path->nodes[level]) {
+			ASSERT(btrfs_root_bytenr(&root->root_item) ==
+			       lower->bytenr);
+			/* Same as previous should_ignore_reloc_root() call */
+			if (btrfs_should_ignore_reloc_root(root) &&
+			    cache->is_reloc) {
+				btrfs_put_root(root);
+				list_add(&lower->list, &cache->useless_node);
+			} else {
+				lower->root = root;
+			}
+			break;
+		}
+
+		edge = btrfs_backref_alloc_edge(cache);
+		if (!edge) {
+			btrfs_put_root(root);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		eb = path->nodes[level];
+		rb_node = rb_simple_search(&cache->rb_root, eb->start);
+		if (!rb_node) {
+			upper = btrfs_backref_alloc_node(cache, eb->start,
+							 lower->level + 1);
+			if (!upper) {
+				btrfs_put_root(root);
+				btrfs_backref_free_edge(cache, edge);
+				ret = -ENOMEM;
+				goto out;
+			}
+			upper->owner = btrfs_header_owner(eb);
+			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+				upper->cowonly = 1;
+
+			/*
+			 * If we know the block isn't shared we can avoid
+			 * checking its backrefs.
+			 */
+			if (btrfs_block_can_be_shared(root, eb))
+				upper->checked = 0;
+			else
+				upper->checked = 1;
+
+			/*
+			 * Add the block to pending list if we need to check its
+			 * backrefs, we only do this once while walking up a
+			 * tree as we will catch anything else later on.
+			 */
+			if (!upper->checked && need_check) {
+				need_check = false;
+				list_add_tail(&edge->list[UPPER],
+					      &cache->pending_edge);
+			} else {
+				if (upper->checked)
+					need_check = true;
+				INIT_LIST_HEAD(&edge->list[UPPER]);
+			}
+		} else {
+			upper = rb_entry(rb_node, struct btrfs_backref_node,
+					 rb_node);
+			ASSERT(upper->checked);
+			INIT_LIST_HEAD(&edge->list[UPPER]);
+			if (!upper->owner)
+				upper->owner = btrfs_header_owner(eb);
+		}
+		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
+
+		if (rb_node) {
+			btrfs_put_root(root);
+			break;
+		}
+		lower = upper;
+		upper = NULL;
+	}
+out:
+	btrfs_release_path(path);
+	return ret;
+}
+
+/*
+ * Add backref node @cur into @cache.
+ *
+ * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
+ *	 links aren't yet bi-directional. Needs to finish such links.
+ *	 Use btrfs_backref_finish_upper_links() to finish such linkage.
+ *
+ * @path:	Released path for indirect tree backref lookup
+ * @iter:	Released backref iter for extent tree search
+ * @node_key:	The first key of the tree block
+ */
+int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+				struct btrfs_path *path,
+				struct btrfs_backref_iter *iter,
+				struct btrfs_key *node_key,
+				struct btrfs_backref_node *cur)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct btrfs_backref_edge *edge;
+	struct btrfs_backref_node *exist;
+	int ret;
+
+	ret = btrfs_backref_iter_start(iter, cur->bytenr);
+	if (ret < 0)
+		return ret;
+	/*
+	 * We skip the first btrfs_tree_block_info, as we don't use the key
+	 * stored in it, but fetch it from the tree block
+	 */
+	if (btrfs_backref_has_tree_block_info(iter)) {
+		ret = btrfs_backref_iter_next(iter);
+		if (ret < 0)
+			goto out;
+		/* No extra backref? This means the tree block is corrupted */
+		if (ret > 0) {
+			ret = -EUCLEAN;
+			goto out;
+		}
+	}
+	WARN_ON(cur->checked);
+	if (!list_empty(&cur->upper)) {
+		/*
+		 * The backref was added previously when processing backref of
+		 * type BTRFS_TREE_BLOCK_REF_KEY
+		 */
+		ASSERT(list_is_singular(&cur->upper));
+		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
+				  list[LOWER]);
+		ASSERT(list_empty(&edge->list[UPPER]));
+		exist = edge->node[UPPER];
+		/*
+		 * Add the upper level block to pending list if we need check
+		 * its backrefs
+		 */
+		if (!exist->checked)
+			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
+	} else {
+		exist = NULL;
+	}
+
+	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
+		struct extent_buffer *eb;
+		struct btrfs_key key;
+		int type;
+
+		cond_resched();
+		eb = btrfs_backref_get_eb(iter);
+
+		key.objectid = iter->bytenr;
+		if (btrfs_backref_iter_is_inline_ref(iter)) {
+			struct btrfs_extent_inline_ref *iref;
+
+			/* Update key for inline backref */
+			iref = (struct btrfs_extent_inline_ref *)
+				((unsigned long)iter->cur_ptr);
+			type = btrfs_get_extent_inline_ref_type(eb, iref,
+							BTRFS_REF_TYPE_BLOCK);
+			if (type == BTRFS_REF_TYPE_INVALID) {
+				ret = -EUCLEAN;
+				goto out;
+			}
+			key.type = type;
+			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
+		} else {
+			key.type = iter->cur_key.type;
+			key.offset = iter->cur_key.offset;
+		}
+
+		/*
+		 * Parent node found and matches current inline ref, no need to
+		 * rebuild this node for this inline ref
+		 */
+		if (exist &&
+		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
+		      exist->owner == key.offset) ||
+		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
+		      exist->bytenr == key.offset))) {
+			exist = NULL;
+			continue;
+		}
+
+		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
+		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
+			ret = handle_direct_tree_backref(cache, &key, cur);
+			if (ret < 0)
+				goto out;
+			continue;
+		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+			ret = -EINVAL;
+			btrfs_print_v0_err(fs_info);
+			btrfs_handle_fs_error(fs_info, ret, NULL);
+			goto out;
+		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
+			continue;
+		}
+
+		/*
+		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
+		 * means the root objectid. We need to search the tree to get
+		 * its parent bytenr.
+		 */
+		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
+						   cur);
+		if (ret < 0)
+			goto out;
+	}
+	ret = 0;
+	cur->checked = 1;
+	WARN_ON(exist);
+out:
+	btrfs_backref_iter_release(iter);
+	return ret;
+}
+
+/*
+ * Finish the upwards linkage created by btrfs_backref_add_tree_node()
+ */
+int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
+				     struct btrfs_backref_node *start)
+{
+	struct list_head *useless_node = &cache->useless_node;
+	struct btrfs_backref_edge *edge;
+	struct rb_node *rb_node;
+	LIST_HEAD(pending_edge);
+
+	ASSERT(start->checked);
+
+	/* Insert this node to cache if it's not COW-only */
+	if (!start->cowonly) {
+		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
+					   &start->rb_node);
+		if (rb_node)
+			btrfs_backref_panic(cache->fs_info, start->bytenr,
+					    -EEXIST);
+		list_add_tail(&start->lower, &cache->leaves);
+	}
+
+	/*
+	 * Use breadth first search to iterate all related edges.
+	 *
+	 * The starting points are all the edges of this node
+	 */
+	list_for_each_entry(edge, &start->upper, list[LOWER])
+		list_add_tail(&edge->list[UPPER], &pending_edge);
+
+	while (!list_empty(&pending_edge)) {
+		struct btrfs_backref_node *upper;
+		struct btrfs_backref_node *lower;
+		struct rb_node *rb_node;
+
+		edge = list_first_entry(&pending_edge,
+				struct btrfs_backref_edge, list[UPPER]);
+		list_del_init(&edge->list[UPPER]);
+		upper = edge->node[UPPER];
+		lower = edge->node[LOWER];
+
+		/* Parent is detached, no need to keep any edges */
+		if (upper->detached) {
+			list_del(&edge->list[LOWER]);
+			btrfs_backref_free_edge(cache, edge);
+
+			/* Lower node is orphan, queue for cleanup */
+			if (list_empty(&lower->upper))
+				list_add(&lower->list, useless_node);
+			continue;
+		}
+
+		/*
+		 * All new nodes added in current build_backref_tree() haven't
+		 * been linked to the cache rb tree.
+		 * So if we have upper->rb_node populated, this means a cache
+		 * hit. We only need to link the edge, as @upper and all its
+		 * parents have already been linked.
+		 */
+		if (!RB_EMPTY_NODE(&upper->rb_node)) {
+			if (upper->lowest) {
+				list_del_init(&upper->lower);
+				upper->lowest = 0;
+			}
+
+			list_add_tail(&edge->list[UPPER], &upper->lower);
+			continue;
+		}
+
+		/* Sanity check, we shouldn't have any unchecked nodes */
+		if (!upper->checked) {
+			ASSERT(0);
+			return -EUCLEAN;
+		}
+
+		/* Sanity check, COW-only node has non-COW-only parent */
+		if (start->cowonly != upper->cowonly) {
+			ASSERT(0);
+			return -EUCLEAN;
+		}
+
+		/* Only cache non-COW-only (subvolume trees) tree blocks */
+		if (!upper->cowonly) {
+			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
+						   &upper->rb_node);
+			if (rb_node) {
+				btrfs_backref_panic(cache->fs_info,
+						upper->bytenr, -EEXIST);
+				return -EUCLEAN;
+			}
+		}
+
+		list_add_tail(&edge->list[UPPER], &upper->lower);
+
+		/*
+		 * Also queue all the parent edges of this uncached node
+		 * to finish the upper linkage
+		 */
+		list_for_each_entry(edge, &upper->upper, list[LOWER])
+			list_add_tail(&edge->list[UPPER], &pending_edge);
+	}
+	return 0;
+}
+
+void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
+				 struct btrfs_backref_node *node)
+{
+	struct btrfs_backref_node *lower;
+	struct btrfs_backref_node *upper;
+	struct btrfs_backref_edge *edge;
+
+	while (!list_empty(&cache->useless_node)) {
+		lower = list_first_entry(&cache->useless_node,
+					 struct btrfs_backref_node, list);
+		list_del_init(&lower->list);
+	}
+	while (!list_empty(&cache->pending_edge)) {
+		edge = list_first_entry(&cache->pending_edge,
+				struct btrfs_backref_edge, list[UPPER]);
+		list_del(&edge->list[UPPER]);
+		list_del(&edge->list[LOWER]);
+		lower = edge->node[LOWER];
+		upper = edge->node[UPPER];
+		btrfs_backref_free_edge(cache, edge);
+
+		/*
+		 * Lower is no longer linked to any upper backref nodes and
+		 * isn't in the cache, we can free it ourselves.
+		 */
+		if (list_empty(&lower->upper) &&
+		    RB_EMPTY_NODE(&lower->rb_node))
+			list_add(&lower->list, &cache->useless_node);
+
+		if (!RB_EMPTY_NODE(&upper->rb_node))
+			continue;
+
+		/* Add this guy's upper edges to the list to process */
+		list_for_each_entry(edge, &upper->upper, list[LOWER])
+			list_add_tail(&edge->list[UPPER],
+				      &cache->pending_edge);
+		if (list_empty(&upper->upper))
+			list_add(&upper->list, &cache->useless_node);
+	}
+
+	while (!list_empty(&cache->useless_node)) {
+		lower = list_first_entry(&cache->useless_node,
+					 struct btrfs_backref_node, list);
+		list_del_init(&lower->list);
+		if (lower == node)
+			node = NULL;
+		btrfs_backref_free_node(cache, lower);
+	}
+
+	btrfs_backref_cleanup_node(cache, node);
+	ASSERT(list_empty(&cache->useless_node) &&
+	       list_empty(&cache->pending_edge));
 }
+297
fs/btrfs/backref.h
@@

 #include <linux/btrfs.h>
 #include "ulist.h"
+#include "disk-io.h"
 #include "extent_io.h"

 struct inode_fs_paths {
@@
 	u64 parent;
 	u64 wanted_disk_byte;
 };
+
+/*
+ * Iterate backrefs of one extent.
+ *
+ * Now it only supports iteration of tree block in commit root.
+ */
+struct btrfs_backref_iter {
+	u64 bytenr;
+	struct btrfs_path *path;
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_key cur_key;
+	u32 item_ptr;
+	u32 cur_ptr;
+	u32 end_ptr;
+};
+
+struct btrfs_backref_iter *btrfs_backref_iter_alloc(
+		struct btrfs_fs_info *fs_info, gfp_t gfp_flag);
+
+static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
+{
+	if (!iter)
+		return;
+	btrfs_free_path(iter->path);
+	kfree(iter);
+}
+
+static inline struct extent_buffer *btrfs_backref_get_eb(
+		struct btrfs_backref_iter *iter)
+{
+	if (!iter)
+		return NULL;
+	return iter->path->nodes[0];
+}
+
+/*
+ * For metadata with EXTENT_ITEM key (non-skinny) case, the first inline data
+ * is btrfs_tree_block_info, without a btrfs_extent_inline_ref header.
+ *
+ * This helper determines if that's the case.
+ */
+static inline bool btrfs_backref_has_tree_block_info(
+		struct btrfs_backref_iter *iter)
+{
+	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
+	    iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))
+		return true;
+	return false;
+}
+
+int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);
+
+int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);
+
+static inline bool btrfs_backref_iter_is_inline_ref(
+		struct btrfs_backref_iter *iter)
+{
+	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
+	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
+		return true;
+	return false;
+}
+
+static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
+{
+	iter->bytenr = 0;
+	iter->item_ptr = 0;
+	iter->cur_ptr = 0;
+	iter->end_ptr = 0;
+	btrfs_release_path(iter->path);
+	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
+}
+
+/*
+ * Backref cache related structures
+ *
+ * The whole objective of backref_cache is to build a bi-directional map
+ * of tree blocks (represented by backref_node) and all their parents.
+ */
+
+/*
+ * Represent a tree block in the backref cache
+ */
+struct btrfs_backref_node {
+	struct {
+		struct rb_node rb_node;
+		u64 bytenr;
+	}; /* Use rb_simple_node for search/insert */
+
+	u64 new_bytenr;
+	/* Objectid of tree block owner, can be not uptodate */
+	u64 owner;
+	/* Link to pending, changed or detached list */
+	struct list_head list;
+
+	/* List of upper level edges, which link this node to its parents */
+	struct list_head upper;
+	/* List of lower level edges, which link this node to its children */
+	struct list_head lower;
+
+	/* NULL if this node is not tree root */
+	struct btrfs_root *root;
+	/* Extent buffer got by COWing the block */
+	struct extent_buffer *eb;
+	/* Level of the tree block */
+	unsigned int level:8;
+	/* Is the block in a non-shareable tree */
+	unsigned int cowonly:1;
+	/* 1 if no child node is in the cache */
+	unsigned int lowest:1;
+	/* Is the extent buffer locked */
+	unsigned int locked:1;
+	/* Has the block been processed */
+	unsigned int processed:1;
+	/* Have backrefs of this block been checked */
+	unsigned int checked:1;
+	/*
+	 * 1 if corresponding block has been COWed but some upper level block
+	 * pointers may not point to the new location
+	 */
+	unsigned int pending:1;
+	/* 1 if the backref node isn't connected to any other backref node */
+	unsigned int detached:1;
+
+	/*
+	 * For generic purpose backref cache, where we only care if it's a reloc
+	 * root, doesn't care the source subvolid.
+	 */
+	unsigned int is_reloc_root:1;
+};
+
+#define LOWER	0
+#define UPPER	1
+
+/*
+ * Represent an edge connecting upper and lower backref nodes.
+ */
+struct btrfs_backref_edge {
+	/*
+	 * list[LOWER] is linked to btrfs_backref_node::upper of lower level
+	 * node, and list[UPPER] is linked to btrfs_backref_node::lower of
+	 * upper level node.
+	 *
+	 * Also, build_backref_tree() uses list[UPPER] for pending edges, before
+	 * linking list[UPPER] to its upper level nodes.
+	 */
+	struct list_head list[2];
+
+	/* Two related nodes */
+	struct btrfs_backref_node *node[2];
+};
+
+struct btrfs_backref_cache {
+	/* Red black tree of all backref nodes in the cache */
+	struct rb_root rb_root;
+	/* For passing backref nodes to btrfs_reloc_cow_block */
+	struct btrfs_backref_node *path[BTRFS_MAX_LEVEL];
+	/*
+	 * List of blocks that have been COWed but some block pointers in upper
+	 * level blocks may not reflect the new location
+	 */
+	struct list_head pending[BTRFS_MAX_LEVEL];
+	/* List of backref nodes with no child node */
+	struct list_head leaves;
+	/* List of blocks that have been COWed in current transaction */
+	struct list_head changed;
+	/* List of detached backref nodes */
+	struct list_head detached;
+
+	u64 last_trans;
+
+	int nr_nodes;
+	int nr_edges;
+
+	/* List of unchecked backref edges during backref cache build */
+	struct list_head pending_edge;
+
+	/* List of useless backref nodes during backref cache build */
+	struct list_head useless_node;
+
+	struct btrfs_fs_info *fs_info;
+
+	/*
+	 * Whether this cache is for relocation
+	 *
+	 * Relocation backref cache requires more info for reloc root compared
+	 * to generic backref cache.
+	 */
+	unsigned int is_reloc;
+};
+
+void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
+			      struct btrfs_backref_cache *cache, int is_reloc);
+struct btrfs_backref_node *btrfs_backref_alloc_node(
+		struct btrfs_backref_cache *cache, u64 bytenr, int level);
+struct btrfs_backref_edge *btrfs_backref_alloc_edge(
+		struct btrfs_backref_cache *cache);
+
+#define LINK_LOWER	(1 << 0)
+#define LINK_UPPER	(1 << 1)
+static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
+					   struct btrfs_backref_node *lower,
+					   struct btrfs_backref_node *upper,
+					   int link_which)
+{
+	ASSERT(upper && lower && upper->level == lower->level + 1);
+	edge->node[LOWER] = lower;
+	edge->node[UPPER] = upper;
+	if (link_which & LINK_LOWER)
+		list_add_tail(&edge->list[LOWER], &lower->upper);
+	if (link_which & LINK_UPPER)
+		list_add_tail(&edge->list[UPPER], &upper->lower);
+}
+
+static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
+					   struct btrfs_backref_node *node)
+{
+	if (node) {
+		cache->nr_nodes--;
+		btrfs_put_root(node->root);
+		kfree(node);
+	}
+}
+
+static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
+					   struct btrfs_backref_edge *edge)
+{
+	if (edge) {
+		cache->nr_edges--;
+		kfree(edge);
+	}
+}
+
+static inline void btrfs_backref_unlock_node_buffer(
+		struct btrfs_backref_node *node)
+{
+	if (node->locked) {
+		btrfs_tree_unlock(node->eb);
+		node->locked = 0;
+	}
+}
+
+static inline void btrfs_backref_drop_node_buffer(
+		struct btrfs_backref_node *node)
+{
+	if (node->eb) {
+		btrfs_backref_unlock_node_buffer(node);
+		free_extent_buffer(node->eb);
+		node->eb = NULL;
+	}
+}
+
+/*
+ * Drop the backref node from cache without cleaning up its children
+ * edges.
+ *
+ * This can only be called on node without parent edges.
+ * The children edges are still kept as is.
+ */
+static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
+					   struct btrfs_backref_node *node)
+{
+	BUG_ON(!list_empty(&node->upper));
+
+	btrfs_backref_drop_node_buffer(node);
+	list_del(&node->list);
+	list_del(&node->lower);
+	if (!RB_EMPTY_NODE(&node->rb_node))
+		rb_erase(&node->rb_node, &tree->rb_root);
+	btrfs_backref_free_node(tree, node);
+}
+
+void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
+				struct btrfs_backref_node *node);
+
+void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);
+
+static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
+				       u64 bytenr, int errno)
+{
+	btrfs_panic(fs_info, errno,
+		    "Inconsistency in backref cache found at offset %llu",
+		    bytenr);
+}
+
+int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+				struct btrfs_path *path,
+				struct btrfs_backref_iter *iter,
+				struct btrfs_key *node_key,
+				struct btrfs_backref_node *cur);
+
+int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
+				     struct btrfs_backref_node *start);
+
+void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
+				 struct btrfs_backref_node *node);

 #endif
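The header above implies a two-phase lifecycle for the cache. A sketch of how
a caller is expected to string the pieces together (the function and its
signature are hypothetical; in-tree, build_backref_tree() in relocation plays
this role):

	/* Sketch: cache the subtree of parents above one tree block.
	 * @path and @iter are preallocated and released by the caller. */
	static int example_cache_block(struct btrfs_fs_info *fs_info,
				       struct btrfs_backref_cache *cache,
				       struct btrfs_path *path,
				       struct btrfs_backref_iter *iter,
				       struct btrfs_key *node_key,
				       u64 bytenr, int level)
	{
		struct btrfs_backref_node *cur;
		int ret;

		btrfs_backref_init_cache(fs_info, cache, 1 /* reloc cache */);
		cur = btrfs_backref_alloc_node(cache, bytenr, level);
		if (!cur)
			return -ENOMEM;

		/* Phase 1: discover upper nodes; links are one-directional */
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
		if (ret < 0)
			goto error;
		/* Phase 2: make the pending upper links bidirectional */
		ret = btrfs_backref_finish_upper_links(cache, cur);
		if (ret < 0)
			goto error;
		return 0;
	error:
		btrfs_backref_error_cleanup(cache, cur);
		return ret;
	}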
+154 -79
fs/btrfs/block-group.c
··· 7 7 #include "disk-io.h" 8 8 #include "free-space-cache.h" 9 9 #include "free-space-tree.h" 10 - #include "disk-io.h" 11 10 #include "volumes.h" 12 11 #include "transaction.h" 13 12 #include "ref-verify.h" ··· 159 160 struct rb_node **p; 160 161 struct rb_node *parent = NULL; 161 162 struct btrfs_block_group *cache; 163 + 164 + ASSERT(block_group->length != 0); 162 165 163 166 spin_lock(&info->block_group_cache_lock); 164 167 p = &info->block_group_cache_tree.rb_node; ··· 864 863 } 865 864 } 866 865 866 + static int remove_block_group_item(struct btrfs_trans_handle *trans, 867 + struct btrfs_path *path, 868 + struct btrfs_block_group *block_group) 869 + { 870 + struct btrfs_fs_info *fs_info = trans->fs_info; 871 + struct btrfs_root *root; 872 + struct btrfs_key key; 873 + int ret; 874 + 875 + root = fs_info->extent_root; 876 + key.objectid = block_group->start; 877 + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 878 + key.offset = block_group->length; 879 + 880 + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 881 + if (ret > 0) 882 + ret = -ENOENT; 883 + if (ret < 0) 884 + return ret; 885 + 886 + ret = btrfs_del_item(trans, root, path); 887 + return ret; 888 + } 889 + 867 890 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 868 891 u64 group_start, struct extent_map *em) 869 892 { 870 893 struct btrfs_fs_info *fs_info = trans->fs_info; 871 - struct btrfs_root *root = fs_info->extent_root; 872 894 struct btrfs_path *path; 873 895 struct btrfs_block_group *block_group; 874 896 struct btrfs_free_cluster *cluster; ··· 1089 1065 1090 1066 spin_unlock(&block_group->space_info->lock); 1091 1067 1092 - key.objectid = block_group->start; 1093 - key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 1094 - key.offset = block_group->length; 1095 - 1096 1068 mutex_lock(&fs_info->chunk_mutex); 1097 1069 spin_lock(&block_group->lock); 1098 1070 block_group->removed = 1; 1099 1071 /* 1100 - * At this point trimming can't start on this block group, because we 1101 - * removed the block group from the tree fs_info->block_group_cache_tree 1102 - * so no one can't find it anymore and even if someone already got this 1103 - * block group before we removed it from the rbtree, they have already 1104 - * incremented block_group->trimming - if they didn't, they won't find 1105 - * any free space entries because we already removed them all when we 1106 - * called btrfs_remove_free_space_cache(). 1072 + * At this point trimming or scrub can't start on this block group, 1073 + * because we removed the block group from the rbtree 1074 + * fs_info->block_group_cache_tree so no one can't find it anymore and 1075 + * even if someone already got this block group before we removed it 1076 + * from the rbtree, they have already incremented block_group->frozen - 1077 + * if they didn't, for the trimming case they won't find any free space 1078 + * entries because we already removed them all when we called 1079 + * btrfs_remove_free_space_cache(). 1107 1080 * 1108 1081 * And we must not remove the extent map from the fs_info->mapping_tree 1109 1082 * to prevent the same logical address range and physical device space 1110 - * ranges from being reused for a new block group. This is because our 1111 - * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is 1083 + * ranges from being reused for a new block group. This is needed to 1084 + * avoid races with trimming and scrub. 
1085 + * 1086 + * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is 1112 1087 * completely transactionless, so while it is trimming a range the 1113 1088 * currently running transaction might finish and a new one start, 1114 1089 * allowing for new block groups to be created that can reuse the same ··· 1118 1095 * in place until the extents have been discarded completely when 1119 1096 * the transaction commit has completed. 1120 1097 */ 1121 - remove_em = (atomic_read(&block_group->trimming) == 0); 1098 + remove_em = (atomic_read(&block_group->frozen) == 0); 1122 1099 spin_unlock(&block_group->lock); 1123 1100 1124 1101 mutex_unlock(&fs_info->chunk_mutex); ··· 1130 1107 /* Once for the block groups rbtree */ 1131 1108 btrfs_put_block_group(block_group); 1132 1109 1133 - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1134 - if (ret > 0) 1135 - ret = -EIO; 1110 + ret = remove_block_group_item(trans, path, block_group); 1136 1111 if (ret < 0) 1137 - goto out; 1138 - 1139 - ret = btrfs_del_item(trans, root, path); 1140 - if (ret) 1141 1112 goto out; 1142 1113 1143 1114 if (remove_em) { ··· 1192 1175 free_extent_map(em); 1193 1176 1194 1177 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, 1195 - num_items, 1); 1178 + num_items); 1196 1179 } 1197 1180 1198 1181 /* ··· 1301 1284 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, 1302 1285 EXTENT_DIRTY); 1303 1286 if (ret) 1304 - goto err; 1287 + goto out; 1305 1288 } 1306 1289 1307 1290 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, 1308 1291 EXTENT_DIRTY); 1309 - if (ret) 1310 - goto err; 1292 + out: 1311 1293 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1312 1294 if (prev_trans) 1313 1295 btrfs_put_transaction(prev_trans); 1314 1296 1315 - return true; 1316 - 1317 - err: 1318 - mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1319 - if (prev_trans) 1320 - btrfs_put_transaction(prev_trans); 1321 - btrfs_dec_block_group_ro(bg); 1322 - return false; 1297 + return ret == 0; 1323 1298 } 1324 1299 1325 1300 /* ··· 1409 1400 * We could have pending pinned extents for this block group, 1410 1401 * just delete them, we don't care about them anymore. 1411 1402 */ 1412 - if (!clean_pinned_extents(trans, block_group)) 1403 + if (!clean_pinned_extents(trans, block_group)) { 1404 + btrfs_dec_block_group_ro(block_group); 1413 1405 goto end_trans; 1406 + } 1414 1407 1415 1408 /* 1416 1409 * At this point, the block_group is read only and should fail ··· 1461 1450 1462 1451 /* Implicit trim during transaction commit. 
*/ 1463 1452 if (trimming) 1464 - btrfs_get_block_group_trimming(block_group); 1453 + btrfs_freeze_block_group(block_group); 1465 1454 1466 1455 /* 1467 1456 * Btrfs_remove_chunk will abort the transaction if things go ··· 1471 1460 1472 1461 if (ret) { 1473 1462 if (trimming) 1474 - btrfs_put_block_group_trimming(block_group); 1463 + btrfs_unfreeze_block_group(block_group); 1475 1464 goto end_trans; 1476 1465 } 1477 1466 ··· 1785 1774 } 1786 1775 1787 1776 static struct btrfs_block_group *btrfs_create_block_group_cache( 1788 - struct btrfs_fs_info *fs_info, u64 start, u64 size) 1777 + struct btrfs_fs_info *fs_info, u64 start) 1789 1778 { 1790 1779 struct btrfs_block_group *cache; 1791 1780 ··· 1801 1790 } 1802 1791 1803 1792 cache->start = start; 1804 - cache->length = size; 1805 1793 1806 1794 cache->fs_info = fs_info; 1807 1795 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); ··· 1819 1809 INIT_LIST_HEAD(&cache->dirty_list); 1820 1810 INIT_LIST_HEAD(&cache->io_list); 1821 1811 btrfs_init_free_space_ctl(cache); 1822 - atomic_set(&cache->trimming, 0); 1812 + atomic_set(&cache->frozen, 0); 1823 1813 mutex_init(&cache->free_space_lock); 1824 1814 btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); 1825 1815 ··· 1880 1870 return ret; 1881 1871 } 1882 1872 1873 + static int read_block_group_item(struct btrfs_block_group *cache, 1874 + struct btrfs_path *path, 1875 + const struct btrfs_key *key) 1876 + { 1877 + struct extent_buffer *leaf = path->nodes[0]; 1878 + struct btrfs_block_group_item bgi; 1879 + int slot = path->slots[0]; 1880 + 1881 + cache->length = key->offset; 1882 + 1883 + read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 1884 + sizeof(bgi)); 1885 + cache->used = btrfs_stack_block_group_used(&bgi); 1886 + cache->flags = btrfs_stack_block_group_flags(&bgi); 1887 + 1888 + return 0; 1889 + } 1890 + 1883 1891 static int read_one_block_group(struct btrfs_fs_info *info, 1884 1892 struct btrfs_path *path, 1885 1893 const struct btrfs_key *key, 1886 1894 int need_clear) 1887 1895 { 1888 - struct extent_buffer *leaf = path->nodes[0]; 1889 1896 struct btrfs_block_group *cache; 1890 1897 struct btrfs_space_info *space_info; 1891 - struct btrfs_block_group_item bgi; 1892 1898 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 1893 - int slot = path->slots[0]; 1894 1899 int ret; 1895 1900 1896 1901 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 1897 1902 1898 - cache = btrfs_create_block_group_cache(info, key->objectid, key->offset); 1903 + cache = btrfs_create_block_group_cache(info, key->objectid); 1899 1904 if (!cache) 1900 1905 return -ENOMEM; 1906 + 1907 + ret = read_block_group_item(cache, path, key); 1908 + if (ret < 0) 1909 + goto error; 1901 1910 1902 1911 if (need_clear) { 1903 1912 /* ··· 1932 1903 if (btrfs_test_opt(info, SPACE_CACHE)) 1933 1904 cache->disk_cache_state = BTRFS_DC_CLEAR; 1934 1905 } 1935 - read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 1936 - sizeof(bgi)); 1937 - cache->used = btrfs_stack_block_group_used(&bgi); 1938 - cache->flags = btrfs_stack_block_group_flags(&bgi); 1939 1906 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 1940 1907 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 1941 1908 btrfs_err(info, ··· 1959 1934 * are empty, and we can just add all the space in and be done with it. 1960 1935 * This saves us _a_lot_ of time, particularly in the full case. 
1961 1936 */ 1962 - if (key->offset == cache->used) { 1937 + if (cache->length == cache->used) { 1963 1938 cache->last_byte_to_unpin = (u64)-1; 1964 1939 cache->cached = BTRFS_CACHE_FINISHED; 1965 1940 btrfs_free_excluded_extents(cache); 1966 1941 } else if (cache->used == 0) { 1967 1942 cache->last_byte_to_unpin = (u64)-1; 1968 1943 cache->cached = BTRFS_CACHE_FINISHED; 1969 - add_new_free_space(cache, key->objectid, 1970 - key->objectid + key->offset); 1944 + add_new_free_space(cache, cache->start, 1945 + cache->start + cache->length); 1971 1946 btrfs_free_excluded_extents(cache); 1972 1947 } 1973 1948 ··· 1977 1952 goto error; 1978 1953 } 1979 1954 trace_btrfs_add_block_group(info, cache, 0); 1980 - btrfs_update_space_info(info, cache->flags, key->offset, 1955 + btrfs_update_space_info(info, cache->flags, cache->length, 1981 1956 cache->used, cache->bytes_super, &space_info); 1982 1957 1983 1958 cache->space_info = space_info; ··· 2016 1991 path = btrfs_alloc_path(); 2017 1992 if (!path) 2018 1993 return -ENOMEM; 2019 - path->reada = READA_FORWARD; 2020 1994 2021 1995 cache_gen = btrfs_super_cache_generation(info->super_copy); 2022 1996 if (btrfs_test_opt(info, SPACE_CACHE) && ··· 2070 2046 return ret; 2071 2047 } 2072 2048 2049 + static int insert_block_group_item(struct btrfs_trans_handle *trans, 2050 + struct btrfs_block_group *block_group) 2051 + { 2052 + struct btrfs_fs_info *fs_info = trans->fs_info; 2053 + struct btrfs_block_group_item bgi; 2054 + struct btrfs_root *root; 2055 + struct btrfs_key key; 2056 + 2057 + spin_lock(&block_group->lock); 2058 + btrfs_set_stack_block_group_used(&bgi, block_group->used); 2059 + btrfs_set_stack_block_group_chunk_objectid(&bgi, 2060 + BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2061 + btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 2062 + key.objectid = block_group->start; 2063 + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2064 + key.offset = block_group->length; 2065 + spin_unlock(&block_group->lock); 2066 + 2067 + root = fs_info->extent_root; 2068 + return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 2069 + } 2070 + 2073 2071 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 2074 2072 { 2075 2073 struct btrfs_fs_info *fs_info = trans->fs_info; 2076 2074 struct btrfs_block_group *block_group; 2077 - struct btrfs_root *extent_root = fs_info->extent_root; 2078 - struct btrfs_block_group_item item; 2079 - struct btrfs_key key; 2080 2075 int ret = 0; 2081 2076 2082 2077 if (!trans->can_flush_pending_bgs) ··· 2108 2065 if (ret) 2109 2066 goto next; 2110 2067 2111 - spin_lock(&block_group->lock); 2112 - btrfs_set_stack_block_group_used(&item, block_group->used); 2113 - btrfs_set_stack_block_group_chunk_objectid(&item, 2114 - BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2115 - btrfs_set_stack_block_group_flags(&item, block_group->flags); 2116 - key.objectid = block_group->start; 2117 - key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2118 - key.offset = block_group->length; 2119 - spin_unlock(&block_group->lock); 2120 - 2121 - ret = btrfs_insert_item(trans, extent_root, &key, &item, 2122 - sizeof(item)); 2068 + ret = insert_block_group_item(trans, block_group); 2123 2069 if (ret) 2124 2070 btrfs_abort_transaction(trans, ret); 2125 - ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset); 2071 + ret = btrfs_finish_chunk_alloc(trans, block_group->start, 2072 + block_group->length); 2126 2073 if (ret) 2127 2074 btrfs_abort_transaction(trans, ret); 2128 2075 add_block_group_free_space(trans, block_group); ··· 2133 2100 2134 2101 
btrfs_set_log_full_commit(trans); 2135 2102 2136 - cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size); 2103 + cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 2137 2104 if (!cache) 2138 2105 return -ENOMEM; 2139 2106 2107 + cache->length = size; 2140 2108 cache->used = bytes_used; 2141 2109 cache->flags = type; 2142 2110 cache->last_byte_to_unpin = (u64)-1; ··· 2348 2314 spin_unlock(&sinfo->lock); 2349 2315 } 2350 2316 2351 - static int write_one_cache_group(struct btrfs_trans_handle *trans, 2352 - struct btrfs_path *path, 2353 - struct btrfs_block_group *cache) 2317 + static int update_block_group_item(struct btrfs_trans_handle *trans, 2318 + struct btrfs_path *path, 2319 + struct btrfs_block_group *cache) 2354 2320 { 2355 2321 struct btrfs_fs_info *fs_info = trans->fs_info; 2356 2322 int ret; 2357 - struct btrfs_root *extent_root = fs_info->extent_root; 2323 + struct btrfs_root *root = fs_info->extent_root; 2358 2324 unsigned long bi; 2359 2325 struct extent_buffer *leaf; 2360 2326 struct btrfs_block_group_item bgi; ··· 2364 2330 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2365 2331 key.offset = cache->length; 2366 2332 2367 - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1); 2333 + ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2368 2334 if (ret) { 2369 2335 if (ret > 0) 2370 2336 ret = -ENOENT; ··· 2676 2642 } 2677 2643 } 2678 2644 if (!ret) { 2679 - ret = write_one_cache_group(trans, path, cache); 2645 + ret = update_block_group_item(trans, path, cache); 2680 2646 /* 2681 2647 * Our block group might still be attached to the list 2682 2648 * of new block groups in the transaction handle of some ··· 2825 2791 } 2826 2792 } 2827 2793 if (!ret) { 2828 - ret = write_one_cache_group(trans, path, cache); 2794 + ret = update_block_group_item(trans, path, cache); 2829 2795 /* 2830 2796 * One of the free space endio workers might have 2831 2797 * created a new block group while updating a free space ··· 2842 2808 if (ret == -ENOENT) { 2843 2809 wait_event(cur_trans->writer_wait, 2844 2810 atomic_read(&cur_trans->num_writers) == 1); 2845 - ret = write_one_cache_group(trans, path, cache); 2811 + ret = update_block_group_item(trans, path, cache); 2846 2812 } 2847 2813 if (ret) 2848 2814 btrfs_abort_transaction(trans, ret); ··· 3417 3383 btrfs_sysfs_remove_space_info(space_info); 3418 3384 } 3419 3385 return 0; 3386 + } 3387 + 3388 + void btrfs_freeze_block_group(struct btrfs_block_group *cache) 3389 + { 3390 + atomic_inc(&cache->frozen); 3391 + } 3392 + 3393 + void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) 3394 + { 3395 + struct btrfs_fs_info *fs_info = block_group->fs_info; 3396 + struct extent_map_tree *em_tree; 3397 + struct extent_map *em; 3398 + bool cleanup; 3399 + 3400 + spin_lock(&block_group->lock); 3401 + cleanup = (atomic_dec_and_test(&block_group->frozen) && 3402 + block_group->removed); 3403 + spin_unlock(&block_group->lock); 3404 + 3405 + if (cleanup) { 3406 + mutex_lock(&fs_info->chunk_mutex); 3407 + em_tree = &fs_info->mapping_tree; 3408 + write_lock(&em_tree->lock); 3409 + em = lookup_extent_mapping(em_tree, block_group->start, 3410 + 1); 3411 + BUG_ON(!em); /* logic error, can't happen */ 3412 + remove_extent_mapping(em_tree, em); 3413 + write_unlock(&em_tree->lock); 3414 + mutex_unlock(&fs_info->chunk_mutex); 3415 + 3416 + /* once for us and once for the tree */ 3417 + free_extent_map(em); 3418 + free_extent_map(em); 3419 + 3420 + /* 3421 + * We may have left one free space entry and other possible 3422 + * 
tasks trimming this block group have each left one entry. 3423 + * Free them if any. 3424 + */ 3425 + __btrfs_remove_free_space_cache(block_group->free_space_ctl); 3426 + } 3420 3427 }
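The freeze/unfreeze pair introduced above replaces the old get/put trimming counter. A minimal usage sketch, assuming a caller (such as scrub or trim) that must keep a possibly-deleted block group's logical range and device extents alive while working on it; do_work_on_block_group() is a hypothetical name, not from this series:

	static int do_work_on_block_group(struct btrfs_block_group *bg)
	{
		int ret;

		/* Pin the chunk's logical range and device extents. */
		btrfs_freeze_block_group(bg);
		ret = 0;	/* ... scrub/trim the block group here ... */
		/*
		 * Drop the pin; per the code above, if the group was removed
		 * meanwhile this also frees its extent map and any leftover
		 * free space entries.
		 */
		btrfs_unfreeze_block_group(bg);
		return ret;
	}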
+13 -1
fs/btrfs/block-group.h
··· 129 129 /* For read-only block groups */ 130 130 struct list_head ro_list; 131 131 132 + /* 133 + * When non-zero it means the block group's logical address and its 134 + * device extents can not be reused for future block group allocations 135 + * until the counter goes down to 0. This is to prevent them from being 136 + * reused while some task is still using the block group after it was 137 + * deleted - we want to make sure they can only be reused for new block 138 + * groups after that task is done with the deleted block group. 139 + */ 140 + atomic_t frozen; 141 + 132 142 /* For discard operations */ 133 - atomic_t trimming; 134 143 struct list_head discard_list; 135 144 int discard_index; 136 145 u64 discard_eligible_time; ··· 291 282 return cache->cached == BTRFS_CACHE_FINISHED || 292 283 cache->cached == BTRFS_CACHE_ERROR; 293 284 } 285 + 286 + void btrfs_freeze_block_group(struct btrfs_block_group *cache); 287 + void btrfs_unfreeze_block_group(struct btrfs_block_group *cache); 294 288 295 289 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 296 290 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
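For readers tracking the rename across this series, a rough mapping of the old counter and helpers (the old declarations are removed elsewhere in this diff) to the new ones declared here:

	/* old name                             new replacement                  */
	atomic_t trimming;                   /* atomic_t frozen;                 */
	btrfs_get_block_group_trimming(bg);  /* btrfs_freeze_block_group(bg);    */
	btrfs_put_block_group_trimming(bg);  /* btrfs_unfreeze_block_group(bg);  */

The semantics widen accordingly: the counter no longer covers only trimming, but any task that needs a deleted block group's addresses to stay unreused.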
+4 -1
fs/btrfs/block-rsv.c
··· 5 5 #include "block-rsv.h" 6 6 #include "space-info.h" 7 7 #include "transaction.h" 8 + #include "block-group.h" 8 9 9 10 /* 10 11 * HOW DO BLOCK RESERVES WORK ··· 406 405 else 407 406 block_rsv->full = 0; 408 407 408 + if (block_rsv->size >= sinfo->total_bytes) 409 + sinfo->force_alloc = CHUNK_ALLOC_FORCE; 409 410 spin_unlock(&block_rsv->lock); 410 411 spin_unlock(&sinfo->lock); 411 412 } ··· 458 455 struct btrfs_fs_info *fs_info = root->fs_info; 459 456 struct btrfs_block_rsv *block_rsv = NULL; 460 457 461 - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 458 + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || 462 459 (root == fs_info->csum_root && trans->adding_csums) || 463 460 (root == fs_info->uuid_root)) 464 461 block_rsv = trans->block_rsv;
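Two things to note in this hunk: the new include is presumably needed because CHUNK_ALLOC_FORCE lives in the block group code, and the added check nudges chunk allocation once the global reserve wants at least as much as the metadata space info currently holds. A sketch of that heuristic in isolation (illustrative only, not a standalone kernel function; it mirrors the two added lines above):

	static void nudge_chunk_alloc(struct btrfs_block_rsv *rsv,
				      struct btrfs_space_info *sinfo)
	{
		if (rsv->size >= sinfo->total_bytes)
			sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	}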
+8 -36
fs/btrfs/btrfs_inode.h
··· 7 7 #define BTRFS_INODE_H 8 8 9 9 #include <linux/hash.h> 10 + #include <linux/refcount.h> 10 11 #include "extent_map.h" 11 12 #include "extent_io.h" 12 13 #include "ordered-data.h" ··· 28 27 BTRFS_INODE_NEEDS_FULL_SYNC, 29 28 BTRFS_INODE_COPY_EVERYTHING, 30 29 BTRFS_INODE_IN_DELALLOC_LIST, 31 - BTRFS_INODE_READDIO_NEED_LOCK, 32 30 BTRFS_INODE_HAS_PROPS, 33 31 BTRFS_INODE_SNAPSHOT_FLUSH, 34 32 }; ··· 293 293 return ret; 294 294 } 295 295 296 - #define BTRFS_DIO_ORIG_BIO_SUBMITTED 0x1 297 - 298 296 struct btrfs_dio_private { 299 297 struct inode *inode; 300 - unsigned long flags; 301 298 u64 logical_offset; 302 299 u64 disk_bytenr; 303 300 u64 bytes; 304 - void *private; 305 301 306 - /* number of bios pending for this dio */ 307 - atomic_t pending_bios; 308 - 309 - /* IO errors */ 310 - int errors; 311 - 312 - /* orig_bio is our btrfs_io_bio */ 313 - struct bio *orig_bio; 302 + /* 303 + * References to this structure. There is one reference per in-flight 304 + * bio plus one while we're still setting up. 305 + */ 306 + refcount_t refs; 314 307 315 308 /* dio_bio came from fs/direct-io.c */ 316 309 struct bio *dio_bio; 317 310 318 - /* 319 - * The original bio may be split to several sub-bios, this is 320 - * done during endio of sub-bios 321 - */ 322 - blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *, 323 - blk_status_t); 311 + /* Array of checksums */ 312 + u8 csums[]; 324 313 }; 325 - 326 - /* 327 - * Disable DIO read nolock optimization, so new dio readers will be forced 328 - * to grab i_mutex. It is used to avoid the endless truncate due to 329 - * nonlocked dio read. 330 - */ 331 - static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode) 332 - { 333 - set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); 334 - smp_mb(); 335 - } 336 - 337 - static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode) 338 - { 339 - smp_mb__before_atomic(); 340 - clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); 341 - } 342 314 343 315 /* Array of bytes with variable length, hexadecimal format 0x1234 */ 344 316 #define CSUM_FMT "0x%*phN"
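The old pending_bios/errors/orig_bio bookkeeping gives way to a single reference count. A hedged lifecycle sketch, assuming hypothetical helper names; only struct btrfs_dio_private and its refcount_t come from the diff:

	/* Setup holds one reference; each submitted bio holds another. */
	static void dio_submit_one(struct btrfs_dio_private *dip, struct bio *bio)
	{
		refcount_inc(&dip->refs);
		/* submit_bio(bio); */
	}

	/* Called from each bio's endio, and once when setup finishes. */
	static void dio_put(struct btrfs_dio_private *dip)
	{
		if (refcount_dec_and_test(&dip->refs))
			kfree(dip);	/* sketch; real teardown also completes dip->dio_bio */
	}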
+17 -19
fs/btrfs/compression.c
··· 194 194 for (i = 0; i < cb->nr_pages; i++) { 195 195 page = cb->compressed_pages[i]; 196 196 197 - crypto_shash_init(shash); 198 197 kaddr = kmap_atomic(page); 199 - crypto_shash_update(shash, kaddr, PAGE_SIZE); 198 + crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum); 200 199 kunmap_atomic(kaddr); 201 - crypto_shash_final(shash, (u8 *)&csum); 202 200 203 201 if (memcmp(&csum, cb_sum, csum_size)) { 204 202 btrfs_print_data_csum_error(inode, disk_start, ··· 1140 1142 } 1141 1143 1142 1144 /* 1145 + * Adjust @level according to the limits of the compression algorithm or 1146 + * fallback to default 1147 + */ 1148 + static unsigned int btrfs_compress_set_level(int type, unsigned level) 1149 + { 1150 + const struct btrfs_compress_op *ops = btrfs_compress_op[type]; 1151 + 1152 + if (level == 0) 1153 + level = ops->default_level; 1154 + else 1155 + level = min(level, ops->max_level); 1156 + 1157 + return level; 1158 + } 1159 + 1160 + /* 1143 1161 * Given an address space and start and length, compress the bytes into @pages 1144 1162 * that are allocated on demand. 1145 1163 * ··· 1759 1745 } 1760 1746 1761 1747 level = btrfs_compress_set_level(type, level); 1762 - 1763 - return level; 1764 - } 1765 - 1766 - /* 1767 - * Adjust @level according to the limits of the compression algorithm or 1768 - * fallback to default 1769 - */ 1770 - unsigned int btrfs_compress_set_level(int type, unsigned level) 1771 - { 1772 - const struct btrfs_compress_op *ops = btrfs_compress_op[type]; 1773 - 1774 - if (level == 0) 1775 - level = ops->default_level; 1776 - else 1777 - level = min(level, ops->max_level); 1778 1748 1779 1749 return level; 1780 1750 }
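The checksum verification above shrinks from the init/update/final triple to a single one-shot call. A minimal sketch of the pattern, assuming an already-allocated shash tfm (see <crypto/hash.h>):

	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	/* Equivalent to crypto_shash_init() + _update() + _final(). */
	crypto_shash_digest(shash, data, len, out);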
-2
fs/btrfs/compression.h
··· 140 140 const char* btrfs_compress_type2str(enum btrfs_compression_type type); 141 141 bool btrfs_compress_is_valid_type(const char *str, size_t len); 142 142 143 - unsigned int btrfs_compress_set_level(int type, unsigned level); 144 - 145 143 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end); 146 144 147 145 #endif
+76 -104
fs/btrfs/ctree.c
··· 144 144 return eb; 145 145 } 146 146 147 - /* cowonly root (everything not a reference counted cow subvolume), just get 148 - * put onto a simple dirty list. transaction.c walks this to make sure they 149 - * get properly updated on disk. 147 + /* 148 + * Cowonly root (not-shareable trees, everything not subvolume or reloc roots), 149 + * just get put onto a simple dirty list. Transaction walks this list to make 150 + * sure they get properly updated on disk. 150 151 */ 151 152 static void add_root_to_dirty_list(struct btrfs_root *root) 152 153 { ··· 186 185 int level; 187 186 struct btrfs_disk_key disk_key; 188 187 189 - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && 188 + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 190 189 trans->transid != fs_info->running_transaction->transid); 191 - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && 190 + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 192 191 trans->transid != root->last_trans); 193 192 194 193 level = btrfs_header_level(buf); ··· 827 826 struct extent_buffer *buf) 828 827 { 829 828 /* 830 - * Tree blocks not in reference counted trees and tree roots 831 - * are never shared. If a block was allocated after the last 832 - * snapshot and the block was not allocated by tree relocation, 833 - * we know the block is not shared. 829 + * Tree blocks not in shareable trees and tree roots are never shared. 830 + * If a block was allocated after the last snapshot and the block was 831 + * not allocated by tree relocation, we know the block is not shared. 834 832 */ 835 - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && 833 + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 836 834 buf != root->node && buf != root->commit_root && 837 835 (btrfs_header_generation(buf) <= 838 836 btrfs_root_last_snapshot(&root->root_item) || ··· 1024 1024 1025 1025 btrfs_assert_tree_locked(buf); 1026 1026 1027 - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && 1027 + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 1028 1028 trans->transid != fs_info->running_transaction->transid); 1029 - WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) && 1029 + WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 1030 1030 trans->transid != root->last_trans); 1031 1031 1032 1032 level = btrfs_header_level(buf); ··· 1065 1065 return ret; 1066 1066 } 1067 1067 1068 - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 1068 + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 1069 1069 ret = btrfs_reloc_cow_block(trans, root, buf, cow); 1070 1070 if (ret) { 1071 1071 btrfs_abort_transaction(trans, ret); ··· 1668 1668 { 1669 1669 int low = 0; 1670 1670 int high = max; 1671 - int mid; 1672 1671 int ret; 1673 - struct btrfs_disk_key *tmp = NULL; 1674 - struct btrfs_disk_key unaligned; 1675 - unsigned long offset; 1676 - char *kaddr = NULL; 1677 - unsigned long map_start = 0; 1678 - unsigned long map_len = 0; 1679 - int err; 1672 + const int key_size = sizeof(struct btrfs_disk_key); 1680 1673 1681 1674 if (low > high) { 1682 1675 btrfs_err(eb->fs_info, ··· 1680 1687 } 1681 1688 1682 1689 while (low < high) { 1690 + unsigned long oip; 1691 + unsigned long offset; 1692 + struct btrfs_disk_key *tmp; 1693 + struct btrfs_disk_key unaligned; 1694 + int mid; 1695 + 1683 1696 mid = (low + high) / 2; 1684 1697 offset = p + mid * item_size; 1698 + oip = offset_in_page(offset); 1685 1699 1686 - if (!kaddr || offset < map_start || 1687 - (offset + sizeof(struct btrfs_disk_key)) > 1688 - map_start + map_len) { 1700 + if (oip + key_size <= 
PAGE_SIZE) { 1701 + const unsigned long idx = offset >> PAGE_SHIFT; 1702 + char *kaddr = page_address(eb->pages[idx]); 1689 1703 1690 - err = map_private_extent_buffer(eb, offset, 1691 - sizeof(struct btrfs_disk_key), 1692 - &kaddr, &map_start, &map_len); 1693 - 1694 - if (!err) { 1695 - tmp = (struct btrfs_disk_key *)(kaddr + offset - 1696 - map_start); 1697 - } else if (err == 1) { 1698 - read_extent_buffer(eb, &unaligned, 1699 - offset, sizeof(unaligned)); 1700 - tmp = &unaligned; 1701 - } else { 1702 - return err; 1703 - } 1704 - 1704 + tmp = (struct btrfs_disk_key *)(kaddr + oip); 1705 1705 } else { 1706 - tmp = (struct btrfs_disk_key *)(kaddr + offset - 1707 - map_start); 1706 + read_extent_buffer(eb, &unaligned, offset, key_size); 1707 + tmp = &unaligned; 1708 1708 } 1709 + 1709 1710 ret = comp_keys(tmp, key); 1710 1711 1711 1712 if (ret < 0) ··· 1720 1733 * leaves vs nodes 1721 1734 */ 1722 1735 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, 1723 - int level, int *slot) 1736 + int *slot) 1724 1737 { 1725 - if (level == 0) 1738 + if (btrfs_header_level(eb) == 0) 1726 1739 return generic_bin_search(eb, 1727 1740 offsetof(struct btrfs_leaf, items), 1728 1741 sizeof(struct btrfs_item), ··· 2335 2348 struct btrfs_fs_info *fs_info = root->fs_info; 2336 2349 u64 blocknr; 2337 2350 u64 gen; 2338 - struct extent_buffer *b = *eb_ret; 2339 2351 struct extent_buffer *tmp; 2340 2352 struct btrfs_key first_key; 2341 2353 int ret; 2342 2354 int parent_level; 2343 2355 2344 - blocknr = btrfs_node_blockptr(b, slot); 2345 - gen = btrfs_node_ptr_generation(b, slot); 2346 - parent_level = btrfs_header_level(b); 2347 - btrfs_node_key_to_cpu(b, &first_key, slot); 2356 + blocknr = btrfs_node_blockptr(*eb_ret, slot); 2357 + gen = btrfs_node_ptr_generation(*eb_ret, slot); 2358 + parent_level = btrfs_header_level(*eb_ret); 2359 + btrfs_node_key_to_cpu(*eb_ret, &first_key, slot); 2348 2360 2349 2361 tmp = find_extent_buffer(fs_info, blocknr); 2350 2362 if (tmp) { ··· 2485 2499 ret = -EAGAIN; 2486 2500 done: 2487 2501 return ret; 2488 - } 2489 - 2490 - static int key_search(struct extent_buffer *b, const struct btrfs_key *key, 2491 - int level, int *prev_cmp, int *slot) 2492 - { 2493 - if (*prev_cmp != 0) { 2494 - *prev_cmp = btrfs_bin_search(b, key, level, slot); 2495 - return *prev_cmp; 2496 - } 2497 - 2498 - *slot = 0; 2499 - 2500 - return 0; 2501 2502 } 2502 2503 2503 2504 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, ··· 2756 2783 } 2757 2784 } 2758 2785 2759 - ret = key_search(b, key, level, &prev_cmp, &slot); 2760 - if (ret < 0) 2761 - goto done; 2786 + /* 2787 + * If btrfs_bin_search returns an exact match (prev_cmp == 0) 2788 + * we can safely assume the target key will always be in slot 0 2789 + * on lower levels due to the invariants BTRFS' btree provides, 2790 + * namely that a btrfs_key_ptr entry always points to the 2791 + * lowest key in the child node, thus we can skip searching 2792 + * lower levels 2793 + */ 2794 + if (prev_cmp == 0) { 2795 + slot = 0; 2796 + ret = 0; 2797 + } else { 2798 + ret = btrfs_bin_search(b, key, &slot); 2799 + prev_cmp = ret; 2800 + if (ret < 0) 2801 + goto done; 2802 + } 2762 2803 2763 2804 if (level == 0) { 2764 2805 p->slots[level] = slot; ··· 2896 2909 int level; 2897 2910 int lowest_unlock = 1; 2898 2911 u8 lowest_level = 0; 2899 - int prev_cmp = -1; 2900 2912 2901 2913 lowest_level = p->lowest_level; 2902 2914 WARN_ON(p->nodes[0] != NULL); ··· 2928 2942 */ 2929 2943 btrfs_unlock_up_safe(p, level + 1); 
2930 2944 2931 - /* 2932 - * Since we can unwind ebs we want to do a real search every 2933 - * time. 2934 - */ 2935 - prev_cmp = -1; 2936 - ret = key_search(b, key, level, &prev_cmp, &slot); 2945 + ret = btrfs_bin_search(b, key, &slot); 2937 2946 if (ret < 0) 2938 2947 goto done; 2939 2948 ··· 3488 3507 { 3489 3508 struct btrfs_item *start_item; 3490 3509 struct btrfs_item *end_item; 3491 - struct btrfs_map_token token; 3492 3510 int data_len; 3493 3511 int nritems = btrfs_header_nritems(l); 3494 3512 int end = min(nritems, start + nr) - 1; 3495 3513 3496 3514 if (!nr) 3497 3515 return 0; 3498 - btrfs_init_map_token(&token, l); 3499 3516 start_item = btrfs_item_nr(start); 3500 3517 end_item = btrfs_item_nr(end); 3501 - data_len = btrfs_token_item_offset(l, start_item, &token) + 3502 - btrfs_token_item_size(l, start_item, &token); 3503 - data_len = data_len - btrfs_token_item_offset(l, end_item, &token); 3518 + data_len = btrfs_item_offset(l, start_item) + 3519 + btrfs_item_size(l, start_item); 3520 + data_len = data_len - btrfs_item_offset(l, end_item); 3504 3521 data_len += sizeof(struct btrfs_item) * nr; 3505 3522 WARN_ON(data_len < 0); 3506 3523 return data_len; ··· 3629 3650 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3630 3651 for (i = 0; i < right_nritems; i++) { 3631 3652 item = btrfs_item_nr(i); 3632 - push_space -= btrfs_token_item_size(right, item, &token); 3633 - btrfs_set_token_item_offset(right, item, push_space, &token); 3653 + push_space -= btrfs_token_item_size(&token, item); 3654 + btrfs_set_token_item_offset(&token, item, push_space); 3634 3655 } 3635 3656 3636 3657 left_nritems -= push_items; ··· 3838 3859 3839 3860 item = btrfs_item_nr(i); 3840 3861 3841 - ioff = btrfs_token_item_offset(left, item, &token); 3842 - btrfs_set_token_item_offset(left, item, 3843 - ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size), 3844 - &token); 3862 + ioff = btrfs_token_item_offset(&token, item); 3863 + btrfs_set_token_item_offset(&token, item, 3864 + ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3845 3865 } 3846 3866 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3847 3867 ··· 3870 3892 for (i = 0; i < right_nritems; i++) { 3871 3893 item = btrfs_item_nr(i); 3872 3894 3873 - push_space = push_space - btrfs_token_item_size(right, 3874 - item, &token); 3875 - btrfs_set_token_item_offset(right, item, push_space, &token); 3895 + push_space = push_space - btrfs_token_item_size(&token, item); 3896 + btrfs_set_token_item_offset(&token, item, push_space); 3876 3897 } 3877 3898 3878 3899 btrfs_mark_buffer_dirty(left); ··· 4013 4036 struct btrfs_item *item = btrfs_item_nr(i); 4014 4037 u32 ioff; 4015 4038 4016 - ioff = btrfs_token_item_offset(right, item, &token); 4017 - btrfs_set_token_item_offset(right, item, 4018 - ioff + rt_data_off, &token); 4039 + ioff = btrfs_token_item_offset(&token, item); 4040 + btrfs_set_token_item_offset(&token, item, ioff + rt_data_off); 4019 4041 } 4020 4042 4021 4043 btrfs_set_header_nritems(l, mid); ··· 4517 4541 u32 ioff; 4518 4542 item = btrfs_item_nr(i); 4519 4543 4520 - ioff = btrfs_token_item_offset(leaf, item, &token); 4521 - btrfs_set_token_item_offset(leaf, item, 4522 - ioff + size_diff, &token); 4544 + ioff = btrfs_token_item_offset(&token, item); 4545 + btrfs_set_token_item_offset(&token, item, ioff + size_diff); 4523 4546 } 4524 4547 4525 4548 /* shift the data */ ··· 4615 4640 u32 ioff; 4616 4641 item = btrfs_item_nr(i); 4617 4642 4618 - ioff = btrfs_token_item_offset(leaf, item, &token); 4619 - 
btrfs_set_token_item_offset(leaf, item, 4620 - ioff - data_size, &token); 4643 + ioff = btrfs_token_item_offset(&token, item); 4644 + btrfs_set_token_item_offset(&token, item, ioff - data_size); 4621 4645 } 4622 4646 4623 4647 /* shift the data */ ··· 4692 4718 u32 ioff; 4693 4719 4694 4720 item = btrfs_item_nr(i); 4695 - ioff = btrfs_token_item_offset(leaf, item, &token); 4696 - btrfs_set_token_item_offset(leaf, item, 4697 - ioff - total_data, &token); 4721 + ioff = btrfs_token_item_offset(&token, item); 4722 + btrfs_set_token_item_offset(&token, item, 4723 + ioff - total_data); 4698 4724 } 4699 4725 /* shift the items */ 4700 4726 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), ··· 4713 4739 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); 4714 4740 btrfs_set_item_key(leaf, &disk_key, slot + i); 4715 4741 item = btrfs_item_nr(slot + i); 4716 - btrfs_set_token_item_offset(leaf, item, 4717 - data_end - data_size[i], &token); 4742 + btrfs_set_token_item_offset(&token, item, data_end - data_size[i]); 4718 4743 data_end -= data_size[i]; 4719 - btrfs_set_token_item_size(leaf, item, data_size[i], &token); 4744 + btrfs_set_token_item_size(&token, item, data_size[i]); 4720 4745 } 4721 4746 4722 4747 btrfs_set_header_nritems(leaf, nritems + nr); ··· 4903 4930 u32 ioff; 4904 4931 4905 4932 item = btrfs_item_nr(i); 4906 - ioff = btrfs_token_item_offset(leaf, item, &token); 4907 - btrfs_set_token_item_offset(leaf, item, 4908 - ioff + dsize, &token); 4933 + ioff = btrfs_token_item_offset(&token, item); 4934 + btrfs_set_token_item_offset(&token, item, ioff + dsize); 4909 4935 } 4910 4936 4911 4937 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), ··· 5075 5103 while (1) { 5076 5104 nritems = btrfs_header_nritems(cur); 5077 5105 level = btrfs_header_level(cur); 5078 - sret = btrfs_bin_search(cur, min_key, level, &slot); 5106 + sret = btrfs_bin_search(cur, min_key, &slot); 5079 5107 if (sret < 0) { 5080 5108 ret = sret; 5081 5109 goto out;
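Throughout this file the binary search loses its level argument; the helper now derives the leaf-vs-node layout from the extent buffer header itself. Before/after for callers, taken from the hunks above:

	/* before: every caller had to pass the level explicitly */
	sret = btrfs_bin_search(cur, min_key, level, &slot);

	/* after: the level is read from the header inside the helper */
	sret = btrfs_bin_search(cur, min_key, &slot);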
+66 -55
fs/btrfs/ctree.h
··· 28 28 #include <linux/dynamic_debug.h> 29 29 #include <linux/refcount.h> 30 30 #include <linux/crc32c.h> 31 + #include <linux/iomap.h> 31 32 #include "extent-io-tree.h" 32 33 #include "extent_io.h" 33 34 #include "extent_map.h" ··· 583 582 struct btrfs_root *quota_root; 584 583 struct btrfs_root *uuid_root; 585 584 struct btrfs_root *free_space_root; 585 + struct btrfs_root *data_reloc_root; 586 586 587 587 /* the log root tree is a directory of all the other log roots */ 588 588 struct btrfs_root *log_root_tree; ··· 760 758 struct btrfs_workqueue *endio_workers; 761 759 struct btrfs_workqueue *endio_meta_workers; 762 760 struct btrfs_workqueue *endio_raid56_workers; 763 - struct btrfs_workqueue *endio_repair_workers; 764 761 struct btrfs_workqueue *rmw_workers; 765 762 struct btrfs_workqueue *endio_meta_write_workers; 766 763 struct btrfs_workqueue *endio_write_workers; ··· 971 970 * is used to tell us when more checks are required 972 971 */ 973 972 BTRFS_ROOT_IN_TRANS_SETUP, 974 - BTRFS_ROOT_REF_COWS, 973 + 974 + /* 975 + * Set if tree blocks of this root can be shared by other roots. 976 + * Only subvolume trees and their reloc trees have this bit set. 977 + * Conflicts with TRACK_DIRTY bit. 978 + * 979 + * This affects two things: 980 + * 981 + * - How balance works 982 + * For shareable roots, we need to use reloc tree and do path 983 + * replacement for balance, and need various pre/post hooks for 984 + * snapshot creation to handle them. 985 + * 986 + * While for non-shareable trees, we just simply do a tree search 987 + * with COW. 988 + * 989 + * - How dirty roots are tracked 990 + * For shareable roots, btrfs_record_root_in_trans() is needed to 991 + * track them, while non-subvolume roots have TRACK_DIRTY bit, they 992 + * don't need to set this manually. 993 + */ 994 + BTRFS_ROOT_SHAREABLE, 975 995 BTRFS_ROOT_TRACK_DIRTY, 976 996 BTRFS_ROOT_IN_RADIX, 977 997 BTRFS_ROOT_ORPHAN_ITEM_INSERTED, ··· 1078 1056 struct btrfs_key defrag_progress; 1079 1057 struct btrfs_key defrag_max; 1080 1058 1081 - /* the dirty list is only used by non-reference counted roots */ 1059 + /* The dirty list is only used by non-shareable roots */ 1082 1060 struct list_head dirty_list; 1083 1061 1084 1062 struct list_head root_list; ··· 1167 1145 1168 1146 /* Record pairs of swapped blocks for qgroup */ 1169 1147 struct btrfs_qgroup_swapped_blocks swapped_blocks; 1148 + 1149 + /* Used only by log trees, when logging csum items */ 1150 + struct extent_io_tree log_csum_range; 1170 1151 1171 1152 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 1172 1153 u64 alloc_bytenr; ··· 1366 1341 BTRFS_INODE_ROOT_ITEM_INIT) 1367 1342 1368 1343 struct btrfs_map_token { 1369 - const struct extent_buffer *eb; 1344 + struct extent_buffer *eb; 1370 1345 char *kaddr; 1371 1346 unsigned long offset; 1372 1347 }; ··· 1378 1353 struct extent_buffer *eb) 1379 1354 { 1380 1355 token->eb = eb; 1381 - token->kaddr = NULL; 1356 + token->kaddr = page_address(eb->pages[0]); 1357 + token->offset = 0; 1382 1358 } 1383 1359 1384 1360 /* some macros to generate set/get functions for the struct fields. 
This ··· 1403 1377 sizeof(((type *)0)->member))) 1404 1378 1405 1379 #define DECLARE_BTRFS_SETGET_BITS(bits) \ 1406 - u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ 1407 - const void *ptr, unsigned long off, \ 1408 - struct btrfs_map_token *token); \ 1409 - void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \ 1410 - unsigned long off, u##bits val, \ 1411 - struct btrfs_map_token *token); \ 1380 + u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \ 1381 + const void *ptr, unsigned long off); \ 1382 + void btrfs_set_token_##bits(struct btrfs_map_token *token, \ 1383 + const void *ptr, unsigned long off, \ 1384 + u##bits val); \ 1412 1385 u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ 1413 1386 const void *ptr, unsigned long off); \ 1414 - void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ 1387 + void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \ 1415 1388 unsigned long off, u##bits val); 1416 1389 1417 1390 DECLARE_BTRFS_SETGET_BITS(8) ··· 1425 1400 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1426 1401 return btrfs_get_##bits(eb, s, offsetof(type, member)); \ 1427 1402 } \ 1428 - static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ 1403 + static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \ 1429 1404 u##bits val) \ 1430 1405 { \ 1431 1406 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1432 1407 btrfs_set_##bits(eb, s, offsetof(type, member), val); \ 1433 1408 } \ 1434 - static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\ 1435 - const type *s, \ 1436 - struct btrfs_map_token *token) \ 1409 + static inline u##bits btrfs_token_##name(struct btrfs_map_token *token, \ 1410 + const type *s) \ 1437 1411 { \ 1438 1412 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1439 - return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ 1413 + return btrfs_get_token_##bits(token, s, offsetof(type, member));\ 1440 1414 } \ 1441 - static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ 1442 - type *s, u##bits val, \ 1443 - struct btrfs_map_token *token) \ 1415 + static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\ 1416 + type *s, u##bits val) \ 1444 1417 { \ 1445 1418 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1446 - btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ 1419 + btrfs_set_token_##bits(token, s, offsetof(type, member), val); \ 1447 1420 } 1448 1421 1449 1422 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ ··· 1451 1428 u##bits res = le##bits##_to_cpu(p->member); \ 1452 1429 return res; \ 1453 1430 } \ 1454 - static inline void btrfs_set_##name(struct extent_buffer *eb, \ 1431 + static inline void btrfs_set_##name(const struct extent_buffer *eb, \ 1455 1432 u##bits val) \ 1456 1433 { \ 1457 1434 type *p = page_address(eb->pages[0]); \ ··· 1469 1446 } 1470 1447 1471 1448 1472 - static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb, 1449 + static inline u64 btrfs_device_total_bytes(const struct extent_buffer *eb, 1473 1450 struct btrfs_dev_item *s) 1474 1451 { 1475 1452 BUILD_BUG_ON(sizeof(u64) != ··· 1477 1454 return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item, 1478 1455 total_bytes)); 1479 1456 } 1480 - static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb, 1457 + static inline void btrfs_set_device_total_bytes(const struct extent_buffer *eb, 1481 1458 
struct btrfs_dev_item *s, 1482 1459 u64 val) 1483 1460 { ··· 1581 1558 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); 1582 1559 } 1583 1560 1584 - static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, 1561 + static inline u64 btrfs_stripe_offset_nr(const struct extent_buffer *eb, 1585 1562 struct btrfs_chunk *c, int nr) 1586 1563 { 1587 1564 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); 1588 1565 } 1589 1566 1590 - static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, 1567 + static inline u64 btrfs_stripe_devid_nr(const struct extent_buffer *eb, 1591 1568 struct btrfs_chunk *c, int nr) 1592 1569 { 1593 1570 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); ··· 1667 1644 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, 1668 1645 chunk_offset, 64); 1669 1646 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); 1670 - 1671 - static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) 1672 - { 1673 - unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); 1674 - return (unsigned long)dev + ptr; 1675 - } 1676 - 1677 1647 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 1678 1648 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, 1679 1649 generation, 64); 1680 1650 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); 1681 1651 1682 - BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 1683 - 1684 - 1685 1652 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); 1686 1653 1687 - static inline void btrfs_tree_block_key(struct extent_buffer *eb, 1654 + static inline void btrfs_tree_block_key(const struct extent_buffer *eb, 1688 1655 struct btrfs_tree_block_info *item, 1689 1656 struct btrfs_disk_key *key) 1690 1657 { 1691 1658 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 1692 1659 } 1693 1660 1694 - static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, 1661 + static inline void btrfs_set_tree_block_key(const struct extent_buffer *eb, 1695 1662 struct btrfs_tree_block_info *item, 1696 1663 struct btrfs_disk_key *key) 1697 1664 { ··· 1719 1706 return 0; 1720 1707 } 1721 1708 1722 - BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); 1723 - BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, 1724 - generation, 64); 1725 - BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); 1726 - BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); 1727 - 1728 1709 /* struct btrfs_node */ 1729 1710 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); 1730 1711 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); ··· 1727 1720 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, 1728 1721 generation, 64); 1729 1722 1730 - static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 1723 + static inline u64 btrfs_node_blockptr(const struct extent_buffer *eb, int nr) 1731 1724 { 1732 1725 unsigned long ptr; 1733 1726 ptr = offsetof(struct btrfs_node, ptrs) + ··· 1735 1728 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); 1736 1729 } 1737 1730 1738 - static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, 1731 + static inline void btrfs_set_node_blockptr(const struct extent_buffer *eb, 1739 1732 int nr, u64 val) 1740 1733 { 1741 1734 unsigned long ptr; ··· 1744 1737 
btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); 1745 1738 } 1746 1739 1747 - static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) 1740 + static inline u64 btrfs_node_ptr_generation(const struct extent_buffer *eb, int nr) 1748 1741 { 1749 1742 unsigned long ptr; 1750 1743 ptr = offsetof(struct btrfs_node, ptrs) + ··· 1752 1745 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 1753 1746 } 1754 1747 1755 - static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, 1748 + static inline void btrfs_set_node_ptr_generation(const struct extent_buffer *eb, 1756 1749 int nr, u64 val) 1757 1750 { 1758 1751 unsigned long ptr; ··· 1770 1763 void btrfs_node_key(const struct extent_buffer *eb, 1771 1764 struct btrfs_disk_key *disk_key, int nr); 1772 1765 1773 - static inline void btrfs_set_node_key(struct extent_buffer *eb, 1766 + static inline void btrfs_set_node_key(const struct extent_buffer *eb, 1774 1767 struct btrfs_disk_key *disk_key, int nr) 1775 1768 { 1776 1769 unsigned long ptr; ··· 2505 2498 struct btrfs_ref *generic_ref); 2506 2499 2507 2500 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); 2508 - void btrfs_get_block_group_trimming(struct btrfs_block_group *cache); 2509 - void btrfs_put_block_group_trimming(struct btrfs_block_group *cache); 2510 2501 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 2511 2502 2512 2503 enum btrfs_reserve_flush_enum { ··· 2517 2512 BTRFS_RESERVE_FLUSH_LIMIT, 2518 2513 BTRFS_RESERVE_FLUSH_EVICT, 2519 2514 BTRFS_RESERVE_FLUSH_ALL, 2515 + BTRFS_RESERVE_FLUSH_ALL_STEAL, 2520 2516 }; 2521 2517 2522 2518 enum btrfs_flush_state { ··· 2557 2551 2558 2552 /* ctree.c */ 2559 2553 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, 2560 - int level, int *slot); 2554 + int *slot); 2561 2555 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); 2562 2556 int btrfs_previous_item(struct btrfs_root *root, 2563 2557 struct btrfs_path *path, u64 min_objectid, ··· 2902 2896 int btrfs_drop_inode(struct inode *inode); 2903 2897 int __init btrfs_init_cachep(void); 2904 2898 void __cold btrfs_destroy_cachep(void); 2905 - struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, 2899 + struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 2906 2900 struct btrfs_root *root, struct btrfs_path *path); 2907 - struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 2908 - struct btrfs_root *root); 2901 + struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root); 2909 2902 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 2910 2903 struct page *page, size_t pg_offset, 2911 2904 u64 start, u64 end); ··· 2934 2929 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, 2935 2930 u64 end, int uptodate); 2936 2931 extern const struct dentry_operations btrfs_dentry_operations; 2932 + ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter); 2933 + extern const struct iomap_ops btrfs_dio_iomap_ops; 2934 + extern const struct iomap_dio_ops btrfs_dops; 2937 2935 2938 2936 /* ioctl.c */ 2939 2937 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); ··· 3389 3381 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 3390 3382 struct btrfs_pending_snapshot *pending); 3391 3383 int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info); 3384 + struct btrfs_root *find_reloc_root(struct btrfs_fs_info 
*fs_info, 3385 + u64 bytenr); 3386 + int btrfs_should_ignore_reloc_root(struct btrfs_root *root); 3392 3387 3393 3388 /* scrub.c */ 3394 3389 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
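The map token rework shows up here as a calling-convention change: the token is initialized once against an extent buffer (priming kaddr to the first page) and is then passed first, with no separate eb argument. A short sketch built from calls visible in this diff; leaf, item and new_offset stand in for real variables:

	struct btrfs_map_token token;

	btrfs_init_map_token(&token, leaf);	/* kaddr now points at page 0 */
	/* new convention: token first, no extent buffer parameter */
	if (btrfs_token_item_size(&token, item) > 0)
		btrfs_set_token_item_offset(&token, item, new_offset);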
+51 -42
fs/btrfs/disk-io.c
··· 358 358 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 359 359 360 360 shash->tfm = fs_info->csum_shash; 361 - crypto_shash_init(shash); 362 361 363 362 /* 364 363 * The super_block structure does not span the whole 365 364 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is 366 365 * filled with zeros and is included in the checksum. 367 366 */ 368 - crypto_shash_update(shash, raw_disk_sb + BTRFS_CSUM_SIZE, 369 - BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); 370 - crypto_shash_final(shash, result); 367 + crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE, 368 + BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result); 371 369 372 370 if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb))) 373 371 return 1; ··· 707 709 else 708 710 wq = fs_info->endio_write_workers; 709 711 } else { 710 - if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR)) 711 - wq = fs_info->endio_repair_workers; 712 - else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) 712 + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) 713 713 wq = fs_info->endio_raid56_workers; 714 714 else if (end_io_wq->metadata) 715 715 wq = fs_info->endio_meta_workers; ··· 1131 1135 root->log_transid = 0; 1132 1136 root->log_transid_committed = -1; 1133 1137 root->last_log_commit = 0; 1134 - if (!dummy) 1138 + if (!dummy) { 1135 1139 extent_io_tree_init(fs_info, &root->dirty_log_pages, 1136 1140 IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL); 1141 + extent_io_tree_init(fs_info, &root->log_csum_range, 1142 + IO_TREE_LOG_CSUM_RANGE, NULL); 1143 + } 1137 1144 1138 1145 memset(&root->root_key, 0, sizeof(root->root_key)); 1139 1146 memset(&root->root_item, 0, sizeof(root->root_item)); ··· 1274 1275 root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; 1275 1276 1276 1277 /* 1277 - * DON'T set REF_COWS for log trees 1278 + * DON'T set SHAREABLE bit for log trees. 1278 1279 * 1279 - * log trees do not get reference counted because they go away 1280 - * before a real commit is actually done. They do store pointers 1281 - * to file data extents, and those reference counts still get 1282 - * updated (along with back refs to the log tree). 1280 + * Log trees are not exposed to user space thus can't be snapshotted, 1281 + * and they go away before a real commit is actually done. 1282 + * 1283 + * They do store pointers to file data extents, and those reference 1284 + * counts still get updated (along with back refs to the log tree). 
1283 1285 */ 1284 1286 1285 1287 leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID, ··· 1418 1418 if (ret) 1419 1419 goto fail; 1420 1420 1421 - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 1422 - set_bit(BTRFS_ROOT_REF_COWS, &root->state); 1421 + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && 1422 + root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { 1423 + set_bit(BTRFS_ROOT_SHAREABLE, &root->state); 1423 1424 btrfs_check_and_init_root_item(&root->root_item); 1424 1425 } 1425 1426 ··· 1525 1524 btrfs_put_root(fs_info->uuid_root); 1526 1525 btrfs_put_root(fs_info->free_space_root); 1527 1526 btrfs_put_root(fs_info->fs_root); 1527 + btrfs_put_root(fs_info->data_reloc_root); 1528 1528 btrfs_check_leaked_roots(fs_info); 1529 1529 btrfs_extent_buffer_leak_debug_check(fs_info); 1530 1530 kfree(fs_info->super_copy); ··· 1535 1533 1536 1534 1537 1535 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, 1538 - struct btrfs_key *location, 1539 - bool check_ref) 1536 + u64 objectid, bool check_ref) 1540 1537 { 1541 1538 struct btrfs_root *root; 1542 1539 struct btrfs_path *path; 1543 1540 struct btrfs_key key; 1544 1541 int ret; 1545 1542 1546 - if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) 1543 + if (objectid == BTRFS_ROOT_TREE_OBJECTID) 1547 1544 return btrfs_grab_root(fs_info->tree_root); 1548 - if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID) 1545 + if (objectid == BTRFS_EXTENT_TREE_OBJECTID) 1549 1546 return btrfs_grab_root(fs_info->extent_root); 1550 - if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID) 1547 + if (objectid == BTRFS_CHUNK_TREE_OBJECTID) 1551 1548 return btrfs_grab_root(fs_info->chunk_root); 1552 - if (location->objectid == BTRFS_DEV_TREE_OBJECTID) 1549 + if (objectid == BTRFS_DEV_TREE_OBJECTID) 1553 1550 return btrfs_grab_root(fs_info->dev_root); 1554 - if (location->objectid == BTRFS_CSUM_TREE_OBJECTID) 1551 + if (objectid == BTRFS_CSUM_TREE_OBJECTID) 1555 1552 return btrfs_grab_root(fs_info->csum_root); 1556 - if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID) 1553 + if (objectid == BTRFS_QUOTA_TREE_OBJECTID) 1557 1554 return btrfs_grab_root(fs_info->quota_root) ? 1558 1555 fs_info->quota_root : ERR_PTR(-ENOENT); 1559 - if (location->objectid == BTRFS_UUID_TREE_OBJECTID) 1556 + if (objectid == BTRFS_UUID_TREE_OBJECTID) 1560 1557 return btrfs_grab_root(fs_info->uuid_root) ? 1561 1558 fs_info->uuid_root : ERR_PTR(-ENOENT); 1562 - if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) 1559 + if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) 1563 1560 return btrfs_grab_root(fs_info->free_space_root) ? 
1564 1561 fs_info->free_space_root : ERR_PTR(-ENOENT); 1565 1562 again: 1566 - root = btrfs_lookup_fs_root(fs_info, location->objectid); 1563 + root = btrfs_lookup_fs_root(fs_info, objectid); 1567 1564 if (root) { 1568 1565 if (check_ref && btrfs_root_refs(&root->root_item) == 0) { 1569 1566 btrfs_put_root(root); ··· 1571 1570 return root; 1572 1571 } 1573 1572 1574 - root = btrfs_read_tree_root(fs_info->tree_root, location); 1573 + key.objectid = objectid; 1574 + key.type = BTRFS_ROOT_ITEM_KEY; 1575 + key.offset = (u64)-1; 1576 + root = btrfs_read_tree_root(fs_info->tree_root, &key); 1575 1577 if (IS_ERR(root)) 1576 1578 return root; 1577 1579 ··· 1594 1590 } 1595 1591 key.objectid = BTRFS_ORPHAN_OBJECTID; 1596 1592 key.type = BTRFS_ORPHAN_ITEM_KEY; 1597 - key.offset = location->objectid; 1593 + key.offset = objectid; 1598 1594 1599 1595 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 1600 1596 btrfs_free_path(path); ··· 1944 1940 btrfs_destroy_workqueue(fs_info->workers); 1945 1941 btrfs_destroy_workqueue(fs_info->endio_workers); 1946 1942 btrfs_destroy_workqueue(fs_info->endio_raid56_workers); 1947 - btrfs_destroy_workqueue(fs_info->endio_repair_workers); 1948 1943 btrfs_destroy_workqueue(fs_info->rmw_workers); 1949 1944 btrfs_destroy_workqueue(fs_info->endio_write_workers); 1950 1945 btrfs_destroy_workqueue(fs_info->endio_freespace_worker); ··· 1984 1981 free_root_extent_buffers(info->quota_root); 1985 1982 free_root_extent_buffers(info->uuid_root); 1986 1983 free_root_extent_buffers(info->fs_root); 1984 + free_root_extent_buffers(info->data_reloc_root); 1987 1985 if (free_chunk_root) 1988 1986 free_root_extent_buffers(info->chunk_root); 1989 1987 free_root_extent_buffers(info->free_space_root); ··· 1997 1993 1998 1994 if (refcount_dec_and_test(&root->refs)) { 1999 1995 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 1996 + WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)); 2000 1997 if (root->anon_dev) 2001 1998 free_anon_bdev(root->anon_dev); 2002 1999 btrfs_drew_lock_destroy(&root->snapshot_lock); ··· 2148 2143 fs_info->endio_raid56_workers = 2149 2144 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, 2150 2145 max_active, 4); 2151 - fs_info->endio_repair_workers = 2152 - btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); 2153 2146 fs_info->rmw_workers = 2154 2147 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); 2155 2148 fs_info->endio_write_workers = ··· 2171 2168 fs_info->flush_workers && 2172 2169 fs_info->endio_workers && fs_info->endio_meta_workers && 2173 2170 fs_info->endio_meta_write_workers && 2174 - fs_info->endio_repair_workers && 2175 2171 fs_info->endio_write_workers && fs_info->endio_raid56_workers && 2176 2172 fs_info->endio_freespace_worker && fs_info->rmw_workers && 2177 2173 fs_info->caching_workers && fs_info->readahead_workers && ··· 2291 2289 } 2292 2290 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2293 2291 fs_info->csum_root = root; 2292 + 2293 + /* 2294 + * This tree can share blocks with some other fs tree during relocation 2295 + * and we need a proper setup by btrfs_get_fs_root 2296 + */ 2297 + root = btrfs_get_fs_root(tree_root->fs_info, 2298 + BTRFS_DATA_RELOC_TREE_OBJECTID, true); 2299 + if (IS_ERR(root)) { 2300 + ret = PTR_ERR(root); 2301 + goto out; 2302 + } 2303 + set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2304 + fs_info->data_reloc_root = root; 2294 2305 2295 2306 location.objectid = BTRFS_QUOTA_TREE_OBJECTID; 2296 2307 root = btrfs_read_tree_root(tree_root, &location); ··· 2842 2827 u64 
generation; 2843 2828 u64 features; 2844 2829 u16 csum_type; 2845 - struct btrfs_key location; 2846 2830 struct btrfs_super_block *disk_super; 2847 2831 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2848 2832 struct btrfs_root *tree_root; ··· 3255 3241 } 3256 3242 } 3257 3243 3258 - location.objectid = BTRFS_FS_TREE_OBJECTID; 3259 - location.type = BTRFS_ROOT_ITEM_KEY; 3260 - location.offset = 0; 3261 - 3262 - fs_info->fs_root = btrfs_get_fs_root(fs_info, &location, true); 3244 + fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true); 3263 3245 if (IS_ERR(fs_info->fs_root)) { 3264 3246 err = PTR_ERR(fs_info->fs_root); 3265 3247 btrfs_warn(fs_info, "failed to read fs tree: %d", err); ··· 3518 3508 3519 3509 btrfs_set_super_bytenr(sb, bytenr); 3520 3510 3521 - crypto_shash_init(shash); 3522 - crypto_shash_update(shash, (const char *)sb + BTRFS_CSUM_SIZE, 3523 - BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); 3524 - crypto_shash_final(shash, sb->csum); 3511 + crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE, 3512 + BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, 3513 + sb->csum); 3525 3514 3526 3515 page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT, 3527 3516 GFP_NOFS);
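btrfs_get_fs_root() now takes a bare objectid and builds the root key internally. Before/after from the caller's point of view, as seen in the hunks above:

	/* before */
	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;
	fs_info->fs_root = btrfs_get_fs_root(fs_info, &location, true);

	/* after */
	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);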
+1 -3
fs/btrfs/disk-io.h
··· 25 25 BTRFS_WQ_ENDIO_METADATA, 26 26 BTRFS_WQ_ENDIO_FREE_SPACE, 27 27 BTRFS_WQ_ENDIO_RAID56, 28 - BTRFS_WQ_ENDIO_DIO_REPAIR, 29 28 }; 30 29 31 30 static inline u64 btrfs_sb_offset(int mirror) ··· 66 67 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info); 67 68 68 69 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, 69 - struct btrfs_key *key, 70 - bool check_ref); 70 + u64 objectid, bool check_ref); 71 71 72 72 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info); 73 73 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
+3 -14
fs/btrfs/export.c
··· 64 64 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 65 65 struct btrfs_root *root; 66 66 struct inode *inode; 67 - struct btrfs_key key; 68 67 69 68 if (objectid < BTRFS_FIRST_FREE_OBJECTID) 70 69 return ERR_PTR(-ESTALE); 71 70 72 - key.objectid = root_objectid; 73 - key.type = BTRFS_ROOT_ITEM_KEY; 74 - key.offset = (u64)-1; 75 - 76 - root = btrfs_get_fs_root(fs_info, &key, true); 71 + root = btrfs_get_fs_root(fs_info, root_objectid, true); 77 72 if (IS_ERR(root)) 78 73 return ERR_CAST(root); 79 74 80 - key.objectid = objectid; 81 - key.type = BTRFS_INODE_ITEM_KEY; 82 - key.offset = 0; 83 - 84 - inode = btrfs_iget(sb, &key, root); 75 + inode = btrfs_iget(sb, objectid, root); 85 76 btrfs_put_root(root); 86 77 if (IS_ERR(inode)) 87 78 return ERR_CAST(inode); ··· 191 200 found_key.offset, 0, 0); 192 201 } 193 202 194 - key.type = BTRFS_INODE_ITEM_KEY; 195 - key.offset = 0; 196 - return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root)); 203 + return d_obtain_alias(btrfs_iget(fs_info->sb, key.objectid, root)); 197 204 fail: 198 205 btrfs_free_path(path); 199 206 return ERR_PTR(ret);
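btrfs_iget() gets the same treatment as btrfs_get_fs_root(): callers pass the inode number instead of assembling a key whose type and offset were always BTRFS_INODE_ITEM_KEY and 0. Before/after, taken from this file's hunks:

	/* before */
	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(sb, &key, root);

	/* after */
	inode = btrfs_iget(sb, objectid, root);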
+1
fs/btrfs/extent-io-tree.h
··· 44 44 IO_TREE_TRANS_DIRTY_PAGES, 45 45 IO_TREE_ROOT_DIRTY_LOG_PAGES, 46 46 IO_TREE_INODE_FILE_EXTENT, 47 + IO_TREE_LOG_CSUM_RANGE, 47 48 IO_TREE_SELFTEST, 48 49 }; 49 50
+3 -20
fs/btrfs/extent-tree.c
··· 2114 2114 } 2115 2115 #endif 2116 2116 2117 - static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads) 2118 - { 2119 - u64 num_bytes; 2120 - 2121 - num_bytes = heads * (sizeof(struct btrfs_extent_item) + 2122 - sizeof(struct btrfs_extent_inline_ref)); 2123 - if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA)) 2124 - num_bytes += heads * sizeof(struct btrfs_tree_block_info); 2125 - 2126 - /* 2127 - * We don't ever fill up leaves all the way so multiply by 2 just to be 2128 - * closer to what we're really going to want to use. 2129 - */ 2130 - return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info)); 2131 - } 2132 - 2133 2117 /* 2134 2118 * Takes the number of bytes to be csumm'ed and figures out how many leaves it 2135 2119 * would require to store the csums for that many bytes. ··· 2426 2442 nritems = btrfs_header_nritems(buf); 2427 2443 level = btrfs_header_level(buf); 2428 2444 2429 - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0) 2445 + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) 2430 2446 return 0; 2431 2447 2432 2448 if (full_backref) ··· 2916 2932 &trimmed); 2917 2933 2918 2934 list_del_init(&block_group->bg_list); 2919 - btrfs_put_block_group_trimming(block_group); 2935 + btrfs_unfreeze_block_group(block_group); 2920 2936 btrfs_put_block_group(block_group); 2921 2937 2922 2938 if (ret) { ··· 3353 3369 struct btrfs_block_group *block_group, 3354 3370 struct btrfs_free_cluster *cluster, 3355 3371 int delalloc) 3372 + __acquires(&cluster->refill_lock) 3356 3373 { 3357 3374 struct btrfs_block_group *used_bg = NULL; 3358 3375 ··· 5486 5501 */ 5487 5502 if (!for_reloc && !root_dropped) 5488 5503 btrfs_add_dead_root(root); 5489 - if (err && err != -EAGAIN) 5490 - btrfs_handle_fs_error(fs_info, err, NULL); 5491 5504 return err; 5492 5505 } 5493 5506
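The __acquires() annotation added to the cluster helper is a sparse context hint: it documents that the function returns with cluster->refill_lock held, so the apparent lock imbalance is intentional. A minimal illustration of the pattern (function name hypothetical):

	static void lock_cluster(struct btrfs_free_cluster *cluster)
		__acquires(&cluster->refill_lock)
	{
		spin_lock(&cluster->refill_lock);
		/* returns with the lock held; the caller must unlock */
	}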
+132 -156
fs/btrfs/extent_io.c
··· 2333 2333 return 0; 2334 2334 } 2335 2335 2336 - int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num) 2336 + int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) 2337 2337 { 2338 2338 struct btrfs_fs_info *fs_info = eb->fs_info; 2339 2339 u64 start = eb->start; ··· 2537 2537 return 0; 2538 2538 } 2539 2539 2540 - bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, 2541 - struct io_failure_record *failrec, int failed_mirror) 2540 + static bool btrfs_check_repairable(struct inode *inode, bool needs_validation, 2541 + struct io_failure_record *failrec, 2542 + int failed_mirror) 2542 2543 { 2543 2544 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2544 2545 int num_copies; ··· 2562 2561 * a) deliver good data to the caller 2563 2562 * b) correct the bad sectors on disk 2564 2563 */ 2565 - if (failed_bio_pages > 1) { 2564 + if (needs_validation) { 2566 2565 /* 2567 2566 * to fulfill b), we need to know the exact failing sectors, as 2568 2567 * we don't want to rewrite any more than the failed ones. thus, ··· 2601 2600 return true; 2602 2601 } 2603 2602 2604 - 2605 - struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, 2606 - struct io_failure_record *failrec, 2607 - struct page *page, int pg_offset, int icsum, 2608 - bio_end_io_t *endio_func, void *data) 2603 + static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio) 2609 2604 { 2610 - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2611 - struct bio *bio; 2612 - struct btrfs_io_bio *btrfs_failed_bio; 2613 - struct btrfs_io_bio *btrfs_bio; 2605 + u64 len = 0; 2606 + const u32 blocksize = inode->i_sb->s_blocksize; 2614 2607 2615 - bio = btrfs_io_bio_alloc(1); 2616 - bio->bi_end_io = endio_func; 2617 - bio->bi_iter.bi_sector = failrec->logical >> 9; 2618 - bio->bi_iter.bi_size = 0; 2619 - bio->bi_private = data; 2608 + /* 2609 + * If bi_status is BLK_STS_OK, then this was a checksum error, not an 2610 + * I/O error. In this case, we already know exactly which sector was 2611 + * bad, so we don't need to validate. 2612 + */ 2613 + if (bio->bi_status == BLK_STS_OK) 2614 + return false; 2620 2615 2621 - btrfs_failed_bio = btrfs_io_bio(failed_bio); 2622 - if (btrfs_failed_bio->csum) { 2623 - u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); 2616 + /* 2617 + * We need to validate each sector individually if the failed I/O was 2618 + * for multiple sectors. 2619 + * 2620 + * There are a few possible bios that can end up here: 2621 + * 1. A buffered read bio, which is not cloned. 2622 + * 2. A direct I/O read bio, which is cloned. 2623 + * 3. A (buffered or direct) repair bio, which is not cloned. 2624 + * 2625 + * For cloned bios (case 2), we can get the size from 2626 + * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get 2627 + * it from the bvecs. 
2628 + */ 2629 + if (bio_flagged(bio, BIO_CLONED)) { 2630 + if (btrfs_io_bio(bio)->iter.bi_size > blocksize) 2631 + return true; 2632 + } else { 2633 + struct bio_vec *bvec; 2634 + int i; 2624 2635 2625 - btrfs_bio = btrfs_io_bio(bio); 2626 - btrfs_bio->csum = btrfs_bio->csum_inline; 2627 - icsum *= csum_size; 2628 - memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum, 2629 - csum_size); 2636 + bio_for_each_bvec_all(bvec, bio, i) { 2637 + len += bvec->bv_len; 2638 + if (len > blocksize) 2639 + return true; 2640 + } 2630 2641 } 2631 - 2632 - bio_add_page(bio, page, failrec->len, pg_offset); 2633 - 2634 - return bio; 2642 + return false; 2635 2643 } 2636 2644 2637 - /* 2638 - * This is a generic handler for readpage errors. If other copies exist, read 2639 - * those and write back good data to the failed position. Does not investigate 2640 - * in remapping the failed extent elsewhere, hoping the device will be smart 2641 - * enough to do this as needed 2642 - */ 2643 - static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, 2644 - struct page *page, u64 start, u64 end, 2645 - int failed_mirror) 2645 + blk_status_t btrfs_submit_read_repair(struct inode *inode, 2646 + struct bio *failed_bio, u64 phy_offset, 2647 + struct page *page, unsigned int pgoff, 2648 + u64 start, u64 end, int failed_mirror, 2649 + submit_bio_hook_t *submit_bio_hook) 2646 2650 { 2647 2651 struct io_failure_record *failrec; 2648 - struct inode *inode = page->mapping->host; 2652 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2649 2653 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2650 2654 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 2651 - struct bio *bio; 2652 - int read_mode = 0; 2655 + struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio); 2656 + const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits; 2657 + bool need_validation; 2658 + struct bio *repair_bio; 2659 + struct btrfs_io_bio *repair_io_bio; 2653 2660 blk_status_t status; 2654 2661 int ret; 2655 - unsigned failed_bio_pages = failed_bio->bi_iter.bi_size >> PAGE_SHIFT; 2662 + 2663 + btrfs_debug(fs_info, 2664 + "repair read error: read error at %llu", start); 2656 2665 2657 2666 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); 2658 2667 2659 2668 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); 2660 2669 if (ret) 2661 - return ret; 2670 + return errno_to_blk_status(ret); 2662 2671 2663 - if (!btrfs_check_repairable(inode, failed_bio_pages, failrec, 2672 + need_validation = btrfs_io_needs_validation(inode, failed_bio); 2673 + 2674 + if (!btrfs_check_repairable(inode, need_validation, failrec, 2664 2675 failed_mirror)) { 2665 2676 free_io_failure(failure_tree, tree, failrec); 2666 - return -EIO; 2677 + return BLK_STS_IOERR; 2667 2678 } 2668 2679 2669 - if (failed_bio_pages > 1) 2670 - read_mode |= REQ_FAILFAST_DEV; 2680 + repair_bio = btrfs_io_bio_alloc(1); 2681 + repair_io_bio = btrfs_io_bio(repair_bio); 2682 + repair_bio->bi_opf = REQ_OP_READ; 2683 + if (need_validation) 2684 + repair_bio->bi_opf |= REQ_FAILFAST_DEV; 2685 + repair_bio->bi_end_io = failed_bio->bi_end_io; 2686 + repair_bio->bi_iter.bi_sector = failrec->logical >> 9; 2687 + repair_bio->bi_private = failed_bio->bi_private; 2671 2688 2672 - phy_offset >>= inode->i_sb->s_blocksize_bits; 2673 - bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, 2674 - start - page_offset(page), 2675 - (int)phy_offset, failed_bio->bi_end_io, 2676 - NULL); 2677 - bio->bi_opf = REQ_OP_READ | read_mode; 2689 + if 
(failed_io_bio->csum) { 2690 + const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); 2691 + 2692 + repair_io_bio->csum = repair_io_bio->csum_inline; 2693 + memcpy(repair_io_bio->csum, 2694 + failed_io_bio->csum + csum_size * icsum, csum_size); 2695 + } 2696 + 2697 + bio_add_page(repair_bio, page, failrec->len, pgoff); 2698 + repair_io_bio->logical = failrec->start; 2699 + repair_io_bio->iter = repair_bio->bi_iter; 2678 2700 2679 2701 btrfs_debug(btrfs_sb(inode->i_sb), 2680 - "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d", 2681 - read_mode, failrec->this_mirror, failrec->in_validation); 2702 + "repair read error: submitting new read to mirror %d, in_validation=%d", 2703 + failrec->this_mirror, failrec->in_validation); 2682 2704 2683 - status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror, 2684 - failrec->bio_flags); 2705 + status = submit_bio_hook(inode, repair_bio, failrec->this_mirror, 2706 + failrec->bio_flags); 2685 2707 if (status) { 2686 2708 free_io_failure(failure_tree, tree, failrec); 2687 - bio_put(bio); 2688 - ret = blk_status_to_errno(status); 2709 + bio_put(repair_bio); 2689 2710 } 2690 - 2691 - return ret; 2711 + return status; 2692 2712 } 2693 2713 2694 2714 /* lots and lots of room for performance fixes in the end_bio funcs */ ··· 2881 2859 * If it can't handle the error it will return -EIO and 2882 2860 * we remain responsible for that page. 2883 2861 */ 2884 - ret = bio_readpage_error(bio, offset, page, start, end, 2885 - mirror); 2886 - if (ret == 0) { 2862 + if (!btrfs_submit_read_repair(inode, bio, offset, page, 2863 + start - page_offset(page), 2864 + start, end, mirror, 2865 + tree->ops->submit_bio_hook)) { 2887 2866 uptodate = !bio->bi_status; 2888 2867 offset += len; 2889 2868 continue; ··· 4885 4862 kmem_cache_free(extent_buffer_cache, eb); 4886 4863 } 4887 4864 4888 - int extent_buffer_under_io(struct extent_buffer *eb) 4865 + int extent_buffer_under_io(const struct extent_buffer *eb) 4889 4866 { 4890 4867 return (atomic_read(&eb->io_pages) || 4891 4868 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || ··· 4990 4967 return eb; 4991 4968 } 4992 4969 4993 - struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) 4970 + struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src) 4994 4971 { 4995 4972 int i; 4996 4973 struct page *p; ··· 5396 5373 release_extent_buffer(eb); 5397 5374 } 5398 5375 5399 - void clear_extent_buffer_dirty(struct extent_buffer *eb) 5376 + void clear_extent_buffer_dirty(const struct extent_buffer *eb) 5400 5377 { 5401 5378 int i; 5402 5379 int num_pages; ··· 5594 5571 struct page *page; 5595 5572 char *kaddr; 5596 5573 char *dst = (char *)dstv; 5597 - size_t start_offset = offset_in_page(eb->start); 5598 - unsigned long i = (start_offset + start) >> PAGE_SHIFT; 5574 + unsigned long i = start >> PAGE_SHIFT; 5599 5575 5600 5576 if (start + len > eb->len) { 5601 5577 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n", ··· 5603 5581 return; 5604 5582 } 5605 5583 5606 - offset = offset_in_page(start_offset + start); 5584 + offset = offset_in_page(start); 5607 5585 5608 5586 while (len > 0) { 5609 5587 page = eb->pages[i]; ··· 5628 5606 struct page *page; 5629 5607 char *kaddr; 5630 5608 char __user *dst = (char __user *)dstv; 5631 - size_t start_offset = offset_in_page(eb->start); 5632 - unsigned long i = (start_offset + start) >> PAGE_SHIFT; 5609 + unsigned long i = start >> PAGE_SHIFT; 5633 5610 int ret = 
0; 5634 5611 5635 5612 WARN_ON(start > eb->len); 5636 5613 WARN_ON(start + len > eb->start + eb->len); 5637 5614 5638 - offset = offset_in_page(start_offset + start); 5615 + offset = offset_in_page(start); 5639 5616 5640 5617 while (len > 0) { 5641 5618 page = eb->pages[i]; ··· 5655 5634 return ret; 5656 5635 } 5657 5636 5658 - /* 5659 - * return 0 if the item is found within a page. 5660 - * return 1 if the item spans two pages. 5661 - * return -EINVAL otherwise. 5662 - */ 5663 - int map_private_extent_buffer(const struct extent_buffer *eb, 5664 - unsigned long start, unsigned long min_len, 5665 - char **map, unsigned long *map_start, 5666 - unsigned long *map_len) 5667 - { 5668 - size_t offset; 5669 - char *kaddr; 5670 - struct page *p; 5671 - size_t start_offset = offset_in_page(eb->start); 5672 - unsigned long i = (start_offset + start) >> PAGE_SHIFT; 5673 - unsigned long end_i = (start_offset + start + min_len - 1) >> 5674 - PAGE_SHIFT; 5675 - 5676 - if (start + min_len > eb->len) { 5677 - WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n", 5678 - eb->start, eb->len, start, min_len); 5679 - return -EINVAL; 5680 - } 5681 - 5682 - if (i != end_i) 5683 - return 1; 5684 - 5685 - if (i == 0) { 5686 - offset = start_offset; 5687 - *map_start = 0; 5688 - } else { 5689 - offset = 0; 5690 - *map_start = ((u64)i << PAGE_SHIFT) - start_offset; 5691 - } 5692 - 5693 - p = eb->pages[i]; 5694 - kaddr = page_address(p); 5695 - *map = kaddr + offset; 5696 - *map_len = PAGE_SIZE - offset; 5697 - return 0; 5698 - } 5699 - 5700 5637 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, 5701 5638 unsigned long start, unsigned long len) 5702 5639 { ··· 5663 5684 struct page *page; 5664 5685 char *kaddr; 5665 5686 char *ptr = (char *)ptrv; 5666 - size_t start_offset = offset_in_page(eb->start); 5667 - unsigned long i = (start_offset + start) >> PAGE_SHIFT; 5687 + unsigned long i = start >> PAGE_SHIFT; 5668 5688 int ret = 0; 5669 5689 5670 5690 WARN_ON(start > eb->len); 5671 5691 WARN_ON(start + len > eb->start + eb->len); 5672 5692 5673 - offset = offset_in_page(start_offset + start); 5693 + offset = offset_in_page(start); 5674 5694 5675 5695 while (len > 0) { 5676 5696 page = eb->pages[i]; ··· 5689 5711 return ret; 5690 5712 } 5691 5713 5692 - void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, 5714 + void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, 5693 5715 const void *srcv) 5694 5716 { 5695 5717 char *kaddr; ··· 5700 5722 BTRFS_FSID_SIZE); 5701 5723 } 5702 5724 5703 - void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv) 5725 + void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv) 5704 5726 { 5705 5727 char *kaddr; 5706 5728 ··· 5710 5732 BTRFS_FSID_SIZE); 5711 5733 } 5712 5734 5713 - void write_extent_buffer(struct extent_buffer *eb, const void *srcv, 5735 + void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, 5714 5736 unsigned long start, unsigned long len) 5715 5737 { 5716 5738 size_t cur; ··· 5718 5740 struct page *page; 5719 5741 char *kaddr; 5720 5742 char *src = (char *)srcv; 5721 - size_t start_offset = offset_in_page(eb->start); 5722 - unsigned long i = (start_offset + start) >> PAGE_SHIFT; 5743 + unsigned long i = start >> PAGE_SHIFT; 5723 5744 5724 5745 WARN_ON(start > eb->len); 5725 5746 WARN_ON(start + len > eb->start + eb->len); 5726 5747 5727 - offset = offset_in_page(start_offset + start); 5748 + offset = offset_in_page(start); 5728 
5749 5729 5750 while (len > 0) { 5730 5751 page = eb->pages[i]; ··· 5740 5763 } 5741 5764 } 5742 5765 5743 - void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start, 5766 + void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, 5744 5767 unsigned long len) 5745 5768 { 5746 5769 size_t cur; 5747 5770 size_t offset; 5748 5771 struct page *page; 5749 5772 char *kaddr; 5750 - size_t start_offset = offset_in_page(eb->start); 5751 - unsigned long i = (start_offset + start) >> PAGE_SHIFT; 5773 + unsigned long i = start >> PAGE_SHIFT; 5752 5774 5753 5775 WARN_ON(start > eb->len); 5754 5776 WARN_ON(start + len > eb->start + eb->len); 5755 5777 5756 - offset = offset_in_page(start_offset + start); 5778 + offset = offset_in_page(start); 5757 5779 5758 5780 while (len > 0) { 5759 5781 page = eb->pages[i]; ··· 5768 5792 } 5769 5793 } 5770 5794 5771 - void copy_extent_buffer_full(struct extent_buffer *dst, 5772 - struct extent_buffer *src) 5795 + void copy_extent_buffer_full(const struct extent_buffer *dst, 5796 + const struct extent_buffer *src) 5773 5797 { 5774 5798 int i; 5775 5799 int num_pages; ··· 5782 5806 page_address(src->pages[i])); 5783 5807 } 5784 5808 5785 - void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, 5809 + void copy_extent_buffer(const struct extent_buffer *dst, 5810 + const struct extent_buffer *src, 5786 5811 unsigned long dst_offset, unsigned long src_offset, 5787 5812 unsigned long len) 5788 5813 { ··· 5792 5815 size_t offset; 5793 5816 struct page *page; 5794 5817 char *kaddr; 5795 - size_t start_offset = offset_in_page(dst->start); 5796 - unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT; 5818 + unsigned long i = dst_offset >> PAGE_SHIFT; 5797 5819 5798 5820 WARN_ON(src->len != dst_len); 5799 5821 5800 - offset = offset_in_page(start_offset + dst_offset); 5822 + offset = offset_in_page(dst_offset); 5801 5823 5802 5824 while (len > 0) { 5803 5825 page = dst->pages[i]; ··· 5827 5851 * This helper hides the ugliness of finding the byte in an extent buffer which 5828 5852 * contains a given bit. 5829 5853 */ 5830 - static inline void eb_bitmap_offset(struct extent_buffer *eb, 5854 + static inline void eb_bitmap_offset(const struct extent_buffer *eb, 5831 5855 unsigned long start, unsigned long nr, 5832 5856 unsigned long *page_index, 5833 5857 size_t *page_offset) 5834 5858 { 5835 - size_t start_offset = offset_in_page(eb->start); 5836 5859 size_t byte_offset = BIT_BYTE(nr); 5837 5860 size_t offset; 5838 5861 ··· 5840 5865 * the bitmap item in the extent buffer + the offset of the byte in the 5841 5866 * bitmap item. 
5842 5867 */ 5843 - offset = start_offset + start + byte_offset; 5868 + offset = start + byte_offset; 5844 5869 5845 5870 *page_index = offset >> PAGE_SHIFT; 5846 5871 *page_offset = offset_in_page(offset); ··· 5852 5877 * @start: offset of the bitmap item in the extent buffer 5853 5878 * @nr: bit number to test 5854 5879 */ 5855 - int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, 5880 + int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, 5856 5881 unsigned long nr) 5857 5882 { 5858 5883 u8 *kaddr; ··· 5874 5899 * @pos: bit number of the first bit 5875 5900 * @len: number of bits to set 5876 5901 */ 5877 - void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, 5902 + void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, 5878 5903 unsigned long pos, unsigned long len) 5879 5904 { 5880 5905 u8 *kaddr; ··· 5916 5941 * @pos: bit number of the first bit 5917 5942 * @len: number of bits to clear 5918 5943 */ 5919 - void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, 5920 - unsigned long pos, unsigned long len) 5944 + void extent_buffer_bitmap_clear(const struct extent_buffer *eb, 5945 + unsigned long start, unsigned long pos, 5946 + unsigned long len) 5921 5947 { 5922 5948 u8 *kaddr; 5923 5949 struct page *page; ··· 5979 6003 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 5980 6004 } 5981 6005 5982 - void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 5983 - unsigned long src_offset, unsigned long len) 6006 + void memcpy_extent_buffer(const struct extent_buffer *dst, 6007 + unsigned long dst_offset, unsigned long src_offset, 6008 + unsigned long len) 5984 6009 { 5985 6010 struct btrfs_fs_info *fs_info = dst->fs_info; 5986 6011 size_t cur; 5987 6012 size_t dst_off_in_page; 5988 6013 size_t src_off_in_page; 5989 - size_t start_offset = offset_in_page(dst->start); 5990 6014 unsigned long dst_i; 5991 6015 unsigned long src_i; 5992 6016 ··· 6004 6028 } 6005 6029 6006 6030 while (len > 0) { 6007 - dst_off_in_page = offset_in_page(start_offset + dst_offset); 6008 - src_off_in_page = offset_in_page(start_offset + src_offset); 6031 + dst_off_in_page = offset_in_page(dst_offset); 6032 + src_off_in_page = offset_in_page(src_offset); 6009 6033 6010 - dst_i = (start_offset + dst_offset) >> PAGE_SHIFT; 6011 - src_i = (start_offset + src_offset) >> PAGE_SHIFT; 6034 + dst_i = dst_offset >> PAGE_SHIFT; 6035 + src_i = src_offset >> PAGE_SHIFT; 6012 6036 6013 6037 cur = min(len, (unsigned long)(PAGE_SIZE - 6014 6038 src_off_in_page)); ··· 6024 6048 } 6025 6049 } 6026 6050 6027 - void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 6028 - unsigned long src_offset, unsigned long len) 6051 + void memmove_extent_buffer(const struct extent_buffer *dst, 6052 + unsigned long dst_offset, unsigned long src_offset, 6053 + unsigned long len) 6029 6054 { 6030 6055 struct btrfs_fs_info *fs_info = dst->fs_info; 6031 6056 size_t cur; ··· 6034 6057 size_t src_off_in_page; 6035 6058 unsigned long dst_end = dst_offset + len - 1; 6036 6059 unsigned long src_end = src_offset + len - 1; 6037 - size_t start_offset = offset_in_page(dst->start); 6038 6060 unsigned long dst_i; 6039 6061 unsigned long src_i; 6040 6062 ··· 6054 6078 return; 6055 6079 } 6056 6080 while (len > 0) { 6057 - dst_i = (start_offset + dst_end) >> PAGE_SHIFT; 6058 - src_i = (start_offset + src_end) >> PAGE_SHIFT; 6081 + dst_i = dst_end >> PAGE_SHIFT; 6082 + src_i = src_end >> 
PAGE_SHIFT; 6059 6083 6060 - dst_off_in_page = offset_in_page(start_offset + dst_end); 6061 - src_off_in_page = offset_in_page(start_offset + src_end); 6084 + dst_off_in_page = offset_in_page(dst_end); 6085 + src_off_in_page = offset_in_page(src_end); 6062 6086 6063 6087 cur = min_t(unsigned long, len, src_off_in_page + 1); 6064 6088 cur = min(cur, dst_off_in_page + 1);
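A note on the extent_io.c hunks above: every read/write/copy/bitmap helper used to compute size_t start_offset = offset_in_page(eb->start) and fold it into the caller's offset before deriving the page index, and this series deletes that term everywhere (while also constifying the struct extent_buffer pointers, since none of these helpers modify the extent_buffer structure itself). Dropping the term is only valid because offset_in_page(eb->start) is zero here, i.e. extent buffer starts are page aligned. A standalone sketch of the page math that remains, with a hypothetical helper name and 4K pages assumed (not the kernel code itself):

/* Minimal sketch: map a byte offset inside a multi-page extent buffer
 * to (page index, offset in page), assuming the buffer starts on a
 * page boundary. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static void eb_offset_to_page(unsigned long start,
                              unsigned long *page_index,
                              unsigned long *page_offset)
{
        *page_index = start >> PAGE_SHIFT;      /* index into eb->pages[] */
        *page_offset = start & (PAGE_SIZE - 1); /* offset_in_page(start) */
}

int main(void)
{
        unsigned long i, off;

        eb_offset_to_page(5000, &i, &off);
        printf("page %lu offset %lu\n", i, off); /* page 1 offset 904 */
        return 0;
}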
+35 -32
fs/btrfs/extent_io.h
··· 66 66 struct io_failure_record; 67 67 struct extent_io_tree; 68 68 69 + typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio, 70 + int mirror_num, 71 + unsigned long bio_flags); 72 + 69 73 typedef blk_status_t (extent_submit_bio_start_t)(void *private_data, 70 74 struct bio *bio, u64 bio_offset); 71 75 ··· 78 74 * The following callbacks must be always defined, the function 79 75 * pointer will be called unconditionally. 80 76 */ 81 - blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio, 82 - int mirror_num, unsigned long bio_flags); 77 + submit_bio_hook_t *submit_bio_hook; 83 78 int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, 84 79 struct page *page, u64 start, u64 end, 85 80 int mirror); ··· 212 209 u64 start, unsigned long len); 213 210 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, 214 211 u64 start); 215 - struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); 212 + struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src); 216 213 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, 217 214 u64 start); 218 215 void free_extent_buffer(struct extent_buffer *eb); ··· 230 227 (eb->start >> PAGE_SHIFT); 231 228 } 232 229 233 - static inline int extent_buffer_uptodate(struct extent_buffer *eb) 230 + static inline int extent_buffer_uptodate(const struct extent_buffer *eb) 234 231 { 235 232 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 236 233 } ··· 243 240 int read_extent_buffer_to_user(const struct extent_buffer *eb, 244 241 void __user *dst, unsigned long start, 245 242 unsigned long len); 246 - void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src); 247 - void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, 243 + void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src); 244 + void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, 248 245 const void *src); 249 - void write_extent_buffer(struct extent_buffer *eb, const void *src, 246 + void write_extent_buffer(const struct extent_buffer *eb, const void *src, 250 247 unsigned long start, unsigned long len); 251 - void copy_extent_buffer_full(struct extent_buffer *dst, 252 - struct extent_buffer *src); 253 - void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, 248 + void copy_extent_buffer_full(const struct extent_buffer *dst, 249 + const struct extent_buffer *src); 250 + void copy_extent_buffer(const struct extent_buffer *dst, 251 + const struct extent_buffer *src, 254 252 unsigned long dst_offset, unsigned long src_offset, 255 253 unsigned long len); 256 - void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 257 - unsigned long src_offset, unsigned long len); 258 - void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 259 - unsigned long src_offset, unsigned long len); 260 - void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start, 254 + void memcpy_extent_buffer(const struct extent_buffer *dst, 255 + unsigned long dst_offset, unsigned long src_offset, 256 + unsigned long len); 257 + void memmove_extent_buffer(const struct extent_buffer *dst, 258 + unsigned long dst_offset, unsigned long src_offset, 261 259 unsigned long len); 262 - int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, 260 + void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, 261 + unsigned 
long len); 262 + int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, 263 263 unsigned long pos); 264 - void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, 264 + void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, 265 265 unsigned long pos, unsigned long len); 266 - void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, 267 - unsigned long pos, unsigned long len); 268 - void clear_extent_buffer_dirty(struct extent_buffer *eb); 266 + void extent_buffer_bitmap_clear(const struct extent_buffer *eb, 267 + unsigned long start, unsigned long pos, 268 + unsigned long len); 269 + void clear_extent_buffer_dirty(const struct extent_buffer *eb); 269 270 bool set_extent_buffer_dirty(struct extent_buffer *eb); 270 271 void set_extent_buffer_uptodate(struct extent_buffer *eb); 271 272 void clear_extent_buffer_uptodate(struct extent_buffer *eb); 272 - int extent_buffer_under_io(struct extent_buffer *eb); 273 - int map_private_extent_buffer(const struct extent_buffer *eb, 274 - unsigned long offset, unsigned long min_len, 275 - char **map, unsigned long *map_start, 276 - unsigned long *map_len); 273 + int extent_buffer_under_io(const struct extent_buffer *eb); 277 274 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); 278 275 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); 279 276 void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, ··· 292 289 u64 length, u64 logical, struct page *page, 293 290 unsigned int pg_offset, int mirror_num); 294 291 void end_extent_writepage(struct page *page, int err, u64 start, u64 end); 295 - int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num); 292 + int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num); 296 293 297 294 /* 298 295 * When IO fails, either with EIO or csum verification fails, we ··· 314 311 }; 315 312 316 313 317 - bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages, 318 - struct io_failure_record *failrec, int fail_mirror); 319 - struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, 320 - struct io_failure_record *failrec, 321 - struct page *page, int pg_offset, int icsum, 322 - bio_end_io_t *endio_func, void *data); 314 + blk_status_t btrfs_submit_read_repair(struct inode *inode, 315 + struct bio *failed_bio, u64 phy_offset, 316 + struct page *page, unsigned int pgoff, 317 + u64 start, u64 end, int failed_mirror, 318 + submit_bio_hook_t *submit_bio_hook); 319 + 323 320 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 324 321 bool find_lock_delalloc_range(struct inode *inode, 325 322 struct page *locked_page, u64 *start,
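The header now declares submit_bio_hook_t once and uses it both for the submit_bio_hook member of the ops table and for the new btrfs_submit_read_repair() parameter, so read repair can resubmit through whichever hook the original read used (the buffered and direct I/O paths have different ones). A simplified, self-contained illustration of the pattern, with opaque types and hypothetical names rather than the kernel's own:

/* Sketch only: a single typedef shared by the ops table and a helper
 * parameter keeps the two signatures from drifting apart. */
struct inode;
struct bio;

typedef int (submit_hook_t)(struct inode *inode, struct bio *bio,
                            int mirror_num, unsigned long bio_flags);

struct io_ops {
        submit_hook_t *submit_bio_hook;  /* stored in the ops table */
};

static int submit_read_repair(struct inode *inode, struct bio *failed_bio,
                              int next_mirror, submit_hook_t *submit_bio_hook)
{
        /* build a repair bio for the failed sector (omitted), then push
         * it down the same submission path the original read took */
        return submit_bio_hook(inode, failed_bio, next_mirror, 0);
}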
+35 -27
fs/btrfs/file-item.c
··· 242 242 /** 243 243 * btrfs_lookup_bio_sums - Look up checksums for a bio. 244 244 * @inode: inode that the bio is for. 245 - * @bio: bio embedded in btrfs_io_bio. 245 + * @bio: bio to look up. 246 246 * @offset: Unless (u64)-1, look up checksums for this offset in the file. 247 247 * If (u64)-1, use the page offsets from the bio instead. 248 - * @dst: Buffer of size btrfs_super_csum_size() used to return checksum. If 249 - * NULL, the checksum is returned in btrfs_io_bio(bio)->csum instead. 248 + * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return 249 + * checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If 250 + * NULL, the checksum buffer is allocated and returned in 251 + * btrfs_io_bio(bio)->csum instead. 250 252 * 251 253 * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise. 252 254 */ ··· 258 256 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 259 257 struct bio_vec bvec; 260 258 struct bvec_iter iter; 261 - struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); 262 259 struct btrfs_csum_item *item = NULL; 263 260 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 264 261 struct btrfs_path *path; ··· 278 277 279 278 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; 280 279 if (!dst) { 280 + struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); 281 + 281 282 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { 282 283 btrfs_bio->csum = kmalloc_array(nblocks, csum_size, 283 284 GFP_NOFS); ··· 601 598 index = 0; 602 599 } 603 600 604 - crypto_shash_init(shash); 605 601 data = kmap_atomic(bvec.bv_page); 606 - crypto_shash_update(shash, data + bvec.bv_offset 602 + crypto_shash_digest(shash, data + bvec.bv_offset 607 603 + (i * fs_info->sectorsize), 608 - fs_info->sectorsize); 604 + fs_info->sectorsize, 605 + sums->sums + index); 609 606 kunmap_atomic(data); 610 - crypto_shash_final(shash, (char *)(sums->sums + index)); 611 607 index += csum_size; 612 608 offset += fs_info->sectorsize; 613 609 this_sum_bytes += fs_info->sectorsize; ··· 871 869 } 872 870 ret = PTR_ERR(item); 873 871 if (ret != -EFBIG && ret != -ENOENT) 874 - goto fail_unlock; 872 + goto out; 875 873 876 874 if (ret == -EFBIG) { 877 875 u32 item_size; ··· 889 887 nritems = btrfs_header_nritems(path->nodes[0]); 890 888 if (!nritems || (path->slots[0] >= nritems - 1)) { 891 889 ret = btrfs_next_leaf(root, path); 892 - if (ret == 1) 890 + if (ret < 0) { 891 + goto out; 892 + } else if (ret > 0) { 893 893 found_next = 1; 894 - if (ret != 0) 895 894 goto insert; 895 + } 896 896 slot = path->slots[0]; 897 897 } 898 898 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); ··· 909 905 } 910 906 911 907 /* 912 - * at this point, we know the tree has an item, but it isn't big 913 - * enough yet to put our csum in. Grow it 908 + * At this point, we know the tree has a checksum item that ends at an 909 + * offset matching the start of the checksum range we want to insert. 910 + * We try to extend that item as much as possible and then add as many 911 + * checksums to it as they fit. 912 + * 913 + * First check if the leaf has enough free space for at least one 914 + * checksum. If it has, go directly to the item extension code, otherwise 915 + * release the path and do a search for insertion before the extension.
914 916 */ 917 + if (btrfs_leaf_free_space(leaf) >= csum_size) { 918 + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 919 + csum_offset = (bytenr - found_key.offset) >> 920 + fs_info->sb->s_blocksize_bits; 921 + goto extend_csum; 922 + } 923 + 915 924 btrfs_release_path(path); 916 925 ret = btrfs_search_slot(trans, root, &file_key, path, 917 926 csum_size, 1); 918 927 if (ret < 0) 919 - goto fail_unlock; 928 + goto out; 920 929 921 930 if (ret > 0) { 922 931 if (path->slots[0] == 0) ··· 948 931 goto insert; 949 932 } 950 933 934 + extend_csum: 951 935 if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) / 952 936 csum_size) { 953 937 int extend_nr; 954 938 u64 tmp; 955 939 u32 diff; 956 - u32 free_space; 957 940 958 - if (btrfs_leaf_free_space(leaf) < 959 - sizeof(struct btrfs_item) + csum_size * 2) 960 - goto insert; 961 - 962 - free_space = btrfs_leaf_free_space(leaf) - 963 - sizeof(struct btrfs_item) - csum_size; 964 941 tmp = sums->len - total_bytes; 965 942 tmp >>= fs_info->sb->s_blocksize_bits; 966 943 WARN_ON(tmp < 1); ··· 965 954 MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size); 966 955 967 956 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); 968 - diff = min(free_space, diff); 957 + diff = min_t(u32, btrfs_leaf_free_space(leaf), diff); 969 958 diff /= csum_size; 970 959 diff *= csum_size; 971 960 ··· 996 985 ins_size); 997 986 path->leave_spinning = 0; 998 987 if (ret < 0) 999 - goto fail_unlock; 988 + goto out; 1000 989 if (WARN_ON(ret != 0)) 1001 - goto fail_unlock; 990 + goto out; 1002 991 leaf = path->nodes[0]; 1003 992 csum: 1004 993 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); ··· 1028 1017 out: 1029 1018 btrfs_free_path(path); 1030 1019 return ret; 1031 - 1032 - fail_unlock: 1033 - goto out; 1034 1020 } 1035 1021 1036 1022 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
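Two patterns in the file-item.c hunks are worth calling out. Checksum insertion now checks btrfs_leaf_free_space() up front and jumps straight to the extend_csum code when the leaf can take at least one more checksum, saving a path release plus re-search; and the per-sector hashing collapses the crypto_shash_init()/crypto_shash_update()/crypto_shash_final() triple into one crypto_shash_digest() call, which is equivalent for a one-shot digest over a single contiguous buffer. A minimal kernel-style sketch of the latter (hypothetical helper name, assumes a shash_desc whose tfm is already set up):

/* Sketch: one-shot hashing with the kernel shash API. */
#include <crypto/hash.h>

static int csum_one_sector(struct shash_desc *shash, const u8 *data,
                           unsigned int sectorsize, u8 *out)
{
        /* Equivalent to the three-call form removed above:
         *   crypto_shash_init(shash);
         *   crypto_shash_update(shash, data, sectorsize);
         *   crypto_shash_final(shash, out);
         */
        return crypto_shash_digest(shash, data, sectorsize, out);
}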
+94 -17
fs/btrfs/file.c
··· 275 275 { 276 276 struct btrfs_root *inode_root; 277 277 struct inode *inode; 278 - struct btrfs_key key; 279 278 struct btrfs_ioctl_defrag_range_args range; 280 279 int num_defrag; 281 280 int ret; 282 281 283 282 /* get the inode */ 284 - key.objectid = defrag->root; 285 - key.type = BTRFS_ROOT_ITEM_KEY; 286 - key.offset = (u64)-1; 287 - 288 - inode_root = btrfs_get_fs_root(fs_info, &key, true); 283 + inode_root = btrfs_get_fs_root(fs_info, defrag->root, true); 289 284 if (IS_ERR(inode_root)) { 290 285 ret = PTR_ERR(inode_root); 291 286 goto cleanup; 292 287 } 293 288 294 - key.objectid = defrag->ino; 295 - key.type = BTRFS_INODE_ITEM_KEY; 296 - key.offset = 0; 297 - inode = btrfs_iget(fs_info->sb, &key, inode_root); 289 + inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root); 298 290 btrfs_put_root(inode_root); 299 291 if (IS_ERR(inode)) { 300 292 ret = PTR_ERR(inode); ··· 767 775 if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent) 768 776 modify_tree = 0; 769 777 770 - update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 778 + update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || 771 779 root == fs_info->tree_root); 772 780 while (1) { 773 781 recow = 0; ··· 1809 1817 return num_written ? num_written : ret; 1810 1818 } 1811 1819 1812 - static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) 1820 + static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, 1821 + const struct iov_iter *iter, loff_t offset) 1822 + { 1823 + const unsigned int blocksize_mask = fs_info->sectorsize - 1; 1824 + 1825 + if (offset & blocksize_mask) 1826 + return -EINVAL; 1827 + 1828 + if (iov_iter_alignment(iter) & blocksize_mask) 1829 + return -EINVAL; 1830 + 1831 + return 0; 1832 + } 1833 + 1834 + static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) 1813 1835 { 1814 1836 struct file *file = iocb->ki_filp; 1815 1837 struct inode *inode = file_inode(file); 1816 - loff_t pos; 1817 - ssize_t written; 1838 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1839 + loff_t pos = iocb->ki_pos; 1840 + ssize_t written = 0; 1818 1841 ssize_t written_buffered; 1819 1842 loff_t endbyte; 1820 1843 int err; 1844 + size_t count = 0; 1845 + bool relock = false; 1821 1846 1822 - written = generic_file_direct_write(iocb, from); 1847 + if (check_direct_IO(fs_info, from, pos)) 1848 + goto buffered; 1849 + 1850 + count = iov_iter_count(from); 1851 + /* 1852 + * If the write DIO is beyond the EOF, we need to update the isize, but it 1853 + * is protected by i_mutex. So we cannot unlock the i_mutex in this 1854 + * case.
1855 + */ 1856 + if (pos + count <= inode->i_size) { 1857 + inode_unlock(inode); 1858 + relock = true; 1859 + } else if (iocb->ki_flags & IOCB_NOWAIT) { 1860 + return -EAGAIN; 1861 + } 1862 + 1863 + down_read(&BTRFS_I(inode)->dio_sem); 1864 + written = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dops, 1865 + is_sync_kiocb(iocb)); 1866 + up_read(&BTRFS_I(inode)->dio_sem); 1867 + 1868 + if (relock) 1869 + inode_lock(inode); 1823 1870 1824 1871 if (written < 0 || !iov_iter_count(from)) 1825 1872 return written; 1826 1873 1874 + buffered: 1827 1875 pos = iocb->ki_pos; 1828 1876 written_buffered = btrfs_buffered_write(iocb, from); 1829 1877 if (written_buffered < 0) { ··· 2002 1970 atomic_inc(&BTRFS_I(inode)->sync_writers); 2003 1971 2004 1972 if (iocb->ki_flags & IOCB_DIRECT) { 2005 - num_written = __btrfs_direct_write(iocb, from); 1973 + num_written = btrfs_direct_write(iocb, from); 2006 1974 } else { 2007 1975 num_written = btrfs_buffered_write(iocb, from); 2008 1976 if (num_written > 0) ··· 3516 3484 return generic_file_open(inode, filp); 3517 3485 } 3518 3486 3487 + static int check_direct_read(struct btrfs_fs_info *fs_info, 3488 + const struct iov_iter *iter, loff_t offset) 3489 + { 3490 + int ret; 3491 + int i, seg; 3492 + 3493 + ret = check_direct_IO(fs_info, iter, offset); 3494 + if (ret < 0) 3495 + return ret; 3496 + 3497 + for (seg = 0; seg < iter->nr_segs; seg++) 3498 + for (i = seg + 1; i < iter->nr_segs; i++) 3499 + if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 3500 + return -EINVAL; 3501 + return 0; 3502 + } 3503 + 3504 + static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to) 3505 + { 3506 + struct inode *inode = file_inode(iocb->ki_filp); 3507 + ssize_t ret; 3508 + 3509 + if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos)) 3510 + return 0; 3511 + 3512 + inode_lock_shared(inode); 3513 + ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dops, 3514 + is_sync_kiocb(iocb)); 3515 + inode_unlock_shared(inode); 3516 + return ret; 3517 + } 3518 + 3519 + static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 3520 + { 3521 + ssize_t ret = 0; 3522 + 3523 + if (iocb->ki_flags & IOCB_DIRECT) { 3524 + ret = btrfs_direct_read(iocb, to); 3525 + if (ret < 0) 3526 + return ret; 3527 + } 3528 + 3529 + return generic_file_buffered_read(iocb, to, ret); 3530 + } 3531 + 3519 3532 const struct file_operations btrfs_file_operations = { 3520 3533 .llseek = btrfs_file_llseek, 3521 - .read_iter = generic_file_read_iter, 3534 + .read_iter = btrfs_file_read_iter, 3522 3535 .splice_read = generic_file_splice_read, 3523 3536 .write_iter = btrfs_file_write_iter, 3524 3537 .mmap = btrfs_file_mmap,
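The file.c changes are the user-visible half of the iomap conversion: btrfs_direct_write() and the new btrfs_direct_read() call iomap_dio_rw() with btrfs_dio_iomap_ops, and btrfs_file_read_iter() stitches a partial direct read together with generic_file_buffered_read(). A request only takes the direct path when check_direct_IO() accepts it; anything misaligned falls back to buffered I/O. A userspace sketch of just that alignment rule (hypothetical helper; in the kernel, iov_iter_alignment() folds the base address and length of every segment into one value):

/* Sketch: the sector-alignment gate in front of the DIO path. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool dio_aligned(uint64_t offset, uint64_t addr, uint64_t len,
                        uint64_t sectorsize)
{
        const uint64_t mask = sectorsize - 1;   /* power-of-two sector size */

        return ((offset | addr | len) & mask) == 0;
}

int main(void)
{
        printf("%d\n", dio_aligned(4096, 0x10000, 8192, 4096)); /* 1: direct */
        printf("%d\n", dio_aligned(4100, 0x10000, 8192, 4096)); /* 0: buffered */
        return 0;
}

Falling back to the buffered path rather than failing the request keeps btrfs's long-standing behaviour for unaligned O_DIRECT writes.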
+17 -64
fs/btrfs/free-space-cache.c
··· 82 82 * sure NOFS is set to keep us from deadlocking. 83 83 */ 84 84 nofs_flag = memalloc_nofs_save(); 85 - inode = btrfs_iget_path(fs_info->sb, &location, root, path); 85 + inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path); 86 86 btrfs_release_path(path); 87 87 memalloc_nofs_restore(nofs_flag); 88 88 if (IS_ERR(inode)) ··· 1190 1190 if (ret) { 1191 1191 invalidate_inode_pages2(inode->i_mapping); 1192 1192 BTRFS_I(inode)->generation = 0; 1193 - if (block_group) { 1194 - #ifdef CONFIG_BTRFS_DEBUG 1195 - btrfs_err(root->fs_info, 1196 - "failed to write free space cache for block group %llu", 1197 - block_group->start); 1198 - #endif 1199 - } 1193 + if (block_group) 1194 + btrfs_debug(root->fs_info, 1195 + "failed to write free space cache for block group %llu error %d", 1196 + block_group->start, ret); 1200 1197 } 1201 1198 btrfs_update_inode(trans, root, inode); 1202 1199 ··· 1412 1415 ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl, 1413 1416 block_group, &block_group->io_ctl, trans); 1414 1417 if (ret) { 1415 - #ifdef CONFIG_BTRFS_DEBUG 1416 - btrfs_err(fs_info, 1417 - "failed to write free space cache for block group %llu", 1418 - block_group->start); 1419 - #endif 1418 + btrfs_debug(fs_info, 1419 + "failed to write free space cache for block group %llu error %d", 1420 + block_group->start, ret); 1420 1421 spin_lock(&block_group->lock); 1421 1422 block_group->disk_cache_state = BTRFS_DC_ERROR; 1422 1423 spin_unlock(&block_group->lock); ··· 3757 3762 return ret; 3758 3763 } 3759 3764 3760 - void btrfs_get_block_group_trimming(struct btrfs_block_group *cache) 3761 - { 3762 - atomic_inc(&cache->trimming); 3763 - } 3764 - 3765 - void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group) 3766 - { 3767 - struct btrfs_fs_info *fs_info = block_group->fs_info; 3768 - struct extent_map_tree *em_tree; 3769 - struct extent_map *em; 3770 - bool cleanup; 3771 - 3772 - spin_lock(&block_group->lock); 3773 - cleanup = (atomic_dec_and_test(&block_group->trimming) && 3774 - block_group->removed); 3775 - spin_unlock(&block_group->lock); 3776 - 3777 - if (cleanup) { 3778 - mutex_lock(&fs_info->chunk_mutex); 3779 - em_tree = &fs_info->mapping_tree; 3780 - write_lock(&em_tree->lock); 3781 - em = lookup_extent_mapping(em_tree, block_group->start, 3782 - 1); 3783 - BUG_ON(!em); /* logic error, can't happen */ 3784 - remove_extent_mapping(em_tree, em); 3785 - write_unlock(&em_tree->lock); 3786 - mutex_unlock(&fs_info->chunk_mutex); 3787 - 3788 - /* once for us and once for the tree */ 3789 - free_extent_map(em); 3790 - free_extent_map(em); 3791 - 3792 - /* 3793 - * We've left one free space entry and other tasks trimming 3794 - * this block group have left 1 entry each one. Free them. 
3795 - */ 3796 - __btrfs_remove_free_space_cache(block_group->free_space_ctl); 3797 - } 3798 - } 3799 - 3800 3765 int btrfs_trim_block_group(struct btrfs_block_group *block_group, 3801 3766 u64 *trimmed, u64 start, u64 end, u64 minlen) 3802 3767 { ··· 3771 3816 spin_unlock(&block_group->lock); 3772 3817 return 0; 3773 3818 } 3774 - btrfs_get_block_group_trimming(block_group); 3819 + btrfs_freeze_block_group(block_group); 3775 3820 spin_unlock(&block_group->lock); 3776 3821 3777 3822 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false); ··· 3784 3829 if (rem) 3785 3830 reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end)); 3786 3831 out: 3787 - btrfs_put_block_group_trimming(block_group); 3832 + btrfs_unfreeze_block_group(block_group); 3788 3833 return ret; 3789 3834 } 3790 3835 ··· 3801 3846 spin_unlock(&block_group->lock); 3802 3847 return 0; 3803 3848 } 3804 - btrfs_get_block_group_trimming(block_group); 3849 + btrfs_freeze_block_group(block_group); 3805 3850 spin_unlock(&block_group->lock); 3806 3851 3807 3852 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async); 3808 - btrfs_put_block_group_trimming(block_group); 3853 + btrfs_unfreeze_block_group(block_group); 3809 3854 3810 3855 return ret; 3811 3856 } ··· 3823 3868 spin_unlock(&block_group->lock); 3824 3869 return 0; 3825 3870 } 3826 - btrfs_get_block_group_trimming(block_group); 3871 + btrfs_freeze_block_group(block_group); 3827 3872 spin_unlock(&block_group->lock); 3828 3873 3829 3874 ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen, 3830 3875 async); 3831 3876 3832 - btrfs_put_block_group_trimming(block_group); 3877 + btrfs_unfreeze_block_group(block_group); 3833 3878 3834 3879 return ret; 3835 3880 } ··· 3990 4035 if (release_metadata) 3991 4036 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3992 4037 inode->i_size, true); 3993 - #ifdef CONFIG_BTRFS_DEBUG 3994 - btrfs_err(fs_info, 3995 - "failed to write free ino cache for root %llu", 3996 - root->root_key.objectid); 3997 - #endif 4038 + btrfs_debug(fs_info, 4039 + "failed to write free ino cache for root %llu error %d", 4040 + root->root_key.objectid, ret); 3998 4041 } 3999 4042 4000 4043 return ret;
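Besides turning the space cache writeout failures from CONFIG_BTRFS_DEBUG-only btrfs_err() calls into unconditional btrfs_debug() messages that carry the error code, this file loses the open-coded trimming refcount: btrfs_get/put_block_group_trimming() become btrfs_freeze_block_group()/btrfs_unfreeze_block_group(), with the removed cleanup body (dropping the extent mapping and leftover free-space entries once the last trimmer of a removed group finishes) now living outside free-space-cache.c. A simplified userspace model of that freeze counter, with hypothetical names; the real code holds the block group's spinlock around the ->removed check:

#include <stdatomic.h>
#include <stdbool.h>

struct block_group {
        atomic_int frozen;      /* holders that defer final cleanup */
        bool removed;           /* group was logically deleted */
};

static void freeze_block_group(struct block_group *bg)
{
        atomic_fetch_add(&bg->frozen, 1);
}

static void unfreeze_block_group(struct block_group *bg)
{
        /* the last unfreeze of an already-removed group cleans up */
        if (atomic_fetch_sub(&bg->frozen, 1) == 1 && bg->removed) {
                /* drop extent mapping and free-space entries (elided) */
        }
}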
+454 -869
fs/btrfs/inode.c
··· 5 5 6 6 #include <linux/kernel.h> 7 7 #include <linux/bio.h> 8 - #include <linux/buffer_head.h> 9 8 #include <linux/file.h> 10 9 #include <linux/fs.h> 11 10 #include <linux/pagemap.h> ··· 48 49 #include "qgroup.h" 49 50 #include "delalloc-space.h" 50 51 #include "block-group.h" 52 + #include "space-info.h" 51 53 52 54 struct btrfs_iget_args { 53 - struct btrfs_key *location; 55 + u64 ino; 54 56 struct btrfs_root *root; 55 57 }; 56 58 57 59 struct btrfs_dio_data { 58 60 u64 reserve; 59 - u64 unsubmitted_oe_range_start; 60 - u64 unsubmitted_oe_range_end; 61 - int overwrite; 61 + loff_t length; 62 + ssize_t submitted; 63 + struct extent_changeset *data_reserved; 62 64 }; 63 65 64 66 static const struct inode_operations btrfs_dir_inode_operations; ··· 1142 1142 */ 1143 1143 if (extent_reserved) { 1144 1144 extent_clear_unlock_delalloc(inode, start, 1145 - start + cur_alloc_size, 1145 + start + cur_alloc_size - 1, 1146 1146 locked_page, 1147 1147 clear_bits, 1148 1148 page_ops); ··· 1353 1353 if (ret < 0) 1354 1354 return ret; 1355 1355 return 1; 1356 + } 1357 + 1358 + static int fallback_to_cow(struct inode *inode, struct page *locked_page, 1359 + const u64 start, const u64 end, 1360 + int *page_started, unsigned long *nr_written) 1361 + { 1362 + const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode)); 1363 + const u64 range_bytes = end + 1 - start; 1364 + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1365 + u64 range_start = start; 1366 + u64 count; 1367 + 1368 + /* 1369 + * If EXTENT_NORESERVE is set it means that when the buffered write was 1370 + * made we did not have enough available data space and therefore we did not 1371 + * reserve data space for it, since we thought we could do NOCOW for the 1372 + * respective file range (either there is a prealloc extent or the inode 1373 + * has the NOCOW bit set). 1374 + * 1375 + * However, when we need to fall back to COW mode (because for example the 1376 + * block group for the corresponding extent was turned to RO mode by a 1377 + * scrub or relocation) we need to do the following: 1378 + * 1379 + * 1) We increment the bytes_may_use counter of the data space info. 1380 + * If COW succeeds, it allocates a new data extent and after doing 1381 + * that it decrements the space info's bytes_may_use counter and 1382 + * increments its bytes_reserved counter by the same amount (we do 1383 + * this at btrfs_add_reserved_bytes()). So we need to increment the 1384 + * bytes_may_use counter to compensate (when space is reserved at 1385 + * buffered write time, the bytes_may_use counter is incremented); 1386 + * 1387 + * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so 1388 + * that if the COW path fails for any reason, it decrements (through 1389 + * extent_clear_unlock_delalloc()) the bytes_may_use counter of the 1390 + * data space info, which we incremented in the step above. 1391 + * 1392 + * If we need to fall back to cow and the inode corresponds to a free 1393 + * space cache inode, we must also increment bytes_may_use of the data 1394 + * space_info for the same reason. Space caches always get a prealloc 1395 + * extent for them, however scrub or balance may have set the block 1396 + * group that contains that extent to RO mode. 1397 + */ 1398 + count = count_range_bits(io_tree, &range_start, end, range_bytes, 1399 + EXTENT_NORESERVE, 0); 1400 + if (count > 0 || is_space_ino) { 1401 + const u64 bytes = is_space_ino ?
range_bytes : count; 1402 + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 1403 + struct btrfs_space_info *sinfo = fs_info->data_sinfo; 1404 + 1405 + spin_lock(&sinfo->lock); 1406 + btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); 1407 + spin_unlock(&sinfo->lock); 1408 + 1409 + if (count > 0) 1410 + clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, 1411 + 0, 0, NULL); 1412 + } 1413 + 1414 + return cow_file_range(inode, locked_page, start, end, page_started, 1415 + nr_written, 1); 1356 1416 } 1357 1417 1358 1418 /* ··· 1662 1602 * NOCOW, following one which needs to be COW'ed 1663 1603 */ 1664 1604 if (cow_start != (u64)-1) { 1665 - ret = cow_file_range(inode, locked_page, 1666 - cow_start, found_key.offset - 1, 1667 - page_started, nr_written, 1); 1605 + ret = fallback_to_cow(inode, locked_page, cow_start, 1606 + found_key.offset - 1, 1607 + page_started, nr_written); 1668 1608 if (ret) { 1669 1609 if (nocow) 1670 1610 btrfs_dec_nocow_writers(fs_info, ··· 1753 1693 1754 1694 if (cow_start != (u64)-1) { 1755 1695 cur_offset = end; 1756 - ret = cow_file_range(inode, locked_page, cow_start, end, 1757 - page_started, nr_written, 1); 1696 + ret = fallback_to_cow(inode, locked_page, cow_start, end, 1697 + page_started, nr_written); 1758 1698 if (ret) 1759 1699 goto error; 1760 1700 } ··· 2786 2726 btrfs_queue_work(wq, &ordered_extent->work); 2787 2727 } 2788 2728 2789 - static int __readpage_endio_check(struct inode *inode, 2790 - struct btrfs_io_bio *io_bio, 2791 - int icsum, struct page *page, 2792 - int pgoff, u64 start, size_t len) 2729 + static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio, 2730 + int icsum, struct page *page, int pgoff, u64 start, 2731 + size_t len) 2793 2732 { 2794 2733 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2795 2734 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); ··· 2802 2743 kaddr = kmap_atomic(page); 2803 2744 shash->tfm = fs_info->csum_shash; 2804 2745 2805 - crypto_shash_init(shash); 2806 - crypto_shash_update(shash, kaddr + pgoff, len); 2807 - crypto_shash_final(shash, csum); 2746 + crypto_shash_digest(shash, kaddr + pgoff, len, csum); 2808 2747 2809 2748 if (memcmp(csum, csum_expected, csum_size)) 2810 2749 goto zeroit; ··· 2847 2790 } 2848 2791 2849 2792 phy_offset >>= inode->i_sb->s_blocksize_bits; 2850 - return __readpage_endio_check(inode, io_bio, phy_offset, page, offset, 2851 - start, (size_t)(end - start + 1)); 2793 + return check_data_csum(inode, io_bio, phy_offset, page, offset, start, 2794 + (size_t)(end - start + 1)); 2852 2795 } 2853 2796 2854 2797 /* ··· 3038 2981 found_key.objectid = found_key.offset; 3039 2982 found_key.type = BTRFS_INODE_ITEM_KEY; 3040 2983 found_key.offset = 0; 3041 - inode = btrfs_iget(fs_info->sb, &found_key, root); 2984 + inode = btrfs_iget(fs_info->sb, last_objectid, root); 3042 2985 ret = PTR_ERR_OR_ZERO(inode); 3043 2986 if (ret && ret != -ENOENT) 3044 2987 goto out; ··· 3057 3000 * orphan must not get deleted. 3058 3001 * find_dead_roots already ran before us, so if this 3059 3002 * is a snapshot deletion, we should find the root 3060 - * in the dead_roots list 3003 + * in the fs_roots radix tree. 
3061 3004 */ 3062 - spin_lock(&fs_info->trans_lock); 3063 - list_for_each_entry(dead_root, &fs_info->dead_roots, 3064 - root_list) { 3065 - if (dead_root->root_key.objectid == 3066 - found_key.objectid) { 3067 - is_dead_root = 1; 3068 - break; 3069 - } 3070 - } 3071 - spin_unlock(&fs_info->trans_lock); 3005 + 3006 + spin_lock(&fs_info->fs_roots_radix_lock); 3007 + dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, 3008 + (unsigned long)found_key.objectid); 3009 + if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) 3010 + is_dead_root = 1; 3011 + spin_unlock(&fs_info->fs_roots_radix_lock); 3012 + 3072 3013 if (is_dead_root) { 3073 3014 /* prevent this orphan from being found again */ 3074 3015 key.offset = found_key.objectid - 1; ··· 3412 3357 3413 3358 btrfs_init_map_token(&token, leaf); 3414 3359 3415 - btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3416 - btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3417 - btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, 3418 - &token); 3419 - btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3420 - btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3360 + btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 3361 + btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 3362 + btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); 3363 + btrfs_set_token_inode_mode(&token, item, inode->i_mode); 3364 + btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 3421 3365 3422 - btrfs_set_token_timespec_sec(leaf, &item->atime, 3423 - inode->i_atime.tv_sec, &token); 3424 - btrfs_set_token_timespec_nsec(leaf, &item->atime, 3425 - inode->i_atime.tv_nsec, &token); 3366 + btrfs_set_token_timespec_sec(&token, &item->atime, 3367 + inode->i_atime.tv_sec); 3368 + btrfs_set_token_timespec_nsec(&token, &item->atime, 3369 + inode->i_atime.tv_nsec); 3426 3370 3427 - btrfs_set_token_timespec_sec(leaf, &item->mtime, 3428 - inode->i_mtime.tv_sec, &token); 3429 - btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3430 - inode->i_mtime.tv_nsec, &token); 3371 + btrfs_set_token_timespec_sec(&token, &item->mtime, 3372 + inode->i_mtime.tv_sec); 3373 + btrfs_set_token_timespec_nsec(&token, &item->mtime, 3374 + inode->i_mtime.tv_nsec); 3431 3375 3432 - btrfs_set_token_timespec_sec(leaf, &item->ctime, 3433 - inode->i_ctime.tv_sec, &token); 3434 - btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3435 - inode->i_ctime.tv_nsec, &token); 3376 + btrfs_set_token_timespec_sec(&token, &item->ctime, 3377 + inode->i_ctime.tv_sec); 3378 + btrfs_set_token_timespec_nsec(&token, &item->ctime, 3379 + inode->i_ctime.tv_nsec); 3436 3380 3437 - btrfs_set_token_timespec_sec(leaf, &item->otime, 3438 - BTRFS_I(inode)->i_otime.tv_sec, &token); 3439 - btrfs_set_token_timespec_nsec(leaf, &item->otime, 3440 - BTRFS_I(inode)->i_otime.tv_nsec, &token); 3381 + btrfs_set_token_timespec_sec(&token, &item->otime, 3382 + BTRFS_I(inode)->i_otime.tv_sec); 3383 + btrfs_set_token_timespec_nsec(&token, &item->otime, 3384 + BTRFS_I(inode)->i_otime.tv_nsec); 3441 3385 3442 - btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3443 - &token); 3444 - btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, 3445 - &token); 3446 - btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode), 3447 - &token); 3448 - btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3449 - btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3450 - 
btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3451 - btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3386 + btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 3387 + btrfs_set_token_inode_generation(&token, item, 3388 + BTRFS_I(inode)->generation); 3389 + btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 3390 + btrfs_set_token_inode_transid(&token, item, trans->transid); 3391 + btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 3392 + btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); 3393 + btrfs_set_token_inode_block_group(&token, item, 0); 3452 3394 } 3453 3395 3454 3396 /* ··· 3670 3618 * 1 for the inode ref 3671 3619 * 1 for the inode 3672 3620 */ 3673 - return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); 3621 + return btrfs_start_transaction_fallback_global_rsv(root, 5); 3674 3622 } 3675 3623 3676 3624 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) ··· 4160 4108 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 4161 4109 4162 4110 /* 4163 - * for non-free space inodes and ref cows, we want to back off from 4164 - * time to time 4111 + * For non-free space inodes and non-shareable roots, we want to back 4112 + * off from time to time. This means all inodes in subvolume roots, 4113 + * reloc roots, and data reloc roots. 4165 4114 */ 4166 4115 if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && 4167 - test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4116 + test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 4168 4117 be_nice = true; 4169 4118 4170 4119 path = btrfs_alloc_path(); ··· 4173 4120 return -ENOMEM; 4174 4121 path->reada = READA_BACK; 4175 4122 4176 - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 4123 + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 4177 4124 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1, 4178 4125 &cached_state); 4179 4126 4180 - /* 4181 - * We want to drop from the next block forward in case this new size is 4182 - * not block aligned since we will be keeping the last block of the 4183 - * extent just the way it is. 4184 - */ 4185 - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4186 - root == fs_info->tree_root) 4127 + /* 4128 + * We want to drop from the next block forward in case this 4129 + * new size is not block aligned since we will be keeping the 4130 + * last block of the extent just the way it is. 
4131 + */ 4187 4132 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, 4188 4133 fs_info->sectorsize), 4189 4134 (u64)-1, 0); 4135 + } 4190 4136 4191 4137 /* 4192 4138 * This function is also used to drop the items in the log tree before ··· 4293 4241 extent_num_bytes); 4294 4242 num_dec = (orig_num_bytes - 4295 4243 extent_num_bytes); 4296 - if (test_bit(BTRFS_ROOT_REF_COWS, 4244 + if (test_bit(BTRFS_ROOT_SHAREABLE, 4297 4245 &root->state) && 4298 4246 extent_start != 0) 4299 4247 inode_sub_bytes(inode, num_dec); ··· 4309 4257 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 4310 4258 if (extent_start != 0) { 4311 4259 found_extent = 1; 4312 - if (test_bit(BTRFS_ROOT_REF_COWS, 4260 + if (test_bit(BTRFS_ROOT_SHAREABLE, 4313 4261 &root->state)) 4314 4262 inode_sub_bytes(inode, num_dec); 4315 4263 } ··· 4345 4293 clear_len = fs_info->sectorsize; 4346 4294 } 4347 4295 4348 - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4296 + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 4349 4297 inode_sub_bytes(inode, item_end + 1 - new_size); 4350 4298 } 4351 4299 delete: ··· 4386 4334 should_throttle = false; 4387 4335 4388 4336 if (found_extent && 4389 - (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4390 - root == fs_info->tree_root)) { 4337 + root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 4391 4338 struct btrfs_ref ref = { 0 }; 4392 4339 4393 4340 bytes_deleted += extent_num_bytes; ··· 4810 4759 4811 4760 truncate_setsize(inode, newsize); 4812 4761 4813 - /* Disable nonlocked read DIO to avoid the endless truncate */ 4814 - btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); 4815 4762 inode_dio_wait(inode); 4816 - btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); 4817 4763 4818 4764 ret = btrfs_truncate(inode, newsize == oldsize); 4819 4765 if (ret && inode->i_nlink) { ··· 5202 5154 5203 5155 btrfs_release_path(path); 5204 5156 5205 - new_root = btrfs_get_fs_root(fs_info, location, true); 5157 + new_root = btrfs_get_fs_root(fs_info, location->objectid, true); 5206 5158 if (IS_ERR(new_root)) { 5207 5159 err = PTR_ERR(new_root); 5208 5160 goto out; ··· 5280 5232 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5281 5233 { 5282 5234 struct btrfs_iget_args *args = p; 5283 - inode->i_ino = args->location->objectid; 5284 - memcpy(&BTRFS_I(inode)->location, args->location, 5285 - sizeof(*args->location)); 5235 + 5236 + inode->i_ino = args->ino; 5237 + BTRFS_I(inode)->location.objectid = args->ino; 5238 + BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 5239 + BTRFS_I(inode)->location.offset = 0; 5286 5240 BTRFS_I(inode)->root = btrfs_grab_root(args->root); 5287 5241 BUG_ON(args->root && !BTRFS_I(inode)->root); 5288 5242 return 0; ··· 5293 5243 static int btrfs_find_actor(struct inode *inode, void *opaque) 5294 5244 { 5295 5245 struct btrfs_iget_args *args = opaque; 5296 - return args->location->objectid == BTRFS_I(inode)->location.objectid && 5246 + 5247 + return args->ino == BTRFS_I(inode)->location.objectid && 5297 5248 args->root == BTRFS_I(inode)->root; 5298 5249 } 5299 5250 5300 - static struct inode *btrfs_iget_locked(struct super_block *s, 5301 - struct btrfs_key *location, 5251 + static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino, 5302 5252 struct btrfs_root *root) 5303 5253 { 5304 5254 struct inode *inode; 5305 5255 struct btrfs_iget_args args; 5306 - unsigned long hashval = btrfs_inode_hash(location->objectid, root); 5256 + unsigned long hashval = btrfs_inode_hash(ino, root); 5307 5257 5308 - args.location = location; 5258 + args.ino = ino; 
5309 5259 args.root = root; 5310 5260 5311 5261 inode = iget5_locked(s, hashval, btrfs_find_actor, ··· 5315 5265 } 5316 5266 5317 5267 /* 5318 - * Get an inode object given its location and corresponding root. 5268 + * Get an inode object given its inode number and corresponding root. 5319 5269 * Path can be preallocated to prevent recursing back to iget through 5320 5270 * allocator. NULL is also valid but may require an additional allocation 5321 5271 * later. 5322 5272 */ 5323 - struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, 5273 + struct inode *btrfs_iget_path(struct super_block *s, u64 ino, 5324 5274 struct btrfs_root *root, struct btrfs_path *path) 5325 5275 { 5326 5276 struct inode *inode; 5327 5277 5328 - inode = btrfs_iget_locked(s, location, root); 5278 + inode = btrfs_iget_locked(s, ino, root); 5329 5279 if (!inode) 5330 5280 return ERR_PTR(-ENOMEM); 5331 5281 ··· 5352 5302 return inode; 5353 5303 } 5354 5304 5355 - struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 5356 - struct btrfs_root *root) 5305 + struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root) 5357 5306 { 5358 - return btrfs_iget_path(s, location, root, NULL); 5307 + return btrfs_iget_path(s, ino, root, NULL); 5359 5308 } 5360 5309 5361 5310 static struct inode *new_simple_dir(struct super_block *s, ··· 5423 5374 return ERR_PTR(ret); 5424 5375 5425 5376 if (location.type == BTRFS_INODE_ITEM_KEY) { 5426 - inode = btrfs_iget(dir->i_sb, &location, root); 5377 + inode = btrfs_iget(dir->i_sb, location.objectid, root); 5427 5378 if (IS_ERR(inode)) 5428 5379 return inode; 5429 5380 ··· 5447 5398 else 5448 5399 inode = new_simple_dir(dir->i_sb, &location, sub_root); 5449 5400 } else { 5450 - inode = btrfs_iget(dir->i_sb, &location, sub_root); 5401 + inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); 5451 5402 } 5452 5403 if (root != sub_root) 5453 5404 btrfs_put_root(sub_root); ··· 5828 5779 static int btrfs_insert_inode_locked(struct inode *inode) 5829 5780 { 5830 5781 struct btrfs_iget_args args; 5831 - args.location = &BTRFS_I(inode)->location; 5782 + 5783 + args.ino = BTRFS_I(inode)->location.objectid; 5832 5784 args.root = BTRFS_I(inode)->root; 5833 5785 5834 5786 return insert_inode_locked4(inode, ··· 7041 6991 } 7042 6992 7043 6993 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7044 - struct extent_state **cached_state, int writing) 6994 + struct extent_state **cached_state, bool writing) 7045 6995 { 7046 6996 struct btrfs_ordered_extent *ordered; 7047 6997 int ret = 0; ··· 7179 7129 } 7180 7130 7181 7131 7182 - static int btrfs_get_blocks_direct_read(struct extent_map *em, 7183 - struct buffer_head *bh_result, 7184 - struct inode *inode, 7185 - u64 start, u64 len) 7186 - { 7187 - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7188 - 7189 - if (em->block_start == EXTENT_MAP_HOLE || 7190 - test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7191 - return -ENOENT; 7192 - 7193 - len = min(len, em->len - (start - em->start)); 7194 - 7195 - bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 7196 - inode->i_blkbits; 7197 - bh_result->b_size = len; 7198 - bh_result->b_bdev = fs_info->fs_devices->latest_bdev; 7199 - set_buffer_mapped(bh_result); 7200 - 7201 - return 0; 7202 - } 7203 - 7204 7132 static int btrfs_get_blocks_direct_write(struct extent_map **map, 7205 - struct buffer_head *bh_result, 7206 7133 struct inode *inode, 7207 7134 struct btrfs_dio_data *dio_data, 7208 7135 u64 
start, u64 len) ··· 7241 7214 } 7242 7215 7243 7216 /* this will cow the extent */ 7244 - len = bh_result->b_size; 7245 7217 free_extent_map(em); 7246 7218 *map = em = btrfs_new_extent_direct(inode, start, len); 7247 7219 if (IS_ERR(em)) { ··· 7251 7225 len = min(len, em->len - (start - em->start)); 7252 7226 7253 7227 skip_cow: 7254 - bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 7255 - inode->i_blkbits; 7256 - bh_result->b_size = len; 7257 - bh_result->b_bdev = fs_info->fs_devices->latest_bdev; 7258 - set_buffer_mapped(bh_result); 7259 - 7260 - if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7261 - set_buffer_new(bh_result); 7262 - 7263 7228 /* 7264 7229 * Need to update the i_size under the extent lock so buffered 7265 7230 * readers will get the updated i_size when we unlock. 7266 7231 */ 7267 - if (!dio_data->overwrite && start + len > i_size_read(inode)) 7232 + if (start + len > i_size_read(inode)) 7268 7233 i_size_write(inode, start + len); 7269 7234 7270 - WARN_ON(dio_data->reserve < len); 7271 7235 dio_data->reserve -= len; 7272 - dio_data->unsubmitted_oe_range_end = start + len; 7273 - current->journal_info = dio_data; 7274 7236 out: 7275 7237 return ret; 7276 7238 } 7277 7239 7278 - static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 7279 - struct buffer_head *bh_result, int create) 7240 + static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, 7241 + loff_t length, unsigned flags, struct iomap *iomap, 7242 + struct iomap *srcmap) 7280 7243 { 7281 7244 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7282 7245 struct extent_map *em; 7283 7246 struct extent_state *cached_state = NULL; 7284 7247 struct btrfs_dio_data *dio_data = NULL; 7285 - u64 start = iblock << inode->i_blkbits; 7286 7248 u64 lockstart, lockend; 7287 - u64 len = bh_result->b_size; 7249 + const bool write = !!(flags & IOMAP_WRITE); 7288 7250 int ret = 0; 7251 + u64 len = length; 7252 + bool unlock_extents = false; 7289 7253 7290 - if (!create) 7254 + if (!write) 7291 7255 len = min_t(u64, len, fs_info->sectorsize); 7292 7256 7293 7257 lockstart = start; 7294 7258 lockend = start + len - 1; 7295 7259 7296 - if (current->journal_info) { 7297 - /* 7298 - * Need to pull our outstanding extents and set journal_info to NULL so 7299 - * that anything that needs to check if there's a transaction doesn't get 7300 - * confused. 7301 - */ 7302 - dio_data = current->journal_info; 7303 - current->journal_info = NULL; 7260 + /* 7261 + * The generic stuff only does filemap_write_and_wait_range, which 7262 + * isn't enough if we've written compressed pages to this area, so we 7263 + * need to flush the dirty pages again to make absolutely sure that any 7264 + * outstanding dirty pages are on disk. 
7265 + */ 7266 + if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7267 + &BTRFS_I(inode)->runtime_flags)) 7268 + ret = filemap_fdatawrite_range(inode->i_mapping, start, 7269 + start + length - 1); 7270 + 7271 + dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS); 7272 + if (!dio_data) 7273 + return -ENOMEM; 7274 + 7275 + dio_data->length = length; 7276 + if (write) { 7277 + dio_data->reserve = round_up(length, fs_info->sectorsize); 7278 + ret = btrfs_delalloc_reserve_space(inode, 7279 + &dio_data->data_reserved, 7280 + start, dio_data->reserve); 7281 + if (ret) { 7282 + extent_changeset_free(dio_data->data_reserved); 7283 + kfree(dio_data); 7284 + return ret; 7285 + } 7304 7286 } 7287 + iomap->private = dio_data; 7288 + 7305 7289 7306 7290 /* 7307 7291 * If this errors out it's because we couldn't invalidate pagecache for 7308 7292 * this range and we need to fallback to buffered. 7309 7293 */ 7310 - if (lock_extent_direct(inode, lockstart, lockend, &cached_state, 7311 - create)) { 7294 + if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) { 7312 7295 ret = -ENOTBLK; 7313 7296 goto err; 7314 7297 } ··· 7349 7314 goto unlock_err; 7350 7315 } 7351 7316 7352 - if (create) { 7353 - ret = btrfs_get_blocks_direct_write(&em, bh_result, inode, 7354 - dio_data, start, len); 7317 + len = min(len, em->len - (start - em->start)); 7318 + if (write) { 7319 + ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, 7320 + start, len); 7355 7321 if (ret < 0) 7356 7322 goto unlock_err; 7357 - 7358 - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, 7359 - lockend, &cached_state); 7323 + unlock_extents = true; 7324 + /* Recalc len in case the new em is smaller than requested */ 7325 + len = min(len, em->len - (start - em->start)); 7360 7326 } else { 7361 - ret = btrfs_get_blocks_direct_read(em, bh_result, inode, 7362 - start, len); 7363 - /* Can be negative only if we read from a hole */ 7364 - if (ret < 0) { 7365 - ret = 0; 7366 - free_extent_map(em); 7367 - goto unlock_err; 7368 - } 7369 7327 /* 7370 7328 * We need to unlock only the end area that we aren't using. 7371 7329 * The rest is going to be unlocked by the endio routine. 7372 7330 */ 7373 - lockstart = start + bh_result->b_size; 7374 - if (lockstart < lockend) { 7375 - unlock_extent_cached(&BTRFS_I(inode)->io_tree, 7376 - lockstart, lockend, &cached_state); 7377 - } else { 7378 - free_extent_state(cached_state); 7379 - } 7331 + lockstart = start + len; 7332 + if (lockstart < lockend) 7333 + unlock_extents = true; 7380 7334 } 7335 + 7336 + if (unlock_extents) 7337 + unlock_extent_cached(&BTRFS_I(inode)->io_tree, 7338 + lockstart, lockend, &cached_state); 7339 + else 7340 + free_extent_state(cached_state); 7341 + 7342 + /* 7343 + * Translate extent map information to iomap. 7344 + * We trim the extents (and move the addr) even though iomap code does 7345 + * that, since we have locked only the parts we are performing I/O in. 
7346 + */ 7347 + if ((em->block_start == EXTENT_MAP_HOLE) || 7348 + (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { 7349 + iomap->addr = IOMAP_NULL_ADDR; 7350 + iomap->type = IOMAP_HOLE; 7351 + } else { 7352 + iomap->addr = em->block_start + (start - em->start); 7353 + iomap->type = IOMAP_MAPPED; 7354 + } 7355 + iomap->offset = start; 7356 + iomap->bdev = fs_info->fs_devices->latest_bdev; 7357 + iomap->length = len; 7381 7358 7382 7359 free_extent_map(em); 7383 7360 ··· 7399 7352 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7400 7353 &cached_state); 7401 7354 err: 7402 - if (dio_data) 7403 - current->journal_info = dio_data; 7355 + if (dio_data) { 7356 + btrfs_delalloc_release_space(inode, dio_data->data_reserved, 7357 + start, dio_data->reserve, true); 7358 + btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve); 7359 + extent_changeset_free(dio_data->data_reserved); 7360 + kfree(dio_data); 7361 + } 7404 7362 return ret; 7405 7363 } 7406 7364 7407 - static inline blk_status_t submit_dio_repair_bio(struct inode *inode, 7408 - struct bio *bio, 7409 - int mirror_num) 7365 + static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, 7366 + ssize_t written, unsigned flags, struct iomap *iomap) 7410 7367 { 7368 + int ret = 0; 7369 + struct btrfs_dio_data *dio_data = iomap->private; 7370 + size_t submitted = dio_data->submitted; 7371 + const bool write = !!(flags & IOMAP_WRITE); 7372 + 7373 + if (!write && (iomap->type == IOMAP_HOLE)) { 7374 + /* If reading from a hole, unlock and return */ 7375 + unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1); 7376 + goto out; 7377 + } 7378 + 7379 + if (submitted < length) { 7380 + pos += submitted; 7381 + length -= submitted; 7382 + if (write) 7383 + __endio_write_update_ordered(inode, pos, length, false); 7384 + else 7385 + unlock_extent(&BTRFS_I(inode)->io_tree, pos, 7386 + pos + length - 1); 7387 + ret = -ENOTBLK; 7388 + } 7389 + 7390 + if (write) { 7391 + if (dio_data->reserve) 7392 + btrfs_delalloc_release_space(inode, 7393 + dio_data->data_reserved, pos, 7394 + dio_data->reserve, true); 7395 + btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length); 7396 + extent_changeset_free(dio_data->data_reserved); 7397 + } 7398 + out: 7399 + kfree(dio_data); 7400 + iomap->private = NULL; 7401 + 7402 + return ret; 7403 + } 7404 + 7405 + static void btrfs_dio_private_put(struct btrfs_dio_private *dip) 7406 + { 7407 + /* 7408 + * This implies a barrier so that stores to dio_bio->bi_status before 7409 + * this and loads of dio_bio->bi_status after this are fully ordered. 
7410 + */ 7411 + if (!refcount_dec_and_test(&dip->refs)) 7412 + return; 7413 + 7414 + if (bio_op(dip->dio_bio) == REQ_OP_WRITE) { 7415 + __endio_write_update_ordered(dip->inode, dip->logical_offset, 7416 + dip->bytes, 7417 + !dip->dio_bio->bi_status); 7418 + } else { 7419 + unlock_extent(&BTRFS_I(dip->inode)->io_tree, 7420 + dip->logical_offset, 7421 + dip->logical_offset + dip->bytes - 1); 7422 + } 7423 + 7424 + bio_endio(dip->dio_bio); 7425 + kfree(dip); 7426 + } 7427 + 7428 + static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio, 7429 + int mirror_num, 7430 + unsigned long bio_flags) 7431 + { 7432 + struct btrfs_dio_private *dip = bio->bi_private; 7411 7433 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7412 7434 blk_status_t ret; 7413 7435 7414 7436 BUG_ON(bio_op(bio) == REQ_OP_WRITE); 7415 7437 7416 - ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR); 7438 + ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); 7417 7439 if (ret) 7418 7440 return ret; 7419 7441 7442 + refcount_inc(&dip->refs); 7420 7443 ret = btrfs_map_bio(fs_info, bio, mirror_num); 7421 - 7444 + if (ret) 7445 + refcount_dec(&dip->refs); 7422 7446 return ret; 7423 7447 } 7424 7448 7425 - static int btrfs_check_dio_repairable(struct inode *inode, 7426 - struct bio *failed_bio, 7427 - struct io_failure_record *failrec, 7428 - int failed_mirror) 7449 + static blk_status_t btrfs_check_read_dio_bio(struct inode *inode, 7450 + struct btrfs_io_bio *io_bio, 7451 + const bool uptodate) 7429 7452 { 7430 - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7431 - int num_copies; 7432 - 7433 - num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); 7434 - if (num_copies == 1) { 7435 - /* 7436 - * we only have a single copy of the data, so don't bother with 7437 - * all the retry and error correction code that follows. no 7438 - * matter what the error is, it is very likely to persist. 
7439 - */ 7440 - btrfs_debug(fs_info, 7441 - "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d", 7442 - num_copies, failrec->this_mirror, failed_mirror); 7443 - return 0; 7444 - } 7445 - 7446 - failrec->failed_mirror = failed_mirror; 7447 - failrec->this_mirror++; 7448 - if (failrec->this_mirror == failed_mirror) 7449 - failrec->this_mirror++; 7450 - 7451 - if (failrec->this_mirror > num_copies) { 7452 - btrfs_debug(fs_info, 7453 - "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d", 7454 - num_copies, failrec->this_mirror, failed_mirror); 7455 - return 0; 7456 - } 7457 - 7458 - return 1; 7459 - } 7460 - 7461 - static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio, 7462 - struct page *page, unsigned int pgoff, 7463 - u64 start, u64 end, int failed_mirror, 7464 - bio_end_io_t *repair_endio, void *repair_arg) 7465 - { 7466 - struct io_failure_record *failrec; 7467 - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7453 + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 7454 + const u32 sectorsize = fs_info->sectorsize; 7468 7455 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 7469 - struct bio *bio; 7470 - int isector; 7471 - unsigned int read_mode = 0; 7472 - int segs; 7473 - int ret; 7474 - blk_status_t status; 7475 - struct bio_vec bvec; 7476 - 7477 - BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); 7478 - 7479 - ret = btrfs_get_io_failure_record(inode, start, end, &failrec); 7480 - if (ret) 7481 - return errno_to_blk_status(ret); 7482 - 7483 - ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, 7484 - failed_mirror); 7485 - if (!ret) { 7486 - free_io_failure(failure_tree, io_tree, failrec); 7487 - return BLK_STS_IOERR; 7488 - } 7489 - 7490 - segs = bio_segments(failed_bio); 7491 - bio_get_first_bvec(failed_bio, &bvec); 7492 - if (segs > 1 || 7493 - (bvec.bv_len > btrfs_inode_sectorsize(inode))) 7494 - read_mode |= REQ_FAILFAST_DEV; 7495 - 7496 - isector = start - btrfs_io_bio(failed_bio)->logical; 7497 - isector >>= inode->i_sb->s_blocksize_bits; 7498 - bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, 7499 - pgoff, isector, repair_endio, repair_arg); 7500 - bio->bi_opf = REQ_OP_READ | read_mode; 7501 - 7502 - btrfs_debug(BTRFS_I(inode)->root->fs_info, 7503 - "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d", 7504 - read_mode, failrec->this_mirror, failrec->in_validation); 7505 - 7506 - status = submit_dio_repair_bio(inode, bio, failrec->this_mirror); 7507 - if (status) { 7508 - free_io_failure(failure_tree, io_tree, failrec); 7509 - bio_put(bio); 7510 - } 7511 - 7512 - return status; 7513 - } 7514 - 7515 - struct btrfs_retry_complete { 7516 - struct completion done; 7517 - struct inode *inode; 7518 - u64 start; 7519 - int uptodate; 7520 - }; 7521 - 7522 - static void btrfs_retry_endio_nocsum(struct bio *bio) 7523 - { 7524 - struct btrfs_retry_complete *done = bio->bi_private; 7525 - struct inode *inode = done->inode; 7526 - struct bio_vec *bvec; 7527 - struct extent_io_tree *io_tree, *failure_tree; 7528 - struct bvec_iter_all iter_all; 7529 - 7530 - if (bio->bi_status) 7531 - goto end; 7532 - 7533 - ASSERT(bio->bi_vcnt == 1); 7534 - io_tree = &BTRFS_I(inode)->io_tree; 7535 - failure_tree = &BTRFS_I(inode)->io_failure_tree; 7536 - ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode)); 7537 - 7538 - done->uptodate = 1; 7539 - ASSERT(!bio_flagged(bio, BIO_CLONED)); 7540 - 
bio_for_each_segment_all(bvec, bio, iter_all) 7541 - clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree, 7542 - io_tree, done->start, bvec->bv_page, 7543 - btrfs_ino(BTRFS_I(inode)), 0); 7544 - end: 7545 - complete(&done->done); 7546 - bio_put(bio); 7547 - } 7548 - 7549 - static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode, 7550 - struct btrfs_io_bio *io_bio) 7551 - { 7552 - struct btrfs_fs_info *fs_info; 7456 + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7457 + const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7553 7458 struct bio_vec bvec; 7554 7459 struct bvec_iter iter; 7555 - struct btrfs_retry_complete done; 7556 - u64 start; 7557 - unsigned int pgoff; 7558 - u32 sectorsize; 7559 - int nr_sectors; 7560 - blk_status_t ret; 7460 + u64 start = io_bio->logical; 7461 + int icsum = 0; 7561 7462 blk_status_t err = BLK_STS_OK; 7562 7463 7563 - fs_info = BTRFS_I(inode)->root->fs_info; 7564 - sectorsize = fs_info->sectorsize; 7464 + __bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) { 7465 + unsigned int i, nr_sectors, pgoff; 7565 7466 7566 - start = io_bio->logical; 7567 - done.inode = inode; 7568 - io_bio->bio.bi_iter = io_bio->iter; 7569 - 7570 - bio_for_each_segment(bvec, &io_bio->bio, iter) { 7571 7467 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); 7572 7468 pgoff = bvec.bv_offset; 7573 - 7574 - next_block_or_try_again: 7575 - done.uptodate = 0; 7576 - done.start = start; 7577 - init_completion(&done.done); 7578 - 7579 - ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page, 7580 - pgoff, start, start + sectorsize - 1, 7581 - io_bio->mirror_num, 7582 - btrfs_retry_endio_nocsum, &done); 7583 - if (ret) { 7584 - err = ret; 7585 - goto next; 7586 - } 7587 - 7588 - wait_for_completion_io(&done.done); 7589 - 7590 - if (!done.uptodate) { 7591 - /* We might have another mirror, so try again */ 7592 - goto next_block_or_try_again; 7593 - } 7594 - 7595 - next: 7596 - start += sectorsize; 7597 - 7598 - nr_sectors--; 7599 - if (nr_sectors) { 7600 - pgoff += sectorsize; 7469 + for (i = 0; i < nr_sectors; i++) { 7601 7470 ASSERT(pgoff < PAGE_SIZE); 7602 - goto next_block_or_try_again; 7603 - } 7604 - } 7471 + if (uptodate && 7472 + (!csum || !check_data_csum(inode, io_bio, icsum, 7473 + bvec.bv_page, pgoff, 7474 + start, sectorsize))) { 7475 + clean_io_failure(fs_info, failure_tree, io_tree, 7476 + start, bvec.bv_page, 7477 + btrfs_ino(BTRFS_I(inode)), 7478 + pgoff); 7479 + } else { 7480 + blk_status_t status; 7605 7481 7606 - return err; 7607 - } 7608 - 7609 - static void btrfs_retry_endio(struct bio *bio) 7610 - { 7611 - struct btrfs_retry_complete *done = bio->bi_private; 7612 - struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7613 - struct extent_io_tree *io_tree, *failure_tree; 7614 - struct inode *inode = done->inode; 7615 - struct bio_vec *bvec; 7616 - int uptodate; 7617 - int ret; 7618 - int i = 0; 7619 - struct bvec_iter_all iter_all; 7620 - 7621 - if (bio->bi_status) 7622 - goto end; 7623 - 7624 - uptodate = 1; 7625 - 7626 - ASSERT(bio->bi_vcnt == 1); 7627 - ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode)); 7628 - 7629 - io_tree = &BTRFS_I(inode)->io_tree; 7630 - failure_tree = &BTRFS_I(inode)->io_failure_tree; 7631 - 7632 - ASSERT(!bio_flagged(bio, BIO_CLONED)); 7633 - bio_for_each_segment_all(bvec, bio, iter_all) { 7634 - ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, 7635 - bvec->bv_offset, done->start, 7636 - bvec->bv_len); 7637 - if (!ret) 7638 - 
clean_io_failure(BTRFS_I(inode)->root->fs_info, 7639 - failure_tree, io_tree, done->start, 7640 - bvec->bv_page, 7641 - btrfs_ino(BTRFS_I(inode)), 7642 - bvec->bv_offset); 7643 - else 7644 - uptodate = 0; 7645 - i++; 7646 - } 7647 - 7648 - done->uptodate = uptodate; 7649 - end: 7650 - complete(&done->done); 7651 - bio_put(bio); 7652 - } 7653 - 7654 - static blk_status_t __btrfs_subio_endio_read(struct inode *inode, 7655 - struct btrfs_io_bio *io_bio, blk_status_t err) 7656 - { 7657 - struct btrfs_fs_info *fs_info; 7658 - struct bio_vec bvec; 7659 - struct bvec_iter iter; 7660 - struct btrfs_retry_complete done; 7661 - u64 start; 7662 - u64 offset = 0; 7663 - u32 sectorsize; 7664 - int nr_sectors; 7665 - unsigned int pgoff; 7666 - int csum_pos; 7667 - bool uptodate = (err == 0); 7668 - int ret; 7669 - blk_status_t status; 7670 - 7671 - fs_info = BTRFS_I(inode)->root->fs_info; 7672 - sectorsize = fs_info->sectorsize; 7673 - 7674 - err = BLK_STS_OK; 7675 - start = io_bio->logical; 7676 - done.inode = inode; 7677 - io_bio->bio.bi_iter = io_bio->iter; 7678 - 7679 - bio_for_each_segment(bvec, &io_bio->bio, iter) { 7680 - nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); 7681 - 7682 - pgoff = bvec.bv_offset; 7683 - next_block: 7684 - if (uptodate) { 7685 - csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset); 7686 - ret = __readpage_endio_check(inode, io_bio, csum_pos, 7687 - bvec.bv_page, pgoff, start, sectorsize); 7688 - if (likely(!ret)) 7689 - goto next; 7690 - } 7691 - try_again: 7692 - done.uptodate = 0; 7693 - done.start = start; 7694 - init_completion(&done.done); 7695 - 7696 - status = dio_read_error(inode, &io_bio->bio, bvec.bv_page, 7697 - pgoff, start, start + sectorsize - 1, 7698 - io_bio->mirror_num, btrfs_retry_endio, 7699 - &done); 7700 - if (status) { 7701 - err = status; 7702 - goto next; 7703 - } 7704 - 7705 - wait_for_completion_io(&done.done); 7706 - 7707 - if (!done.uptodate) { 7708 - /* We might have another mirror, so try again */ 7709 - goto try_again; 7710 - } 7711 - next: 7712 - offset += sectorsize; 7713 - start += sectorsize; 7714 - 7715 - ASSERT(nr_sectors); 7716 - 7717 - nr_sectors--; 7718 - if (nr_sectors) { 7482 + status = btrfs_submit_read_repair(inode, 7483 + &io_bio->bio, 7484 + start - io_bio->logical, 7485 + bvec.bv_page, pgoff, 7486 + start, 7487 + start + sectorsize - 1, 7488 + io_bio->mirror_num, 7489 + submit_dio_repair_bio); 7490 + if (status) 7491 + err = status; 7492 + } 7493 + start += sectorsize; 7494 + icsum++; 7719 7495 pgoff += sectorsize; 7720 - ASSERT(pgoff < PAGE_SIZE); 7721 - goto next_block; 7722 7496 } 7723 7497 } 7724 - 7725 7498 return err; 7726 - } 7727 - 7728 - static blk_status_t btrfs_subio_endio_read(struct inode *inode, 7729 - struct btrfs_io_bio *io_bio, blk_status_t err) 7730 - { 7731 - bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 7732 - 7733 - if (skip_csum) { 7734 - if (unlikely(err)) 7735 - return __btrfs_correct_data_nocsum(inode, io_bio); 7736 - else 7737 - return BLK_STS_OK; 7738 - } else { 7739 - return __btrfs_subio_endio_read(inode, io_bio, err); 7740 - } 7741 - } 7742 - 7743 - static void btrfs_endio_direct_read(struct bio *bio) 7744 - { 7745 - struct btrfs_dio_private *dip = bio->bi_private; 7746 - struct inode *inode = dip->inode; 7747 - struct bio *dio_bio; 7748 - struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7749 - blk_status_t err = bio->bi_status; 7750 - 7751 - if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) 7752 - err = btrfs_subio_endio_read(inode, io_bio, err); 7753 - 7754 - 
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 7755 - dip->logical_offset + dip->bytes - 1); 7756 - dio_bio = dip->dio_bio; 7757 - 7758 - kfree(dip); 7759 - 7760 - dio_bio->bi_status = err; 7761 - dio_end_io(dio_bio); 7762 - btrfs_io_bio_free_csum(io_bio); 7763 - bio_put(bio); 7764 7499 } 7765 7500 7766 7501 static void __endio_write_update_ordered(struct inode *inode, ··· 7588 7759 } 7589 7760 } 7590 7761 7591 - static void btrfs_endio_direct_write(struct bio *bio) 7592 - { 7593 - struct btrfs_dio_private *dip = bio->bi_private; 7594 - struct bio *dio_bio = dip->dio_bio; 7595 - 7596 - __endio_write_update_ordered(dip->inode, dip->logical_offset, 7597 - dip->bytes, !bio->bi_status); 7598 - 7599 - kfree(dip); 7600 - 7601 - dio_bio->bi_status = bio->bi_status; 7602 - dio_end_io(dio_bio); 7603 - bio_put(bio); 7604 - } 7605 - 7606 7762 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data, 7607 7763 struct bio *bio, u64 offset) 7608 7764 { ··· 7611 7797 (unsigned long long)bio->bi_iter.bi_sector, 7612 7798 bio->bi_iter.bi_size, err); 7613 7799 7614 - if (dip->subio_endio) 7615 - err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err); 7616 - 7617 - if (err) { 7618 - /* 7619 - * We want to perceive the errors flag being set before 7620 - * decrementing the reference count. We don't need a barrier 7621 - * since atomic operations with a return value are fully 7622 - * ordered as per atomic_t.txt 7623 - */ 7624 - dip->errors = 1; 7800 + if (bio_op(bio) == REQ_OP_READ) { 7801 + err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio), 7802 + !err); 7625 7803 } 7626 7804 7627 - /* if there are more bios still pending for this dio, just exit */ 7628 - if (!atomic_dec_and_test(&dip->pending_bios)) 7629 - goto out; 7805 + if (err) 7806 + dip->dio_bio->bi_status = err; 7630 7807 7631 - if (dip->errors) { 7632 - bio_io_error(dip->orig_bio); 7633 - } else { 7634 - dip->dio_bio->bi_status = BLK_STS_OK; 7635 - bio_endio(dip->orig_bio); 7636 - } 7637 - out: 7638 7808 bio_put(bio); 7639 - } 7640 - 7641 - static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, 7642 - struct btrfs_dio_private *dip, 7643 - struct bio *bio, 7644 - u64 file_offset) 7645 - { 7646 - struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7647 - struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); 7648 - u16 csum_size; 7649 - blk_status_t ret; 7650 - 7651 - /* 7652 - * We load all the csum data we need when we submit 7653 - * the first bio to reduce the csum tree search and 7654 - * contention. 
7655 - */ 7656 - if (dip->logical_offset == file_offset) { 7657 - ret = btrfs_lookup_bio_sums(inode, dip->orig_bio, file_offset, 7658 - NULL); 7659 - if (ret) 7660 - return ret; 7661 - } 7662 - 7663 - if (bio == dip->orig_bio) 7664 - return 0; 7665 - 7666 - file_offset -= dip->logical_offset; 7667 - file_offset >>= inode->i_sb->s_blocksize_bits; 7668 - csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy); 7669 - io_bio->csum = orig_io_bio->csum + csum_size * file_offset; 7670 - 7671 - return 0; 7809 + btrfs_dio_private_put(dip); 7672 7810 } 7673 7811 7674 7812 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, ··· 7658 7892 if (ret) 7659 7893 goto err; 7660 7894 } else { 7661 - ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio, 7662 - file_offset); 7663 - if (ret) 7664 - goto err; 7895 + u64 csum_offset; 7896 + 7897 + csum_offset = file_offset - dip->logical_offset; 7898 + csum_offset >>= inode->i_sb->s_blocksize_bits; 7899 + csum_offset *= btrfs_super_csum_size(fs_info->super_copy); 7900 + btrfs_io_bio(bio)->csum = dip->csums + csum_offset; 7665 7901 } 7666 7902 map: 7667 7903 ret = btrfs_map_bio(fs_info, bio, 0); ··· 7671 7903 return ret; 7672 7904 } 7673 7905 7674 - static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) 7906 + /* 7907 + * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked 7908 + * or ordered extents whether or not we submit any bios. 7909 + */ 7910 + static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, 7911 + struct inode *inode, 7912 + loff_t file_offset) 7675 7913 { 7676 - struct inode *inode = dip->inode; 7914 + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); 7915 + const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7916 + size_t dip_size; 7917 + struct btrfs_dio_private *dip; 7918 + 7919 + dip_size = sizeof(*dip); 7920 + if (!write && csum) { 7921 + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7922 + const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); 7923 + size_t nblocks; 7924 + 7925 + nblocks = dio_bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; 7926 + dip_size += csum_size * nblocks; 7927 + } 7928 + 7929 + dip = kzalloc(dip_size, GFP_NOFS); 7930 + if (!dip) 7931 + return NULL; 7932 + 7933 + dip->inode = inode; 7934 + dip->logical_offset = file_offset; 7935 + dip->bytes = dio_bio->bi_iter.bi_size; 7936 + dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; 7937 + dip->dio_bio = dio_bio; 7938 + refcount_set(&dip->refs, 1); 7939 + return dip; 7940 + } 7941 + 7942 + static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap, 7943 + struct bio *dio_bio, loff_t file_offset) 7944 + { 7945 + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); 7946 + const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); 7677 7947 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7948 + const bool raid56 = (btrfs_data_alloc_profile(fs_info) & 7949 + BTRFS_BLOCK_GROUP_RAID56_MASK); 7950 + struct btrfs_dio_private *dip; 7678 7951 struct bio *bio; 7679 - struct bio *orig_bio = dip->orig_bio; 7680 - u64 start_sector = orig_bio->bi_iter.bi_sector; 7681 - u64 file_offset = dip->logical_offset; 7952 + u64 start_sector; 7682 7953 int async_submit = 0; 7683 7954 u64 submit_len; 7684 7955 int clone_offset = 0; ··· 7725 7918 int ret; 7726 7919 blk_status_t status; 7727 7920 struct btrfs_io_geometry geom; 7921 + struct btrfs_dio_data *dio_data = iomap->private; 7728 7922 7729 - submit_len = 
orig_bio->bi_iter.bi_size; 7730 - ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), 7731 - start_sector << 9, submit_len, &geom); 7732 - if (ret) 7733 - return -EIO; 7734 - 7735 - if (geom.len >= submit_len) { 7736 - bio = orig_bio; 7737 - dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED; 7738 - goto submit; 7923 + dip = btrfs_create_dio_private(dio_bio, inode, file_offset); 7924 + if (!dip) { 7925 + if (!write) { 7926 + unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 7927 + file_offset + dio_bio->bi_iter.bi_size - 1); 7928 + } 7929 + dio_bio->bi_status = BLK_STS_RESOURCE; 7930 + bio_endio(dio_bio); 7931 + return BLK_QC_T_NONE; 7739 7932 } 7740 7933 7741 - /* async crcs make it difficult to collect full stripe writes. */ 7742 - if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK) 7743 - async_submit = 0; 7744 - else 7745 - async_submit = 1; 7934 + if (!write && csum) { 7935 + /* 7936 + * Load the csums up front to reduce csum tree searches and 7937 + * contention when submitting bios. 7938 + */ 7939 + status = btrfs_lookup_bio_sums(inode, dio_bio, file_offset, 7940 + dip->csums); 7941 + if (status != BLK_STS_OK) 7942 + goto out_err; 7943 + } 7746 7944 7747 - /* bio split */ 7748 - ASSERT(geom.len <= INT_MAX); 7749 - atomic_inc(&dip->pending_bios); 7945 + start_sector = dio_bio->bi_iter.bi_sector; 7946 + submit_len = dio_bio->bi_iter.bi_size; 7947 + 7750 7948 do { 7949 + ret = btrfs_get_io_geometry(fs_info, btrfs_op(dio_bio), 7950 + start_sector << 9, submit_len, 7951 + &geom); 7952 + if (ret) { 7953 + status = errno_to_blk_status(ret); 7954 + goto out_err; 7955 + } 7956 + ASSERT(geom.len <= INT_MAX); 7957 + 7751 7958 clone_len = min_t(int, submit_len, geom.len); 7752 7959 7753 7960 /* 7754 7961 * This will never fail as it's passing GPF_NOFS and 7755 7962 * the allocation is backed by btrfs_bioset. 7756 7963 */ 7757 - bio = btrfs_bio_clone_partial(orig_bio, clone_offset, 7758 - clone_len); 7964 + bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len); 7759 7965 bio->bi_private = dip; 7760 7966 bio->bi_end_io = btrfs_end_dio_bio; 7761 7967 btrfs_io_bio(bio)->logical = file_offset; 7762 7968 7763 7969 ASSERT(submit_len >= clone_len); 7764 7970 submit_len -= clone_len; 7765 - if (submit_len == 0) 7766 - break; 7767 7971 7768 7972 /* 7769 7973 * Increase the count before we submit the bio so we know 7770 7974 * the end IO handler won't happen before we increase the 7771 7975 * count. Otherwise, the dip might get freed before we're 7772 7976 * done setting it up. 7977 + * 7978 + * We transfer the initial reference to the last bio, so we 7979 + * don't need to increment the reference count for the last one. 7773 7980 */ 7774 - atomic_inc(&dip->pending_bios); 7981 + if (submit_len > 0) { 7982 + refcount_inc(&dip->refs); 7983 + /* 7984 + * If we are submitting more than one bio, submit them 7985 + * all asynchronously. The exception is RAID 5 or 6, as 7986 + * asynchronous checksums make it difficult to collect 7987 + * full stripe writes. 
7988 + */ 7989 + if (!raid56) 7990 + async_submit = 1; 7991 + } 7775 7992 7776 7993 status = btrfs_submit_dio_bio(bio, inode, file_offset, 7777 7994 async_submit); 7778 7995 if (status) { 7779 7996 bio_put(bio); 7780 - atomic_dec(&dip->pending_bios); 7997 + if (submit_len > 0) 7998 + refcount_dec(&dip->refs); 7781 7999 goto out_err; 7782 8000 } 7783 8001 8002 + dio_data->submitted += clone_len; 7784 8003 clone_offset += clone_len; 7785 8004 start_sector += clone_len >> 9; 7786 8005 file_offset += clone_len; 7787 - 7788 - ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), 7789 - start_sector << 9, submit_len, &geom); 7790 - if (ret) 7791 - goto out_err; 7792 8006 } while (submit_len > 0); 8007 + return BLK_QC_T_NONE; 7793 8008 7794 - submit: 7795 - status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit); 7796 - if (!status) 7797 - return 0; 7798 - 7799 - bio_put(bio); 7800 8009 out_err: 7801 - dip->errors = 1; 7802 - /* 7803 - * Before atomic variable goto zero, we must make sure dip->errors is 7804 - * perceived to be set. This ordering is ensured by the fact that an 7805 - * atomic operations with a return value are fully ordered as per 7806 - * atomic_t.txt 7807 - */ 7808 - if (atomic_dec_and_test(&dip->pending_bios)) 7809 - bio_io_error(dip->orig_bio); 7810 - 7811 - /* bio_end_io() will handle error, so we needn't return it */ 7812 - return 0; 8010 + dip->dio_bio->bi_status = status; 8011 + btrfs_dio_private_put(dip); 8012 + return BLK_QC_T_NONE; 7813 8013 } 7814 8014 7815 - static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, 7816 - loff_t file_offset) 7817 - { 7818 - struct btrfs_dio_private *dip = NULL; 7819 - struct bio *bio = NULL; 7820 - struct btrfs_io_bio *io_bio; 7821 - bool write = (bio_op(dio_bio) == REQ_OP_WRITE); 7822 - int ret = 0; 8015 + const struct iomap_ops btrfs_dio_iomap_ops = { 8016 + .iomap_begin = btrfs_dio_iomap_begin, 8017 + .iomap_end = btrfs_dio_iomap_end, 8018 + }; 7823 8019 7824 - bio = btrfs_bio_clone(dio_bio); 7825 - 7826 - dip = kzalloc(sizeof(*dip), GFP_NOFS); 7827 - if (!dip) { 7828 - ret = -ENOMEM; 7829 - goto free_ordered; 7830 - } 7831 - 7832 - dip->private = dio_bio->bi_private; 7833 - dip->inode = inode; 7834 - dip->logical_offset = file_offset; 7835 - dip->bytes = dio_bio->bi_iter.bi_size; 7836 - dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; 7837 - bio->bi_private = dip; 7838 - dip->orig_bio = bio; 7839 - dip->dio_bio = dio_bio; 7840 - atomic_set(&dip->pending_bios, 0); 7841 - io_bio = btrfs_io_bio(bio); 7842 - io_bio->logical = file_offset; 7843 - 7844 - if (write) { 7845 - bio->bi_end_io = btrfs_endio_direct_write; 7846 - } else { 7847 - bio->bi_end_io = btrfs_endio_direct_read; 7848 - dip->subio_endio = btrfs_subio_endio_read; 7849 - } 7850 - 7851 - /* 7852 - * Reset the range for unsubmitted ordered extents (to a 0 length range) 7853 - * even if we fail to submit a bio, because in such case we do the 7854 - * corresponding error handling below and it must not be done a second 7855 - * time by btrfs_direct_IO(). 
7856 - */ 7857 - if (write) { 7858 - struct btrfs_dio_data *dio_data = current->journal_info; 7859 - 7860 - dio_data->unsubmitted_oe_range_end = dip->logical_offset + 7861 - dip->bytes; 7862 - dio_data->unsubmitted_oe_range_start = 7863 - dio_data->unsubmitted_oe_range_end; 7864 - } 7865 - 7866 - ret = btrfs_submit_direct_hook(dip); 7867 - if (!ret) 7868 - return; 7869 - 7870 - btrfs_io_bio_free_csum(io_bio); 7871 - 7872 - free_ordered: 7873 - /* 7874 - * If we arrived here it means either we failed to submit the dip 7875 - * or we either failed to clone the dio_bio or failed to allocate the 7876 - * dip. If we cloned the dio_bio and allocated the dip, we can just 7877 - * call bio_endio against our io_bio so that we get proper resource 7878 - * cleanup if we fail to submit the dip, otherwise, we must do the 7879 - * same as btrfs_endio_direct_[write|read] because we can't call these 7880 - * callbacks - they require an allocated dip and a clone of dio_bio. 7881 - */ 7882 - if (bio && dip) { 7883 - bio_io_error(bio); 7884 - /* 7885 - * The end io callbacks free our dip, do the final put on bio 7886 - * and all the cleanup and final put for dio_bio (through 7887 - * dio_end_io()). 7888 - */ 7889 - dip = NULL; 7890 - bio = NULL; 7891 - } else { 7892 - if (write) 7893 - __endio_write_update_ordered(inode, 7894 - file_offset, 7895 - dio_bio->bi_iter.bi_size, 7896 - false); 7897 - else 7898 - unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 7899 - file_offset + dio_bio->bi_iter.bi_size - 1); 7900 - 7901 - dio_bio->bi_status = BLK_STS_IOERR; 7902 - /* 7903 - * Releases and cleans up our dio_bio, no need to bio_put() 7904 - * nor bio_endio()/bio_io_error() against dio_bio. 7905 - */ 7906 - dio_end_io(dio_bio); 7907 - } 7908 - if (bio) 7909 - bio_put(bio); 7910 - kfree(dip); 7911 - } 7912 - 7913 - static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, 7914 - const struct iov_iter *iter, loff_t offset) 7915 - { 7916 - int seg; 7917 - int i; 7918 - unsigned int blocksize_mask = fs_info->sectorsize - 1; 7919 - ssize_t retval = -EINVAL; 7920 - 7921 - if (offset & blocksize_mask) 7922 - goto out; 7923 - 7924 - if (iov_iter_alignment(iter) & blocksize_mask) 7925 - goto out; 7926 - 7927 - /* If this is a write we don't need to check anymore */ 7928 - if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter)) 7929 - return 0; 7930 - /* 7931 - * Check to make sure we don't have duplicate iov_base's in this 7932 - * iovec, if so return EINVAL, otherwise we'll get csum errors 7933 - * when reading back. 
7934 - */ 7935 - for (seg = 0; seg < iter->nr_segs; seg++) { 7936 - for (i = seg + 1; i < iter->nr_segs; i++) { 7937 - if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 7938 - goto out; 7939 - } 7940 - } 7941 - retval = 0; 7942 - out: 7943 - return retval; 7944 - } 7945 - 7946 - static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 7947 - { 7948 - struct file *file = iocb->ki_filp; 7949 - struct inode *inode = file->f_mapping->host; 7950 - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7951 - struct btrfs_dio_data dio_data = { 0 }; 7952 - struct extent_changeset *data_reserved = NULL; 7953 - loff_t offset = iocb->ki_pos; 7954 - size_t count = 0; 7955 - int flags = 0; 7956 - bool wakeup = true; 7957 - bool relock = false; 7958 - ssize_t ret; 7959 - 7960 - if (check_direct_IO(fs_info, iter, offset)) 7961 - return 0; 7962 - 7963 - inode_dio_begin(inode); 7964 - 7965 - /* 7966 - * The generic stuff only does filemap_write_and_wait_range, which 7967 - * isn't enough if we've written compressed pages to this area, so 7968 - * we need to flush the dirty pages again to make absolutely sure 7969 - * that any outstanding dirty pages are on disk. 7970 - */ 7971 - count = iov_iter_count(iter); 7972 - if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 7973 - &BTRFS_I(inode)->runtime_flags)) 7974 - filemap_fdatawrite_range(inode->i_mapping, offset, 7975 - offset + count - 1); 7976 - 7977 - if (iov_iter_rw(iter) == WRITE) { 7978 - /* 7979 - * If the write DIO is beyond the EOF, we need update 7980 - * the isize, but it is protected by i_mutex. So we can 7981 - * not unlock the i_mutex at this case. 7982 - */ 7983 - if (offset + count <= inode->i_size) { 7984 - dio_data.overwrite = 1; 7985 - inode_unlock(inode); 7986 - relock = true; 7987 - } else if (iocb->ki_flags & IOCB_NOWAIT) { 7988 - ret = -EAGAIN; 7989 - goto out; 7990 - } 7991 - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 7992 - offset, count); 7993 - if (ret) 7994 - goto out; 7995 - 7996 - /* 7997 - * We need to know how many extents we reserved so that we can 7998 - * do the accounting properly if we go over the number we 7999 - * originally calculated. Abuse current->journal_info for this. 8000 - */ 8001 - dio_data.reserve = round_up(count, 8002 - fs_info->sectorsize); 8003 - dio_data.unsubmitted_oe_range_start = (u64)offset; 8004 - dio_data.unsubmitted_oe_range_end = (u64)offset; 8005 - current->journal_info = &dio_data; 8006 - down_read(&BTRFS_I(inode)->dio_sem); 8007 - } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8008 - &BTRFS_I(inode)->runtime_flags)) { 8009 - inode_dio_end(inode); 8010 - flags = DIO_LOCKING | DIO_SKIP_HOLES; 8011 - wakeup = false; 8012 - } 8013 - 8014 - ret = __blockdev_direct_IO(iocb, inode, 8015 - fs_info->fs_devices->latest_bdev, 8016 - iter, btrfs_get_blocks_direct, NULL, 8017 - btrfs_submit_direct, flags); 8018 - if (iov_iter_rw(iter) == WRITE) { 8019 - up_read(&BTRFS_I(inode)->dio_sem); 8020 - current->journal_info = NULL; 8021 - if (ret < 0 && ret != -EIOCBQUEUED) { 8022 - if (dio_data.reserve) 8023 - btrfs_delalloc_release_space(inode, data_reserved, 8024 - offset, dio_data.reserve, true); 8025 - /* 8026 - * On error we might have left some ordered extents 8027 - * without submitting corresponding bios for them, so 8028 - * cleanup them up to avoid other tasks getting them 8029 - * and waiting for them to complete forever. 
8030 - */ 8031 - if (dio_data.unsubmitted_oe_range_start < 8032 - dio_data.unsubmitted_oe_range_end) 8033 - __endio_write_update_ordered(inode, 8034 - dio_data.unsubmitted_oe_range_start, 8035 - dio_data.unsubmitted_oe_range_end - 8036 - dio_data.unsubmitted_oe_range_start, 8037 - false); 8038 - } else if (ret >= 0 && (size_t)ret < count) 8039 - btrfs_delalloc_release_space(inode, data_reserved, 8040 - offset, count - (size_t)ret, true); 8041 - btrfs_delalloc_release_extents(BTRFS_I(inode), count); 8042 - } 8043 - out: 8044 - if (wakeup) 8045 - inode_dio_end(inode); 8046 - if (relock) 8047 - inode_lock(inode); 8048 - 8049 - extent_changeset_free(data_reserved); 8050 - return ret; 8051 - } 8020 + const struct iomap_dio_ops btrfs_dops = { 8021 + .submit_io = btrfs_submit_direct, 8022 + }; 8052 8023 8053 8024 #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) 8054 8025 ··· 10124 10539 .writepage = btrfs_writepage, 10125 10540 .writepages = btrfs_writepages, 10126 10541 .readahead = btrfs_readahead, 10127 - .direct_IO = btrfs_direct_IO, 10542 + .direct_IO = noop_direct_IO, 10128 10543 .invalidatepage = btrfs_invalidatepage, 10129 10544 .releasepage = btrfs_releasepage, 10130 10545 #ifdef CONFIG_MIGRATION
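The inode.c rework above replaces __blockdev_direct_IO() with the iomap infrastructure: btrfs_dio_iomap_begin() maps (and for writes, reserves) each file range, btrfs_submit_direct() is the ->submit_io hook that clones and submits the per-stripe bios, and btrfs_dio_iomap_end() releases whatever was not submitted. Note that ->direct_IO becomes noop_direct_IO only so the VFS still accepts O_DIRECT opens; all real submission goes through the hook. A minimal sketch of the caller side, using a hypothetical btrfs_dio_read_sketch() wrapper for illustration (the real callers additionally handle inode locking and alignment checks):

static ssize_t btrfs_dio_read_sketch(struct kiocb *iocb, struct iov_iter *to)
{
	/*
	 * btrfs_dio_iomap_ops supplies the ->iomap_begin/->iomap_end pair
	 * seen above, btrfs_dops supplies ->submit_io (btrfs_submit_direct),
	 * and the last argument requests synchronous completion for sync
	 * kiocbs.
	 */
	return iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dops,
			    is_sync_kiocb(iocb));
}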
+49 -53
fs/btrfs/ioctl.c
··· 660 660 goto fail; 661 661 662 662 key.offset = (u64)-1; 663 - new_root = btrfs_get_fs_root(fs_info, &key, true); 663 + new_root = btrfs_get_fs_root(fs_info, objectid, true); 664 664 if (IS_ERR(new_root)) { 665 665 ret = PTR_ERR(new_root); 666 666 btrfs_abort_transaction(trans, ret); ··· 748 748 struct btrfs_pending_snapshot *pending_snapshot; 749 749 struct btrfs_trans_handle *trans; 750 750 int ret; 751 - bool snapshot_force_cow = false; 752 751 753 - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 752 + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 754 753 return -EINVAL; 755 754 756 755 if (atomic_read(&root->nr_swapfiles)) { ··· 770 771 goto free_pending; 771 772 } 772 773 773 - /* 774 - * Force new buffered writes to reserve space even when NOCOW is 775 - * possible. This is to avoid later writeback (running dealloc) to 776 - * fallback to COW mode and unexpectedly fail with ENOSPC. 777 - */ 778 - btrfs_drew_read_lock(&root->snapshot_lock); 779 - 780 - ret = btrfs_start_delalloc_snapshot(root); 781 - if (ret) 782 - goto dec_and_free; 783 - 784 - /* 785 - * All previous writes have started writeback in NOCOW mode, so now 786 - * we force future writes to fallback to COW mode during snapshot 787 - * creation. 788 - */ 789 - atomic_inc(&root->snapshot_force_cow); 790 - snapshot_force_cow = true; 791 - 792 - btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); 793 - 794 774 btrfs_init_block_rsv(&pending_snapshot->block_rsv, 795 775 BTRFS_BLOCK_RSV_TEMP); 796 776 /* ··· 784 806 &pending_snapshot->block_rsv, 8, 785 807 false); 786 808 if (ret) 787 - goto dec_and_free; 809 + goto free_pending; 788 810 789 811 pending_snapshot->dentry = dentry; 790 812 pending_snapshot->root = root; ··· 826 848 fail: 827 849 btrfs_put_root(pending_snapshot->snap); 828 850 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); 829 - dec_and_free: 830 - if (snapshot_force_cow) 831 - atomic_dec(&root->snapshot_force_cow); 832 - btrfs_drew_read_unlock(&root->snapshot_lock); 833 - 834 851 free_pending: 835 852 kfree(pending_snapshot->root_item); 836 853 btrfs_free_path(pending_snapshot->path); ··· 954 981 out_unlock: 955 982 inode_unlock(dir); 956 983 return error; 984 + } 985 + 986 + static noinline int btrfs_mksnapshot(const struct path *parent, 987 + const char *name, int namelen, 988 + struct btrfs_root *root, 989 + bool readonly, 990 + struct btrfs_qgroup_inherit *inherit) 991 + { 992 + int ret; 993 + bool snapshot_force_cow = false; 994 + 995 + /* 996 + * Force new buffered writes to reserve space even when NOCOW is 997 + * possible. This is to avoid later writeback (running dealloc) to 998 + * fallback to COW mode and unexpectedly fail with ENOSPC. 999 + */ 1000 + btrfs_drew_read_lock(&root->snapshot_lock); 1001 + 1002 + ret = btrfs_start_delalloc_snapshot(root); 1003 + if (ret) 1004 + goto out; 1005 + 1006 + /* 1007 + * All previous writes have started writeback in NOCOW mode, so now 1008 + * we force future writes to fallback to COW mode during snapshot 1009 + * creation. 
1010 + */ 1011 + atomic_inc(&root->snapshot_force_cow); 1012 + snapshot_force_cow = true; 1013 + 1014 + btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); 1015 + 1016 + ret = btrfs_mksubvol(parent, name, namelen, 1017 + root, readonly, inherit); 1018 + out: 1019 + if (snapshot_force_cow) 1020 + atomic_dec(&root->snapshot_force_cow); 1021 + btrfs_drew_read_unlock(&root->snapshot_lock); 1022 + return ret; 957 1023 } 958 1024 959 1025 /* ··· 1774 1762 */ 1775 1763 ret = -EPERM; 1776 1764 } else { 1777 - ret = btrfs_mksubvol(&file->f_path, name, namelen, 1765 + ret = btrfs_mksnapshot(&file->f_path, name, namelen, 1778 1766 BTRFS_I(src_inode)->root, 1779 1767 readonly, inherit); 1780 1768 } ··· 2139 2127 /* search the root of the inode that was passed */ 2140 2128 root = btrfs_grab_root(BTRFS_I(inode)->root); 2141 2129 } else { 2142 - key.objectid = sk->tree_id; 2143 - key.type = BTRFS_ROOT_ITEM_KEY; 2144 - key.offset = (u64)-1; 2145 - root = btrfs_get_fs_root(info, &key, true); 2130 + root = btrfs_get_fs_root(info, sk->tree_id, true); 2146 2131 if (IS_ERR(root)) { 2147 2132 btrfs_free_path(path); 2148 2133 return PTR_ERR(root); ··· 2272 2263 2273 2264 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1]; 2274 2265 2275 - key.objectid = tree_id; 2276 - key.type = BTRFS_ROOT_ITEM_KEY; 2277 - key.offset = (u64)-1; 2278 - root = btrfs_get_fs_root(info, &key, true); 2266 + root = btrfs_get_fs_root(info, tree_id, true); 2279 2267 if (IS_ERR(root)) { 2280 2268 ret = PTR_ERR(root); 2281 2269 root = NULL; ··· 2365 2359 if (dirid != upper_limit.objectid) { 2366 2360 ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1]; 2367 2361 2368 - key.objectid = treeid; 2369 - key.type = BTRFS_ROOT_ITEM_KEY; 2370 - key.offset = (u64)-1; 2371 - root = btrfs_get_fs_root(fs_info, &key, true); 2362 + root = btrfs_get_fs_root(fs_info, treeid, true); 2372 2363 if (IS_ERR(root)) { 2373 2364 ret = PTR_ERR(root); 2374 2365 goto out; ··· 2424 2421 goto out_put; 2425 2422 } 2426 2423 2427 - temp_inode = btrfs_iget(sb, &key2, root); 2424 + temp_inode = btrfs_iget(sb, key2.objectid, root); 2428 2425 if (IS_ERR(temp_inode)) { 2429 2426 ret = PTR_ERR(temp_inode); 2430 2427 goto out_put; ··· 2611 2608 2612 2609 /* Get root_item of inode's subvolume */ 2613 2610 key.objectid = BTRFS_I(inode)->root->root_key.objectid; 2614 - key.type = BTRFS_ROOT_ITEM_KEY; 2615 - key.offset = (u64)-1; 2616 - root = btrfs_get_fs_root(fs_info, &key, true); 2611 + root = btrfs_get_fs_root(fs_info, key.objectid, true); 2617 2612 if (IS_ERR(root)) { 2618 2613 ret = PTR_ERR(root); 2619 2614 goto out_free; ··· 3279 3278 struct btrfs_dir_item *di; 3280 3279 struct btrfs_trans_handle *trans; 3281 3280 struct btrfs_path *path = NULL; 3282 - struct btrfs_key location; 3283 3281 struct btrfs_disk_key disk_key; 3284 3282 u64 objectid = 0; 3285 3283 u64 dir_id; ··· 3299 3299 if (!objectid) 3300 3300 objectid = BTRFS_FS_TREE_OBJECTID; 3301 3301 3302 - location.objectid = objectid; 3303 - location.type = BTRFS_ROOT_ITEM_KEY; 3304 - location.offset = (u64)-1; 3305 - 3306 - new_root = btrfs_get_fs_root(fs_info, &location, true); 3302 + new_root = btrfs_get_fs_root(fs_info, objectid, true); 3307 3303 if (IS_ERR(new_root)) { 3308 3304 ret = PTR_ERR(new_root); 3309 3305 goto out;
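With the split above, btrfs_mksnapshot() now owns the flush-then-force-COW sequence around btrfs_mksubvol(): flush delalloc so in-flight NOCOW writes reach disk as NOCOW, bump snapshot_force_cow so later writes reserve space as COW, wait for ordered extents, then take the snapshot. A hedged userspace sketch of the ioctl that ends up in this path (error handling trimmed, paths illustrative):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Create a read-only snapshot of src_subvol as dst_dir/name. */
int make_ro_snapshot(const char *src_subvol, const char *dst_dir,
		     const char *name)
{
	struct btrfs_ioctl_vol_args_v2 args = { 0 };
	int src = open(src_subvol, O_RDONLY); /* subvolume being snapshotted */
	int dst = open(dst_dir, O_RDONLY);    /* directory receiving it */

	args.fd = src;
	args.flags = BTRFS_SUBVOL_RDONLY;
	strncpy(args.name, name, BTRFS_SUBVOL_NAME_MAX);
	return ioctl(dst, BTRFS_IOC_SNAP_CREATE_V2, &args);
}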
+1
fs/btrfs/locking.c
··· 410 410 * The rwlock is held for write upon exit. 411 411 */ 412 412 void btrfs_tree_lock(struct extent_buffer *eb) 413 + __acquires(&eb->lock) 413 414 { 414 415 u64 start_ns = 0; 415 416
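The new __acquires() line is a sparse context annotation: it tells the checker that btrfs_tree_lock() returns with eb->lock held even though the acquisition happens inside the function, avoiding a "context imbalance" warning. A generic sketch of how such wrappers pair up, assuming the standard annotations from include/linux/compiler_types.h and a hypothetical my_obj type:

void my_lock(struct my_obj *o)
	__acquires(&o->lock)	/* exits with o->lock held */
{
	spin_lock(&o->lock);
}

void my_unlock(struct my_obj *o)
	__releases(&o->lock)	/* must be entered with o->lock held */
{
	spin_unlock(&o->lock);
}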
+54
fs/btrfs/misc.h
··· 6 6 #include <linux/sched.h> 7 7 #include <linux/wait.h> 8 8 #include <asm/div64.h> 9 + #include <linux/rbtree.h> 9 10 10 11 #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len)) 11 12 ··· 57 56 static inline bool has_single_bit_set(u64 n) 58 57 { 59 58 return is_power_of_two_u64(n); 59 + } 60 + 61 + /* 62 + * Simple bytenr based rb_tree relate structures 63 + * 64 + * Any structure wants to use bytenr as single search index should have their 65 + * structure start with these members. 66 + */ 67 + struct rb_simple_node { 68 + struct rb_node rb_node; 69 + u64 bytenr; 70 + }; 71 + 72 + static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr) 73 + { 74 + struct rb_node *node = root->rb_node; 75 + struct rb_simple_node *entry; 76 + 77 + while (node) { 78 + entry = rb_entry(node, struct rb_simple_node, rb_node); 79 + 80 + if (bytenr < entry->bytenr) 81 + node = node->rb_left; 82 + else if (bytenr > entry->bytenr) 83 + node = node->rb_right; 84 + else 85 + return node; 86 + } 87 + return NULL; 88 + } 89 + 90 + static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr, 91 + struct rb_node *node) 92 + { 93 + struct rb_node **p = &root->rb_node; 94 + struct rb_node *parent = NULL; 95 + struct rb_simple_node *entry; 96 + 97 + while (*p) { 98 + parent = *p; 99 + entry = rb_entry(parent, struct rb_simple_node, rb_node); 100 + 101 + if (bytenr < entry->bytenr) 102 + p = &(*p)->rb_left; 103 + else if (bytenr > entry->bytenr) 104 + p = &(*p)->rb_right; 105 + else 106 + return parent; 107 + } 108 + 109 + rb_link_node(node, parent, p); 110 + rb_insert_color(node, root); 111 + return NULL; 60 112 } 61 113 62 114 #endif
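Both helpers assume the caller's structure starts with exactly { struct rb_node; u64 bytenr; }, so the containing object can be recovered with rb_entry(). rb_simple_search() returns the matching node or NULL; rb_simple_insert() returns the colliding node, or NULL on success. A short usage sketch with a hypothetical my_block type, mirroring how relocation.c adopts these helpers below:

struct my_block {
	struct rb_node rb_node;	/* must come first ... */
	u64 bytenr;		/* ... immediately followed by the key */
	int level;		/* private payload */
};

static struct my_block *my_block_lookup(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = rb_simple_search(root, bytenr);

	return node ? rb_entry(node, struct my_block, rb_node) : NULL;
}

static int my_block_insert(struct rb_root *root, struct my_block *block)
{
	/* A non-NULL return is the already-cached node with this bytenr. */
	if (rb_simple_insert(root, block->bytenr, &block->rb_node))
		return -EEXIST;
	return 0;
}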
+2 -7
fs/btrfs/props.c
··· 408 408 struct btrfs_root *parent_root) 409 409 { 410 410 struct super_block *sb = root->fs_info->sb; 411 - struct btrfs_key key; 412 411 struct inode *parent_inode, *child_inode; 413 412 int ret; 414 413 415 - key.objectid = BTRFS_FIRST_FREE_OBJECTID; 416 - key.type = BTRFS_INODE_ITEM_KEY; 417 - key.offset = 0; 418 - 419 - parent_inode = btrfs_iget(sb, &key, parent_root); 414 + parent_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, parent_root); 420 415 if (IS_ERR(parent_inode)) 421 416 return PTR_ERR(parent_inode); 422 417 423 - child_inode = btrfs_iget(sb, &key, root); 418 + child_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, root); 424 419 if (IS_ERR(child_inode)) { 425 420 iput(parent_inode); 426 421 return PTR_ERR(child_inode);
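Same API simplification as in the ioctl.c hunks above: btrfs_iget() now takes the inode number directly, because every caller passed BTRFS_INODE_ITEM_KEY with offset 0 anyway (just as btrfs_get_fs_root() callers always passed BTRFS_ROOT_ITEM_KEY with offset -1). The before/after pattern, as visible in this hunk:

/* Before: a three-line key whose type and offset never varied. */
struct btrfs_key key = {
	.objectid = BTRFS_FIRST_FREE_OBJECTID,
	.type = BTRFS_INODE_ITEM_KEY,
	.offset = 0,
};
inode = btrfs_iget(sb, &key, root);

/* After: pass the objectid, the helper supplies the implied key. */
inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, root);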
+14
fs/btrfs/qgroup.c
··· 2622 2622 struct btrfs_root *quota_root; 2623 2623 struct btrfs_qgroup *srcgroup; 2624 2624 struct btrfs_qgroup *dstgroup; 2625 + bool need_rescan = false; 2625 2626 u32 level_size = 0; 2626 2627 u64 nums; 2627 2628 ··· 2766 2765 goto unlock; 2767 2766 } 2768 2767 ++i_qgroups; 2768 + 2769 + /* 2770 + * If we're doing a snapshot, and adding the snapshot to a new 2771 + * qgroup, the numbers are guaranteed to be incorrect. 2772 + */ 2773 + if (srcid) 2774 + need_rescan = true; 2769 2775 } 2770 2776 2771 2777 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { ··· 2792 2784 2793 2785 dst->rfer = src->rfer - level_size; 2794 2786 dst->rfer_cmpr = src->rfer_cmpr - level_size; 2787 + 2788 + /* Manually tweaking numbers certainly needs a rescan */ 2789 + need_rescan = true; 2795 2790 } 2796 2791 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { 2797 2792 struct btrfs_qgroup *src; ··· 2813 2802 2814 2803 dst->excl = src->excl + level_size; 2815 2804 dst->excl_cmpr = src->excl_cmpr + level_size; 2805 + need_rescan = true; 2816 2806 } 2817 2807 2818 2808 unlock: ··· 2821 2809 out: 2822 2810 if (!committing) 2823 2811 mutex_unlock(&fs_info->qgroup_ioctl_lock); 2812 + if (need_rescan) 2813 + fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2824 2814 return ret; 2825 2815 } 2826 2816
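Once need_rescan trips and BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT is set, the accounting numbers stay untrusted until userspace requests a rescan (this is what "btrfs quota rescan" does under the hood). A hedged userspace sketch of that trigger (error handling trimmed):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Kick off a qgroup rescan on the filesystem mounted at mountpoint. */
int rescan_qgroups(const char *mountpoint)
{
	struct btrfs_ioctl_quota_rescan_args args;
	int fd = open(mountpoint, O_RDONLY);

	memset(&args, 0, sizeof(args));
	return ioctl(fd, BTRFS_IOC_QUOTA_RESCAN, &args);
}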
+295 -1038
fs/btrfs/relocation.c
··· 24 24 #include "delalloc-space.h" 25 25 #include "block-group.h" 26 26 #include "backref.h" 27 + #include "misc.h" 27 28 28 29 /* 29 30 * Relocation overview ··· 73 72 * The entry point of relocation is relocate_block_group() function. 74 73 */ 75 74 76 - /* 77 - * backref_node, mapping_node and tree_block start with this 78 - */ 79 - struct tree_entry { 80 - struct rb_node rb_node; 81 - u64 bytenr; 82 - }; 83 - 84 - /* 85 - * present a tree block in the backref cache 86 - */ 87 - struct backref_node { 88 - struct rb_node rb_node; 89 - u64 bytenr; 90 - 91 - u64 new_bytenr; 92 - /* objectid of tree block owner, can be not uptodate */ 93 - u64 owner; 94 - /* link to pending, changed or detached list */ 95 - struct list_head list; 96 - /* list of upper level blocks reference this block */ 97 - struct list_head upper; 98 - /* list of child blocks in the cache */ 99 - struct list_head lower; 100 - /* NULL if this node is not tree root */ 101 - struct btrfs_root *root; 102 - /* extent buffer got by COW the block */ 103 - struct extent_buffer *eb; 104 - /* level of tree block */ 105 - unsigned int level:8; 106 - /* is the block in non-reference counted tree */ 107 - unsigned int cowonly:1; 108 - /* 1 if no child node in the cache */ 109 - unsigned int lowest:1; 110 - /* is the extent buffer locked */ 111 - unsigned int locked:1; 112 - /* has the block been processed */ 113 - unsigned int processed:1; 114 - /* have backrefs of this block been checked */ 115 - unsigned int checked:1; 116 - /* 117 - * 1 if corresponding block has been cowed but some upper 118 - * level block pointers may not point to the new location 119 - */ 120 - unsigned int pending:1; 121 - /* 122 - * 1 if the backref node isn't connected to any other 123 - * backref node. 124 - */ 125 - unsigned int detached:1; 126 - }; 127 - 128 - /* 129 - * present a block pointer in the backref cache 130 - */ 131 - struct backref_edge { 132 - struct list_head list[2]; 133 - struct backref_node *node[2]; 134 - }; 135 - 136 - #define LOWER 0 137 - #define UPPER 1 138 75 #define RELOCATION_RESERVED_NODES 256 139 - 140 - struct backref_cache { 141 - /* red black tree of all backref nodes in the cache */ 142 - struct rb_root rb_root; 143 - /* for passing backref nodes to btrfs_reloc_cow_block */ 144 - struct backref_node *path[BTRFS_MAX_LEVEL]; 145 - /* 146 - * list of blocks that have been cowed but some block 147 - * pointers in upper level blocks may not reflect the 148 - * new location 149 - */ 150 - struct list_head pending[BTRFS_MAX_LEVEL]; 151 - /* list of backref nodes with no child node */ 152 - struct list_head leaves; 153 - /* list of blocks that have been cowed in current transaction */ 154 - struct list_head changed; 155 - /* list of detached backref node. 
*/ 156 - struct list_head detached; 157 - 158 - u64 last_trans; 159 - 160 - int nr_nodes; 161 - int nr_edges; 162 - }; 163 - 164 76 /* 165 77 * map address of tree root to tree 166 78 */ 167 79 struct mapping_node { 168 - struct rb_node rb_node; 169 - u64 bytenr; 80 + struct { 81 + struct rb_node rb_node; 82 + u64 bytenr; 83 + }; /* Use rb_simle_node for search/insert */ 170 84 void *data; 171 85 }; 172 86 ··· 94 178 * present a tree block to process 95 179 */ 96 180 struct tree_block { 97 - struct rb_node rb_node; 98 - u64 bytenr; 181 + struct { 182 + struct rb_node rb_node; 183 + u64 bytenr; 184 + }; /* Use rb_simple_node for search/insert */ 99 185 struct btrfs_key key; 100 186 unsigned int level:8; 101 187 unsigned int key_ready:1; ··· 122 204 123 205 struct btrfs_block_rsv *block_rsv; 124 206 125 - struct backref_cache backref_cache; 207 + struct btrfs_backref_cache backref_cache; 126 208 127 209 struct file_extent_cluster cluster; 128 210 /* tree blocks have been processed */ ··· 153 235 #define MOVE_DATA_EXTENTS 0 154 236 #define UPDATE_DATA_PTRS 1 155 237 156 - static void remove_backref_node(struct backref_cache *cache, 157 - struct backref_node *node); 158 - static void __mark_block_processed(struct reloc_control *rc, 159 - struct backref_node *node); 238 + static void mark_block_processed(struct reloc_control *rc, 239 + struct btrfs_backref_node *node) 240 + { 241 + u32 blocksize; 242 + 243 + if (node->level == 0 || 244 + in_range(node->bytenr, rc->block_group->start, 245 + rc->block_group->length)) { 246 + blocksize = rc->extent_root->fs_info->nodesize; 247 + set_extent_bits(&rc->processed_blocks, node->bytenr, 248 + node->bytenr + blocksize - 1, EXTENT_DIRTY); 249 + } 250 + node->processed = 1; 251 + } 252 + 160 253 161 254 static void mapping_tree_init(struct mapping_tree *tree) 162 255 { ··· 175 246 spin_lock_init(&tree->lock); 176 247 } 177 248 178 - static void backref_cache_init(struct backref_cache *cache) 179 - { 180 - int i; 181 - cache->rb_root = RB_ROOT; 182 - for (i = 0; i < BTRFS_MAX_LEVEL; i++) 183 - INIT_LIST_HEAD(&cache->pending[i]); 184 - INIT_LIST_HEAD(&cache->changed); 185 - INIT_LIST_HEAD(&cache->detached); 186 - INIT_LIST_HEAD(&cache->leaves); 187 - } 188 - 189 - static void backref_cache_cleanup(struct backref_cache *cache) 190 - { 191 - struct backref_node *node; 192 - int i; 193 - 194 - while (!list_empty(&cache->detached)) { 195 - node = list_entry(cache->detached.next, 196 - struct backref_node, list); 197 - remove_backref_node(cache, node); 198 - } 199 - 200 - while (!list_empty(&cache->leaves)) { 201 - node = list_entry(cache->leaves.next, 202 - struct backref_node, lower); 203 - remove_backref_node(cache, node); 204 - } 205 - 206 - cache->last_trans = 0; 207 - 208 - for (i = 0; i < BTRFS_MAX_LEVEL; i++) 209 - ASSERT(list_empty(&cache->pending[i])); 210 - ASSERT(list_empty(&cache->changed)); 211 - ASSERT(list_empty(&cache->detached)); 212 - ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); 213 - ASSERT(!cache->nr_nodes); 214 - ASSERT(!cache->nr_edges); 215 - } 216 - 217 - static struct backref_node *alloc_backref_node(struct backref_cache *cache) 218 - { 219 - struct backref_node *node; 220 - 221 - node = kzalloc(sizeof(*node), GFP_NOFS); 222 - if (node) { 223 - INIT_LIST_HEAD(&node->list); 224 - INIT_LIST_HEAD(&node->upper); 225 - INIT_LIST_HEAD(&node->lower); 226 - RB_CLEAR_NODE(&node->rb_node); 227 - cache->nr_nodes++; 228 - } 229 - return node; 230 - } 231 - 232 - static void free_backref_node(struct backref_cache *cache, 233 - struct backref_node *node) 
234 - { 235 - if (node) { 236 - cache->nr_nodes--; 237 - btrfs_put_root(node->root); 238 - kfree(node); 239 - } 240 - } 241 - 242 - static struct backref_edge *alloc_backref_edge(struct backref_cache *cache) 243 - { 244 - struct backref_edge *edge; 245 - 246 - edge = kzalloc(sizeof(*edge), GFP_NOFS); 247 - if (edge) 248 - cache->nr_edges++; 249 - return edge; 250 - } 251 - 252 - static void free_backref_edge(struct backref_cache *cache, 253 - struct backref_edge *edge) 254 - { 255 - if (edge) { 256 - cache->nr_edges--; 257 - kfree(edge); 258 - } 259 - } 260 - 261 - static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, 262 - struct rb_node *node) 263 - { 264 - struct rb_node **p = &root->rb_node; 265 - struct rb_node *parent = NULL; 266 - struct tree_entry *entry; 267 - 268 - while (*p) { 269 - parent = *p; 270 - entry = rb_entry(parent, struct tree_entry, rb_node); 271 - 272 - if (bytenr < entry->bytenr) 273 - p = &(*p)->rb_left; 274 - else if (bytenr > entry->bytenr) 275 - p = &(*p)->rb_right; 276 - else 277 - return parent; 278 - } 279 - 280 - rb_link_node(node, parent, p); 281 - rb_insert_color(node, root); 282 - return NULL; 283 - } 284 - 285 - static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) 286 - { 287 - struct rb_node *n = root->rb_node; 288 - struct tree_entry *entry; 289 - 290 - while (n) { 291 - entry = rb_entry(n, struct tree_entry, rb_node); 292 - 293 - if (bytenr < entry->bytenr) 294 - n = n->rb_left; 295 - else if (bytenr > entry->bytenr) 296 - n = n->rb_right; 297 - else 298 - return n; 299 - } 300 - return NULL; 301 - } 302 - 303 - static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr) 304 - { 305 - 306 - struct btrfs_fs_info *fs_info = NULL; 307 - struct backref_node *bnode = rb_entry(rb_node, struct backref_node, 308 - rb_node); 309 - if (bnode->root) 310 - fs_info = bnode->root->fs_info; 311 - btrfs_panic(fs_info, errno, 312 - "Inconsistency in backref cache found at offset %llu", 313 - bytenr); 314 - } 315 - 316 249 /* 317 250 * walk up backref nodes until reach node presents tree root 318 251 */ 319 - static struct backref_node *walk_up_backref(struct backref_node *node, 320 - struct backref_edge *edges[], 321 - int *index) 252 + static struct btrfs_backref_node *walk_up_backref( 253 + struct btrfs_backref_node *node, 254 + struct btrfs_backref_edge *edges[], int *index) 322 255 { 323 - struct backref_edge *edge; 256 + struct btrfs_backref_edge *edge; 324 257 int idx = *index; 325 258 326 259 while (!list_empty(&node->upper)) { 327 260 edge = list_entry(node->upper.next, 328 - struct backref_edge, list[LOWER]); 261 + struct btrfs_backref_edge, list[LOWER]); 329 262 edges[idx++] = edge; 330 263 node = edge->node[UPPER]; 331 264 } ··· 199 408 /* 200 409 * walk down backref nodes to find start of next reference path 201 410 */ 202 - static struct backref_node *walk_down_backref(struct backref_edge *edges[], 203 - int *index) 411 + static struct btrfs_backref_node *walk_down_backref( 412 + struct btrfs_backref_edge *edges[], int *index) 204 413 { 205 - struct backref_edge *edge; 206 - struct backref_node *lower; 414 + struct btrfs_backref_edge *edge; 415 + struct btrfs_backref_node *lower; 207 416 int idx = *index; 208 417 209 418 while (idx > 0) { ··· 214 423 continue; 215 424 } 216 425 edge = list_entry(edge->list[LOWER].next, 217 - struct backref_edge, list[LOWER]); 426 + struct btrfs_backref_edge, list[LOWER]); 218 427 edges[idx - 1] = edge; 219 428 *index = idx; 220 429 return edge->node[UPPER]; ··· 223 432 
return NULL; 224 433 } 225 434 226 - static void unlock_node_buffer(struct backref_node *node) 227 - { 228 - if (node->locked) { 229 - btrfs_tree_unlock(node->eb); 230 - node->locked = 0; 231 - } 232 - } 233 - 234 - static void drop_node_buffer(struct backref_node *node) 235 - { 236 - if (node->eb) { 237 - unlock_node_buffer(node); 238 - free_extent_buffer(node->eb); 239 - node->eb = NULL; 240 - } 241 - } 242 - 243 - static void drop_backref_node(struct backref_cache *tree, 244 - struct backref_node *node) 245 - { 246 - BUG_ON(!list_empty(&node->upper)); 247 - 248 - drop_node_buffer(node); 249 - list_del(&node->list); 250 - list_del(&node->lower); 251 - if (!RB_EMPTY_NODE(&node->rb_node)) 252 - rb_erase(&node->rb_node, &tree->rb_root); 253 - free_backref_node(tree, node); 254 - } 255 - 256 - /* 257 - * remove a backref node from the backref cache 258 - */ 259 - static void remove_backref_node(struct backref_cache *cache, 260 - struct backref_node *node) 261 - { 262 - struct backref_node *upper; 263 - struct backref_edge *edge; 264 - 265 - if (!node) 266 - return; 267 - 268 - BUG_ON(!node->lowest && !node->detached); 269 - while (!list_empty(&node->upper)) { 270 - edge = list_entry(node->upper.next, struct backref_edge, 271 - list[LOWER]); 272 - upper = edge->node[UPPER]; 273 - list_del(&edge->list[LOWER]); 274 - list_del(&edge->list[UPPER]); 275 - free_backref_edge(cache, edge); 276 - 277 - if (RB_EMPTY_NODE(&upper->rb_node)) { 278 - BUG_ON(!list_empty(&node->upper)); 279 - drop_backref_node(cache, node); 280 - node = upper; 281 - node->lowest = 1; 282 - continue; 283 - } 284 - /* 285 - * add the node to leaf node list if no other 286 - * child block cached. 287 - */ 288 - if (list_empty(&upper->lower)) { 289 - list_add_tail(&upper->lower, &cache->leaves); 290 - upper->lowest = 1; 291 - } 292 - } 293 - 294 - drop_backref_node(cache, node); 295 - } 296 - 297 - static void update_backref_node(struct backref_cache *cache, 298 - struct backref_node *node, u64 bytenr) 435 + static void update_backref_node(struct btrfs_backref_cache *cache, 436 + struct btrfs_backref_node *node, u64 bytenr) 299 437 { 300 438 struct rb_node *rb_node; 301 439 rb_erase(&node->rb_node, &cache->rb_root); 302 440 node->bytenr = bytenr; 303 - rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); 441 + rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node); 304 442 if (rb_node) 305 - backref_tree_panic(rb_node, -EEXIST, bytenr); 443 + btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST); 306 444 } 307 445 308 446 /* 309 447 * update backref cache after a transaction commit 310 448 */ 311 449 static int update_backref_cache(struct btrfs_trans_handle *trans, 312 - struct backref_cache *cache) 450 + struct btrfs_backref_cache *cache) 313 451 { 314 - struct backref_node *node; 452 + struct btrfs_backref_node *node; 315 453 int level = 0; 316 454 317 455 if (cache->last_trans == 0) { ··· 258 538 */ 259 539 while (!list_empty(&cache->detached)) { 260 540 node = list_entry(cache->detached.next, 261 - struct backref_node, list); 262 - remove_backref_node(cache, node); 541 + struct btrfs_backref_node, list); 542 + btrfs_backref_cleanup_node(cache, node); 263 543 } 264 544 265 545 while (!list_empty(&cache->changed)) { 266 546 node = list_entry(cache->changed.next, 267 - struct backref_node, list); 547 + struct btrfs_backref_node, list); 268 548 list_del_init(&node->list); 269 549 BUG_ON(node->pending); 270 550 update_backref_node(cache, node, node->new_bytenr); ··· 305 585 * 306 586 * Reloc 
tree after swap is considered dead, thus not considered as valid. 307 587 * This is enough for most callers, as they don't distinguish dead reloc root 308 - * from no reloc root. But should_ignore_root() below is a special case. 588 + * from no reloc root. But btrfs_should_ignore_reloc_root() below is a 589 + * special case. 309 590 */ 310 591 static bool have_reloc_root(struct btrfs_root *root) 311 592 { ··· 317 596 return true; 318 597 } 319 598 320 - static int should_ignore_root(struct btrfs_root *root) 599 + int btrfs_should_ignore_reloc_root(struct btrfs_root *root) 321 600 { 322 601 struct btrfs_root *reloc_root; 323 602 324 - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 603 + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 325 604 return 0; 326 605 327 606 /* This root has been merged with its reloc tree, we can ignore it */ ··· 343 622 */ 344 623 return 1; 345 624 } 625 + 346 626 /* 347 627 * find reloc tree by address of tree root 348 628 */ 349 - static struct btrfs_root *find_reloc_root(struct reloc_control *rc, 350 - u64 bytenr) 629 + struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr) 351 630 { 631 + struct reloc_control *rc = fs_info->reloc_ctl; 352 632 struct rb_node *rb_node; 353 633 struct mapping_node *node; 354 634 struct btrfs_root *root = NULL; 355 635 636 + ASSERT(rc); 356 637 spin_lock(&rc->reloc_root_tree.lock); 357 - rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr); 638 + rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr); 358 639 if (rb_node) { 359 640 node = rb_entry(rb_node, struct mapping_node, rb_node); 360 641 root = (struct btrfs_root *)node->data; ··· 365 642 return btrfs_grab_root(root); 366 643 } 367 644 368 - static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info, 369 - u64 root_objectid) 645 + /* 646 + * For useless nodes, do two major clean ups: 647 + * 648 + * - Cleanup the children edges and nodes 649 + * If child node is also orphan (no parent) during cleanup, then the child 650 + * node will also be cleaned up. 651 + * 652 + * - Freeing up leaves (level 0), keeps nodes detached 653 + * For nodes, the node is still cached as "detached" 654 + * 655 + * Return false if @node is not in the @useless_nodes list. 656 + * Return true if @node is in the @useless_nodes list. 
657 + */ 658 + static bool handle_useless_nodes(struct reloc_control *rc, 659 + struct btrfs_backref_node *node) 370 660 { 371 - struct btrfs_key key; 661 + struct btrfs_backref_cache *cache = &rc->backref_cache; 662 + struct list_head *useless_node = &cache->useless_node; 663 + bool ret = false; 372 664 373 - key.objectid = root_objectid; 374 - key.type = BTRFS_ROOT_ITEM_KEY; 375 - key.offset = (u64)-1; 665 + while (!list_empty(useless_node)) { 666 + struct btrfs_backref_node *cur; 376 667 377 - return btrfs_get_fs_root(fs_info, &key, false); 378 - } 668 + cur = list_first_entry(useless_node, struct btrfs_backref_node, 669 + list); 670 + list_del_init(&cur->list); 379 671 380 - static noinline_for_stack 381 - int find_inline_backref(struct extent_buffer *leaf, int slot, 382 - unsigned long *ptr, unsigned long *end) 383 - { 384 - struct btrfs_key key; 385 - struct btrfs_extent_item *ei; 386 - struct btrfs_tree_block_info *bi; 387 - u32 item_size; 672 + /* Only tree root nodes can be added to @useless_nodes */ 673 + ASSERT(list_empty(&cur->upper)); 388 674 389 - btrfs_item_key_to_cpu(leaf, &key, slot); 675 + if (cur == node) 676 + ret = true; 390 677 391 - item_size = btrfs_item_size_nr(leaf, slot); 392 - if (item_size < sizeof(*ei)) { 393 - btrfs_print_v0_err(leaf->fs_info); 394 - btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL); 395 - return 1; 678 + /* The node is the lowest node */ 679 + if (cur->lowest) { 680 + list_del_init(&cur->lower); 681 + cur->lowest = 0; 682 + } 683 + 684 + /* Cleanup the lower edges */ 685 + while (!list_empty(&cur->lower)) { 686 + struct btrfs_backref_edge *edge; 687 + struct btrfs_backref_node *lower; 688 + 689 + edge = list_entry(cur->lower.next, 690 + struct btrfs_backref_edge, list[UPPER]); 691 + list_del(&edge->list[UPPER]); 692 + list_del(&edge->list[LOWER]); 693 + lower = edge->node[LOWER]; 694 + btrfs_backref_free_edge(cache, edge); 695 + 696 + /* Child node is also orphan, queue for cleanup */ 697 + if (list_empty(&lower->upper)) 698 + list_add(&lower->list, useless_node); 699 + } 700 + /* Mark this block processed for relocation */ 701 + mark_block_processed(rc, cur); 702 + 703 + /* 704 + * Backref nodes for tree leaves are deleted from the cache. 705 + * Backref nodes for upper level tree blocks are left in the 706 + * cache to avoid unnecessary backref lookup. 707 + */ 708 + if (cur->level > 0) { 709 + list_add(&cur->list, &cache->detached); 710 + cur->detached = 1; 711 + } else { 712 + rb_erase(&cur->rb_node, &cache->rb_root); 713 + btrfs_backref_free_node(cache, cur); 714 + } 396 715 } 397 - ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); 398 - WARN_ON(!(btrfs_extent_flags(leaf, ei) & 399 - BTRFS_EXTENT_FLAG_TREE_BLOCK)); 400 - 401 - if (key.type == BTRFS_EXTENT_ITEM_KEY && 402 - item_size <= sizeof(*ei) + sizeof(*bi)) { 403 - WARN_ON(item_size < sizeof(*ei) + sizeof(*bi)); 404 - return 1; 405 - } 406 - if (key.type == BTRFS_METADATA_ITEM_KEY && 407 - item_size <= sizeof(*ei)) { 408 - WARN_ON(item_size < sizeof(*ei)); 409 - return 1; 410 - } 411 - 412 - if (key.type == BTRFS_EXTENT_ITEM_KEY) { 413 - bi = (struct btrfs_tree_block_info *)(ei + 1); 414 - *ptr = (unsigned long)(bi + 1); 415 - } else { 416 - *ptr = (unsigned long)(ei + 1); 417 - } 418 - *end = (unsigned long)ei + item_size; 419 - return 0; 716 + return ret; 420 717 } 421 718 422 719 /* 423 - * build backref tree for a given tree block. 
root of the backref tree
424 - * corresponds the tree block, leaves of the backref tree correspond
425 - * roots of b-trees that reference the tree block.
720 + * Build backref tree for a given tree block. The root of the backref tree
721 + * corresponds to the tree block; leaves of the backref tree correspond to
722 + * roots of b-trees that reference the tree block.
426 723 *
427 - * the basic idea of this function is check backrefs of a given block
428 - * to find upper level blocks that reference the block, and then check
429 - * backrefs of these upper level blocks recursively. the recursion stop
430 - * when tree root is reached or backrefs for the block is cached.
724 + * The basic idea of this function is to check backrefs of a given block to
725 + * find upper level blocks that reference the block, and then check backrefs
726 + * of these upper level blocks recursively. The recursion stops when the tree
727 + * root is reached or backrefs for the block are cached.
431 728 *
432 - * NOTE: if we find backrefs for a block are cached, we know backrefs
433 - * for all upper level blocks that directly/indirectly reference the
434 - * block are also cached.
729 + * NOTE: if we find that backrefs for a block are cached, we know backrefs for
730 + * all upper level blocks that directly/indirectly reference the block are also
731 + * cached.
435 732 */
436 - static noinline_for_stack
437 - struct backref_node *build_backref_tree(struct reloc_control *rc,
438 - struct btrfs_key *node_key,
439 - int level, u64 bytenr)
733 + static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
734 + struct reloc_control *rc, struct btrfs_key *node_key,
735 + int level, u64 bytenr)
440 736 {
441 - struct backref_cache *cache = &rc->backref_cache;
442 - struct btrfs_path *path1; /* For searching extent root */
443 - struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
444 - struct extent_buffer *eb;
445 - struct btrfs_root *root;
446 - struct backref_node *cur;
447 - struct backref_node *upper;
448 - struct backref_node *lower;
449 - struct backref_node *node = NULL;
450 - struct backref_node *exist = NULL;
451 - struct backref_edge *edge;
452 - struct rb_node *rb_node;
453 - struct btrfs_key key;
454 - unsigned long end;
455 - unsigned long ptr;
456 - LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
457 - LIST_HEAD(useless);
458 - int cowonly;
737 + struct btrfs_backref_iter *iter;
738 + struct btrfs_backref_cache *cache = &rc->backref_cache;
739 + /* For searching parent of TREE_BLOCK_REF */
740 + struct btrfs_path *path;
741 + struct btrfs_backref_node *cur;
742 + struct btrfs_backref_node *node = NULL;
743 + struct btrfs_backref_edge *edge;
459 744 int ret;
460 745 int err = 0;
461 - bool need_check = true;
462 746
463 - path1 = btrfs_alloc_path();
464 - path2 = btrfs_alloc_path();
465 - if (!path1 || !path2) {
747 + iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
748 + if (!iter)
749 + return ERR_PTR(-ENOMEM);
750 + path = btrfs_alloc_path();
751 + if (!path) {
466 752 err = -ENOMEM;
467 753 goto out;
468 754 }
469 755
470 - node = alloc_backref_node(cache);
756 + node = btrfs_backref_alloc_node(cache, bytenr, level);
471 757 if (!node) {
472 758 err = -ENOMEM;
473 759 goto out;
474 760 }
475 761
476 - node->bytenr = bytenr;
477 - node->level = level;
478 762 node->lowest = 1;
479 763 cur = node;
480 - again:
481 - end = 0;
482 - ptr = 0;
483 - key.objectid = cur->bytenr;
484 - key.type = BTRFS_METADATA_ITEM_KEY;
485 - key.offset = (u64)-1;
486 764 487 - path1->search_commit_root = 1; 488 - path1->skip_locking = 1; 489 - ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1, 490 - 0, 0); 765 + /* Breadth-first search to build backref cache */ 766 + do { 767 + ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, 768 + cur); 769 + if (ret < 0) { 770 + err = ret; 771 + goto out; 772 + } 773 + edge = list_first_entry_or_null(&cache->pending_edge, 774 + struct btrfs_backref_edge, list[UPPER]); 775 + /* 776 + * The pending list isn't empty, take the first block to 777 + * process 778 + */ 779 + if (edge) { 780 + list_del_init(&edge->list[UPPER]); 781 + cur = edge->node[UPPER]; 782 + } 783 + } while (edge); 784 + 785 + /* Finish the upper linkage of newly added edges/nodes */ 786 + ret = btrfs_backref_finish_upper_links(cache, node); 491 787 if (ret < 0) { 492 788 err = ret; 493 789 goto out; 494 790 } 495 - ASSERT(ret); 496 - ASSERT(path1->slots[0]); 497 791 498 - path1->slots[0]--; 499 - 500 - WARN_ON(cur->checked); 501 - if (!list_empty(&cur->upper)) { 502 - /* 503 - * the backref was added previously when processing 504 - * backref of type BTRFS_TREE_BLOCK_REF_KEY 505 - */ 506 - ASSERT(list_is_singular(&cur->upper)); 507 - edge = list_entry(cur->upper.next, struct backref_edge, 508 - list[LOWER]); 509 - ASSERT(list_empty(&edge->list[UPPER])); 510 - exist = edge->node[UPPER]; 511 - /* 512 - * add the upper level block to pending list if we need 513 - * check its backrefs 514 - */ 515 - if (!exist->checked) 516 - list_add_tail(&edge->list[UPPER], &list); 517 - } else { 518 - exist = NULL; 519 - } 520 - 521 - while (1) { 522 - cond_resched(); 523 - eb = path1->nodes[0]; 524 - 525 - if (ptr >= end) { 526 - if (path1->slots[0] >= btrfs_header_nritems(eb)) { 527 - ret = btrfs_next_leaf(rc->extent_root, path1); 528 - if (ret < 0) { 529 - err = ret; 530 - goto out; 531 - } 532 - if (ret > 0) 533 - break; 534 - eb = path1->nodes[0]; 535 - } 536 - 537 - btrfs_item_key_to_cpu(eb, &key, path1->slots[0]); 538 - if (key.objectid != cur->bytenr) { 539 - WARN_ON(exist); 540 - break; 541 - } 542 - 543 - if (key.type == BTRFS_EXTENT_ITEM_KEY || 544 - key.type == BTRFS_METADATA_ITEM_KEY) { 545 - ret = find_inline_backref(eb, path1->slots[0], 546 - &ptr, &end); 547 - if (ret) 548 - goto next; 549 - } 550 - } 551 - 552 - if (ptr < end) { 553 - /* update key for inline back ref */ 554 - struct btrfs_extent_inline_ref *iref; 555 - int type; 556 - iref = (struct btrfs_extent_inline_ref *)ptr; 557 - type = btrfs_get_extent_inline_ref_type(eb, iref, 558 - BTRFS_REF_TYPE_BLOCK); 559 - if (type == BTRFS_REF_TYPE_INVALID) { 560 - err = -EUCLEAN; 561 - goto out; 562 - } 563 - key.type = type; 564 - key.offset = btrfs_extent_inline_ref_offset(eb, iref); 565 - 566 - WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY && 567 - key.type != BTRFS_SHARED_BLOCK_REF_KEY); 568 - } 569 - 570 - /* 571 - * Parent node found and matches current inline ref, no need to 572 - * rebuild this node for this inline ref. 573 - */ 574 - if (exist && 575 - ((key.type == BTRFS_TREE_BLOCK_REF_KEY && 576 - exist->owner == key.offset) || 577 - (key.type == BTRFS_SHARED_BLOCK_REF_KEY && 578 - exist->bytenr == key.offset))) { 579 - exist = NULL; 580 - goto next; 581 - } 582 - 583 - /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ 584 - if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { 585 - if (key.objectid == key.offset) { 586 - /* 587 - * Only root blocks of reloc trees use backref 588 - * pointing to itself. 
589 - */ 590 - root = find_reloc_root(rc, cur->bytenr); 591 - ASSERT(root); 592 - cur->root = root; 593 - break; 594 - } 595 - 596 - edge = alloc_backref_edge(cache); 597 - if (!edge) { 598 - err = -ENOMEM; 599 - goto out; 600 - } 601 - rb_node = tree_search(&cache->rb_root, key.offset); 602 - if (!rb_node) { 603 - upper = alloc_backref_node(cache); 604 - if (!upper) { 605 - free_backref_edge(cache, edge); 606 - err = -ENOMEM; 607 - goto out; 608 - } 609 - upper->bytenr = key.offset; 610 - upper->level = cur->level + 1; 611 - /* 612 - * backrefs for the upper level block isn't 613 - * cached, add the block to pending list 614 - */ 615 - list_add_tail(&edge->list[UPPER], &list); 616 - } else { 617 - upper = rb_entry(rb_node, struct backref_node, 618 - rb_node); 619 - ASSERT(upper->checked); 620 - INIT_LIST_HEAD(&edge->list[UPPER]); 621 - } 622 - list_add_tail(&edge->list[LOWER], &cur->upper); 623 - edge->node[LOWER] = cur; 624 - edge->node[UPPER] = upper; 625 - 626 - goto next; 627 - } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { 628 - err = -EINVAL; 629 - btrfs_print_v0_err(rc->extent_root->fs_info); 630 - btrfs_handle_fs_error(rc->extent_root->fs_info, err, 631 - NULL); 632 - goto out; 633 - } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { 634 - goto next; 635 - } 636 - 637 - /* 638 - * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset 639 - * means the root objectid. We need to search the tree to get 640 - * its parent bytenr. 641 - */ 642 - root = read_fs_root(rc->extent_root->fs_info, key.offset); 643 - if (IS_ERR(root)) { 644 - err = PTR_ERR(root); 645 - goto out; 646 - } 647 - 648 - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 649 - cur->cowonly = 1; 650 - 651 - if (btrfs_root_level(&root->root_item) == cur->level) { 652 - /* tree root */ 653 - ASSERT(btrfs_root_bytenr(&root->root_item) == 654 - cur->bytenr); 655 - if (should_ignore_root(root)) { 656 - btrfs_put_root(root); 657 - list_add(&cur->list, &useless); 658 - } else { 659 - cur->root = root; 660 - } 661 - break; 662 - } 663 - 664 - level = cur->level + 1; 665 - 666 - /* Search the tree to find parent blocks referring the block. 
*/ 667 - path2->search_commit_root = 1; 668 - path2->skip_locking = 1; 669 - path2->lowest_level = level; 670 - ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0); 671 - path2->lowest_level = 0; 672 - if (ret < 0) { 673 - btrfs_put_root(root); 674 - err = ret; 675 - goto out; 676 - } 677 - if (ret > 0 && path2->slots[level] > 0) 678 - path2->slots[level]--; 679 - 680 - eb = path2->nodes[level]; 681 - if (btrfs_node_blockptr(eb, path2->slots[level]) != 682 - cur->bytenr) { 683 - btrfs_err(root->fs_info, 684 - "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", 685 - cur->bytenr, level - 1, 686 - root->root_key.objectid, 687 - node_key->objectid, node_key->type, 688 - node_key->offset); 689 - btrfs_put_root(root); 690 - err = -ENOENT; 691 - goto out; 692 - } 693 - lower = cur; 694 - need_check = true; 695 - 696 - /* Add all nodes and edges in the path */ 697 - for (; level < BTRFS_MAX_LEVEL; level++) { 698 - if (!path2->nodes[level]) { 699 - ASSERT(btrfs_root_bytenr(&root->root_item) == 700 - lower->bytenr); 701 - if (should_ignore_root(root)) { 702 - btrfs_put_root(root); 703 - list_add(&lower->list, &useless); 704 - } else { 705 - lower->root = root; 706 - } 707 - break; 708 - } 709 - 710 - edge = alloc_backref_edge(cache); 711 - if (!edge) { 712 - btrfs_put_root(root); 713 - err = -ENOMEM; 714 - goto out; 715 - } 716 - 717 - eb = path2->nodes[level]; 718 - rb_node = tree_search(&cache->rb_root, eb->start); 719 - if (!rb_node) { 720 - upper = alloc_backref_node(cache); 721 - if (!upper) { 722 - btrfs_put_root(root); 723 - free_backref_edge(cache, edge); 724 - err = -ENOMEM; 725 - goto out; 726 - } 727 - upper->bytenr = eb->start; 728 - upper->owner = btrfs_header_owner(eb); 729 - upper->level = lower->level + 1; 730 - if (!test_bit(BTRFS_ROOT_REF_COWS, 731 - &root->state)) 732 - upper->cowonly = 1; 733 - 734 - /* 735 - * if we know the block isn't shared 736 - * we can void checking its backrefs. 737 - */ 738 - if (btrfs_block_can_be_shared(root, eb)) 739 - upper->checked = 0; 740 - else 741 - upper->checked = 1; 742 - 743 - /* 744 - * add the block to pending list if we 745 - * need check its backrefs, we only do this once 746 - * while walking up a tree as we will catch 747 - * anything else later on. 
748 - */ 749 - if (!upper->checked && need_check) { 750 - need_check = false; 751 - list_add_tail(&edge->list[UPPER], 752 - &list); 753 - } else { 754 - if (upper->checked) 755 - need_check = true; 756 - INIT_LIST_HEAD(&edge->list[UPPER]); 757 - } 758 - } else { 759 - upper = rb_entry(rb_node, struct backref_node, 760 - rb_node); 761 - ASSERT(upper->checked); 762 - INIT_LIST_HEAD(&edge->list[UPPER]); 763 - if (!upper->owner) 764 - upper->owner = btrfs_header_owner(eb); 765 - } 766 - list_add_tail(&edge->list[LOWER], &lower->upper); 767 - edge->node[LOWER] = lower; 768 - edge->node[UPPER] = upper; 769 - 770 - if (rb_node) { 771 - btrfs_put_root(root); 772 - break; 773 - } 774 - lower = upper; 775 - upper = NULL; 776 - } 777 - btrfs_release_path(path2); 778 - next: 779 - if (ptr < end) { 780 - ptr += btrfs_extent_inline_ref_size(key.type); 781 - if (ptr >= end) { 782 - WARN_ON(ptr > end); 783 - ptr = 0; 784 - end = 0; 785 - } 786 - } 787 - if (ptr >= end) 788 - path1->slots[0]++; 789 - } 790 - btrfs_release_path(path1); 791 - 792 - cur->checked = 1; 793 - WARN_ON(exist); 794 - 795 - /* the pending list isn't empty, take the first block to process */ 796 - if (!list_empty(&list)) { 797 - edge = list_entry(list.next, struct backref_edge, list[UPPER]); 798 - list_del_init(&edge->list[UPPER]); 799 - cur = edge->node[UPPER]; 800 - goto again; 801 - } 802 - 803 - /* 804 - * everything goes well, connect backref nodes and insert backref nodes 805 - * into the cache. 806 - */ 807 - ASSERT(node->checked); 808 - cowonly = node->cowonly; 809 - if (!cowonly) { 810 - rb_node = tree_insert(&cache->rb_root, node->bytenr, 811 - &node->rb_node); 812 - if (rb_node) 813 - backref_tree_panic(rb_node, -EEXIST, node->bytenr); 814 - list_add_tail(&node->lower, &cache->leaves); 815 - } 816 - 817 - list_for_each_entry(edge, &node->upper, list[LOWER]) 818 - list_add_tail(&edge->list[UPPER], &list); 819 - 820 - while (!list_empty(&list)) { 821 - edge = list_entry(list.next, struct backref_edge, list[UPPER]); 822 - list_del_init(&edge->list[UPPER]); 823 - upper = edge->node[UPPER]; 824 - if (upper->detached) { 825 - list_del(&edge->list[LOWER]); 826 - lower = edge->node[LOWER]; 827 - free_backref_edge(cache, edge); 828 - if (list_empty(&lower->upper)) 829 - list_add(&lower->list, &useless); 830 - continue; 831 - } 832 - 833 - if (!RB_EMPTY_NODE(&upper->rb_node)) { 834 - if (upper->lowest) { 835 - list_del_init(&upper->lower); 836 - upper->lowest = 0; 837 - } 838 - 839 - list_add_tail(&edge->list[UPPER], &upper->lower); 840 - continue; 841 - } 842 - 843 - if (!upper->checked) { 844 - /* 845 - * Still want to blow up for developers since this is a 846 - * logic bug. 847 - */ 848 - ASSERT(0); 849 - err = -EINVAL; 850 - goto out; 851 - } 852 - if (cowonly != upper->cowonly) { 853 - ASSERT(0); 854 - err = -EINVAL; 855 - goto out; 856 - } 857 - 858 - if (!cowonly) { 859 - rb_node = tree_insert(&cache->rb_root, upper->bytenr, 860 - &upper->rb_node); 861 - if (rb_node) 862 - backref_tree_panic(rb_node, -EEXIST, 863 - upper->bytenr); 864 - } 865 - 866 - list_add_tail(&edge->list[UPPER], &upper->lower); 867 - 868 - list_for_each_entry(edge, &upper->upper, list[LOWER]) 869 - list_add_tail(&edge->list[UPPER], &list); 870 - } 871 - /* 872 - * process useless backref nodes. backref nodes for tree leaves 873 - * are deleted from the cache. backref nodes for upper level 874 - * tree blocks are left in the cache to avoid unnecessary backref 875 - * lookup. 
876 - */ 877 - while (!list_empty(&useless)) { 878 - upper = list_entry(useless.next, struct backref_node, list); 879 - list_del_init(&upper->list); 880 - ASSERT(list_empty(&upper->upper)); 881 - if (upper == node) 882 - node = NULL; 883 - if (upper->lowest) { 884 - list_del_init(&upper->lower); 885 - upper->lowest = 0; 886 - } 887 - while (!list_empty(&upper->lower)) { 888 - edge = list_entry(upper->lower.next, 889 - struct backref_edge, list[UPPER]); 890 - list_del(&edge->list[UPPER]); 891 - list_del(&edge->list[LOWER]); 892 - lower = edge->node[LOWER]; 893 - free_backref_edge(cache, edge); 894 - 895 - if (list_empty(&lower->upper)) 896 - list_add(&lower->list, &useless); 897 - } 898 - __mark_block_processed(rc, upper); 899 - if (upper->level > 0) { 900 - list_add(&upper->list, &cache->detached); 901 - upper->detached = 1; 902 - } else { 903 - rb_erase(&upper->rb_node, &cache->rb_root); 904 - free_backref_node(cache, upper); 905 - } 906 - } 792 + if (handle_useless_nodes(rc, node)) 793 + node = NULL; 907 794 out: 908 - btrfs_free_path(path1); 909 - btrfs_free_path(path2); 795 + btrfs_backref_iter_free(iter); 796 + btrfs_free_path(path); 910 797 if (err) { 911 - while (!list_empty(&useless)) { 912 - lower = list_entry(useless.next, 913 - struct backref_node, list); 914 - list_del_init(&lower->list); 915 - } 916 - while (!list_empty(&list)) { 917 - edge = list_first_entry(&list, struct backref_edge, 918 - list[UPPER]); 919 - list_del(&edge->list[UPPER]); 920 - list_del(&edge->list[LOWER]); 921 - lower = edge->node[LOWER]; 922 - upper = edge->node[UPPER]; 923 - free_backref_edge(cache, edge); 924 - 925 - /* 926 - * Lower is no longer linked to any upper backref nodes 927 - * and isn't in the cache, we can free it ourselves. 928 - */ 929 - if (list_empty(&lower->upper) && 930 - RB_EMPTY_NODE(&lower->rb_node)) 931 - list_add(&lower->list, &useless); 932 - 933 - if (!RB_EMPTY_NODE(&upper->rb_node)) 934 - continue; 935 - 936 - /* Add this guy's upper edges to the list to process */ 937 - list_for_each_entry(edge, &upper->upper, list[LOWER]) 938 - list_add_tail(&edge->list[UPPER], &list); 939 - if (list_empty(&upper->upper)) 940 - list_add(&upper->list, &useless); 941 - } 942 - 943 - while (!list_empty(&useless)) { 944 - lower = list_entry(useless.next, 945 - struct backref_node, list); 946 - list_del_init(&lower->list); 947 - if (lower == node) 948 - node = NULL; 949 - free_backref_node(cache, lower); 950 - } 951 - 952 - remove_backref_node(cache, node); 798 + btrfs_backref_error_cleanup(cache, node); 953 799 return ERR_PTR(err); 954 800 } 955 801 ASSERT(!node || !node->detached); 802 + ASSERT(list_empty(&cache->useless_node) && 803 + list_empty(&cache->pending_edge)); 956 804 return node; 957 805 } 958 806 ··· 538 1244 struct btrfs_root *dest) 539 1245 { 540 1246 struct btrfs_root *reloc_root = src->reloc_root; 541 - struct backref_cache *cache = &rc->backref_cache; 542 - struct backref_node *node = NULL; 543 - struct backref_node *new_node; 544 - struct backref_edge *edge; 545 - struct backref_edge *new_edge; 1247 + struct btrfs_backref_cache *cache = &rc->backref_cache; 1248 + struct btrfs_backref_node *node = NULL; 1249 + struct btrfs_backref_node *new_node; 1250 + struct btrfs_backref_edge *edge; 1251 + struct btrfs_backref_edge *new_edge; 546 1252 struct rb_node *rb_node; 547 1253 548 1254 if (cache->last_trans > 0) 549 1255 update_backref_cache(trans, cache); 550 1256 551 - rb_node = tree_search(&cache->rb_root, src->commit_root->start); 1257 + rb_node = rb_simple_search(&cache->rb_root, 
src->commit_root->start); 552 1258 if (rb_node) { 553 - node = rb_entry(rb_node, struct backref_node, rb_node); 1259 + node = rb_entry(rb_node, struct btrfs_backref_node, rb_node); 554 1260 if (node->detached) 555 1261 node = NULL; 556 1262 else ··· 558 1264 } 559 1265 560 1266 if (!node) { 561 - rb_node = tree_search(&cache->rb_root, 562 - reloc_root->commit_root->start); 1267 + rb_node = rb_simple_search(&cache->rb_root, 1268 + reloc_root->commit_root->start); 563 1269 if (rb_node) { 564 - node = rb_entry(rb_node, struct backref_node, 1270 + node = rb_entry(rb_node, struct btrfs_backref_node, 565 1271 rb_node); 566 1272 BUG_ON(node->detached); 567 1273 } ··· 570 1276 if (!node) 571 1277 return 0; 572 1278 573 - new_node = alloc_backref_node(cache); 1279 + new_node = btrfs_backref_alloc_node(cache, dest->node->start, 1280 + node->level); 574 1281 if (!new_node) 575 1282 return -ENOMEM; 576 1283 577 - new_node->bytenr = dest->node->start; 578 - new_node->level = node->level; 579 1284 new_node->lowest = node->lowest; 580 1285 new_node->checked = 1; 581 1286 new_node->root = btrfs_grab_root(dest); ··· 582 1289 583 1290 if (!node->lowest) { 584 1291 list_for_each_entry(edge, &node->lower, list[UPPER]) { 585 - new_edge = alloc_backref_edge(cache); 1292 + new_edge = btrfs_backref_alloc_edge(cache); 586 1293 if (!new_edge) 587 1294 goto fail; 588 1295 589 - new_edge->node[UPPER] = new_node; 590 - new_edge->node[LOWER] = edge->node[LOWER]; 591 - list_add_tail(&new_edge->list[UPPER], 592 - &new_node->lower); 1296 + btrfs_backref_link_edge(new_edge, edge->node[LOWER], 1297 + new_node, LINK_UPPER); 593 1298 } 594 1299 } else { 595 1300 list_add_tail(&new_node->lower, &cache->leaves); 596 1301 } 597 1302 598 - rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 599 - &new_node->rb_node); 1303 + rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr, 1304 + &new_node->rb_node); 600 1305 if (rb_node) 601 - backref_tree_panic(rb_node, -EEXIST, new_node->bytenr); 1306 + btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST); 602 1307 603 1308 if (!new_node->lowest) { 604 1309 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { ··· 608 1317 fail: 609 1318 while (!list_empty(&new_node->lower)) { 610 1319 new_edge = list_entry(new_node->lower.next, 611 - struct backref_edge, list[UPPER]); 1320 + struct btrfs_backref_edge, list[UPPER]); 612 1321 list_del(&new_edge->list[UPPER]); 613 - free_backref_edge(cache, new_edge); 1322 + btrfs_backref_free_edge(cache, new_edge); 614 1323 } 615 - free_backref_node(cache, new_node); 1324 + btrfs_backref_free_node(cache, new_node); 616 1325 return -ENOMEM; 617 1326 } 618 1327 ··· 634 1343 node->data = root; 635 1344 636 1345 spin_lock(&rc->reloc_root_tree.lock); 637 - rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 638 - node->bytenr, &node->rb_node); 1346 + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, 1347 + node->bytenr, &node->rb_node); 639 1348 spin_unlock(&rc->reloc_root_tree.lock); 640 1349 if (rb_node) { 641 1350 btrfs_panic(fs_info, -EEXIST, ··· 661 1370 662 1371 if (rc && root->node) { 663 1372 spin_lock(&rc->reloc_root_tree.lock); 664 - rb_node = tree_search(&rc->reloc_root_tree.rb_root, 665 - root->commit_root->start); 1373 + rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, 1374 + root->commit_root->start); 666 1375 if (rb_node) { 667 1376 node = rb_entry(rb_node, struct mapping_node, rb_node); 668 1377 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); ··· 705 1414 struct reloc_control *rc = 
fs_info->reloc_ctl; 706 1415 707 1416 spin_lock(&rc->reloc_root_tree.lock); 708 - rb_node = tree_search(&rc->reloc_root_tree.rb_root, 709 - root->commit_root->start); 1417 + rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, 1418 + root->commit_root->start); 710 1419 if (rb_node) { 711 1420 node = rb_entry(rb_node, struct mapping_node, rb_node); 712 1421 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); ··· 719 1428 720 1429 spin_lock(&rc->reloc_root_tree.lock); 721 1430 node->bytenr = root->node->start; 722 - rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 723 - node->bytenr, &node->rb_node); 1431 + rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, 1432 + node->bytenr, &node->rb_node); 724 1433 spin_unlock(&rc->reloc_root_tree.lock); 725 1434 if (rb_node) 726 - backref_tree_panic(rb_node, -EEXIST, node->bytenr); 1435 + btrfs_backref_panic(fs_info, node->bytenr, -EEXIST); 727 1436 return 0; 728 1437 } 729 1438 ··· 796 1505 797 1506 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key); 798 1507 BUG_ON(IS_ERR(reloc_root)); 799 - set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state); 1508 + set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); 800 1509 reloc_root->last_trans = trans->transid; 801 1510 return reloc_root; 802 1511 } ··· 970 1679 return NULL; 971 1680 } 972 1681 973 - static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group) 974 - { 975 - if (bytenr >= block_group->start && 976 - bytenr < block_group->start + block_group->length) 977 - return 1; 978 - return 0; 979 - } 980 - 981 1682 /* 982 1683 * get new location of data 983 1684 */ ··· 1067 1784 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1068 1785 if (bytenr == 0) 1069 1786 continue; 1070 - if (!in_block_group(bytenr, rc->block_group)) 1787 + if (!in_range(bytenr, rc->block_group->start, 1788 + rc->block_group->length)) 1071 1789 continue; 1072 1790 1073 1791 /* ··· 1224 1940 level = btrfs_header_level(parent); 1225 1941 BUG_ON(level < lowest_level); 1226 1942 1227 - ret = btrfs_bin_search(parent, &key, level, &slot); 1943 + ret = btrfs_bin_search(parent, &key, &slot); 1228 1944 if (ret < 0) 1229 1945 break; 1230 1946 if (ret && slot > 0) ··· 1844 2560 struct btrfs_root, root_list); 1845 2561 list_del_init(&reloc_root->root_list); 1846 2562 1847 - root = read_fs_root(fs_info, reloc_root->root_key.offset); 2563 + root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, 2564 + false); 1848 2565 BUG_ON(IS_ERR(root)); 1849 2566 BUG_ON(root->reloc_root != reloc_root); 1850 2567 ··· 1873 2588 static noinline_for_stack 1874 2589 void free_reloc_roots(struct list_head *list) 1875 2590 { 1876 - struct btrfs_root *reloc_root; 2591 + struct btrfs_root *reloc_root, *tmp; 1877 2592 1878 - while (!list_empty(list)) { 1879 - reloc_root = list_entry(list->next, struct btrfs_root, 1880 - root_list); 2593 + list_for_each_entry_safe(reloc_root, tmp, list, root_list) 1881 2594 __del_reloc_root(reloc_root); 1882 - } 1883 2595 } 1884 2596 1885 2597 static noinline_for_stack ··· 1906 2624 reloc_root = list_entry(reloc_roots.next, 1907 2625 struct btrfs_root, root_list); 1908 2626 2627 + root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, 2628 + false); 1909 2629 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 1910 - root = read_fs_root(fs_info, 1911 - reloc_root->root_key.offset); 1912 2630 BUG_ON(IS_ERR(root)); 1913 2631 BUG_ON(root->reloc_root != reloc_root); 1914 - 1915 2632 ret = merge_reloc_root(rc, root); 1916 2633 btrfs_put_root(root); 1917 2634 if (ret) { ··· 1920 
2639 goto out; 1921 2640 } 1922 2641 } else { 2642 + if (!IS_ERR(root)) { 2643 + if (root->reloc_root == reloc_root) { 2644 + root->reloc_root = NULL; 2645 + btrfs_put_root(reloc_root); 2646 + } 2647 + clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, 2648 + &root->state); 2649 + btrfs_put_root(root); 2650 + } 2651 + 1923 2652 list_del_init(&reloc_root->root_list); 1924 2653 /* Don't forget to queue this reloc root for cleanup */ 1925 2654 list_add_tail(&reloc_root->reloc_dirty_list, ··· 1944 2653 out: 1945 2654 if (ret) { 1946 2655 btrfs_handle_fs_error(fs_info, ret, NULL); 1947 - if (!list_empty(&reloc_roots)) 1948 - free_reloc_roots(&reloc_roots); 2656 + free_reloc_roots(&reloc_roots); 1949 2657 1950 2658 /* new reloc root may be added */ 1951 2659 mutex_lock(&fs_info->reloc_mutex); 1952 2660 list_splice_init(&rc->reloc_roots, &reloc_roots); 1953 2661 mutex_unlock(&fs_info->reloc_mutex); 1954 - if (!list_empty(&reloc_roots)) 1955 - free_reloc_roots(&reloc_roots); 2662 + free_reloc_roots(&reloc_roots); 1956 2663 } 1957 2664 1958 2665 /* ··· 1991 2702 if (reloc_root->last_trans == trans->transid) 1992 2703 return 0; 1993 2704 1994 - root = read_fs_root(fs_info, reloc_root->root_key.offset); 2705 + root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); 1995 2706 BUG_ON(IS_ERR(root)); 1996 2707 BUG_ON(root->reloc_root != reloc_root); 1997 2708 ret = btrfs_record_root_in_trans(trans, root); ··· 2003 2714 static noinline_for_stack 2004 2715 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2005 2716 struct reloc_control *rc, 2006 - struct backref_node *node, 2007 - struct backref_edge *edges[]) 2717 + struct btrfs_backref_node *node, 2718 + struct btrfs_backref_edge *edges[]) 2008 2719 { 2009 - struct backref_node *next; 2720 + struct btrfs_backref_node *next; 2010 2721 struct btrfs_root *root; 2011 2722 int index = 0; 2012 2723 ··· 2016 2727 next = walk_up_backref(next, edges, &index); 2017 2728 root = next->root; 2018 2729 BUG_ON(!root); 2019 - BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state)); 2730 + BUG_ON(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)); 2020 2731 2021 2732 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2022 2733 record_reloc_root_in_trans(trans, root); ··· 2035 2746 ASSERT(next->root); 2036 2747 list_add_tail(&next->list, 2037 2748 &rc->backref_cache.changed); 2038 - __mark_block_processed(rc, next); 2749 + mark_block_processed(rc, next); 2039 2750 break; 2040 2751 } 2041 2752 ··· 2060 2771 } 2061 2772 2062 2773 /* 2063 - * select a tree root for relocation. return NULL if the block 2064 - * is reference counted. we should use do_relocation() in this 2065 - * case. return a tree root pointer if the block isn't reference 2066 - * counted. return -ENOENT if the block is root of reloc tree. 2774 + * Select a tree root for relocation. 2775 + * 2776 + * Return NULL if the block is not shareable. We should use do_relocation() in 2777 + * this case. 2778 + * 2779 + * Return a tree root pointer if the block is shareable. 2780 + * Return -ENOENT if the block is root of reloc tree. 
2067 2781 */ 2068 2782 static noinline_for_stack 2069 - struct btrfs_root *select_one_root(struct backref_node *node) 2783 + struct btrfs_root *select_one_root(struct btrfs_backref_node *node) 2070 2784 { 2071 - struct backref_node *next; 2785 + struct btrfs_backref_node *next; 2072 2786 struct btrfs_root *root; 2073 2787 struct btrfs_root *fs_root = NULL; 2074 - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2788 + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2075 2789 int index = 0; 2076 2790 2077 2791 next = node; ··· 2084 2792 root = next->root; 2085 2793 BUG_ON(!root); 2086 2794 2087 - /* no other choice for non-references counted tree */ 2088 - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 2795 + /* No other choice for non-shareable tree */ 2796 + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 2089 2797 return root; 2090 2798 2091 2799 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) ··· 2106 2814 2107 2815 static noinline_for_stack 2108 2816 u64 calcu_metadata_size(struct reloc_control *rc, 2109 - struct backref_node *node, int reserve) 2817 + struct btrfs_backref_node *node, int reserve) 2110 2818 { 2111 2819 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2112 - struct backref_node *next = node; 2113 - struct backref_edge *edge; 2114 - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2820 + struct btrfs_backref_node *next = node; 2821 + struct btrfs_backref_edge *edge; 2822 + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2115 2823 u64 num_bytes = 0; 2116 2824 int index = 0; 2117 2825 ··· 2129 2837 break; 2130 2838 2131 2839 edge = list_entry(next->upper.next, 2132 - struct backref_edge, list[LOWER]); 2840 + struct btrfs_backref_edge, list[LOWER]); 2133 2841 edges[index++] = edge; 2134 2842 next = edge->node[UPPER]; 2135 2843 } ··· 2140 2848 2141 2849 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2142 2850 struct reloc_control *rc, 2143 - struct backref_node *node) 2851 + struct btrfs_backref_node *node) 2144 2852 { 2145 2853 struct btrfs_root *root = rc->extent_root; 2146 2854 struct btrfs_fs_info *fs_info = root->fs_info; ··· 2188 2896 */ 2189 2897 static int do_relocation(struct btrfs_trans_handle *trans, 2190 2898 struct reloc_control *rc, 2191 - struct backref_node *node, 2899 + struct btrfs_backref_node *node, 2192 2900 struct btrfs_key *key, 2193 2901 struct btrfs_path *path, int lowest) 2194 2902 { 2195 2903 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2196 - struct backref_node *upper; 2197 - struct backref_edge *edge; 2198 - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2904 + struct btrfs_backref_node *upper; 2905 + struct btrfs_backref_edge *edge; 2906 + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2199 2907 struct btrfs_root *root; 2200 2908 struct extent_buffer *eb; 2201 2909 u32 blocksize; ··· 2221 2929 2222 2930 if (upper->eb && !upper->locked) { 2223 2931 if (!lowest) { 2224 - ret = btrfs_bin_search(upper->eb, key, 2225 - upper->level, &slot); 2932 + ret = btrfs_bin_search(upper->eb, key, &slot); 2226 2933 if (ret < 0) { 2227 2934 err = ret; 2228 2935 goto next; ··· 2231 2940 if (node->eb->start == bytenr) 2232 2941 goto next; 2233 2942 } 2234 - drop_node_buffer(upper); 2943 + btrfs_backref_drop_node_buffer(upper); 2235 2944 } 2236 2945 2237 2946 if (!upper->eb) { ··· 2259 2968 slot = path->slots[upper->level]; 2260 2969 btrfs_release_path(path); 2261 2970 } else { 2262 - ret = btrfs_bin_search(upper->eb, key, upper->level, 2263 - &slot); 2971 + ret = 
btrfs_bin_search(upper->eb, key, &slot); 2264 2972 if (ret < 0) { 2265 2973 err = ret; 2266 2974 goto next; ··· 2329 3039 } 2330 3040 next: 2331 3041 if (!upper->pending) 2332 - drop_node_buffer(upper); 3042 + btrfs_backref_drop_node_buffer(upper); 2333 3043 else 2334 - unlock_node_buffer(upper); 3044 + btrfs_backref_unlock_node_buffer(upper); 2335 3045 if (err) 2336 3046 break; 2337 3047 } 2338 3048 2339 3049 if (!err && node->pending) { 2340 - drop_node_buffer(node); 3050 + btrfs_backref_drop_node_buffer(node); 2341 3051 list_move_tail(&node->list, &rc->backref_cache.changed); 2342 3052 node->pending = 0; 2343 3053 } ··· 2349 3059 2350 3060 static int link_to_upper(struct btrfs_trans_handle *trans, 2351 3061 struct reloc_control *rc, 2352 - struct backref_node *node, 3062 + struct btrfs_backref_node *node, 2353 3063 struct btrfs_path *path) 2354 3064 { 2355 3065 struct btrfs_key key; ··· 2363 3073 struct btrfs_path *path, int err) 2364 3074 { 2365 3075 LIST_HEAD(list); 2366 - struct backref_cache *cache = &rc->backref_cache; 2367 - struct backref_node *node; 3076 + struct btrfs_backref_cache *cache = &rc->backref_cache; 3077 + struct btrfs_backref_node *node; 2368 3078 int level; 2369 3079 int ret; 2370 3080 2371 3081 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2372 3082 while (!list_empty(&cache->pending[level])) { 2373 3083 node = list_entry(cache->pending[level].next, 2374 - struct backref_node, list); 3084 + struct btrfs_backref_node, list); 2375 3085 list_move_tail(&node->list, &list); 2376 3086 BUG_ON(!node->pending); 2377 3087 ··· 2386 3096 return err; 2387 3097 } 2388 3098 2389 - static void mark_block_processed(struct reloc_control *rc, 2390 - u64 bytenr, u32 blocksize) 2391 - { 2392 - set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2393 - EXTENT_DIRTY); 2394 - } 2395 - 2396 - static void __mark_block_processed(struct reloc_control *rc, 2397 - struct backref_node *node) 2398 - { 2399 - u32 blocksize; 2400 - if (node->level == 0 || 2401 - in_block_group(node->bytenr, rc->block_group)) { 2402 - blocksize = rc->extent_root->fs_info->nodesize; 2403 - mark_block_processed(rc, node->bytenr, blocksize); 2404 - } 2405 - node->processed = 1; 2406 - } 2407 - 2408 3099 /* 2409 3100 * mark a block and all blocks directly/indirectly reference the block 2410 3101 * as processed. 
2411 3102 */ 2412 3103 static void update_processed_blocks(struct reloc_control *rc, 2413 - struct backref_node *node) 3104 + struct btrfs_backref_node *node) 2414 3105 { 2415 - struct backref_node *next = node; 2416 - struct backref_edge *edge; 2417 - struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 3106 + struct btrfs_backref_node *next = node; 3107 + struct btrfs_backref_edge *edge; 3108 + struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2418 3109 int index = 0; 2419 3110 2420 3111 while (next) { ··· 2404 3133 if (next->processed) 2405 3134 break; 2406 3135 2407 - __mark_block_processed(rc, next); 3136 + mark_block_processed(rc, next); 2408 3137 2409 3138 if (list_empty(&next->upper)) 2410 3139 break; 2411 3140 2412 3141 edge = list_entry(next->upper.next, 2413 - struct backref_edge, list[LOWER]); 3142 + struct btrfs_backref_edge, list[LOWER]); 2414 3143 edges[index++] = edge; 2415 3144 next = edge->node[UPPER]; 2416 3145 } ··· 2455 3184 */ 2456 3185 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2457 3186 struct reloc_control *rc, 2458 - struct backref_node *node, 3187 + struct btrfs_backref_node *node, 2459 3188 struct btrfs_key *key, 2460 3189 struct btrfs_path *path) 2461 3190 { ··· 2481 3210 } 2482 3211 2483 3212 if (root) { 2484 - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 3213 + if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2485 3214 BUG_ON(node->new_bytenr); 2486 3215 BUG_ON(!list_empty(&node->list)); 2487 3216 btrfs_record_root_in_trans(trans, root); ··· 2505 3234 } 2506 3235 out: 2507 3236 if (ret || node->level == 0 || node->cowonly) 2508 - remove_backref_node(&rc->backref_cache, node); 3237 + btrfs_backref_cleanup_node(&rc->backref_cache, node); 2509 3238 return ret; 2510 3239 } 2511 3240 ··· 2517 3246 struct reloc_control *rc, struct rb_root *blocks) 2518 3247 { 2519 3248 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2520 - struct backref_node *node; 3249 + struct btrfs_backref_node *node; 2521 3250 struct btrfs_path *path; 2522 3251 struct tree_block *block; 2523 3252 struct tree_block *next; ··· 2884 3613 block->level = level; 2885 3614 block->key_ready = 0; 2886 3615 2887 - rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3616 + rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node); 2888 3617 if (rb_node) 2889 - backref_tree_panic(rb_node, -EEXIST, block->bytenr); 3618 + btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, 3619 + -EEXIST); 2890 3620 2891 3621 return 0; 2892 3622 } ··· 2908 3636 if (tree_block_processed(bytenr, rc)) 2909 3637 return 0; 2910 3638 2911 - if (tree_search(blocks, bytenr)) 3639 + if (rb_simple_search(blocks, bytenr)) 2912 3640 return 0; 2913 3641 2914 3642 path = btrfs_alloc_path(); ··· 2970 3698 struct inode *inode, 2971 3699 u64 ino) 2972 3700 { 2973 - struct btrfs_key key; 2974 3701 struct btrfs_root *root = fs_info->tree_root; 2975 3702 struct btrfs_trans_handle *trans; 2976 3703 int ret = 0; ··· 2977 3706 if (inode) 2978 3707 goto truncate; 2979 3708 2980 - key.objectid = ino; 2981 - key.type = BTRFS_INODE_ITEM_KEY; 2982 - key.offset = 0; 2983 - 2984 - inode = btrfs_iget(fs_info->sb, &key, root); 3709 + inode = btrfs_iget(fs_info->sb, ino, root); 2985 3710 if (IS_ERR(inode)) 2986 3711 return -ENOENT; 2987 3712 ··· 3389 4122 rc->create_reloc_tree = 0; 3390 4123 set_reloc_control(rc); 3391 4124 3392 - backref_cache_cleanup(&rc->backref_cache); 4125 + btrfs_backref_release_cache(&rc->backref_cache); 3393 4126 btrfs_block_rsv_release(fs_info, rc->block_rsv, 
(u64)-1, NULL); 3394 4127 3395 4128 /* ··· 3465 4198 struct inode *inode = NULL; 3466 4199 struct btrfs_trans_handle *trans; 3467 4200 struct btrfs_root *root; 3468 - struct btrfs_key key; 3469 4201 u64 objectid; 3470 4202 int err = 0; 3471 4203 3472 - root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); 3473 - if (IS_ERR(root)) 3474 - return ERR_CAST(root); 3475 - 4204 + root = btrfs_grab_root(fs_info->data_reloc_root); 3476 4205 trans = btrfs_start_transaction(root, 6); 3477 4206 if (IS_ERR(trans)) { 3478 4207 btrfs_put_root(root); ··· 3482 4219 err = __insert_orphan_inode(trans, root, objectid); 3483 4220 BUG_ON(err); 3484 4221 3485 - key.objectid = objectid; 3486 - key.type = BTRFS_INODE_ITEM_KEY; 3487 - key.offset = 0; 3488 - inode = btrfs_iget(fs_info->sb, &key, root); 4222 + inode = btrfs_iget(fs_info->sb, objectid, root); 3489 4223 BUG_ON(IS_ERR(inode)); 3490 4224 BTRFS_I(inode)->index_cnt = group->start; 3491 4225 ··· 3509 4249 3510 4250 INIT_LIST_HEAD(&rc->reloc_roots); 3511 4251 INIT_LIST_HEAD(&rc->dirty_subvol_roots); 3512 - backref_cache_init(&rc->backref_cache); 4252 + btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1); 3513 4253 mapping_tree_init(&rc->reloc_root_tree); 3514 4254 extent_io_tree_init(fs_info, &rc->processed_blocks, 3515 4255 IO_TREE_RELOC_BLOCKS, NULL); ··· 3754 4494 goto out; 3755 4495 } 3756 4496 3757 - set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state); 4497 + set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); 3758 4498 list_add(&reloc_root->root_list, &reloc_roots); 3759 4499 3760 4500 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 3761 - fs_root = read_fs_root(fs_info, 3762 - reloc_root->root_key.offset); 4501 + fs_root = btrfs_get_fs_root(fs_info, 4502 + reloc_root->root_key.offset, false); 3763 4503 if (IS_ERR(fs_root)) { 3764 4504 ret = PTR_ERR(fs_root); 3765 4505 if (ret != -ENOENT) { ··· 3815 4555 continue; 3816 4556 } 3817 4557 3818 - fs_root = read_fs_root(fs_info, reloc_root->root_key.offset); 4558 + fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, 4559 + false); 3819 4560 if (IS_ERR(fs_root)) { 3820 4561 err = PTR_ERR(fs_root); 3821 4562 list_add_tail(&reloc_root->root_list, &reloc_roots); ··· 3852 4591 unset_reloc_control(rc); 3853 4592 free_reloc_control(rc); 3854 4593 out: 3855 - if (!list_empty(&reloc_roots)) 3856 - free_reloc_roots(&reloc_roots); 4594 + free_reloc_roots(&reloc_roots); 3857 4595 3858 4596 btrfs_free_path(path); 3859 4597 3860 4598 if (err == 0) { 3861 4599 /* cleanup orphan inode in data relocation tree */ 3862 - fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); 3863 - if (IS_ERR(fs_root)) { 3864 - err = PTR_ERR(fs_root); 3865 - } else { 3866 - err = btrfs_orphan_cleanup(fs_root); 3867 - btrfs_put_root(fs_root); 3868 - } 4600 + fs_root = btrfs_grab_root(fs_info->data_reloc_root); 4601 + ASSERT(fs_root); 4602 + err = btrfs_orphan_cleanup(fs_root); 4603 + btrfs_put_root(fs_root); 3869 4604 } 3870 4605 return err; 3871 4606 } ··· 3923 4666 { 3924 4667 struct btrfs_fs_info *fs_info = root->fs_info; 3925 4668 struct reloc_control *rc; 3926 - struct backref_node *node; 4669 + struct btrfs_backref_node *node; 3927 4670 int first_cow = 0; 3928 4671 int level; 3929 4672 int ret = 0; ··· 3948 4691 BUG_ON(node->bytenr != buf->start && 3949 4692 node->new_bytenr != buf->start); 3950 4693 3951 - drop_node_buffer(node); 4694 + btrfs_backref_drop_node_buffer(node); 3952 4695 atomic_inc(&cow->refs); 3953 4696 node->eb = cow; 3954 4697 node->new_bytenr = cow->start; ··· 3960 4703 } 3961 4704 
3962 4705 if (first_cow) 3963 - __mark_block_processed(rc, node); 4706 + mark_block_processed(rc, node); 3964 4707 3965 4708 if (first_cow && level > 0) 3966 4709 rc->nodes_relocated += buf->len;
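The rb_simple_search()/rb_simple_insert() calls that replace the old local tree_search()/tree_insert() helpers throughout relocation.c operate on rb-trees keyed by a u64 bytenr. A minimal sketch of the lookup side, assuming an entry type that embeds the key right after the rb_node (the shape struct mapping_node and the backref cache nodes share); simple_node and simple_search are illustrative names, not the exact kernel definitions:

    struct simple_node {
            struct rb_node rb_node;
            u64 bytenr;
    };

    static struct rb_node *simple_search(struct rb_root *root, u64 bytenr)
    {
            struct rb_node *node = root->rb_node;
            struct simple_node *entry;

            while (node) {
                    entry = rb_entry(node, struct simple_node, rb_node);

                    if (bytenr < entry->bytenr)
                            node = node->rb_left;      /* key sorts left */
                    else if (bytenr > entry->bytenr)
                            node = node->rb_right;     /* key sorts right */
                    else
                            return node;               /* exact match */
            }
            return NULL;
    }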
+5 -7
fs/btrfs/root-tree.c
··· 210 210 struct extent_buffer *leaf; 211 211 struct btrfs_path *path; 212 212 struct btrfs_key key; 213 - struct btrfs_key root_key; 214 213 struct btrfs_root *root; 215 214 int err = 0; 216 215 int ret; ··· 222 223 key.type = BTRFS_ORPHAN_ITEM_KEY; 223 224 key.offset = 0; 224 225 225 - root_key.type = BTRFS_ROOT_ITEM_KEY; 226 - root_key.offset = (u64)-1; 227 - 228 226 while (1) { 227 + u64 root_objectid; 228 + 229 229 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); 230 230 if (ret < 0) { 231 231 err = ret; ··· 248 250 key.type != BTRFS_ORPHAN_ITEM_KEY) 249 251 break; 250 252 251 - root_key.objectid = key.offset; 253 + root_objectid = key.offset; 252 254 key.offset++; 253 255 254 - root = btrfs_get_fs_root(fs_info, &root_key, false); 256 + root = btrfs_get_fs_root(fs_info, root_objectid, false); 255 257 err = PTR_ERR_OR_ZERO(root); 256 258 if (err && err != -ENOENT) { 257 259 break; ··· 268 270 break; 269 271 } 270 272 err = btrfs_del_orphan_item(trans, tree_root, 271 - root_key.objectid); 273 + root_objectid); 272 274 btrfs_end_transaction(trans); 273 275 if (err) { 274 276 btrfs_handle_fs_error(fs_info, err,
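The btrfs_get_fs_root() signature change visible here repeats across the whole series: callers no longer build a throwaway btrfs_key just to name a root. A before/after sketch of a hypothetical caller, taken from the pattern in the hunks above:

    /* Old calling convention: a full key, with only objectid meaningful. */
    struct btrfs_key key;

    key.objectid = root_objectid;
    key.type = BTRFS_ROOT_ITEM_KEY;
    key.offset = (u64)-1;
    root = btrfs_get_fs_root(fs_info, &key, false);

    /* New calling convention: the objectid alone identifies the root. */
    root = btrfs_get_fs_root(fs_info, root_objectid, false);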
+45 -14
fs/btrfs/scrub.c
··· 647 647 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; 648 648 struct inode_fs_paths *ipath = NULL; 649 649 struct btrfs_root *local_root; 650 - struct btrfs_key root_key; 651 650 struct btrfs_key key; 652 651 653 - root_key.objectid = root; 654 - root_key.type = BTRFS_ROOT_ITEM_KEY; 655 - root_key.offset = (u64)-1; 656 - local_root = btrfs_get_fs_root(fs_info, &root_key, true); 652 + local_root = btrfs_get_fs_root(fs_info, root, true); 657 653 if (IS_ERR(local_root)) { 658 654 ret = PTR_ERR(local_root); 659 655 goto err; ··· 3042 3046 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, 3043 3047 struct map_lookup *map, 3044 3048 struct btrfs_device *scrub_dev, 3045 - int num, u64 base, u64 length) 3049 + int num, u64 base, u64 length, 3050 + struct btrfs_block_group *cache) 3046 3051 { 3047 3052 struct btrfs_path *path, *ppath; 3048 3053 struct btrfs_fs_info *fs_info = sctx->fs_info; ··· 3281 3284 break; 3282 3285 } 3283 3286 3287 + /* 3288 + * If our block group was removed in the meanwhile, just 3289 + * stop scrubbing since there is no point in continuing. 3290 + * Continuing would prevent reusing its device extents 3291 + * for new block groups for a long time. 3292 + */ 3293 + spin_lock(&cache->lock); 3294 + if (cache->removed) { 3295 + spin_unlock(&cache->lock); 3296 + ret = 0; 3297 + goto out; 3298 + } 3299 + spin_unlock(&cache->lock); 3300 + 3284 3301 extent = btrfs_item_ptr(l, slot, 3285 3302 struct btrfs_extent_item); 3286 3303 flags = btrfs_extent_flags(l, extent); ··· 3339 3328 &extent_dev, 3340 3329 &extent_mirror_num); 3341 3330 3342 - ret = btrfs_lookup_csums_range(csum_root, 3343 - extent_logical, 3344 - extent_logical + 3345 - extent_len - 1, 3346 - &sctx->csum_list, 1); 3347 - if (ret) 3348 - goto out; 3331 + if (flags & BTRFS_EXTENT_FLAG_DATA) { 3332 + ret = btrfs_lookup_csums_range(csum_root, 3333 + extent_logical, 3334 + extent_logical + extent_len - 1, 3335 + &sctx->csum_list, 1); 3336 + if (ret) 3337 + goto out; 3338 + } 3349 3339 3350 3340 ret = scrub_extent(sctx, map, extent_logical, extent_len, 3351 3341 extent_physical, extent_dev, flags, ··· 3469 3457 if (map->stripes[i].dev->bdev == scrub_dev->bdev && 3470 3458 map->stripes[i].physical == dev_offset) { 3471 3459 ret = scrub_stripe(sctx, map, scrub_dev, i, 3472 - chunk_offset, length); 3460 + chunk_offset, length, cache); 3473 3461 if (ret) 3474 3462 goto out; 3475 3463 } ··· 3567 3555 goto skip; 3568 3556 3569 3557 /* 3558 + * Make sure that while we are scrubbing the corresponding block 3559 + * group doesn't get its logical address and its device extents 3560 + * reused for another block group, which can possibly be of a 3561 + * different type and different profile. We do this to prevent 3562 + * false error detections and crashes due to bogus attempts to 3563 + * repair extents. 
3564 + */ 3565 + spin_lock(&cache->lock); 3566 + if (cache->removed) { 3567 + spin_unlock(&cache->lock); 3568 + btrfs_put_block_group(cache); 3569 + goto skip; 3570 + } 3571 + btrfs_freeze_block_group(cache); 3572 + spin_unlock(&cache->lock); 3573 + 3574 + /* 3570 3575 * we need call btrfs_inc_block_group_ro() with scrubs_paused, 3571 3576 * to avoid deadlock caused by: 3572 3577 * btrfs_inc_block_group_ro() ··· 3638 3609 } else { 3639 3610 btrfs_warn(fs_info, 3640 3611 "failed setting block group ro: %d", ret); 3612 + btrfs_unfreeze_block_group(cache); 3641 3613 btrfs_put_block_group(cache); 3642 3614 scrub_pause_off(fs_info); 3643 3615 break; ··· 3725 3695 spin_unlock(&cache->lock); 3726 3696 } 3727 3697 3698 + btrfs_unfreeze_block_group(cache); 3728 3699 btrfs_put_block_group(cache); 3729 3700 if (ret) 3730 3701 break;
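The invariant behind the scrub.c hunks above is the freeze/unfreeze pairing: once btrfs_freeze_block_group() has been called under cache->lock, every exit path must call btrfs_unfreeze_block_group() before the last reference is dropped, or the block group's logical address and device extents stay pinned. A condensed sketch of the pattern, with the actual scrub work elided:

    spin_lock(&cache->lock);
    if (cache->removed) {
            /* Raced with block group removal: nothing left to scrub. */
            spin_unlock(&cache->lock);
            btrfs_put_block_group(cache);
            goto skip;
    }
    btrfs_freeze_block_group(cache);        /* pin the block group's extents */
    spin_unlock(&cache->lock);

    /* ... scrub the chunk, set 'ret' ... */

    btrfs_unfreeze_block_group(cache);      /* must pair with the freeze */
    btrfs_put_block_group(cache);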
+72 -17
fs/btrfs/send.c
··· 23 23 #include "btrfs_inode.h" 24 24 #include "transaction.h" 25 25 #include "compression.h" 26 + #include "xattr.h" 26 27 27 28 /* 28 29 * Maximum number of references an extent can have in order for us to attempt to ··· 4546 4545 struct fs_path *p; 4547 4546 struct posix_acl_xattr_header dummy_acl; 4548 4547 4548 + /* Capabilities are emitted by finish_inode_if_needed */ 4549 + if (!strncmp(name, XATTR_NAME_CAPS, name_len)) 4550 + return 0; 4551 + 4549 4552 p = fs_path_alloc(); 4550 4553 if (!p) 4551 4554 return -ENOMEM; ··· 4806 4801 struct inode *inode; 4807 4802 struct page *page; 4808 4803 char *addr; 4809 - struct btrfs_key key; 4810 4804 pgoff_t index = offset >> PAGE_SHIFT; 4811 4805 pgoff_t last_index; 4812 4806 unsigned pg_offset = offset_in_page(offset); 4813 4807 ssize_t ret = 0; 4814 4808 4815 - key.objectid = sctx->cur_ino; 4816 - key.type = BTRFS_INODE_ITEM_KEY; 4817 - key.offset = 0; 4818 - 4819 - inode = btrfs_iget(fs_info->sb, &key, root); 4809 + inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root); 4820 4810 if (IS_ERR(inode)) 4821 4811 return PTR_ERR(inode); 4822 4812 ··· 5105 5105 sent += ret; 5106 5106 } 5107 5107 return 0; 5108 + } 5109 + 5110 + /* 5111 + * Search for a capability xattr related to sctx->cur_ino. If the capability is 5112 + * found, call send_set_xattr function to emit it. 5113 + * 5114 + * Return 0 if there isn't a capability, or when the capability was emitted 5115 + * successfully, or < 0 if an error occurred. 5116 + */ 5117 + static int send_capabilities(struct send_ctx *sctx) 5118 + { 5119 + struct fs_path *fspath = NULL; 5120 + struct btrfs_path *path; 5121 + struct btrfs_dir_item *di; 5122 + struct extent_buffer *leaf; 5123 + unsigned long data_ptr; 5124 + char *buf = NULL; 5125 + int buf_len; 5126 + int ret = 0; 5127 + 5128 + path = alloc_path_for_send(); 5129 + if (!path) 5130 + return -ENOMEM; 5131 + 5132 + di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, 5133 + XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); 5134 + if (!di) { 5135 + /* There is no xattr for this inode */ 5136 + goto out; 5137 + } else if (IS_ERR(di)) { 5138 + ret = PTR_ERR(di); 5139 + goto out; 5140 + } 5141 + 5142 + leaf = path->nodes[0]; 5143 + buf_len = btrfs_dir_data_len(leaf, di); 5144 + 5145 + fspath = fs_path_alloc(); 5146 + buf = kmalloc(buf_len, GFP_KERNEL); 5147 + if (!fspath || !buf) { 5148 + ret = -ENOMEM; 5149 + goto out; 5150 + } 5151 + 5152 + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); 5153 + if (ret < 0) 5154 + goto out; 5155 + 5156 + data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); 5157 + read_extent_buffer(leaf, buf, data_ptr, buf_len); 5158 + 5159 + ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, 5160 + strlen(XATTR_NAME_CAPS), buf, buf_len); 5161 + out: 5162 + kfree(buf); 5163 + fs_path_free(fspath); 5164 + btrfs_free_path(path); 5165 + return ret; 5108 5166 } 5109 5167 5110 5168 static int clone_range(struct send_ctx *sctx, ··· 6029 5971 if (ret < 0) 6030 5972 goto out; 6031 5973 } 5974 + 5975 + ret = send_capabilities(sctx); 5976 + if (ret < 0) 5977 + goto out; 6032 5978 6033 5979 /* 6034 5980 * If other directory inodes depended on our current directory ··· 7083 7021 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root; 7084 7022 struct btrfs_fs_info *fs_info = send_root->fs_info; 7085 7023 struct btrfs_root *clone_root; 7086 - struct btrfs_key key; 7087 7024 struct send_ctx *sctx = NULL; 7088 7025 u32 i; 7089 7026 u64 *clone_sources_tmp = NULL; ··· 7204 7143 } 7205 
7144 7206 7145 for (i = 0; i < arg->clone_sources_count; i++) { 7207 - key.objectid = clone_sources_tmp[i]; 7208 - key.type = BTRFS_ROOT_ITEM_KEY; 7209 - key.offset = (u64)-1; 7210 - 7211 - clone_root = btrfs_get_fs_root(fs_info, &key, true); 7146 + clone_root = btrfs_get_fs_root(fs_info, 7147 + clone_sources_tmp[i], true); 7212 7148 if (IS_ERR(clone_root)) { 7213 7149 ret = PTR_ERR(clone_root); 7214 7150 goto out; ··· 7236 7178 } 7237 7179 7238 7180 if (arg->parent_root) { 7239 - key.objectid = arg->parent_root; 7240 - key.type = BTRFS_ROOT_ITEM_KEY; 7241 - key.offset = (u64)-1; 7242 - 7243 - sctx->parent_root = btrfs_get_fs_root(fs_info, &key, true); 7181 + sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root, 7182 + true); 7244 7183 if (IS_ERR(sctx->parent_root)) { 7245 7184 ret = PTR_ERR(sctx->parent_root); 7246 7185 goto out;
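send_capabilities() pulls the raw xattr value straight out of the leaf; the pointer arithmetic works because a btrfs_dir_item header is immediately followed by the name bytes and then the data bytes. A sketch of the layout the two lines from the hunk above rely on:

    /*
     * Layout of an xattr item inside a leaf:
     *
     *   [ struct btrfs_dir_item ][ name bytes ][ data bytes ]
     *   ^ di                     ^ di + 1      ^ (di + 1) + name_len
     *
     * so the value of the security.capability xattr starts at:
     */
    data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
    read_extent_buffer(leaf, buf, data_ptr, buf_len);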
+73 -8
fs/btrfs/space-info.c
··· 626 626 struct reserve_ticket *ticket = NULL; 627 627 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; 628 628 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; 629 + struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; 629 630 struct btrfs_trans_handle *trans; 630 631 u64 bytes_needed; 631 632 u64 reclaim_bytes = 0; ··· 689 688 spin_lock(&delayed_refs_rsv->lock); 690 689 reclaim_bytes += delayed_refs_rsv->reserved; 691 690 spin_unlock(&delayed_refs_rsv->lock); 691 + 692 + spin_lock(&trans_rsv->lock); 693 + reclaim_bytes += trans_rsv->reserved; 694 + spin_unlock(&trans_rsv->lock); 695 + 692 696 if (reclaim_bytes >= bytes_needed) 693 697 goto commit; 694 698 bytes_needed -= reclaim_bytes; ··· 862 856 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 863 857 } 864 858 859 + static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, 860 + struct btrfs_space_info *space_info, 861 + struct reserve_ticket *ticket) 862 + { 863 + struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 864 + u64 min_bytes; 865 + 866 + if (global_rsv->space_info != space_info) 867 + return false; 868 + 869 + spin_lock(&global_rsv->lock); 870 + min_bytes = div_factor(global_rsv->size, 1); 871 + if (global_rsv->reserved < min_bytes + ticket->bytes) { 872 + spin_unlock(&global_rsv->lock); 873 + return false; 874 + } 875 + global_rsv->reserved -= ticket->bytes; 876 + ticket->bytes = 0; 877 + list_del_init(&ticket->list); 878 + wake_up(&ticket->wait); 879 + space_info->tickets_id++; 880 + if (global_rsv->reserved < global_rsv->size) 881 + global_rsv->full = 0; 882 + spin_unlock(&global_rsv->lock); 883 + 884 + return true; 885 + } 886 + 865 887 /* 866 888 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets 867 889 * @fs_info - fs_info for this fs ··· 921 887 tickets_id == space_info->tickets_id) { 922 888 ticket = list_first_entry(&space_info->tickets, 923 889 struct reserve_ticket, list); 890 + 891 + if (ticket->steal && 892 + steal_from_global_rsv(fs_info, space_info, ticket)) 893 + return true; 924 894 925 895 /* 926 896 * may_commit_transaction will avoid committing the transaction ··· 1142 1104 1143 1105 switch (flush) { 1144 1106 case BTRFS_RESERVE_FLUSH_ALL: 1107 + case BTRFS_RESERVE_FLUSH_ALL_STEAL: 1145 1108 wait_reserve_ticket(fs_info, space_info, ticket); 1146 1109 break; 1147 1110 case BTRFS_RESERVE_FLUSH_LIMIT: ··· 1164 1125 ret = ticket->error; 1165 1126 if (ticket->bytes || ticket->error) { 1166 1127 /* 1167 - * Need to delete here for priority tickets. For regular tickets 1168 - * either the async reclaim job deletes the ticket from the list 1169 - * or we delete it ourselves at wait_reserve_ticket(). 1128 + * We were a priority ticket, so we need to delete ourselves 1129 + * from the list. Because we could have other priority tickets 1130 + * behind us that require less space, run 1131 + * btrfs_try_granting_tickets() to see if their reservations can 1132 + * now be made. 1170 1133 */ 1171 - remove_ticket(space_info, ticket); 1134 + if (!list_empty(&ticket->list)) { 1135 + remove_ticket(space_info, ticket); 1136 + btrfs_try_granting_tickets(fs_info, space_info); 1137 + } 1138 + 1172 1139 if (!ret) 1173 1140 ret = -ENOSPC; 1174 1141 } ··· 1188 1143 */ 1189 1144 ASSERT(!(ticket->bytes == 0 && ticket->error)); 1190 1145 return ret; 1146 + } 1147 + 1148 + /* 1149 + * This returns true if this flush state will go through the ordinary flushing 1150 + * code. 
1151 + */ 1152 + static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) 1153 + { 1154 + return (flush == BTRFS_RESERVE_FLUSH_ALL) || 1155 + (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1191 1156 } 1192 1157 1193 1158 /** ··· 1230 1175 spin_lock(&space_info->lock); 1231 1176 ret = -ENOSPC; 1232 1177 used = btrfs_space_info_used(space_info, true); 1233 - pending_tickets = !list_empty(&space_info->tickets) || 1234 - !list_empty(&space_info->priority_tickets); 1178 + 1179 + /* 1180 + * We don't want NO_FLUSH allocations to jump everybody, they can 1181 + * generally handle ENOSPC in a different way, so treat them the same as 1182 + * normal flushers when it comes to skipping pending tickets. 1183 + */ 1184 + if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) 1185 + pending_tickets = !list_empty(&space_info->tickets) || 1186 + !list_empty(&space_info->priority_tickets); 1187 + else 1188 + pending_tickets = !list_empty(&space_info->priority_tickets); 1235 1189 1236 1190 /* 1237 1191 * Carry on if we have enough space (short-circuit) OR call ··· 1262 1198 * the list and we will do our own flushing further down. 1263 1199 */ 1264 1200 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 1265 - ASSERT(space_info->reclaim_size >= 0); 1266 1201 ticket.bytes = orig_bytes; 1267 1202 ticket.error = 0; 1268 1203 space_info->reclaim_size += ticket.bytes; 1269 1204 init_waitqueue_head(&ticket.wait); 1270 - if (flush == BTRFS_RESERVE_FLUSH_ALL) { 1205 + ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); 1206 + if (flush == BTRFS_RESERVE_FLUSH_ALL || 1207 + flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { 1271 1208 list_add_tail(&ticket.list, &space_info->tickets); 1272 1209 if (!space_info->flush) { 1273 1210 space_info->flush = 1;
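For reference, div_factor(num, factor) in btrfs computes num * factor / 10, so the div_factor(global_rsv->size, 1) in steal_from_global_rsv() above is one tenth of the global reserve size: a ticket may only steal while the reserve would keep at least that floor. A worked sketch of the guard:

    /* Sketch: div_factor(size, 1) == size / 10, i.e. a 10% floor. */
    u64 min_bytes = global_rsv->size / 10;

    if (global_rsv->reserved < min_bytes + ticket->bytes)
            return false;   /* stealing would drain the reserve below 10% */

    global_rsv->reserved -= ticket->bytes;  /* grant the ticket from the reserve */
    ticket->bytes = 0;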
+1
fs/btrfs/space-info.h
··· 78 78 struct reserve_ticket { 79 79 u64 bytes; 80 80 int error; 81 + bool steal; 81 82 struct list_head list; 82 83 wait_queue_head_t wait; 83 84 };
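The new steal flag is written once, when the ticket is queued on the reservation path shown in the space-info.c hunk above, and read later by maybe_fail_all_tickets(). Per that hunk, arming a waiting ticket boils down to:

    struct reserve_ticket ticket;

    ticket.bytes = orig_bytes;
    ticket.error = 0;
    ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
    init_waitqueue_head(&ticket.wait);
    list_add_tail(&ticket.list, &space_info->tickets);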
+112 -111
fs/btrfs/struct-funcs.c
··· 17 17 *(u8 *)p = val;
18 18 }
19 19
20 + static bool check_setget_bounds(const struct extent_buffer *eb,
21 + const void *ptr, unsigned off, int size)
22 + {
23 + const unsigned long member_offset = (unsigned long)ptr + off;
24 +
25 + if (member_offset > eb->len) {
26 + btrfs_warn(eb->fs_info,
27 + "bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
28 + (unsigned long)ptr, eb->start, member_offset, size);
29 + return false;
30 + }
31 + if (member_offset + size > eb->len) {
32 + btrfs_warn(eb->fs_info,
33 + "bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
34 + (unsigned long)ptr, eb->start, member_offset, size);
35 + return false;
36 + }
37 +
38 + return true;
39 + }
40 +
20 41 /*
21 - * this is some deeply nasty code.
42 + * Macro templates that define helpers to read/write extent buffer data of a
43 + * given size, that are also used via ctree.h for access to item members by
44 + * specialized helpers.
22 45 *
23 - * The end result is that anyone who #includes ctree.h gets a
24 - * declaration for the btrfs_set_foo functions and btrfs_foo functions,
25 - * which are wrappers of btrfs_set_token_#bits functions and
26 - * btrfs_get_token_#bits functions, which are defined in this file.
46 + * Generic helpers:
47 + * - btrfs_set_8 (for 8/16/32/64)
48 + * - btrfs_get_8 (for 8/16/32/64)
27 49 *
28 - * These setget functions do all the extent_buffer related mapping
29 - * required to efficiently read and write specific fields in the extent
30 - * buffers. Every pointer to metadata items in btrfs is really just
31 - * an unsigned long offset into the extent buffer which has been
32 - * cast to a specific type. This gives us all the gcc type checking.
50 + * Generic helpers with a token (cached address of the most recently accessed
51 + * page):
52 + * - btrfs_set_token_8 (for 8/16/32/64)
53 + * - btrfs_get_token_8 (for 8/16/32/64)
33 54 *
34 - * The extent buffer api is used to do the page spanning work required to
35 - * have a metadata blocksize different from the page size.
55 + * The set/get functions handle data spanning two pages transparently, in case
56 + * the metadata block size is larger than the page size. Every pointer to
57 + * metadata items is an offset into the extent buffer page array, cast to a
58 + * specific type. This gives us all the type checking.
36 59 *
37 - * There are 2 variants defined, one with a token pointer and one without.
60 + * The extent buffer pages stored in the pages array do not form a contiguous
61 + * physical range, but the API functions assume a linear offset into the range
62 + * from 0 to the metadata node size.
38 63 */ 39 64 40 65 #define DEFINE_BTRFS_SETGET_BITS(bits) \ 41 - u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ 42 - const void *ptr, unsigned long off, \ 43 - struct btrfs_map_token *token) \ 66 + u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \ 67 + const void *ptr, unsigned long off) \ 44 68 { \ 45 - unsigned long part_offset = (unsigned long)ptr; \ 46 - unsigned long offset = part_offset + off; \ 47 - void *p; \ 48 - int err; \ 49 - char *kaddr; \ 50 - unsigned long map_start; \ 51 - unsigned long map_len; \ 52 - int size = sizeof(u##bits); \ 53 - u##bits res; \ 69 + const unsigned long member_offset = (unsigned long)ptr + off; \ 70 + const unsigned long idx = member_offset >> PAGE_SHIFT; \ 71 + const unsigned long oip = offset_in_page(member_offset); \ 72 + const int size = sizeof(u##bits); \ 73 + u8 lebytes[sizeof(u##bits)]; \ 74 + const int part = PAGE_SIZE - oip; \ 54 75 \ 55 76 ASSERT(token); \ 56 - ASSERT(token->eb == eb); \ 57 - \ 58 - if (token->kaddr && token->offset <= offset && \ 59 - (token->offset + PAGE_SIZE >= offset + size)) { \ 60 - kaddr = token->kaddr; \ 61 - p = kaddr + part_offset - token->offset; \ 62 - res = get_unaligned_le##bits(p + off); \ 63 - return res; \ 77 + ASSERT(token->kaddr); \ 78 + ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ 79 + if (token->offset <= member_offset && \ 80 + member_offset + size <= token->offset + PAGE_SIZE) { \ 81 + return get_unaligned_le##bits(token->kaddr + oip); \ 64 82 } \ 65 - err = map_private_extent_buffer(eb, offset, size, \ 66 - &kaddr, &map_start, &map_len); \ 67 - if (err) { \ 68 - __le##bits leres; \ 83 + token->kaddr = page_address(token->eb->pages[idx]); \ 84 + token->offset = idx << PAGE_SHIFT; \ 85 + if (oip + size <= PAGE_SIZE) \ 86 + return get_unaligned_le##bits(token->kaddr + oip); \ 69 87 \ 70 - read_extent_buffer(eb, &leres, offset, size); \ 71 - return le##bits##_to_cpu(leres); \ 72 - } \ 73 - p = kaddr + part_offset - map_start; \ 74 - res = get_unaligned_le##bits(p + off); \ 75 - token->kaddr = kaddr; \ 76 - token->offset = map_start; \ 77 - return res; \ 88 + memcpy(lebytes, token->kaddr + oip, part); \ 89 + token->kaddr = page_address(token->eb->pages[idx + 1]); \ 90 + token->offset = (idx + 1) << PAGE_SHIFT; \ 91 + memcpy(lebytes + part, token->kaddr, size - part); \ 92 + return get_unaligned_le##bits(lebytes); \ 78 93 } \ 79 94 u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ 80 95 const void *ptr, unsigned long off) \ 81 96 { \ 82 - unsigned long part_offset = (unsigned long)ptr; \ 83 - unsigned long offset = part_offset + off; \ 84 - void *p; \ 85 - int err; \ 86 - char *kaddr; \ 87 - unsigned long map_start; \ 88 - unsigned long map_len; \ 89 - int size = sizeof(u##bits); \ 90 - u##bits res; \ 97 + const unsigned long member_offset = (unsigned long)ptr + off; \ 98 + const unsigned long oip = offset_in_page(member_offset); \ 99 + const unsigned long idx = member_offset >> PAGE_SHIFT; \ 100 + char *kaddr = page_address(eb->pages[idx]); \ 101 + const int size = sizeof(u##bits); \ 102 + const int part = PAGE_SIZE - oip; \ 103 + u8 lebytes[sizeof(u##bits)]; \ 91 104 \ 92 - err = map_private_extent_buffer(eb, offset, size, \ 93 - &kaddr, &map_start, &map_len); \ 94 - if (err) { \ 95 - __le##bits leres; \ 105 + ASSERT(check_setget_bounds(eb, ptr, off, size)); \ 106 + if (oip + size <= PAGE_SIZE) \ 107 + return get_unaligned_le##bits(kaddr + oip); \ 96 108 \ 97 - read_extent_buffer(eb, &leres, offset, size); \ 98 - return le##bits##_to_cpu(leres); \ 
99 - } \ 100 - p = kaddr + part_offset - map_start; \ 101 - res = get_unaligned_le##bits(p + off); \ 102 - return res; \ 109 + memcpy(lebytes, kaddr + oip, part); \ 110 + kaddr = page_address(eb->pages[idx + 1]); \ 111 + memcpy(lebytes + part, kaddr, size - part); \ 112 + return get_unaligned_le##bits(lebytes); \ 103 113 } \ 104 - void btrfs_set_token_##bits(struct extent_buffer *eb, \ 114 + void btrfs_set_token_##bits(struct btrfs_map_token *token, \ 105 115 const void *ptr, unsigned long off, \ 106 - u##bits val, \ 107 - struct btrfs_map_token *token) \ 116 + u##bits val) \ 108 117 { \ 109 - unsigned long part_offset = (unsigned long)ptr; \ 110 - unsigned long offset = part_offset + off; \ 111 - void *p; \ 112 - int err; \ 113 - char *kaddr; \ 114 - unsigned long map_start; \ 115 - unsigned long map_len; \ 116 - int size = sizeof(u##bits); \ 118 + const unsigned long member_offset = (unsigned long)ptr + off; \ 119 + const unsigned long idx = member_offset >> PAGE_SHIFT; \ 120 + const unsigned long oip = offset_in_page(member_offset); \ 121 + const int size = sizeof(u##bits); \ 122 + u8 lebytes[sizeof(u##bits)]; \ 123 + const int part = PAGE_SIZE - oip; \ 117 124 \ 118 125 ASSERT(token); \ 119 - ASSERT(token->eb == eb); \ 120 - \ 121 - if (token->kaddr && token->offset <= offset && \ 122 - (token->offset + PAGE_SIZE >= offset + size)) { \ 123 - kaddr = token->kaddr; \ 124 - p = kaddr + part_offset - token->offset; \ 125 - put_unaligned_le##bits(val, p + off); \ 126 + ASSERT(token->kaddr); \ 127 + ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ 128 + if (token->offset <= member_offset && \ 129 + member_offset + size <= token->offset + PAGE_SIZE) { \ 130 + put_unaligned_le##bits(val, token->kaddr + oip); \ 126 131 return; \ 127 132 } \ 128 - err = map_private_extent_buffer(eb, offset, size, \ 129 - &kaddr, &map_start, &map_len); \ 130 - if (err) { \ 131 - __le##bits val2; \ 132 - \ 133 - val2 = cpu_to_le##bits(val); \ 134 - write_extent_buffer(eb, &val2, offset, size); \ 133 + token->kaddr = page_address(token->eb->pages[idx]); \ 134 + token->offset = idx << PAGE_SHIFT; \ 135 + if (oip + size <= PAGE_SIZE) { \ 136 + put_unaligned_le##bits(val, token->kaddr + oip); \ 135 137 return; \ 136 138 } \ 137 - p = kaddr + part_offset - map_start; \ 138 - put_unaligned_le##bits(val, p + off); \ 139 - token->kaddr = kaddr; \ 140 - token->offset = map_start; \ 139 + put_unaligned_le##bits(val, lebytes); \ 140 + memcpy(token->kaddr + oip, lebytes, part); \ 141 + token->kaddr = page_address(token->eb->pages[idx + 1]); \ 142 + token->offset = (idx + 1) << PAGE_SHIFT; \ 143 + memcpy(token->kaddr, lebytes + part, size - part); \ 141 144 } \ 142 - void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ 145 + void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \ 143 146 unsigned long off, u##bits val) \ 144 147 { \ 145 - unsigned long part_offset = (unsigned long)ptr; \ 146 - unsigned long offset = part_offset + off; \ 147 - void *p; \ 148 - int err; \ 149 - char *kaddr; \ 150 - unsigned long map_start; \ 151 - unsigned long map_len; \ 152 - int size = sizeof(u##bits); \ 148 + const unsigned long member_offset = (unsigned long)ptr + off; \ 149 + const unsigned long oip = offset_in_page(member_offset); \ 150 + const unsigned long idx = member_offset >> PAGE_SHIFT; \ 151 + char *kaddr = page_address(eb->pages[idx]); \ 152 + const int size = sizeof(u##bits); \ 153 + const int part = PAGE_SIZE - oip; \ 154 + u8 lebytes[sizeof(u##bits)]; \ 153 155 \ 154 - err = 
map_private_extent_buffer(eb, offset, size, \ 155 - &kaddr, &map_start, &map_len); \ 156 - if (err) { \ 157 - __le##bits val2; \ 158 - \ 159 - val2 = cpu_to_le##bits(val); \ 160 - write_extent_buffer(eb, &val2, offset, size); \ 156 + ASSERT(check_setget_bounds(eb, ptr, off, size)); \ 157 + if (oip + size <= PAGE_SIZE) { \ 158 + put_unaligned_le##bits(val, kaddr + oip); \ 161 159 return; \ 162 160 } \ 163 - p = kaddr + part_offset - map_start; \ 164 - put_unaligned_le##bits(val, p + off); \ 161 + \ 162 + put_unaligned_le##bits(val, lebytes); \ 163 + memcpy(kaddr + oip, lebytes, part); \ 164 + kaddr = page_address(eb->pages[idx + 1]); \ 165 + memcpy(kaddr, lebytes + part, size - part); \ 165 166 } 166 167 167 168 DEFINE_BTRFS_SETGET_BITS(8)
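
The rewritten helpers replace map_private_extent_buffer() with direct page-array indexing: a member that fits inside one page is accessed in place, otherwise its two halves are stitched through a small on-stack buffer (lebytes). A standalone sketch of the straddling read, with a two-buffer array standing in for eb->pages and a byte-wise decoder standing in for get_unaligned_le32():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static uint32_t get_le32(const uint8_t *p)
{
	/* byte-wise load: alignment-safe, little-endian on disk */
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Read a le32 at a linear offset from an array of per-page buffers. */
static uint32_t eb_get_u32(uint8_t *pages[], unsigned long offset)
{
	const unsigned long idx = offset / PAGE_SIZE;
	const unsigned long oip = offset % PAGE_SIZE;	/* offset in page */
	const unsigned long part = PAGE_SIZE - oip;
	uint8_t lebytes[4];

	if (oip + sizeof(lebytes) <= PAGE_SIZE)
		return get_le32(pages[idx] + oip);

	/* member straddles two pages: copy both halves, then decode */
	memcpy(lebytes, pages[idx] + oip, part);
	memcpy(lebytes + part, pages[idx + 1], sizeof(lebytes) - part);
	return get_le32(lebytes);
}

int main(void)
{
	static uint8_t p0[PAGE_SIZE], p1[PAGE_SIZE];
	uint8_t *pages[] = { p0, p1 };

	/* plant 0x04030201 across the page boundary at offset 4094 */
	p0[4094] = 0x01; p0[4095] = 0x02; p1[0] = 0x03; p1[1] = 0x04;
	printf("0x%08x\n", eb_get_u32(pages, 4094));
	return 0;
}

The token variants add one more shortcut on top of this: they cache the kernel address and offset of the last page touched, so a run of accesses to nearby members skips the index math entirely.
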
+20 -18
fs/btrfs/super.c
··· 72 72 char *errstr = "unknown"; 73 73 74 74 switch (errno) { 75 - case -EIO: 75 + case -ENOENT: /* -2 */ 76 + errstr = "No such entry"; 77 + break; 78 + case -EIO: /* -5 */ 76 79 errstr = "IO failure"; 77 80 break; 78 - case -ENOMEM: 81 + case -ENOMEM: /* -12 */ 79 82 errstr = "Out of memory"; 80 83 break; 81 - case -EROFS: 82 - errstr = "Readonly filesystem"; 83 - break; 84 - case -EEXIST: 84 + case -EEXIST: /* -17 */ 85 85 errstr = "Object already exists"; 86 86 break; 87 - case -ENOSPC: 87 + case -ENOSPC: /* -28 */ 88 88 errstr = "No space left"; 89 89 break; 90 - case -ENOENT: 91 - errstr = "No such entry"; 90 + case -EROFS: /* -30 */ 91 + errstr = "Readonly filesystem"; 92 + break; 93 + case -EOPNOTSUPP: /* -95 */ 94 + errstr = "Operation not supported"; 95 + break; 96 + case -EUCLEAN: /* -117 */ 97 + errstr = "Filesystem corrupted"; 98 + break; 99 + case -EDQUOT: /* -122 */ 100 + errstr = "Quota exceeded"; 92 101 break; 93 102 } 94 103 ··· 1102 1093 dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); 1103 1094 btrfs_release_path(path); 1104 1095 1105 - key.objectid = subvol_objectid; 1106 - key.type = BTRFS_ROOT_ITEM_KEY; 1107 - key.offset = (u64)-1; 1108 - fs_root = btrfs_get_fs_root(fs_info, &key, true); 1096 + fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true); 1109 1097 if (IS_ERR(fs_root)) { 1110 1098 ret = PTR_ERR(fs_root); 1111 1099 fs_root = NULL; ··· 1217 1211 { 1218 1212 struct inode *inode; 1219 1213 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1220 - struct btrfs_key key; 1221 1214 int err; 1222 1215 1223 1216 sb->s_maxbytes = MAX_LFS_FILESIZE; ··· 1244 1239 return err; 1245 1240 } 1246 1241 1247 - key.objectid = BTRFS_FIRST_FREE_OBJECTID; 1248 - key.type = BTRFS_INODE_ITEM_KEY; 1249 - key.offset = 0; 1250 - inode = btrfs_iget(sb, &key, fs_info->fs_root); 1242 + inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root); 1251 1243 if (IS_ERR(inode)) { 1252 1244 err = PTR_ERR(inode); 1253 1245 goto fail_close;
+32 -46
fs/btrfs/transaction.c
··· 21 21 #include "dev-replace.h" 22 22 #include "qgroup.h" 23 23 #include "block-group.h" 24 + #include "space-info.h" 24 25 25 26 #define BTRFS_ROOT_TRANS_TAG 0 26 27 ··· 142 141 struct btrfs_block_group, 143 142 bg_list); 144 143 list_del_init(&cache->bg_list); 145 - btrfs_put_block_group_trimming(cache); 144 + btrfs_unfreeze_block_group(cache); 146 145 btrfs_put_block_group(cache); 147 146 } 148 147 WARN_ON(!list_empty(&transaction->dev_update_list)); ··· 349 348 } 350 349 351 350 /* 352 - * this does all the record keeping required to make sure that a reference 353 - * counted root is properly recorded in a given transaction. This is required 354 - * to make sure the old root from before we joined the transaction is deleted 355 - * when the transaction commits 351 + * This does all the record keeping required to make sure that a shareable root 352 + * is properly recorded in a given transaction. This is required to make sure 353 + * the old root from before we joined the transaction is deleted when the 354 + * transaction commits. 356 355 */ 357 356 static int record_root_in_trans(struct btrfs_trans_handle *trans, 358 357 struct btrfs_root *root, ··· 360 359 { 361 360 struct btrfs_fs_info *fs_info = root->fs_info; 362 361 363 - if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) && 362 + if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 364 363 root->last_trans < trans->transid) || force) { 365 364 WARN_ON(root == fs_info->extent_root); 366 365 WARN_ON(!force && root->commit_root != root->node); ··· 439 438 { 440 439 struct btrfs_fs_info *fs_info = root->fs_info; 441 440 442 - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 441 + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 443 442 return 0; 444 443 445 444 /* ··· 504 503 struct btrfs_fs_info *fs_info = root->fs_info; 505 504 506 505 if (!fs_info->reloc_ctl || 507 - !test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 506 + !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || 508 507 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 509 508 root->reloc_root) 510 509 return false; ··· 524 523 u64 num_bytes = 0; 525 524 u64 qgroup_reserved = 0; 526 525 bool reloc_reserved = false; 526 + bool do_chunk_alloc = false; 527 527 int ret; 528 528 529 529 /* Send isn't supposed to start transactions. */ ··· 565 563 * refill that amount for whatever is missing in the reserve. 566 564 */ 567 565 num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); 568 - if (delayed_refs_rsv->full == 0) { 566 + if (flush == BTRFS_RESERVE_FLUSH_ALL && 567 + delayed_refs_rsv->full == 0) { 569 568 delayed_refs_bytes = num_bytes; 570 569 num_bytes <<= 1; 571 570 } ··· 587 584 delayed_refs_bytes); 588 585 num_bytes -= delayed_refs_bytes; 589 586 } 587 + 588 + if (rsv->space_info->force_alloc) 589 + do_chunk_alloc = true; 590 590 } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && 591 591 !delayed_refs_rsv->full) { 592 592 /* ··· 672 666 current->journal_info = h; 673 667 674 668 /* 669 + * If the space_info is marked ALLOC_FORCE then we'll get upgraded to 670 + * ALLOC_FORCE the first run through, and then we won't allocate for 671 + * anybody else who races in later. We don't care about the return 672 + * value here. 
673 + */ 674 + if (do_chunk_alloc && num_bytes) { 675 + u64 flags = h->block_rsv->space_info->flags; 676 + 677 + btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), 678 + CHUNK_ALLOC_NO_FORCE); 679 + } 680 + 681 + /* 675 682 * btrfs_record_root_in_trans() needs to alloc new extents, and may 676 683 * call btrfs_join_transaction() while we're also starting a 677 684 * transaction. ··· 718 699 719 700 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( 720 701 struct btrfs_root *root, 721 - unsigned int num_items, 722 - int min_factor) 702 + unsigned int num_items) 723 703 { 724 - struct btrfs_fs_info *fs_info = root->fs_info; 725 - struct btrfs_trans_handle *trans; 726 - u64 num_bytes; 727 - int ret; 728 - 729 - /* 730 - * We have two callers: unlink and block group removal. The 731 - * former should succeed even if we will temporarily exceed 732 - * quota and the latter operates on the extent root so 733 - * qgroup enforcement is ignored anyway. 734 - */ 735 - trans = start_transaction(root, num_items, TRANS_START, 736 - BTRFS_RESERVE_FLUSH_ALL, false); 737 - if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 738 - return trans; 739 - 740 - trans = btrfs_start_transaction(root, 0); 741 - if (IS_ERR(trans)) 742 - return trans; 743 - 744 - num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); 745 - ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv, 746 - num_bytes, min_factor); 747 - if (ret) { 748 - btrfs_end_transaction(trans); 749 - return ERR_PTR(ret); 750 - } 751 - 752 - trans->block_rsv = &fs_info->trans_block_rsv; 753 - trans->bytes_reserved = num_bytes; 754 - trace_btrfs_space_reservation(fs_info, "transaction", 755 - trans->transid, num_bytes, 1); 756 - 757 - return trans; 704 + return start_transaction(root, num_items, TRANS_START, 705 + BTRFS_RESERVE_FLUSH_ALL_STEAL, false); 758 706 } 759 707 760 708 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) ··· 1630 1644 } 1631 1645 1632 1646 key.offset = (u64)-1; 1633 - pending->snap = btrfs_get_fs_root(fs_info, &key, true); 1647 + pending->snap = btrfs_get_fs_root(fs_info, objectid, true); 1634 1648 if (IS_ERR(pending->snap)) { 1635 1649 ret = PTR_ERR(pending->snap); 1636 1650 btrfs_abort_transaction(trans, ret);
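
btrfs_start_transaction_fallback_global_rsv() loses its open-coded retry: instead of starting a transaction, failing with ENOSPC, and manually migrating bytes from the global reserve, it now passes BTRFS_RESERVE_FLUSH_ALL_STEAL and lets the reservation path do the stealing, which is also why the min_factor knob can be dropped from the prototype in the next hunk. A deliberately toy user-space model of that fallback; pool sizes and names are made up:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pool { long avail; };

static struct pool space      = { .avail = 100 };
static struct pool global_rsv = { .avail = 512 };

/* Reserve bytes; with 'steal' set, dip into the global reserve on ENOSPC. */
static int reserve(long bytes, bool steal)
{
	if (space.avail >= bytes) {
		space.avail -= bytes;
		return 0;
	}
	if (steal && global_rsv.avail >= bytes) {
		global_rsv.avail -= bytes;
		return 0;
	}
	return -ENOSPC;
}

int main(void)
{
	/* a plain FLUSH_ALL-style attempt fails: prints -28 */
	printf("%d\n", reserve(200, false));
	/* a FLUSH_ALL_STEAL-style attempt falls back to the reserve: prints 0 */
	printf("%d\n", reserve(200, true));
	return 0;
}
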
+1 -2
fs/btrfs/transaction.h
··· 193 193 unsigned int num_items); 194 194 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( 195 195 struct btrfs_root *root, 196 - unsigned int num_items, 197 - int min_factor); 196 + unsigned int num_items); 198 197 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); 199 198 struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root); 200 199 struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
-4
fs/btrfs/tree-checker.c
··· 957 957 return 0; 958 958 } 959 959 960 - /* Inode item error output has the same format as dir_item_err() */ 961 - #define inode_item_err(eb, slot, fmt, ...) \ 962 - dir_item_err(eb, slot, fmt, __VA_ARGS__) 963 - 964 960 static int check_inode_item(struct extent_buffer *leaf, 965 961 struct btrfs_key *key, int slot) 966 962 {
+1 -1
fs/btrfs/tree-defrag.c
··· 35 35 goto out; 36 36 } 37 37 38 - if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 38 + if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 39 39 goto out; 40 40 41 41 path = btrfs_alloc_path();
+81 -93
fs/btrfs/tree-log.c
··· 505 505 */ 506 506 if (S_ISREG(btrfs_inode_mode(eb, src_item)) && 507 507 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) && 508 - ino_size != 0) { 509 - struct btrfs_map_token token; 510 - 511 - btrfs_init_map_token(&token, dst_eb); 512 - btrfs_set_token_inode_size(dst_eb, dst_item, 513 - ino_size, &token); 514 - } 508 + ino_size != 0) 509 + btrfs_set_inode_size(dst_eb, dst_item, ino_size); 515 510 goto no_copy; 516 511 } 517 512 ··· 550 555 static noinline struct inode *read_one_inode(struct btrfs_root *root, 551 556 u64 objectid) 552 557 { 553 - struct btrfs_key key; 554 558 struct inode *inode; 555 559 556 - key.objectid = objectid; 557 - key.type = BTRFS_INODE_ITEM_KEY; 558 - key.offset = 0; 559 - inode = btrfs_iget(root->fs_info->sb, &key, root); 560 + inode = btrfs_iget(root->fs_info->sb, objectid, root); 560 561 if (IS_ERR(inode)) 561 562 inode = NULL; 562 563 return inode; ··· 3290 3299 3291 3300 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, 3292 3301 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); 3302 + extent_io_tree_release(&log->log_csum_range); 3293 3303 btrfs_put_root(log); 3294 3304 } 3295 3305 ··· 3808 3816 3809 3817 found_key.offset = 0; 3810 3818 found_key.type = 0; 3811 - ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3812 - &start_slot); 3819 + ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot); 3813 3820 if (ret < 0) 3814 3821 break; 3815 3822 ··· 3844 3853 * just to say 'this inode exists' and a logging 3845 3854 * to say 'update this inode with these values' 3846 3855 */ 3847 - btrfs_set_token_inode_generation(leaf, item, 0, &token); 3848 - btrfs_set_token_inode_size(leaf, item, logged_isize, &token); 3856 + btrfs_set_token_inode_generation(&token, item, 0); 3857 + btrfs_set_token_inode_size(&token, item, logged_isize); 3849 3858 } else { 3850 - btrfs_set_token_inode_generation(leaf, item, 3851 - BTRFS_I(inode)->generation, 3852 - &token); 3853 - btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); 3859 + btrfs_set_token_inode_generation(&token, item, 3860 + BTRFS_I(inode)->generation); 3861 + btrfs_set_token_inode_size(&token, item, inode->i_size); 3854 3862 } 3855 3863 3856 - btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3857 - btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3858 - btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3859 - btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3864 + btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 3865 + btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 3866 + btrfs_set_token_inode_mode(&token, item, inode->i_mode); 3867 + btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 3860 3868 3861 - btrfs_set_token_timespec_sec(leaf, &item->atime, 3862 - inode->i_atime.tv_sec, &token); 3863 - btrfs_set_token_timespec_nsec(leaf, &item->atime, 3864 - inode->i_atime.tv_nsec, &token); 3869 + btrfs_set_token_timespec_sec(&token, &item->atime, 3870 + inode->i_atime.tv_sec); 3871 + btrfs_set_token_timespec_nsec(&token, &item->atime, 3872 + inode->i_atime.tv_nsec); 3865 3873 3866 - btrfs_set_token_timespec_sec(leaf, &item->mtime, 3867 - inode->i_mtime.tv_sec, &token); 3868 - btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3869 - inode->i_mtime.tv_nsec, &token); 3874 + btrfs_set_token_timespec_sec(&token, &item->mtime, 3875 + inode->i_mtime.tv_sec); 3876 + btrfs_set_token_timespec_nsec(&token, &item->mtime, 3877 + inode->i_mtime.tv_nsec); 3870 3878 3871 - btrfs_set_token_timespec_sec(leaf, &item->ctime, 
3872 - inode->i_ctime.tv_sec, &token); 3873 - btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3874 - inode->i_ctime.tv_nsec, &token); 3879 + btrfs_set_token_timespec_sec(&token, &item->ctime, 3880 + inode->i_ctime.tv_sec); 3881 + btrfs_set_token_timespec_nsec(&token, &item->ctime, 3882 + inode->i_ctime.tv_nsec); 3875 3883 3876 - btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3877 - &token); 3884 + btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode)); 3878 3885 3879 - btrfs_set_token_inode_sequence(leaf, item, 3880 - inode_peek_iversion(inode), &token); 3881 - btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3882 - btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3883 - btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3884 - btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3886 + btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 3887 + btrfs_set_token_inode_transid(&token, item, trans->transid); 3888 + btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 3889 + btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); 3890 + btrfs_set_token_inode_block_group(&token, item, 0); 3885 3891 } 3886 3892 3887 3893 static int log_inode_item(struct btrfs_trans_handle *trans, ··· 3904 3916 struct btrfs_root *log_root, 3905 3917 struct btrfs_ordered_sum *sums) 3906 3918 { 3919 + const u64 lock_end = sums->bytenr + sums->len - 1; 3920 + struct extent_state *cached_state = NULL; 3907 3921 int ret; 3908 3922 3923 + /* 3924 + * Serialize logging for checksums. This is to avoid racing with the 3925 + * same checksum being logged by another task that is logging another 3926 + * file which happens to refer to the same extent as well. Such races 3927 + * can leave checksum items in the log with overlapping ranges. 3928 + */ 3929 + ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr, 3930 + lock_end, &cached_state); 3931 + if (ret) 3932 + return ret; 3909 3933 /* 3910 3934 * Due to extent cloning, we might have logged a csum item that covers a 3911 3935 * subrange of a cloned extent, and later we can end up logging a csum ··· 3928 3928 * trim and adjust) any existing csum items in the log for this range. 
3929 3929 */ 3930 3930 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len); 3931 - if (ret) 3932 - return ret; 3931 + if (!ret) 3932 + ret = btrfs_csum_file_blocks(trans, log_root, sums); 3933 3933 3934 - return btrfs_csum_file_blocks(trans, log_root, sums); 3934 + unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end, 3935 + &cached_state); 3936 + 3937 + return ret; 3935 3938 } 3936 3939 3937 3940 static noinline int copy_items(struct btrfs_trans_handle *trans, ··· 4167 4164 fi = btrfs_item_ptr(leaf, path->slots[0], 4168 4165 struct btrfs_file_extent_item); 4169 4166 4170 - btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, 4171 - &token); 4167 + btrfs_set_token_file_extent_generation(&token, fi, trans->transid); 4172 4168 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4173 - btrfs_set_token_file_extent_type(leaf, fi, 4174 - BTRFS_FILE_EXTENT_PREALLOC, 4175 - &token); 4169 + btrfs_set_token_file_extent_type(&token, fi, 4170 + BTRFS_FILE_EXTENT_PREALLOC); 4176 4171 else 4177 - btrfs_set_token_file_extent_type(leaf, fi, 4178 - BTRFS_FILE_EXTENT_REG, 4179 - &token); 4172 + btrfs_set_token_file_extent_type(&token, fi, 4173 + BTRFS_FILE_EXTENT_REG); 4180 4174 4181 4175 block_len = max(em->block_len, em->orig_block_len); 4182 4176 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4183 - btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4184 - em->block_start, 4185 - &token); 4186 - btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4187 - &token); 4177 + btrfs_set_token_file_extent_disk_bytenr(&token, fi, 4178 + em->block_start); 4179 + btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); 4188 4180 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4189 - btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4181 + btrfs_set_token_file_extent_disk_bytenr(&token, fi, 4190 4182 em->block_start - 4191 - extent_offset, &token); 4192 - btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4193 - &token); 4183 + extent_offset); 4184 + btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); 4194 4185 } else { 4195 - btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 4196 - btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 4197 - &token); 4186 + btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0); 4187 + btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0); 4198 4188 } 4199 4189 4200 - btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); 4201 - btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 4202 - btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 4203 - btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 4204 - &token); 4205 - btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 4206 - btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 4190 + btrfs_set_token_file_extent_offset(&token, fi, extent_offset); 4191 + btrfs_set_token_file_extent_num_bytes(&token, fi, em->len); 4192 + btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes); 4193 + btrfs_set_token_file_extent_compression(&token, fi, em->compress_type); 4194 + btrfs_set_token_file_extent_encryption(&token, fi, 0); 4195 + btrfs_set_token_file_extent_other_encoding(&token, fi, 0); 4207 4196 btrfs_mark_buffer_dirty(leaf); 4208 4197 4209 4198 btrfs_release_path(path); ··· 4331 4336 } 4332 4337 } 4333 4338 } 4334 - if (ins_nr > 0) { 4339 + if (ins_nr > 0) 4335 4340 ret = copy_items(trans, inode, dst_path, path, 4336 4341 start_slot, 
ins_nr, 1, 0); 4337 - if (ret > 0) 4338 - ret = 0; 4339 - } 4340 4342 out: 4341 4343 btrfs_release_path(path); 4342 4344 btrfs_free_path(dst_path); ··· 4827 4835 4828 4836 btrfs_release_path(path); 4829 4837 4830 - key.objectid = ino; 4831 - key.type = BTRFS_INODE_ITEM_KEY; 4832 - key.offset = 0; 4833 - inode = btrfs_iget(fs_info->sb, &key, root); 4838 + inode = btrfs_iget(fs_info->sb, ino, root); 4834 4839 /* 4835 4840 * If the other inode that had a conflicting dir entry was 4836 4841 * deleted in the current transaction, we need to log its parent ··· 4836 4847 if (IS_ERR(inode)) { 4837 4848 ret = PTR_ERR(inode); 4838 4849 if (ret == -ENOENT) { 4839 - key.objectid = parent; 4840 - inode = btrfs_iget(fs_info->sb, &key, root); 4850 + inode = btrfs_iget(fs_info->sb, parent, root); 4841 4851 if (IS_ERR(inode)) { 4842 4852 ret = PTR_ERR(inode); 4843 4853 } else { ··· 5575 5587 continue; 5576 5588 5577 5589 btrfs_release_path(path); 5578 - di_inode = btrfs_iget(fs_info->sb, &di_key, root); 5590 + di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root); 5579 5591 if (IS_ERR(di_inode)) { 5580 5592 ret = PTR_ERR(di_inode); 5581 5593 goto next_dir_inode; ··· 5701 5713 cur_offset = item_size; 5702 5714 } 5703 5715 5704 - dir_inode = btrfs_iget(fs_info->sb, &inode_key, root); 5716 + dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid, 5717 + root); 5705 5718 /* 5706 5719 * If the parent inode was deleted, return an error to 5707 5720 * fallback to a transaction commit. This is to prevent ··· 5769 5780 int slot = path->slots[0]; 5770 5781 struct btrfs_key search_key; 5771 5782 struct inode *inode; 5783 + u64 ino; 5772 5784 int ret = 0; 5773 5785 5774 5786 btrfs_release_path(path); 5775 5787 5788 + ino = found_key.offset; 5789 + 5776 5790 search_key.objectid = found_key.offset; 5777 5791 search_key.type = BTRFS_INODE_ITEM_KEY; 5778 5792 search_key.offset = 0; 5779 - inode = btrfs_iget(fs_info->sb, &search_key, root); 5793 + inode = btrfs_iget(fs_info->sb, ino, root); 5780 5794 if (IS_ERR(inode)) 5781 5795 return PTR_ERR(inode); 5782 5796 ··· 6124 6132 struct btrfs_trans_handle *trans; 6125 6133 struct btrfs_key key; 6126 6134 struct btrfs_key found_key; 6127 - struct btrfs_key tmp_key; 6128 6135 struct btrfs_root *log; 6129 6136 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; 6130 6137 struct walk_control wc = { ··· 6185 6194 goto error; 6186 6195 } 6187 6196 6188 - tmp_key.objectid = found_key.offset; 6189 - tmp_key.type = BTRFS_ROOT_ITEM_KEY; 6190 - tmp_key.offset = (u64)-1; 6191 - 6192 - wc.replay_dest = btrfs_get_fs_root(fs_info, &tmp_key, true); 6197 + wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset, 6198 + true); 6193 6199 if (IS_ERR(wc.replay_dest)) { 6194 6200 ret = PTR_ERR(wc.replay_dest); 6195 6201
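
The log_csums() change is the fix for the corrupt-log problem called out in the merge message: two tasks fsyncing files that share an extent could interleave their delete-then-insert of the same checksum range, leaving overlapping csum items in the log. The new log_csum_range io tree serializes them per range. A compressed user-space model of why the delete+insert pair must be atomic; a single mutex stands in for the per-range extent lock, and a flag array stands in for the csum items:

#include <pthread.h>
#include <stdio.h>

#define NBLOCKS 16
static int csum_covered[NBLOCKS];
static pthread_mutex_t csum_range_lock = PTHREAD_MUTEX_INITIALIZER;

/* Log checksums for [start, start+len): delete overlaps, then insert. */
static int log_csums(unsigned start, unsigned len)
{
	pthread_mutex_lock(&csum_range_lock);
	/* drop any existing items overlapping the range ... */
	for (unsigned i = start; i < start + len; i++)
		csum_covered[i] = 0;
	/* ... then insert the new ones; no other task can interleave here */
	for (unsigned i = start; i < start + len; i++)
		csum_covered[i] = 1;
	pthread_mutex_unlock(&csum_range_lock);
	return 0;
}

int main(void)
{
	log_csums(2, 4);
	log_csums(4, 4);	/* overlapping range, safely serialized */
	for (int i = 0; i < NBLOCKS; i++)
		printf("%d", csum_covered[i]);
	printf("\n");
	return 0;
}

Without the lock, a second task's delete pass could land between another task's delete and insert, which is exactly the interleaving the real extent lock forbids.
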
+1 -5
fs/btrfs/uuid-tree.c
··· 257 257 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info, 258 258 u8 *uuid, u8 type, u64 subvolid) 259 259 { 260 - struct btrfs_key key; 261 260 int ret = 0; 262 261 struct btrfs_root *subvol_root; 263 262 ··· 264 265 type != BTRFS_UUID_KEY_RECEIVED_SUBVOL) 265 266 goto out; 266 267 267 - key.objectid = subvolid; 268 - key.type = BTRFS_ROOT_ITEM_KEY; 269 - key.offset = (u64)-1; 270 - subvol_root = btrfs_get_fs_root(fs_info, &key, true); 268 + subvol_root = btrfs_get_fs_root(fs_info, subvolid, true); 271 269 if (IS_ERR(subvol_root)) { 272 270 ret = PTR_ERR(subvol_root); 273 271 if (ret == -ENOENT)
+44 -36
fs/btrfs/volumes.c
··· 280 280 * ============ 281 281 * 282 282 * uuid_mutex 283 - * volume_mutex 284 - * device_list_mutex 285 - * chunk_mutex 286 - * balance_mutex 283 + * device_list_mutex 284 + * chunk_mutex 285 + * balance_mutex 287 286 * 288 287 * 289 288 * Exclusive operations, BTRFS_FS_EXCL_OP ··· 1041 1042 &device->dev_state)) { 1042 1043 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, 1043 1044 &device->dev_state) && 1045 + !test_bit(BTRFS_DEV_STATE_MISSING, 1046 + &device->dev_state) && 1044 1047 (!latest_dev || 1045 1048 device->generation > latest_dev->generation)) { 1046 1049 latest_dev = device; ··· 1186 1185 { 1187 1186 struct btrfs_device *device; 1188 1187 struct btrfs_device *latest_dev = NULL; 1189 - int ret = 0; 1190 1188 1191 1189 flags |= FMODE_EXCL; 1192 1190 ··· 1198 1198 device->generation > latest_dev->generation) 1199 1199 latest_dev = device; 1200 1200 } 1201 - if (fs_devices->open_devices == 0) { 1202 - ret = -EINVAL; 1203 - goto out; 1204 - } 1201 + if (fs_devices->open_devices == 0) 1202 + return -EINVAL; 1203 + 1205 1204 fs_devices->opened = 1; 1206 1205 fs_devices->latest_bdev = latest_dev->bdev; 1207 1206 fs_devices->total_rw_bytes = 0; 1208 1207 fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR; 1209 - out: 1210 - return ret; 1208 + 1209 + return 0; 1211 1210 } 1212 1211 1213 1212 static int devid_cmp(void *priv, struct list_head *a, struct list_head *b) ··· 1250 1251 put_page(page); 1251 1252 } 1252 1253 1253 - static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr, 1254 - struct page **page, 1255 - struct btrfs_super_block **disk_super) 1254 + static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, 1255 + u64 bytenr) 1256 1256 { 1257 + struct btrfs_super_block *disk_super; 1258 + struct page *page; 1257 1259 void *p; 1258 1260 pgoff_t index; 1259 1261 1260 1262 /* make sure our super fits in the device */ 1261 1263 if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) 1262 - return 1; 1264 + return ERR_PTR(-EINVAL); 1263 1265 1264 1266 /* make sure our super fits in the page */ 1265 - if (sizeof(**disk_super) > PAGE_SIZE) 1266 - return 1; 1267 + if (sizeof(*disk_super) > PAGE_SIZE) 1268 + return ERR_PTR(-EINVAL); 1267 1269 1268 1270 /* make sure our super doesn't straddle pages on disk */ 1269 1271 index = bytenr >> PAGE_SHIFT; 1270 - if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index) 1271 - return 1; 1272 + if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) 1273 + return ERR_PTR(-EINVAL); 1272 1274 1273 1275 /* pull in the page with our super */ 1274 - *page = read_cache_page_gfp(bdev->bd_inode->i_mapping, 1275 - index, GFP_KERNEL); 1276 + page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); 1276 1277 1277 - if (IS_ERR(*page)) 1278 - return 1; 1278 + if (IS_ERR(page)) 1279 + return ERR_CAST(page); 1279 1280 1280 - p = page_address(*page); 1281 + p = page_address(page); 1281 1282 1282 1283 /* align our pointer to the offset of the super block */ 1283 - *disk_super = p + offset_in_page(bytenr); 1284 + disk_super = p + offset_in_page(bytenr); 1284 1285 1285 - if (btrfs_super_bytenr(*disk_super) != bytenr || 1286 - btrfs_super_magic(*disk_super) != BTRFS_MAGIC) { 1286 + if (btrfs_super_bytenr(disk_super) != bytenr || 1287 + btrfs_super_magic(disk_super) != BTRFS_MAGIC) { 1287 1288 btrfs_release_disk_super(p); 1288 - return 1; 1289 + return ERR_PTR(-EINVAL); 1289 1290 } 1290 1291 1291 - if ((*disk_super)->label[0] && 1292 - (*disk_super)->label[BTRFS_LABEL_SIZE - 1]) 1293 - 
(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0'; 1292 + if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) 1293 + disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; 1294 1294 1295 - return 0; 1295 + return disk_super; 1296 1296 } 1297 1297 1298 1298 int btrfs_forget_devices(const char *path) ··· 1317 1319 bool new_device_added = false; 1318 1320 struct btrfs_device *device = NULL; 1319 1321 struct block_device *bdev; 1320 - struct page *page; 1321 1322 u64 bytenr; 1322 1323 1323 1324 lockdep_assert_held(&uuid_mutex); ··· 1334 1337 if (IS_ERR(bdev)) 1335 1338 return ERR_CAST(bdev); 1336 1339 1337 - if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) { 1338 - device = ERR_PTR(-EINVAL); 1340 + disk_super = btrfs_read_disk_super(bdev, bytenr); 1341 + if (IS_ERR(disk_super)) { 1342 + device = ERR_CAST(disk_super); 1339 1343 goto error_bdev_put; 1340 1344 } 1341 1345 ··· 2661 2663 ret = btrfs_commit_transaction(trans); 2662 2664 } 2663 2665 2664 - /* Update ctime/mtime for libblkid */ 2666 + /* 2667 + * Now that we have written a new super block to this device, check all 2668 + * other fs_devices list if device_path alienates any other scanned 2669 + * device. 2670 + * We can ignore the return value as it typically returns -EINVAL and 2671 + * only succeeds if the device was an alien. 2672 + */ 2673 + btrfs_forget_devices(device_path); 2674 + 2675 + /* Update ctime/mtime for blkid or udev */ 2665 2676 update_dev_time(device_path); 2677 + 2666 2678 return ret; 2667 2679 2668 2680 error_sysfs:
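
btrfs_read_disk_super() moves from the int-plus-output-parameters style to returning either the superblock or an encoded errno, which is what lets the caller collapse its error handling to ERR_CAST(). A user-space sketch of that ERR_PTR convention; the kernel's real macros live in include/linux/err.h, and these stand-ins only mimic their shape:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

/* Encode a negative errno in the top of the pointer space. */
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct super_block { int magic; };

/* Return the object on success or an encoded errno, as in the hunk. */
static struct super_block *read_super(int valid)
{
	struct super_block *sb;

	if (!valid)
		return ERR_PTR(-EINVAL);
	sb = malloc(sizeof(*sb));
	if (!sb)
		return ERR_PTR(-ENOMEM);
	sb->magic = 0x5f42; /* arbitrary */
	return sb;
}

int main(void)
{
	struct super_block *sb = read_super(0);

	if (IS_ERR(sb))
		printf("error: %ld\n", PTR_ERR(sb));	/* prints -22 */
	else
		free(sb);
	return 0;
}

One object returned, one error channel: the caller no longer juggles a page pointer, a superblock pointer, and an int that flattened every failure to -EINVAL.
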
-19
fs/direct-io.c
··· 386 386 spin_unlock_irqrestore(&dio->bio_lock, flags); 387 387 } 388 388 389 - /** 390 - * dio_end_io - handle the end io action for the given bio 391 - * @bio: The direct io bio thats being completed 392 - * 393 - * This is meant to be called by any filesystem that uses their own dio_submit_t 394 - * so that the DIO specific endio actions are dealt with after the filesystem 395 - * has done it's completion work. 396 - */ 397 - void dio_end_io(struct bio *bio) 398 - { 399 - struct dio *dio = bio->bi_private; 400 - 401 - if (dio->is_async) 402 - dio_bio_end_aio(bio); 403 - else 404 - dio_bio_end_io(bio); 405 - } 406 - EXPORT_SYMBOL_GPL(dio_end_io); 407 - 408 389 static inline void 409 390 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, 410 391 struct block_device *bdev,
+10 -7
fs/iomap/direct-io.c
··· 59 59 EXPORT_SYMBOL_GPL(iomap_dio_iopoll); 60 60 61 61 static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap, 62 - struct bio *bio) 62 + struct bio *bio, loff_t pos) 63 63 { 64 64 atomic_inc(&dio->ref); 65 65 ··· 67 67 bio_set_polled(bio, dio->iocb); 68 68 69 69 dio->submit.last_queue = bdev_get_queue(iomap->bdev); 70 - dio->submit.cookie = submit_bio(bio); 70 + if (dio->dops && dio->dops->submit_io) 71 + dio->submit.cookie = dio->dops->submit_io( 72 + file_inode(dio->iocb->ki_filp), 73 + iomap, bio, pos); 74 + else 75 + dio->submit.cookie = submit_bio(bio); 71 76 } 72 77 73 78 static ssize_t iomap_dio_complete(struct iomap_dio *dio) ··· 196 191 get_page(page); 197 192 __bio_add_page(bio, page, len, 0); 198 193 bio_set_op_attrs(bio, REQ_OP_WRITE, flags); 199 - iomap_dio_submit_bio(dio, iomap, bio); 194 + iomap_dio_submit_bio(dio, iomap, bio, pos); 200 195 } 201 196 202 197 static loff_t ··· 304 299 } 305 300 306 301 dio->size += n; 307 - pos += n; 308 302 copied += n; 309 303 310 304 nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES); 311 - iomap_dio_submit_bio(dio, iomap, bio); 305 + iomap_dio_submit_bio(dio, iomap, bio, pos); 306 + pos += n; 312 307 } while (nr_pages); 313 308 314 309 /* ··· 415 410 unsigned int flags = IOMAP_DIRECT; 416 411 struct blk_plug plug; 417 412 struct iomap_dio *dio; 418 - 419 - lockdep_assert_held(&inode->i_rwsem); 420 413 421 414 if (!count) 422 415 return 0;
+8
include/linux/bio.h
··· 169 169 #define bio_for_each_bvec(bvl, bio, iter) \ 170 170 __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) 171 171 172 + /* 173 + * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the 174 + * same reasons as bio_for_each_segment_all(). 175 + */ 176 + #define bio_for_each_bvec_all(bvl, bio, i) \ 177 + for (i = 0, bvl = bio_first_bvec_all(bio); \ 178 + i < (bio)->bi_vcnt; i++, bvl++) 179 + 180 172 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) 173 181 174 182 static inline unsigned bio_segments(struct bio *bio)
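
bio_for_each_bvec_all() walks the bio's own bvec table by index rather than through an iterator, so it sees full multi-page bvecs and, like bio_for_each_segment_all(), must only be used on a bio the caller owns outright. A user-space mock showing the shape of the loop; the structs are pared down to the fields the macro touches:

#include <stdio.h>

struct bio_vec { unsigned bv_len; };
struct bio {
	unsigned short bi_vcnt;
	struct bio_vec bi_io_vec[4];
};

/* In the kernel this is a real helper returning &bio->bi_io_vec[0]. */
#define bio_first_bvec_all(bio)	(&(bio)->bi_io_vec[0])

/* Same definition as the new macro above */
#define bio_for_each_bvec_all(bvl, bio, i)			\
	for (i = 0, bvl = bio_first_bvec_all(bio);		\
	     i < (bio)->bi_vcnt; i++, bvl++)

int main(void)
{
	struct bio b = { .bi_vcnt = 3,
			 .bi_io_vec = { { 4096 }, { 8192 }, { 512 } } };
	struct bio_vec *bv;
	unsigned total = 0;
	int i;

	bio_for_each_bvec_all(bv, &b, i)
		total += bv->bv_len;
	printf("%u bytes in %d bvecs\n", total, b.bi_vcnt);
	return 0;
}
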
+2 -2
include/linux/fs.h
··· 3148 3148 extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, 3149 3149 struct file *file_out, loff_t pos_out, 3150 3150 size_t *count, unsigned int flags); 3151 + extern ssize_t generic_file_buffered_read(struct kiocb *iocb, 3152 + struct iov_iter *to, ssize_t already_read); 3151 3153 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); 3152 3154 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); 3153 3155 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); ··· 3210 3208 /* filesystem does not support filling holes */ 3211 3209 DIO_SKIP_HOLES = 0x02, 3212 3210 }; 3213 - 3214 - void dio_end_io(struct bio *bio); 3215 3211 3216 3212 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, 3217 3213 struct block_device *bdev, struct iov_iter *iter,
+2
include/linux/iomap.h
··· 251 251 struct iomap_dio_ops { 252 252 int (*end_io)(struct kiocb *iocb, ssize_t size, int error, 253 253 unsigned flags); 254 + blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap, 255 + struct bio *bio, loff_t file_offset); 254 256 }; 255 257 256 258 ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
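
The new submit_io hook is how btrfs routes iomap-built bios through its own submission path; note that the fs/iomap/direct-io.c hunk above also defers pos += n until after submission, so the hook receives the bio's starting file offset. A user-space mock of the dispatch added in iomap_dio_submit_bio(); the types are pared-down stand-ins and myfs_submit_io is a hypothetical filesystem hook:

#include <stdio.h>

typedef unsigned blk_qc_t;
struct inode { int i_ino; };
struct iomap { unsigned long long addr; };
struct bio { unsigned size; };

struct iomap_dio_ops {
	/* loff_t modeled as long long here */
	blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap,
			      struct bio *bio, long long file_offset);
};

static blk_qc_t default_submit(struct bio *bio)
{
	printf("generic submit_bio, %u bytes\n", bio->size);
	return 0;
}

/* Mirrors iomap_dio_submit_bio(): prefer the fs hook when present. */
static blk_qc_t dio_submit(const struct iomap_dio_ops *dops,
			   struct inode *inode, struct iomap *iomap,
			   struct bio *bio, long long pos)
{
	if (dops && dops->submit_io)
		return dops->submit_io(inode, iomap, bio, pos);
	return default_submit(bio);
}

/* A filesystem hook, e.g. for checksum or RAID handling before submit. */
static blk_qc_t myfs_submit_io(struct inode *inode, struct iomap *iomap,
			       struct bio *bio, long long file_offset)
{
	(void)iomap;
	printf("fs hook: ino %d off %lld, %u bytes\n",
	       inode->i_ino, file_offset, bio->size);
	return 0;
}

int main(void)
{
	struct iomap_dio_ops dops = { .submit_io = myfs_submit_io };
	struct inode ino = { 257 };
	struct iomap map = { 0 };
	struct bio b = { 4096 };

	dio_submit(&dops, &ino, &map, &b, 0);	/* takes the fs hook */
	dio_submit(NULL, &ino, &map, &b, 0);	/* falls back to generic */
	return 0;
}
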
+1
include/trace/events/btrfs.h
··· 89 89 { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \ 90 90 { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \ 91 91 { IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT" }, \ 92 + { IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE" }, \ 92 93 { IO_TREE_SELFTEST, "SELFTEST" }) 93 94 94 95 #define BTRFS_GROUP_FLAGS \
-9
include/uapi/linux/btrfs_tree.h
··· 519 519 __le64 offset; 520 520 } __attribute__ ((__packed__)); 521 521 522 - /* old style backrefs item */ 523 - struct btrfs_extent_ref_v0 { 524 - __le64 root; 525 - __le64 generation; 526 - __le64 objectid; 527 - __le32 count; 528 - } __attribute__ ((__packed__)); 529 - 530 - 531 522 /* dev extents record free space on individual devices. The owner 532 523 * field points back to the chunk allocation mapping tree that allocated 533 524 * the extent. The chunk tree uuid field is a way to double check the owner
+2 -1
mm/filemap.c
··· 1991 1991 * * total number of bytes copied, including those the were already @written 1992 1992 * * negative error code if nothing was copied 1993 1993 */ 1994 - static ssize_t generic_file_buffered_read(struct kiocb *iocb, 1994 + ssize_t generic_file_buffered_read(struct kiocb *iocb, 1995 1995 struct iov_iter *iter, ssize_t written) 1996 1996 { 1997 1997 struct file *filp = iocb->ki_filp; ··· 2243 2243 file_accessed(filp); 2244 2244 return written ? written : error; 2245 2245 } 2246 + EXPORT_SYMBOL_GPL(generic_file_buffered_read); 2246 2247 2247 2248 /** 2248 2249 * generic_file_read_iter - generic filesystem read routine
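
Exporting generic_file_buffered_read() lets a filesystem's read_iter finish a partially-completed direct read through the page cache, passing along the byte count already read, which is the shape btrfs needs once its direct I/O goes through iomap. A user-space analogue of that carry-over; all names here are illustrative:

#include <stdio.h>
#include <string.h>

static const char data[] = "hello, iomap direct i/o world";

/* Pretend the direct path can only handle the first 'cap' bytes. */
static long direct_read(char *buf, long len, long cap)
{
	long n = len < cap ? len : cap;

	memcpy(buf, data, n);
	return n;
}

/* Mirrors generic_file_buffered_read(iocb, to, already_read): continues
 * where the direct path stopped and returns the combined total. */
static long buffered_read(char *buf, long len, long already_read)
{
	memcpy(buf + already_read, data + already_read, len - already_read);
	return len;
}

int main(void)
{
	char buf[64] = { 0 };
	long want = (long)(sizeof(data) - 1);
	long done = direct_read(buf, want, 5);

	if (done < want)
		done = buffered_read(buf, want, done);
	printf("%ld: %s\n", done, buf);
	return 0;
}
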