// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warnings in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs to be updated as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error "the lockdep keysets below assume BTRFS_MAX_LEVEL == 8"
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

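/*
 * btrfs_csum_data() feeds @data into crc32c with @seed as the starting
 * value; btrfs_csum_final() inverts the crc and stores it little-endian
 * into @result, matching the on-disk checksum format.
 */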
u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char result[BTRFS_CSUM_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		/*
		 * Note: we don't need to check for the err == 1 case here, as
		 * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
		 * and 'min_len = 32' and the currently implemented mapping
		 * algorithm we cannot cross a page boundary.
		 */
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	memset(result, 0, BTRFS_CSUM_SIZE);

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}

	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm.  Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		char result[sizeof(crc)];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, sizeof(result)))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}

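/*
 * Verify that @eb has the level and (for blocks already committed to disk)
 * the first key that the parent pointer told us to expect.  This helps
 * catch blocks that were written to the wrong location.
 */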
static int verify_level_key(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *eb, int level,
			    struct btrfs_key *first_key, u64 parent_transid)
{
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
#ifdef CONFIG_BTRFS_DEBUG
		WARN_ON(1);
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
#endif
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid races, which is impossible
	 * here.  So we only check tree blocks which have been read from disk,
	 * whose generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;
	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

#ifdef CONFIG_BTRFS_DEBUG
	if (ret) {
		WARN_ON(1);
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
	}
#endif
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						  parent_transid, 0))
				ret = -EIO;
			else if (verify_level_key(fs_info, eb, level,
						  first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

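/*
 * Return 0 if the fsid recorded in @eb's header matches this filesystem
 * (or one of its seed devices), nonzero otherwise.
 */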
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		u8 *metadata_uuid;

		/*
		 * Checking the incompat flag is only valid for the current
		 * fs.  For seed devices it's forbidden to have their uuid
		 * changed so reading ->fsid in this case is fine
		 */
		if (fs_devices == fs_info->fs_devices &&
		    btrfs_fs_incompat(fs_info, METADATA_UUID))
			metadata_uuid = fs_devices->metadata_uuid;
		else
			metadata_uuid = fs_devices->fsid;

		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

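/*
 * End-io verification for metadata reads: checks the block's bytenr, fsid,
 * level and checksum, runs the tree checker, and only then marks the
 * extent buffer uptodate.
 */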
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(fs_info, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

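/*
 * bio end_io callback: defer the real completion work to the workqueue
 * that matches the bio's direction and metadata type.
 */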
static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

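/*
 * Hook @bio so its completion is handled by end_workqueue_bio() above,
 * preserving the original bi_private/bi_end_io to be restored later.
 */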
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
				 enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

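/* worker entry point for the checksumming half of an async bio submission */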
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.  All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	struct inode *inode;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	inode = async->private_data;

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
			async->mirror_num, 1);
	if (ret) {
		async->bio->bi_status = ret;
		bio_endio(async->bio);
	}
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

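/*
 * Queue @bio for async submission: checksumming runs in
 * run_one_async_start() on a worker thread and the bio is mapped to the
 * device in run_one_async_done().
 */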
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					   u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum here; the bio is handed to
	 * btrfs_map_bio later, in run_one_async_done.
	 */
	return btree_csum_one_bio(bio);
}

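/*
 * Decide whether a metadata write should be checksummed asynchronously.
 * With hardware-accelerated crc32c (SSE4.2) the inline path is cheap
 * enough that we skip the worker threads.
 */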
static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

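/*
 * Submission hook for btree pages: reads get end-io checksum verification,
 * writes are checksummed either inline or via the async worker path.
 */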
static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					       BTRFS_DIRTY_METADATA_THRESH,
					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};

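/* start a best-effort, non-blocking read of the tree block at @bytenr */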
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, 0);
	free_extent_buffer(buf);
}

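/*
 * Readahead a tree block from a specific mirror.  On success *eb holds a
 * reference to the uptodate buffer; if the block isn't uptodate yet, the
 * reference is dropped and *eb is left untouched.
 */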
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			     int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * Read tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

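/*
 * Clear the dirty bit of a tree block that was COWed or freed in the
 * currently running transaction, so it is never written back.
 */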
void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

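/* initialize the in-memory fields of a freshly allocated btrfs_root */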
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

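/*
 * Look up a root item in @tree_root and read the tree block it references,
 * returning a newly allocated in-memory root (or an ERR_PTR).
 */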
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

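/*
 * Finish setting up a root that came from disk: inode caches, subvolume
 * writer tracking, anonymous bdev and the highest used objectid.
 */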
int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					  &root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible for calling btrfs_free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

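/*
 * Return the in-memory root for @location, reading it from disk and
 * inserting it into the radix tree on first use.  The well-known global
 * trees are handled up front without touching the radix tree.
 */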
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

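/*
 * Background thread that runs delayed iputs, deletes dead snapshots and
 * unused block groups, and kicks off inode defrag when it is safe to do so.
 */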
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(fs_info);
		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * so we needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex.  So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

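/*
 * Background thread that commits the running transaction once it is old
 * enough (fs_info->commit_interval) or once a commit has been requested.
 */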
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = ktime_get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
		    (!btrfs_transaction_blocked(fs_info) ||
		     cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of
 * root backups.  The index of the newest entry is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}

/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			       btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			       btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

1955/*
1956 * this copies info out of the root backup array and back into
1957 * the in-memory super block. It is meant to help iterate through
1958 * the array, so you send it the number of backups you've already
1959 * tried and the last backup index you used.
1960 *
1961 * this returns -1 when it has tried all the backups
1962 */
1963static noinline int next_root_backup(struct btrfs_fs_info *info,
1964 struct btrfs_super_block *super,
1965 int *num_backups_tried, int *backup_index)
1966{
1967 struct btrfs_root_backup *root_backup;
1968 int newest = *backup_index;
1969
1970 if (*num_backups_tried == 0) {
1971 u64 gen = btrfs_super_generation(super);
1972
1973 newest = find_newest_super_backup(info, gen);
1974 if (newest == -1)
1975 return -1;
1976
1977 *backup_index = newest;
1978 *num_backups_tried = 1;
1979 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1980 /* we've tried all the backups, all done */
1981 return -1;
1982 } else {
1983 /* jump to the next oldest backup */
1984 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1985 BTRFS_NUM_BACKUP_ROOTS;
1986 *backup_index = newest;
1987 *num_backups_tried += 1;
1988 }
1989 root_backup = super->super_roots + newest;
1990
1991 btrfs_set_super_generation(super,
1992 btrfs_backup_tree_root_gen(root_backup));
1993 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1994 btrfs_set_super_root_level(super,
1995 btrfs_backup_tree_root_level(root_backup));
1996 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1997
1998 /*
1999 * FIXME: the total bytes and num_devices need to match, or else we
2000 * should require an fsck
2001 */
2002 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2003 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2004 return 0;
2005}
2006
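#if 0
/*
 * Hypothetical usage sketch (not part of the original file): walk the
 * backup ring from the newest entry backwards, the way the mount-time
 * recovery path below does via its retry label.
 */
static void try_all_backups(struct btrfs_fs_info *fs_info)
{
	int num_backups_tried = 0;
	int backup_index = 0;

	while (next_root_backup(fs_info, fs_info->super_copy,
				&num_backups_tried, &backup_index) == 0) {
		/* super_copy now points at the next-oldest backup roots */
	}
}
#endif
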
2007/* helper to clean up workers */
2008static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2009{
2010 btrfs_destroy_workqueue(fs_info->fixup_workers);
2011 btrfs_destroy_workqueue(fs_info->delalloc_workers);
2012 btrfs_destroy_workqueue(fs_info->workers);
2013 btrfs_destroy_workqueue(fs_info->endio_workers);
2014 btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2015 btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2016 btrfs_destroy_workqueue(fs_info->rmw_workers);
2017 btrfs_destroy_workqueue(fs_info->endio_write_workers);
2018 btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2019 btrfs_destroy_workqueue(fs_info->submit_workers);
2020 btrfs_destroy_workqueue(fs_info->delayed_workers);
2021 btrfs_destroy_workqueue(fs_info->caching_workers);
2022 btrfs_destroy_workqueue(fs_info->readahead_workers);
2023 btrfs_destroy_workqueue(fs_info->flush_workers);
2024 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2025 btrfs_destroy_workqueue(fs_info->extent_workers);
2026 /*
2027 * Now that all other work queues are destroyed, we can safely destroy
2028 * the queues used for metadata I/O, since tasks from those other work
2029 * queues can do metadata I/O operations.
2030 */
2031 btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2032 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2033}
2034
2035static void free_root_extent_buffers(struct btrfs_root *root)
2036{
2037 if (root) {
2038 free_extent_buffer(root->node);
2039 free_extent_buffer(root->commit_root);
2040 root->node = NULL;
2041 root->commit_root = NULL;
2042 }
2043}
2044
2045/* helper to clean up tree roots */
2046static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2047{
2048 free_root_extent_buffers(info->tree_root);
2049
2050 free_root_extent_buffers(info->dev_root);
2051 free_root_extent_buffers(info->extent_root);
2052 free_root_extent_buffers(info->csum_root);
2053 free_root_extent_buffers(info->quota_root);
2054 free_root_extent_buffers(info->uuid_root);
2055 if (chunk_root)
2056 free_root_extent_buffers(info->chunk_root);
2057 free_root_extent_buffers(info->free_space_root);
2058}
2059
2060void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2061{
2062 int ret;
2063 struct btrfs_root *gang[8];
2064 int i;
2065
2066 while (!list_empty(&fs_info->dead_roots)) {
2067 gang[0] = list_entry(fs_info->dead_roots.next,
2068 struct btrfs_root, root_list);
2069 list_del(&gang[0]->root_list);
2070
2071 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2072 btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2073 } else {
2074 free_extent_buffer(gang[0]->node);
2075 free_extent_buffer(gang[0]->commit_root);
2076 btrfs_put_fs_root(gang[0]);
2077 }
2078 }
2079
2080 while (1) {
2081 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2082 (void **)gang, 0,
2083 ARRAY_SIZE(gang));
2084 if (!ret)
2085 break;
2086 for (i = 0; i < ret; i++)
2087 btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2088 }
2089
2090 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2091 btrfs_free_log_root_tree(NULL, fs_info);
2092 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2093 }
2094}
2095
2096static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2097{
2098 mutex_init(&fs_info->scrub_lock);
2099 atomic_set(&fs_info->scrubs_running, 0);
2100 atomic_set(&fs_info->scrub_pause_req, 0);
2101 atomic_set(&fs_info->scrubs_paused, 0);
2102 atomic_set(&fs_info->scrub_cancel_req, 0);
2103 init_waitqueue_head(&fs_info->scrub_pause_wait);
2104 fs_info->scrub_workers_refcnt = 0;
2105}
2106
2107static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2108{
2109 spin_lock_init(&fs_info->balance_lock);
2110 mutex_init(&fs_info->balance_mutex);
2111 atomic_set(&fs_info->balance_pause_req, 0);
2112 atomic_set(&fs_info->balance_cancel_req, 0);
2113 fs_info->balance_ctl = NULL;
2114 init_waitqueue_head(&fs_info->balance_wait_q);
2115}
2116
2117static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2118{
2119 struct inode *inode = fs_info->btree_inode;
2120
2121 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2122 set_nlink(inode, 1);
2123 /*
2124 * we set the i_size on the btree inode to the max possible offset
2125 * (OFFSET_MAX); the real end of the address space is determined by
2126 * all of the devices in the system
2127 */
2128 inode->i_size = OFFSET_MAX;
2129 inode->i_mapping->a_ops = &btree_aops;
2130
2131 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2132 extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2133 BTRFS_I(inode)->io_tree.track_uptodate = 0;
2134 extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2135
2136 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2137
2138 BTRFS_I(inode)->root = fs_info->tree_root;
2139 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2140 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2141 btrfs_insert_inode_hash(inode);
2142}
2143
2144static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2145{
2146 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2147 init_rwsem(&fs_info->dev_replace.rwsem);
2148 init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2149}
2150
2151static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2152{
2153 spin_lock_init(&fs_info->qgroup_lock);
2154 mutex_init(&fs_info->qgroup_ioctl_lock);
2155 fs_info->qgroup_tree = RB_ROOT;
2156 fs_info->qgroup_op_tree = RB_ROOT;
2157 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2158 fs_info->qgroup_seq = 1;
2159 fs_info->qgroup_ulist = NULL;
2160 fs_info->qgroup_rescan_running = false;
2161 mutex_init(&fs_info->qgroup_rescan_lock);
2162}
2163
2164static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2165 struct btrfs_fs_devices *fs_devices)
2166{
2167 u32 max_active = fs_info->thread_pool_size;
2168 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2169
2170 fs_info->workers =
2171 btrfs_alloc_workqueue(fs_info, "worker",
2172 flags | WQ_HIGHPRI, max_active, 16);
2173
2174 fs_info->delalloc_workers =
2175 btrfs_alloc_workqueue(fs_info, "delalloc",
2176 flags, max_active, 2);
2177
2178 fs_info->flush_workers =
2179 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2180 flags, max_active, 0);
2181
2182 fs_info->caching_workers =
2183 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2184
2185 /*
2186 * a higher idle thresh on the submit workers makes it much more
2187 * likely that bios will be sent down in a sane order to the
2188 * devices
2189 */
2190 fs_info->submit_workers =
2191 btrfs_alloc_workqueue(fs_info, "submit", flags,
2192 min_t(u64, fs_devices->num_devices,
2193 max_active), 64);
2194
2195 fs_info->fixup_workers =
2196 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2197
2198 /*
2199 * endios are largely parallel and should have a very
2200 * low idle thresh
2201 */
2202 fs_info->endio_workers =
2203 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2204 fs_info->endio_meta_workers =
2205 btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2206 max_active, 4);
2207 fs_info->endio_meta_write_workers =
2208 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2209 max_active, 2);
2210 fs_info->endio_raid56_workers =
2211 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2212 max_active, 4);
2213 fs_info->endio_repair_workers =
2214 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2215 fs_info->rmw_workers =
2216 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2217 fs_info->endio_write_workers =
2218 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2219 max_active, 2);
2220 fs_info->endio_freespace_worker =
2221 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2222 max_active, 0);
2223 fs_info->delayed_workers =
2224 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2225 max_active, 0);
2226 fs_info->readahead_workers =
2227 btrfs_alloc_workqueue(fs_info, "readahead", flags,
2228 max_active, 2);
2229 fs_info->qgroup_rescan_workers =
2230 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2231 fs_info->extent_workers =
2232 btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2233 min_t(u64, fs_devices->num_devices,
2234 max_active), 8);
2235
2236 if (!(fs_info->workers && fs_info->delalloc_workers &&
2237 fs_info->submit_workers && fs_info->flush_workers &&
2238 fs_info->endio_workers && fs_info->endio_meta_workers &&
2239 fs_info->endio_meta_write_workers &&
2240 fs_info->endio_repair_workers &&
2241 fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2242 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2243 fs_info->caching_workers && fs_info->readahead_workers &&
2244 fs_info->fixup_workers && fs_info->delayed_workers &&
2245 fs_info->extent_workers &&
2246 fs_info->qgroup_rescan_workers)) {
2247 return -ENOMEM;
2248 }
2249
2250 return 0;
2251}
2252
2253static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2254 struct btrfs_fs_devices *fs_devices)
2255{
2256 int ret;
2257 struct btrfs_root *log_tree_root;
2258 struct btrfs_super_block *disk_super = fs_info->super_copy;
2259 u64 bytenr = btrfs_super_log_root(disk_super);
2260 int level = btrfs_super_log_root_level(disk_super);
2261
2262 if (fs_devices->rw_devices == 0) {
2263 btrfs_warn(fs_info, "log replay required on RO media");
2264 return -EIO;
2265 }
2266
2267 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2268 if (!log_tree_root)
2269 return -ENOMEM;
2270
2271 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2272
2273 log_tree_root->node = read_tree_block(fs_info, bytenr,
2274 fs_info->generation + 1,
2275 level, NULL);
2276 if (IS_ERR(log_tree_root->node)) {
2277 btrfs_warn(fs_info, "failed to read log tree");
2278 ret = PTR_ERR(log_tree_root->node);
2279 kfree(log_tree_root);
2280 return ret;
2281 } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2282 btrfs_err(fs_info, "failed to read log tree");
2283 free_extent_buffer(log_tree_root->node);
2284 kfree(log_tree_root);
2285 return -EIO;
2286 }
2287 /* returns with log_tree_root freed on success */
2288 ret = btrfs_recover_log_trees(log_tree_root);
2289 if (ret) {
2290 btrfs_handle_fs_error(fs_info, ret,
2291 "Failed to recover log tree");
2292 free_extent_buffer(log_tree_root->node);
2293 kfree(log_tree_root);
2294 return ret;
2295 }
2296
2297 if (sb_rdonly(fs_info->sb)) {
2298 ret = btrfs_commit_super(fs_info);
2299 if (ret)
2300 return ret;
2301 }
2302
2303 return 0;
2304}
2305
2306static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2307{
2308 struct btrfs_root *tree_root = fs_info->tree_root;
2309 struct btrfs_root *root;
2310 struct btrfs_key location;
2311 int ret;
2312
2313 BUG_ON(!fs_info->tree_root);
2314
2315 location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2316 location.type = BTRFS_ROOT_ITEM_KEY;
2317 location.offset = 0;
2318
2319 root = btrfs_read_tree_root(tree_root, &location);
2320 if (IS_ERR(root)) {
2321 ret = PTR_ERR(root);
2322 goto out;
2323 }
2324 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2325 fs_info->extent_root = root;
2326
2327 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2328 root = btrfs_read_tree_root(tree_root, &location);
2329 if (IS_ERR(root)) {
2330 ret = PTR_ERR(root);
2331 goto out;
2332 }
2333 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2334 fs_info->dev_root = root;
2335 btrfs_init_devices_late(fs_info);
2336
2337 location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2338 root = btrfs_read_tree_root(tree_root, &location);
2339 if (IS_ERR(root)) {
2340 ret = PTR_ERR(root);
2341 goto out;
2342 }
2343 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2344 fs_info->csum_root = root;
2345
2346 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2347 root = btrfs_read_tree_root(tree_root, &location);
2348 if (!IS_ERR(root)) {
2349 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2350 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2351 fs_info->quota_root = root;
2352 }
2353
2354 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2355 root = btrfs_read_tree_root(tree_root, &location);
2356 if (IS_ERR(root)) {
2357 ret = PTR_ERR(root);
2358 if (ret != -ENOENT)
2359 goto out;
2360 } else {
2361 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2362 fs_info->uuid_root = root;
2363 }
2364
2365 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2366 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2367 root = btrfs_read_tree_root(tree_root, &location);
2368 if (IS_ERR(root)) {
2369 ret = PTR_ERR(root);
2370 goto out;
2371 }
2372 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2373 fs_info->free_space_root = root;
2374 }
2375
2376 return 0;
2377out:
2378 btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2379 location.objectid, ret);
2380 return ret;
2381}
2382
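#if 0
/*
 * Minimal sketch (assumption: follows the pattern used in
 * btrfs_read_roots() above): every persistent tree is located by a
 * ROOT_ITEM key in the tree of tree roots.
 */
static struct btrfs_root *sketch_read_root(struct btrfs_root *tree_root,
					   u64 objectid)
{
	struct btrfs_key location = {
		.objectid = objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};

	return btrfs_read_tree_root(tree_root, &location);
}
#endif
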
2383/*
2384 * Real super block validation
2385 * NOTE: super csum type and incompat features will not be checked here.
2386 *
2387 * @sb: super block to check
2388 * @mirror_num: which super block copy's bytenr to check:
2389 * 0 the primary (1st) sb
2390 * 1, 2 2nd and 3rd backup copy
2391 * -1 skip bytenr check
2392 */
2393static int validate_super(struct btrfs_fs_info *fs_info,
2394 struct btrfs_super_block *sb, int mirror_num)
2395{
2396 u64 nodesize = btrfs_super_nodesize(sb);
2397 u64 sectorsize = btrfs_super_sectorsize(sb);
2398 int ret = 0;
2399
2400 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2401 btrfs_err(fs_info, "no valid FS found");
2402 ret = -EINVAL;
2403 }
2404 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2405 btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2406 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2407 ret = -EINVAL;
2408 }
2409 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2410 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2411 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2412 ret = -EINVAL;
2413 }
2414 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2415 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2416 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2417 ret = -EINVAL;
2418 }
2419 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2420 btrfs_err(fs_info, "log_root level too big: %d >= %d",
2421 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2422 ret = -EINVAL;
2423 }
2424
2425 /*
2426 * Check sectorsize and nodesize first, other checks will need them.
2427 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2428 */
2429 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2430 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2431 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2432 ret = -EINVAL;
2433 }
2434 /* Only PAGE_SIZE is supported for now */
2435 if (sectorsize != PAGE_SIZE) {
2436 btrfs_err(fs_info,
2437 "sectorsize %llu not supported yet, only support %lu",
2438 sectorsize, PAGE_SIZE);
2439 ret = -EINVAL;
2440 }
2441 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2442 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2443 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2444 ret = -EINVAL;
2445 }
2446 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2447 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2448 le32_to_cpu(sb->__unused_leafsize), nodesize);
2449 ret = -EINVAL;
2450 }
2451
2452 /* Root alignment check */
2453 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2454 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2455 btrfs_super_root(sb));
2456 ret = -EINVAL;
2457 }
2458 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2459 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2460 btrfs_super_chunk_root(sb));
2461 ret = -EINVAL;
2462 }
2463 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2464 btrfs_warn(fs_info, "log_root block unaligned: %llu",
2465 btrfs_super_log_root(sb));
2466 ret = -EINVAL;
2467 }
2468
2469 if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2470 BTRFS_FSID_SIZE) != 0) {
2471 btrfs_err(fs_info,
2472 "dev_item UUID does not match metadata fsid: %pU != %pU",
2473 fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2474 ret = -EINVAL;
2475 }
2476
2477 /*
2478 * Hint to catch really bogus numbers, bit flips and the like; more exact
2479 * checks are done later
2480 */
2481 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2482 btrfs_err(fs_info, "bytes_used is too small %llu",
2483 btrfs_super_bytes_used(sb));
2484 ret = -EINVAL;
2485 }
2486 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2487 btrfs_err(fs_info, "invalid stripesize %u",
2488 btrfs_super_stripesize(sb));
2489 ret = -EINVAL;
2490 }
2491 if (btrfs_super_num_devices(sb) > (1UL << 31))
2492 btrfs_warn(fs_info, "suspicious number of devices: %llu",
2493 btrfs_super_num_devices(sb));
2494 if (btrfs_super_num_devices(sb) == 0) {
2495 btrfs_err(fs_info, "number of devices is 0");
2496 ret = -EINVAL;
2497 }
2498
2499 if (mirror_num >= 0 &&
2500 btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2501 btrfs_err(fs_info, "super offset mismatch %llu != %u",
2502 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
2503 ret = -EINVAL;
2504 }
2505
2506 /*
2507 * Obvious sys_chunk_array corruptions, it must hold at least one key
2508 * and one chunk
2509 */
2510 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2511 btrfs_err(fs_info, "system chunk array too big %u > %u",
2512 btrfs_super_sys_array_size(sb),
2513 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2514 ret = -EINVAL;
2515 }
2516 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2517 + sizeof(struct btrfs_chunk)) {
2518 btrfs_err(fs_info, "system chunk array too small %u < %zu",
2519 btrfs_super_sys_array_size(sb),
2520 sizeof(struct btrfs_disk_key)
2521 + sizeof(struct btrfs_chunk));
2522 ret = -EINVAL;
2523 }
2524
2525 /*
2526 * The generation is a global counter; we trust it more than the other
2527 * fields, but it's still possible that it's the one that's wrong.
2528 */
2529 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2530 btrfs_warn(fs_info,
2531 "suspicious: generation < chunk_root_generation: %llu < %llu",
2532 btrfs_super_generation(sb),
2533 btrfs_super_chunk_root_generation(sb));
2534 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2535 && btrfs_super_cache_generation(sb) != (u64)-1)
2536 btrfs_warn(fs_info,
2537 "suspicious: generation < cache_generation: %llu < %llu",
2538 btrfs_super_generation(sb),
2539 btrfs_super_cache_generation(sb));
2540
2541 return ret;
2542}
2543
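/*
 * Worked example for the bytenr check above (offsets assumed from
 * btrfs_sb_offset()): copy 0 lives at 64KiB, copy 1 at 64MiB and copy 2
 * at 256GiB, so a super block read as mirror 1 must carry
 * bytenr == 64MiB or it is rejected.
 */
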
2544/*
2545 * Validation of super block at mount time.
2546 * Checks already done early at mount time, like csum type and incompat
2547 * flags, will be skipped.
2548 */
2549static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2550{
2551 return validate_super(fs_info, fs_info->super_copy, 0);
2552}
2553
2554/*
2555 * Validation of super block at write time.
2556 * Some checks, like the bytenr check, will be skipped as their values will
2557 * be overwritten soon.
2558 * Extra checks like csum type and incompat flags will be done here.
2559 */
2560static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2561 struct btrfs_super_block *sb)
2562{
2563 int ret;
2564
2565 ret = validate_super(fs_info, sb, -1);
2566 if (ret < 0)
2567 goto out;
2568 if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) {
2569 ret = -EUCLEAN;
2570 btrfs_err(fs_info, "invalid csum type, has %u want %u",
2571 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2572 goto out;
2573 }
2574 if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2575 ret = -EUCLEAN;
2576 btrfs_err(fs_info,
2577 "invalid incompat flags, has 0x%llx valid mask 0x%llx",
2578 btrfs_super_incompat_flags(sb),
2579 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2580 goto out;
2581 }
2582out:
2583 if (ret < 0)
2584 btrfs_err(fs_info,
2585 "super block corruption detected before writing it to disk");
2586 return ret;
2587}
2588
2589int open_ctree(struct super_block *sb,
2590 struct btrfs_fs_devices *fs_devices,
2591 char *options)
2592{
2593 u32 sectorsize;
2594 u32 nodesize;
2595 u32 stripesize;
2596 u64 generation;
2597 u64 features;
2598 struct btrfs_key location;
2599 struct buffer_head *bh;
2600 struct btrfs_super_block *disk_super;
2601 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2602 struct btrfs_root *tree_root;
2603 struct btrfs_root *chunk_root;
2604 int ret;
2605 int err = -EINVAL;
2606 int num_backups_tried = 0;
2607 int backup_index = 0;
2608 int clear_free_space_tree = 0;
2609 int level;
2610
2611 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2612 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2613 if (!tree_root || !chunk_root) {
2614 err = -ENOMEM;
2615 goto fail;
2616 }
2617
2618 ret = init_srcu_struct(&fs_info->subvol_srcu);
2619 if (ret) {
2620 err = ret;
2621 goto fail;
2622 }
2623
2624 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2625 if (ret) {
2626 err = ret;
2627 goto fail_srcu;
2628 }
2629 fs_info->dirty_metadata_batch = PAGE_SIZE *
2630 (1 + ilog2(nr_cpu_ids));
2631
2632 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2633 if (ret) {
2634 err = ret;
2635 goto fail_dirty_metadata_bytes;
2636 }
2637
2638 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2639 GFP_KERNEL);
2640 if (ret) {
2641 err = ret;
2642 goto fail_delalloc_bytes;
2643 }
2644
2645 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2646 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2647 INIT_LIST_HEAD(&fs_info->trans_list);
2648 INIT_LIST_HEAD(&fs_info->dead_roots);
2649 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2650 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2651 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2652 INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
2653 spin_lock_init(&fs_info->pending_raid_kobjs_lock);
2654 spin_lock_init(&fs_info->delalloc_root_lock);
2655 spin_lock_init(&fs_info->trans_lock);
2656 spin_lock_init(&fs_info->fs_roots_radix_lock);
2657 spin_lock_init(&fs_info->delayed_iput_lock);
2658 spin_lock_init(&fs_info->defrag_inodes_lock);
2659 spin_lock_init(&fs_info->tree_mod_seq_lock);
2660 spin_lock_init(&fs_info->super_lock);
2661 spin_lock_init(&fs_info->qgroup_op_lock);
2662 spin_lock_init(&fs_info->buffer_lock);
2663 spin_lock_init(&fs_info->unused_bgs_lock);
2664 rwlock_init(&fs_info->tree_mod_log_lock);
2665 mutex_init(&fs_info->unused_bg_unpin_mutex);
2666 mutex_init(&fs_info->delete_unused_bgs_mutex);
2667 mutex_init(&fs_info->reloc_mutex);
2668 mutex_init(&fs_info->delalloc_root_mutex);
2669 mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2670 seqlock_init(&fs_info->profiles_lock);
2671
2672 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2673 INIT_LIST_HEAD(&fs_info->space_info);
2674 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2675 INIT_LIST_HEAD(&fs_info->unused_bgs);
2676 btrfs_mapping_init(&fs_info->mapping_tree);
2677 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2678 BTRFS_BLOCK_RSV_GLOBAL);
2679 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2680 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2681 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2682 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2683 BTRFS_BLOCK_RSV_DELOPS);
2684 btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2685 BTRFS_BLOCK_RSV_DELREFS);
2686
2687 atomic_set(&fs_info->async_delalloc_pages, 0);
2688 atomic_set(&fs_info->defrag_running, 0);
2689 atomic_set(&fs_info->qgroup_op_seq, 0);
2690 atomic_set(&fs_info->reada_works_cnt, 0);
2691 atomic64_set(&fs_info->tree_mod_seq, 0);
2692 fs_info->sb = sb;
2693 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2694 fs_info->metadata_ratio = 0;
2695 fs_info->defrag_inodes = RB_ROOT;
2696 atomic64_set(&fs_info->free_chunk_space, 0);
2697 fs_info->tree_mod_log = RB_ROOT;
2698 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2699 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2700 /* readahead state */
2701 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2702 spin_lock_init(&fs_info->reada_lock);
2703 btrfs_init_ref_verify(fs_info);
2704
2705 fs_info->thread_pool_size = min_t(unsigned long,
2706 num_online_cpus() + 2, 8);
2707
2708 INIT_LIST_HEAD(&fs_info->ordered_roots);
2709 spin_lock_init(&fs_info->ordered_root_lock);
2710
2711 fs_info->btree_inode = new_inode(sb);
2712 if (!fs_info->btree_inode) {
2713 err = -ENOMEM;
2714 goto fail_bio_counter;
2715 }
2716 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2717
2718 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2719 GFP_KERNEL);
2720 if (!fs_info->delayed_root) {
2721 err = -ENOMEM;
2722 goto fail_iput;
2723 }
2724 btrfs_init_delayed_root(fs_info->delayed_root);
2725
2726 btrfs_init_scrub(fs_info);
2727#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2728 fs_info->check_integrity_print_mask = 0;
2729#endif
2730 btrfs_init_balance(fs_info);
2731 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2732
2733 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2734 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2735
2736 btrfs_init_btree_inode(fs_info);
2737
2738 spin_lock_init(&fs_info->block_group_cache_lock);
2739 fs_info->block_group_cache_tree = RB_ROOT;
2740 fs_info->first_logical_byte = (u64)-1;
2741
2742 extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2743 extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2744 fs_info->pinned_extents = &fs_info->freed_extents[0];
2745 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2746
2747 mutex_init(&fs_info->ordered_operations_mutex);
2748 mutex_init(&fs_info->tree_log_mutex);
2749 mutex_init(&fs_info->chunk_mutex);
2750 mutex_init(&fs_info->transaction_kthread_mutex);
2751 mutex_init(&fs_info->cleaner_mutex);
2752 mutex_init(&fs_info->ro_block_group_mutex);
2753 init_rwsem(&fs_info->commit_root_sem);
2754 init_rwsem(&fs_info->cleanup_work_sem);
2755 init_rwsem(&fs_info->subvol_sem);
2756 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2757
2758 btrfs_init_dev_replace_locks(fs_info);
2759 btrfs_init_qgroup(fs_info);
2760
2761 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2762 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2763
2764 init_waitqueue_head(&fs_info->transaction_throttle);
2765 init_waitqueue_head(&fs_info->transaction_wait);
2766 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2767 init_waitqueue_head(&fs_info->async_submit_wait);
2768
2769 INIT_LIST_HEAD(&fs_info->pinned_chunks);
2770
2771 /* Usable values until the real ones are cached from the superblock */
2772 fs_info->nodesize = 4096;
2773 fs_info->sectorsize = 4096;
2774 fs_info->stripesize = 4096;
2775
2776 spin_lock_init(&fs_info->swapfile_pins_lock);
2777 fs_info->swapfile_pins = RB_ROOT;
2778
2779 ret = btrfs_alloc_stripe_hash_table(fs_info);
2780 if (ret) {
2781 err = ret;
2782 goto fail_alloc;
2783 }
2784
2785 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2786
2787 invalidate_bdev(fs_devices->latest_bdev);
2788
2789 /*
2790 * Read super block and check the signature bytes only
2791 */
2792 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2793 if (IS_ERR(bh)) {
2794 err = PTR_ERR(bh);
2795 goto fail_alloc;
2796 }
2797
2798 /*
2799 * We want to check the superblock checksum; the csum type is stored
2800 * inside. Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2801 */
2802 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2803 btrfs_err(fs_info, "superblock checksum mismatch");
2804 err = -EINVAL;
2805 brelse(bh);
2806 goto fail_alloc;
2807 }
2808
2809 /*
2810 * super_copy is zeroed at allocation time and we never touch the
2811 * following bytes up to INFO_SIZE; the checksum is calculated from
2812 * the whole block of INFO_SIZE
2813 */
2814 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2815 brelse(bh);
2816
2817 disk_super = fs_info->super_copy;
2818
2819 ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
2820 BTRFS_FSID_SIZE));
2821
2822 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
2823 ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
2824 fs_info->super_copy->metadata_uuid,
2825 BTRFS_FSID_SIZE));
2826 }
2827
2828 features = btrfs_super_flags(disk_super);
2829 if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
2830 features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
2831 btrfs_set_super_flags(disk_super, features);
2832 btrfs_info(fs_info,
2833 "found metadata UUID change in progress flag, clearing");
2834 }
2835
2836 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2837 sizeof(*fs_info->super_for_commit));
2838
2839 ret = btrfs_validate_mount_super(fs_info);
2840 if (ret) {
2841 btrfs_err(fs_info, "superblock contains fatal errors");
2842 err = -EINVAL;
2843 goto fail_alloc;
2844 }
2845
2846 if (!btrfs_super_root(disk_super))
2847 goto fail_alloc;
2848
2849 /* check FS state, whether FS is broken. */
2850 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2851 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2852
2853 /*
2854 * run through our array of backup supers and set up
2855 * our ring pointer to the oldest one
2856 */
2857 generation = btrfs_super_generation(disk_super);
2858 find_oldest_super_backup(fs_info, generation);
2859
2860 /*
2861 * In the long term, we'll store the compression type in the super
2862 * block, and it'll be used for per file compression control.
2863 */
2864 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2865
2866 ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2867 if (ret) {
2868 err = ret;
2869 goto fail_alloc;
2870 }
2871
2872 features = btrfs_super_incompat_flags(disk_super) &
2873 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2874 if (features) {
2875 btrfs_err(fs_info,
2876 "cannot mount because of unsupported optional features (%llx)",
2877 features);
2878 err = -EINVAL;
2879 goto fail_alloc;
2880 }
2881
2882 features = btrfs_super_incompat_flags(disk_super);
2883 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2884 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2885 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2886 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2887 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2888
2889 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2890 btrfs_info(fs_info, "has skinny extents");
2891
2892 /*
2893 * flag our filesystem as having big metadata blocks if
2894 * they are bigger than the page size
2895 */
2896 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2897 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2898 btrfs_info(fs_info,
2899 "flagging fs with big metadata feature");
2900 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2901 }
2902
2903 nodesize = btrfs_super_nodesize(disk_super);
2904 sectorsize = btrfs_super_sectorsize(disk_super);
2905 stripesize = sectorsize;
2906 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2907 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2908
2909 /* Cache block sizes */
2910 fs_info->nodesize = nodesize;
2911 fs_info->sectorsize = sectorsize;
2912 fs_info->stripesize = stripesize;
2913
2914 /*
2915 * mixed block groups end up with duplicate but slightly offset
2916 * extent buffers for the same range. This leads to corruption.
2917 */
2918 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2919 (sectorsize != nodesize)) {
2920 btrfs_err(fs_info,
2921"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2922 nodesize, sectorsize);
2923 goto fail_alloc;
2924 }
2925
2926 /*
2927 * No need to take the lock because there is no other task that will
2928 * update the flag.
2929 */
2930 btrfs_set_super_incompat_flags(disk_super, features);
2931
2932 features = btrfs_super_compat_ro_flags(disk_super) &
2933 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2934 if (!sb_rdonly(sb) && features) {
2935 btrfs_err(fs_info,
2936 "cannot mount read-write because of unsupported optional features (%llx)",
2937 features);
2938 err = -EINVAL;
2939 goto fail_alloc;
2940 }
2941
2942 ret = btrfs_init_workqueues(fs_info, fs_devices);
2943 if (ret) {
2944 err = ret;
2945 goto fail_sb_buffer;
2946 }
2947
2948 sb->s_bdi->congested_fn = btrfs_congested_fn;
2949 sb->s_bdi->congested_data = fs_info;
2950 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2951 sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
2952 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2953 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2954
2955 sb->s_blocksize = sectorsize;
2956 sb->s_blocksize_bits = blksize_bits(sectorsize);
2957 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
2958
2959 mutex_lock(&fs_info->chunk_mutex);
2960 ret = btrfs_read_sys_array(fs_info);
2961 mutex_unlock(&fs_info->chunk_mutex);
2962 if (ret) {
2963 btrfs_err(fs_info, "failed to read the system array: %d", ret);
2964 goto fail_sb_buffer;
2965 }
2966
2967 generation = btrfs_super_chunk_root_generation(disk_super);
2968 level = btrfs_super_chunk_root_level(disk_super);
2969
2970 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2971
2972 chunk_root->node = read_tree_block(fs_info,
2973 btrfs_super_chunk_root(disk_super),
2974 generation, level, NULL);
2975 if (IS_ERR(chunk_root->node) ||
2976 !extent_buffer_uptodate(chunk_root->node)) {
2977 btrfs_err(fs_info, "failed to read chunk root");
2978 if (!IS_ERR(chunk_root->node))
2979 free_extent_buffer(chunk_root->node);
2980 chunk_root->node = NULL;
2981 goto fail_tree_roots;
2982 }
2983 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2984 chunk_root->commit_root = btrfs_root_node(chunk_root);
2985
2986 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2987 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2988
2989 ret = btrfs_read_chunk_tree(fs_info);
2990 if (ret) {
2991 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2992 goto fail_tree_roots;
2993 }
2994
2995 /*
2996 * Keep the devid that is marked to be the target device for the
2997 * device replace procedure
2998 */
2999 btrfs_free_extra_devids(fs_devices, 0);
3000
3001 if (!fs_devices->latest_bdev) {
3002 btrfs_err(fs_info, "failed to read devices");
3003 goto fail_tree_roots;
3004 }
3005
3006retry_root_backup:
3007 generation = btrfs_super_generation(disk_super);
3008 level = btrfs_super_root_level(disk_super);
3009
3010 tree_root->node = read_tree_block(fs_info,
3011 btrfs_super_root(disk_super),
3012 generation, level, NULL);
3013 if (IS_ERR(tree_root->node) ||
3014 !extent_buffer_uptodate(tree_root->node)) {
3015 btrfs_warn(fs_info, "failed to read tree root");
3016 if (!IS_ERR(tree_root->node))
3017 free_extent_buffer(tree_root->node);
3018 tree_root->node = NULL;
3019 goto recovery_tree_root;
3020 }
3021
3022 btrfs_set_root_node(&tree_root->root_item, tree_root->node);
3023 tree_root->commit_root = btrfs_root_node(tree_root);
3024 btrfs_set_root_refs(&tree_root->root_item, 1);
3025
3026 mutex_lock(&tree_root->objectid_mutex);
3027 ret = btrfs_find_highest_objectid(tree_root,
3028 &tree_root->highest_objectid);
3029 if (ret) {
3030 mutex_unlock(&tree_root->objectid_mutex);
3031 goto recovery_tree_root;
3032 }
3033
3034 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
3035
3036 mutex_unlock(&tree_root->objectid_mutex);
3037
3038 ret = btrfs_read_roots(fs_info);
3039 if (ret)
3040 goto recovery_tree_root;
3041
3042 fs_info->generation = generation;
3043 fs_info->last_trans_committed = generation;
3044
3045 ret = btrfs_verify_dev_extents(fs_info);
3046 if (ret) {
3047 btrfs_err(fs_info,
3048 "failed to verify dev extents against chunks: %d",
3049 ret);
3050 goto fail_block_groups;
3051 }
3052 ret = btrfs_recover_balance(fs_info);
3053 if (ret) {
3054 btrfs_err(fs_info, "failed to recover balance: %d", ret);
3055 goto fail_block_groups;
3056 }
3057
3058 ret = btrfs_init_dev_stats(fs_info);
3059 if (ret) {
3060 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3061 goto fail_block_groups;
3062 }
3063
3064 ret = btrfs_init_dev_replace(fs_info);
3065 if (ret) {
3066 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3067 goto fail_block_groups;
3068 }
3069
3070 btrfs_free_extra_devids(fs_devices, 1);
3071
3072 ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3073 if (ret) {
3074 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3075 ret);
3076 goto fail_block_groups;
3077 }
3078
3079 ret = btrfs_sysfs_add_device(fs_devices);
3080 if (ret) {
3081 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3082 ret);
3083 goto fail_fsdev_sysfs;
3084 }
3085
3086 ret = btrfs_sysfs_add_mounted(fs_info);
3087 if (ret) {
3088 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3089 goto fail_fsdev_sysfs;
3090 }
3091
3092 ret = btrfs_init_space_info(fs_info);
3093 if (ret) {
3094 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3095 goto fail_sysfs;
3096 }
3097
3098 ret = btrfs_read_block_groups(fs_info);
3099 if (ret) {
3100 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3101 goto fail_sysfs;
3102 }
3103
3104 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
3105 btrfs_warn(fs_info,
3106 "writable mount is not allowed due to too many missing devices");
3107 goto fail_sysfs;
3108 }
3109
3110 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3111 "btrfs-cleaner");
3112 if (IS_ERR(fs_info->cleaner_kthread))
3113 goto fail_sysfs;
3114
3115 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3116 tree_root,
3117 "btrfs-transaction");
3118 if (IS_ERR(fs_info->transaction_kthread))
3119 goto fail_cleaner;
3120
3121 if (!btrfs_test_opt(fs_info, NOSSD) &&
3122 !fs_info->fs_devices->rotating) {
3123 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3124 }
3125
3126 /*
3127 * Mount does not set all options immediately; we can do it now and do
3128 * not have to wait for transaction commit
3129 */
3130 btrfs_apply_pending_changes(fs_info);
3131
3132#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3133 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3134 ret = btrfsic_mount(fs_info, fs_devices,
3135 btrfs_test_opt(fs_info,
3136 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3137 1 : 0,
3138 fs_info->check_integrity_print_mask);
3139 if (ret)
3140 btrfs_warn(fs_info,
3141 "failed to initialize integrity check module: %d",
3142 ret);
3143 }
3144#endif
3145 ret = btrfs_read_qgroup_config(fs_info);
3146 if (ret)
3147 goto fail_trans_kthread;
3148
3149 if (btrfs_build_ref_tree(fs_info))
3150 btrfs_err(fs_info, "couldn't build ref tree");
3151
3152 /* do not make disk changes in a broken FS or when nologreplay is given */
3153 if (btrfs_super_log_root(disk_super) != 0 &&
3154 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3155 ret = btrfs_replay_log(fs_info, fs_devices);
3156 if (ret) {
3157 err = ret;
3158 goto fail_qgroup;
3159 }
3160 }
3161
3162 ret = btrfs_find_orphan_roots(fs_info);
3163 if (ret)
3164 goto fail_qgroup;
3165
3166 if (!sb_rdonly(sb)) {
3167 ret = btrfs_cleanup_fs_roots(fs_info);
3168 if (ret)
3169 goto fail_qgroup;
3170
3171 mutex_lock(&fs_info->cleaner_mutex);
3172 ret = btrfs_recover_relocation(tree_root);
3173 mutex_unlock(&fs_info->cleaner_mutex);
3174 if (ret < 0) {
3175 btrfs_warn(fs_info, "failed to recover relocation: %d",
3176 ret);
3177 err = -EINVAL;
3178 goto fail_qgroup;
3179 }
3180 }
3181
3182 location.objectid = BTRFS_FS_TREE_OBJECTID;
3183 location.type = BTRFS_ROOT_ITEM_KEY;
3184 location.offset = 0;
3185
3186 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3187 if (IS_ERR(fs_info->fs_root)) {
3188 err = PTR_ERR(fs_info->fs_root);
3189 btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3190 goto fail_qgroup;
3191 }
3192
3193 if (sb_rdonly(sb))
3194 return 0;
3195
3196 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3197 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3198 clear_free_space_tree = 1;
3199 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3200 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3201 btrfs_warn(fs_info, "free space tree is invalid");
3202 clear_free_space_tree = 1;
3203 }
3204
3205 if (clear_free_space_tree) {
3206 btrfs_info(fs_info, "clearing free space tree");
3207 ret = btrfs_clear_free_space_tree(fs_info);
3208 if (ret) {
3209 btrfs_warn(fs_info,
3210 "failed to clear free space tree: %d", ret);
3211 close_ctree(fs_info);
3212 return ret;
3213 }
3214 }
3215
3216 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3217 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3218 btrfs_info(fs_info, "creating free space tree");
3219 ret = btrfs_create_free_space_tree(fs_info);
3220 if (ret) {
3221 btrfs_warn(fs_info,
3222 "failed to create free space tree: %d", ret);
3223 close_ctree(fs_info);
3224 return ret;
3225 }
3226 }
3227
3228 down_read(&fs_info->cleanup_work_sem);
3229 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3230 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3231 up_read(&fs_info->cleanup_work_sem);
3232 close_ctree(fs_info);
3233 return ret;
3234 }
3235 up_read(&fs_info->cleanup_work_sem);
3236
3237 ret = btrfs_resume_balance_async(fs_info);
3238 if (ret) {
3239 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3240 close_ctree(fs_info);
3241 return ret;
3242 }
3243
3244 ret = btrfs_resume_dev_replace_async(fs_info);
3245 if (ret) {
3246 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3247 close_ctree(fs_info);
3248 return ret;
3249 }
3250
3251 btrfs_qgroup_rescan_resume(fs_info);
3252
3253 if (!fs_info->uuid_root) {
3254 btrfs_info(fs_info, "creating UUID tree");
3255 ret = btrfs_create_uuid_tree(fs_info);
3256 if (ret) {
3257 btrfs_warn(fs_info,
3258 "failed to create the UUID tree: %d", ret);
3259 close_ctree(fs_info);
3260 return ret;
3261 }
3262 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3263 fs_info->generation !=
3264 btrfs_super_uuid_tree_generation(disk_super)) {
3265 btrfs_info(fs_info, "checking UUID tree");
3266 ret = btrfs_check_uuid_tree(fs_info);
3267 if (ret) {
3268 btrfs_warn(fs_info,
3269 "failed to check the UUID tree: %d", ret);
3270 close_ctree(fs_info);
3271 return ret;
3272 }
3273 } else {
3274 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3275 }
3276 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3277
3278 /*
3279 * backuproot only affects mount behavior, and if open_ctree succeeded,
3280 * there is no need to keep the flag
3281 */
3282 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3283
3284 return 0;
3285
3286fail_qgroup:
3287 btrfs_free_qgroup_config(fs_info);
3288fail_trans_kthread:
3289 kthread_stop(fs_info->transaction_kthread);
3290 btrfs_cleanup_transaction(fs_info);
3291 btrfs_free_fs_roots(fs_info);
3292fail_cleaner:
3293 kthread_stop(fs_info->cleaner_kthread);
3294
3295 /*
3296 * make sure we're done with the btree inode before we stop our
3297 * kthreads
3298 */
3299 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3300
3301fail_sysfs:
3302 btrfs_sysfs_remove_mounted(fs_info);
3303
3304fail_fsdev_sysfs:
3305 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3306
3307fail_block_groups:
3308 btrfs_put_block_group_cache(fs_info);
3309
3310fail_tree_roots:
3311 free_root_pointers(fs_info, 1);
3312 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3313
3314fail_sb_buffer:
3315 btrfs_stop_all_workers(fs_info);
3316 btrfs_free_block_groups(fs_info);
3317fail_alloc:
3318fail_iput:
3319 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3320
3321 iput(fs_info->btree_inode);
3322fail_bio_counter:
3323 percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
3324fail_delalloc_bytes:
3325 percpu_counter_destroy(&fs_info->delalloc_bytes);
3326fail_dirty_metadata_bytes:
3327 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3328fail_srcu:
3329 cleanup_srcu_struct(&fs_info->subvol_srcu);
3330fail:
3331 btrfs_free_stripe_hash_table(fs_info);
3332 btrfs_close_devices(fs_info->fs_devices);
3333 return err;
3334
3335recovery_tree_root:
3336 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3337 goto fail_tree_roots;
3338
3339 free_root_pointers(fs_info, 0);
3340
3341 /* don't use the log in recovery mode, it won't be valid */
3342 btrfs_set_super_log_root(disk_super, 0);
3343
3344 /* we can't trust the free space cache either */
3345 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3346
3347 ret = next_root_backup(fs_info, fs_info->super_copy,
3348 &num_backups_tried, &backup_index);
3349 if (ret == -1)
3350 goto fail_block_groups;
3351 goto retry_root_backup;
3352}
3353ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3354
3355static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3356{
3357 if (uptodate) {
3358 set_buffer_uptodate(bh);
3359 } else {
3360 struct btrfs_device *device = (struct btrfs_device *)
3361 bh->b_private;
3362
3363 btrfs_warn_rl_in_rcu(device->fs_info,
3364 "lost page write due to IO error on %s",
3365 rcu_str_deref(device->name));
3366 /* note, we don't set_buffer_write_io_error because we have
3367 * our own ways of dealing with the IO errors
3368 */
3369 clear_buffer_uptodate(bh);
3370 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3371 }
3372 unlock_buffer(bh);
3373 put_bh(bh);
3374}
3375
3376int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3377 struct buffer_head **bh_ret)
3378{
3379 struct buffer_head *bh;
3380 struct btrfs_super_block *super;
3381 u64 bytenr;
3382
3383 bytenr = btrfs_sb_offset(copy_num);
3384 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3385 return -EINVAL;
3386
3387 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3388 /*
3389 * If we fail to read from the underlying devices, as of now
3390 * the best option we have is to mark it EIO.
3391 */
3392 if (!bh)
3393 return -EIO;
3394
3395 super = (struct btrfs_super_block *)bh->b_data;
3396 if (btrfs_super_bytenr(super) != bytenr ||
3397 btrfs_super_magic(super) != BTRFS_MAGIC) {
3398 brelse(bh);
3399 return -EINVAL;
3400 }
3401
3402 *bh_ret = bh;
3403 return 0;
3404}
3405
3406
3407struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3408{
3409 struct buffer_head *bh;
3410 struct buffer_head *latest = NULL;
3411 struct btrfs_super_block *super;
3412 int i;
3413 u64 transid = 0;
3414 int ret = -EINVAL;
3415
3416 /* we would like to check all the supers, but that would make
3417 * a btrfs mount succeed after a mkfs from a different FS.
3418 * So, we need to add a special mount option to scan for
3419 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3420 */
3421 for (i = 0; i < 1; i++) {
3422 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3423 if (ret)
3424 continue;
3425
3426 super = (struct btrfs_super_block *)bh->b_data;
3427
3428 if (!latest || btrfs_super_generation(super) > transid) {
3429 brelse(latest);
3430 latest = bh;
3431 transid = btrfs_super_generation(super);
3432 } else {
3433 brelse(bh);
3434 }
3435 }
3436
3437 if (!latest)
3438 return ERR_PTR(ret);
3439
3440 return latest;
3441}
3442
3443/*
3444 * Write superblock @sb to the @device. Do not wait for completion, all the
3445 * buffer heads we write are pinned.
3446 *
3447 * Write @max_mirrors copies of the superblock, where 0 means the default of
3448 * all copies that fit the expected device size at commit time. Note that
3449 * max_mirrors must be the same for the write and wait phases.
3450 *
3451 * Return number of errors when buffer head is not found or submission fails.
3452 */
3453static int write_dev_supers(struct btrfs_device *device,
3454 struct btrfs_super_block *sb, int max_mirrors)
3455{
3456 struct buffer_head *bh;
3457 int i;
3458 int ret;
3459 int errors = 0;
3460 u32 crc;
3461 u64 bytenr;
3462 int op_flags;
3463
3464 if (max_mirrors == 0)
3465 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3466
3467 for (i = 0; i < max_mirrors; i++) {
3468 bytenr = btrfs_sb_offset(i);
3469 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3470 device->commit_total_bytes)
3471 break;
3472
3473 btrfs_set_super_bytenr(sb, bytenr);
3474
3475 crc = ~(u32)0;
3476 crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
3477 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3478 btrfs_csum_final(crc, sb->csum);
3479
3480 /* One reference for us, and we leave it for the caller */
3481 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3482 BTRFS_SUPER_INFO_SIZE);
3483 if (!bh) {
3484 btrfs_err(device->fs_info,
3485 "couldn't get super buffer head for bytenr %llu",
3486 bytenr);
3487 errors++;
3488 continue;
3489 }
3490
3491 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3492
3493 /* one reference for submit_bh */
3494 get_bh(bh);
3495
3496 set_buffer_uptodate(bh);
3497 lock_buffer(bh);
3498 bh->b_end_io = btrfs_end_buffer_write_sync;
3499 bh->b_private = device;
3500
3501 /*
3502 * we FUA the first super. The others we allow
3503 * to go down lazily.
3504 */
3505 op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3506 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3507 op_flags |= REQ_FUA;
3508 ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3509 if (ret)
3510 errors++;
3511 }
3512 return errors < i ? 0 : -1;
3513}
3514
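/*
 * Checksum layout sketch (follows the crc math above): the csum field is
 * the first BTRFS_CSUM_SIZE bytes of the super block, and the crc32c is
 * seeded with ~0 and computed over the remaining
 * BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE bytes, so the checksum never
 * covers itself.
 */
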
3515/*
3516 * Wait for write completion of superblocks done by write_dev_supers,
3517 * @max_mirrors same for write and wait phases.
3518 *
3519 * Return number of errors when buffer head is not found or not marked up to
3520 * date.
3521 */
3522static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3523{
3524 struct buffer_head *bh;
3525 int i;
3526 int errors = 0;
3527 bool primary_failed = false;
3528 u64 bytenr;
3529
3530 if (max_mirrors == 0)
3531 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3532
3533 for (i = 0; i < max_mirrors; i++) {
3534 bytenr = btrfs_sb_offset(i);
3535 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3536 device->commit_total_bytes)
3537 break;
3538
3539 bh = __find_get_block(device->bdev,
3540 bytenr / BTRFS_BDEV_BLOCKSIZE,
3541 BTRFS_SUPER_INFO_SIZE);
3542 if (!bh) {
3543 errors++;
3544 if (i == 0)
3545 primary_failed = true;
3546 continue;
3547 }
3548 wait_on_buffer(bh);
3549 if (!buffer_uptodate(bh)) {
3550 errors++;
3551 if (i == 0)
3552 primary_failed = true;
3553 }
3554
3555 /* drop our reference */
3556 brelse(bh);
3557
3558 /* drop the reference from the writing run */
3559 brelse(bh);
3560 }
3561
3562 /* log error, force error return */
3563 if (primary_failed) {
3564 btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3565 device->devid);
3566 return -1;
3567 }
3568
3569 return errors < i ? 0 : -1;
3570}
3571
3572/*
3573 * endio for write_dev_flush; this will wake anyone waiting
3574 * for the barrier when it is done
3575 */
3576static void btrfs_end_empty_barrier(struct bio *bio)
3577{
3578 complete(bio->bi_private);
3579}
3580
3581/*
3582 * Submit a flush request to the device if it supports it. Error handling is
3583 * done in the waiting counterpart.
3584 */
3585static void write_dev_flush(struct btrfs_device *device)
3586{
3587 struct request_queue *q = bdev_get_queue(device->bdev);
3588 struct bio *bio = device->flush_bio;
3589
3590 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3591 return;
3592
3593 bio_reset(bio);
3594 bio->bi_end_io = btrfs_end_empty_barrier;
3595 bio_set_dev(bio, device->bdev);
3596 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3597 init_completion(&device->flush_wait);
3598 bio->bi_private = &device->flush_wait;
3599
3600 btrfsic_submit_bio(bio);
3601 set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3602}
3603
3604/*
3605 * If the flush bio has been submitted by write_dev_flush, wait for it.
3606 */
3607static blk_status_t wait_dev_flush(struct btrfs_device *device)
3608{
3609 struct bio *bio = device->flush_bio;
3610
3611 if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3612 return BLK_STS_OK;
3613
3614 clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3615 wait_for_completion_io(&device->flush_wait);
3616
3617 return bio->bi_status;
3618}
3619
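#if 0
/*
 * Hypothetical pairing sketch (not part of the original file): a device
 * flush is a two-step operation, submit first, collect status later, so
 * many devices can flush in parallel as in barrier_all_devices() below.
 */
static blk_status_t flush_one_device(struct btrfs_device *device)
{
	write_dev_flush(device);	/* submits the REQ_PREFLUSH bio */
	return wait_dev_flush(device);	/* waits and returns bi_status */
}
#endif
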
3620static int check_barrier_error(struct btrfs_fs_info *fs_info)
3621{
3622 if (!btrfs_check_rw_degradable(fs_info, NULL))
3623 return -EIO;
3624 return 0;
3625}
3626
3627/*
3628 * send an empty flush down to each device in parallel,
3629 * then wait for them
3630 */
3631static int barrier_all_devices(struct btrfs_fs_info *info)
3632{
3633 struct list_head *head;
3634 struct btrfs_device *dev;
3635 int errors_wait = 0;
3636 blk_status_t ret;
3637
3638 lockdep_assert_held(&info->fs_devices->device_list_mutex);
3639 /* send down all the barriers */
3640 head = &info->fs_devices->devices;
3641 list_for_each_entry(dev, head, dev_list) {
3642 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3643 continue;
3644 if (!dev->bdev)
3645 continue;
3646 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3647 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3648 continue;
3649
3650 write_dev_flush(dev);
3651 dev->last_flush_error = BLK_STS_OK;
3652 }
3653
3654 /* wait for all the barriers */
3655 list_for_each_entry(dev, head, dev_list) {
3656 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3657 continue;
3658 if (!dev->bdev) {
3659 errors_wait++;
3660 continue;
3661 }
3662 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3663 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3664 continue;
3665
3666 ret = wait_dev_flush(dev);
3667 if (ret) {
3668 dev->last_flush_error = ret;
3669 btrfs_dev_stat_inc_and_print(dev,
3670 BTRFS_DEV_STAT_FLUSH_ERRS);
3671 errors_wait++;
3672 }
3673 }
3674
3675 if (errors_wait) {
3676 /*
3677 * At some point we need the status of all disks
3678 * to arrive at the volume status. So error checking
3679 * is being pushed to a separate loop.
3680 */
3681 return check_barrier_error(info);
3682 }
3683 return 0;
3684}
3685
3686int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3687{
3688 int raid_type;
3689 int min_tolerated = INT_MAX;
3690
3691 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3692 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3693 min_tolerated = min(min_tolerated,
3694 btrfs_raid_array[BTRFS_RAID_SINGLE].
3695 tolerated_failures);
3696
3697 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3698 if (raid_type == BTRFS_RAID_SINGLE)
3699 continue;
3700 if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3701 continue;
3702 min_tolerated = min(min_tolerated,
3703 btrfs_raid_array[raid_type].
3704 tolerated_failures);
3705 }
3706
3707 if (min_tolerated == INT_MAX) {
3708 pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
3709 min_tolerated = 0;
3710 }
3711
3712 return min_tolerated;
3713}
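
/*
 * Worked example (illustrative; the tolerated_failures values live in
 * btrfs_raid_array in volumes.c): flags containing both SINGLE and RAID1
 * profiles yield min(0, 1) = 0, since SINGLE tolerates no missing
 * devices while RAID1 tolerates one.
 */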
3714
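/*
 * Write the superblock (and up to max_mirrors of its mirror copies;
 * 0 means all of them) to every writeable device, preceded by flush
 * barriers unless the NOBARRIER mount option is set. Up to max_errors
 * per-device failures are tolerated before the write is considered
 * failed as a whole.
 */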
3715int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3716{
3717 struct list_head *head;
3718 struct btrfs_device *dev;
3719 struct btrfs_super_block *sb;
3720 struct btrfs_dev_item *dev_item;
3721 int ret;
3722 int do_barriers;
3723 int max_errors;
3724 int total_errors = 0;
3725 u64 flags;
3726
3727 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3728
3729 /*
3730 * max_mirrors == 0 indicates we're called from commit_transaction,
3731 * not from fsync, where the tree roots in fs_info may not yet be
3732 * consistent on disk.
3733 */
3734 if (max_mirrors == 0)
3735 backup_super_roots(fs_info);
3736
3737 sb = fs_info->super_for_commit;
3738 dev_item = &sb->dev_item;
3739
3740 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3741 head = &fs_info->fs_devices->devices;
3742 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3743
3744 if (do_barriers) {
3745 ret = barrier_all_devices(fs_info);
3746 if (ret) {
3747 mutex_unlock(
3748 &fs_info->fs_devices->device_list_mutex);
3749 btrfs_handle_fs_error(fs_info, ret,
3750 "errors while submitting device barriers.");
3751 return ret;
3752 }
3753 }
3754
3755 list_for_each_entry(dev, head, dev_list) {
3756 if (!dev->bdev) {
3757 total_errors++;
3758 continue;
3759 }
3760 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3761 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3762 continue;
3763
3764 btrfs_set_stack_device_generation(dev_item, 0);
3765 btrfs_set_stack_device_type(dev_item, dev->type);
3766 btrfs_set_stack_device_id(dev_item, dev->devid);
3767 btrfs_set_stack_device_total_bytes(dev_item,
3768 dev->commit_total_bytes);
3769 btrfs_set_stack_device_bytes_used(dev_item,
3770 dev->commit_bytes_used);
3771 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3772 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3773 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3774 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3775 memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
3776 BTRFS_FSID_SIZE);
3777
3778 flags = btrfs_super_flags(sb);
3779 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3780
3781 ret = btrfs_validate_write_super(fs_info, sb);
3782 if (ret < 0) {
3783 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3784 btrfs_handle_fs_error(fs_info, -EUCLEAN,
3785 "unexpected superblock corruption detected");
3786 return -EUCLEAN;
3787 }
3788
3789 ret = write_dev_supers(dev, sb, max_mirrors);
3790 if (ret)
3791 total_errors++;
3792 }
3793 if (total_errors > max_errors) {
3794 btrfs_err(fs_info, "%d errors while writing supers",
3795 total_errors);
3796 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3797
3798 /* FUA is masked off if unsupported and so can't be the cause of errors */
3799 btrfs_handle_fs_error(fs_info, -EIO,
3800 "%d errors while writing supers",
3801 total_errors);
3802 return -EIO;
3803 }
3804
3805 total_errors = 0;
3806 list_for_each_entry(dev, head, dev_list) {
3807 if (!dev->bdev)
3808 continue;
3809 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3810 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3811 continue;
3812
3813 ret = wait_dev_supers(dev, max_mirrors);
3814 if (ret)
3815 total_errors++;
3816 }
3817 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3818 if (total_errors > max_errors) {
3819 btrfs_handle_fs_error(fs_info, -EIO,
3820 "%d errors while writing supers",
3821 total_errors);
3822 return -EIO;
3823 }
3824 return 0;
3825}
3826
3827/* Drop a fs root from the radix tree and free it. */
3828void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3829 struct btrfs_root *root)
3830{
3831 spin_lock(&fs_info->fs_roots_radix_lock);
3832 radix_tree_delete(&fs_info->fs_roots_radix,
3833 (unsigned long)root->root_key.objectid);
3834 spin_unlock(&fs_info->fs_roots_radix_lock);
3835
3836 if (btrfs_root_refs(&root->root_item) == 0)
3837 synchronize_srcu(&fs_info->subvol_srcu);
3838
3839 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3840 btrfs_free_log(NULL, root);
3841 if (root->reloc_root) {
3842 free_extent_buffer(root->reloc_root->node);
3843 free_extent_buffer(root->reloc_root->commit_root);
3844 btrfs_put_fs_root(root->reloc_root);
3845 root->reloc_root = NULL;
3846 }
3847 }
3848
3849 if (root->free_ino_pinned)
3850 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3851 if (root->free_ino_ctl)
3852 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3853 btrfs_free_fs_root(root);
3854}
3855
3856void btrfs_free_fs_root(struct btrfs_root *root)
3857{
3858 iput(root->ino_cache_inode);
3859 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3860 if (root->anon_dev)
3861 free_anon_bdev(root->anon_dev);
3862 if (root->subv_writers)
3863 btrfs_free_subvolume_writers(root->subv_writers);
3864 free_extent_buffer(root->node);
3865 free_extent_buffer(root->commit_root);
3866 kfree(root->free_ino_ctl);
3867 kfree(root->free_ino_pinned);
3868 btrfs_put_fs_root(root);
3869}
3870
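/*
 * Run orphan cleanup on every fs root. Roots are grabbed in batches of
 * ARRAY_SIZE(gang) via radix tree gang lookups, with each pass resuming
 * after the last objectid seen; the SRCU read lock is dropped before the
 * (potentially blocking) orphan cleanup itself runs.
 */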
3871int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3872{
3873 u64 root_objectid = 0;
3874 struct btrfs_root *gang[8];
3875 int i = 0;
3876 int err = 0;
3877 unsigned int ret = 0;
3878 int index;
3879
3880 while (1) {
3881 index = srcu_read_lock(&fs_info->subvol_srcu);
3882 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3883 (void **)gang, root_objectid,
3884 ARRAY_SIZE(gang));
3885 if (!ret) {
3886 srcu_read_unlock(&fs_info->subvol_srcu, index);
3887 break;
3888 }
3889 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3890
3891 for (i = 0; i < ret; i++) {
3892 /* Avoid grabbing roots in dead_roots */
3893 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3894 gang[i] = NULL;
3895 continue;
3896 }
3897 /* grab all the search results for later use */
3898 gang[i] = btrfs_grab_fs_root(gang[i]);
3899 }
3900 srcu_read_unlock(&fs_info->subvol_srcu, index);
3901
3902 for (i = 0; i < ret; i++) {
3903 if (!gang[i])
3904 continue;
3905 root_objectid = gang[i]->root_key.objectid;
3906 err = btrfs_orphan_cleanup(gang[i]);
3907 if (err)
3908 break;
3909 btrfs_put_fs_root(gang[i]);
3910 }
3911 root_objectid++;
3912 }
3913
3914 /* release the uncleaned roots due to error */
3915 for (; i < ret; i++) {
3916 if (gang[i])
3917 btrfs_put_fs_root(gang[i]);
3918 }
3919 return err;
3920}
3921
3922int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3923{
3924 struct btrfs_root *root = fs_info->tree_root;
3925 struct btrfs_trans_handle *trans;
3926
3927 mutex_lock(&fs_info->cleaner_mutex);
3928 btrfs_run_delayed_iputs(fs_info);
3929 mutex_unlock(&fs_info->cleaner_mutex);
3930 wake_up_process(fs_info->cleaner_kthread);
3931
3932 /* wait until ongoing cleanup work is done */
3933 down_write(&fs_info->cleanup_work_sem);
3934 up_write(&fs_info->cleanup_work_sem);
3935
3936 trans = btrfs_join_transaction(root);
3937 if (IS_ERR(trans))
3938 return PTR_ERR(trans);
3939 return btrfs_commit_transaction(trans);
3940}
3941
3942void close_ctree(struct btrfs_fs_info *fs_info)
3943{
3944 int ret;
3945
3946 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3947 /*
3948 * We don't want the cleaner to start new transactions, add more delayed
3949 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3950 * because that frees the task_struct, and the transaction kthread might
3951 * still try to wake up the cleaner.
3952 */
3953 kthread_park(fs_info->cleaner_kthread);
3954
3955 /* wait for the qgroup rescan worker to stop */
3956 btrfs_qgroup_wait_for_completion(fs_info, false);
3957
3958 /* wait for the uuid_scan task to finish */
3959 down(&fs_info->uuid_tree_rescan_sem);
3960 /* avoid complaints from lockdep et al., set sem back to initial state */
3961 up(&fs_info->uuid_tree_rescan_sem);
3962
3963 /* pause restriper - we want to resume on mount */
3964 btrfs_pause_balance(fs_info);
3965
3966 btrfs_dev_replace_suspend_for_unmount(fs_info);
3967
3968 btrfs_scrub_cancel(fs_info);
3969
3970 /* wait for any defraggers to finish */
3971 wait_event(fs_info->transaction_wait,
3972 (atomic_read(&fs_info->defrag_running) == 0));
3973
3974 /* clear out the rbtree of defraggable inodes */
3975 btrfs_cleanup_defrag_inodes(fs_info);
3976
3977 cancel_work_sync(&fs_info->async_reclaim_work);
3978
3979 if (!sb_rdonly(fs_info->sb)) {
3980 /*
3981 * The cleaner kthread is parked at this point, so do one final
3982 * pass over unused block groups ourselves.
3983 */
3984 btrfs_delete_unused_bgs(fs_info);
3985
3986 ret = btrfs_commit_super(fs_info);
3987 if (ret)
3988 btrfs_err(fs_info, "commit super ret %d", ret);
3989 }
3990
3991 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
3992 test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
3993 btrfs_error_commit_super(fs_info);
3994
3995 kthread_stop(fs_info->transaction_kthread);
3996 kthread_stop(fs_info->cleaner_kthread);
3997
3998 ASSERT(list_empty(&fs_info->delayed_iputs));
3999 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4000
4001 btrfs_free_qgroup_config(fs_info);
4002 ASSERT(list_empty(&fs_info->delalloc_roots));
4003
4004 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4005 btrfs_info(fs_info, "at unmount delalloc count %lld",
4006 percpu_counter_sum(&fs_info->delalloc_bytes));
4007 }
4008
4009 btrfs_sysfs_remove_mounted(fs_info);
4010 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4011
4012 btrfs_free_fs_roots(fs_info);
4013
4014 btrfs_put_block_group_cache(fs_info);
4015
4016 /*
4017 * We must make sure that no read requests are submitted after we
4018 * have stopped all the workers.
4019 */
4020 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4021 btrfs_stop_all_workers(fs_info);
4022
4023 btrfs_free_block_groups(fs_info);
4024
4025 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4026 free_root_pointers(fs_info, 1);
4027
4028 iput(fs_info->btree_inode);
4029
4030#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4031 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4032 btrfsic_unmount(fs_info->fs_devices);
4033#endif
4034
4035 btrfs_close_devices(fs_info->fs_devices);
4036 btrfs_mapping_tree_free(&fs_info->mapping_tree);
4037
4038 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
4039 percpu_counter_destroy(&fs_info->delalloc_bytes);
4040 percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
4041 cleanup_srcu_struct(&fs_info->subvol_srcu);
4042
4043 btrfs_free_stripe_hash_table(fs_info);
4044 btrfs_free_ref_cache(fs_info);
4045
4046 while (!list_empty(&fs_info->pinned_chunks)) {
4047 struct extent_map *em;
4048
4049 em = list_first_entry(&fs_info->pinned_chunks,
4050 struct extent_map, list);
4051 list_del_init(&em->list);
4052 free_extent_map(em);
4053 }
4054}
4055
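/*
 * Returns 1 if the buffer is uptodate and its generation matches
 * parent_transid, 0 if it is not, and -EAGAIN if that could not be
 * checked without blocking while @atomic is set.
 */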
4056int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4057 int atomic)
4058{
4059 int ret;
4060 struct inode *btree_inode = buf->pages[0]->mapping->host;
4061
4062 ret = extent_buffer_uptodate(buf);
4063 if (!ret)
4064 return ret;
4065
4066 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4067 parent_transid, atomic);
4068 if (ret == -EAGAIN)
4069 return ret;
4070 return !ret;
4071}
4072
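/*
 * Mark a metadata buffer dirty and, if it was clean before, account its
 * size against the dirty_metadata_bytes counter used to throttle btree
 * writeback. Warns if the buffer's generation does not match the
 * currently running transaction.
 */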
4073void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4074{
4075 struct btrfs_fs_info *fs_info;
4076 struct btrfs_root *root;
4077 u64 transid = btrfs_header_generation(buf);
4078 int was_dirty;
4079
4080#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4081 /*
4082 * This is a fast path, so only do this check if we have sanity tests
4083 * enabled. Normal users shouldn't be marking unmapped buffers dirty
4084 * outside of the sanity tests.
4085 */
4086 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4087 return;
4088#endif
4089 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4090 fs_info = root->fs_info;
4091 btrfs_assert_tree_locked(buf);
4092 if (transid != fs_info->generation)
4093 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4094 buf->start, transid, fs_info->generation);
4095 was_dirty = set_extent_buffer_dirty(buf);
4096 if (!was_dirty)
4097 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4098 buf->len,
4099 fs_info->dirty_metadata_batch);
4100#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4101 /*
4102 * btrfs_mark_buffer_dirty() can be called with the item pointer set
4103 * but the item data not yet updated, so only check the item pointers
4104 * here, not the item data.
4105 */
4106 if (btrfs_header_level(buf) == 0 &&
4107 btrfs_check_leaf_relaxed(fs_info, buf)) {
4108 btrfs_print_leaf(buf);
4109 ASSERT(0);
4110 }
4111#endif
4112}
4113
4114static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4115 int flush_delayed)
4116{
4117 /*
4118 * Older kernels can get into trouble with this code: they end up
4119 * stuck in balance_dirty_pages() forever.
4120 */
4121 int ret;
4122
4123 if (current->flags & PF_MEMALLOC)
4124 return;
4125
4126 if (flush_delayed)
4127 btrfs_balance_delayed_items(fs_info);
4128
4129 ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4130 BTRFS_DIRTY_METADATA_THRESH,
4131 fs_info->dirty_metadata_batch);
4132 if (ret > 0)
4133 balance_dirty_pages_ratelimited(
4134 fs_info->btree_inode->i_mapping);
4135}
4136
4137void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4138{
4139 __btrfs_btree_balance_dirty(fs_info, 1);
4140}
4141
4142void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4143{
4144 __btrfs_btree_balance_dirty(fs_info, 0);
4145}
4146
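/*
 * Read an extent buffer, verifying that its generation matches
 * @parent_transid and, via @level and @first_key, that it is the node we
 * expect; a thin wrapper around btree_read_extent_buffer_pages().
 */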
4147int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4148 struct btrfs_key *first_key)
4149{
4150 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4151 struct btrfs_fs_info *fs_info = root->fs_info;
4152
4153 return btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
4154 level, first_key);
4155}
4156
4157static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4158{
4159 /* cleanup FS via transaction */
4160 btrfs_cleanup_transaction(fs_info);
4161
4162 mutex_lock(&fs_info->cleaner_mutex);
4163 btrfs_run_delayed_iputs(fs_info);
4164 mutex_unlock(&fs_info->cleaner_mutex);
4165
4166 down_write(&fs_info->cleanup_work_sem);
4167 up_write(&fs_info->cleanup_work_sem);
4168}
4169
4170static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4171{
4172 struct btrfs_ordered_extent *ordered;
4173
4174 spin_lock(&root->ordered_extent_lock);
4175 /*
4176 * This just short circuits the ordered completion machinery, which
4177 * makes sure the ordered extent gets properly cleaned up.
4178 */
4179 list_for_each_entry(ordered, &root->ordered_extents,
4180 root_extent_list)
4181 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4182 spin_unlock(&root->ordered_extent_lock);
4183}
4184
4185static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4186{
4187 struct btrfs_root *root;
4188 struct list_head splice;
4189
4190 INIT_LIST_HEAD(&splice);
4191
4192 spin_lock(&fs_info->ordered_root_lock);
4193 list_splice_init(&fs_info->ordered_roots, &splice);
4194 while (!list_empty(&splice)) {
4195 root = list_first_entry(&splice, struct btrfs_root,
4196 ordered_root);
4197 list_move_tail(&root->ordered_root,
4198 &fs_info->ordered_roots);
4199
4200 spin_unlock(&fs_info->ordered_root_lock);
4201 btrfs_destroy_ordered_extents(root);
4202
4203 cond_resched();
4204 spin_lock(&fs_info->ordered_root_lock);
4205 }
4206 spin_unlock(&fs_info->ordered_root_lock);
4207
4208 /*
4209 * We need this here because if we've been flipped read-only we won't
4210 * get sync() from the umount, so we need to make sure any ordered
4211 * extents whose dirty pages haven't started writeout yet
4212 * actually get run and error out properly.
4213 */
4214 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4215}
4216
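/*
 * Drop all delayed refs of an aborted transaction: every ref head in the
 * href_root rbtree is emptied and released, and heads that had reserved
 * space for a pending insert get their extent range pinned so the space
 * is still accounted for when it is discarded.
 */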
4217static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4218 struct btrfs_fs_info *fs_info)
4219{
4220 struct rb_node *node;
4221 struct btrfs_delayed_ref_root *delayed_refs;
4222 struct btrfs_delayed_ref_node *ref;
4223 int ret = 0;
4224
4225 delayed_refs = &trans->delayed_refs;
4226
4227 spin_lock(&delayed_refs->lock);
4228 if (atomic_read(&delayed_refs->num_entries) == 0) {
4229 spin_unlock(&delayed_refs->lock);
4230 btrfs_info(fs_info, "delayed_refs has no entries");
4231 return ret;
4232 }
4233
4234 while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4235 struct btrfs_delayed_ref_head *head;
4236 struct rb_node *n;
4237 bool pin_bytes = false;
4238
4239 head = rb_entry(node, struct btrfs_delayed_ref_head,
4240 href_node);
4241 if (!mutex_trylock(&head->mutex)) {
4242 refcount_inc(&head->refs);
4243 spin_unlock(&delayed_refs->lock);
4244
4245 mutex_lock(&head->mutex);
4246 mutex_unlock(&head->mutex);
4247 btrfs_put_delayed_ref_head(head);
4248 spin_lock(&delayed_refs->lock);
4249 continue;
4250 }
4251 spin_lock(&head->lock);
4252 while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4253 ref = rb_entry(n, struct btrfs_delayed_ref_node,
4254 ref_node);
4255 ref->in_tree = 0;
4256 rb_erase_cached(&ref->ref_node, &head->ref_tree);
4257 RB_CLEAR_NODE(&ref->ref_node);
4258 if (!list_empty(&ref->add_list))
4259 list_del(&ref->add_list);
4260 atomic_dec(&delayed_refs->num_entries);
4261 btrfs_put_delayed_ref(ref);
4262 }
4263 if (head->must_insert_reserved)
4264 pin_bytes = true;
4265 btrfs_free_delayed_extent_op(head->extent_op);
4266 delayed_refs->num_heads--;
4267 if (head->processing == 0)
4268 delayed_refs->num_heads_ready--;
4269 atomic_dec(&delayed_refs->num_entries);
4270 rb_erase_cached(&head->href_node, &delayed_refs->href_root);
4271 RB_CLEAR_NODE(&head->href_node);
4272 spin_unlock(&head->lock);
4273 spin_unlock(&delayed_refs->lock);
4274 mutex_unlock(&head->mutex);
4275
4276 if (pin_bytes)
4277 btrfs_pin_extent(fs_info, head->bytenr,
4278 head->num_bytes, 1);
4279 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4280 btrfs_put_delayed_ref_head(head);
4281 cond_resched();
4282 spin_lock(&delayed_refs->lock);
4283 }
4284
4285 spin_unlock(&delayed_refs->lock);
4286
4287 return ret;
4288}
4289
4290static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4291{
4292 struct btrfs_inode *btrfs_inode;
4293 struct list_head splice;
4294
4295 INIT_LIST_HEAD(&splice);
4296
4297 spin_lock(&root->delalloc_lock);
4298 list_splice_init(&root->delalloc_inodes, &splice);
4299
4300 while (!list_empty(&splice)) {
4301 struct inode *inode = NULL;
4302 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4303 delalloc_inodes);
4304 __btrfs_del_delalloc_inode(root, btrfs_inode);
4305 spin_unlock(&root->delalloc_lock);
4306
4307 /*
4308 * Make sure we get a live inode and that it won't disappear
4309 * in the meantime.
4310 */
4311 inode = igrab(&btrfs_inode->vfs_inode);
4312 if (inode) {
4313 invalidate_inode_pages2(inode->i_mapping);
4314 iput(inode);
4315 }
4316 spin_lock(&root->delalloc_lock);
4317 }
4318 spin_unlock(&root->delalloc_lock);
4319}
4320
4321static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4322{
4323 struct btrfs_root *root;
4324 struct list_head splice;
4325
4326 INIT_LIST_HEAD(&splice);
4327
4328 spin_lock(&fs_info->delalloc_root_lock);
4329 list_splice_init(&fs_info->delalloc_roots, &splice);
4330 while (!list_empty(&splice)) {
4331 root = list_first_entry(&splice, struct btrfs_root,
4332 delalloc_root);
4333 root = btrfs_grab_fs_root(root);
4334 BUG_ON(!root);
4335 spin_unlock(&fs_info->delalloc_root_lock);
4336
4337 btrfs_destroy_delalloc_inodes(root);
4338 btrfs_put_fs_root(root);
4339
4340 spin_lock(&fs_info->delalloc_root_lock);
4341 }
4342 spin_unlock(&fs_info->delalloc_root_lock);
4343}
4344
4345static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4346 struct extent_io_tree *dirty_pages,
4347 int mark)
4348{
4349 int ret;
4350 struct extent_buffer *eb;
4351 u64 start = 0;
4352 u64 end;
4353
4354 while (1) {
4355 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4356 mark, NULL);
4357 if (ret)
4358 break;
4359
4360 clear_extent_bits(dirty_pages, start, end, mark);
4361 while (start <= end) {
4362 eb = find_extent_buffer(fs_info, start);
4363 start += fs_info->nodesize;
4364 if (!eb)
4365 continue;
4366 wait_on_extent_buffer_writeback(eb);
4367
4368 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4369 &eb->bflags))
4370 clear_extent_buffer_dirty(eb);
4371 free_extent_buffer_stale(eb);
4372 }
4373 }
4374
4375 return ret;
4376}
4377
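/*
 * Unpin everything an aborted transaction left behind. Pinned extents
 * live in one of the two fs_info->freed_extents trees (swapped at commit
 * time), so after draining the tree that pinned_extents currently points
 * at, loop once more to drain the other one as well.
 */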
4378static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4379 struct extent_io_tree *pinned_extents)
4380{
4381 struct extent_io_tree *unpin;
4382 u64 start;
4383 u64 end;
4384 int ret;
4385 bool loop = true;
4386
4387 unpin = pinned_extents;
4388again:
4389 while (1) {
4390 struct extent_state *cached_state = NULL;
4391
4392 /*
4393 * btrfs_finish_extent_commit() may pick up the same range as
4394 * ours between find_first_extent_bit and clear_extent_dirty.
4395 * Hence, hold the unused_bg_unpin_mutex to avoid double
4396 * unpinning the same extent range.
4397 */
4398 mutex_lock(&fs_info->unused_bg_unpin_mutex);
4399 ret = find_first_extent_bit(unpin, 0, &start, &end,
4400 EXTENT_DIRTY, &cached_state);
4401 if (ret) {
4402 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4403 break;
4404 }
4405
4406 clear_extent_dirty(unpin, start, end, &cached_state);
4407 free_extent_state(cached_state);
4408 btrfs_error_unpin_extent_range(fs_info, start, end);
4409 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4410 cond_resched();
4411 }
4412
4413 if (loop) {
4414 if (unpin == &fs_info->freed_extents[0])
4415 unpin = &fs_info->freed_extents[1];
4416 else
4417 unpin = &fs_info->freed_extents[0];
4418 loop = false;
4419 goto again;
4420 }
4421
4422 return 0;
4423}
4424
4425static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4426{
4427 struct inode *inode;
4428
4429 inode = cache->io_ctl.inode;
4430 if (inode) {
4431 invalidate_inode_pages2(inode->i_mapping);
4432 BTRFS_I(inode)->generation = 0;
4433 cache->io_ctl.inode = NULL;
4434 iput(inode);
4435 }
4436 btrfs_put_block_group(cache);
4437}
4438
4439void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4440 struct btrfs_fs_info *fs_info)
4441{
4442 struct btrfs_block_group_cache *cache;
4443
4444 spin_lock(&cur_trans->dirty_bgs_lock);
4445 while (!list_empty(&cur_trans->dirty_bgs)) {
4446 cache = list_first_entry(&cur_trans->dirty_bgs,
4447 struct btrfs_block_group_cache,
4448 dirty_list);
4449
4450 if (!list_empty(&cache->io_list)) {
4451 spin_unlock(&cur_trans->dirty_bgs_lock);
4452 list_del_init(&cache->io_list);
4453 btrfs_cleanup_bg_io(cache);
4454 spin_lock(&cur_trans->dirty_bgs_lock);
4455 }
4456
4457 list_del_init(&cache->dirty_list);
4458 spin_lock(&cache->lock);
4459 cache->disk_cache_state = BTRFS_DC_ERROR;
4460 spin_unlock(&cache->lock);
4461
4462 spin_unlock(&cur_trans->dirty_bgs_lock);
4463 btrfs_put_block_group(cache);
4464 btrfs_delayed_refs_rsv_release(fs_info, 1);
4465 spin_lock(&cur_trans->dirty_bgs_lock);
4466 }
4467 spin_unlock(&cur_trans->dirty_bgs_lock);
4468
4469 /*
4470 * Refer to the definition of the io_bgs member for details on why
4471 * it's safe to use it without any locking.
4472 */
4473 while (!list_empty(&cur_trans->io_bgs)) {
4474 cache = list_first_entry(&cur_trans->io_bgs,
4475 struct btrfs_block_group_cache,
4476 io_list);
4477
4478 list_del_init(&cache->io_list);
4479 spin_lock(&cache->lock);
4480 cache->disk_cache_state = BTRFS_DC_ERROR;
4481 spin_unlock(&cache->lock);
4482 btrfs_cleanup_bg_io(cache);
4483 }
4484}
4485
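/*
 * Tear down a single aborted transaction. The state transitions below
 * (COMMIT_START -> UNBLOCKED -> COMPLETED) mirror the normal commit
 * sequence so that anyone sleeping on the corresponding wait queues is
 * woken and can observe the aborted transaction.
 */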
4486void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4487 struct btrfs_fs_info *fs_info)
4488{
4489 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4490 ASSERT(list_empty(&cur_trans->dirty_bgs));
4491 ASSERT(list_empty(&cur_trans->io_bgs));
4492
4493 btrfs_destroy_delayed_refs(cur_trans, fs_info);
4494
4495 cur_trans->state = TRANS_STATE_COMMIT_START;
4496 wake_up(&fs_info->transaction_blocked_wait);
4497
4498 cur_trans->state = TRANS_STATE_UNBLOCKED;
4499 wake_up(&fs_info->transaction_wait);
4500
4501 btrfs_destroy_delayed_inodes(fs_info);
4502 btrfs_assert_delayed_root_empty(fs_info);
4503
4504 btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4505 EXTENT_DIRTY);
4506 btrfs_destroy_pinned_extent(fs_info,
4507 fs_info->pinned_extents);
4508
4509 cur_trans->state = TRANS_STATE_COMPLETED;
4510 wake_up(&cur_trans->commit_wait);
4511}
4512
4513static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4514{
4515 struct btrfs_transaction *t;
4516
4517 mutex_lock(&fs_info->transaction_kthread_mutex);
4518
4519 spin_lock(&fs_info->trans_lock);
4520 while (!list_empty(&fs_info->trans_list)) {
4521 t = list_first_entry(&fs_info->trans_list,
4522 struct btrfs_transaction, list);
4523 if (t->state >= TRANS_STATE_COMMIT_START) {
4524 refcount_inc(&t->use_count);
4525 spin_unlock(&fs_info->trans_lock);
4526 btrfs_wait_for_commit(fs_info, t->transid);
4527 btrfs_put_transaction(t);
4528 spin_lock(&fs_info->trans_lock);
4529 continue;
4530 }
4531 if (t == fs_info->running_transaction) {
4532 t->state = TRANS_STATE_COMMIT_DOING;
4533 spin_unlock(&fs_info->trans_lock);
4534 /*
4535 * We wait for num_writers to reach 0 since we don't currently
4536 * hold an open trans handle for this transaction.
4537 */
4538 wait_event(t->writer_wait,
4539 atomic_read(&t->num_writers) == 0);
4540 } else {
4541 spin_unlock(&fs_info->trans_lock);
4542 }
4543 btrfs_cleanup_one_transaction(t, fs_info);
4544
4545 spin_lock(&fs_info->trans_lock);
4546 if (t == fs_info->running_transaction)
4547 fs_info->running_transaction = NULL;
4548 list_del_init(&t->list);
4549 spin_unlock(&fs_info->trans_lock);
4550
4551 btrfs_put_transaction(t);
4552 trace_btrfs_transaction_commit(fs_info->tree_root);
4553 spin_lock(&fs_info->trans_lock);
4554 }
4555 spin_unlock(&fs_info->trans_lock);
4556 btrfs_destroy_all_ordered_extents(fs_info);
4557 btrfs_destroy_delayed_inodes(fs_info);
4558 btrfs_assert_delayed_root_empty(fs_info);
4559 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4560 btrfs_destroy_all_delalloc_inodes(fs_info);
4561 mutex_unlock(&fs_info->transaction_kthread_mutex);
4562
4563 return 0;
4564}
4565
4566static const struct extent_io_ops btree_extent_io_ops = {
4567 /* mandatory callbacks */
4568 .submit_bio_hook = btree_submit_bio_hook,
4569 .readpage_end_io_hook = btree_readpage_end_io_hook,
4570};