Btrfs: forced readonly mounts on errors

This patch comes from "Forced readonly mounts on errors" ideas.

As we know, this is the first step in being more fault tolerant of disk
corruptions instead of just using BUG() statements.

The major content:
- add a framework for generating errors that should result in filesystems
going readonly.
- keep FS state in disk super block.
- make sure that all resources will be freed and released at umount time.
- make sure that after FS is forced readonly on error, there will be no more
disk change before FS is corrected. For this, we should stop write operation.

After this patch is applied, the conversion from BUG() to such a framework can
happen incrementally.

Signed-off-by: Liu Bo <liubo2009@cn.fujitsu.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>

authored by liubo and committed by Chris Mason acce952b 6f88a440

+523 -2
+24
fs/btrfs/ctree.h
··· 295 295 #define BTRFS_FSID_SIZE 16 296 296 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) 297 297 #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) 298 + 299 + /* 300 + * File system states 301 + */ 302 + 303 + /* Errors detected */ 304 + #define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) 305 + 298 306 #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) 299 307 #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) 300 308 ··· 1066 1058 unsigned metadata_ratio; 1067 1059 1068 1060 void *bdev_holder; 1061 + 1062 + /* filesystem state */ 1063 + u64 fs_state; 1069 1064 }; 1070 1065 1071 1066 /* ··· 2214 2203 struct btrfs_block_group_cache *cache); 2215 2204 void btrfs_put_block_group_cache(struct btrfs_fs_info *info); 2216 2205 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); 2206 + int btrfs_error_unpin_extent_range(struct btrfs_root *root, 2207 + u64 start, u64 end); 2208 + int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 2209 + u64 num_bytes); 2210 + 2217 2211 /* ctree.c */ 2218 2212 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 2219 2213 int level, int *slot); ··· 2572 2556 /* super.c */ 2573 2557 int btrfs_parse_options(struct btrfs_root *root, char *options); 2574 2558 int btrfs_sync_fs(struct super_block *sb, int wait); 2559 + void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, 2560 + unsigned int line, int errno); 2561 + 2562 + #define btrfs_std_error(fs_info, errno) \ 2563 + do { \ 2564 + if ((errno)) \ 2565 + __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ 2566 + } while (0) 2575 2567 2576 2568 /* acl.c */ 2577 2569 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
+389 -2
fs/btrfs/disk-io.c
··· 44 44 static struct extent_io_ops btree_extent_io_ops; 45 45 static void end_workqueue_fn(struct btrfs_work *work); 46 46 static void free_fs_root(struct btrfs_root *root); 47 + static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 48 + int read_only); 49 + static int btrfs_destroy_ordered_operations(struct btrfs_root *root); 50 + static int btrfs_destroy_ordered_extents(struct btrfs_root *root); 51 + static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 52 + struct btrfs_root *root); 53 + static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); 54 + static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); 55 + static int btrfs_destroy_marked_extents(struct btrfs_root *root, 56 + struct extent_io_tree *dirty_pages, 57 + int mark); 58 + static int btrfs_destroy_pinned_extent(struct btrfs_root *root, 59 + struct extent_io_tree *pinned_extents); 60 + static int btrfs_cleanup_transaction(struct btrfs_root *root); 47 61 48 62 /* 49 63 * end_io_wq structs are used to do processing in task context when an IO is ··· 1752 1738 if (!btrfs_super_root(disk_super)) 1753 1739 goto fail_iput; 1754 1740 1741 + /* check FS state, whether FS is broken. 
*/ 1742 + fs_info->fs_state |= btrfs_super_flags(disk_super); 1743 + 1744 + btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); 1745 + 1755 1746 ret = btrfs_parse_options(tree_root, options); 1756 1747 if (ret) { 1757 1748 err = ret; ··· 1987 1968 btrfs_set_opt(fs_info->mount_opt, SSD); 1988 1969 } 1989 1970 1990 - if (btrfs_super_log_root(disk_super) != 0) { 1971 + /* do not make disk changes in broken FS */ 1972 + if (btrfs_super_log_root(disk_super) != 0 && 1973 + !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { 1991 1974 u64 bytenr = btrfs_super_log_root(disk_super); 1992 1975 1993 1976 if (fs_devices->rw_devices == 0) { ··· 2485 2464 smp_mb(); 2486 2465 2487 2466 btrfs_put_block_group_cache(fs_info); 2467 + 2468 + /* 2469 + * Here come 2 situations when btrfs is broken to flip readonly: 2470 + * 2471 + * 1. when btrfs flips readonly somewhere else before 2472 + * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, 2473 + * and btrfs will skip to write sb directly to keep 2474 + * ERROR state on disk. 2475 + * 2476 + * 2. when btrfs flips readonly just in btrfs_commit_super, 2477 + * and in such case, btrfs cannnot write sb via btrfs_commit_super, 2478 + * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, 2479 + * btrfs will cleanup all FS resources first and write sb then. 
2480 + */ 2488 2481 if (!(fs_info->sb->s_flags & MS_RDONLY)) { 2489 - ret = btrfs_commit_super(root); 2482 + ret = btrfs_commit_super(root); 2483 + if (ret) 2484 + printk(KERN_ERR "btrfs: commit super ret %d\n", ret); 2485 + } 2486 + 2487 + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 2488 + ret = btrfs_error_commit_super(root); 2490 2489 if (ret) 2491 2490 printk(KERN_ERR "btrfs: commit super ret %d\n", ret); 2492 2491 } ··· 2679 2638 free_extent_buffer(eb); 2680 2639 out: 2681 2640 lock_page(page); 2641 + return 0; 2642 + } 2643 + 2644 + static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 2645 + int read_only) 2646 + { 2647 + if (read_only) 2648 + return; 2649 + 2650 + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 2651 + printk(KERN_WARNING "warning: mount fs with errors, " 2652 + "running btrfsck is recommended\n"); 2653 + } 2654 + 2655 + int btrfs_error_commit_super(struct btrfs_root *root) 2656 + { 2657 + int ret; 2658 + 2659 + mutex_lock(&root->fs_info->cleaner_mutex); 2660 + btrfs_run_delayed_iputs(root); 2661 + mutex_unlock(&root->fs_info->cleaner_mutex); 2662 + 2663 + down_write(&root->fs_info->cleanup_work_sem); 2664 + up_write(&root->fs_info->cleanup_work_sem); 2665 + 2666 + /* cleanup FS via transaction */ 2667 + btrfs_cleanup_transaction(root); 2668 + 2669 + ret = write_ctree_super(NULL, root, 0); 2670 + 2671 + return ret; 2672 + } 2673 + 2674 + static int btrfs_destroy_ordered_operations(struct btrfs_root *root) 2675 + { 2676 + struct btrfs_inode *btrfs_inode; 2677 + struct list_head splice; 2678 + 2679 + INIT_LIST_HEAD(&splice); 2680 + 2681 + mutex_lock(&root->fs_info->ordered_operations_mutex); 2682 + spin_lock(&root->fs_info->ordered_extent_lock); 2683 + 2684 + list_splice_init(&root->fs_info->ordered_operations, &splice); 2685 + while (!list_empty(&splice)) { 2686 + btrfs_inode = list_entry(splice.next, struct btrfs_inode, 2687 + ordered_operations); 2688 + 2689 + list_del_init(&btrfs_inode->ordered_operations); 2690 + 2691 + 
btrfs_invalidate_inodes(btrfs_inode->root); 2692 + } 2693 + 2694 + spin_unlock(&root->fs_info->ordered_extent_lock); 2695 + mutex_unlock(&root->fs_info->ordered_operations_mutex); 2696 + 2697 + return 0; 2698 + } 2699 + 2700 + static int btrfs_destroy_ordered_extents(struct btrfs_root *root) 2701 + { 2702 + struct list_head splice; 2703 + struct btrfs_ordered_extent *ordered; 2704 + struct inode *inode; 2705 + 2706 + INIT_LIST_HEAD(&splice); 2707 + 2708 + spin_lock(&root->fs_info->ordered_extent_lock); 2709 + 2710 + list_splice_init(&root->fs_info->ordered_extents, &splice); 2711 + while (!list_empty(&splice)) { 2712 + ordered = list_entry(splice.next, struct btrfs_ordered_extent, 2713 + root_extent_list); 2714 + 2715 + list_del_init(&ordered->root_extent_list); 2716 + atomic_inc(&ordered->refs); 2717 + 2718 + /* the inode may be getting freed (in sys_unlink path). */ 2719 + inode = igrab(ordered->inode); 2720 + 2721 + spin_unlock(&root->fs_info->ordered_extent_lock); 2722 + if (inode) 2723 + iput(inode); 2724 + 2725 + atomic_set(&ordered->refs, 1); 2726 + btrfs_put_ordered_extent(ordered); 2727 + 2728 + spin_lock(&root->fs_info->ordered_extent_lock); 2729 + } 2730 + 2731 + spin_unlock(&root->fs_info->ordered_extent_lock); 2732 + 2733 + return 0; 2734 + } 2735 + 2736 + static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 2737 + struct btrfs_root *root) 2738 + { 2739 + struct rb_node *node; 2740 + struct btrfs_delayed_ref_root *delayed_refs; 2741 + struct btrfs_delayed_ref_node *ref; 2742 + int ret = 0; 2743 + 2744 + delayed_refs = &trans->delayed_refs; 2745 + 2746 + spin_lock(&delayed_refs->lock); 2747 + if (delayed_refs->num_entries == 0) { 2748 + printk(KERN_INFO "delayed_refs has NO entry\n"); 2749 + return ret; 2750 + } 2751 + 2752 + node = rb_first(&delayed_refs->root); 2753 + while (node) { 2754 + ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 2755 + node = rb_next(node); 2756 + 2757 + ref->in_tree = 0; 2758 + 
rb_erase(&ref->rb_node, &delayed_refs->root); 2759 + delayed_refs->num_entries--; 2760 + 2761 + atomic_set(&ref->refs, 1); 2762 + if (btrfs_delayed_ref_is_head(ref)) { 2763 + struct btrfs_delayed_ref_head *head; 2764 + 2765 + head = btrfs_delayed_node_to_head(ref); 2766 + mutex_lock(&head->mutex); 2767 + kfree(head->extent_op); 2768 + delayed_refs->num_heads--; 2769 + if (list_empty(&head->cluster)) 2770 + delayed_refs->num_heads_ready--; 2771 + list_del_init(&head->cluster); 2772 + mutex_unlock(&head->mutex); 2773 + } 2774 + 2775 + spin_unlock(&delayed_refs->lock); 2776 + btrfs_put_delayed_ref(ref); 2777 + 2778 + cond_resched(); 2779 + spin_lock(&delayed_refs->lock); 2780 + } 2781 + 2782 + spin_unlock(&delayed_refs->lock); 2783 + 2784 + return ret; 2785 + } 2786 + 2787 + static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t) 2788 + { 2789 + struct btrfs_pending_snapshot *snapshot; 2790 + struct list_head splice; 2791 + 2792 + INIT_LIST_HEAD(&splice); 2793 + 2794 + list_splice_init(&t->pending_snapshots, &splice); 2795 + 2796 + while (!list_empty(&splice)) { 2797 + snapshot = list_entry(splice.next, 2798 + struct btrfs_pending_snapshot, 2799 + list); 2800 + 2801 + list_del_init(&snapshot->list); 2802 + 2803 + kfree(snapshot); 2804 + } 2805 + 2806 + return 0; 2807 + } 2808 + 2809 + static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) 2810 + { 2811 + struct btrfs_inode *btrfs_inode; 2812 + struct list_head splice; 2813 + 2814 + INIT_LIST_HEAD(&splice); 2815 + 2816 + list_splice_init(&root->fs_info->delalloc_inodes, &splice); 2817 + 2818 + spin_lock(&root->fs_info->delalloc_lock); 2819 + 2820 + while (!list_empty(&splice)) { 2821 + btrfs_inode = list_entry(splice.next, struct btrfs_inode, 2822 + delalloc_inodes); 2823 + 2824 + list_del_init(&btrfs_inode->delalloc_inodes); 2825 + 2826 + btrfs_invalidate_inodes(btrfs_inode->root); 2827 + } 2828 + 2829 + spin_unlock(&root->fs_info->delalloc_lock); 2830 + 2831 + return 0; 2832 + } 2833 + 2834 
+ static int btrfs_destroy_marked_extents(struct btrfs_root *root, 2835 + struct extent_io_tree *dirty_pages, 2836 + int mark) 2837 + { 2838 + int ret; 2839 + struct page *page; 2840 + struct inode *btree_inode = root->fs_info->btree_inode; 2841 + struct extent_buffer *eb; 2842 + u64 start = 0; 2843 + u64 end; 2844 + u64 offset; 2845 + unsigned long index; 2846 + 2847 + while (1) { 2848 + ret = find_first_extent_bit(dirty_pages, start, &start, &end, 2849 + mark); 2850 + if (ret) 2851 + break; 2852 + 2853 + clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); 2854 + while (start <= end) { 2855 + index = start >> PAGE_CACHE_SHIFT; 2856 + start = (u64)(index + 1) << PAGE_CACHE_SHIFT; 2857 + page = find_get_page(btree_inode->i_mapping, index); 2858 + if (!page) 2859 + continue; 2860 + offset = page_offset(page); 2861 + 2862 + spin_lock(&dirty_pages->buffer_lock); 2863 + eb = radix_tree_lookup( 2864 + &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, 2865 + offset >> PAGE_CACHE_SHIFT); 2866 + spin_unlock(&dirty_pages->buffer_lock); 2867 + if (eb) { 2868 + ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, 2869 + &eb->bflags); 2870 + atomic_set(&eb->refs, 1); 2871 + } 2872 + if (PageWriteback(page)) 2873 + end_page_writeback(page); 2874 + 2875 + lock_page(page); 2876 + if (PageDirty(page)) { 2877 + clear_page_dirty_for_io(page); 2878 + spin_lock_irq(&page->mapping->tree_lock); 2879 + radix_tree_tag_clear(&page->mapping->page_tree, 2880 + page_index(page), 2881 + PAGECACHE_TAG_DIRTY); 2882 + spin_unlock_irq(&page->mapping->tree_lock); 2883 + } 2884 + 2885 + page->mapping->a_ops->invalidatepage(page, 0); 2886 + unlock_page(page); 2887 + } 2888 + } 2889 + 2890 + return ret; 2891 + } 2892 + 2893 + static int btrfs_destroy_pinned_extent(struct btrfs_root *root, 2894 + struct extent_io_tree *pinned_extents) 2895 + { 2896 + struct extent_io_tree *unpin; 2897 + u64 start; 2898 + u64 end; 2899 + int ret; 2900 + 2901 + unpin = pinned_extents; 2902 + while (1) { 2903 + ret = 
find_first_extent_bit(unpin, 0, &start, &end, 2904 + EXTENT_DIRTY); 2905 + if (ret) 2906 + break; 2907 + 2908 + /* opt_discard */ 2909 + ret = btrfs_error_discard_extent(root, start, end + 1 - start); 2910 + 2911 + clear_extent_dirty(unpin, start, end, GFP_NOFS); 2912 + btrfs_error_unpin_extent_range(root, start, end); 2913 + cond_resched(); 2914 + } 2915 + 2916 + return 0; 2917 + } 2918 + 2919 + static int btrfs_cleanup_transaction(struct btrfs_root *root) 2920 + { 2921 + struct btrfs_transaction *t; 2922 + LIST_HEAD(list); 2923 + 2924 + WARN_ON(1); 2925 + 2926 + mutex_lock(&root->fs_info->trans_mutex); 2927 + mutex_lock(&root->fs_info->transaction_kthread_mutex); 2928 + 2929 + list_splice_init(&root->fs_info->trans_list, &list); 2930 + while (!list_empty(&list)) { 2931 + t = list_entry(list.next, struct btrfs_transaction, list); 2932 + if (!t) 2933 + break; 2934 + 2935 + btrfs_destroy_ordered_operations(root); 2936 + 2937 + btrfs_destroy_ordered_extents(root); 2938 + 2939 + btrfs_destroy_delayed_refs(t, root); 2940 + 2941 + btrfs_block_rsv_release(root, 2942 + &root->fs_info->trans_block_rsv, 2943 + t->dirty_pages.dirty_bytes); 2944 + 2945 + /* FIXME: cleanup wait for commit */ 2946 + t->in_commit = 1; 2947 + t->blocked = 1; 2948 + if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) 2949 + wake_up(&root->fs_info->transaction_blocked_wait); 2950 + 2951 + t->blocked = 0; 2952 + if (waitqueue_active(&root->fs_info->transaction_wait)) 2953 + wake_up(&root->fs_info->transaction_wait); 2954 + mutex_unlock(&root->fs_info->trans_mutex); 2955 + 2956 + mutex_lock(&root->fs_info->trans_mutex); 2957 + t->commit_done = 1; 2958 + if (waitqueue_active(&t->commit_wait)) 2959 + wake_up(&t->commit_wait); 2960 + mutex_unlock(&root->fs_info->trans_mutex); 2961 + 2962 + mutex_lock(&root->fs_info->trans_mutex); 2963 + 2964 + btrfs_destroy_pending_snapshots(t); 2965 + 2966 + btrfs_destroy_delalloc_inodes(root); 2967 + 2968 + spin_lock(&root->fs_info->new_trans_lock); 2969 + 
root->fs_info->running_transaction = NULL; 2970 + spin_unlock(&root->fs_info->new_trans_lock); 2971 + 2972 + btrfs_destroy_marked_extents(root, &t->dirty_pages, 2973 + EXTENT_DIRTY); 2974 + 2975 + btrfs_destroy_pinned_extent(root, 2976 + root->fs_info->pinned_extents); 2977 + 2978 + t->use_count = 0; 2979 + list_del_init(&t->list); 2980 + memset(t, 0, sizeof(*t)); 2981 + kmem_cache_free(btrfs_transaction_cachep, t); 2982 + } 2983 + 2984 + mutex_unlock(&root->fs_info->transaction_kthread_mutex); 2985 + mutex_unlock(&root->fs_info->trans_mutex); 2986 + 2682 2987 return 0; 2683 2988 } 2684 2989
+1
fs/btrfs/disk-io.h
··· 52 52 struct btrfs_root *root, int max_mirrors); 53 53 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); 54 54 int btrfs_commit_super(struct btrfs_root *root); 55 + int btrfs_error_commit_super(struct btrfs_root *root); 55 56 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, 56 57 u64 bytenr, u32 blocksize); 57 58 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+11
fs/btrfs/extent-tree.c
··· 8642 8642 btrfs_free_path(path); 8643 8643 return ret; 8644 8644 } 8645 + 8646 + int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 8647 + { 8648 + return unpin_extent_range(root, start, end); 8649 + } 8650 + 8651 + int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 8652 + u64 num_bytes) 8653 + { 8654 + return btrfs_discard_extent(root, bytenr, num_bytes); 8655 + }
+11
fs/btrfs/file.c
··· 892 892 if (err) 893 893 goto out; 894 894 895 + /* 896 + * If BTRFS flips readonly due to some impossible error 897 + * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), 898 + * although we have opened a file as writable, we have 899 + * to stop this write operation to ensure FS consistency. 900 + */ 901 + if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 902 + err = -EROFS; 903 + goto out; 904 + } 905 + 895 906 file_update_time(file); 896 907 BTRFS_I(inode)->sequence++; 897 908
+84
fs/btrfs/super.c
··· 54 54 55 55 static const struct super_operations btrfs_super_ops; 56 56 57 + static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno, 58 + char nbuf[16]) 59 + { 60 + char *errstr = NULL; 61 + 62 + switch (errno) { 63 + case -EIO: 64 + errstr = "IO failure"; 65 + break; 66 + case -ENOMEM: 67 + errstr = "Out of memory"; 68 + break; 69 + case -EROFS: 70 + errstr = "Readonly filesystem"; 71 + break; 72 + default: 73 + if (nbuf) { 74 + if (snprintf(nbuf, 16, "error %d", -errno) >= 0) 75 + errstr = nbuf; 76 + } 77 + break; 78 + } 79 + 80 + return errstr; 81 + } 82 + 83 + static void __save_error_info(struct btrfs_fs_info *fs_info) 84 + { 85 + /* 86 + * today we only save the error info into ram. Long term we'll 87 + * also send it down to the disk 88 + */ 89 + fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR; 90 + } 91 + 92 + /* NOTE: 93 + * We move write_super stuff at umount in order to avoid deadlock 94 + * for umount hold all lock. 95 + */ 96 + static void save_error_info(struct btrfs_fs_info *fs_info) 97 + { 98 + __save_error_info(fs_info); 99 + } 100 + 101 + /* btrfs handle error by forcing the filesystem readonly */ 102 + static void btrfs_handle_error(struct btrfs_fs_info *fs_info) 103 + { 104 + struct super_block *sb = fs_info->sb; 105 + 106 + if (sb->s_flags & MS_RDONLY) 107 + return; 108 + 109 + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 110 + sb->s_flags |= MS_RDONLY; 111 + printk(KERN_INFO "btrfs is forced readonly\n"); 112 + } 113 + } 114 + 115 + /* 116 + * __btrfs_std_error decodes expected errors from the caller and 117 + * invokes the approciate error response. 118 + */ 119 + void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, 120 + unsigned int line, int errno) 121 + { 122 + struct super_block *sb = fs_info->sb; 123 + char nbuf[16]; 124 + const char *errstr; 125 + 126 + /* 127 + * Special case: if the error is EROFS, and we're already 128 + * under MS_RDONLY, then it is safe here. 
129 + */ 130 + if (errno == -EROFS && (sb->s_flags & MS_RDONLY)) 131 + return; 132 + 133 + errstr = btrfs_decode_error(fs_info, errno, nbuf); 134 + printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n", 135 + sb->s_id, function, line, errstr); 136 + save_error_info(fs_info); 137 + 138 + btrfs_handle_error(fs_info); 139 + } 140 + 57 141 static void btrfs_put_super(struct super_block *sb) 58 142 { 59 143 struct btrfs_root *root = btrfs_sb(sb);
+3
fs/btrfs/transaction.c
··· 181 181 struct btrfs_trans_handle *h; 182 182 struct btrfs_transaction *cur_trans; 183 183 int ret; 184 + 185 + if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 186 + return ERR_PTR(-EROFS); 184 187 again: 185 188 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 186 189 if (!h)