Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: convert bch_fs_flags to x-macro

Now we can print out filesystem flags in sysfs, useful for debugging
various "what's my filesystem doing" issues.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+139 -119
+32 -26
fs/bcachefs/bcachefs.h
··· 566 566 struct io_count __percpu *io_done; 567 567 }; 568 568 569 - enum { 570 - /* startup: */ 571 - BCH_FS_STARTED, 572 - BCH_FS_MAY_GO_RW, 573 - BCH_FS_RW, 574 - BCH_FS_WAS_RW, 569 + /* 570 + * fsck_done - kill? 571 + * 572 + * replace with something more general from enumated fsck passes/errors: 573 + * initial_gc_unfixed 574 + * error 575 + * topology error 576 + */ 575 577 576 - /* shutdown: */ 577 - BCH_FS_STOPPING, 578 - BCH_FS_EMERGENCY_RO, 579 - BCH_FS_GOING_RO, 580 - BCH_FS_WRITE_DISABLE_COMPLETE, 581 - BCH_FS_CLEAN_SHUTDOWN, 578 + #define BCH_FS_FLAGS() \ 579 + x(started) \ 580 + x(may_go_rw) \ 581 + x(rw) \ 582 + x(was_rw) \ 583 + x(stopping) \ 584 + x(emergency_ro) \ 585 + x(going_ro) \ 586 + x(write_disable_complete) \ 587 + x(clean_shutdown) \ 588 + x(fsck_done) \ 589 + x(initial_gc_unfixed) \ 590 + x(need_another_gc) \ 591 + x(need_delete_dead_snapshots) \ 592 + x(error) \ 593 + x(topology_error) \ 594 + x(errors_fixed) \ 595 + x(errors_not_fixed) 582 596 583 - /* fsck passes: */ 584 - BCH_FS_FSCK_DONE, 585 - BCH_FS_INITIAL_GC_UNFIXED, /* kill when we enumerate fsck errors */ 586 - BCH_FS_NEED_ANOTHER_GC, 587 - 588 - BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, 589 - 590 - /* errors: */ 591 - BCH_FS_ERROR, 592 - BCH_FS_TOPOLOGY_ERROR, 593 - BCH_FS_ERRORS_FIXED, 594 - BCH_FS_ERRORS_NOT_FIXED, 597 + enum bch_fs_flags { 598 + #define x(n) BCH_FS_##n, 599 + BCH_FS_FLAGS() 600 + #undef x 595 601 }; 596 602 597 603 struct btree_debug { ··· 1076 1070 static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref) 1077 1071 { 1078 1072 #ifdef BCH_WRITE_REF_DEBUG 1079 - return !test_bit(BCH_FS_GOING_RO, &c->flags) && 1073 + return !test_bit(BCH_FS_going_ro, &c->flags) && 1080 1074 atomic_long_inc_not_zero(&c->writes[ref]); 1081 1075 #else 1082 1076 return percpu_ref_tryget_live(&c->writes); ··· 1095 1089 if (atomic_long_read(&c->writes[i])) 1096 1090 return; 1097 1091 1098 - set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); 1092 + set_bit(BCH_FS_write_disable_complete, &c->flags); 1099 1093 wake_up(&bch2_read_only_wait); 1100 1094 #else 1101 1095 percpu_ref_put(&c->writes);
+7 -7
fs/bcachefs/btree_gc.c
··· 108 108 ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); 109 109 goto err; 110 110 } else { 111 - set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); 111 + set_bit(BCH_FS_initial_gc_unfixed, &c->flags); 112 112 } 113 113 } 114 114 } ··· 134 134 ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); 135 135 goto err; 136 136 } else { 137 - set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); 137 + set_bit(BCH_FS_initial_gc_unfixed, &c->flags); 138 138 } 139 139 } 140 140 ··· 619 619 g->data_type = 0; 620 620 g->dirty_sectors = 0; 621 621 g->cached_sectors = 0; 622 - set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags); 622 + set_bit(BCH_FS_need_another_gc, &c->flags); 623 623 } else { 624 624 do_update = true; 625 625 } ··· 664 664 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) { 665 665 if (data_type == BCH_DATA_btree) { 666 666 g->data_type = data_type; 667 - set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags); 667 + set_bit(BCH_FS_need_another_gc, &c->flags); 668 668 } else { 669 669 do_update = true; 670 670 } ··· 996 996 /* Continue marking when opted to not 997 997 * fix the error: */ 998 998 ret = 0; 999 - set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); 999 + set_bit(BCH_FS_initial_gc_unfixed, &c->flags); 1000 1000 continue; 1001 1001 } 1002 1002 } else if (ret) { ··· 1845 1845 #endif 1846 1846 c->gc_count++; 1847 1847 1848 - if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) || 1848 + if (test_bit(BCH_FS_need_another_gc, &c->flags) || 1849 1849 (!iter && bch2_test_restart_gc)) { 1850 1850 if (iter++ > 2) { 1851 1851 bch_info(c, "Unable to fix bucket gens, looping"); ··· 1857 1857 * XXX: make sure gens we fixed got saved 1858 1858 */ 1859 1859 bch_info(c, "Second GC pass needed, restarting:"); 1860 - clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags); 1860 + clear_bit(BCH_FS_need_another_gc, &c->flags); 1861 1861 __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING)); 1862 1862 1863 1863 bch2_gc_stripes_reset(c, metadata_only);
+2 -2
fs/bcachefs/btree_iter.c
··· 781 781 struct btree_node_iter node_iter = l->iter; 782 782 struct bkey_packed *k; 783 783 struct bkey_buf tmp; 784 - unsigned nr = test_bit(BCH_FS_STARTED, &c->flags) 784 + unsigned nr = test_bit(BCH_FS_started, &c->flags) 785 785 ? (path->level > 1 ? 0 : 2) 786 786 : (path->level > 1 ? 1 : 16); 787 787 bool was_locked = btree_node_locked(path, path->level); ··· 816 816 struct bch_fs *c = trans->c; 817 817 struct bkey_s_c k; 818 818 struct bkey_buf tmp; 819 - unsigned nr = test_bit(BCH_FS_STARTED, &c->flags) 819 + unsigned nr = test_bit(BCH_FS_started, &c->flags) 820 820 ? (path->level > 1 ? 0 : 2) 821 821 : (path->level > 1 ? 1 : 16); 822 822 bool was_locked = btree_node_locked(path, path->level);
+1 -1
fs/bcachefs/btree_journal_iter.c
··· 177 177 struct journal_keys *keys = &c->journal_keys; 178 178 size_t idx = bch2_journal_key_search(keys, id, level, k->k.p); 179 179 180 - BUG_ON(test_bit(BCH_FS_RW, &c->flags)); 180 + BUG_ON(test_bit(BCH_FS_rw, &c->flags)); 181 181 182 182 if (idx < keys->size && 183 183 journal_key_cmp(&n, &keys->d[idx]) == 0) {
+2 -2
fs/bcachefs/btree_key_cache.c
··· 778 778 ck->valid = true; 779 779 780 780 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { 781 - EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags)); 781 + EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags)); 782 782 set_bit(BKEY_CACHED_DIRTY, &ck->flags); 783 783 atomic_long_inc(&c->btree_key_cache.nr_dirty); 784 784 ··· 1005 1005 1006 1006 if (atomic_long_read(&bc->nr_dirty) && 1007 1007 !bch2_journal_error(&c->journal) && 1008 - test_bit(BCH_FS_WAS_RW, &c->flags)) 1008 + test_bit(BCH_FS_was_rw, &c->flags)) 1009 1009 panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n", 1010 1010 atomic_long_read(&bc->nr_dirty)); 1011 1011
+4 -4
fs/bcachefs/btree_trans_commit.c
··· 287 287 bch2_btree_add_journal_pin(c, b, journal_seq); 288 288 289 289 if (unlikely(!btree_node_dirty(b))) { 290 - EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags)); 290 + EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags)); 291 291 set_btree_node_dirty_acct(c, b); 292 292 } 293 293 ··· 995 995 int ret; 996 996 997 997 if (likely(!(flags & BCH_TRANS_COMMIT_lazy_rw)) || 998 - test_bit(BCH_FS_STARTED, &c->flags)) 998 + test_bit(BCH_FS_started, &c->flags)) 999 999 return -BCH_ERR_erofs_trans_commit; 1000 1000 1001 1001 ret = drop_locks_do(trans, bch2_fs_read_write_early(c)); ··· 1060 1060 return ret; 1061 1061 } 1062 1062 1063 - if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) { 1063 + if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) { 1064 1064 ret = do_bch2_trans_commit_to_journal_replay(trans); 1065 1065 goto out_reset; 1066 1066 } ··· 1086 1086 goto out; 1087 1087 } 1088 1088 1089 - EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags)); 1089 + EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags)); 1090 1090 1091 1091 trans->journal_u64s = trans->extra_journal_entries.nr; 1092 1092 trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
+2 -2
fs/bcachefs/btree_update_interior.c
··· 2080 2080 a->seq = b->data->keys.seq; 2081 2081 INIT_WORK(&a->work, async_btree_node_rewrite_work); 2082 2082 2083 - if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) { 2083 + if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) { 2084 2084 mutex_lock(&c->pending_node_rewrites_lock); 2085 2085 list_add(&a->list, &c->pending_node_rewrites); 2086 2086 mutex_unlock(&c->pending_node_rewrites_lock); ··· 2088 2088 } 2089 2089 2090 2090 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { 2091 - if (test_bit(BCH_FS_STARTED, &c->flags)) { 2091 + if (test_bit(BCH_FS_started, &c->flags)) { 2092 2092 bch_err(c, "%s: error getting c->writes ref", __func__); 2093 2093 kfree(a); 2094 2094 return;
+4 -4
fs/bcachefs/chardev.c
··· 418 418 unsigned i; 419 419 int ret = 0; 420 420 421 - if (!test_bit(BCH_FS_STARTED, &c->flags)) 421 + if (!test_bit(BCH_FS_started, &c->flags)) 422 422 return -EINVAL; 423 423 424 424 if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes)) ··· 492 492 struct bch_dev *ca; 493 493 unsigned i; 494 494 495 - if (!test_bit(BCH_FS_STARTED, &c->flags)) 495 + if (!test_bit(BCH_FS_started, &c->flags)) 496 496 return -EINVAL; 497 497 498 498 if (copy_from_user(&arg, user_arg, sizeof(arg))) ··· 533 533 struct bch_dev *ca; 534 534 int ret = 0; 535 535 536 - if (!test_bit(BCH_FS_STARTED, &c->flags)) 536 + if (!test_bit(BCH_FS_started, &c->flags)) 537 537 return -EINVAL; 538 538 539 539 if (copy_from_user(&arg, user_arg, sizeof(arg))) ··· 725 725 BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx); 726 726 } 727 727 728 - if (!test_bit(BCH_FS_STARTED, &c->flags)) 728 + if (!test_bit(BCH_FS_started, &c->flags)) 729 729 return -EINVAL; 730 730 731 731 switch (cmd) {
+1 -1
fs/bcachefs/ec.c
··· 1415 1415 if (ret) 1416 1416 return ERR_PTR(ret); 1417 1417 1418 - if (test_bit(BCH_FS_GOING_RO, &c->flags)) { 1418 + if (test_bit(BCH_FS_going_ro, &c->flags)) { 1419 1419 h = ERR_PTR(-BCH_ERR_erofs_no_writes); 1420 1420 goto found; 1421 1421 }
+9 -9
fs/bcachefs/error.c
··· 7 7 8 8 bool bch2_inconsistent_error(struct bch_fs *c) 9 9 { 10 - set_bit(BCH_FS_ERROR, &c->flags); 10 + set_bit(BCH_FS_error, &c->flags); 11 11 12 12 switch (c->opts.errors) { 13 13 case BCH_ON_ERROR_continue: ··· 26 26 27 27 void bch2_topology_error(struct bch_fs *c) 28 28 { 29 - set_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags); 30 - if (test_bit(BCH_FS_FSCK_DONE, &c->flags)) 29 + set_bit(BCH_FS_topology_error, &c->flags); 30 + if (test_bit(BCH_FS_fsck_done, &c->flags)) 31 31 bch2_inconsistent_error(c); 32 32 } 33 33 ··· 114 114 { 115 115 struct fsck_err_state *s; 116 116 117 - if (test_bit(BCH_FS_FSCK_DONE, &c->flags)) 117 + if (test_bit(BCH_FS_fsck_done, &c->flags)) 118 118 return NULL; 119 119 120 120 list_for_each_entry(s, &c->fsck_error_msgs, list) ··· 196 196 prt_printf(out, bch2_log_msg(c, "")); 197 197 #endif 198 198 199 - if (test_bit(BCH_FS_FSCK_DONE, &c->flags)) { 199 + if (test_bit(BCH_FS_fsck_done, &c->flags)) { 200 200 if (c->opts.errors != BCH_ON_ERROR_continue || 201 201 !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { 202 202 prt_str(out, ", shutting down"); ··· 256 256 if (print) 257 257 bch2_print_string_as_lines(KERN_ERR, out->buf); 258 258 259 - if (!test_bit(BCH_FS_FSCK_DONE, &c->flags) && 259 + if (!test_bit(BCH_FS_fsck_done, &c->flags) && 260 260 (ret != -BCH_ERR_fsck_fix && 261 261 ret != -BCH_ERR_fsck_ignore)) 262 262 bch_err(c, "Unable to continue, halting"); ··· 274 274 bch2_inconsistent_error(c); 275 275 276 276 if (ret == -BCH_ERR_fsck_fix) { 277 - set_bit(BCH_FS_ERRORS_FIXED, &c->flags); 277 + set_bit(BCH_FS_errors_fixed, &c->flags); 278 278 } else { 279 - set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags); 280 - set_bit(BCH_FS_ERROR, &c->flags); 279 + set_bit(BCH_FS_errors_not_fixed, &c->flags); 280 + set_bit(BCH_FS_error, &c->flags); 281 281 } 282 282 283 283 return ret;
+1 -1
fs/bcachefs/fs-io-buffered.c
··· 638 638 /* Check for writing past i_size: */ 639 639 WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) > 640 640 round_up(i_size, block_bytes(c)) && 641 - !test_bit(BCH_FS_EMERGENCY_RO, &c->flags), 641 + !test_bit(BCH_FS_emergency_ro, &c->flags), 642 642 "writing past i_size: %llu > %llu (unrounded %llu)\n", 643 643 bio_end_sector(&w->io->op.wbio.bio) << 9, 644 644 round_up(i_size, block_bytes(c)),
+1 -1
fs/bcachefs/fs.c
··· 1770 1770 struct bch_fs *c = sb->s_fs_info; 1771 1771 int ret; 1772 1772 1773 - if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) 1773 + if (test_bit(BCH_FS_emergency_ro, &c->flags)) 1774 1774 return 0; 1775 1775 1776 1776 down_write(&c->state_lock);
+1 -1
fs/bcachefs/fsck.c
··· 448 448 bch2_btree_id_str(btree_id), 449 449 pos.inode, pos.offset, 450 450 i->id, n.id, n.equiv); 451 - set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags); 451 + set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags); 452 452 return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots); 453 453 } 454 454 }
+1 -1
fs/bcachefs/inode.c
··· 1182 1182 break; 1183 1183 1184 1184 if (ret) { 1185 - if (!test_bit(BCH_FS_RW, &c->flags)) { 1185 + if (!test_bit(BCH_FS_rw, &c->flags)) { 1186 1186 bch2_trans_unlock(trans); 1187 1187 bch2_fs_lazy_rw(c); 1188 1188 }
+1 -1
fs/bcachefs/journal_seq_blacklist.c
··· 267 267 268 268 while (!(ret = PTR_ERR_OR_ZERO(b)) && 269 269 b && 270 - !test_bit(BCH_FS_STOPPING, &c->flags)) 270 + !test_bit(BCH_FS_stopping, &c->flags)) 271 271 b = bch2_btree_iter_next_node(&iter); 272 272 273 273 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+17 -17
fs/bcachefs/recovery.c
··· 533 533 move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr); 534 534 keys->gap = keys->nr; 535 535 536 - set_bit(BCH_FS_MAY_GO_RW, &c->flags); 536 + set_bit(BCH_FS_may_go_rw, &c->flags); 537 537 if (keys->nr) 538 538 return bch2_fs_read_write_early(c); 539 539 return 0; ··· 961 961 962 962 /* If we fixed errors, verify that fs is actually clean now: */ 963 963 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && 964 - test_bit(BCH_FS_ERRORS_FIXED, &c->flags) && 965 - !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) && 966 - !test_bit(BCH_FS_ERROR, &c->flags)) { 964 + test_bit(BCH_FS_errors_fixed, &c->flags) && 965 + !test_bit(BCH_FS_errors_not_fixed, &c->flags) && 966 + !test_bit(BCH_FS_error, &c->flags)) { 967 967 bch2_flush_fsck_errs(c); 968 968 969 969 bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean"); 970 - clear_bit(BCH_FS_ERRORS_FIXED, &c->flags); 970 + clear_bit(BCH_FS_errors_fixed, &c->flags); 971 971 972 972 c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; 973 973 ··· 975 975 if (ret) 976 976 goto err; 977 977 978 - if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) || 979 - test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) { 978 + if (test_bit(BCH_FS_errors_fixed, &c->flags) || 979 + test_bit(BCH_FS_errors_not_fixed, &c->flags)) { 980 980 bch_err(c, "Second fsck run was not clean"); 981 - set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags); 981 + set_bit(BCH_FS_errors_not_fixed, &c->flags); 982 982 } 983 983 984 - set_bit(BCH_FS_ERRORS_FIXED, &c->flags); 984 + set_bit(BCH_FS_errors_fixed, &c->flags); 985 985 } 986 986 987 987 if (enabled_qtypes(c)) { ··· 1000 1000 write_sb = true; 1001 1001 } 1002 1002 1003 - if (!test_bit(BCH_FS_ERROR, &c->flags) && 1003 + if (!test_bit(BCH_FS_error, &c->flags) && 1004 1004 !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) { 1005 1005 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info); 1006 1006 write_sb = true; 1007 1007 } 1008 1008 1009 - if (!test_bit(BCH_FS_ERROR, &c->flags)) { 1009 + if (!test_bit(BCH_FS_error, &c->flags)) { 1010 1010 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); 1011 1011 if (ext && 1012 1012 (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) || ··· 1018 1018 } 1019 1019 1020 1020 if (c->opts.fsck && 1021 - !test_bit(BCH_FS_ERROR, &c->flags) && 1022 - !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) { 1021 + !test_bit(BCH_FS_error, &c->flags) && 1022 + !test_bit(BCH_FS_errors_not_fixed, &c->flags)) { 1023 1023 SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0); 1024 1024 SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0); 1025 1025 write_sb = true; ··· 1053 1053 1054 1054 ret = 0; 1055 1055 out: 1056 - set_bit(BCH_FS_FSCK_DONE, &c->flags); 1056 + set_bit(BCH_FS_fsck_done, &c->flags); 1057 1057 bch2_flush_fsck_errs(c); 1058 1058 1059 1059 if (!c->opts.keep_journal && ··· 1061 1061 bch2_journal_keys_put_initial(c); 1062 1062 kfree(clean); 1063 1063 1064 - if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) { 1064 + if (!ret && test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) { 1065 1065 bch2_fs_read_write_early(c); 1066 1066 bch2_delete_dead_snapshots_async(c); 1067 1067 } ··· 1100 1100 mutex_unlock(&c->sb_lock); 1101 1101 1102 1102 c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns); 1103 - set_bit(BCH_FS_MAY_GO_RW, &c->flags); 1104 - set_bit(BCH_FS_FSCK_DONE, &c->flags); 1103 + set_bit(BCH_FS_may_go_rw, &c->flags); 1104 + set_bit(BCH_FS_fsck_done, &c->flags); 1105 1105 1106 1106 for (i = 0; i < BTREE_ID_NR; i++) 1107 1107 bch2_btree_root_alloc(c, i);
+4 -4
fs/bcachefs/snapshot.c
··· 318 318 __set_is_ancestor_bitmap(c, id); 319 319 320 320 if (BCH_SNAPSHOT_DELETED(s.v)) { 321 - set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags); 321 + set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags); 322 322 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots) 323 323 bch2_delete_dead_snapshots_async(c); 324 324 } ··· 1376 1376 u32 *i, id; 1377 1377 int ret = 0; 1378 1378 1379 - if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) 1379 + if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) 1380 1380 return 0; 1381 1381 1382 - if (!test_bit(BCH_FS_STARTED, &c->flags)) { 1382 + if (!test_bit(BCH_FS_started, &c->flags)) { 1383 1383 ret = bch2_fs_read_write_early(c); 1384 1384 if (ret) { 1385 1385 bch_err_msg(c, ret, "deleting dead snapshots: error going rw"); ··· 1680 1680 if (BCH_SNAPSHOT_DELETED(snap.v) || 1681 1681 bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset || 1682 1682 (ret = bch2_snapshot_needs_delete(trans, k)) > 0) { 1683 - set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags); 1683 + set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags); 1684 1684 return 0; 1685 1685 } 1686 1686
+2 -2
fs/bcachefs/super-io.c
··· 950 950 951 951 le64_add_cpu(&c->disk_sb.sb->seq, 1); 952 952 953 - if (test_bit(BCH_FS_ERROR, &c->flags)) 953 + if (test_bit(BCH_FS_error, &c->flags)) 954 954 SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1); 955 - if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags)) 955 + if (test_bit(BCH_FS_topology_error, &c->flags)) 956 956 SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 1); 957 957 958 958 SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);
+35 -28
fs/bcachefs/super.c
··· 79 79 MODULE_SOFTDEP("pre: poly1305"); 80 80 MODULE_SOFTDEP("pre: xxhash"); 81 81 82 + const char * const bch2_fs_flag_strs[] = { 83 + #define x(n) #n, 84 + BCH_FS_FLAGS() 85 + #undef x 86 + NULL 87 + }; 88 + 82 89 #define KTYPE(type) \ 83 90 static const struct attribute_group type ## _group = { \ 84 91 .attrs = type ## _files \ ··· 253 246 journal_cur_seq(&c->journal)); 254 247 255 248 if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) && 256 - !test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) 257 - set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags); 249 + !test_bit(BCH_FS_emergency_ro, &c->flags)) 250 + set_bit(BCH_FS_clean_shutdown, &c->flags); 258 251 bch2_fs_journal_stop(&c->journal); 259 252 260 253 /* ··· 269 262 { 270 263 struct bch_fs *c = container_of(writes, struct bch_fs, writes); 271 264 272 - set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); 265 + set_bit(BCH_FS_write_disable_complete, &c->flags); 273 266 wake_up(&bch2_read_only_wait); 274 267 } 275 268 #endif 276 269 277 270 void bch2_fs_read_only(struct bch_fs *c) 278 271 { 279 - if (!test_bit(BCH_FS_RW, &c->flags)) { 272 + if (!test_bit(BCH_FS_rw, &c->flags)) { 280 273 bch2_journal_reclaim_stop(&c->journal); 281 274 return; 282 275 } 283 276 284 - BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags)); 277 + BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags)); 285 278 286 279 bch_verbose(c, "going read-only"); 287 280 ··· 289 282 * Block new foreground-end write operations from starting - any new 290 283 * writes will return -EROFS: 291 284 */ 292 - set_bit(BCH_FS_GOING_RO, &c->flags); 285 + set_bit(BCH_FS_going_ro, &c->flags); 293 286 #ifndef BCH_WRITE_REF_DEBUG 294 287 percpu_ref_kill(&c->writes); 295 288 #else ··· 309 302 * that going RO is complete: 310 303 */ 311 304 wait_event(bch2_read_only_wait, 312 - test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) || 313 - test_bit(BCH_FS_EMERGENCY_RO, &c->flags)); 305 + test_bit(BCH_FS_write_disable_complete, &c->flags) || 306 + test_bit(BCH_FS_emergency_ro, &c->flags)); 314 307 315 - bool writes_disabled = test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); 308 + bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags); 316 309 if (writes_disabled) 317 310 bch_verbose(c, "finished waiting for writes to stop"); 318 311 319 312 __bch2_fs_read_only(c); 320 313 321 314 wait_event(bch2_read_only_wait, 322 - test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags)); 315 + test_bit(BCH_FS_write_disable_complete, &c->flags)); 323 316 324 317 if (!writes_disabled) 325 318 bch_verbose(c, "finished waiting for writes to stop"); 326 319 327 - clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); 328 - clear_bit(BCH_FS_GOING_RO, &c->flags); 329 - clear_bit(BCH_FS_RW, &c->flags); 320 + clear_bit(BCH_FS_write_disable_complete, &c->flags); 321 + clear_bit(BCH_FS_going_ro, &c->flags); 322 + clear_bit(BCH_FS_rw, &c->flags); 330 323 331 324 if (!bch2_journal_error(&c->journal) && 332 - !test_bit(BCH_FS_ERROR, &c->flags) && 333 - !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) && 334 - test_bit(BCH_FS_STARTED, &c->flags) && 335 - test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) && 325 + !test_bit(BCH_FS_error, &c->flags) && 326 + !test_bit(BCH_FS_emergency_ro, &c->flags) && 327 + test_bit(BCH_FS_started, &c->flags) && 328 + test_bit(BCH_FS_clean_shutdown, &c->flags) && 336 329 !c->opts.norecovery) { 337 330 BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal)); 338 331 BUG_ON(atomic_read(&c->btree_cache.dirty)); ··· 363 356 364 357 bool bch2_fs_emergency_read_only(struct bch_fs *c) 365 358 { 366 - bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags); 359 + bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags); 367 360 368 361 bch2_journal_halt(&c->journal); 369 362 bch2_fs_read_only_async(c); ··· 404 397 unsigned i; 405 398 int ret; 406 399 407 - if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) { 400 + if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) { 408 401 bch_err(c, "cannot go rw, unfixed btree errors"); 409 402 return -BCH_ERR_erofs_unfixed_errors; 410 403 } 411 404 412 - if (test_bit(BCH_FS_RW, &c->flags)) 405 + if (test_bit(BCH_FS_rw, &c->flags)) 413 406 return 0; 414 407 415 408 if (c->opts.norecovery) ··· 432 425 if (ret) 433 426 goto err; 434 427 435 - clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags); 428 + clear_bit(BCH_FS_clean_shutdown, &c->flags); 436 429 437 430 /* 438 431 * First journal write must be a flush write: after a clean shutdown we ··· 446 439 bch2_dev_allocator_add(c, ca); 447 440 bch2_recalc_capacity(c); 448 441 449 - set_bit(BCH_FS_RW, &c->flags); 450 - set_bit(BCH_FS_WAS_RW, &c->flags); 442 + set_bit(BCH_FS_rw, &c->flags); 443 + set_bit(BCH_FS_was_rw, &c->flags); 451 444 452 445 #ifndef BCH_WRITE_REF_DEBUG 453 446 percpu_ref_reinit(&c->writes); ··· 480 473 bch2_do_pending_node_rewrites(c); 481 474 return 0; 482 475 err: 483 - if (test_bit(BCH_FS_RW, &c->flags)) 476 + if (test_bit(BCH_FS_rw, &c->flags)) 484 477 bch2_fs_read_only(c); 485 478 else 486 479 __bch2_fs_read_only(c); ··· 580 573 581 574 bch_verbose(c, "shutting down"); 582 575 583 - set_bit(BCH_FS_STOPPING, &c->flags); 576 + set_bit(BCH_FS_stopping, &c->flags); 584 577 585 578 cancel_work_sync(&c->journal_seq_blacklist_gc_work); 586 579 ··· 973 966 974 967 down_write(&c->state_lock); 975 968 976 - BUG_ON(test_bit(BCH_FS_STARTED, &c->flags)); 969 + BUG_ON(test_bit(BCH_FS_started, &c->flags)); 977 970 978 971 mutex_lock(&c->sb_lock); 979 972 ··· 1008 1001 goto err; 1009 1002 } 1010 1003 1011 - set_bit(BCH_FS_STARTED, &c->flags); 1004 + set_bit(BCH_FS_started, &c->flags); 1012 1005 1013 1006 if (c->opts.read_only || c->opts.nochanges) { 1014 1007 bch2_fs_read_only(c); 1015 1008 } else { 1016 - ret = !test_bit(BCH_FS_RW, &c->flags) 1009 + ret = !test_bit(BCH_FS_rw, &c->flags) 1017 1010 ? bch2_fs_read_write(c) 1018 1011 : bch2_fs_read_write_late(c); 1019 1012 if (ret)
+4 -2
fs/bcachefs/super.h
··· 8 8 9 9 #include <linux/math64.h> 10 10 11 + extern const char * const bch2_fs_flag_strs[]; 12 + 11 13 struct bch_fs *bch2_dev_to_fs(dev_t); 12 14 struct bch_fs *bch2_uuid_to_fs(__uuid_t); 13 15 ··· 39 37 */ 40 38 static inline void bch2_fs_lazy_rw(struct bch_fs *c) 41 39 { 42 - if (!test_bit(BCH_FS_RW, &c->flags) && 43 - !test_bit(BCH_FS_WAS_RW, &c->flags)) 40 + if (!test_bit(BCH_FS_rw, &c->flags) && 41 + !test_bit(BCH_FS_was_rw, &c->flags)) 44 42 bch2_fs_read_write_early(c); 45 43 } 46 44
+8 -3
fs/bcachefs/sysfs.c
··· 145 145 146 146 read_attribute(uuid); 147 147 read_attribute(minor); 148 + read_attribute(flags); 148 149 read_attribute(bucket_size); 149 150 read_attribute(first_bucket); 150 151 read_attribute(nbuckets); ··· 269 268 270 269 memset(s, 0, sizeof(s)); 271 270 272 - if (!test_bit(BCH_FS_STARTED, &c->flags)) 271 + if (!test_bit(BCH_FS_started, &c->flags)) 273 272 return -EPERM; 274 273 275 274 trans = bch2_trans_get(c); ··· 385 384 sysfs_print(minor, c->minor); 386 385 sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b); 387 386 387 + if (attr == &sysfs_flags) 388 + prt_bitflags(out, bch2_fs_flag_strs, c->flags); 389 + 388 390 sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c)); 389 391 390 392 if (attr == &sysfs_btree_write_stats) ··· 501 497 502 498 /* Debugging: */ 503 499 504 - if (!test_bit(BCH_FS_STARTED, &c->flags)) 500 + if (!test_bit(BCH_FS_started, &c->flags)) 505 501 return -EPERM; 506 502 507 503 /* Debugging: */ 508 504 509 - if (!test_bit(BCH_FS_RW, &c->flags)) 505 + if (!test_bit(BCH_FS_rw, &c->flags)) 510 506 return -EROFS; 511 507 512 508 if (attr == &sysfs_prune_cache) { ··· 638 634 SYSFS_OPS(bch2_fs_internal); 639 635 640 636 struct attribute *bch2_fs_internal_files[] = { 637 + &sysfs_flags, 641 638 &sysfs_journal_debug, 642 639 &sysfs_btree_updates, 643 640 &sysfs_btree_cache,