Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: move: move_stats refactoring

data_progress_list is gone - it was redundant with moving_context_list

The upcoming rebalance rewrite is going to have it using two different
move_stats objects with the same moving_context, depending on whether
it's scanning or using the rebalance_work btree - this patch plumbs
stats around a bit differently so that this will work.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+83 -63
-3
fs/bcachefs/bcachefs.h
··· 939 939 struct list_head moving_context_list; 940 940 struct mutex moving_context_lock; 941 941 942 - struct list_head data_progress_list; 943 - struct mutex data_progress_lock; 944 - 945 942 /* REBALANCE */ 946 943 struct bch_fs_rebalance rebalance; 947 944
+1 -1
fs/bcachefs/data_update.c
··· 440 440 m->btree_id = btree_id; 441 441 m->data_opts = data_opts; 442 442 m->ctxt = ctxt; 443 - m->stats = ctxt->stats; 443 + m->stats = ctxt ? ctxt->stats : NULL; 444 444 445 445 bch2_write_op_init(&m->op, c, io_opts); 446 446 m->op.pos = bkey_start_pos(k.k);
+54 -46
fs/bcachefs/move.c
··· 60 60 } 61 61 } 62 62 63 - static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats) 64 - { 65 - mutex_lock(&c->data_progress_lock); 66 - list_add(&stats->list, &c->data_progress_list); 67 - mutex_unlock(&c->data_progress_lock); 68 - } 69 - 70 - static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats) 71 - { 72 - mutex_lock(&c->data_progress_lock); 73 - list_del(&stats->list); 74 - mutex_unlock(&c->data_progress_lock); 75 - } 76 - 77 63 struct moving_io { 78 64 struct list_head read_list; 79 65 struct list_head io_list; ··· 176 190 EBUG_ON(atomic_read(&ctxt->read_sectors)); 177 191 EBUG_ON(atomic_read(&ctxt->read_ios)); 178 192 179 - if (ctxt->stats) { 180 - progress_list_del(c, ctxt->stats); 181 - trace_move_data(c, 182 - atomic64_read(&ctxt->stats->sectors_moved), 183 - atomic64_read(&ctxt->stats->keys_moved)); 184 - } 185 - 186 193 mutex_lock(&c->moving_context_lock); 187 194 list_del(&ctxt->list); 188 195 mutex_unlock(&c->moving_context_lock); ··· 210 231 mutex_lock(&c->moving_context_lock); 211 232 list_add(&ctxt->list, &c->moving_context_list); 212 233 mutex_unlock(&c->moving_context_lock); 234 + } 213 235 214 - if (stats) { 215 - progress_list_add(c, stats); 216 - stats->data_type = BCH_DATA_user; 217 - } 236 + void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c) 237 + { 238 + trace_move_data(c, stats); 218 239 } 219 240 220 241 void bch2_move_stats_init(struct bch_move_stats *stats, char *name) 221 242 { 222 243 memset(stats, 0, sizeof(*stats)); 244 + stats->data_type = BCH_DATA_user; 223 245 scnprintf(stats->name, sizeof(stats->name), "%s", name); 224 246 } 225 247 ··· 283 303 unsigned sectors = k.k->size, pages; 284 304 int ret = -ENOMEM; 285 305 306 + if (ctxt->stats) 307 + ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos); 286 308 trace_move_extent2(c, k); 287 309 288 310 bch2_data_update_opts_normalize(k, &data_opts); ··· 860 878 { 861 879 bool kthread = (current->flags & PF_KTHREAD) 
!= 0; 862 880 struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); 863 - struct btree_trans *trans = bch2_trans_get(c); 881 + struct moving_context ctxt; 882 + struct btree_trans *trans; 864 883 struct btree_iter iter; 865 884 struct btree *b; 866 885 enum btree_id id; 867 886 struct data_update_opts data_opts; 868 887 int ret = 0; 869 888 870 - progress_list_add(c, stats); 889 + bch2_moving_ctxt_init(&ctxt, c, NULL, stats, 890 + writepoint_ptr(&c->btree_write_point), 891 + true); 892 + trans = ctxt.trans; 871 893 872 894 stats->data_type = BCH_DATA_btree; 873 895 ··· 919 933 break; 920 934 } 921 935 922 - bch2_trans_put(trans); 923 - 924 - if (ret) 925 - bch_err_fn(c, ret); 926 - 936 + bch_err_fn(c, ret); 937 + bch2_moving_ctxt_exit(&ctxt); 927 938 bch2_btree_interior_updates_flush(c); 928 939 929 - progress_list_del(c, stats); 930 940 return ret; 931 941 } 932 942 ··· 1043 1061 mutex_unlock(&c->sb_lock); 1044 1062 } 1045 1063 1046 - if (ret) 1047 - bch_err_fn(c, ret); 1064 + bch_err_fn(c, ret); 1048 1065 return ret; 1049 1066 } 1050 1067 ··· 1074 1093 true, 1075 1094 rereplicate_pred, c) ?: ret; 1076 1095 ret = bch2_replicas_gc2(c) ?: ret; 1096 + 1097 + bch2_move_stats_exit(stats, c); 1077 1098 break; 1078 1099 case BCH_DATA_OP_MIGRATE: 1079 1100 if (op.migrate.dev >= c->sb.nr_devices) ··· 1100 1117 true, 1101 1118 migrate_pred, &op) ?: ret; 1102 1119 ret = bch2_replicas_gc2(c) ?: ret; 1120 + 1121 + bch2_move_stats_exit(stats, c); 1103 1122 break; 1104 1123 case BCH_DATA_OP_REWRITE_OLD_NODES: 1105 1124 bch2_move_stats_init(stats, "rewrite_old_nodes"); 1106 1125 ret = bch2_scan_old_btree_nodes(c, stats); 1126 + bch2_move_stats_exit(stats, c); 1107 1127 break; 1108 1128 default: 1109 1129 ret = -EINVAL; ··· 1115 1129 return ret; 1116 1130 } 1117 1131 1118 - static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt) 1132 + void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats) 1119 
1133 { 1120 - struct bch_move_stats *stats = ctxt->stats; 1121 - struct moving_io *io; 1122 - 1123 - prt_printf(out, "%s (%ps):", stats->name, ctxt->fn); 1124 - prt_newline(out); 1125 - 1126 - prt_printf(out, " data type %s position: ", 1134 + prt_printf(out, "%s: data type=%s pos=", 1135 + stats->name, 1127 1136 bch2_data_types[stats->data_type]); 1128 1137 bch2_bbpos_to_text(out, stats->pos); 1129 1138 prt_newline(out); 1139 + printbuf_indent_add(out, 2); 1140 + 1141 + prt_str(out, "keys moved: "); 1142 + prt_u64(out, atomic64_read(&stats->keys_moved)); 1143 + prt_newline(out); 1144 + 1145 + prt_str(out, "keys raced: "); 1146 + prt_u64(out, atomic64_read(&stats->keys_raced)); 1147 + prt_newline(out); 1148 + 1149 + prt_str(out, "bytes seen: "); 1150 + prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9); 1151 + prt_newline(out); 1152 + 1153 + prt_str(out, "bytes moved: "); 1154 + prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9); 1155 + prt_newline(out); 1156 + 1157 + prt_str(out, "bytes raced: "); 1158 + prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9); 1159 + prt_newline(out); 1160 + 1161 + printbuf_indent_sub(out, 2); 1162 + } 1163 + 1164 + static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt) 1165 + { 1166 + struct moving_io *io; 1167 + 1168 + bch2_move_stats_to_text(out, ctxt->stats); 1130 1169 printbuf_indent_add(out, 2); 1131 1170 1132 1171 prt_printf(out, "reads: ios %u/%u sectors %u/%u", ··· 1192 1181 { 1193 1182 INIT_LIST_HEAD(&c->moving_context_list); 1194 1183 mutex_init(&c->moving_context_lock); 1195 - 1196 - INIT_LIST_HEAD(&c->data_progress_list); 1197 - mutex_init(&c->data_progress_lock); 1198 1184 }
+4 -1
fs/bcachefs/move.h
··· 127 127 struct bch_move_stats *, 128 128 struct bch_ioctl_data); 129 129 130 - void bch2_move_stats_init(struct bch_move_stats *stats, char *name); 130 + void bch2_move_stats_to_text(struct printbuf *, struct bch_move_stats *); 131 + void bch2_move_stats_exit(struct bch_move_stats *, struct bch_fs *); 132 + void bch2_move_stats_init(struct bch_move_stats *, char *); 133 + 131 134 void bch2_fs_moving_ctxts_to_text(struct printbuf *, struct bch_fs *); 132 135 133 136 void bch2_fs_move_init(struct bch_fs *);
+1 -2
fs/bcachefs/move_types.h
··· 7 7 struct bch_move_stats { 8 8 enum bch_data_type data_type; 9 9 struct bbpos pos; 10 - struct list_head list; 11 10 char name[32]; 12 11 13 12 atomic64_t keys_moved; 14 13 atomic64_t keys_raced; 15 - atomic64_t sectors_moved; 16 14 atomic64_t sectors_seen; 15 + atomic64_t sectors_moved; 17 16 atomic64_t sectors_raced; 18 17 }; 19 18
+1
fs/bcachefs/movinggc.c
··· 361 361 move_buckets_wait(&ctxt, &buckets, true); 362 362 rhashtable_destroy(&buckets.table); 363 363 bch2_moving_ctxt_exit(&ctxt); 364 + bch2_move_stats_exit(&move_stats, c); 364 365 365 366 return 0; 366 367 }
+1
fs/bcachefs/trace.c
··· 7 7 #include "btree_locking.h" 8 8 #include "btree_update_interior.h" 9 9 #include "keylist.h" 10 + #include "move_types.h" 10 11 #include "opts.h" 11 12 #include "six.h" 12 13
+21 -10
fs/bcachefs/trace.h
··· 767 767 ); 768 768 769 769 TRACE_EVENT(move_data, 770 - TP_PROTO(struct bch_fs *c, u64 sectors_moved, 771 - u64 keys_moved), 772 - TP_ARGS(c, sectors_moved, keys_moved), 770 + TP_PROTO(struct bch_fs *c, 771 + struct bch_move_stats *stats), 772 + TP_ARGS(c, stats), 773 773 774 774 TP_STRUCT__entry( 775 - __field(dev_t, dev ) 776 - __field(u64, sectors_moved ) 775 + __field(dev_t, dev ) 777 776 __field(u64, keys_moved ) 777 + __field(u64, keys_raced ) 778 + __field(u64, sectors_seen ) 779 + __field(u64, sectors_moved ) 780 + __field(u64, sectors_raced ) 778 781 ), 779 782 780 783 TP_fast_assign( 781 - __entry->dev = c->dev; 782 - __entry->sectors_moved = sectors_moved; 783 - __entry->keys_moved = keys_moved; 784 + __entry->dev = c->dev; 785 + __entry->keys_moved = atomic64_read(&stats->keys_moved); 786 + __entry->keys_raced = atomic64_read(&stats->keys_raced); 787 + __entry->sectors_seen = atomic64_read(&stats->sectors_seen); 788 + __entry->sectors_moved = atomic64_read(&stats->sectors_moved); 789 + __entry->sectors_raced = atomic64_read(&stats->sectors_raced); 784 790 ), 785 791 786 - TP_printk("%d,%d sectors_moved %llu keys_moved %llu", 792 + TP_printk("%d,%d keys moved %llu raced %llu" 793 + "sectors seen %llu moved %llu raced %llu", 787 794 MAJOR(__entry->dev), MINOR(__entry->dev), 788 - __entry->sectors_moved, __entry->keys_moved) 795 + __entry->keys_moved, 796 + __entry->keys_raced, 797 + __entry->sectors_seen, 798 + __entry->sectors_moved, 799 + __entry->sectors_raced) 789 800 ); 790 801 791 802 TRACE_EVENT(evacuate_bucket,