Merge tag 'bcachefs-2025-03-14' of git://evilpiepirate.org/bcachefs

Pull bcachefs hotfix from Kent Overstreet:
"This one is high priority: a user hit an assertion in the upgrade to
6.14, and we don't have a reproducer, so this changes the assertion to
an emergency read-only with more info so we can debug it"

* tag 'bcachefs-2025-03-14' of git://evilpiepirate.org/bcachefs:
bcachefs: Change btree wb assert to runtime error
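
To make the shape of the fix concrete, here is a minimal, compilable userspace sketch of the pattern being applied: a violated invariant that used to crash via BUG_ON() instead logs what it knows, flags the filesystem for emergency read-only, and returns an error for the caller to propagate. struct fs, check_invariant() and the message text are hypothetical stand-ins for illustration, not bcachefs code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bch_fs: just the bit we need here. */
struct fs {
	bool emergency_ro;
};

/*
 * Before: BUG_ON(!invariant_holds) took the whole machine down.
 * After: report what we know, go read-only, let the caller unwind.
 */
static int check_invariant(struct fs *fs, bool invariant_holds, const char *what)
{
	if (!invariant_holds) {
		fprintf(stderr, "inconsistency: %s; going emergency read-only\n", what);
		fs->emergency_ro = true;
		return -EROFS;
	}
	return 0;
}

int main(void)
{
	struct fs fs = { .emergency_ro = false };
	int ret = check_invariant(&fs, false, "write buffer update on non-wb btree");

	printf("ret=%d emergency_ro=%d\n", ret, fs.emergency_ro);
	return 0;
}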

 fs/bcachefs/btree_update.h       |  8 ++++++++
 fs/bcachefs/btree_write_buffer.c | 21 ++++++++++++++++++++-
 2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -126,10 +126,18 @@
 
 int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bkey_i *);
 
+int bch2_btree_write_buffer_insert_err(struct btree_trans *,
+				       enum btree_id, struct bkey_i *);
+
 static inline int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
 							   enum btree_id btree,
 							   struct bkey_i *k)
 {
+	if (unlikely(!btree_type_uses_write_buffer(btree))) {
+		int ret = bch2_btree_write_buffer_insert_err(trans, btree, k);
+		dump_stack();
+		return ret;
+	}
 	/*
 	 * Most updates skip the btree write buffer until journal replay is
 	 * finished because synchronization with journal replay relies on having
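
Two details worth noting in the header change: the guard sits in an inline function on the update fast path, so it is wrapped in unlikely() to keep the common case straight-line, and dump_stack() prints the call trace so the offending caller can be identified from the logs without a crash. A tiny compilable sketch of the unlikely() idiom follows; insert_guarded() and its error value are made up for illustration, while the unlikely() definition is the same __builtin_expect() trick the kernel uses.

#include <stdio.h>

/* Userspace copy of the kernel's branch-prediction hint. */
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int insert_guarded(int btree_uses_wb)
{
	if (unlikely(!btree_uses_wb)) {
		fprintf(stderr, "write buffer update on non-wb btree\n");
		return -1;	/* the real code returns -EROFS */
	}
	return 0;	/* fast path: proceed with the buffered update */
}

int main(void)
{
	printf("ok=%d bad=%d\n", insert_guarded(1), insert_guarded(0));
	return 0;
}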
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -264,6 +264,22 @@
 	BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
 }
 
+int bch2_btree_write_buffer_insert_err(struct btree_trans *trans,
+				       enum btree_id btree, struct bkey_i *k)
+{
+	struct bch_fs *c = trans->c;
+	struct printbuf buf = PRINTBUF;
+
+	prt_printf(&buf, "attempting to do write buffer update on non wb btree=");
+	bch2_btree_id_to_text(&buf, btree);
+	prt_str(&buf, "\n");
+	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+	bch2_fs_inconsistent(c, "%s", buf.buf);
+	printbuf_exit(&buf);
+	return -EROFS;
+}
+
 static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
@@ -328,7 +344,10 @@
 	darray_for_each(wb->sorted, i) {
 		struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];
 
-		BUG_ON(!btree_type_uses_write_buffer(k->btree));
+		if (unlikely(!btree_type_uses_write_buffer(k->btree))) {
+			ret = bch2_btree_write_buffer_insert_err(trans, k->btree, &k->k);
+			goto err;
+		}
 
 		for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
 			prefetch(&wb->flushing.keys.data[n->idx]);
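
The new error path follows the usual bcachefs reporting idiom: assemble the full diagnostic in a printbuf (the btree ID plus the offending key), emit it once via bch2_fs_inconsistent(), which is what takes the filesystem emergency read-only here, then return -EROFS. Below is a rough userspace model of that build-then-report-once pattern; every name in it is hypothetical, and the fixed-size buffer stands in for the kernel's growable printbuf only for brevity.

#include <errno.h>
#include <stdio.h>

/* Crude fixed-size stand-in for the kernel's printbuf. */
struct printbuf_model {
	char	buf[256];
	size_t	pos;
};

static void prt(struct printbuf_model *out, const char *s)
{
	out->pos += snprintf(out->buf + out->pos,
			     sizeof(out->buf) - out->pos, "%s", s);
}

/* Build the whole message first, report it once, return the error. */
static int report_wb_insert_err(const char *btree_name, const char *key)
{
	struct printbuf_model out = { .pos = 0 };

	prt(&out, "attempting to do write buffer update on non wb btree=");
	prt(&out, btree_name);
	prt(&out, "\n");
	prt(&out, key);

	fprintf(stderr, "%s\n", out.buf);	/* bch2_fs_inconsistent() in the real code */
	return -EROFS;
}

int main(void)
{
	return report_wb_insert_err("alloc", "<key omitted>") ? 1 : 0;
}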