Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'bcachefs-2024-06-12' of https://evilpiepirate.org/git/bcachefs

Pull bcachefs fixes from Kent Overstreet:

- fix kworker explosion, due to calling submit_bio() (which can block)
from a multithreaded workqueue

- fix error handling in btree node scan

- forward compat fix: kill an old debug assert

- key cache shrinker fixes

This is a partial fix for stalls doing multithreaded creates - there
were various O(n^2) issues the key cache shrinker was hitting [1].

There's more work coming here; I'm working on a patch to delete the
key cache lock, which initial testing shows to be a pretty drastic
performance improvement.

- assorted syzbot fixes

Link: https://lore.kernel.org/linux-bcachefs/CAGudoHGenxzk0ZqPXXi1_QDbfqQhGHu+wUwzyS6WmfkUZ1HiXA@mail.gmail.com/ [1]

* tag 'bcachefs-2024-06-12' of https://evilpiepirate.org/git/bcachefs:
bcachefs: Fix rcu_read_lock() leak in drop_extra_replicas
bcachefs: Add missing bch_inode_info.ei_flags init
bcachefs: Add missing synchronize_srcu_expedited() call when shutting down
bcachefs: Check for invalid bucket from bucket_gen(), gc_bucket()
bcachefs: Replace bucket_valid() asserts in bucket lookup with proper checks
bcachefs: Fix snapshot_create_lock lock ordering
bcachefs: Fix refcount leak in check_fix_ptrs()
bcachefs: Leave a buffer in the btree key cache to avoid lock thrashing
bcachefs: Fix reporting of freed objects from key cache shrinker
bcachefs: set sb->s_shrinker->seeks = 0
bcachefs: increase key cache shrinker batch size
bcachefs: Enable automatic shrinking for rhashtables
bcachefs: fix the display format for show-super
bcachefs: fix stack frame size in fsck.c
bcachefs: Delete incorrect BTREE_ID_NR assertion
bcachefs: Fix incorrect error handling found_btree_node_is_readable()
bcachefs: Split out btree_write_submit_wq

+352 -228
+20 -2
fs/bcachefs/alloc_background.c
··· 741 741 enum btree_iter_update_trigger_flags flags) 742 742 { 743 743 struct bch_fs *c = trans->c; 744 + struct printbuf buf = PRINTBUF; 744 745 int ret = 0; 745 746 746 747 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); ··· 861 860 } 862 861 863 862 percpu_down_read(&c->mark_lock); 864 - if (new_a->gen != old_a->gen) 865 - *bucket_gen(ca, new.k->p.offset) = new_a->gen; 863 + if (new_a->gen != old_a->gen) { 864 + u8 *gen = bucket_gen(ca, new.k->p.offset); 865 + if (unlikely(!gen)) { 866 + percpu_up_read(&c->mark_lock); 867 + goto invalid_bucket; 868 + } 869 + *gen = new_a->gen; 870 + } 866 871 867 872 bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false); 868 873 percpu_up_read(&c->mark_lock); ··· 902 895 903 896 percpu_down_read(&c->mark_lock); 904 897 struct bucket *g = gc_bucket(ca, new.k->p.offset); 898 + if (unlikely(!g)) { 899 + percpu_up_read(&c->mark_lock); 900 + goto invalid_bucket; 901 + } 902 + g->gen_valid = 1; 905 903 906 904 bucket_lock(g); 907 905 ··· 922 910 percpu_up_read(&c->mark_lock); 923 911 } 924 912 err: 913 + printbuf_exit(&buf); 925 914 bch2_dev_put(ca); 926 915 return ret; 916 + invalid_bucket: 917 + bch2_fs_inconsistent(c, "reference to invalid bucket\n %s", 918 + (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf)); 919 + ret = -EIO; 920 + goto err; 927 921 } 928 922 929 923 /*
+2 -1
fs/bcachefs/bcachefs.h
··· 790 790 791 791 /* BTREE CACHE */ 792 792 struct bio_set btree_bio; 793 - struct workqueue_struct *io_complete_wq; 793 + struct workqueue_struct *btree_read_complete_wq; 794 + struct workqueue_struct *btree_write_submit_wq; 794 795 795 796 struct btree_root btree_roots_known[BTREE_ID_NR]; 796 797 DARRAY(struct btree_root) btree_roots_extra;
+5 -4
fs/bcachefs/btree_cache.c
··· 91 91 } 92 92 93 93 static const struct rhashtable_params bch_btree_cache_params = { 94 - .head_offset = offsetof(struct btree, hash), 95 - .key_offset = offsetof(struct btree, hash_val), 96 - .key_len = sizeof(u64), 97 - .obj_cmpfn = bch2_btree_cache_cmp_fn, 94 + .head_offset = offsetof(struct btree, hash), 95 + .key_offset = offsetof(struct btree, hash_val), 96 + .key_len = sizeof(u64), 97 + .obj_cmpfn = bch2_btree_cache_cmp_fn, 98 + .automatic_shrinking = true, 98 99 }; 99 100 100 101 static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+12 -5
fs/bcachefs/btree_gc.c
··· 874 874 const struct bch_alloc_v4 *old; 875 875 int ret; 876 876 877 + if (!bucket_valid(ca, k.k->p.offset)) 878 + return 0; 879 + 877 880 old = bch2_alloc_to_v4(k, &old_convert); 878 881 gc = new = *old; 879 882 ··· 993 990 994 991 buckets->first_bucket = ca->mi.first_bucket; 995 992 buckets->nbuckets = ca->mi.nbuckets; 993 + buckets->nbuckets_minus_first = 994 + buckets->nbuckets - buckets->first_bucket; 996 995 rcu_assign_pointer(ca->buckets_gc, buckets); 997 996 } 998 997 ··· 1008 1003 continue; 1009 1004 } 1010 1005 1011 - struct bch_alloc_v4 a_convert; 1012 - const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert); 1006 + if (bucket_valid(ca, k.k->p.offset)) { 1007 + struct bch_alloc_v4 a_convert; 1008 + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert); 1013 1009 1014 - struct bucket *g = gc_bucket(ca, k.k->p.offset); 1015 - g->gen_valid = 1; 1016 - g->gen = a->gen; 1010 + struct bucket *g = gc_bucket(ca, k.k->p.offset); 1011 + g->gen_valid = 1; 1012 + g->gen = a->gen; 1013 + } 1017 1014 0; 1018 1015 }))); 1019 1016 bch2_dev_put(ca);
+4 -4
fs/bcachefs/btree_io.c
··· 1389 1389 bch2_latency_acct(ca, rb->start_time, READ); 1390 1390 } 1391 1391 1392 - queue_work(c->io_complete_wq, &rb->work); 1392 + queue_work(c->btree_read_complete_wq, &rb->work); 1393 1393 } 1394 1394 1395 1395 struct btree_node_read_all { ··· 1656 1656 btree_node_read_all_replicas_done(&ra->cl.work); 1657 1657 } else { 1658 1658 continue_at(&ra->cl, btree_node_read_all_replicas_done, 1659 - c->io_complete_wq); 1659 + c->btree_read_complete_wq); 1660 1660 } 1661 1661 1662 1662 return 0; ··· 1737 1737 if (sync) 1738 1738 btree_node_read_work(&rb->work); 1739 1739 else 1740 - queue_work(c->io_complete_wq, &rb->work); 1740 + queue_work(c->btree_read_complete_wq, &rb->work); 1741 1741 } 1742 1742 } 1743 1743 ··· 2229 2229 atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes); 2230 2230 2231 2231 INIT_WORK(&wbio->work, btree_write_submit); 2232 - queue_work(c->io_complete_wq, &wbio->work); 2232 + queue_work(c->btree_write_submit_wq, &wbio->work); 2233 2233 return; 2234 2234 err: 2235 2235 set_btree_node_noevict(b);
+4 -7
fs/bcachefs/btree_iter.c
··· 221 221 struct btree_path *path) 222 222 { 223 223 struct bch_fs *c = trans->c; 224 - unsigned i; 225 224 226 - EBUG_ON(path->btree_id >= BTREE_ID_NR); 227 - 228 - for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) { 225 + for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) { 229 226 if (!path->l[i].b) { 230 227 BUG_ON(!path->cached && 231 228 bch2_btree_id_root(c, path->btree_id)->b->c.level > i); ··· 247 250 static void bch2_btree_iter_verify(struct btree_iter *iter) 248 251 { 249 252 struct btree_trans *trans = iter->trans; 250 - 251 - BUG_ON(iter->btree_id >= BTREE_ID_NR); 252 253 253 254 BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached); 254 255 ··· 3401 3406 bch2_time_stats_exit(&s->lock_hold_times); 3402 3407 } 3403 3408 3404 - if (c->btree_trans_barrier_initialized) 3409 + if (c->btree_trans_barrier_initialized) { 3410 + synchronize_srcu_expedited(&c->btree_trans_barrier); 3405 3411 cleanup_srcu_struct(&c->btree_trans_barrier); 3412 + } 3406 3413 mempool_exit(&c->btree_trans_mem_pool); 3407 3414 mempool_exit(&c->btree_trans_pool); 3408 3415 }
+20 -13
fs/bcachefs/btree_key_cache.c
··· 32 32 } 33 33 34 34 static const struct rhashtable_params bch2_btree_key_cache_params = { 35 - .head_offset = offsetof(struct bkey_cached, hash), 36 - .key_offset = offsetof(struct bkey_cached, key), 37 - .key_len = sizeof(struct bkey_cached_key), 38 - .obj_cmpfn = bch2_btree_key_cache_cmp_fn, 35 + .head_offset = offsetof(struct bkey_cached, hash), 36 + .key_offset = offsetof(struct bkey_cached, key), 37 + .key_len = sizeof(struct bkey_cached_key), 38 + .obj_cmpfn = bch2_btree_key_cache_cmp_fn, 39 + .automatic_shrinking = true, 39 40 }; 40 41 41 42 __flatten ··· 841 840 six_lock_exit(&ck->c.lock); 842 841 kmem_cache_free(bch2_key_cache, ck); 843 842 atomic_long_dec(&bc->nr_freed); 844 - freed++; 845 843 bc->nr_freed_nonpcpu--; 846 844 bc->freed++; 847 845 } ··· 854 854 six_lock_exit(&ck->c.lock); 855 855 kmem_cache_free(bch2_key_cache, ck); 856 856 atomic_long_dec(&bc->nr_freed); 857 - freed++; 858 857 bc->nr_freed_pcpu--; 859 858 bc->freed++; 860 859 } ··· 875 876 876 877 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { 877 878 bc->skipped_dirty++; 878 - goto next; 879 879 } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) { 880 880 clear_bit(BKEY_CACHED_ACCESSED, &ck->flags); 881 881 bc->skipped_accessed++; 882 - goto next; 883 - } else if (bkey_cached_lock_for_evict(ck)) { 882 + } else if (!bkey_cached_lock_for_evict(ck)) { 883 + bc->skipped_lock_fail++; 884 + } else { 884 885 bkey_cached_evict(bc, ck); 885 886 bkey_cached_free(bc, ck); 886 887 bc->moved_to_freelist++; 887 - } else { 888 - bc->skipped_lock_fail++; 888 + freed++; 889 889 } 890 890 891 891 scanned++; 892 892 if (scanned >= nr) 893 893 break; 894 - next: 894 + 895 895 pos = next; 896 896 } 897 897 ··· 914 916 struct btree_key_cache *bc = &c->btree_key_cache; 915 917 long nr = atomic_long_read(&bc->nr_keys) - 916 918 atomic_long_read(&bc->nr_dirty); 919 + 920 + /* 921 + * Avoid hammering our shrinker too much if it's nearly empty - the 922 + * shrinker code doesn't take into account how big 
our cache is, if it's 923 + * mostly empty but the system is under memory pressure it causes nasty 924 + * lock contention: 925 + */ 926 + nr -= 128; 917 927 918 928 return max(0L, nr); 919 929 } ··· 1031 1025 if (!shrink) 1032 1026 return -BCH_ERR_ENOMEM_fs_btree_cache_init; 1033 1027 bc->shrink = shrink; 1034 - shrink->seeks = 0; 1035 1028 shrink->count_objects = bch2_btree_key_cache_count; 1036 1029 shrink->scan_objects = bch2_btree_key_cache_scan; 1030 + shrink->batch = 1 << 14; 1031 + shrink->seeks = 0; 1037 1032 shrink->private_data = c; 1038 1033 shrinker_register(shrink); 1039 1034 return 0;
+5 -4
fs/bcachefs/btree_node_scan.c
··· 72 72 73 73 struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false); 74 74 bool ret = !IS_ERR_OR_NULL(b); 75 - if (ret) { 76 - f->sectors_written = b->written; 77 - six_unlock_read(&b->c.lock); 78 - } 75 + if (!ret) 76 + return ret; 77 + 78 + f->sectors_written = b->written; 79 + six_unlock_read(&b->c.lock); 79 80 80 81 /* 81 82 * We might update this node's range; if that happens, we need the node
+173 -134
fs/bcachefs/buckets.c
··· 465 465 return bch2_update_replicas_list(trans, &r.e, sectors); 466 466 } 467 467 468 + static int bch2_check_fix_ptr(struct btree_trans *trans, 469 + struct bkey_s_c k, 470 + struct extent_ptr_decoded p, 471 + const union bch_extent_entry *entry, 472 + bool *do_update) 473 + { 474 + struct bch_fs *c = trans->c; 475 + struct printbuf buf = PRINTBUF; 476 + int ret = 0; 477 + 478 + struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); 479 + if (!ca) { 480 + if (fsck_err(c, ptr_to_invalid_device, 481 + "pointer to missing device %u\n" 482 + "while marking %s", 483 + p.ptr.dev, 484 + (printbuf_reset(&buf), 485 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 486 + *do_update = true; 487 + return 0; 488 + } 489 + 490 + struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); 491 + if (!g) { 492 + if (fsck_err(c, ptr_to_invalid_device, 493 + "pointer to invalid bucket on device %u\n" 494 + "while marking %s", 495 + p.ptr.dev, 496 + (printbuf_reset(&buf), 497 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 498 + *do_update = true; 499 + goto out; 500 + } 501 + 502 + enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry); 503 + 504 + if (fsck_err_on(!g->gen_valid, 505 + c, ptr_to_missing_alloc_key, 506 + "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" 507 + "while marking %s", 508 + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 509 + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 510 + p.ptr.gen, 511 + (printbuf_reset(&buf), 512 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 513 + if (!p.ptr.cached) { 514 + g->gen_valid = true; 515 + g->gen = p.ptr.gen; 516 + } else { 517 + *do_update = true; 518 + } 519 + } 520 + 521 + if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, 522 + c, ptr_gen_newer_than_bucket_gen, 523 + "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" 524 + "while marking %s", 525 + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 526 + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 527 + p.ptr.gen, g->gen, 528 + (printbuf_reset(&buf), 529 + 
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 530 + if (!p.ptr.cached && 531 + (g->data_type != BCH_DATA_btree || 532 + data_type == BCH_DATA_btree)) { 533 + g->gen_valid = true; 534 + g->gen = p.ptr.gen; 535 + g->data_type = 0; 536 + g->dirty_sectors = 0; 537 + g->cached_sectors = 0; 538 + } else { 539 + *do_update = true; 540 + } 541 + } 542 + 543 + if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, 544 + c, ptr_gen_newer_than_bucket_gen, 545 + "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" 546 + "while marking %s", 547 + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, 548 + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 549 + p.ptr.gen, 550 + (printbuf_reset(&buf), 551 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 552 + *do_update = true; 553 + 554 + if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0, 555 + c, stale_dirty_ptr, 556 + "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" 557 + "while marking %s", 558 + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 559 + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 560 + p.ptr.gen, g->gen, 561 + (printbuf_reset(&buf), 562 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 563 + *do_update = true; 564 + 565 + if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) 566 + goto out; 567 + 568 + if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type), 569 + c, ptr_bucket_data_type_mismatch, 570 + "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" 571 + "while marking %s", 572 + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, 573 + bch2_data_type_str(g->data_type), 574 + bch2_data_type_str(data_type), 575 + (printbuf_reset(&buf), 576 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 577 + if (data_type == BCH_DATA_btree) { 578 + g->gen_valid = true; 579 + g->gen = p.ptr.gen; 580 + g->data_type = data_type; 581 + g->dirty_sectors = 0; 582 + g->cached_sectors = 0; 583 + } else { 584 + *do_update = true; 585 + } 586 + } 587 + 588 + if (p.has_ec) { 589 + 
struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); 590 + 591 + if (fsck_err_on(!m || !m->alive, 592 + c, ptr_to_missing_stripe, 593 + "pointer to nonexistent stripe %llu\n" 594 + "while marking %s", 595 + (u64) p.ec.idx, 596 + (printbuf_reset(&buf), 597 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 598 + *do_update = true; 599 + 600 + if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), 601 + c, ptr_to_incorrect_stripe, 602 + "pointer does not match stripe %llu\n" 603 + "while marking %s", 604 + (u64) p.ec.idx, 605 + (printbuf_reset(&buf), 606 + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 607 + *do_update = true; 608 + } 609 + out: 610 + fsck_err: 611 + bch2_dev_put(ca); 612 + printbuf_exit(&buf); 613 + return ret; 614 + } 615 + 468 616 int bch2_check_fix_ptrs(struct btree_trans *trans, 469 617 enum btree_id btree, unsigned level, struct bkey_s_c k, 470 618 enum btree_iter_update_trigger_flags flags) ··· 628 480 percpu_down_read(&c->mark_lock); 629 481 630 482 bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) { 631 - struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); 632 - if (!ca) { 633 - if (fsck_err(c, ptr_to_invalid_device, 634 - "pointer to missing device %u\n" 635 - "while marking %s", 636 - p.ptr.dev, 637 - (printbuf_reset(&buf), 638 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 639 - do_update = true; 640 - continue; 641 - } 642 - 643 - struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); 644 - enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry_c); 645 - 646 - if (fsck_err_on(!g->gen_valid, 647 - c, ptr_to_missing_alloc_key, 648 - "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" 649 - "while marking %s", 650 - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 651 - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 652 - p.ptr.gen, 653 - (printbuf_reset(&buf), 654 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 655 - if (!p.ptr.cached) { 656 - g->gen_valid = true; 657 - g->gen = p.ptr.gen; 658 - } else { 659 - 
do_update = true; 660 - } 661 - } 662 - 663 - if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, 664 - c, ptr_gen_newer_than_bucket_gen, 665 - "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" 666 - "while marking %s", 667 - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 668 - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 669 - p.ptr.gen, g->gen, 670 - (printbuf_reset(&buf), 671 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 672 - if (!p.ptr.cached && 673 - (g->data_type != BCH_DATA_btree || 674 - data_type == BCH_DATA_btree)) { 675 - g->gen_valid = true; 676 - g->gen = p.ptr.gen; 677 - g->data_type = 0; 678 - g->dirty_sectors = 0; 679 - g->cached_sectors = 0; 680 - } else { 681 - do_update = true; 682 - } 683 - } 684 - 685 - if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, 686 - c, ptr_gen_newer_than_bucket_gen, 687 - "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" 688 - "while marking %s", 689 - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, 690 - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 691 - p.ptr.gen, 692 - (printbuf_reset(&buf), 693 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 694 - do_update = true; 695 - 696 - if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0, 697 - c, stale_dirty_ptr, 698 - "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" 699 - "while marking %s", 700 - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 701 - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), 702 - p.ptr.gen, g->gen, 703 - (printbuf_reset(&buf), 704 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 705 - do_update = true; 706 - 707 - if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) 708 - goto next; 709 - 710 - if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type), 711 - c, ptr_bucket_data_type_mismatch, 712 - "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" 713 - "while marking %s", 714 - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, 715 - bch2_data_type_str(g->data_type), 716 - 
bch2_data_type_str(data_type), 717 - (printbuf_reset(&buf), 718 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 719 - if (data_type == BCH_DATA_btree) { 720 - g->gen_valid = true; 721 - g->gen = p.ptr.gen; 722 - g->data_type = data_type; 723 - g->dirty_sectors = 0; 724 - g->cached_sectors = 0; 725 - } else { 726 - do_update = true; 727 - } 728 - } 729 - 730 - if (p.has_ec) { 731 - struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); 732 - 733 - if (fsck_err_on(!m || !m->alive, c, 734 - ptr_to_missing_stripe, 735 - "pointer to nonexistent stripe %llu\n" 736 - "while marking %s", 737 - (u64) p.ec.idx, 738 - (printbuf_reset(&buf), 739 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 740 - do_update = true; 741 - 742 - if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c, 743 - ptr_to_incorrect_stripe, 744 - "pointer does not match stripe %llu\n" 745 - "while marking %s", 746 - (u64) p.ec.idx, 747 - (printbuf_reset(&buf), 748 - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 749 - do_update = true; 750 - } 751 - next: 752 - bch2_dev_put(ca); 483 + ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update); 484 + if (ret) 485 + goto err; 753 486 } 754 487 755 488 if (do_update) { ··· 745 716 bch2_btree_node_update_key_early(trans, btree, level - 1, k, new); 746 717 } 747 718 err: 748 - fsck_err: 749 719 percpu_up_read(&c->mark_lock); 750 720 printbuf_exit(&buf); 751 721 return ret; ··· 1015 987 enum btree_iter_update_trigger_flags flags) 1016 988 { 1017 989 bool insert = !(flags & BTREE_TRIGGER_overwrite); 990 + struct printbuf buf = PRINTBUF; 1018 991 int ret = 0; 1019 992 1020 993 struct bch_fs *c = trans->c; ··· 1048 1019 if (flags & BTREE_TRIGGER_gc) { 1049 1020 percpu_down_read(&c->mark_lock); 1050 1021 struct bucket *g = gc_bucket(ca, bucket.offset); 1022 + if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", 1023 + p.ptr.dev, 1024 + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 1025 + ret = -EIO; 1026 + 
goto err_unlock; 1027 + } 1028 + 1051 1029 bucket_lock(g); 1052 1030 struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; 1053 1031 ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new); ··· 1063 1027 bch2_dev_usage_update(c, ca, &old, &new, 0, true); 1064 1028 } 1065 1029 bucket_unlock(g); 1030 + err_unlock: 1066 1031 percpu_up_read(&c->mark_lock); 1067 1032 } 1068 1033 err: 1069 1034 bch2_dev_put(ca); 1035 + printbuf_exit(&buf); 1070 1036 return ret; 1071 1037 } 1072 1038 ··· 1356 1318 u64 b, enum bch_data_type data_type, unsigned sectors, 1357 1319 enum btree_iter_update_trigger_flags flags) 1358 1320 { 1359 - int ret = 0; 1360 - 1361 1321 percpu_down_read(&c->mark_lock); 1362 1322 struct bucket *g = gc_bucket(ca, b); 1323 + if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s", 1324 + ca->dev_idx, bch2_data_type_str(data_type))) 1325 + goto err_unlock; 1363 1326 1364 1327 bucket_lock(g); 1365 1328 struct bch_alloc_v4 old = bucket_m_to_alloc(*g); ··· 1369 1330 g->data_type != data_type, c, 1370 1331 "different types of data in same bucket: %s, %s", 1371 1332 bch2_data_type_str(g->data_type), 1372 - bch2_data_type_str(data_type))) { 1373 - ret = -EIO; 1333 + bch2_data_type_str(data_type))) 1374 1334 goto err; 1375 - } 1376 1335 1377 1336 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c, 1378 1337 "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size", 1379 1338 ca->dev_idx, b, g->gen, 1380 1339 bch2_data_type_str(g->data_type ?: data_type), 1381 - g->dirty_sectors, sectors)) { 1382 - ret = -EIO; 1340 + g->dirty_sectors, sectors)) 1383 1341 goto err; 1384 - } 1385 1342 1386 1343 g->data_type = data_type; 1387 1344 g->dirty_sectors += sectors; 1388 1345 struct bch_alloc_v4 new = bucket_m_to_alloc(*g); 1346 + bch2_dev_usage_update(c, ca, &old, &new, 0, true); 1347 + percpu_up_read(&c->mark_lock); 1348 + return 0; 1389 1349 
err: 1390 1350 bucket_unlock(g); 1391 - if (!ret) 1392 - bch2_dev_usage_update(c, ca, &old, &new, 0, true); 1351 + err_unlock: 1393 1352 percpu_up_read(&c->mark_lock); 1394 - return ret; 1353 + return -EIO; 1395 1354 } 1396 1355 1397 1356 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans, ··· 1632 1595 1633 1596 bucket_gens->first_bucket = ca->mi.first_bucket; 1634 1597 bucket_gens->nbuckets = nbuckets; 1598 + bucket_gens->nbuckets_minus_first = 1599 + bucket_gens->nbuckets - bucket_gens->first_bucket; 1635 1600 1636 1601 if (resize) { 1637 1602 down_write(&c->gc_lock);
+11 -6
fs/bcachefs/buckets.h
··· 93 93 { 94 94 struct bucket_array *buckets = gc_bucket_array(ca); 95 95 96 - BUG_ON(!bucket_valid(ca, b)); 96 + if (b - buckets->first_bucket >= buckets->nbuckets_minus_first) 97 + return NULL; 97 98 return buckets->b + b; 98 99 } 99 100 ··· 111 110 { 112 111 struct bucket_gens *gens = bucket_gens(ca); 113 112 114 - BUG_ON(!bucket_valid(ca, b)); 113 + if (b - gens->first_bucket >= gens->nbuckets_minus_first) 114 + return NULL; 115 115 return gens->b + b; 116 116 } 117 117 ··· 172 170 return r > 0 ? r : 0; 173 171 } 174 172 175 - static inline u8 dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr) 173 + static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr) 176 174 { 177 - return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen); 175 + u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)); 176 + if (!gen) 177 + return -1; 178 + return gen_after(*gen, ptr->gen); 178 179 } 179 180 180 181 /** 181 182 * dev_ptr_stale() - check if a pointer points into a bucket that has been 182 183 * invalidated. 183 184 */ 184 - static inline u8 dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr) 185 + static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr) 185 186 { 186 187 rcu_read_lock(); 187 - u8 ret = dev_ptr_stale_rcu(ca, ptr); 188 + int ret = dev_ptr_stale_rcu(ca, ptr); 188 189 rcu_read_unlock(); 189 190 190 191 return ret;
+2
fs/bcachefs/buckets_types.h
··· 22 22 struct rcu_head rcu; 23 23 u16 first_bucket; 24 24 size_t nbuckets; 25 + size_t nbuckets_minus_first; 25 26 struct bucket b[]; 26 27 }; 27 28 ··· 30 29 struct rcu_head rcu; 31 30 u16 first_bucket; 32 31 size_t nbuckets; 32 + size_t nbuckets_minus_first; 33 33 u8 b[]; 34 34 }; 35 35
+1 -2
fs/bcachefs/data_update.c
··· 202 202 bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i)); 203 203 204 204 /* Now, drop excess replicas: */ 205 - restart_drop_extra_replicas: 206 - 207 205 rcu_read_lock(); 206 + restart_drop_extra_replicas: 208 207 bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) { 209 208 unsigned ptr_durability = bch2_extent_ptr_durability(c, &p); 210 209
+20 -6
fs/bcachefs/ec.c
··· 268 268 { 269 269 struct bch_fs *c = trans->c; 270 270 const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx; 271 + struct printbuf buf = PRINTBUF; 271 272 int ret = 0; 272 273 273 274 struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev); ··· 290 289 if (flags & BTREE_TRIGGER_gc) { 291 290 percpu_down_read(&c->mark_lock); 292 291 struct bucket *g = gc_bucket(ca, bucket.offset); 292 + if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", 293 + ptr->dev, 294 + (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 295 + ret = -EIO; 296 + goto err_unlock; 297 + } 298 + 293 299 bucket_lock(g); 294 300 struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; 295 301 ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags); ··· 305 297 bch2_dev_usage_update(c, ca, &old, &new, 0, true); 306 298 } 307 299 bucket_unlock(g); 300 + err_unlock: 308 301 percpu_up_read(&c->mark_lock); 309 302 } 310 303 err: 311 304 bch2_dev_put(ca); 305 + printbuf_exit(&buf); 312 306 return ret; 313 307 } 314 308 ··· 724 714 bch2_blk_status_to_str(bio->bi_status))) 725 715 clear_bit(ec_bio->idx, ec_bio->buf->valid); 726 716 727 - if (dev_ptr_stale(ca, ptr)) { 717 + int stale = dev_ptr_stale(ca, ptr); 718 + if (stale) { 728 719 bch_err_ratelimited(ca->fs, 729 - "error %s stripe: stale pointer after io", 730 - bio_data_dir(bio) == READ ? "reading from" : "writing to"); 720 + "error %s stripe: stale/invalid pointer (%i) after io", 721 + bio_data_dir(bio) == READ ? "reading from" : "writing to", 722 + stale); 731 723 clear_bit(ec_bio->idx, ec_bio->buf->valid); 732 724 } 733 725 ··· 755 743 return; 756 744 } 757 745 758 - if (dev_ptr_stale(ca, ptr)) { 746 + int stale = dev_ptr_stale(ca, ptr); 747 + if (stale) { 759 748 bch_err_ratelimited(c, 760 - "error %s stripe: stale pointer", 761 - rw == READ ? "reading from" : "writing to"); 749 + "error %s stripe: stale pointer (%i)", 750 + rw == READ ? 
"reading from" : "writing to", 751 + stale); 762 752 clear_bit(idx, buf->valid); 763 753 return; 764 754 }
+6 -3
fs/bcachefs/extents.c
··· 137 137 138 138 struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); 139 139 140 - if (p.ptr.cached && (!ca || dev_ptr_stale(ca, &p.ptr))) 140 + if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr))) 141 141 continue; 142 142 143 143 f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL; ··· 999 999 bch2_bkey_drop_ptrs(k, ptr, 1000 1000 ptr->cached && 1001 1001 (ca = bch2_dev_rcu(c, ptr->dev)) && 1002 - dev_ptr_stale_rcu(ca, ptr)); 1002 + dev_ptr_stale_rcu(ca, ptr) > 0); 1003 1003 rcu_read_unlock(); 1004 1004 1005 1005 return bkey_deleted(k.k); ··· 1024 1024 prt_str(out, " cached"); 1025 1025 if (ptr->unwritten) 1026 1026 prt_str(out, " unwritten"); 1027 - if (bucket_valid(ca, b) && dev_ptr_stale_rcu(ca, ptr)) 1027 + int stale = dev_ptr_stale_rcu(ca, ptr); 1028 + if (stale > 0) 1028 1029 prt_printf(out, " stale"); 1030 + else if (stale) 1031 + prt_printf(out, " invalid"); 1029 1032 } 1030 1033 rcu_read_unlock(); 1031 1034 --out->atomic;
+5 -12
fs/bcachefs/fs-ioctl.c
··· 308 308 return ret; 309 309 } 310 310 311 - static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, 312 - struct bch_ioctl_subvolume arg) 311 + static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, 312 + struct bch_ioctl_subvolume arg) 313 313 { 314 314 struct inode *dir; 315 315 struct bch_inode_info *inode; ··· 406 406 !arg.src_ptr) 407 407 snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol; 408 408 409 + down_write(&c->snapshot_create_lock); 409 410 inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir), 410 411 dst_dentry, arg.mode|S_IFDIR, 411 412 0, snapshot_src, create_flags); 413 + up_write(&c->snapshot_create_lock); 414 + 412 415 error = PTR_ERR_OR_ZERO(inode); 413 416 if (error) 414 417 goto err3; ··· 430 427 } 431 428 err1: 432 429 return error; 433 - } 434 - 435 - static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, 436 - struct bch_ioctl_subvolume arg) 437 - { 438 - down_write(&c->snapshot_create_lock); 439 - long ret = __bch2_ioctl_subvolume_create(c, filp, arg); 440 - up_write(&c->snapshot_create_lock); 441 - 442 - return ret; 443 430 } 444 431 445 432 static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
+3
fs/bcachefs/fs.c
··· 227 227 mutex_init(&inode->ei_update_lock); 228 228 two_state_lock_init(&inode->ei_pagecache_lock); 229 229 INIT_LIST_HEAD(&inode->ei_vfs_inode_list); 230 + inode->ei_flags = 0; 230 231 mutex_init(&inode->ei_quota_lock); 232 + memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush)); 231 233 inode->v.i_state = 0; 232 234 233 235 if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) { ··· 1969 1967 sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1; 1970 1968 sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec); 1971 1969 sb->s_uuid = c->sb.user_uuid; 1970 + sb->s_shrink->seeks = 0; 1972 1971 c->vfs_sb = sb; 1973 1972 strscpy(sb->s_id, c->name, sizeof(sb->s_id)); 1974 1973
+3
fs/bcachefs/fsck.c
··· 1677 1677 trans_was_restarted(trans, restart_count); 1678 1678 } 1679 1679 1680 + noinline_for_stack 1680 1681 static int check_dirent_inode_dirent(struct btree_trans *trans, 1681 1682 struct btree_iter *iter, 1682 1683 struct bkey_s_c_dirent d, ··· 1774 1773 return ret; 1775 1774 } 1776 1775 1776 + noinline_for_stack 1777 1777 static int check_dirent_target(struct btree_trans *trans, 1778 1778 struct btree_iter *iter, 1779 1779 struct bkey_s_c_dirent d, ··· 1849 1847 return ret; 1850 1848 } 1851 1849 1850 + noinline_for_stack 1852 1851 static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter, 1853 1852 struct bkey_s_c_dirent d) 1854 1853 {
+27 -12
fs/bcachefs/io_read.c
··· 84 84 }; 85 85 86 86 static const struct rhashtable_params bch_promote_params = { 87 - .head_offset = offsetof(struct promote_op, hash), 88 - .key_offset = offsetof(struct promote_op, pos), 89 - .key_len = sizeof(struct bpos), 87 + .head_offset = offsetof(struct promote_op, hash), 88 + .key_offset = offsetof(struct promote_op, pos), 89 + .key_len = sizeof(struct bpos), 90 + .automatic_shrinking = true, 90 91 }; 91 92 92 93 static inline int should_promote(struct bch_fs *c, struct bkey_s_c k, ··· 777 776 PTR_BUCKET_POS(ca, &ptr), 778 777 BTREE_ITER_cached); 779 778 780 - prt_printf(&buf, "Attempting to read from stale dirty pointer:\n"); 781 - printbuf_indent_add(&buf, 2); 779 + u8 *gen = bucket_gen(ca, iter.pos.offset); 780 + if (gen) { 782 781 783 - bch2_bkey_val_to_text(&buf, c, k); 784 - prt_newline(&buf); 782 + prt_printf(&buf, "Attempting to read from stale dirty pointer:\n"); 783 + printbuf_indent_add(&buf, 2); 785 784 786 - prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset)); 787 - 788 - ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); 789 - if (!ret) { 790 - prt_newline(&buf); 791 785 bch2_bkey_val_to_text(&buf, c, k); 786 + prt_newline(&buf); 787 + 788 + prt_printf(&buf, "memory gen: %u", *gen); 789 + 790 + ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); 791 + if (!ret) { 792 + prt_newline(&buf); 793 + bch2_bkey_val_to_text(&buf, c, k); 794 + } 795 + } else { 796 + prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n", 797 + iter.pos.inode, iter.pos.offset); 798 + printbuf_indent_add(&buf, 2); 799 + 800 + prt_printf(&buf, "first bucket %u nbuckets %llu\n", 801 + ca->mi.first_bucket, ca->mi.nbuckets); 802 + 803 + bch2_bkey_val_to_text(&buf, c, k); 804 + prt_newline(&buf); 792 805 } 793 806 794 807 bch2_fs_inconsistent(c, "%s", buf.buf);
+15 -4
fs/bcachefs/io_write.c
··· 1220 1220 DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets; 1221 1221 u32 snapshot; 1222 1222 struct bucket_to_lock *stale_at; 1223 - int ret; 1223 + int stale, ret; 1224 1224 1225 1225 if (op->flags & BCH_WRITE_MOVE) 1226 1226 return; ··· 1299 1299 BUCKET_NOCOW_LOCK_UPDATE); 1300 1300 1301 1301 rcu_read_lock(); 1302 - bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen); 1302 + u8 *gen = bucket_gen(ca, i->b.offset); 1303 + stale = !gen ? -1 : gen_after(*gen, i->gen); 1303 1304 rcu_read_unlock(); 1304 1305 1305 1306 if (unlikely(stale)) { ··· 1381 1380 break; 1382 1381 } 1383 1382 1384 - /* We can retry this: */ 1385 - ret = -BCH_ERR_transaction_restart; 1383 + struct printbuf buf = PRINTBUF; 1384 + if (bch2_fs_inconsistent_on(stale < 0, c, 1385 + "pointer to invalid bucket in nocow path on device %llu\n %s", 1386 + stale_at->b.inode, 1387 + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 1388 + ret = -EIO; 1389 + } else { 1390 + /* We can retry this: */ 1391 + ret = -BCH_ERR_transaction_restart; 1392 + } 1393 + printbuf_exit(&buf); 1394 + 1386 1395 goto err_get_ioref; 1387 1396 } 1388 1397
+4 -3
fs/bcachefs/movinggc.c
··· 35 35 }; 36 36 37 37 static const struct rhashtable_params bch_move_bucket_params = { 38 - .head_offset = offsetof(struct move_bucket_in_flight, hash), 39 - .key_offset = offsetof(struct move_bucket_in_flight, bucket.k), 40 - .key_len = sizeof(struct move_bucket_key), 38 + .head_offset = offsetof(struct move_bucket_in_flight, hash), 39 + .key_offset = offsetof(struct move_bucket_in_flight, bucket.k), 40 + .key_len = sizeof(struct move_bucket_key), 41 + .automatic_shrinking = true, 41 42 }; 42 43 43 44 static struct move_bucket_in_flight *
+3 -3
fs/bcachefs/super-io.c
··· 1310 1310 1311 1311 prt_printf(out, "Device index:\t%u\n", sb->dev_idx); 1312 1312 1313 - prt_str(out, "Label:\t"); 1313 + prt_printf(out, "Label:\t"); 1314 1314 prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label); 1315 1315 prt_newline(out); 1316 1316 1317 - prt_str(out, "Version:\t"); 1317 + prt_printf(out, "Version:\t"); 1318 1318 bch2_version_to_text(out, le16_to_cpu(sb->version)); 1319 1319 prt_newline(out); 1320 1320 1321 - prt_str(out, "Version upgrade complete:\t"); 1321 + prt_printf(out, "Version upgrade complete:\t"); 1322 1322 bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb)); 1323 1323 prt_newline(out); 1324 1324
+7 -3
fs/bcachefs/super.c
··· 582 582 583 583 if (c->write_ref_wq) 584 584 destroy_workqueue(c->write_ref_wq); 585 - if (c->io_complete_wq) 586 - destroy_workqueue(c->io_complete_wq); 585 + if (c->btree_write_submit_wq) 586 + destroy_workqueue(c->btree_write_submit_wq); 587 + if (c->btree_read_complete_wq) 588 + destroy_workqueue(c->btree_read_complete_wq); 587 589 if (c->copygc_wq) 588 590 destroy_workqueue(c->copygc_wq); 589 591 if (c->btree_io_complete_wq) ··· 880 878 WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || 881 879 !(c->copygc_wq = alloc_workqueue("bcachefs_copygc", 882 880 WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || 883 - !(c->io_complete_wq = alloc_workqueue("bcachefs_io", 881 + !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete", 884 882 WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) || 883 + !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit", 884 + WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || 885 885 !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref", 886 886 WQ_FREEZABLE, 0)) || 887 887 #ifndef BCH_WRITE_REF_DEBUG