Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: trans->nr_paths

Start to plumb through dynamically growable btree_paths; this patch
replaces most BTREE_ITER_MAX references with trans->nr_paths.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+26 -8
+5 -4
fs/bcachefs/btree_iter.c
@@ -1475,7 +1475,7 @@
 {
 	struct btree_transaction_stats *s = btree_trans_stats(trans);
 	struct printbuf buf = PRINTBUF;
-	size_t nr = bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX);
+	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
 
 	if (!s)
 		return;
@@ -1521,9 +1521,9 @@
 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
 						btree_path_idx_t pos)
 {
-	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, BTREE_ITER_MAX);
+	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
 
-	if (unlikely(idx == BTREE_ITER_MAX))
+	if (unlikely(idx == trans->nr_paths))
 		btree_path_overflow(trans);
 
 	/*
@@ -2527,7 +2527,7 @@
 	struct btree_path *path;
 	unsigned i;
 
-	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX) - 1);
+	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
 
 	trans_for_each_path(trans, path, i) {
 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
@@ -2933,6 +2933,7 @@
 	trans->journal_replay_not_finished =
 		unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
 		atomic_inc_not_zero(&c->journal_keys.ref);
+	trans->nr_paths		= ARRAY_SIZE(trans->_paths);
 	trans->paths_allocated	= trans->_paths_allocated;
 	trans->sorted		= trans->_sorted;
 	trans->paths		= trans->_paths;
+19 -3
fs/bcachefs/btree_iter.h
@@ -82,9 +82,25 @@
 static inline struct btree_path *
 __trans_next_path(struct btree_trans *trans, unsigned *idx)
 {
-	*idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, *idx);
+	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
+	/*
+	 * Open coded find_next_bit(), because
+	 *  - this is fast path, we can't afford the function call
+	 *  - and we know that nr_paths is a multiple of BITS_PER_LONG,
+	 */
+	while (*idx < trans->nr_paths) {
+		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
+		if (v) {
+			*idx += __ffs(v);
+			return trans->paths + *idx;
+		}
 
-	return *idx < BTREE_ITER_MAX ? &trans->paths[*idx] : NULL;
+		*idx += BITS_PER_LONG;
+		*idx &= ~(BITS_PER_LONG - 1);
+		w++;
+	}
+
+	return NULL;
 }
 
 /*
@@ -642,7 +626,7 @@
 
 static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
-	if (bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX) > BTREE_ITER_MAX - 8)
+	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_MAX - 8)
 		return __bch2_btree_trans_too_many_iters(trans);
 
 	return 0;
+1
fs/bcachefs/btree_types.h
@@ -390,6 +390,7 @@
 	unsigned		mem_bytes;
 
 	btree_path_idx_t	nr_sorted;
+	btree_path_idx_t	nr_paths;
 	btree_path_idx_t	nr_paths_max;
 	u8			fn_idx;
 	u8			nr_updates;
+1 -1
fs/bcachefs/btree_update.c
@@ -386,7 +386,7 @@
 
 	struct btree_path *path = trans->paths + path_idx;
 	EBUG_ON(!path->should_be_locked);
-	EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
+	EBUG_ON(trans->nr_updates >= trans->nr_paths);
 	EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
 	n = (struct btree_insert_entry) {