1// SPDX-License-Identifier: GPL-2.0
2
3#include "bcachefs.h"
4#include "bkey_methods.h"
5#include "bkey_buf.h"
6#include "btree_cache.h"
7#include "btree_iter.h"
8#include "btree_journal_iter.h"
9#include "btree_key_cache.h"
10#include "btree_locking.h"
11#include "btree_update.h"
12#include "debug.h"
13#include "error.h"
14#include "extents.h"
15#include "journal.h"
16#include "journal_io.h"
17#include "replicas.h"
18#include "snapshot.h"
19#include "trace.h"
20
21#include <linux/random.h>
22#include <linux/prefetch.h>
23
24static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25static inline void btree_path_list_add(struct btree_trans *,
26 btree_path_idx_t, btree_path_idx_t);
27
28static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
29{
30#ifdef TRACK_PATH_ALLOCATED
31 return iter->ip_allocated;
32#else
33 return 0;
34#endif
35}
36
37static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
38static void bch2_trans_srcu_lock(struct btree_trans *);
39
40static inline int __btree_path_cmp(const struct btree_path *l,
41 enum btree_id r_btree_id,
42 bool r_cached,
43 struct bpos r_pos,
44 unsigned r_level)
45{
46 /*
47 * Must match lock ordering as defined by __bch2_btree_node_lock:
48 */
49 return cmp_int(l->btree_id, r_btree_id) ?:
50 cmp_int((int) l->cached, (int) r_cached) ?:
51 bpos_cmp(l->pos, r_pos) ?:
52 -cmp_int(l->level, r_level);
53}
54
55static inline int btree_path_cmp(const struct btree_path *l,
56 const struct btree_path *r)
57{
58 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
59}
60
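/*
 * Advance/rewind a position for iteration: unless we're iterating over every
 * snapshot, the snapshot field is skipped over and pinned to the iterator's
 * snapshot:
 */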
61static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
62{
63 /* Are we iterating over keys in all snapshots? */
64 if (iter->flags & BTREE_ITER_all_snapshots) {
65 p = bpos_successor(p);
66 } else {
67 p = bpos_nosnap_successor(p);
68 p.snapshot = iter->snapshot;
69 }
70
71 return p;
72}
73
74static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
75{
76 /* Are we iterating over keys in all snapshots? */
77 if (iter->flags & BTREE_ITER_all_snapshots) {
78 p = bpos_predecessor(p);
79 } else {
80 p = bpos_nosnap_predecessor(p);
81 p.snapshot = iter->snapshot;
82 }
83
84 return p;
85}
86
87static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
88{
89 struct bpos pos = iter->pos;
90
91 if ((iter->flags & BTREE_ITER_is_extents) &&
92 !bkey_eq(pos, POS_MAX))
93 pos = bkey_successor(iter, pos);
94 return pos;
95}
96
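/*
 * A path's position is "in" a node when the btree ids match and the position
 * falls within [data->min_key, key.k.p] for that node:
 */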
97static inline bool btree_path_pos_before_node(struct btree_path *path,
98 struct btree *b)
99{
100 return bpos_lt(path->pos, b->data->min_key);
101}
102
103static inline bool btree_path_pos_after_node(struct btree_path *path,
104 struct btree *b)
105{
106 return bpos_gt(path->pos, b->key.k.p);
107}
108
109static inline bool btree_path_pos_in_node(struct btree_path *path,
110 struct btree *b)
111{
112 return path->btree_id == b->c.btree_id &&
113 !btree_path_pos_before_node(path, b) &&
114 !btree_path_pos_after_node(path, b);
115}
116
117/* Btree iterator: */
118
119#ifdef CONFIG_BCACHEFS_DEBUG
120
121static void bch2_btree_path_verify_cached(struct btree_trans *trans,
122 struct btree_path *path)
123{
124 struct bkey_cached *ck;
125 bool locked = btree_node_locked(path, 0);
126
127 if (!bch2_btree_node_relock(trans, path, 0))
128 return;
129
130 ck = (void *) path->l[0].b;
131 BUG_ON(ck->key.btree_id != path->btree_id ||
132 !bkey_eq(ck->key.pos, path->pos));
133
134 if (!locked)
135 btree_node_unlock(trans, path, 0);
136}
137
138static void bch2_btree_path_verify_level(struct btree_trans *trans,
139 struct btree_path *path, unsigned level)
140{
141 struct btree_path_level *l;
142 struct btree_node_iter tmp;
143 bool locked;
144 struct bkey_packed *p, *k;
145 struct printbuf buf1 = PRINTBUF;
146 struct printbuf buf2 = PRINTBUF;
147 struct printbuf buf3 = PRINTBUF;
148 const char *msg;
149
150 if (!bch2_debug_check_iterators)
151 return;
152
153 l = &path->l[level];
154 tmp = l->iter;
155 locked = btree_node_locked(path, level);
156
157 if (path->cached) {
158 if (!level)
159 bch2_btree_path_verify_cached(trans, path);
160 return;
161 }
162
163 if (!btree_path_node(path, level))
164 return;
165
166 if (!bch2_btree_node_relock_notrace(trans, path, level))
167 return;
168
169 BUG_ON(!btree_path_pos_in_node(path, l->b));
170
171 bch2_btree_node_iter_verify(&l->iter, l->b);
172
173 /*
174 * For interior nodes, the iterator will have skipped past deleted keys:
175 */
176 p = level
177 ? bch2_btree_node_iter_prev(&tmp, l->b)
178 : bch2_btree_node_iter_prev_all(&tmp, l->b);
179 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
180
181 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
182 msg = "before";
183 goto err;
184 }
185
186 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
187 msg = "after";
188 goto err;
189 }
190
191 if (!locked)
192 btree_node_unlock(trans, path, level);
193 return;
194err:
195 bch2_bpos_to_text(&buf1, path->pos);
196
197 if (p) {
198 struct bkey uk = bkey_unpack_key(l->b, p);
199
200 bch2_bkey_to_text(&buf2, &uk);
201 } else {
202 prt_printf(&buf2, "(none)");
203 }
204
205 if (k) {
206 struct bkey uk = bkey_unpack_key(l->b, k);
207
208 bch2_bkey_to_text(&buf3, &uk);
209 } else {
210 prt_printf(&buf3, "(none)");
211 }
212
213 panic("path should be %s key at level %u:\n"
214 "path pos %s\n"
215 "prev key %s\n"
216 "cur key %s\n",
217 msg, level, buf1.buf, buf2.buf, buf3.buf);
218}
219
220static void bch2_btree_path_verify(struct btree_trans *trans,
221 struct btree_path *path)
222{
223 struct bch_fs *c = trans->c;
224
225 for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
226 if (!path->l[i].b) {
227 BUG_ON(!path->cached &&
228 bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
229 break;
230 }
231
232 bch2_btree_path_verify_level(trans, path, i);
233 }
234
235 bch2_btree_path_verify_locks(path);
236}
237
238void bch2_trans_verify_paths(struct btree_trans *trans)
239{
240 struct btree_path *path;
241 unsigned iter;
242
243 trans_for_each_path(trans, path, iter)
244 bch2_btree_path_verify(trans, path);
245}
246
247static void bch2_btree_iter_verify(struct btree_iter *iter)
248{
249 struct btree_trans *trans = iter->trans;
250
251 BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
252
253 BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
254 (iter->flags & BTREE_ITER_all_snapshots));
255
256 BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
257 (iter->flags & BTREE_ITER_all_snapshots) &&
258 !btree_type_has_snapshot_field(iter->btree_id));
259
260 if (iter->update_path)
261 bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
262 bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
263}
264
265static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
266{
267 BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
268 !iter->pos.snapshot);
269
270 BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
271 iter->pos.snapshot != iter->snapshot);
272
273 BUG_ON(iter->flags & BTREE_ITER_all_snapshots ? !bpos_eq(iter->pos, iter->k.p) :
274 !(iter->flags & BTREE_ITER_is_extents) ? !bkey_eq(iter->pos, iter->k.p) :
275 (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
276 bkey_gt(iter->pos, iter->k.p)));
277}
278
279static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
280{
281 struct btree_trans *trans = iter->trans;
282 struct btree_iter copy;
283 struct bkey_s_c prev;
284 int ret = 0;
285
286 if (!bch2_debug_check_iterators)
287 return 0;
288
289 if (!(iter->flags & BTREE_ITER_filter_snapshots))
290 return 0;
291
292 if (bkey_err(k) || !k.k)
293 return 0;
294
295 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
296 iter->snapshot,
297 k.k->p.snapshot));
298
299 bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
300 BTREE_ITER_nopreserve|
301 BTREE_ITER_all_snapshots);
302 prev = bch2_btree_iter_prev(&copy);
303 if (!prev.k)
304 goto out;
305
306 ret = bkey_err(prev);
307 if (ret)
308 goto out;
309
310 if (bkey_eq(prev.k->p, k.k->p) &&
311 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
312 prev.k->p.snapshot) > 0) {
313 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
314
315 bch2_bkey_to_text(&buf1, k.k);
316 bch2_bkey_to_text(&buf2, prev.k);
317
318 panic("iter snap %u\n"
319 "k %s\n"
320 "prev %s\n",
321 iter->snapshot,
322 buf1.buf, buf2.buf);
323 }
324out:
325 bch2_trans_iter_exit(trans, &copy);
326 return ret;
327}
328
329void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
330 struct bpos pos)
331{
332 bch2_trans_verify_not_unlocked_or_in_restart(trans);
333
334 struct btree_path *path;
335 struct trans_for_each_path_inorder_iter iter;
336 struct printbuf buf = PRINTBUF;
337
338 btree_trans_sort_paths(trans);
339
340 trans_for_each_path_inorder(trans, path, iter) {
341 if (path->btree_id != id ||
342 !btree_node_locked(path, 0) ||
343 !path->should_be_locked)
344 continue;
345
346 if (!path->cached) {
347 if (bkey_ge(pos, path->l[0].b->data->min_key) &&
348 bkey_le(pos, path->l[0].b->key.k.p))
349 return;
350 } else {
351 if (bkey_eq(pos, path->pos))
352 return;
353 }
354 }
355
356 bch2_dump_trans_paths_updates(trans);
357 bch2_bpos_to_text(&buf, pos);
358
359 panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
360}
361
362#else
363
364static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
365 struct btree_path *path, unsigned l) {}
366static inline void bch2_btree_path_verify(struct btree_trans *trans,
367 struct btree_path *path) {}
368static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
369static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
370static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
371
372#endif
373
374/* Btree path: fixups after btree updates */
375
376static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
377 struct btree *b,
378 struct bset_tree *t,
379 struct bkey_packed *k)
380{
381 struct btree_node_iter_set *set;
382
383 btree_node_iter_for_each(iter, set)
384 if (set->end == t->end_offset) {
385 set->k = __btree_node_key_to_offset(b, k);
386 bch2_btree_node_iter_sort(iter, b);
387 return;
388 }
389
390 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
391}
392
393static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
394 struct btree *b,
395 struct bkey_packed *where)
396{
397 struct btree_path_level *l = &path->l[b->c.level];
398
399 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
400 return;
401
402 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
403 bch2_btree_node_iter_advance(&l->iter, l->b);
404}
405
406void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
407 struct btree *b,
408 struct bkey_packed *where)
409{
410 struct btree_path *path;
411 unsigned i;
412
413 trans_for_each_path_with_node(trans, b, path, i) {
414 __bch2_btree_path_fix_key_modified(path, b, where);
415 bch2_btree_path_verify_level(trans, path, b->c.level);
416 }
417}
418
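/*
 * Fix up a node iterator after keys have been inserted or overwritten within
 * one bset: adjust that bset's offsets for the change in size (re-adding the
 * bset if the iterator had dropped it and the new key is at or after
 * path->pos), then re-sort. For interior nodes, also rewind past deleted keys
 * the iterator may now have skipped:
 */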
419static void __bch2_btree_node_iter_fix(struct btree_path *path,
420 struct btree *b,
421 struct btree_node_iter *node_iter,
422 struct bset_tree *t,
423 struct bkey_packed *where,
424 unsigned clobber_u64s,
425 unsigned new_u64s)
426{
427 const struct bkey_packed *end = btree_bkey_last(b, t);
428 struct btree_node_iter_set *set;
429 unsigned offset = __btree_node_key_to_offset(b, where);
430 int shift = new_u64s - clobber_u64s;
431 unsigned old_end = t->end_offset - shift;
432 unsigned orig_iter_pos = node_iter->data[0].k;
433 bool iter_current_key_modified =
434 orig_iter_pos >= offset &&
435 orig_iter_pos <= offset + clobber_u64s;
436
437 btree_node_iter_for_each(node_iter, set)
438 if (set->end == old_end)
439 goto found;
440
441 /* didn't find the bset in the iterator - might have to re-add it: */
442 if (new_u64s &&
443 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
444 bch2_btree_node_iter_push(node_iter, b, where, end);
445 goto fixup_done;
446 } else {
447 /* Iterator is after key that changed */
448 return;
449 }
450found:
451 set->end = t->end_offset;
452
453 /* Iterator hasn't gotten to the key that changed yet: */
454 if (set->k < offset)
455 return;
456
457 if (new_u64s &&
458 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
459 set->k = offset;
460 } else if (set->k < offset + clobber_u64s) {
461 set->k = offset + new_u64s;
462 if (set->k == set->end)
463 bch2_btree_node_iter_set_drop(node_iter, set);
464 } else {
465 /* Iterator is after key that changed */
466 set->k = (int) set->k + shift;
467 return;
468 }
469
470 bch2_btree_node_iter_sort(node_iter, b);
471fixup_done:
472 if (node_iter->data[0].k != orig_iter_pos)
473 iter_current_key_modified = true;
474
475 /*
476 * When a new key is added, and the node iterator now points to that
477 * key, the iterator might have skipped past deleted keys that should
478 * come after the key the iterator now points to. We have to rewind to
479 * before those deleted keys - otherwise
480 * bch2_btree_node_iter_prev_all() breaks:
481 */
482 if (!bch2_btree_node_iter_end(node_iter) &&
483 iter_current_key_modified &&
484 b->c.level) {
485 struct bkey_packed *k, *k2, *p;
486
487 k = bch2_btree_node_iter_peek_all(node_iter, b);
488
489 for_each_bset(b, t) {
490 bool set_pos = false;
491
492 if (node_iter->data[0].end == t->end_offset)
493 continue;
494
495 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
496
497 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
498 bkey_iter_cmp(b, k, p) < 0) {
499 k2 = p;
500 set_pos = true;
501 }
502
503 if (set_pos)
504 btree_node_iter_set_set_pos(node_iter,
505 b, t, k2);
506 }
507 }
508}
509
510void bch2_btree_node_iter_fix(struct btree_trans *trans,
511 struct btree_path *path,
512 struct btree *b,
513 struct btree_node_iter *node_iter,
514 struct bkey_packed *where,
515 unsigned clobber_u64s,
516 unsigned new_u64s)
517{
518 struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
519 struct btree_path *linked;
520 unsigned i;
521
522 if (node_iter != &path->l[b->c.level].iter) {
523 __bch2_btree_node_iter_fix(path, b, node_iter, t,
524 where, clobber_u64s, new_u64s);
525
526 if (bch2_debug_check_iterators)
527 bch2_btree_node_iter_verify(node_iter, b);
528 }
529
530 trans_for_each_path_with_node(trans, b, linked, i) {
531 __bch2_btree_node_iter_fix(linked, b,
532 &linked->l[b->c.level].iter, t,
533 where, clobber_u64s, new_u64s);
534 bch2_btree_path_verify_level(trans, linked, b->c.level);
535 }
536}
537
538/* Btree path level: pointer to a particular btree node and node iter */
539
540static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
541 struct btree_path_level *l,
542 struct bkey *u,
543 struct bkey_packed *k)
544{
545 if (unlikely(!k)) {
546 /*
547 * signal to bch2_btree_iter_peek_slot() that we're currently at
548 * a hole
549 */
550 u->type = KEY_TYPE_deleted;
551 return bkey_s_c_null;
552 }
553
554 return bkey_disassemble(l->b, k, u);
555}
556
557static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
558 struct btree_path_level *l,
559 struct bkey *u)
560{
561 return __btree_iter_unpack(c, l, u,
562 bch2_btree_node_iter_peek_all(&l->iter, l->b));
563}
564
565static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
566 struct btree_path *path,
567 struct btree_path_level *l,
568 struct bkey *u)
569{
570 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
571 bch2_btree_node_iter_peek(&l->iter, l->b));
572
573 path->pos = k.k ? k.k->p : l->b->key.k.p;
574 trans->paths_sorted = false;
575 bch2_btree_path_verify_level(trans, path, l - path->l);
576 return k;
577}
578
579static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
580 struct btree_path *path,
581 struct btree_path_level *l,
582 struct bkey *u)
583{
584 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
585 bch2_btree_node_iter_prev(&l->iter, l->b));
586
587 path->pos = k.k ? k.k->p : l->b->data->min_key;
588 trans->paths_sorted = false;
589 bch2_btree_path_verify_level(trans, path, l - path->l);
590 return k;
591}
592
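/*
 * Advance the node iterator until it points at a key >= path->pos, giving up
 * (returning false) after max_advance steps so the caller can reinitialize
 * the iterator instead:
 */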
593static inline bool btree_path_advance_to_pos(struct btree_path *path,
594 struct btree_path_level *l,
595 int max_advance)
596{
597 struct bkey_packed *k;
598 int nr_advanced = 0;
599
600 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
601 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
602 if (max_advance > 0 && nr_advanced >= max_advance)
603 return false;
604
605 bch2_btree_node_iter_advance(&l->iter, l->b);
606 nr_advanced++;
607 }
608
609 return true;
610}
611
612static inline void __btree_path_level_init(struct btree_path *path,
613 unsigned level)
614{
615 struct btree_path_level *l = &path->l[level];
616
617 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
618
619 /*
620 * Iterators to interior nodes should always be pointed at the first non
621 * whiteout:
622 */
623 if (level)
624 bch2_btree_node_iter_peek(&l->iter, l->b);
625}
626
627void bch2_btree_path_level_init(struct btree_trans *trans,
628 struct btree_path *path,
629 struct btree *b)
630{
631 BUG_ON(path->cached);
632
633 EBUG_ON(!btree_path_pos_in_node(path, b));
634
635 path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
636 path->l[b->c.level].b = b;
637 __btree_path_level_init(path, b->c.level);
638}
639
640/* Btree path: fixups after btree node updates: */
641
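/*
 * A node was replaced or reinitialized: re-read the old key/value for any
 * pending updates that land in that node (consulting the journal keys if
 * journal replay hasn't finished):
 */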
642static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
643{
644 struct bch_fs *c = trans->c;
645
646 trans_for_each_update(trans, i)
647 if (!i->cached &&
648 i->level == b->c.level &&
649 i->btree_id == b->c.btree_id &&
650 bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
651 bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
652 i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
653
654 if (unlikely(trans->journal_replay_not_finished)) {
655 struct bkey_i *j_k =
656 bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
657 i->k->k.p);
658
659 if (j_k) {
660 i->old_k = j_k->k;
661 i->old_v = &j_k->v;
662 }
663 }
664 }
665}
666
667/*
668 * A btree node is being replaced - update the iterator to point to the new
669 * node:
670 */
671void bch2_trans_node_add(struct btree_trans *trans,
672 struct btree_path *path,
673 struct btree *b)
674{
675 struct btree_path *prev;
676
677 BUG_ON(!btree_path_pos_in_node(path, b));
678
679 while ((prev = prev_btree_path(trans, path)) &&
680 btree_path_pos_in_node(prev, b))
681 path = prev;
682
683 for (;
684 path && btree_path_pos_in_node(path, b);
685 path = next_btree_path(trans, path))
686 if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
687 enum btree_node_locked_type t =
688 btree_lock_want(path, b->c.level);
689
690 if (t != BTREE_NODE_UNLOCKED) {
691 btree_node_unlock(trans, path, b->c.level);
692 six_lock_increment(&b->c.lock, (enum six_lock_type) t);
693 mark_btree_node_locked(trans, path, b->c.level, t);
694 }
695
696 bch2_btree_path_level_init(trans, path, b);
697 }
698
699 bch2_trans_revalidate_updates_in_node(trans, b);
700}
701
702void bch2_trans_node_drop(struct btree_trans *trans,
703 struct btree *b)
704{
705 struct btree_path *path;
706 unsigned i, level = b->c.level;
707
708 trans_for_each_path(trans, path, i)
709 if (path->l[level].b == b) {
710 btree_node_unlock(trans, path, level);
711 path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
712 }
713}
714
715/*
716 * A btree node has been modified in such a way as to invalidate iterators - fix
717 * them:
718 */
719void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
720{
721 struct btree_path *path;
722 unsigned i;
723
724 trans_for_each_path_with_node(trans, b, path, i)
725 __btree_path_level_init(path, b->c.level);
726
727 bch2_trans_revalidate_updates_in_node(trans, b);
728}
729
730/* Btree path: traverse, set_pos: */
731
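/*
 * Lock the root of the btree for this path, retrying if the root changed
 * before we got the lock. Returns 1 if the tree is shallower than the depth
 * the path wants - i.e. we're already at the end of the btree:
 */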
732static inline int btree_path_lock_root(struct btree_trans *trans,
733 struct btree_path *path,
734 unsigned depth_want,
735 unsigned long trace_ip)
736{
737 struct bch_fs *c = trans->c;
738 struct btree_root *r = bch2_btree_id_root(c, path->btree_id);
739 enum six_lock_type lock_type;
740 unsigned i;
741 int ret;
742
743 EBUG_ON(path->nodes_locked);
744
745 while (1) {
746 struct btree *b = READ_ONCE(r->b);
747 if (unlikely(!b)) {
748 BUG_ON(!r->error);
749 return r->error;
750 }
751
752 path->level = READ_ONCE(b->c.level);
753
754 if (unlikely(path->level < depth_want)) {
755 /*
756 * the root is at a lower depth than the depth we want:
757 * got to the end of the btree, or we're walking nodes
758 * greater than some depth and there are no nodes >=
759 * that depth
760 */
761 path->level = depth_want;
762 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
763 path->l[i].b = NULL;
764 return 1;
765 }
766
767 lock_type = __btree_lock_want(path, path->level);
768 ret = btree_node_lock(trans, path, &b->c,
769 path->level, lock_type, trace_ip);
770 if (unlikely(ret)) {
771 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
772 return ret;
773 BUG();
774 }
775
776 if (likely(b == READ_ONCE(r->b) &&
777 b->c.level == path->level &&
778 !race_fault())) {
779 for (i = 0; i < path->level; i++)
780 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
781 path->l[path->level].b = b;
782 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
783 path->l[i].b = NULL;
784
785 mark_btree_node_locked(trans, path, path->level,
786 (enum btree_node_locked_type) lock_type);
787 bch2_btree_path_level_init(trans, path, b);
788 return 0;
789 }
790
791 six_unlock_type(&b->c.lock, lock_type);
792 }
793}
794
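/*
 * Prefetch the child nodes pointed at by the next few keys in the parent, to
 * warm the btree node cache on the way down; we prefetch more aggressively
 * during recovery (before BCH_FS_started is set):
 */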
795noinline
796static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
797{
798 struct bch_fs *c = trans->c;
799 struct btree_path_level *l = path_l(path);
800 struct btree_node_iter node_iter = l->iter;
801 struct bkey_packed *k;
802 struct bkey_buf tmp;
803 unsigned nr = test_bit(BCH_FS_started, &c->flags)
804 ? (path->level > 1 ? 0 : 2)
805 : (path->level > 1 ? 1 : 16);
806 bool was_locked = btree_node_locked(path, path->level);
807 int ret = 0;
808
809 bch2_bkey_buf_init(&tmp);
810
811 while (nr-- && !ret) {
812 if (!bch2_btree_node_relock(trans, path, path->level))
813 break;
814
815 bch2_btree_node_iter_advance(&node_iter, l->b);
816 k = bch2_btree_node_iter_peek(&node_iter, l->b);
817 if (!k)
818 break;
819
820 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
821 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
822 path->level - 1);
823 }
824
825 if (!was_locked)
826 btree_node_unlock(trans, path, path->level);
827
828 bch2_bkey_buf_exit(&tmp, c);
829 return ret;
830}
831
832static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
833 struct btree_and_journal_iter *jiter)
834{
835 struct bch_fs *c = trans->c;
836 struct bkey_s_c k;
837 struct bkey_buf tmp;
838 unsigned nr = test_bit(BCH_FS_started, &c->flags)
839 ? (path->level > 1 ? 0 : 2)
840 : (path->level > 1 ? 1 : 16);
841 bool was_locked = btree_node_locked(path, path->level);
842 int ret = 0;
843
844 bch2_bkey_buf_init(&tmp);
845
846 jiter->fail_if_too_many_whiteouts = true;
847
848 while (nr-- && !ret) {
849 if (!bch2_btree_node_relock(trans, path, path->level))
850 break;
851
852 bch2_btree_and_journal_iter_advance(jiter);
853 k = bch2_btree_and_journal_iter_peek(jiter);
854 if (!k.k)
855 break;
856
857 bch2_bkey_buf_reassemble(&tmp, c, k);
858 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
859 path->level - 1);
860 }
861
862 if (!was_locked)
863 btree_node_unlock(trans, path, path->level);
864
865 bch2_bkey_buf_exit(&tmp, c);
866 return ret;
867}
868
869static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
870 struct btree_path *path,
871 unsigned plevel, struct btree *b)
872{
873 struct btree_path_level *l = &path->l[plevel];
874 bool locked = btree_node_locked(path, plevel);
875 struct bkey_packed *k;
876 struct bch_btree_ptr_v2 *bp;
877
878 if (!bch2_btree_node_relock(trans, path, plevel))
879 return;
880
881 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
882 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
883
884 bp = (void *) bkeyp_val(&l->b->format, k);
885 bp->mem_ptr = (unsigned long)b;
886
887 if (!locked)
888 btree_node_unlock(trans, path, plevel);
889}
890
891static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
892 struct btree_path *path,
893 unsigned flags,
894 struct bkey_buf *out)
895{
896 struct bch_fs *c = trans->c;
897 struct btree_path_level *l = path_l(path);
898 struct btree_and_journal_iter jiter;
899 struct bkey_s_c k;
900 int ret = 0;
901
902 __bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
903
904 k = bch2_btree_and_journal_iter_peek(&jiter);
905 if (!k.k) {
906 struct printbuf buf = PRINTBUF;
907
908 prt_str(&buf, "node not found at pos ");
909 bch2_bpos_to_text(&buf, path->pos);
910 prt_str(&buf, " at btree ");
911 bch2_btree_pos_to_text(&buf, c, l->b);
912
913 ret = bch2_fs_topology_error(c, "%s", buf.buf);
914 printbuf_exit(&buf);
915 goto err;
916 }
917
918 bch2_bkey_buf_reassemble(out, c, k);
919
920 if ((flags & BTREE_ITER_prefetch) &&
921 c->opts.btree_node_prefetch)
922 ret = btree_path_prefetch_j(trans, path, &jiter);
923
924err:
925 bch2_btree_and_journal_iter_exit(&jiter);
926 return ret;
927}
928
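/*
 * Descend one level: read the child pointer at the node iterator's current
 * position (overlaying journal keys if replay isn't finished), get and lock
 * the child node, and reinitialize the path at the new level:
 */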
929static __always_inline int btree_path_down(struct btree_trans *trans,
930 struct btree_path *path,
931 unsigned flags,
932 unsigned long trace_ip)
933{
934 struct bch_fs *c = trans->c;
935 struct btree_path_level *l = path_l(path);
936 struct btree *b;
937 unsigned level = path->level - 1;
938 enum six_lock_type lock_type = __btree_lock_want(path, level);
939 struct bkey_buf tmp;
940 int ret;
941
942 EBUG_ON(!btree_node_locked(path, path->level));
943
944 bch2_bkey_buf_init(&tmp);
945
946 if (unlikely(trans->journal_replay_not_finished)) {
947 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
948 if (ret)
949 goto err;
950 } else {
951 struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
952 if (!k) {
953 struct printbuf buf = PRINTBUF;
954
955 prt_str(&buf, "node not found at pos ");
956 bch2_bpos_to_text(&buf, path->pos);
957 prt_str(&buf, " within parent node ");
958 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
959
960 bch2_fs_fatal_error(c, "%s", buf.buf);
961 printbuf_exit(&buf);
962 ret = -BCH_ERR_btree_need_topology_repair;
963 goto err;
964 }
965
966 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
967
968 if ((flags & BTREE_ITER_prefetch) &&
969 c->opts.btree_node_prefetch) {
970 ret = btree_path_prefetch(trans, path);
971 if (ret)
972 goto err;
973 }
974 }
975
976 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
977 ret = PTR_ERR_OR_ZERO(b);
978 if (unlikely(ret))
979 goto err;
980
981 if (likely(!trans->journal_replay_not_finished &&
982 tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
983 unlikely(b != btree_node_mem_ptr(tmp.k)))
984 btree_node_mem_ptr_set(trans, path, level + 1, b);
985
986 if (btree_node_read_locked(path, level + 1))
987 btree_node_unlock(trans, path, level + 1);
988
989 mark_btree_node_locked(trans, path, level,
990 (enum btree_node_locked_type) lock_type);
991 path->level = level;
992 bch2_btree_path_level_init(trans, path, b);
993
994 bch2_btree_path_verify_locks(path);
995err:
996 bch2_bkey_buf_exit(&tmp, c);
997 return ret;
998}
999
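/*
 * Unlock everything and retraverse every path in sorted (lock ordering)
 * order, retrying from scratch on further transaction restarts or memory
 * allocation failure:
 */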
1000static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1001{
1002 struct bch_fs *c = trans->c;
1003 struct btree_path *path;
1004 unsigned long trace_ip = _RET_IP_;
1005 unsigned i;
1006 int ret = 0;
1007
1008 if (trans->in_traverse_all)
1009 return -BCH_ERR_transaction_restart_in_traverse_all;
1010
1011 trans->in_traverse_all = true;
1012retry_all:
1013 trans->restarted = 0;
1014 trans->last_restarted_ip = 0;
1015
1016 trans_for_each_path(trans, path, i)
1017 path->should_be_locked = false;
1018
1019 btree_trans_sort_paths(trans);
1020
1021 bch2_trans_unlock(trans);
1022 cond_resched();
1023 trans_set_locked(trans, false);
1024
1025 if (unlikely(trans->memory_allocation_failure)) {
1026 struct closure cl;
1027
1028 closure_init_stack(&cl);
1029
1030 do {
1031 ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1032 closure_sync(&cl);
1033 } while (ret);
1034 }
1035
1036 /* Now, redo traversals in correct order: */
1037 i = 0;
1038 while (i < trans->nr_sorted) {
1039 btree_path_idx_t idx = trans->sorted[i];
1040
1041 /*
1042 * Traversing a path can cause another path to be added at about
1043 * the same position:
1044 */
1045 if (trans->paths[idx].uptodate) {
1046 __btree_path_get(trans, &trans->paths[idx], false);
1047 ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
1048 __btree_path_put(trans, &trans->paths[idx], false);
1049
1050 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1051 bch2_err_matches(ret, ENOMEM))
1052 goto retry_all;
1053 if (ret)
1054 goto err;
1055 } else {
1056 i++;
1057 }
1058 }
1059
1060 /*
1061 * We used to assert that all paths had been traversed here
1062 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1063 * path->should_be_locked is not set yet, we might have unlocked and
1064 * then failed to relock a path - that's fine.
1065 */
1066err:
1067 bch2_btree_cache_cannibalize_unlock(trans);
1068
1069 trans->in_traverse_all = false;
1070
1071 trace_and_count(c, trans_traverse_all, trans, trace_ip);
1072 return ret;
1073}
1074
1075static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1076 unsigned l, int check_pos)
1077{
1078 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1079 return false;
1080 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1081 return false;
1082 return true;
1083}
1084
1085static inline bool btree_path_good_node(struct btree_trans *trans,
1086 struct btree_path *path,
1087 unsigned l, int check_pos)
1088{
1089 return is_btree_node(path, l) &&
1090 bch2_btree_node_relock(trans, path, l) &&
1091 btree_path_check_pos_in_node(path, l, check_pos);
1092}
1093
1094static void btree_path_set_level_down(struct btree_trans *trans,
1095 struct btree_path *path,
1096 unsigned new_level)
1097{
1098 unsigned l;
1099
1100 path->level = new_level;
1101
1102 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1103 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1104 btree_node_unlock(trans, path, l);
1105
1106 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1107 bch2_btree_path_verify(trans, path);
1108}
1109
1110static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1111 struct btree_path *path,
1112 int check_pos)
1113{
1114 unsigned i, l = path->level;
1115again:
1116 while (btree_path_node(path, l) &&
1117 !btree_path_good_node(trans, path, l, check_pos))
1118 __btree_path_set_level_up(trans, path, l++);
1119
1120 /* If we need intent locks, take them too: */
1121 for (i = l + 1;
1122 i < path->locks_want && btree_path_node(path, i);
1123 i++)
1124 if (!bch2_btree_node_relock(trans, path, i)) {
1125 while (l <= i)
1126 __btree_path_set_level_up(trans, path, l++);
1127 goto again;
1128 }
1129
1130 return l;
1131}
1132
1133static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1134 struct btree_path *path,
1135 int check_pos)
1136{
1137 return likely(btree_node_locked(path, path->level) &&
1138 btree_path_check_pos_in_node(path, path->level, check_pos))
1139 ? path->level
1140 : __btree_path_up_until_good_node(trans, path, check_pos);
1141}
1142
1143/*
1144 * This is the main state machine for walking down the btree - walks down to a
1145 * specified depth
1146 *
1147 * Returns 0 on success, -EIO on error (error reading in a btree node).
1148 *
1149 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1150 * stashed in the iterator and returned from bch2_trans_exit().
1151 */
1152int bch2_btree_path_traverse_one(struct btree_trans *trans,
1153 btree_path_idx_t path_idx,
1154 unsigned flags,
1155 unsigned long trace_ip)
1156{
1157 struct btree_path *path = &trans->paths[path_idx];
1158 unsigned depth_want = path->level;
1159 int ret = -((int) trans->restarted);
1160
1161 if (unlikely(ret))
1162 goto out;
1163
1164 if (unlikely(!trans->srcu_held))
1165 bch2_trans_srcu_lock(trans);
1166
1167 trace_btree_path_traverse_start(trans, path);
1168
1169 /*
1170 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1171 * and re-traverse the path without a transaction restart:
1172 */
1173 if (path->should_be_locked) {
1174 ret = bch2_btree_path_relock(trans, path, trace_ip);
1175 goto out;
1176 }
1177
1178 if (path->cached) {
1179 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1180 goto out;
1181 }
1182
1183 path = &trans->paths[path_idx];
1184
1185 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1186 goto out_uptodate;
1187
1188 path->level = btree_path_up_until_good_node(trans, path, 0);
1189 unsigned max_level = path->level;
1190
1191 EBUG_ON(btree_path_node(path, path->level) &&
1192 !btree_node_locked(path, path->level));
1193
1194 /*
1195 * Note: path->nodes[path->level] may be temporarily NULL here - that
1196 * would indicate to other code that we got to the end of the btree,
1197 * here it indicates that relocking the root failed - it's critical that
1198 * btree_path_lock_root() comes next and that it can't fail
1199 */
1200 while (path->level > depth_want) {
1201 ret = btree_path_node(path, path->level)
1202 ? btree_path_down(trans, path, flags, trace_ip)
1203 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1204 if (unlikely(ret)) {
1205 if (ret == 1) {
1206 /*
1207 * No nodes at this level - got to the end of
1208 * the btree:
1209 */
1210 ret = 0;
1211 goto out;
1212 }
1213
1214 __bch2_btree_path_unlock(trans, path);
1215 path->level = depth_want;
1216 path->l[path->level].b = ERR_PTR(ret);
1217 goto out;
1218 }
1219 }
1220
1221 if (unlikely(max_level > path->level)) {
1222 struct btree_path *linked;
1223 unsigned iter;
1224
1225 trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
1226 for (unsigned j = path->level + 1; j < max_level; j++)
1227 linked->l[j] = path->l[j];
1228 }
1229
1230out_uptodate:
1231 path->uptodate = BTREE_ITER_UPTODATE;
1232 trace_btree_path_traverse_end(trans, path);
1233out:
1234 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1235 panic("ret %s (%i) trans->restarted %s (%i)\n",
1236 bch2_err_str(ret), ret,
1237 bch2_err_str(trans->restarted), trans->restarted);
1238 bch2_btree_path_verify(trans, path);
1239 return ret;
1240}
1241
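/*
 * Duplicating a path: copy everything from the pos field onwards, and take an
 * extra reference on each node lock the source path holds:
 */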
1242static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1243 struct btree_path *src)
1244{
1245 unsigned i, offset = offsetof(struct btree_path, pos);
1246
1247 memcpy((void *) dst + offset,
1248 (void *) src + offset,
1249 sizeof(struct btree_path) - offset);
1250
1251 for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1252 unsigned t = btree_node_locked_type(dst, i);
1253
1254 if (t != BTREE_NODE_UNLOCKED)
1255 six_lock_increment(&dst->l[i].b->c.lock, t);
1256 }
1257}
1258
1259static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
1260 bool intent, unsigned long ip)
1261{
1262 btree_path_idx_t new = btree_path_alloc(trans, src);
1263 btree_path_copy(trans, trans->paths + new, trans->paths + src);
1264 __btree_path_get(trans, trans->paths + new, intent);
1265#ifdef TRACK_PATH_ALLOCATED
1266 trans->paths[new].ip_allocated = ip;
1267#endif
1268 return new;
1269}
1270
1271__flatten
1272btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
1273 btree_path_idx_t path, bool intent, unsigned long ip)
1274{
1275 struct btree_path *old = trans->paths + path;
1276 __btree_path_put(trans, trans->paths + path, intent);
1277 path = btree_path_clone(trans, path, intent, ip);
1278 trace_btree_path_clone(trans, old, trans->paths + path);
1279 trans->paths[path].preserve = false;
1280 return path;
1281}
1282
1283btree_path_idx_t __must_check
1284__bch2_btree_path_set_pos(struct btree_trans *trans,
1285 btree_path_idx_t path_idx, struct bpos new_pos,
1286 bool intent, unsigned long ip)
1287{
1288 int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1289
1290 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1291 EBUG_ON(!trans->paths[path_idx].ref);
1292
1293 trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
1294
1295 path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
1296
1297 struct btree_path *path = trans->paths + path_idx;
1298 path->pos = new_pos;
1299 trans->paths_sorted = false;
1300
1301 if (unlikely(path->cached)) {
1302 btree_node_unlock(trans, path, 0);
1303 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1304 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1305 goto out;
1306 }
1307
1308 unsigned level = btree_path_up_until_good_node(trans, path, cmp);
1309
1310 if (btree_path_node(path, level)) {
1311 struct btree_path_level *l = &path->l[level];
1312
1313 BUG_ON(!btree_node_locked(path, level));
1314 /*
1315 * We might have to skip over many keys, or just a few: try
1316 * advancing the node iterator, and if we have to skip over too
1317 * many keys just reinit it (or if we're rewinding, since that
1318 * is expensive).
1319 */
1320 if (cmp < 0 ||
1321 !btree_path_advance_to_pos(path, l, 8))
1322 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1323
1324 /*
1325 * Iterators to interior nodes should always be pointed at the first non
1326 * whiteout:
1327 */
1328 if (unlikely(level))
1329 bch2_btree_node_iter_peek(&l->iter, l->b);
1330 }
1331
1332 if (unlikely(level != path->level)) {
1333 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1334 __bch2_btree_path_unlock(trans, path);
1335 }
1336out:
1337 bch2_btree_path_verify(trans, path);
1338 return path_idx;
1339}
1340
1341/* Btree path: main interface: */
1342
1343static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1344{
1345 struct btree_path *sib;
1346
1347 sib = prev_btree_path(trans, path);
1348 if (sib && !btree_path_cmp(sib, path))
1349 return sib;
1350
1351 sib = next_btree_path(trans, path);
1352 if (sib && !btree_path_cmp(sib, path))
1353 return sib;
1354
1355 return NULL;
1356}
1357
1358static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1359{
1360 struct btree_path *sib;
1361
1362 sib = prev_btree_path(trans, path);
1363 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1364 return sib;
1365
1366 sib = next_btree_path(trans, path);
1367 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1368 return sib;
1369
1370 return NULL;
1371}
1372
1373static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
1374{
1375 __bch2_btree_path_unlock(trans, trans->paths + path);
1376 btree_path_list_remove(trans, trans->paths + path);
1377 __clear_bit(path, trans->paths_allocated);
1378}
1379
1380static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
1381{
1382 unsigned l = path->level;
1383
1384 do {
1385 if (!btree_path_node(path, l))
1386 break;
1387
1388 if (!is_btree_node(path, l))
1389 return false;
1390
1391 if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
1392 return false;
1393
1394 l++;
1395 } while (l < path->locks_want);
1396
1397 return true;
1398}
1399
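/*
 * Drop a reference to a path: when the last reference goes away the path is
 * freed if a duplicate path (at the same position or on the same node) can
 * inherit its preserve/should_be_locked flags, or if it doesn't need to be
 * kept around at all:
 */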
1400void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1401{
1402 struct btree_path *path = trans->paths + path_idx, *dup;
1403
1404 if (!__btree_path_put(trans, path, intent))
1405 return;
1406
1407 dup = path->preserve
1408 ? have_path_at_pos(trans, path)
1409 : have_node_at_pos(trans, path);
1410
1411 trace_btree_path_free(trans, path_idx, dup);
1412
1413 if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1414 return;
1415
1416 if (path->should_be_locked && !trans->restarted) {
1417 if (!dup)
1418 return;
1419
1420 if (!(trans->locked
1421 ? bch2_btree_path_relock_norestart(trans, dup)
1422 : bch2_btree_path_can_relock(trans, dup)))
1423 return;
1424 }
1425
1426 if (dup) {
1427 dup->preserve |= path->preserve;
1428 dup->should_be_locked |= path->should_be_locked;
1429 }
1430
1431 __bch2_path_free(trans, path_idx);
1432}
1433
1434static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1435 bool intent)
1436{
1437 if (!__btree_path_put(trans, trans->paths + path, intent))
1438 return;
1439
1440 __bch2_path_free(trans, path);
1441}
1442
1443void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1444{
1445 panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1446 trans->restart_count, restart_count,
1447 (void *) trans->last_begin_ip);
1448}
1449
1450static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1451{
1452#ifdef CONFIG_BCACHEFS_DEBUG
1453 struct printbuf buf = PRINTBUF;
1454 bch2_prt_backtrace(&buf, &trans->last_restarted_trace);
1455 panic("in transaction restart: %s, last restarted by\n%s",
1456 bch2_err_str(trans->restarted),
1457 buf.buf);
1458#else
1459 panic("in transaction restart: %s, last restarted by %pS\n",
1460 bch2_err_str(trans->restarted),
1461 (void *) trans->last_restarted_ip);
1462#endif
1463}
1464
1465void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans)
1466{
1467 if (trans->restarted)
1468 bch2_trans_in_restart_error(trans);
1469
1470 if (!trans->locked)
1471 panic("trans should be locked, unlocked by %pS\n",
1472 (void *) trans->last_unlock_ip);
1473
1474 BUG();
1475}
1476
1477noinline __cold
1478void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1479{
1480 prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
1481 trans->nr_updates, trans->fn, trans->journal_res.seq);
1482 printbuf_indent_add(buf, 2);
1483
1484 trans_for_each_update(trans, i) {
1485 struct bkey_s_c old = { &i->old_k, i->old_v };
1486
1487 prt_str(buf, "update: btree=");
1488 bch2_btree_id_to_text(buf, i->btree_id);
1489 prt_printf(buf, " cached=%u %pS\n",
1490 i->cached,
1491 (void *) i->ip_allocated);
1492
1493 prt_printf(buf, " old ");
1494 bch2_bkey_val_to_text(buf, trans->c, old);
1495 prt_newline(buf);
1496
1497 prt_printf(buf, " new ");
1498 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1499 prt_newline(buf);
1500 }
1501
1502 for (struct jset_entry *e = trans->journal_entries;
1503 e != btree_trans_journal_entries_top(trans);
1504 e = vstruct_next(e))
1505 bch2_journal_entry_to_text(buf, trans->c, e);
1506
1507 printbuf_indent_sub(buf, 2);
1508}
1509
1510noinline __cold
1511void bch2_dump_trans_updates(struct btree_trans *trans)
1512{
1513 struct printbuf buf = PRINTBUF;
1514
1515 bch2_trans_updates_to_text(&buf, trans);
1516 bch2_print_str(trans->c, buf.buf);
1517 printbuf_exit(&buf);
1518}
1519
1520static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1521{
1522 struct btree_path *path = trans->paths + path_idx;
1523
1524 prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
1525 path_idx, path->ref, path->intent_ref,
1526 path->preserve ? 'P' : ' ',
1527 path->should_be_locked ? 'S' : ' ',
1528 path->cached ? 'C' : 'B');
1529 bch2_btree_id_level_to_text(out, path->btree_id, path->level);
1530 prt_str(out, " pos ");
1531 bch2_bpos_to_text(out, path->pos);
1532
1533 if (!path->cached && btree_node_locked(path, path->level)) {
1534 prt_char(out, ' ');
1535 struct btree *b = path_l(path)->b;
1536 bch2_bpos_to_text(out, b->data->min_key);
1537 prt_char(out, '-');
1538 bch2_bpos_to_text(out, b->key.k.p);
1539 }
1540
1541#ifdef TRACK_PATH_ALLOCATED
1542 prt_printf(out, " %pS", (void *) path->ip_allocated);
1543#endif
1544}
1545
1546static const char *btree_node_locked_str(enum btree_node_locked_type t)
1547{
1548 switch (t) {
1549 case BTREE_NODE_UNLOCKED:
1550 return "unlocked";
1551 case BTREE_NODE_READ_LOCKED:
1552 return "read";
1553 case BTREE_NODE_INTENT_LOCKED:
1554 return "intent";
1555 case BTREE_NODE_WRITE_LOCKED:
1556 return "write";
1557 default:
1558 return NULL;
1559 }
1560}
1561
1562void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1563{
1564 bch2_btree_path_to_text_short(out, trans, path_idx);
1565
1566 struct btree_path *path = trans->paths + path_idx;
1567
1568 prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
1569 prt_newline(out);
1570
1571 printbuf_indent_add(out, 2);
1572 for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
1573 prt_printf(out, "l=%u locks %s seq %u node ", l,
1574 btree_node_locked_str(btree_node_locked_type(path, l)),
1575 path->l[l].lock_seq);
1576
1577 int ret = PTR_ERR_OR_ZERO(path->l[l].b);
1578 if (ret)
1579 prt_str(out, bch2_err_str(ret));
1580 else
1581 prt_printf(out, "%px", path->l[l].b);
1582 prt_newline(out);
1583 }
1584 printbuf_indent_sub(out, 2);
1585}
1586
1587static noinline __cold
1588void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1589 bool nosort)
1590{
1591 struct trans_for_each_path_inorder_iter iter;
1592
1593 if (!nosort)
1594 btree_trans_sort_paths(trans);
1595
1596 trans_for_each_path_idx_inorder(trans, iter) {
1597 bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1598 prt_newline(out);
1599 }
1600}
1601
1602noinline __cold
1603void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1604{
1605 __bch2_trans_paths_to_text(out, trans, false);
1606}
1607
1608static noinline __cold
1609void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1610{
1611 struct printbuf buf = PRINTBUF;
1612
1613 __bch2_trans_paths_to_text(&buf, trans, nosort);
1614 bch2_trans_updates_to_text(&buf, trans);
1615
1616 bch2_print_str(trans->c, buf.buf);
1617 printbuf_exit(&buf);
1618}
1619
1620noinline __cold
1621void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1622{
1623 __bch2_dump_trans_paths_updates(trans, false);
1624}
1625
1626noinline __cold
1627static void bch2_trans_update_max_paths(struct btree_trans *trans)
1628{
1629 struct btree_transaction_stats *s = btree_trans_stats(trans);
1630 struct printbuf buf = PRINTBUF;
1631 size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1632
1633 bch2_trans_paths_to_text(&buf, trans);
1634
1635 if (!buf.allocation_failure) {
1636 mutex_lock(&s->lock);
1637 if (nr > s->nr_max_paths) {
1638 s->nr_max_paths = nr;
1639 swap(s->max_paths_text, buf.buf);
1640 }
1641 mutex_unlock(&s->lock);
1642 }
1643
1644 printbuf_exit(&buf);
1645
1646 trans->nr_paths_max = nr;
1647}
1648
1649noinline __cold
1650int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1651{
1652 if (trace_trans_restart_too_many_iters_enabled()) {
1653 struct printbuf buf = PRINTBUF;
1654
1655 bch2_trans_paths_to_text(&buf, trans);
1656 trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1657 printbuf_exit(&buf);
1658 }
1659
1660 count_event(trans->c, trans_restart_too_many_iters);
1661
1662 return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1663}
1664
1665static noinline void btree_path_overflow(struct btree_trans *trans)
1666{
1667 bch2_dump_trans_paths_updates(trans);
1668 bch_err(trans->c, "trans path overflow");
1669}
1670
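/*
 * Grow the path arrays: double the size and reallocate the allocated bitmap,
 * paths, sorted indices and updates as a single allocation, published with
 * rcu_assign_pointer() so lockless readers remain safe:
 */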
1671static noinline void btree_paths_realloc(struct btree_trans *trans)
1672{
1673 unsigned nr = trans->nr_paths * 2;
1674
1675 void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1676 sizeof(struct btree_trans_paths) +
1677 nr * sizeof(struct btree_path) +
1678 nr * sizeof(btree_path_idx_t) + 8 +
1679 nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1680
1681 unsigned long *paths_allocated = p;
1682 memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1683 p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1684
1685 p += sizeof(struct btree_trans_paths);
1686 struct btree_path *paths = p;
1687 *trans_paths_nr(paths) = nr;
1688 memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1689 p += nr * sizeof(struct btree_path);
1690
1691 btree_path_idx_t *sorted = p;
1692 memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1693 p += nr * sizeof(btree_path_idx_t) + 8;
1694
1695 struct btree_insert_entry *updates = p;
1696 memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1697
1698 unsigned long *old = trans->paths_allocated;
1699
1700 rcu_assign_pointer(trans->paths_allocated, paths_allocated);
1701 rcu_assign_pointer(trans->paths, paths);
1702 rcu_assign_pointer(trans->sorted, sorted);
1703 rcu_assign_pointer(trans->updates, updates);
1704
1705 trans->nr_paths = nr;
1706
1707 if (old != trans->_paths_allocated)
1708 kfree_rcu_mightsleep(old);
1709}
1710
1711static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1712 btree_path_idx_t pos)
1713{
1714 btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1715
1716 if (unlikely(idx == trans->nr_paths)) {
1717 if (trans->nr_paths == BTREE_ITER_MAX) {
1718 btree_path_overflow(trans);
1719 return 0;
1720 }
1721
1722 btree_paths_realloc(trans);
1723 }
1724
1725 /*
1726 * Do this before marking the new path as allocated, since it won't be
1727 * initialized yet:
1728 */
1729 if (unlikely(idx > trans->nr_paths_max))
1730 bch2_trans_update_max_paths(trans);
1731
1732 __set_bit(idx, trans->paths_allocated);
1733
1734 struct btree_path *path = &trans->paths[idx];
1735 path->ref = 0;
1736 path->intent_ref = 0;
1737 path->nodes_locked = 0;
1738
1739 btree_path_list_add(trans, pos, idx);
1740 trans->paths_sorted = false;
1741 return idx;
1742}
1743
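/*
 * Get a path to the given position, reusing (and taking a reference on) an
 * existing path with the same btree/cached/level if we have one, otherwise
 * allocating a new one; locks_want is upgraded if the caller asks for more
 * locks than the existing path has:
 */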
1744btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1745 enum btree_id btree_id, struct bpos pos,
1746 unsigned locks_want, unsigned level,
1747 unsigned flags, unsigned long ip)
1748{
1749 struct btree_path *path;
1750 bool cached = flags & BTREE_ITER_cached;
1751 bool intent = flags & BTREE_ITER_intent;
1752 struct trans_for_each_path_inorder_iter iter;
1753 btree_path_idx_t path_pos = 0, path_idx;
1754
1755 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1756 bch2_trans_verify_locks(trans);
1757
1758 btree_trans_sort_paths(trans);
1759
1760 trans_for_each_path_inorder(trans, path, iter) {
1761 if (__btree_path_cmp(path,
1762 btree_id,
1763 cached,
1764 pos,
1765 level) > 0)
1766 break;
1767
1768 path_pos = iter.path_idx;
1769 }
1770
1771 if (path_pos &&
1772 trans->paths[path_pos].cached == cached &&
1773 trans->paths[path_pos].btree_id == btree_id &&
1774 trans->paths[path_pos].level == level) {
1775 trace_btree_path_get(trans, trans->paths + path_pos, &pos);
1776
1777 __btree_path_get(trans, trans->paths + path_pos, intent);
1778 path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1779 path = trans->paths + path_idx;
1780 } else {
1781 path_idx = btree_path_alloc(trans, path_pos);
1782 path = trans->paths + path_idx;
1783
1784 __btree_path_get(trans, path, intent);
1785 path->pos = pos;
1786 path->btree_id = btree_id;
1787 path->cached = cached;
1788 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1789 path->should_be_locked = false;
1790 path->level = level;
1791 path->locks_want = locks_want;
1792 path->nodes_locked = 0;
1793 for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1794 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
1795#ifdef TRACK_PATH_ALLOCATED
1796 path->ip_allocated = ip;
1797#endif
1798 trans->paths_sorted = false;
1799
1800 trace_btree_path_alloc(trans, path);
1801 }
1802
1803 if (!(flags & BTREE_ITER_nopreserve))
1804 path->preserve = true;
1805
1806 if (path->intent_ref)
1807 locks_want = max(locks_want, level + 1);
1808
1809 /*
1810 * If the path has locks_want greater than requested, we don't downgrade
1811 * it here - on transaction restart because btree node split needs to
1812 * upgrade locks, we might be putting/getting the iterator again.
1813 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1814 * a successful transaction commit.
1815 */
1816
1817 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1818 if (locks_want > path->locks_want)
1819 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1820
1821 return path_idx;
1822}
1823
1824btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1825 enum btree_id btree_id,
1826 unsigned level,
1827 struct bpos pos)
1828{
1829 btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1830 BTREE_ITER_nopreserve|
1831 BTREE_ITER_intent, _RET_IP_);
1832 path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1833
1834 struct btree_path *path = trans->paths + path_idx;
1835 bch2_btree_path_downgrade(trans, path);
1836 __bch2_btree_path_unlock(trans, path);
1837 return path_idx;
1838}
1839
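/*
 * Return the key the path currently points at; if there's no key at exactly
 * path->pos, return a deleted key at path->pos with a NULL value to indicate
 * a hole. For key cache paths, the key comes from the bkey_cached:
 */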
1840struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1841{
1842
1843 struct btree_path_level *l = path_l(path);
1844 struct bkey_packed *_k;
1845 struct bkey_s_c k;
1846
1847 if (unlikely(!l->b))
1848 return bkey_s_c_null;
1849
1850 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1851 EBUG_ON(!btree_node_locked(path, path->level));
1852
1853 if (!path->cached) {
1854 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1855 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1856
1857 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1858
1859 if (!k.k || !bpos_eq(path->pos, k.k->p))
1860 goto hole;
1861 } else {
1862 struct bkey_cached *ck = (void *) path->l[0].b;
1863 if (!ck)
1864 return bkey_s_c_null;
1865
1866 EBUG_ON(path->btree_id != ck->key.btree_id ||
1867 !bkey_eq(path->pos, ck->key.pos));
1868
1869 *u = ck->k->k;
1870 k = (struct bkey_s_c) { u, &ck->k->v };
1871 }
1872
1873 return k;
1874hole:
1875 bkey_init(u);
1876 u->p = path->pos;
1877 return (struct bkey_s_c) { u, NULL };
1878}
1879
1880void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
1881{
1882 struct btree_trans *trans = iter->trans;
1883
1884 if (!iter->path || trans->restarted)
1885 return;
1886
1887 struct btree_path *path = btree_iter_path(trans, iter);
1888 path->preserve = false;
1889 if (path->ref == 1)
1890 path->should_be_locked = false;
1891}
1892/* Btree iterators: */
1893
1894int __must_check
1895__bch2_btree_iter_traverse(struct btree_iter *iter)
1896{
1897 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1898}
1899
1900int __must_check
1901bch2_btree_iter_traverse(struct btree_iter *iter)
1902{
1903 struct btree_trans *trans = iter->trans;
1904 int ret;
1905
1906 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1907
1908 iter->path = bch2_btree_path_set_pos(trans, iter->path,
1909 btree_iter_search_key(iter),
1910 iter->flags & BTREE_ITER_intent,
1911 btree_iter_ip_allocated(iter));
1912
1913 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1914 if (ret)
1915 return ret;
1916
1917 struct btree_path *path = btree_iter_path(trans, iter);
1918 if (btree_path_node(path, path->level))
1919 btree_path_set_should_be_locked(trans, path);
1920 return 0;
1921}
1922
1923/* Iterate across nodes (leaf and interior nodes) */
1924
1925struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1926{
1927 struct btree_trans *trans = iter->trans;
1928 struct btree *b = NULL;
1929 int ret;
1930
1931 EBUG_ON(trans->paths[iter->path].cached);
1932 bch2_btree_iter_verify(iter);
1933
1934 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1935 if (ret)
1936 goto err;
1937
1938 struct btree_path *path = btree_iter_path(trans, iter);
1939 b = btree_path_node(path, path->level);
1940 if (!b)
1941 goto out;
1942
1943 BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1944
1945 bkey_init(&iter->k);
1946 iter->k.p = iter->pos = b->key.k.p;
1947
1948 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1949 iter->flags & BTREE_ITER_intent,
1950 btree_iter_ip_allocated(iter));
1951 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
1952out:
1953 bch2_btree_iter_verify_entry_exit(iter);
1954 bch2_btree_iter_verify(iter);
1955
1956 return b;
1957err:
1958 b = ERR_PTR(ret);
1959 goto out;
1960}
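
/*
 * Typical usage (an illustrative sketch, not part of the interface): walk the
 * nodes of a btree with
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter))
 *		...
 *
 * restarting the transaction when either call returns a transaction_restart
 * error, as bch2_btree_iter_peek_node_and_restart() below does for the first
 * call.
 */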
1961
1962/* Only kept for -tools */
1963struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1964{
1965 struct btree *b;
1966
1967 while (b = bch2_btree_iter_peek_node(iter),
1968 bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1969 bch2_trans_begin(iter->trans);
1970
1971 return b;
1972}
1973
1974struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1975{
1976 struct btree_trans *trans = iter->trans;
1977 struct btree *b = NULL;
1978 int ret;
1979
1980 EBUG_ON(trans->paths[iter->path].cached);
1981 bch2_trans_verify_not_unlocked_or_in_restart(trans);
1982 bch2_btree_iter_verify(iter);
1983
1984 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1985 if (ret)
1986 goto err;
1987
1989 struct btree_path *path = btree_iter_path(trans, iter);
1990
1991 /* already at end? */
1992 if (!btree_path_node(path, path->level))
1993 return NULL;
1994
1995 /* got to end? */
1996 if (!btree_path_node(path, path->level + 1)) {
1997 btree_path_set_level_up(trans, path);
1998 return NULL;
1999 }
2000
2001 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2002 __bch2_btree_path_unlock(trans, path);
2003 path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
2004 path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
2005 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2006 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
2007 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
2008 goto err;
2009 }
2010
2011 b = btree_path_node(path, path->level + 1);
2012
2013 if (bpos_eq(iter->pos, b->key.k.p)) {
2014 __btree_path_set_level_up(trans, path, path->level++);
2015 } else {
2016 if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
2017 btree_node_unlock(trans, path, path->level + 1);
2018
2019 /*
2020 * Haven't gotten to the end of the parent node: go back down to
2021 * the next child node
2022 */
2023 iter->path = bch2_btree_path_set_pos(trans, iter->path,
2024 bpos_successor(iter->pos),
2025 iter->flags & BTREE_ITER_intent,
2026 btree_iter_ip_allocated(iter));
2027
2028 path = btree_iter_path(trans, iter);
2029 btree_path_set_level_down(trans, path, iter->min_depth);
2030
2031 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2032 if (ret)
2033 goto err;
2034
2035 path = btree_iter_path(trans, iter);
2036 b = path->l[path->level].b;
2037 }
2038
2039 bkey_init(&iter->k);
2040 iter->k.p = iter->pos = b->key.k.p;
2041
2042 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2043 iter->flags & BTREE_ITER_intent,
2044 btree_iter_ip_allocated(iter));
2045 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2046 EBUG_ON(btree_iter_path(trans, iter)->uptodate);
2047out:
2048 bch2_btree_iter_verify_entry_exit(iter);
2049 bch2_btree_iter_verify(iter);
2050
2051 return b;
2052err:
2053 b = ERR_PTR(ret);
2054 goto out;
2055}
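
/*
 * Illustrative node-iteration sketch (not part of this file; real users go
 * through helpers such as for_each_btree_node()). Assumes a node iterator set
 * up with bch2_trans_node_iter_init(), and that transaction restarts are
 * handled by the caller:
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		... use b ...
 *	}
 */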
2056
2057/* Iterate across keys (in leaf nodes only) */
2058
2059inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2060{
2061 struct bpos pos = iter->k.p;
2062 bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2063 ? bpos_eq(pos, SPOS_MAX)
2064 : bkey_eq(pos, SPOS_MAX));
2065
2066 if (ret && !(iter->flags & BTREE_ITER_is_extents))
2067 pos = bkey_successor(iter, pos);
2068 bch2_btree_iter_set_pos(iter, pos);
2069 return ret;
2070}
2071
2072inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2073{
2074 struct bpos pos = bkey_start_pos(&iter->k);
2075 bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2076 ? bpos_eq(pos, POS_MIN)
2077 : bkey_eq(pos, POS_MIN));
2078
2079 if (ret && !(iter->flags & BTREE_ITER_is_extents))
2080 pos = bkey_predecessor(iter, pos);
2081 bch2_btree_iter_set_pos(iter, pos);
2082 return ret;
2083}
2084
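/*
 * The *_peek_updates() helpers below overlay keys from the transaction's
 * pending (uncommitted) updates onto whatever was found in the btree, so that
 * iterators flagged with BTREE_ITER_with_updates observe the transaction's
 * own writes before they are committed.
 */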
2085static noinline
2086void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2087 struct bkey_s_c *k)
2088{
2089 struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2090
2091 trans_for_each_update(trans, i)
2092 if (!i->key_cache_already_flushed &&
2093 i->btree_id == iter->btree_id &&
2094 bpos_le(i->k->k.p, iter->pos) &&
2095 bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2096 iter->k = i->k->k;
2097 *k = bkey_i_to_s_c(i->k);
2098 }
2099}
2100
2101static noinline
2102void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2103 struct bkey_s_c *k)
2104{
2105 struct btree_path *path = btree_iter_path(trans, iter);
2106 struct bpos end = path_l(path)->b->key.k.p;
2107
2108 trans_for_each_update(trans, i)
2109 if (!i->key_cache_already_flushed &&
2110 i->btree_id == iter->btree_id &&
2111 bpos_ge(i->k->k.p, path->pos) &&
2112 bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2113 iter->k = i->k->k;
2114 *k = bkey_i_to_s_c(i->k);
2115 }
2116}
2117
2118static noinline
2119void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2120 struct bkey_s_c *k)
2121{
2122 trans_for_each_update(trans, i)
2123 if (!i->key_cache_already_flushed &&
2124 i->btree_id == iter->btree_id &&
2125 bpos_eq(i->k->k.p, iter->pos)) {
2126 iter->k = i->k->k;
2127 *k = bkey_i_to_s_c(i->k);
2128 }
2129}
2130
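/*
 * Journal overlay: until journal replay has finished, some keys exist only in
 * the journal and are not yet in the btree proper. Iterators flagged with
 * BTREE_ITER_with_journal use the helpers below to merge those keys into
 * their results.
 */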
2131static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2132 struct btree_iter *iter,
2133 struct bpos end_pos)
2134{
2135 struct btree_path *path = btree_iter_path(trans, iter);
2136
2137 return bch2_journal_keys_peek_max(trans->c, iter->btree_id,
2138 path->level,
2139 path->pos,
2140 end_pos,
2141 &iter->journal_idx);
2142}
2143
2144static noinline
2145struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2146 struct btree_iter *iter)
2147{
2148 struct btree_path *path = btree_iter_path(trans, iter);
2149 struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2150
2151 if (k) {
2152 iter->k = k->k;
2153 return bkey_i_to_s_c(k);
2154 } else {
2155 return bkey_s_c_null;
2156 }
2157}
2158
2159static noinline
2160void btree_trans_peek_journal(struct btree_trans *trans,
2161 struct btree_iter *iter,
2162 struct bkey_s_c *k)
2163{
2164 struct btree_path *path = btree_iter_path(trans, iter);
2165 struct bkey_i *next_journal =
2166 bch2_btree_journal_peek(trans, iter,
2167 k->k ? k->k->p : path_l(path)->b->key.k.p);
2168 if (next_journal) {
2169 iter->k = next_journal->k;
2170 *k = bkey_i_to_s_c(next_journal);
2171 }
2172}
2173
2174static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
2175 struct btree_iter *iter,
2176 struct bpos end_pos)
2177{
2178 struct btree_path *path = btree_iter_path(trans, iter);
2179
2180 return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id,
2181 path->level,
2182 path->pos,
2183 end_pos,
2184 &iter->journal_idx);
2185}
2186
2187static noinline
2188void btree_trans_peek_prev_journal(struct btree_trans *trans,
2189 struct btree_iter *iter,
2190 struct bkey_s_c *k)
2191{
2192 struct btree_path *path = btree_iter_path(trans, iter);
2193 struct bkey_i *next_journal =
2194 bch2_btree_journal_peek_prev(trans, iter,
2195 k->k ? k->k->p : path_l(path)->b->key.k.p);
2196
2197 if (next_journal) {
2198 iter->k = next_journal->k;
2199 *k = bkey_i_to_s_c(next_journal);
2200 }
2201}
2202
2203/*
2204 * Checks btree key cache for key at iter->pos and returns it if present, or
2205 * bkey_s_c_null:
2206 */
2207static noinline
2208struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2209{
2210 struct btree_trans *trans = iter->trans;
2211 struct bch_fs *c = trans->c;
2212 struct bkey u;
2213 struct bkey_s_c k;
2214 int ret;
2215
2216 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2217
2218 if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2219 bpos_eq(iter->pos, pos))
2220 return bkey_s_c_null;
2221
2222 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2223 return bkey_s_c_null;
2224
2225 if (!iter->key_cache_path)
2226 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2227 iter->flags & BTREE_ITER_intent, 0,
2228 iter->flags|BTREE_ITER_cached|
2229 BTREE_ITER_cached_nofill,
2230 _THIS_IP_);
2231
2232 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2233 iter->flags & BTREE_ITER_intent,
2234 btree_iter_ip_allocated(iter));
2235
2236 ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
2237 iter->flags|BTREE_ITER_cached) ?:
2238 bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2239 if (unlikely(ret))
2240 return bkey_s_c_err(ret);
2241
2242 k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2243 if (!k.k)
2244 return k;
2245
2246 if ((iter->flags & BTREE_ITER_all_snapshots) &&
2247 !bpos_eq(pos, k.k->p))
2248 return bkey_s_c_null;
2249
2250 iter->k = u;
2251 k.k = &iter->k;
2252 btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
2253 return k;
2254}
2255
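/*
 * __bch2_btree_iter_peek(): core forward-peek loop. Repositions the path at
 * the search key, peeks the leaf, then overlays the key cache, journal and
 * pending updates as dictated by the iterator flags; whiteouts advance the
 * search key, and the loop moves to the next leaf node when the current one
 * is exhausted.
 */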
2256static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2257{
2258 struct btree_trans *trans = iter->trans;
2259 struct bkey_s_c k, k2;
2260 int ret;
2261
2262 EBUG_ON(btree_iter_path(trans, iter)->cached);
2263 bch2_btree_iter_verify(iter);
2264
2265 while (1) {
2266 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2267 iter->flags & BTREE_ITER_intent,
2268 btree_iter_ip_allocated(iter));
2269
2270 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2271 if (unlikely(ret)) {
2272 /* ensure that iter->k is consistent with iter->pos: */
2273 bch2_btree_iter_set_pos(iter, iter->pos);
2274 k = bkey_s_c_err(ret);
2275 break;
2276 }
2277
2278 struct btree_path *path = btree_iter_path(trans, iter);
2279 struct btree_path_level *l = path_l(path);
2280
2281 if (unlikely(!l->b)) {
2282 /* No btree nodes at requested level: */
2283 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2284 k = bkey_s_c_null;
2285 break;
2286 }
2287
2288 btree_path_set_should_be_locked(trans, path);
2289
2290 k = btree_path_level_peek_all(trans->c, l, &iter->k);
2291
2292 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2293 k.k &&
2294 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2295 k = k2;
2296 if (bkey_err(k)) {
2297 bch2_btree_iter_set_pos(iter, iter->pos);
2298 break;
2299 }
2300 }
2301
2302 if (unlikely(iter->flags & BTREE_ITER_with_journal))
2303 btree_trans_peek_journal(trans, iter, &k);
2304
2305 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2306 trans->nr_updates))
2307 bch2_btree_trans_peek_updates(trans, iter, &k);
2308
2309 if (k.k && bkey_deleted(k.k)) {
2310 /*
2311 * If we've got a whiteout, and it's after the search
2312 * key, advance the search key to the whiteout instead
2313 * of just after the whiteout - it might be a btree
2314 * whiteout, with a real key at the same position, since
2315 * in the btree deleted keys sort before non deleted.
2316 */
2317 search_key = !bpos_eq(search_key, k.k->p)
2318 ? k.k->p
2319 : bpos_successor(k.k->p);
2320 continue;
2321 }
2322
2323 if (likely(k.k)) {
2324 break;
2325 } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2326 /* Advance to next leaf node: */
2327 search_key = bpos_successor(l->b->key.k.p);
2328 } else {
2329 /* End of btree: */
2330 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2331 k = bkey_s_c_null;
2332 break;
2333 }
2334 }
2335
2336 bch2_btree_iter_verify(iter);
2337 return k;
2338}
2339
2340/**
2341 * bch2_btree_iter_peek_max() - returns first key greater than or equal to
2342 * iterator's current position
2343 * @iter: iterator to peek from
2344 * @end: search limit: returns keys less than or equal to @end
2345 *
2346 * Returns: key if found, or an error extractable with bkey_err().
2347 */
2348struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
2349{
2350 struct btree_trans *trans = iter->trans;
2351 struct bpos search_key = btree_iter_search_key(iter);
2352 struct bkey_s_c k;
2353 struct bpos iter_pos = iter->pos;
2354 int ret;
2355
2356 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2357 bch2_btree_iter_verify_entry_exit(iter);
2358 EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2359
2360 if (iter->update_path) {
2361 bch2_path_put_nokeep(trans, iter->update_path,
2362 iter->flags & BTREE_ITER_intent);
2363 iter->update_path = 0;
2364 }
2365
2366 while (1) {
2367 k = __bch2_btree_iter_peek(iter, search_key);
2368 if (unlikely(!k.k))
2369 goto end;
2370 if (unlikely(bkey_err(k)))
2371 goto out_no_locked;
2372
2373 if (iter->flags & BTREE_ITER_filter_snapshots) {
2374 /*
2375 * We need to check against @end before FILTER_SNAPSHOTS because
2376 * if we get to a different inode than requested, we might be
2377 * seeing keys for a different snapshot tree that will all be
2378 * filtered out.
2379 *
2380 * But we can't do the full check here, because bkey_start_pos()
2381 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2382 * that's what we check against in extents mode:
2383 */
2384 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2385 ? bkey_gt(k.k->p, end)
2386 : k.k->p.inode > end.inode))
2387 goto end;
2388
2389 if (iter->update_path &&
2390 !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2391 bch2_path_put_nokeep(trans, iter->update_path,
2392 iter->flags & BTREE_ITER_intent);
2393 iter->update_path = 0;
2394 }
2395
2396 if ((iter->flags & BTREE_ITER_intent) &&
2397 !(iter->flags & BTREE_ITER_is_extents) &&
2398 !iter->update_path) {
2399 struct bpos pos = k.k->p;
2400
2401 if (pos.snapshot < iter->snapshot) {
2402 search_key = bpos_successor(k.k->p);
2403 continue;
2404 }
2405
2406 pos.snapshot = iter->snapshot;
2407
2408 /*
2409 * advance, same as on exit for iter->path, but only up
2410 * to snapshot
2411 */
2412 __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2413 iter->update_path = iter->path;
2414
2415 iter->update_path = bch2_btree_path_set_pos(trans,
2416 iter->update_path, pos,
2417 iter->flags & BTREE_ITER_intent,
2418 _THIS_IP_);
2419 ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2420 if (unlikely(ret)) {
2421 k = bkey_s_c_err(ret);
2422 goto out_no_locked;
2423 }
2424 }
2425
2426 /*
2427 * We can never have a key in a leaf node at POS_MAX, so
2428 * we don't have to check these successor() calls:
2429 */
2430 if (!bch2_snapshot_is_ancestor(trans->c,
2431 iter->snapshot,
2432 k.k->p.snapshot)) {
2433 search_key = bpos_successor(k.k->p);
2434 continue;
2435 }
2436
2437 if (bkey_whiteout(k.k) &&
2438 !(iter->flags & BTREE_ITER_key_cache_fill)) {
2439 search_key = bkey_successor(iter, k.k->p);
2440 continue;
2441 }
2442 }
2443
2444 /*
2445 * iter->pos should be monotonically increasing, and always be
2446 * equal to the key we just returned - except extents can
2447 * straddle iter->pos:
2448 */
2449 if (!(iter->flags & BTREE_ITER_is_extents))
2450 iter_pos = k.k->p;
2451 else
2452 iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2453
2454 if (unlikely(iter->flags & BTREE_ITER_all_snapshots ? bpos_gt(iter_pos, end) :
2455 iter->flags & BTREE_ITER_is_extents ? bkey_ge(iter_pos, end) :
2456 bkey_gt(iter_pos, end)))
2457 goto end;
2458
2459 break;
2460 }
2461
2462 iter->pos = iter_pos;
2463
2464 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2465 iter->flags & BTREE_ITER_intent,
2466 btree_iter_ip_allocated(iter));
2467
2468 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2469out_no_locked:
2470 if (iter->update_path) {
2471 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2472 if (unlikely(ret))
2473 k = bkey_s_c_err(ret);
2474 else
2475 btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
2476 }
2477
2478 if (!(iter->flags & BTREE_ITER_all_snapshots))
2479 iter->pos.snapshot = iter->snapshot;
2480
2481 ret = bch2_btree_iter_verify_ret(iter, k);
2482 if (unlikely(ret)) {
2483 bch2_btree_iter_set_pos(iter, iter->pos);
2484 k = bkey_s_c_err(ret);
2485 }
2486
2487 bch2_btree_iter_verify_entry_exit(iter);
2488
2489 return k;
2490end:
2491 bch2_btree_iter_set_pos(iter, end);
2492 k = bkey_s_c_null;
2493 goto out_no_locked;
2494}
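
/*
 * Illustrative usage sketch (not part of this file): iterating keys in
 * [start, end] with transaction restart handling. BTREE_ID_extents, start and
 * end are only examples; real callers normally use the for_each_btree_key*()
 * macros, which expand to roughly this pattern:
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, start, 0);
 *	while (1) {
 *		k = bch2_btree_iter_peek_max(&iter, end);
 *		ret = bkey_err(k);
 *		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 *			bch2_trans_begin(trans);
 *			continue;
 *		}
 *		if (ret || !k.k)
 *			break;
 *
 *		... process k ...
 *
 *		if (!bch2_btree_iter_advance(&iter))
 *			break;
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 *	bch2_trans_put(trans);
 */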
2495
2496/**
2497 * bch2_btree_iter_next() - returns first key greater than iterator's current
2498 * position
2499 * @iter: iterator to peek from
2500 *
2501 * Returns: key if found, or an error extractable with bkey_err().
2502 */
2503struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2504{
2505 if (!bch2_btree_iter_advance(iter))
2506 return bkey_s_c_null;
2507
2508 return bch2_btree_iter_peek(iter);
2509}
2510
2511static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
2512{
2513 struct btree_trans *trans = iter->trans;
2514 struct bkey_s_c k, k2;
2515
2516 bch2_btree_iter_verify(iter);
2517
2518 while (1) {
2519 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2520 iter->flags & BTREE_ITER_intent,
2521 btree_iter_ip_allocated(iter));
2522
2523 int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2524 if (unlikely(ret)) {
2525 /* ensure that iter->k is consistent with iter->pos: */
2526 bch2_btree_iter_set_pos(iter, iter->pos);
2527 k = bkey_s_c_err(ret);
2528 break;
2529 }
2530
2531 struct btree_path *path = btree_iter_path(trans, iter);
2532 struct btree_path_level *l = path_l(path);
2533
2534 if (unlikely(!l->b)) {
2535 /* No btree nodes at requested level: */
2536 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2537 k = bkey_s_c_null;
2538 break;
2539 }
2540
2541 btree_path_set_should_be_locked(trans, path);
2542
2543 k = btree_path_level_peek_all(trans->c, l, &iter->k);
2544 if (!k.k || bpos_gt(k.k->p, search_key)) {
2545 k = btree_path_level_prev(trans, path, l, &iter->k);
2546
2547 BUG_ON(k.k && bpos_gt(k.k->p, search_key));
2548 }
2549
2550 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2551 k.k &&
2552 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2553 k = k2;
2554 if (bkey_err(k2)) {
2555 bch2_btree_iter_set_pos(iter, iter->pos);
2556 break;
2557 }
2558 }
2559
2560 if (unlikely(iter->flags & BTREE_ITER_with_journal))
2561 btree_trans_peek_prev_journal(trans, iter, &k);
2562
2563 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2564 trans->nr_updates))
2565 bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2566
2567 if (likely(k.k && !bkey_deleted(k.k))) {
2568 break;
2569 } else if (k.k) {
2570 search_key = bpos_predecessor(k.k->p);
2571 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2572 /* Advance to previous leaf node: */
2573 search_key = bpos_predecessor(path->l[0].b->data->min_key);
2574 } else {
2575 /* Start of btree: */
2576 bch2_btree_iter_set_pos(iter, POS_MIN);
2577 k = bkey_s_c_null;
2578 break;
2579 }
2580 }
2581
2582 bch2_btree_iter_verify(iter);
2583 return k;
2584}
2585
2586/**
2587 * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
2588 * iterator's current position
2589 * @iter: iterator to peek from
2590 * @end: search limit: returns keys greater than or equal to @end
2591 *
2592 * Returns: key if found, or an error extractable with bkey_err().
2593 */
2594struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
2595{
2596 if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
2597 !bkey_eq(iter->pos, POS_MAX)) {
2598 /*
2599 * bkey_start_pos(), for extents, is not monotonically
2600 * increasing until after filtering for snapshots:
2601 *
2602 * Thus, for extents we need to search forward until we find a
2603 * real, visible extent - easiest to just use peek_slot() (which
2604 * internally uses peek() for extents)
2605 */
2606 struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
2607 if (bkey_err(k))
2608 return k;
2609
2610 if (!bkey_deleted(k.k) &&
2611 (!(iter->flags & BTREE_ITER_is_extents) ||
2612 bkey_lt(bkey_start_pos(k.k), iter->pos)))
2613 return k;
2614 }
2615
2616 struct btree_trans *trans = iter->trans;
2617 struct bpos search_key = iter->pos;
2618 struct bkey_s_c k;
2619 btree_path_idx_t saved_path = 0;
2620
2621 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2622 bch2_btree_iter_verify_entry_exit(iter);
2623 EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bpos_eq(end, POS_MIN));
2624
2625 while (1) {
2626 k = __bch2_btree_iter_peek_prev(iter, search_key);
2627 if (unlikely(!k.k))
2628 goto end;
2629 if (unlikely(bkey_err(k)))
2630 goto out_no_locked;
2631
2632 if (iter->flags & BTREE_ITER_filter_snapshots) {
2633 struct btree_path *s = saved_path ? trans->paths + saved_path : NULL;
2634 if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) {
2635 /*
2636 * If we have a saved candidate, and we're past
2637 * the last possible snapshot overwrite, return
2638 * it:
2639 */
2640 bch2_path_put_nokeep(trans, iter->path,
2641 iter->flags & BTREE_ITER_intent);
2642 iter->path = saved_path;
2643 saved_path = 0;
2644 k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
2645 break;
2646 }
2647
2648 /*
2649 * We need to check against @end before FILTER_SNAPSHOTS because
2650 * if we get to a different inode than requested, we might be
2651 * seeing keys for a different snapshot tree that will all be
2652 * filtered out.
2653 */
2654 if (unlikely(bkey_lt(k.k->p, end)))
2655 goto end;
2656
2657 if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) {
2658 search_key = bpos_predecessor(k.k->p);
2659 continue;
2660 }
2661
2662 if (k.k->p.snapshot != iter->snapshot) {
2663 /*
2664 * Have a key visible in iter->snapshot, but it
2665 * might have overwrites - save it and keep
2666 * searching. Unless it's a whiteout - then drop
2667 * our previously saved candidate:
2668 */
2669 if (saved_path) {
2670 bch2_path_put_nokeep(trans, saved_path,
2671 iter->flags & BTREE_ITER_intent);
2672 saved_path = 0;
2673 }
2674
2675 if (!bkey_whiteout(k.k)) {
2676 saved_path = btree_path_clone(trans, iter->path,
2677 iter->flags & BTREE_ITER_intent,
2678 _THIS_IP_);
2679 trace_btree_path_save_pos(trans,
2680 trans->paths + iter->path,
2681 trans->paths + saved_path);
2682 }
2683
2684 search_key = bpos_predecessor(k.k->p);
2685 continue;
2686 }
2687
2688 if (bkey_whiteout(k.k)) {
2689 search_key = bkey_predecessor(iter, k.k->p);
2690 search_key.snapshot = U32_MAX;
2691 continue;
2692 }
2693 }
2694
2695 EBUG_ON(iter->flags & BTREE_ITER_all_snapshots ? bpos_gt(k.k->p, iter->pos) :
2696 iter->flags & BTREE_ITER_is_extents ? bkey_ge(bkey_start_pos(k.k), iter->pos) :
2697 bkey_gt(k.k->p, iter->pos));
2698
2699 if (unlikely(iter->flags & BTREE_ITER_all_snapshots ? bpos_lt(k.k->p, end) :
2700 iter->flags & BTREE_ITER_is_extents ? bkey_le(k.k->p, end) :
2701 bkey_lt(k.k->p, end)))
2702 goto end;
2703
2704 break;
2705 }
2706
2707 /* Extents can straddle iter->pos: */
2708 iter->pos = bpos_min(iter->pos, k.k->p);
2709
2710 if (iter->flags & BTREE_ITER_filter_snapshots)
2711 iter->pos.snapshot = iter->snapshot;
2712out_no_locked:
2713 if (saved_path)
2714 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2715
2716 bch2_btree_iter_verify_entry_exit(iter);
2717 bch2_btree_iter_verify(iter);
2718 return k;
2719end:
2720 bch2_btree_iter_set_pos(iter, end);
2721 k = bkey_s_c_null;
2722 goto out_no_locked;
2723}
2724
2725/**
2726 * bch2_btree_iter_prev() - returns first key less than iterator's current
2727 * position
2728 * @iter: iterator to peek from
2729 *
2730 * Returns: key if found, or an error extractable with bkey_err().
2731 */
2732struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2733{
2734 if (!bch2_btree_iter_rewind(iter))
2735 return bkey_s_c_null;
2736
2737 return bch2_btree_iter_peek_prev(iter);
2738}
2739
2740struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2741{
2742 struct btree_trans *trans = iter->trans;
2743 struct bpos search_key;
2744 struct bkey_s_c k;
2745 int ret;
2746
2747 bch2_trans_verify_not_unlocked_or_in_restart(trans);
2748 bch2_btree_iter_verify(iter);
2749 bch2_btree_iter_verify_entry_exit(iter);
2750 EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2751
2752 /* extents can't span inode numbers: */
2753 if ((iter->flags & BTREE_ITER_is_extents) &&
2754 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2755 if (iter->pos.inode == KEY_INODE_MAX)
2756 return bkey_s_c_null;
2757
2758 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2759 }
2760
2761 search_key = btree_iter_search_key(iter);
2762 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2763 iter->flags & BTREE_ITER_intent,
2764 btree_iter_ip_allocated(iter));
2765
2766 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2767 if (unlikely(ret)) {
2768 k = bkey_s_c_err(ret);
2769 goto out_no_locked;
2770 }
2771
2772 struct btree_path *path = btree_iter_path(trans, iter);
2773 if (unlikely(!btree_path_node(path, path->level)))
2774 return bkey_s_c_null;
2775
2776 if ((iter->flags & BTREE_ITER_cached) ||
2777 !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2778 k = bkey_s_c_null;
2779
2780 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2781 trans->nr_updates)) {
2782 bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2783 if (k.k)
2784 goto out;
2785 }
2786
2787 if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2788 (k = btree_trans_peek_slot_journal(trans, iter)).k)
2789 goto out;
2790
2791 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2792 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2793 if (!bkey_err(k))
2794 iter->k = *k.k;
2795 /* We're not returning a key from iter->path: */
2796 goto out_no_locked;
2797 }
2798
2799 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2800 if (unlikely(!k.k))
2801 goto out_no_locked;
2802
2803 if (unlikely(k.k->type == KEY_TYPE_whiteout &&
2804 (iter->flags & BTREE_ITER_filter_snapshots) &&
2805 !(iter->flags & BTREE_ITER_key_cache_fill)))
2806 iter->k.type = KEY_TYPE_deleted;
2807 } else {
2808 struct bpos next;
2809 struct bpos end = iter->pos;
2810
2811 if (iter->flags & BTREE_ITER_is_extents)
2812 end.offset = U64_MAX;
2813
2814 EBUG_ON(btree_iter_path(trans, iter)->level);
2815
2816 if (iter->flags & BTREE_ITER_intent) {
2817 struct btree_iter iter2;
2818
2819 bch2_trans_copy_iter(&iter2, iter);
2820 k = bch2_btree_iter_peek_max(&iter2, end);
2821
2822 if (k.k && !bkey_err(k)) {
2823 swap(iter->key_cache_path, iter2.key_cache_path);
2824 iter->k = iter2.k;
2825 k.k = &iter->k;
2826 }
2827 bch2_trans_iter_exit(trans, &iter2);
2828 } else {
2829 struct bpos pos = iter->pos;
2830
2831 k = bch2_btree_iter_peek_max(iter, end);
2832 if (unlikely(bkey_err(k)))
2833 bch2_btree_iter_set_pos(iter, pos);
2834 else
2835 iter->pos = pos;
2836 }
2837
2838 if (unlikely(bkey_err(k)))
2839 goto out_no_locked;
2840
2841 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2842
2843 if (bkey_lt(iter->pos, next)) {
2844 bkey_init(&iter->k);
2845 iter->k.p = iter->pos;
2846
2847 if (iter->flags & BTREE_ITER_is_extents) {
2848 bch2_key_resize(&iter->k,
2849 min_t(u64, KEY_SIZE_MAX,
2850 (next.inode == iter->pos.inode
2851 ? next.offset
2852 : KEY_OFFSET_MAX) -
2853 iter->pos.offset));
2854 EBUG_ON(!iter->k.size);
2855 }
2856
2857 k = (struct bkey_s_c) { &iter->k, NULL };
2858 }
2859 }
2860out:
2861 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2862out_no_locked:
2863 bch2_btree_iter_verify_entry_exit(iter);
2864 bch2_btree_iter_verify(iter);
2865 ret = bch2_btree_iter_verify_ret(iter, k);
2866 if (unlikely(ret))
2867 return bkey_s_c_err(ret);
2868
2869 return k;
2870}
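
/*
 * Illustrative point-lookup sketch (not part of this file; BTREE_ID_inodes
 * and pos are only examples): peek_slot() normally produces a key for the
 * iterator's position, synthesizing a deleted key (or a hole extent) when
 * nothing is present, so callers test bkey_deleted()/the key type rather than
 * checking for a NULL key:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret && !bkey_deleted(k.k))
 *		... key exists at pos ...
 *	bch2_trans_iter_exit(trans, &iter);
 */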
2871
2872struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2873{
2874 if (!bch2_btree_iter_advance(iter))
2875 return bkey_s_c_null;
2876
2877 return bch2_btree_iter_peek_slot(iter);
2878}
2879
2880struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2881{
2882 if (!bch2_btree_iter_rewind(iter))
2883 return bkey_s_c_null;
2884
2885 return bch2_btree_iter_peek_slot(iter);
2886}
2887
2888/* Obsolete, but still used by rust wrapper in -tools */
2889struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2890{
2891 struct bkey_s_c k;
2892
2893 while (btree_trans_too_many_iters(iter->trans) ||
2894 (k = bch2_btree_iter_peek_type(iter, iter->flags),
2895 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2896 bch2_trans_begin(iter->trans);
2897
2898 return k;
2899}
2900
2901/* new transactional stuff: */
2902
2903#ifdef CONFIG_BCACHEFS_DEBUG
2904static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2905{
2906 struct btree_path *path;
2907 unsigned i;
2908
2909 BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2910
2911 trans_for_each_path(trans, path, i) {
2912 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2913 BUG_ON(trans->sorted[path->sorted_idx] != i);
2914 }
2915
2916 for (i = 0; i < trans->nr_sorted; i++) {
2917 unsigned idx = trans->sorted[i];
2918
2919 BUG_ON(!test_bit(idx, trans->paths_allocated));
2920 BUG_ON(trans->paths[idx].sorted_idx != i);
2921 }
2922}
2923
2924static void btree_trans_verify_sorted(struct btree_trans *trans)
2925{
2926 struct btree_path *path, *prev = NULL;
2927 struct trans_for_each_path_inorder_iter iter;
2928
2929 if (!bch2_debug_check_iterators)
2930 return;
2931
2932 trans_for_each_path_inorder(trans, path, iter) {
2933 if (prev && btree_path_cmp(prev, path) > 0) {
2934 __bch2_dump_trans_paths_updates(trans, true);
2935 panic("trans paths out of order!\n");
2936 }
2937 prev = path;
2938 }
2939}
2940#else
2941static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2942static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2943#endif
2944
2945void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2946{
2947 int i, l = 0, r = trans->nr_sorted, inc = 1;
2948 bool swapped;
2949
2950 btree_trans_verify_sorted_refs(trans);
2951
2952 if (trans->paths_sorted)
2953 goto out;
2954
2955 /*
2956 * Cocktail shaker sort: this is efficient because iterators will be
2957 * mostly sorted.
2958 */
2959 do {
2960 swapped = false;
2961
2962 for (i = inc > 0 ? l : r - 2;
2963 i + 1 < r && i >= l;
2964 i += inc) {
2965 if (btree_path_cmp(trans->paths + trans->sorted[i],
2966 trans->paths + trans->sorted[i + 1]) > 0) {
2967 swap(trans->sorted[i], trans->sorted[i + 1]);
2968 trans->paths[trans->sorted[i]].sorted_idx = i;
2969 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2970 swapped = true;
2971 }
2972 }
2973
2974 if (inc > 0)
2975 --r;
2976 else
2977 l++;
2978 inc = -inc;
2979 } while (swapped);
2980
2981 trans->paths_sorted = true;
2982out:
2983 btree_trans_verify_sorted(trans);
2984}
2985
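/*
 * trans->sorted is the list of allocated paths kept in btree_path_cmp() order
 * (the lock ordering); the helpers below keep it, and each path's sorted_idx
 * back-pointer, consistent as paths are added and removed.
 */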
2986static inline void btree_path_list_remove(struct btree_trans *trans,
2987 struct btree_path *path)
2988{
2989 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2990#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2991 trans->nr_sorted--;
2992 memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2993 trans->sorted + path->sorted_idx + 1,
2994 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2995 sizeof(u64) / sizeof(btree_path_idx_t)));
2996#else
2997 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2998#endif
2999 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3000 trans->paths[trans->sorted[i]].sorted_idx = i;
3001}
3002
3003static inline void btree_path_list_add(struct btree_trans *trans,
3004 btree_path_idx_t pos,
3005 btree_path_idx_t path_idx)
3006{
3007 struct btree_path *path = trans->paths + path_idx;
3008
3009 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
3010
3011#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3012 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
3013 trans->sorted + path->sorted_idx,
3014 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3015 sizeof(u64) / sizeof(btree_path_idx_t)));
3016 trans->nr_sorted++;
3017 trans->sorted[path->sorted_idx] = path_idx;
3018#else
3019 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
3020#endif
3021
3022 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3023 trans->paths[trans->sorted[i]].sorted_idx = i;
3024
3025 btree_trans_verify_sorted_refs(trans);
3026}
3027
3028void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3029{
3030 if (iter->update_path)
3031 bch2_path_put_nokeep(trans, iter->update_path,
3032 iter->flags & BTREE_ITER_intent);
3033 if (iter->path)
3034 bch2_path_put(trans, iter->path,
3035 iter->flags & BTREE_ITER_intent);
3036 if (iter->key_cache_path)
3037 bch2_path_put(trans, iter->key_cache_path,
3038 iter->flags & BTREE_ITER_intent);
3039 iter->path = 0;
3040 iter->update_path = 0;
3041 iter->key_cache_path = 0;
3042 iter->trans = NULL;
3043}
3044
3045void bch2_trans_iter_init_outlined(struct btree_trans *trans,
3046 struct btree_iter *iter,
3047 enum btree_id btree_id, struct bpos pos,
3048 unsigned flags)
3049{
3050 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
3051 bch2_btree_iter_flags(trans, btree_id, 0, flags),
3052 _RET_IP_);
3053}
3054
3055void bch2_trans_node_iter_init(struct btree_trans *trans,
3056 struct btree_iter *iter,
3057 enum btree_id btree_id,
3058 struct bpos pos,
3059 unsigned locks_want,
3060 unsigned depth,
3061 unsigned flags)
3062{
3063 flags |= BTREE_ITER_not_extents;
3064 flags |= BTREE_ITER_snapshot_field;
3065 flags |= BTREE_ITER_all_snapshots;
3066
3067 if (!depth && btree_id_cached(trans->c, btree_id))
3068 flags |= BTREE_ITER_with_key_cache;
3069
3070 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
3071 bch2_btree_iter_flags(trans, btree_id, depth, flags),
3072 _RET_IP_);
3073
3074 iter->min_depth = depth;
3075
3076 struct btree_path *path = btree_iter_path(trans, iter);
3077 BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
3078 BUG_ON(path->level != depth);
3079 BUG_ON(iter->min_depth != depth);
3080}
3081
3082void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3083{
3084 struct btree_trans *trans = src->trans;
3085
3086 *dst = *src;
3087#ifdef TRACK_PATH_ALLOCATED
3088 dst->ip_allocated = _RET_IP_;
3089#endif
3090 if (src->path)
3091 __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
3092 if (src->update_path)
3093 __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
3094 dst->key_cache_path = 0;
3095}
3096
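/*
 * Transaction-local bump allocator: allocations from trans->mem live until
 * the next bch2_trans_begin() (which resets mem_top) or bch2_trans_put().
 * Growing the buffer may reallocate it, in which case outstanding pointers
 * become invalid and a transaction restart
 * (transaction_restart_mem_realloced) is returned.
 */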
3097void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3098{
3099 struct bch_fs *c = trans->c;
3100 unsigned new_top = trans->mem_top + size;
3101 unsigned old_bytes = trans->mem_bytes;
3102 unsigned new_bytes = roundup_pow_of_two(new_top);
3103 int ret;
3104 void *new_mem;
3105 void *p;
3106
3107 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3108
3109 struct btree_transaction_stats *s = btree_trans_stats(trans);
3110 s->max_mem = max(s->max_mem, new_bytes);
3111
3112 if (trans->used_mempool) {
3113 if (trans->mem_bytes >= new_bytes)
3114 goto out_change_top;
3115
3116 /* No more space in the mempool buffer - allocate a new one with kmalloc: */
3117 new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3118 if (unlikely(!new_mem)) {
3119 bch2_trans_unlock(trans);
3120
3121 new_mem = kmalloc(new_bytes, GFP_KERNEL);
3122 if (!new_mem)
3123 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3124
3125 ret = bch2_trans_relock(trans);
3126 if (ret) {
3127 kfree(new_mem);
3128 return ERR_PTR(ret);
3129 }
3130 }
3131 memcpy(new_mem, trans->mem, trans->mem_top);
3132 trans->used_mempool = false;
3133 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3134 goto out_new_mem;
3135 }
3136
3137 new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3138 if (unlikely(!new_mem)) {
3139 bch2_trans_unlock(trans);
3140
3141 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
3142 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3143 new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3144 new_bytes = BTREE_TRANS_MEM_MAX;
3145 memcpy(new_mem, trans->mem, trans->mem_top);
3146 trans->used_mempool = true;
3147 kfree(trans->mem);
3148 }
3149
3150 if (!new_mem)
3151 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3152
3153 trans->mem = new_mem;
3154 trans->mem_bytes = new_bytes;
3155
3156 ret = bch2_trans_relock(trans);
3157 if (ret)
3158 return ERR_PTR(ret);
3159 }
3160out_new_mem:
3161 trans->mem = new_mem;
3162 trans->mem_bytes = new_bytes;
3163
3164 if (old_bytes) {
3165 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3166 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
3167 }
3168out_change_top:
3169 p = trans->mem + trans->mem_top;
3170 trans->mem_top += size;
3171 memset(p, 0, size);
3172 return p;
3173}
3174
3175static inline void check_srcu_held_too_long(struct btree_trans *trans)
3176{
3177 WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3178 "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3179 (jiffies - trans->srcu_lock_time) / HZ);
3180}
3181
3182void bch2_trans_srcu_unlock(struct btree_trans *trans)
3183{
3184 if (trans->srcu_held) {
3185 struct bch_fs *c = trans->c;
3186 struct btree_path *path;
3187 unsigned i;
3188
3189 trans_for_each_path(trans, path, i)
3190 if (path->cached && !btree_node_locked(path, 0))
3191 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3192
3193 check_srcu_held_too_long(trans);
3194 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3195 trans->srcu_held = false;
3196 }
3197}
3198
3199static void bch2_trans_srcu_lock(struct btree_trans *trans)
3200{
3201 if (!trans->srcu_held) {
3202 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3203 trans->srcu_lock_time = jiffies;
3204 trans->srcu_held = true;
3205 }
3206}
3207
3208/**
3209 * bch2_trans_begin() - reset a transaction after an interrupted attempt
3210 * @trans: transaction to reset
3211 *
3212 * Returns: current restart counter, to be used with trans_was_restarted()
3213 *
3214 * While iterating over nodes or updating nodes, an attempt to lock a btree node
3215 * may return BCH_ERR_transaction_restart when the trylock fails. When this
3216 * occurs, bch2_trans_begin() should be called and the transaction retried.
3217 */
3218u32 bch2_trans_begin(struct btree_trans *trans)
3219{
3220 struct btree_path *path;
3221 unsigned i;
3222 u64 now;
3223
3224 bch2_trans_reset_updates(trans);
3225
3226 trans->restart_count++;
3227 trans->mem_top = 0;
3228 trans->journal_entries = NULL;
3229
3230 trans_for_each_path(trans, path, i) {
3231 path->should_be_locked = false;
3232
3233 /*
3234 * If the transaction wasn't restarted, we're presuming to be
3235 * doing something new: don't keep iterators except the ones that
3236 * are in use - except for the subvolumes btree:
3237 */
3238 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3239 path->preserve = false;
3240
3241 /*
3242 * XXX: we probably shouldn't be doing this if the transaction
3243 * was restarted, but currently we still overflow transaction
3244 * iterators if we do that
3245 */
3246 if (!path->ref && !path->preserve)
3247 __bch2_path_free(trans, i);
3248 else
3249 path->preserve = false;
3250 }
3251
3252 now = local_clock();
3253
3254 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3255 time_after64(now, trans->last_begin_time + 10))
3256 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3257 trans->last_begin_time, now);
3258
3259 if (!trans->restarted &&
3260 (need_resched() ||
3261 time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3262 bch2_trans_unlock(trans);
3263 cond_resched();
3264 now = local_clock();
3265 }
3266 trans->last_begin_time = now;
3267
3268 if (unlikely(trans->srcu_held &&
3269 time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3270 bch2_trans_srcu_unlock(trans);
3271
3272 trans->last_begin_ip = _RET_IP_;
3273
3274 trans_set_locked(trans, false);
3275
3276 if (trans->restarted) {
3277 bch2_btree_path_traverse_all(trans);
3278 trans->notrace_relock_fail = false;
3279 }
3280
3281 bch2_trans_verify_not_unlocked_or_in_restart(trans);
3282 return trans->restart_count;
3283}
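
/*
 * Illustrative retry loop (sketch only; do_stuff() stands for the caller's
 * transactional work, and most callers use wrappers such as
 * lockrestart_do()/commit_do() that expand to this pattern):
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_stuff(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */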
3284
3285const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3286
3287unsigned bch2_trans_get_fn_idx(const char *fn)
3288{
3289 for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3290 if (!bch2_btree_transaction_fns[i] ||
3291 bch2_btree_transaction_fns[i] == fn) {
3292 bch2_btree_transaction_fns[i] = fn;
3293 return i;
3294 }
3295
3296 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3297 return 0;
3298}
3299
3300struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3301 __acquires(&c->btree_trans_barrier)
3302{
3303 struct btree_trans *trans;
3304
3305 if (IS_ENABLED(__KERNEL__)) {
3306 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3307 if (trans) {
3308 memset(trans, 0, offsetof(struct btree_trans, list));
3309 goto got_trans;
3310 }
3311 }
3312
3313 trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3314 memset(trans, 0, sizeof(*trans));
3315
3316 seqmutex_lock(&c->btree_trans_lock);
3317 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3318 struct btree_trans *pos;
3319 pid_t pid = current->pid;
3320
3321 trans->locking_wait.task = current;
3322
3323 list_for_each_entry(pos, &c->btree_trans_list, list) {
3324 struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3325 /*
3326 * We'd much prefer to be stricter here and completely
3327 * disallow multiple btree_trans in the same thread -
3328 * but the data move path calls bch2_write when we
3329 * already have a btree_trans initialized.
3330 */
3331 BUG_ON(pos_task &&
3332 pid == pos_task->pid &&
3333 pos->locked);
3334 }
3335 }
3336
3337 list_add(&trans->list, &c->btree_trans_list);
3338 seqmutex_unlock(&c->btree_trans_lock);
3339got_trans:
3340 trans->c = c;
3341 trans->last_begin_time = local_clock();
3342 trans->fn_idx = fn_idx;
3343 trans->locking_wait.task = current;
3344 trans->journal_replay_not_finished =
3345 unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3346 atomic_inc_not_zero(&c->journal_keys.ref);
3347 trans->nr_paths = ARRAY_SIZE(trans->_paths);
3348 trans->paths_allocated = trans->_paths_allocated;
3349 trans->sorted = trans->_sorted;
3350 trans->paths = trans->_paths;
3351 trans->updates = trans->_updates;
3352
3353 *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3354
3355 trans->paths_allocated[0] = 1;
3356
3357 static struct lock_class_key lockdep_key;
3358 lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3359
3360 if (fn_idx < BCH_TRANSACTIONS_NR) {
3361 trans->fn = bch2_btree_transaction_fns[fn_idx];
3362
3363 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3364
3365 if (s->max_mem) {
3366 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3367
3368 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3369 if (likely(trans->mem))
3370 trans->mem_bytes = expected_mem_bytes;
3371 }
3372
3373 trans->nr_paths_max = s->nr_max_paths;
3374 trans->journal_entries_size = s->journal_entries_size;
3375 }
3376
3377 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3378 trans->srcu_lock_time = jiffies;
3379 trans->srcu_held = true;
3380 trans_set_locked(trans, false);
3381
3382 closure_init_stack_release(&trans->ref);
3383 return trans;
3384}
3385
3386static void check_btree_paths_leaked(struct btree_trans *trans)
3387{
3388#ifdef CONFIG_BCACHEFS_DEBUG
3389 struct bch_fs *c = trans->c;
3390 struct btree_path *path;
3391 unsigned i;
3392
3393 trans_for_each_path(trans, path, i)
3394 if (path->ref)
3395 goto leaked;
3396 return;
3397leaked:
3398 bch_err(c, "btree paths leaked from %s!", trans->fn);
3399 trans_for_each_path(trans, path, i)
3400 if (path->ref)
3401 printk(KERN_ERR " btree %s %pS\n",
3402 bch2_btree_id_str(path->btree_id),
3403 (void *) path->ip_allocated);
3404 /* Be noisy about this: */
3405 bch2_fatal_error(c);
3406#endif
3407}
3408
3409void bch2_trans_put(struct btree_trans *trans)
3410 __releases(&c->btree_trans_barrier)
3411{
3412 struct bch_fs *c = trans->c;
3413
3414 if (trans->restarted)
3415 bch2_trans_in_restart_error(trans);
3416
3417 bch2_trans_unlock(trans);
3418
3419 trans_for_each_update(trans, i)
3420 __btree_path_put(trans, trans->paths + i->path, true);
3421 trans->nr_updates = 0;
3422
3423 check_btree_paths_leaked(trans);
3424
3425 if (trans->srcu_held) {
3426 check_srcu_held_too_long(trans);
3427 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3428 }
3429
3430 if (unlikely(trans->journal_replay_not_finished))
3431 bch2_journal_keys_put(c);
3432
3433 /*
3434 * trans->ref protects trans->locking_wait.task and the btree paths array;
3435 * used by the cycle detector
3436 */
3437 closure_return_sync(&trans->ref);
3438 trans->locking_wait.task = NULL;
3439
3440#ifdef CONFIG_BCACHEFS_DEBUG
3441 darray_exit(&trans->last_restarted_trace);
3442#endif
3443
3444 unsigned long *paths_allocated = trans->paths_allocated;
3445 trans->paths_allocated = NULL;
3446 trans->paths = NULL;
3447
3448 if (paths_allocated != trans->_paths_allocated)
3449 kvfree_rcu_mightsleep(paths_allocated);
3450
3451 if (trans->used_mempool)
3452 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3453 else
3454 kfree(trans->mem);
3455
3456 /* Userspace doesn't have a real percpu implementation: */
3457 if (IS_ENABLED(__KERNEL__))
3458 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3459
3460 if (trans) {
3461 seqmutex_lock(&c->btree_trans_lock);
3462 list_del(&trans->list);
3463 seqmutex_unlock(&c->btree_trans_lock);
3464
3465 mempool_free(trans, &c->btree_trans_pool);
3466 }
3467}
3468
3469bool bch2_current_has_btree_trans(struct bch_fs *c)
3470{
3471 seqmutex_lock(&c->btree_trans_lock);
3472 struct btree_trans *trans;
3473 bool ret = false;
3474 list_for_each_entry(trans, &c->btree_trans_list, list)
3475 if (trans->locking_wait.task == current &&
3476 trans->locked) {
3477 ret = true;
3478 break;
3479 }
3480 seqmutex_unlock(&c->btree_trans_lock);
3481 return ret;
3482}
3483
3484static void __maybe_unused
3485bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3486 struct btree_bkey_cached_common *b)
3487{
3488 struct six_lock_count c = six_lock_counts(&b->lock);
3489 struct task_struct *owner;
3490 pid_t pid;
3491
3492 rcu_read_lock();
3493 owner = READ_ONCE(b->lock.owner);
3494 pid = owner ? owner->pid : 0;
3495 rcu_read_unlock();
3496
3497 prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
3498 bch2_btree_id_to_text(out, b->btree_id);
3499 prt_printf(out, " l=%u:", b->level);
3500 bch2_bpos_to_text(out, btree_node_pos(b));
3501
3502 prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3503 c.n[0], c.n[1], c.n[2], pid);
3504}
3505
3506void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3507{
3508 struct btree_bkey_cached_common *b;
3509 static char lock_types[] = { 'r', 'i', 'w' };
3510 struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3511 unsigned l, idx;
3512
3513 /* before rcu_read_lock(): */
3514 bch2_printbuf_make_room(out, 4096);
3515
3516 if (!out->nr_tabstops) {
3517 printbuf_tabstop_push(out, 16);
3518 printbuf_tabstop_push(out, 32);
3519 }
3520
3521 prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3522
3523 /* trans->paths is rcu protected vs. freeing */
3524 rcu_read_lock();
3525 out->atomic++;
3526
3527 struct btree_path *paths = rcu_dereference(trans->paths);
3528 if (!paths)
3529 goto out;
3530
3531 unsigned long *paths_allocated = trans_paths_allocated(paths);
3532
3533 trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3534 struct btree_path *path = paths + idx;
3535 if (!path->nodes_locked)
3536 continue;
3537
3538 prt_printf(out, " path %u %c ",
3539 idx,
3540 path->cached ? 'c' : 'b');
3541 bch2_btree_id_to_text(out, path->btree_id);
3542 prt_printf(out, " l=%u:", path->level);
3543 bch2_bpos_to_text(out, path->pos);
3544 prt_newline(out);
3545
3546 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3547 if (btree_node_locked(path, l) &&
3548 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3549 prt_printf(out, " %c l=%u ",
3550 lock_types[btree_node_locked_type(path, l)], l);
3551 bch2_btree_bkey_cached_common_to_text(out, b);
3552 prt_newline(out);
3553 }
3554 }
3555 }
3556
3557 b = READ_ONCE(trans->locking);
3558 if (b) {
3559 prt_printf(out, " blocked for %lluus on\n",
3560 div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3561 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
3562 bch2_btree_bkey_cached_common_to_text(out, b);
3563 prt_newline(out);
3564 }
3565out:
3566 --out->atomic;
3567 rcu_read_unlock();
3568}
3569
3570void bch2_fs_btree_iter_exit(struct bch_fs *c)
3571{
3572 struct btree_transaction_stats *s;
3573 struct btree_trans *trans;
3574 int cpu;
3575
3576 if (c->btree_trans_bufs)
3577 for_each_possible_cpu(cpu) {
3578 struct btree_trans *trans =
3579 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3580
3581 if (trans) {
3582 seqmutex_lock(&c->btree_trans_lock);
3583 list_del(&trans->list);
3584 seqmutex_unlock(&c->btree_trans_lock);
3585 }
3586 kfree(trans);
3587 }
3588 free_percpu(c->btree_trans_bufs);
3589
3590 trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3591 if (trans)
3592 panic("%s leaked btree_trans\n", trans->fn);
3593
3594 for (s = c->btree_transaction_stats;
3595 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3596 s++) {
3597 kfree(s->max_paths_text);
3598 bch2_time_stats_exit(&s->lock_hold_times);
3599 }
3600
3601 if (c->btree_trans_barrier_initialized) {
3602 synchronize_srcu_expedited(&c->btree_trans_barrier);
3603 cleanup_srcu_struct(&c->btree_trans_barrier);
3604 }
3605 mempool_exit(&c->btree_trans_mem_pool);
3606 mempool_exit(&c->btree_trans_pool);
3607}
3608
3609void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3610{
3611 struct btree_transaction_stats *s;
3612
3613 for (s = c->btree_transaction_stats;
3614 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3615 s++) {
3616 bch2_time_stats_init(&s->duration);
3617 bch2_time_stats_init(&s->lock_hold_times);
3618 mutex_init(&s->lock);
3619 }
3620
3621 INIT_LIST_HEAD(&c->btree_trans_list);
3622 seqmutex_init(&c->btree_trans_lock);
3623}
3624
3625int bch2_fs_btree_iter_init(struct bch_fs *c)
3626{
3627 int ret;
3628
3629 c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3630 if (!c->btree_trans_bufs)
3631 return -ENOMEM;
3632
3633 ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3634 sizeof(struct btree_trans)) ?:
3635 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3636 BTREE_TRANS_MEM_MAX) ?:
3637 init_srcu_struct(&c->btree_trans_barrier);
3638 if (ret)
3639 return ret;
3640
3641 /*
3642 * static annotation (hackily done) for lock ordering of reclaim vs.
3643 * btree node locks:
3644 */
3645#ifdef CONFIG_LOCKDEP
3646 fs_reclaim_acquire(GFP_KERNEL);
3647 struct btree_trans *trans = bch2_trans_get(c);
3648 trans_set_locked(trans, false);
3649 bch2_trans_put(trans);
3650 fs_reclaim_release(GFP_KERNEL);
3651#endif
3652
3653 c->btree_trans_barrier_initialized = true;
3654 return 0;
3656}