// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

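/*
 * All btree node locks share a single lock class, and lockdep validation
 * is disabled below: the lock ordering cycles that btree node locks
 * naturally form would be false positives for lockdep. Deadlock
 * avoidance is instead handled by the cycle detector in this file (see
 * bch2_check_for_deadlock()):
 */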
void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
	lockdep_set_novalidate_class(&b->lock);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
#if 0
	// Re-enable when lock_class_is_held() is merged:
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
#endif
}
#endif

/* Btree node locking: */

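/*
 * Count the references @trans holds on node @b at @level, per lock type,
 * via btree paths other than @skip:
 */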
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;
	unsigned i;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path, i)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @node_want with type @lock_want
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

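/*
 * A chain through the waits-for graph: every transaction in g[] is
 * blocked on a lock, and each entry after the first is blocked on a lock
 * held by the entry before it. Encountering the same transaction twice
 * means we've found a cycle; depth is bounded by the size of g[], and
 * lock_graph_descend() handles hitting that limit:
 */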
struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);

	for (i = g->g; i < g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (!task)
			continue;

		bch2_btree_trans_to_text(out, i->trans);
		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
	}
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		struct task_struct *task = i->trans->locking_wait.task;
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", task ? task->pid : 0);
	}
	prt_newline(out);
}

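/*
 * Transactions in the graph are pinned via trans->ref (taken by
 * lock_graph_down(), dropped by lock_graph_up()) so they can't be freed
 * out from under us while we're walking their paths:
 */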
static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}

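/*
 * If some transaction in the graph is no longer waiting on the lock we
 * recorded it waiting on - it either acquired the lock or is now blocked
 * on a newer wait - the chain is stale: pop back to the first such entry
 * and return true:
 */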
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			while (g->g + g->nr > i)
				lock_graph_up(g);
			return true;
		}

	return false;
}

static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	count_event(c, trans_restart_would_deadlock);

	if (trace_trans_restart_would_deadlock_enabled()) {
		struct printbuf buf = PRINTBUF;

		buf.atomic++;
		print_cycle(&buf, g);

		trace_trans_restart_would_deadlock(trans, buf.buf);
		printbuf_exit(&buf);
	}
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_would_deadlock(g, i->trans);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

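/*
 * Rank how suitable a transaction is as the victim when breaking a
 * cycle: higher is better. 0 means it may not fail and must never be
 * aborted; transactions already in traverse_all are preferred victims,
 * and one waiting on a write lock is aborted only as a last resort:
 */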
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:\n");
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		while (g->nr)
			lock_graph_up(g);
	return ret;
}

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle);
		}

	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}

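/*
 * Relies on SIX_LOCK_read == 0, SIX_LOCK_intent == 1, SIX_LOCK_write == 2:
 * read+read (0) and read+intent (1) are the only compatible pairs, so any
 * sum greater than 1 is a conflict:
 */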
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}

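/*
 * Determine whether @trans, currently blocked on a lock, is part of a
 * cycle in the waits-for graph: depth-first traversal, following edges
 * from each blocked transaction to the transactions waiting on locks it
 * holds, breaking any cycle found by restarting one of the transactions
 * in it. If @cycle is non-NULL we're only checking, on behalf of
 * debugfs - the cycle is printed rather than broken:
 */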
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	btree_path_idx_t path_idx;
	int ret = 0;

	g.nr = 0;

	if (trans->lock_must_abort) {
		if (cycle)
			return -1;

		trace_would_deadlock(&g, trans);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	lock_graph_down(&g, trans);

	/* trans->paths is rcu protected vs. freeing */
	rcu_read_lock();
	if (cycle)
		cycle->atomic++;
next:
	if (!g.nr)
		goto out;

	top = &g.g[g.nr - 1];

	struct btree_path *paths = rcu_dereference(top->trans->paths);
	if (!paths)
		goto up;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
				     path_idx, top->path_idx) {
		struct btree_path *path = paths + path_idx;
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					goto out;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}
up:
	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
out:
	if (cycle)
		--cycle->atomic;
	rcu_read_unlock();
	return ret;
}

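/*
 * Hook called from the six lock code before a transaction goes to sleep
 * on a contended node lock - presumably as the should-sleep callback,
 * given the (struct six_lock *, void *) signature. Returns a transaction
 * restart error if sleeping would create a deadlock:
 */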
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	int ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}

/* relock */

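/*
 * Walk up from path->level, relocking (or upgrading to intent, if
 * @upgrade) every level up to path->locks_want. On failure the path is
 * fully unlocked and flagged for retraversal, and @f - if non-NULL -
 * records the last level and node that failed:
 */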
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade,
					struct get_locks_fail *f)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			fail_idx = l;

			if (f) {
				f->l = l;
				f->b = path->l[l].b;
			}
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */

bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

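	/*
	 * Same trick as __bch2_btree_node_lock_write(): hide the read locks
	 * this transaction holds on this node via other paths, so that
	 * six_lock_tryupgrade() only sees readers outside this transaction,
	 * then restore them afterwards:
	 */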
	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
	struct get_locks_fail f;

	bool ret = btree_path_get_locks(trans, path, false, &f);
	bch2_trans_verify_locks(trans);
	return ret;
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			     struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned new_locks_want,
					    struct get_locks_fail *f)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	bool ret = btree_path_get_locks(trans, path, true, f);
	bch2_trans_verify_locks(trans);
	return ret;
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
	if (ret)
		goto out;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path(trans, linked, i)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL);
			}
	}
out:
	bch2_trans_verify_locks(trans);
	return ret;
}

void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l, old_locks_want = path->locks_want;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);

	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path, i)
		if (path->ref)
			bch2_btree_path_downgrade(trans, path);
}

static inline void __bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
						  struct get_locks_fail *f, bool trace)
{
	if (!trace)
		goto out;

	if (trace_trans_restart_relock_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bpos_to_text(&buf, path->pos);
		prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
		if (IS_ERR_OR_NULL(f->b)) {
			prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
		} else {
			prt_printf(&buf, "%u", f->b->c.lock.seq);

			struct six_lock_count c =
				bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
			prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

			c = six_lock_counts(&f->b->c.lock);
			prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
		}

		trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_relock);
out:
	__bch2_trans_unlock(trans);
	bch2_trans_verify_locks(trans);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}

static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
{
	bch2_trans_verify_locks(trans);

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);
	if (unlikely(trans->locked))
		goto out;

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i) {
		struct get_locks_fail f;

		if (path->should_be_locked &&
		    !btree_path_get_locks(trans, path, false, &f))
			return bch2_trans_relock_fail(trans, path, &f, trace);
	}

	trans->locked = true;
out:
	bch2_trans_verify_locks(trans);
	return 0;
}

int bch2_trans_relock(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, true);
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, false);
}

void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
	__bch2_trans_unlock(trans);

	trans->locked = false;
	trans->last_unlock_ip = _RET_IP_;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	__bch2_trans_unlock(trans);

	trans->locked = false;
	trans->last_unlock_ip = _RET_IP_;
}

void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}

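/*
 * drop_locks_do() unlocks the transaction, evaluates the expression
 * (here taking @lock, with the comma expression yielding 0), then
 * relocks - so a nonzero @ret below is a relock failure, i.e. a
 * transaction restart, and we must not return holding the mutex:
 */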
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	/*
	 * A path may be uptodate and yet have nothing locked if and only if
	 * there is no node at path->level, which generally means we were
	 * iterating over all nodes and got to the end of the btree
	 */
	BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
	       btree_path_node(path, path->level) &&
	       !path->nodes_locked);

	if (!path->nodes_locked)
		return;

	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

static bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (path->nodes_locked)
			return true;
	return false;
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	if (!trans->locked) {
		BUG_ON(bch2_trans_locked(trans));
		return;
	}

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_verify_locks(path);
}

#endif