// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
	lockdep_set_novalidate_class(&b->lock);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
#if 0
	// Re-enable when lock_class_is_held() is merged:
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
#endif
}
#endif

/* Btree node locking: */

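/*
 * Count how many times the btree node @b is locked by paths in @trans, broken
 * out by six lock type (read/intent/write); @skip, if non-NULL, is a path to
 * leave out of the count (typically the caller's own path).
 */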
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @node_want with type @lock_want:
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

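/*
 * A path through the waits-for graph: g[i + 1] is blocked on a lock held by
 * g[i]. The array is fixed size; hitting the limit is handled in
 * lock_graph_descend().
 */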
struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
	prt_newline(out);

	for (i = g->g; i < g->g + g->nr; i++)
		bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
	}
	prt_newline(out);
}

static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}

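/*
 * Check whether any transaction in the graph is no longer actually waiting on
 * the lock we recorded for it (i.e. we raced with it making progress); if so,
 * pop it and everything after it off the graph and return true, so the caller
 * can rescan instead of aborting anything.
 */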
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			while (g->g + g->nr > i)
				lock_graph_up(g);
			return true;
		}

	return false;
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

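/*
 * Rank how acceptable it is to abort @trans in order to break a lock cycle:
 * 0 means it must not be aborted; higher values make a better victim.
 */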
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

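/*
 * A cycle was found: pick the transaction with the highest abort preference
 * and restart it; if every member of the cycle holds nofail locks, complain
 * loudly and BUG. When called from debugfs (@cycle non-NULL) we only report
 * the cycle, we don't abort anything.
 */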
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:");
			prt_newline(&buf);
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		while (g->nr)
			lock_graph_up(g);
	return ret;
}

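/*
 * Add @trans, which is blocked on a lock held by the transaction currently at
 * the top of the graph, to the graph. If it's already in the graph we've
 * found a cycle and go break it; if the graph is full, either give up
 * (debugfs, or a nofail lock attempt) or restart the original transaction
 * with a recursion-limit error.
 */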
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle);
		}

	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}

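/*
 * Relies on the six lock type enum ordering (read = 0, intent = 1, write = 2):
 * read/read and read/intent pairs (sums <= 1) don't conflict; intent/intent,
 * and anything involving a write lock, do.
 */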
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}

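/*
 * Check whether @trans blocking on the lock it's currently waiting for would
 * deadlock: do a depth-first walk of the waits-for graph, following locks we
 * hold to the transactions blocked on them, and restart a transaction if a
 * cycle is found. Called from the six lock code via
 * bch2_six_check_for_deadlock(), and from debugfs with @cycle non-NULL, in
 * which case any cycle found is only reported.
 */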
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	struct btree_path *path;
	unsigned path_idx;
	int ret;

	if (trans->lock_must_abort) {
		if (cycle)
			return -1;

		trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	g.nr = 0;
	lock_graph_down(&g, trans);
next:
	if (!g.nr)
		return 0;

	top = &g.g[g.nr - 1];

	trans_for_each_path_safe_from(top->trans, path, path_idx, top->path_idx) {
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					return ret;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}

	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	struct btree_path *linked;
	unsigned i;
	int ret;

	/*
	 * XXX BIG FAT NOTICE
	 *
	 * Drop all read locks before taking a write lock:
	 *
	 * This is a hack, because bch2_btree_node_lock_write_nofail() is a
	 * hack - but by dropping read locks first, this should never fail, and
	 * we only use this in code paths where whatever read locks we've
	 * already taken are no longer needed:
	 */

	trans_for_each_path(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		for (i = 0; i < BTREE_MAX_DEPTH; i++)
			if (btree_node_read_locked(linked, i)) {
				btree_node_unlock(trans, linked, i);
				btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
			}
	}

	ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}

/* relock */

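/*
 * Relock (or, if @upgrade is set, upgrade to intent) every level of @path
 * from path->level up to path->locks_want. On failure, record the failed
 * level in @f, drop all of the path's locks and poison the node pointers at
 * and below the failed level, so bch2_btree_path_traverse() has to walk back
 * down past it. Returns true if the path ends up fully locked.
 */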
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade,
					struct get_locks_fail *f)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			fail_idx = l;

			if (f) {
				f->l	= l;
				f->b	= path->l[l].b;
			}
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */

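/*
 * Upgrade the lock at @level from read to intent (or re-take an intent lock
 * we dropped). Read locks we hold on the same node via other paths are
 * temporarily subtracted from the reader count so six_lock_tryupgrade() can
 * succeed; failing that, we try to piggyback on an intent lock already held
 * by another path.
 */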
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
				      struct btree_path *path, unsigned long trace_ip)
{
	struct get_locks_fail f;

	return btree_path_get_locks(trans, path, false, &f);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			     struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned new_locks_want,
					    struct get_locks_fail *f)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	return btree_path_get_locks(trans, path, true, f);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	struct btree_path *linked;

	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all)
		trans_for_each_path(trans, linked)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL);
			}

	return false;
}

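/*
 * Lower @path's locks_want to @new_locks_want, unlocking any levels at or
 * above the new locks_want; the lock at path->level itself is downgraded from
 * intent to read rather than dropped.
 */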
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);

	path->downgrade_seq++;
	trace_path_downgrade(trans, _RET_IP_, path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path)
		bch2_btree_path_downgrade(trans, path);
}

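/*
 * Re-take the locks on every path that's flagged should_be_locked, e.g. after
 * bch2_trans_unlock(); if any of them can't be relocked, the transaction is
 * restarted.
 */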
int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
			trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}
	return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_))
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
	return 0;
}

void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);
}

void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}

bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}

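/*
 * Take @lock without holding btree node locks: the transaction's btree locks
 * are dropped, the mutex is taken, then the btree locks are re-taken; if
 * relocking fails, the mutex is released and the restart error returned.
 */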
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

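/*
 * Assert that the locks actually held on @path are consistent with what
 * btree_lock_want() says the path should hold at each level.
 */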
void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify_locks(path);
}

#endif