// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bbpos.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "trace.h"

#include <linux/prefetch.h>
#include <linux/sched/mm.h>

#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
do { \
	if (shrinker_counter) \
		bc->not_freed_##counter++; \
} while (0)

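/*
 * Expand the BTREE_FLAGS() x-macro into a NULL-terminated table of
 * human-readable flag names, indexed by flag bit:
 */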
const char * const bch2_btree_node_flags[] = {
#define x(f) #f,
	BTREE_FLAGS()
#undef x
	NULL
};

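/*
 * Size the btree node reserve: a baseline of 16 nodes, plus 8 while the first
 * root isn't yet known, plus 8 for every btree whose root has interior nodes
 * (level >= 1). The reserve is what lets node allocation - and thus splits and
 * interior updates - always make forward progress:
 */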
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
	unsigned i, reserve = 16;

	if (!c->btree_roots_known[0].b)
		reserve += 8;

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (r->b)
			reserve += min_t(unsigned, 1, r->b->c.level) * 8;
	}

	c->btree_cache.reserve = reserve;
}

static inline unsigned btree_cache_can_free(struct btree_cache *bc)
{
	return max_t(int, 0, bc->used - bc->reserve);
}

static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
{
	if (b->c.lock.readers)
		list_move(&b->list, &bc->freed_pcpu);
	else
		list_move(&b->list, &bc->freed_nonpcpu);
}

static void btree_node_data_free(struct bch_fs *c, struct btree *b)
{
	struct btree_cache *bc = &c->btree_cache;

	EBUG_ON(btree_node_write_in_flight(b));

	clear_btree_node_just_written(b);

	kvfree(b->data);
	b->data = NULL;
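	/*
	 * In userspace builds (bcachefs-tools compiles this file outside the
	 * kernel), aux_data is mmap()ed rather than kvmalloc()ed - see
	 * btree_node_data_alloc() below:
	 */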
#ifdef __KERNEL__
	kvfree(b->aux_data);
#else
	munmap(b->aux_data, btree_aux_data_bytes(b));
#endif
	b->aux_data = NULL;

	bc->used--;

	btree_node_to_freedlist(bc, b);
}

static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				   const void *obj)
{
	const struct btree *b = obj;
	const u64 *v = arg->key;

	return b->hash_val == *v ? 0 : 1;
}

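/*
 * The btree node hash table is keyed by btree_ptr_hash_val() of the node's
 * key, so a lookup goes straight from a btree pointer to the cached node:
 */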
static const struct rhashtable_params bch_btree_cache_params = {
	.head_offset		= offsetof(struct btree, hash),
	.key_offset		= offsetof(struct btree, hash_val),
	.key_len		= sizeof(u64),
	.obj_cmpfn		= bch2_btree_cache_cmp_fn,
	.automatic_shrinking	= true,
};

static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
	BUG_ON(b->data || b->aux_data);

	b->data = kvmalloc(btree_buf_bytes(b), gfp);
	if (!b->data)
		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
#ifdef __KERNEL__
	b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
#else
	b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
			   PROT_READ|PROT_WRITE|PROT_EXEC,
			   MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (b->aux_data == MAP_FAILED)
		b->aux_data = NULL;
#endif
	if (!b->aux_data) {
		kvfree(b->data);
		b->data = NULL;
		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
	}

	return 0;
}

static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
{
	struct btree *b;

	b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	bkey_btree_ptr_init(&b->key);
	INIT_LIST_HEAD(&b->list);
	INIT_LIST_HEAD(&b->write_blocked);
	b->byte_order = ilog2(c->opts.btree_node_size);
	return b;
}

struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
{
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b;

	b = __btree_node_mem_alloc(c, GFP_KERNEL);
	if (!b)
		return NULL;

	if (btree_node_data_alloc(c, b, GFP_KERNEL)) {
		kfree(b);
		return NULL;
	}

	bch2_btree_lock_init(&b->c, 0);

	bc->used++;
	list_add(&b->list, &bc->freeable);
	return b;
}

void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
{
	mutex_lock(&c->btree_cache.lock);
	list_move(&b->list, &c->btree_cache.freeable);
	mutex_unlock(&c->btree_cache.lock);

	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);
}

/* Btree in memory cache - hash table */

void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
	int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);

	BUG_ON(ret);

	/* Cause future lookups for this node to fail: */
	b->hash_val = 0;

	if (b->c.btree_id < BTREE_ID_NR)
		--bc->used_by_btree[b->c.btree_id];
}

int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
{
	BUG_ON(b->hash_val);
	b->hash_val = btree_ptr_hash_val(&b->key);

	int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
						bch_btree_cache_params);
	if (!ret && b->c.btree_id < BTREE_ID_NR)
		bc->used_by_btree[b->c.btree_id]++;
	return ret;
}

int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
				unsigned level, enum btree_id id)
{
	int ret;

	b->c.level	= level;
	b->c.btree_id	= id;

	mutex_lock(&bc->lock);
	ret = __bch2_btree_node_hash_insert(bc, b);
	if (!ret)
		list_add_tail(&b->list, &bc->live);
	mutex_unlock(&bc->lock);

	return ret;
}

void bch2_btree_node_update_key_early(struct btree_trans *trans,
				      enum btree_id btree, unsigned level,
				      struct bkey_s_c old, struct bkey_i *new)
{
	struct bch_fs *c = trans->c;
	struct btree *b;
	struct bkey_buf tmp;
	int ret;

	bch2_bkey_buf_init(&tmp);
	bch2_bkey_buf_reassemble(&tmp, c, old);

	b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
	if (!IS_ERR_OR_NULL(b)) {
		mutex_lock(&c->btree_cache.lock);

		bch2_btree_node_hash_remove(&c->btree_cache, b);

		bkey_copy(&b->key, new);
		ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
		BUG_ON(ret);

		mutex_unlock(&c->btree_cache.lock);
		six_unlock_read(&b->c.lock);
	}

	bch2_bkey_buf_exit(&tmp, c);
}

__flatten
static inline struct btree *btree_cache_find(struct btree_cache *bc,
					     const struct bkey_i *k)
{
	u64 v = btree_ptr_hash_val(k);

	return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params);
}

/*
 * Try to make a btree node reclaimable: take intent and write trylocks and,
 * if @flush is set, write out dirty contents. Returns 0 with the node locked
 * on success, -BCH_ERR_ENOMEM_btree_node_reclaim if the node can't be freed
 * right now.
 */
static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter)
{
	struct btree_cache *bc = &c->btree_cache;
	int ret = 0;

	lockdep_assert_held(&bc->lock);

	struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p);

	u64 mask = b->c.level
		? bc->pinned_nodes_interior_mask
		: bc->pinned_nodes_leaf_mask;

	if ((mask & BIT_ULL(b->c.btree_id)) &&
	    bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
	    bbpos_cmp(bc->pinned_nodes_end, pos) >= 0)
		return -BCH_ERR_ENOMEM_btree_node_reclaim;

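	/*
	 * If IO is in flight and we're allowed to flush, wait for it to
	 * complete and retake the trylocks from scratch:
	 */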
wait_on_io:
	if (b->flags & ((1U << BTREE_NODE_dirty)|
			(1U << BTREE_NODE_read_in_flight)|
			(1U << BTREE_NODE_write_in_flight))) {
		if (!flush) {
			if (btree_node_dirty(b))
				BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
			else if (btree_node_read_in_flight(b))
				BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
			else if (btree_node_write_in_flight(b))
				BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
			return -BCH_ERR_ENOMEM_btree_node_reclaim;
		}

		/* XXX: waiting on IO with btree cache lock held */
		bch2_btree_node_wait_on_read(b);
		bch2_btree_node_wait_on_write(b);
	}

	if (!six_trylock_intent(&b->c.lock)) {
		BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
		return -BCH_ERR_ENOMEM_btree_node_reclaim;
	}

	if (!six_trylock_write(&b->c.lock)) {
		BTREE_CACHE_NOT_FREED_INCREMENT(lock_write);
		goto out_unlock_intent;
	}

	/* recheck under lock */
	if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
			(1U << BTREE_NODE_write_in_flight))) {
		if (!flush) {
			if (btree_node_read_in_flight(b))
				BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
			else if (btree_node_write_in_flight(b))
				BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
			goto out_unlock;
		}
		six_unlock_write(&b->c.lock);
		six_unlock_intent(&b->c.lock);
		goto wait_on_io;
	}

	if (btree_node_noevict(b)) {
		BTREE_CACHE_NOT_FREED_INCREMENT(noevict);
		goto out_unlock;
	}
	if (btree_node_write_blocked(b)) {
		BTREE_CACHE_NOT_FREED_INCREMENT(write_blocked);
		goto out_unlock;
	}
	if (btree_node_will_make_reachable(b)) {
		BTREE_CACHE_NOT_FREED_INCREMENT(will_make_reachable);
		goto out_unlock;
	}

	if (btree_node_dirty(b)) {
		if (!flush) {
			BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
			goto out_unlock;
		}
		/*
		 * Use the underscore version because we don't want to compact
		 * bsets after the write - this node is about to be evicted.
		 * The exception is btree verify mode, whose checks run out of
		 * the post-write cleanup:
		 */
		if (bch2_verify_btree_ondisk)
			bch2_btree_node_write(c, b, SIX_LOCK_intent,
					      BTREE_WRITE_cache_reclaim);
		else
			__bch2_btree_node_write(c, b,
						BTREE_WRITE_cache_reclaim);

		six_unlock_write(&b->c.lock);
		six_unlock_intent(&b->c.lock);
		goto wait_on_io;
	}
out:
	if (b->hash_val && !ret)
		trace_and_count(c, btree_cache_reap, c, b);
	return ret;
out_unlock:
	six_unlock_write(&b->c.lock);
out_unlock_intent:
	six_unlock_intent(&b->c.lock);
	ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
	goto out;
}

static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter)
{
	return __btree_node_reclaim(c, b, false, shrinker_counter);
}

static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
{
	return __btree_node_reclaim(c, b, true, false);
}

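/*
 * Shrinker scan: walk the freeable list first (leaving a few nodes behind so
 * a btree split needn't hit the system allocator), then the live list,
 * freeing nodes that btree_node_reclaim() lets us take. When most of the
 * cache is dirty, also issue writes so future scans can make progress:
 */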
static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b, *t;
	unsigned long nr = sc->nr_to_scan;
	unsigned long can_free = 0;
	unsigned long freed = 0;
	unsigned long touched = 0;
	unsigned i, flags;
	unsigned long ret = SHRINK_STOP;
	bool trigger_writes = atomic_read(&bc->dirty) + nr >=
		bc->used * 3 / 4;

	if (bch2_btree_shrinker_disabled)
		return SHRINK_STOP;

	mutex_lock(&bc->lock);
	flags = memalloc_nofs_save();

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	can_free = btree_cache_can_free(bc);
	nr = min_t(unsigned long, nr, can_free);

	i = 0;
	list_for_each_entry_safe(b, t, &bc->freeable, list) {
		/*
		 * Leave a few nodes on the freeable list, so that a btree split
		 * won't have to hit the system allocator:
		 */
		if (++i <= 3)
			continue;

		touched++;

		if (touched >= nr)
			goto out;

		if (!btree_node_reclaim(c, b, true)) {
			btree_node_data_free(c, b);
			six_unlock_write(&b->c.lock);
			six_unlock_intent(&b->c.lock);
			freed++;
			bc->freed++;
		}
	}
restart:
	list_for_each_entry_safe(b, t, &bc->live, list) {
		touched++;

		if (btree_node_accessed(b)) {
			clear_btree_node_accessed(b);
			bc->not_freed_access_bit++;
		} else if (!btree_node_reclaim(c, b, true)) {
			freed++;
			btree_node_data_free(c, b);
			bc->freed++;

			bch2_btree_node_hash_remove(bc, b);
			six_unlock_write(&b->c.lock);
			six_unlock_intent(&b->c.lock);

			if (freed == nr)
				goto out_rotate;
		} else if (trigger_writes &&
			   btree_node_dirty(b) &&
			   !btree_node_will_make_reachable(b) &&
			   !btree_node_write_blocked(b) &&
			   six_trylock_read(&b->c.lock)) {
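			/*
			 * Move the list head to just after @b, so that the
			 * restarted scan below resumes with the node after it:
			 */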
			list_move(&bc->live, &b->list);
			mutex_unlock(&bc->lock);
			__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
			six_unlock_read(&b->c.lock);
			if (touched >= nr)
				goto out_nounlock;
			mutex_lock(&bc->lock);
			goto restart;
		}

		if (touched >= nr)
			break;
	}
out_rotate:
	if (&t->list != &bc->live)
		list_move_tail(&bc->live, &t->list);
out:
	mutex_unlock(&bc->lock);
out_nounlock:
	ret = freed;
	memalloc_nofs_restore(flags);
	trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
	return ret;
}

477
478static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
479 struct shrink_control *sc)
480{
481 struct bch_fs *c = shrink->private_data;
482 struct btree_cache *bc = &c->btree_cache;
483
484 if (bch2_btree_shrinker_disabled)
485 return 0;
486
487 return btree_cache_can_free(bc);
488}
489
490void bch2_fs_btree_cache_exit(struct bch_fs *c)
491{
492 struct btree_cache *bc = &c->btree_cache;
493 struct btree *b;
494 unsigned i, flags;
495
496 shrinker_free(bc->shrink);
497
498 /* vfree() can allocate memory: */
499 flags = memalloc_nofs_save();
500 mutex_lock(&bc->lock);
501
502 if (c->verify_data)
503 list_move(&c->verify_data->list, &bc->live);
504
505 kvfree(c->verify_ondisk);
506
507 for (i = 0; i < btree_id_nr_alive(c); i++) {
508 struct btree_root *r = bch2_btree_id_root(c, i);
509
510 if (r->b)
511 list_add(&r->b->list, &bc->live);
512 }
513
514 list_splice(&bc->freeable, &bc->live);
515
516 while (!list_empty(&bc->live)) {
517 b = list_first_entry(&bc->live, struct btree, list);
518
519 BUG_ON(btree_node_read_in_flight(b) ||
520 btree_node_write_in_flight(b));
521
522 btree_node_data_free(c, b);
523 }
524
525 BUG_ON(!bch2_journal_error(&c->journal) &&
526 atomic_read(&c->btree_cache.dirty));
527
528 list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
529
530 while (!list_empty(&bc->freed_nonpcpu)) {
531 b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
532 list_del(&b->list);
533 six_lock_exit(&b->c.lock);
534 kfree(b);
535 }
536
537 mutex_unlock(&bc->lock);
538 memalloc_nofs_restore(flags);
539
540 if (bc->table_init_done)
541 rhashtable_destroy(&bc->table);
542}
543
int bch2_fs_btree_cache_init(struct bch_fs *c)
{
	struct btree_cache *bc = &c->btree_cache;
	struct shrinker *shrink;
	unsigned i;
	int ret = 0;

	ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
	if (ret)
		goto err;

	bc->table_init_done = true;

	bch2_recalc_btree_reserve(c);

	for (i = 0; i < bc->reserve; i++)
		if (!__bch2_btree_node_mem_alloc(c))
			goto err;

	list_splice_init(&bc->live, &bc->freeable);

	mutex_init(&c->verify_lock);

	shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
	if (!shrink)
		goto err;
	bc->shrink = shrink;
	shrink->count_objects	= bch2_btree_cache_count;
	shrink->scan_objects	= bch2_btree_cache_scan;
	shrink->seeks		= 4;
	shrink->private_data	= c;
	shrinker_register(shrink);

	return 0;
err:
	return -BCH_ERR_ENOMEM_fs_btree_cache_init;
}

void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
{
	mutex_init(&bc->lock);
	INIT_LIST_HEAD(&bc->live);
	INIT_LIST_HEAD(&bc->freeable);
	INIT_LIST_HEAD(&bc->freed_pcpu);
	INIT_LIST_HEAD(&bc->freed_nonpcpu);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open-coded mutex (bc->alloc_lock) to ensure
 * that; bch2_btree_cache_cannibalize_lock() takes it. This means every time we
 * unlock the root of the btree, we need to release this lock if we have it
 * held.
 */
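/*
 * Typical usage, as a sketch (the real callers live in the btree update
 * paths):
 *
 *	ret = bch2_btree_cache_cannibalize_lock(trans, cl);
 *	if (ret)
 *		return ret;	// caller waits on @cl and retries
 *	b = bch2_btree_node_mem_alloc(trans, false);
 *	...
 *	bch2_btree_cache_cannibalize_unlock(trans);
 */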
void bch2_btree_cache_cannibalize_unlock(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;

	if (bc->alloc_lock == current) {
		trace_and_count(c, btree_cache_cannibalize_unlock, trans);
		bc->alloc_lock = NULL;
		closure_wake_up(&bc->alloc_wait);
	}
}

int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;
	struct task_struct *old;

	old = NULL;
	if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current)
		goto success;

	if (!cl) {
		trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
		return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
	}

	closure_wait(&bc->alloc_wait, cl);

	/* Try again, after adding ourselves to waitlist */
	old = NULL;
	if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) {
		/* We raced */
		closure_wake_up(&bc->alloc_wait);
		goto success;
	}

	trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
	return -BCH_ERR_btree_cache_cannibalize_lock_blocked;

success:
	trace_and_count(c, btree_cache_cannibalize_lock, trans);
	return 0;
}

static struct btree *btree_node_cannibalize(struct bch_fs *c)
{
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b;

	list_for_each_entry_reverse(b, &bc->live, list)
		if (!btree_node_reclaim(c, b, false))
			return b;

	while (1) {
		list_for_each_entry_reverse(b, &bc->live, list)
			if (!btree_node_write_and_reclaim(c, b))
				return b;

		/*
		 * Rare case: all nodes were intent-locked.
		 * Just busy-wait.
		 */
		WARN_ONCE(1, "btree cache cannibalize failed\n");
		cond_resched();
	}
}

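/*
 * Allocate (or repurpose) a btree node, in order of preference: reuse a
 * struct btree from the freed lists, swap in memory from a freeable node,
 * allocate fresh memory, and as a last resort cannibalize another cached
 * node (which requires holding the cannibalize lock):
 */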
struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;
	struct list_head *freed = pcpu_read_locks
		? &bc->freed_pcpu
		: &bc->freed_nonpcpu;
	struct btree *b, *b2;
	u64 start_time = local_clock();
	unsigned flags;

	flags = memalloc_nofs_save();
	mutex_lock(&bc->lock);

	/*
	 * We never free struct btree itself, just the memory that holds the
	 * on-disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, freed, list)
		if (!btree_node_reclaim(c, b, false)) {
			list_del_init(&b->list);
			goto got_node;
		}

	b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
	if (!b) {
		mutex_unlock(&bc->lock);
		bch2_trans_unlock(trans);
		b = __btree_node_mem_alloc(c, GFP_KERNEL);
		if (!b)
			goto err;
		mutex_lock(&bc->lock);
	}

	bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);

	BUG_ON(!six_trylock_intent(&b->c.lock));
	BUG_ON(!six_trylock_write(&b->c.lock));
got_node:

	/*
	 * btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there are any freed nodes there:
	 */
	list_for_each_entry(b2, &bc->freeable, list)
		if (!btree_node_reclaim(c, b2, false)) {
			swap(b->data, b2->data);
			swap(b->aux_data, b2->aux_data);
			btree_node_to_freedlist(bc, b2);
			six_unlock_write(&b2->c.lock);
			six_unlock_intent(&b2->c.lock);
			goto got_mem;
		}

	mutex_unlock(&bc->lock);

	if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
		bch2_trans_unlock(trans);
		if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
			goto err;
	}

	mutex_lock(&bc->lock);
	bc->used++;
got_mem:
	mutex_unlock(&bc->lock);

	BUG_ON(btree_node_hashed(b));
	BUG_ON(btree_node_dirty(b));
	BUG_ON(btree_node_write_in_flight(b));
out:
	b->flags		= 0;
	b->written		= 0;
	b->nsets		= 0;
	b->sib_u64s[0]		= 0;
	b->sib_u64s[1]		= 0;
	b->whiteout_u64s	= 0;
	bch2_btree_keys_init(b);
	set_btree_node_accessed(b);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
			       start_time);

	memalloc_nofs_restore(flags);

	int ret = bch2_trans_relock(trans);
	if (unlikely(ret)) {
		bch2_btree_node_to_freelist(c, b);
		return ERR_PTR(ret);
	}

	return b;
err:
	mutex_lock(&bc->lock);

	/* Try to cannibalize another cached btree node: */
	if (bc->alloc_lock == current) {
		b2 = btree_node_cannibalize(c);
		clear_btree_node_just_written(b2);
		bch2_btree_node_hash_remove(bc, b2);

		if (b) {
			swap(b->data, b2->data);
			swap(b->aux_data, b2->aux_data);
			btree_node_to_freedlist(bc, b2);
			six_unlock_write(&b2->c.lock);
			six_unlock_intent(&b2->c.lock);
		} else {
			b = b2;
			list_del_init(&b->list);
		}

		mutex_unlock(&bc->lock);

		trace_and_count(c, btree_cache_cannibalize, trans);
		goto out;
	}

	mutex_unlock(&bc->lock);
	memalloc_nofs_restore(flags);
	return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
}

/* Slowpath, don't want it inlined into btree_iter_traverse() */
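/*
 * Allocate a cache node for @k, hash it, and kick off the read. Returns the
 * node, locked per @lock_type, on success; NULL if we raced with another fill
 * (or when called with a path and @sync false, since the node is unlocked
 * once the async read is issued); or an ERR_PTR() on error.
 */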
static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
						   struct btree_path *path,
						   const struct bkey_i *k,
						   enum btree_id btree_id,
						   unsigned level,
						   enum six_lock_type lock_type,
						   bool sync)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b;

	if (unlikely(level >= BTREE_MAX_DEPTH)) {
		int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
						 level, BTREE_MAX_DEPTH);
		return ERR_PTR(ret);
	}

	if (unlikely(!bkey_is_btree_ptr(&k->k))) {
		struct printbuf buf = PRINTBUF;
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));

		int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf);
		printbuf_exit(&buf);
		return ERR_PTR(ret);
	}

	if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) {
		struct printbuf buf = PRINTBUF;
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));

		int ret = bch2_fs_topology_error(c, "attempting to get btree node with too big key %s", buf.buf);
		printbuf_exit(&buf);
		return ERR_PTR(ret);
	}

	/*
	 * Parent node must be locked, else we could read in a btree node that's
	 * been freed:
	 */
	if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
		trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
	}

	b = bch2_btree_node_mem_alloc(trans, level != 0);

	if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
		if (!path)
			return b;

		trans->memory_allocation_failure = true;
		trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
	}

	if (IS_ERR(b))
		return b;

	bkey_copy(&b->key, k);
	if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
		/* raced with another fill: */

		/* mark as unhashed... */
		b->hash_val = 0;

		mutex_lock(&bc->lock);
		list_add(&b->list, &bc->freeable);
		mutex_unlock(&bc->lock);

		six_unlock_write(&b->c.lock);
		six_unlock_intent(&b->c.lock);
		return NULL;
	}

	set_btree_node_read_in_flight(b);
	six_unlock_write(&b->c.lock);

	if (path) {
		u32 seq = six_lock_seq(&b->c.lock);

		/* Unlock before doing IO: */
		six_unlock_intent(&b->c.lock);
		bch2_trans_unlock_noassert(trans);

		bch2_btree_node_read(trans, b, sync);

		int ret = bch2_trans_relock(trans);
		if (ret)
			return ERR_PTR(ret);

		if (!sync)
			return NULL;

		if (!six_relock_type(&b->c.lock, lock_type, seq))
			b = NULL;
	} else {
		bch2_btree_node_read(trans, b, sync);
		if (lock_type == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	}

	return b;
}

static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
{
	struct printbuf buf = PRINTBUF;

	if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
		return;

	prt_printf(&buf,
		   "btree node header doesn't match ptr\n"
		   "btree %s level %u\n"
		   "ptr: ",
		   bch2_btree_id_str(b->c.btree_id), b->c.level);
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

	prt_printf(&buf, "\nheader: btree %s level %llu\n"
		   "min ",
		   bch2_btree_id_str(BTREE_NODE_ID(b->data)),
		   BTREE_NODE_LEVEL(b->data));
	bch2_bpos_to_text(&buf, b->data->min_key);

	prt_printf(&buf, "\nmax ");
	bch2_bpos_to_text(&buf, b->data->max_key);

	bch2_fs_topology_error(c, "%s", buf.buf);

	printbuf_exit(&buf);
}

static inline void btree_check_header(struct bch_fs *c, struct btree *b)
{
	if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
	    b->c.level != BTREE_NODE_LEVEL(b->data) ||
	    !bpos_eq(b->data->max_key, b->key.k.p) ||
	    (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
	     !bpos_eq(b->data->min_key,
		      bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
		btree_bad_header(c, b);
}

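/*
 * Slow path of bch2_btree_node_get(): look the node up in the hash table
 * (filling it from disk on a miss), lock it, then revalidate - the node may
 * have been evicted and reused while we waited on the lock:
 */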
static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
					   const struct bkey_i *k, unsigned level,
					   enum six_lock_type lock_type,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b;
	bool need_relock = false;
	int ret;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
retry:
	b = btree_cache_find(bc, k);
	if (unlikely(!b)) {
		/*
		 * We must have the parent locked to call bch2_btree_node_fill(),
		 * else we could read in a btree node from disk that's been
		 * freed:
		 */
		b = bch2_btree_node_fill(trans, path, k, path->btree_id,
					 level, lock_type, true);
		need_relock = true;

		/* We raced and found the btree node in the cache */
		if (!b)
			goto retry;

		if (IS_ERR(b))
			return b;
	} else {
		if (btree_node_read_locked(path, level + 1))
			btree_node_unlock(trans, path, level + 1);

		ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			return ERR_PTR(ret);

		BUG_ON(ret);

		if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
			     b->c.level != level ||
			     race_fault())) {
			six_unlock_type(&b->c.lock, lock_type);
			if (bch2_btree_node_relock(trans, path, level + 1))
				goto retry;

			trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
		}

		/* avoid atomic set bit if it's not needed: */
		if (!btree_node_accessed(b))
			set_btree_node_accessed(b);
	}

	if (unlikely(btree_node_read_in_flight(b))) {
		u32 seq = six_lock_seq(&b->c.lock);

		six_unlock_type(&b->c.lock, lock_type);
		bch2_trans_unlock(trans);
		need_relock = true;

		bch2_btree_node_wait_on_read(b);

		ret = bch2_trans_relock(trans);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * should_be_locked is not set on this path yet, so we need to
		 * relock it specifically:
		 */
		if (!six_relock_type(&b->c.lock, lock_type, seq))
			goto retry;
	}

	if (unlikely(need_relock)) {
		ret = bch2_trans_relock(trans) ?:
			bch2_btree_path_relock_intent(trans, path);
		if (ret) {
			six_unlock_type(&b->c.lock, lock_type);
			return ERR_PTR(ret);
		}
	}

	prefetch(b->aux_data);

	for_each_bset(b, t) {
		void *p = (u64 *) b->aux_data + t->aux_data_offset;

		prefetch(p + L1_CACHE_BYTES * 0);
		prefetch(p + L1_CACHE_BYTES * 1);
		prefetch(p + L1_CACHE_BYTES * 2);
	}

	if (unlikely(btree_node_read_error(b))) {
		six_unlock_type(&b->c.lock, lock_type);
		return ERR_PTR(-BCH_ERR_btree_node_read_error);
	}

	EBUG_ON(b->c.btree_id != path->btree_id);
	EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
	btree_check_header(c, b);

	return b;
}

/**
 * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * @trans:	btree transaction object
 * @path:	btree_path being traversed
 * @k:		pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
 * @level:	level of btree node being looked up (0 == leaf node)
 * @lock_type:	SIX_LOCK_read or SIX_LOCK_intent
 * @trace_ip:	ip of caller of btree iterator code (i.e. caller of
 *		bch2_btree_iter_peek())
 *
 * The btree node will have either a read or an intent lock held, depending on
 * @lock_type.
 *
 * Returns: btree node or ERR_PTR()
 */
struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
				  const struct bkey_i *k, unsigned level,
				  enum six_lock_type lock_type,
				  unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b;
	int ret;

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	b = btree_node_mem_ptr(k);

	/*
	 * Check b->hash_val _before_ calling btree_node_lock() - this might not
	 * be the node we want anymore, and trying to lock the wrong node could
	 * cause an unnecessary transaction restart:
	 */
	if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
		     !b ||
		     b->hash_val != btree_ptr_hash_val(k)))
		return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		return ERR_PTR(ret);

	BUG_ON(ret);

	if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
		     b->c.level != level ||
		     race_fault())) {
		six_unlock_type(&b->c.lock, lock_type);
		if (bch2_btree_node_relock(trans, path, level + 1))
			return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);

		trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
	}

	if (unlikely(btree_node_read_in_flight(b))) {
		six_unlock_type(&b->c.lock, lock_type);
		return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
	}

	prefetch(b->aux_data);

	for_each_bset(b, t) {
		void *p = (u64 *) b->aux_data + t->aux_data_offset;

		prefetch(p + L1_CACHE_BYTES * 0);
		prefetch(p + L1_CACHE_BYTES * 1);
		prefetch(p + L1_CACHE_BYTES * 2);
	}

	/* avoid atomic set bit if it's not needed: */
	if (!btree_node_accessed(b))
		set_btree_node_accessed(b);

	if (unlikely(btree_node_read_error(b))) {
		six_unlock_type(&b->c.lock, lock_type);
		return ERR_PTR(-BCH_ERR_btree_node_read_error);
	}

	EBUG_ON(b->c.btree_id != path->btree_id);
	EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
	btree_check_header(c, b);

	return b;
}

struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
					 const struct bkey_i *k,
					 enum btree_id btree_id,
					 unsigned level,
					 bool nofill)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b;
	int ret;

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (c->opts.btree_node_mem_ptr_optimization) {
		b = btree_node_mem_ptr(k);
		if (b)
			goto lock_node;
	}
retry:
	b = btree_cache_find(bc, k);
	if (unlikely(!b)) {
		if (nofill)
			goto out;

		b = bch2_btree_node_fill(trans, NULL, k, btree_id,
					 level, SIX_LOCK_read, true);

		/* We raced and found the btree node in the cache */
		if (!b)
			goto retry;

		if (IS_ERR(b) &&
		    !bch2_btree_cache_cannibalize_lock(trans, NULL))
			goto retry;

		if (IS_ERR(b))
			goto out;
	} else {
lock_node:
		ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			return ERR_PTR(ret);

		BUG_ON(ret);

		if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
			     b->c.btree_id != btree_id ||
			     b->c.level != level)) {
			six_unlock_read(&b->c.lock);
			goto retry;
		}
	}

	/* XXX: waiting on IO with btree locks held: */
	__bch2_btree_node_wait_on_read(b);

	prefetch(b->aux_data);

	for_each_bset(b, t) {
		void *p = (u64 *) b->aux_data + t->aux_data_offset;

		prefetch(p + L1_CACHE_BYTES * 0);
		prefetch(p + L1_CACHE_BYTES * 1);
		prefetch(p + L1_CACHE_BYTES * 2);
	}

	/* avoid atomic set bit if it's not needed: */
	if (!btree_node_accessed(b))
		set_btree_node_accessed(b);

	if (unlikely(btree_node_read_error(b))) {
		six_unlock_read(&b->c.lock);
		b = ERR_PTR(-BCH_ERR_btree_node_read_error);
		goto out;
	}

	EBUG_ON(b->c.btree_id != btree_id);
	EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
	btree_check_header(c, b);
out:
	bch2_btree_cache_cannibalize_unlock(trans);
	return b;
}

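/*
 * Warm the cache with an async read: the node is unlocked as soon as the read
 * has been issued, and any read error surfaces on the later real lookup:
 */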
int bch2_btree_node_prefetch(struct btree_trans *trans,
			     struct btree_path *path,
			     const struct bkey_i *k,
			     enum btree_id btree_id, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;

	BUG_ON(path && !btree_node_locked(path, level + 1));
	BUG_ON(level >= BTREE_MAX_DEPTH);

	struct btree *b = btree_cache_find(bc, k);
	if (b)
		return 0;

	b = bch2_btree_node_fill(trans, path, k, btree_id,
				 level, SIX_LOCK_read, false);
	if (!IS_ERR_OR_NULL(b))
		six_unlock_read(&b->c.lock);
	return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b);
}

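/*
 * Flush any dirty contents of @k's node and drop it from the cache. Loops via
 * wait_on_io because we can't block on IO while holding the node's locks:
 */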
void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
{
	struct bch_fs *c = trans->c;
	struct btree_cache *bc = &c->btree_cache;
	struct btree *b;

	b = btree_cache_find(bc, k);
	if (!b)
		return;

	BUG_ON(b == btree_node_root(trans->c, b));
wait_on_io:
	/* not allowed to wait on io with btree locks held: */

	/* XXX we're called from btree_gc which will be holding other btree
	 * nodes locked
	 */
	__bch2_btree_node_wait_on_read(b);
	__bch2_btree_node_wait_on_write(b);

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
	if (unlikely(b->hash_val != btree_ptr_hash_val(k)))
		goto out;

	if (btree_node_dirty(b)) {
		__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
		six_unlock_write(&b->c.lock);
		six_unlock_intent(&b->c.lock);
		goto wait_on_io;
	}

	BUG_ON(btree_node_dirty(b));

	mutex_lock(&bc->lock);
	btree_node_data_free(c, b);
	bch2_btree_node_hash_remove(bc, b);
	mutex_unlock(&bc->lock);
out:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);
}

const char *bch2_btree_id_str(enum btree_id btree)
{
	return btree < BTREE_ID_NR ? __bch2_btree_ids[btree] : "(unknown)";
}

void bch2_btree_id_to_text(struct printbuf *out, enum btree_id btree)
{
	if (btree < BTREE_ID_NR)
		prt_str(out, __bch2_btree_ids[btree]);
	else
		prt_printf(out, "(unknown btree %u)", btree);
}

void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
{
	prt_printf(out, "%s level %u/%u\n  ",
		   bch2_btree_id_str(b->c.btree_id),
		   b->c.level,
		   bch2_btree_id_root(c, b->c.btree_id)->level);
	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
}

void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
{
	struct bset_stats stats;

	memset(&stats, 0, sizeof(stats));

	bch2_btree_keys_stats(b, &stats);

	prt_printf(out, "l %u ", b->c.level);
	bch2_bpos_to_text(out, b->data->min_key);
	prt_printf(out, " - ");
	bch2_bpos_to_text(out, b->data->max_key);
	prt_printf(out, ":\n"
		   "    ptrs: ");
	bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key));
	prt_newline(out);

	prt_printf(out,
		   "    format: ");
	bch2_bkey_format_to_text(out, &b->format);

	prt_printf(out,
		   "    unpack fn len: %u\n"
		   "    bytes used %zu/%zu (%zu%% full)\n"
		   "    sib u64s: %u, %u (merge threshold %u)\n"
		   "    nr packed keys %u\n"
		   "    nr unpacked keys %u\n"
		   "    floats %zu\n"
		   "    failed unpacked %zu\n",
		   b->unpack_fn_len,
		   b->nr.live_u64s * sizeof(u64),
		   btree_buf_bytes(b) - sizeof(struct btree_node),
		   b->nr.live_u64s * 100 / btree_max_u64s(c),
		   b->sib_u64s[0],
		   b->sib_u64s[1],
		   c->btree_foreground_merge_threshold,
		   b->nr.packed_keys,
		   b->nr.unpacked_keys,
		   stats.floats,
		   stats.failed);
}

static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
				 const char *label, unsigned nr)
{
	prt_printf(out, "%s\t", label);
	prt_human_readable_u64(out, nr * c->opts.btree_node_size);
	prt_printf(out, " (%u)\n", nr);
}

void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 32);

	prt_btree_cache_line(out, c, "total:", bc->used);
	prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
	prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
	prt_newline(out);

	for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);

	prt_newline(out);
	prt_printf(out, "freed:\t%u\n", bc->freed);
	prt_printf(out, "not freed:\n");
	prt_printf(out, "  dirty\t%u\n", bc->not_freed_dirty);
	prt_printf(out, "  write in flight\t%u\n", bc->not_freed_write_in_flight);
	prt_printf(out, "  read in flight\t%u\n", bc->not_freed_read_in_flight);
	prt_printf(out, "  lock intent failed\t%u\n", bc->not_freed_lock_intent);
	prt_printf(out, "  lock write failed\t%u\n", bc->not_freed_lock_write);
	prt_printf(out, "  access bit\t%u\n", bc->not_freed_access_bit);
	prt_printf(out, "  no evict failed\t%u\n", bc->not_freed_noevict);
	prt_printf(out, "  write blocked\t%u\n", bc->not_freed_write_blocked);
	prt_printf(out, "  will make reachable\t%u\n", bc->not_freed_will_make_reachable);
}