Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 *
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
7 * of the device.
8 *
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
13 *
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
16 *
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
20 *
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22 */
23
24#include "bcache.h"
25#include "btree.h"
26#include "debug.h"
27#include "extents.h"
28
29#include <linux/slab.h>
30#include <linux/bitops.h>
31#include <linux/hash.h>
32#include <linux/kthread.h>
33#include <linux/prefetch.h>
34#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <linux/sched/clock.h>
37#include <linux/rculist.h>
38#include <linux/delay.h>
39#include <trace/events/bcache.h>
40
41/*
42 * Todo:
43 * register_bcache: Return errors out to userspace correctly
44 *
45 * Writeback: don't undirty key until after a cache flush
46 *
47 * Create an iterator for key pointers
48 *
49 * On btree write error, mark bucket such that it won't be freed from the cache
50 *
51 * Journalling:
52 * Check for bad keys in replay
53 * Propagate barriers
54 * Refcount journal entries in journal_replay
55 *
56 * Garbage collection:
57 * Finish incremental gc
58 * Gc should free old UUIDs, data for invalid UUIDs
59 *
60 * Provide a way to list backing device UUIDs we have data cached for, and
61 * probably how long it's been since we've seen them, and a way to invalidate
62 * dirty data for devices that will never be attached again
63 *
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65 * that based on that and how much dirty data we have we can keep writeback
66 * from being starved
67 *
68 * Add a tracepoint or somesuch to watch for writeback starvation
69 *
70 * When btree depth > 1 and splitting an interior node, we have to make sure
71 * alloc_bucket() cannot fail. This should be true but is not completely
72 * obvious.
73 *
74 * Plugging?
75 *
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
78 *
79 * Superblock needs to be fleshed out for multiple cache devices
80 *
81 * Add a sysfs tunable for the number of writeback IOs in flight
82 *
83 * Add a sysfs tunable for the number of open data buckets
84 *
85 * IO tracking: Can we track when one process is doing io on behalf of another?
86 * IO tracking: Don't use just an average, weigh more recent stuff higher
87 *
88 * Test module load/unload
89 */
90
91#define MAX_NEED_GC 64
92#define MAX_SAVE_PRIO 72
93#define MAX_GC_TIMES 100
94#define MIN_GC_NODES 100
95#define GC_SLEEP_MS 100
96
97#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
98
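/*
 * PTR_HASH() reduces a btree node's key to a single value identifying the
 * node: the first pointer shifted down to bucket granularity, mixed with
 * that pointer's generation. mca_hash() and mca_find() use it as the
 * identity for the in-memory btree node hash table.
 */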
99#define PTR_HASH(c, k) \
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101
102#define insert_lock(s, b) ((b)->level <= (s)->lock)
103
104/*
105 * These macros are for recursing down the btree - they handle the details of
106 * locking and looking up nodes in the cache for you. They're best treated as
107 * mere syntax when reading code that uses them.
108 *
109 * op->lock determines whether we take a read or a write lock at a given depth.
110 * If you've got a read lock and find that you need a write lock (i.e. you're
111 * going to have to split), set op->lock and return -EINTR; btree_root() will
112 * call you again and you'll have the correct lock.
113 */
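/*
 * For example, bch_btree_check_recurse() below recurses with
 *
 *	ret = btree(check_recurse, p, b, op);
 *
 * which locks and looks up the child node for key p and calls
 * bch_btree_check_recurse() on it, while bch_btree_check() starts the
 * traversal with btree_root(check_recurse, c, &op).
 */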
114
115/**
116 * btree - recurse down the btree on a specified key
117 * @fn: function to call, which will be passed the child node
118 * @key: key to recurse on
119 * @b: parent btree node
120 * @op: pointer to struct btree_op
121 */
122#define btree(fn, key, b, op, ...) \
123({ \
124 int _r, l = (b)->level - 1; \
125 bool _w = l <= (op)->lock; \
126 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
127 _w, b); \
128 if (!IS_ERR(_child)) { \
129 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
130 rw_unlock(_w, _child); \
131 } else \
132 _r = PTR_ERR(_child); \
133 _r; \
134})
135
136/**
137 * btree_root - call a function on the root of the btree
138 * @fn: function to call, which will be passed the root node
139 * @c: cache set
140 * @op: pointer to struct btree_op
141 */
142#define btree_root(fn, c, op, ...) \
143({ \
144 int _r = -EINTR; \
145 do { \
146 struct btree *_b = (c)->root; \
147 bool _w = insert_lock(op, _b); \
148 rw_lock(_w, _b, _b->level); \
149 if (_b == (c)->root && \
150 _w == insert_lock(op, _b)) { \
151 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
152 } \
153 rw_unlock(_w, _b); \
154 bch_cannibalize_unlock(c); \
155 if (_r == -EINTR) \
156 schedule(); \
157 } while (_r == -EINTR); \
158 \
159 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
160 _r; \
161})
162
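/*
 * write_block() returns the address, within the node's buffer, of the first
 * block not yet written to disk - i.e. where the bset currently being
 * appended to lives.
 */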
163static inline struct bset *write_block(struct btree *b)
164{
165 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
166}
167
168static void bch_btree_init_next(struct btree *b)
169{
170 /* If not a leaf node, always sort */
171 if (b->level && b->keys.nsets)
172 bch_btree_sort(&b->keys, &b->c->sort);
173 else
174 bch_btree_sort_lazy(&b->keys, &b->c->sort);
175
176 if (b->written < btree_blocks(b))
177 bch_bset_init_next(&b->keys, write_block(b),
178 bset_magic(&b->c->sb));
179
180}
181
182/* Btree key manipulation */
183
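/* Drop the 'pin' reference on each bucket this key points into. */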
184void bkey_put(struct cache_set *c, struct bkey *k)
185{
186 unsigned int i;
187
188 for (i = 0; i < KEY_PTRS(k); i++)
189 if (ptr_available(c, k, i))
190 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
191}
192
193/* Btree IO */
194
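/*
 * Checksum of a bset as stored on disk: seeded with the node's first
 * pointer (so identical data at a different location checksums
 * differently) and computed over everything after the leading 8 bytes,
 * which hold the csum field itself.
 */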
195static uint64_t btree_csum_set(struct btree *b, struct bset *i)
196{
197 uint64_t crc = b->key.ptr[0];
198 void *data = (void *) i + 8, *end = bset_bkey_last(i);
199
200 crc = bch_crc64_update(crc, data, end - data);
201 return crc ^ 0xffffffffffffffffULL;
202}
203
204void bch_btree_node_read_done(struct btree *b)
205{
206 const char *err = "bad btree header";
207 struct bset *i = btree_bset_first(b);
208 struct btree_iter *iter;
209
210 /*
211 * c->fill_iter can allocate an iterator with more memory space
212 * than static MAX_BSETS.
213	 * See the comment around cache_set->fill_iter.
214 */
215 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
216 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
217 iter->used = 0;
218
219#ifdef CONFIG_BCACHE_DEBUG
220 iter->b = &b->keys;
221#endif
222
223 if (!i->seq)
224 goto err;
225
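	/*
	 * Walk the bsets laid out contiguously on disk; every bset
	 * belonging to this node shares the seq of the first one.
	 * Validate each and push it onto the iterator so they can all be
	 * sorted into a single in-memory set further down.
	 */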
226 for (;
227 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
228 i = write_block(b)) {
229 err = "unsupported bset version";
230 if (i->version > BCACHE_BSET_VERSION)
231 goto err;
232
233 err = "bad btree header";
234 if (b->written + set_blocks(i, block_bytes(b->c)) >
235 btree_blocks(b))
236 goto err;
237
238 err = "bad magic";
239 if (i->magic != bset_magic(&b->c->sb))
240 goto err;
241
242 err = "bad checksum";
243 switch (i->version) {
244 case 0:
245 if (i->csum != csum_set(i))
246 goto err;
247 break;
248 case BCACHE_BSET_VERSION:
249 if (i->csum != btree_csum_set(b, i))
250 goto err;
251 break;
252 }
253
254 err = "empty set";
255 if (i != b->keys.set[0].data && !i->keys)
256 goto err;
257
258 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
259
260 b->written += set_blocks(i, block_bytes(b->c));
261 }
262
263 err = "corrupted btree";
264 for (i = write_block(b);
265 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
266 i = ((void *) i) + block_bytes(b->c))
267 if (i->seq == b->keys.set[0].data->seq)
268 goto err;
269
270 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
271
272 i = b->keys.set[0].data;
273 err = "short btree key";
274 if (b->keys.set[0].size &&
275 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
276 goto err;
277
278 if (b->written < btree_blocks(b))
279 bch_bset_init_next(&b->keys, write_block(b),
280 bset_magic(&b->c->sb));
281out:
282 mempool_free(iter, &b->c->fill_iter);
283 return;
284err:
285 set_btree_node_io_error(b);
286 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
287 err, PTR_BUCKET_NR(b->c, &b->key, 0),
288 bset_block_offset(b, i), i->keys);
289 goto out;
290}
291
292static void btree_node_read_endio(struct bio *bio)
293{
294 struct closure *cl = bio->bi_private;
295
296 closure_put(cl);
297}
298
299static void bch_btree_node_read(struct btree *b)
300{
301 uint64_t start_time = local_clock();
302 struct closure cl;
303 struct bio *bio;
304
305 trace_bcache_btree_read(b);
306
307 closure_init_stack(&cl);
308
309 bio = bch_bbio_alloc(b->c);
310 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
311 bio->bi_end_io = btree_node_read_endio;
312 bio->bi_private = &cl;
313 bio->bi_opf = REQ_OP_READ | REQ_META;
314
315 bch_bio_map(bio, b->keys.set[0].data);
316
317 bch_submit_bbio(bio, b->c, &b->key, 0);
318 closure_sync(&cl);
319
320 if (bio->bi_status)
321 set_btree_node_io_error(b);
322
323 bch_bbio_free(bio, b->c);
324
325 if (btree_node_io_error(b))
326 goto err;
327
328 bch_btree_node_read_done(b);
329 bch_time_stats_update(&b->c->btree_read_time, start_time);
330
331 return;
332err:
333 bch_cache_set_error(b->c, "io error reading bucket %zu",
334 PTR_BUCKET_NR(b->c, &b->key, 0));
335}
336
337static void btree_complete_write(struct btree *b, struct btree_write *w)
338{
339 if (w->prio_blocked &&
340 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
341 wake_up_allocators(b->c);
342
343 if (w->journal) {
344 atomic_dec_bug(w->journal);
345 __closure_wake_up(&b->c->journal.wait);
346 }
347
348 w->prio_blocked = 0;
349 w->journal = NULL;
350}
351
352static void btree_node_write_unlock(struct closure *cl)
353{
354 struct btree *b = container_of(cl, struct btree, io);
355
356 up(&b->io_mutex);
357}
358
359static void __btree_node_write_done(struct closure *cl)
360{
361 struct btree *b = container_of(cl, struct btree, io);
362 struct btree_write *w = btree_prev_write(b);
363
364 bch_bbio_free(b->bio, b->c);
365 b->bio = NULL;
366 btree_complete_write(b, w);
367
368 if (btree_node_dirty(b))
369 schedule_delayed_work(&b->work, 30 * HZ);
370
371 closure_return_with_destructor(cl, btree_node_write_unlock);
372}
373
374static void btree_node_write_done(struct closure *cl)
375{
376 struct btree *b = container_of(cl, struct btree, io);
377
378 bio_free_pages(b->bio);
379 __btree_node_write_done(cl);
380}
381
382static void btree_node_write_endio(struct bio *bio)
383{
384 struct closure *cl = bio->bi_private;
385 struct btree *b = container_of(cl, struct btree, io);
386
387 if (bio->bi_status)
388 set_btree_node_io_error(b);
389
390 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
391 closure_put(cl);
392}
393
394static void do_btree_node_write(struct btree *b)
395{
396 struct closure *cl = &b->io;
397 struct bset *i = btree_bset_last(b);
398 BKEY_PADDED(key) k;
399
400 i->version = BCACHE_BSET_VERSION;
401 i->csum = btree_csum_set(b, i);
402
403 BUG_ON(b->bio);
404 b->bio = bch_bbio_alloc(b->c);
405
406 b->bio->bi_end_io = btree_node_write_endio;
407 b->bio->bi_private = cl;
408 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
409 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
410 bch_bio_map(b->bio, i);
411
412 /*
413 * If we're appending to a leaf node, we don't technically need FUA -
414 * this write just needs to be persisted before the next journal write,
415 * which will be marked FLUSH|FUA.
416 *
417 * Similarly if we're writing a new btree root - the pointer is going to
418 * be in the next journal entry.
419 *
420 * But if we're writing a new btree node (that isn't a root) or
421 * appending to a non leaf btree node, we need either FUA or a flush
422 * when we write the parent with the new pointer. FUA is cheaper than a
423 * flush, and writes appending to leaf nodes aren't blocking anything so
424 * just make all btree node writes FUA to keep things sane.
425 */
426
427 bkey_copy(&k.key, &b->key);
428 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
429 bset_sector_offset(&b->keys, i));
430
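	/*
	 * Try to bounce the bset into freshly allocated pages, so the
	 * in-flight write isn't affected by keys appended to the node in
	 * the meantime; if the allocation fails, write directly from the
	 * node's memory and wait for the IO to complete before returning.
	 */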
431 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
432 struct bio_vec *bv;
433 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
434 struct bvec_iter_all iter_all;
435
436 bio_for_each_segment_all(bv, b->bio, iter_all) {
437 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
438 addr += PAGE_SIZE;
439 }
440
441 bch_submit_bbio(b->bio, b->c, &k.key, 0);
442
443 continue_at(cl, btree_node_write_done, NULL);
444 } else {
445 /*
446 * No problem for multipage bvec since the bio is
447 * just allocated
448 */
449 b->bio->bi_vcnt = 0;
450 bch_bio_map(b->bio, i);
451
452 bch_submit_bbio(b->bio, b->c, &k.key, 0);
453
454 closure_sync(cl);
455 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
456 }
457}
458
459void __bch_btree_node_write(struct btree *b, struct closure *parent)
460{
461 struct bset *i = btree_bset_last(b);
462
463 lockdep_assert_held(&b->write_lock);
464
465 trace_bcache_btree_write(b);
466
467 BUG_ON(current->bio_list);
468 BUG_ON(b->written >= btree_blocks(b));
469 BUG_ON(b->written && !i->keys);
470 BUG_ON(btree_bset_first(b)->seq != i->seq);
471 bch_check_keys(&b->keys, "writing");
472
473 cancel_delayed_work(&b->work);
474
475 /* If caller isn't waiting for write, parent refcount is cache set */
476 down(&b->io_mutex);
477 closure_init(&b->io, parent ?: &b->c->cl);
478
479 clear_bit(BTREE_NODE_dirty, &b->flags);
480 change_bit(BTREE_NODE_write_idx, &b->flags);
481
482 do_btree_node_write(b);
483
484 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
485 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
486
487 b->written += set_blocks(i, block_bytes(b->c));
488}
489
490void bch_btree_node_write(struct btree *b, struct closure *parent)
491{
492 unsigned int nsets = b->keys.nsets;
493
494 lockdep_assert_held(&b->lock);
495
496 __bch_btree_node_write(b, parent);
497
498 /*
499 * do verify if there was more than one set initially (i.e. we did a
500 * sort) and we sorted down to a single set:
501 */
502 if (nsets && !b->keys.nsets)
503 bch_btree_verify(b);
504
505 bch_btree_init_next(b);
506}
507
508static void bch_btree_node_write_sync(struct btree *b)
509{
510 struct closure cl;
511
512 closure_init_stack(&cl);
513
514 mutex_lock(&b->write_lock);
515 bch_btree_node_write(b, &cl);
516 mutex_unlock(&b->write_lock);
517
518 closure_sync(&cl);
519}
520
521static void btree_node_write_work(struct work_struct *w)
522{
523 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
524
525 mutex_lock(&b->write_lock);
526 if (btree_node_dirty(b))
527 __bch_btree_node_write(b, NULL);
528 mutex_unlock(&b->write_lock);
529}
530
531static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
532{
533 struct bset *i = btree_bset_last(b);
534 struct btree_write *w = btree_current_write(b);
535
536 lockdep_assert_held(&b->write_lock);
537
538 BUG_ON(!b->written);
539 BUG_ON(!i->keys);
540
541 if (!btree_node_dirty(b))
542 schedule_delayed_work(&b->work, 30 * HZ);
543
544 set_btree_node_dirty(b);
545
546 /*
547 * w->journal is always the oldest journal pin of all bkeys
548 * in the leaf node, to make sure the oldest jset seq won't
549 * be increased before this btree node is flushed.
550 */
551 if (journal_ref) {
552 if (w->journal &&
553 journal_pin_cmp(b->c, w->journal, journal_ref)) {
554 atomic_dec_bug(w->journal);
555 w->journal = NULL;
556 }
557
558 if (!w->journal) {
559 w->journal = journal_ref;
560 atomic_inc(w->journal);
561 }
562 }
563
564 /* Force write if set is too big */
565 if (set_bytes(i) > PAGE_SIZE - 48 &&
566 !current->bio_list)
567 bch_btree_node_write(b, NULL);
568}
569
570/*
571 * Btree in memory cache - allocation/freeing
572 * mca -> memory cache
573 */
574
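/*
 * Keep enough in-memory btree nodes in reserve to split a path from a leaf
 * all the way up to the root (roughly 8 per level, plus 16 slack); see the
 * comment in bch_mca_scan() for why this reserve matters.
 */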
575#define mca_reserve(c) (((c->root && c->root->level) \
576 ? c->root->level : 1) * 8 + 16)
577#define mca_can_free(c) \
578 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
579
580static void mca_data_free(struct btree *b)
581{
582 BUG_ON(b->io_mutex.count != 1);
583
584 bch_btree_keys_free(&b->keys);
585
586 b->c->btree_cache_used--;
587 list_move(&b->list, &b->c->btree_cache_freed);
588}
589
590static void mca_bucket_free(struct btree *b)
591{
592 BUG_ON(btree_node_dirty(b));
593
594 b->key.ptr[0] = 0;
595 hlist_del_init_rcu(&b->hash);
596 list_move(&b->list, &b->c->btree_cache_freeable);
597}
598
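/* Page allocation order needed to hold a node of KEY_SIZE(k) sectors. */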
599static unsigned int btree_order(struct bkey *k)
600{
601 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
602}
603
604static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
605{
606 if (!bch_btree_keys_alloc(&b->keys,
607 max_t(unsigned int,
608 ilog2(b->c->btree_pages),
609 btree_order(k)),
610 gfp)) {
611 b->c->btree_cache_used++;
612 list_move(&b->list, &b->c->btree_cache);
613 } else {
614 list_move(&b->list, &b->c->btree_cache_freed);
615 }
616}
617
618static struct btree *mca_bucket_alloc(struct cache_set *c,
619 struct bkey *k, gfp_t gfp)
620{
621 /*
622 * kzalloc() is necessary here for initialization,
623 * see code comments in bch_btree_keys_init().
624 */
625 struct btree *b = kzalloc(sizeof(struct btree), gfp);
626
627 if (!b)
628 return NULL;
629
630 init_rwsem(&b->lock);
631 lockdep_set_novalidate_class(&b->lock);
632 mutex_init(&b->write_lock);
633 lockdep_set_novalidate_class(&b->write_lock);
634 INIT_LIST_HEAD(&b->list);
635 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
636 b->c = c;
637 sema_init(&b->io_mutex, 1);
638
639 mca_data_alloc(b, k, gfp);
640 return b;
641}
642
643static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
644{
645 struct closure cl;
646
647 closure_init_stack(&cl);
648 lockdep_assert_held(&b->c->bucket_lock);
649
650 if (!down_write_trylock(&b->lock))
651 return -ENOMEM;
652
653 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
654
655 if (b->keys.page_order < min_order)
656 goto out_unlock;
657
658 if (!flush) {
659 if (btree_node_dirty(b))
660 goto out_unlock;
661
662 if (down_trylock(&b->io_mutex))
663 goto out_unlock;
664 up(&b->io_mutex);
665 }
666
667retry:
668 /*
669	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
670	 * __bch_btree_node_write(). To avoid an extra flush, acquire
671	 * b->write_lock before checking the BTREE_NODE_dirty bit.
672 */
673 mutex_lock(&b->write_lock);
674 /*
675 * If this btree node is selected in btree_flush_write() by journal
676	 * code, delay and retry until the node has been flushed and the
677	 * BTREE_NODE_journal_flush bit has been cleared by btree_flush_write().
678 */
679 if (btree_node_journal_flush(b)) {
680 pr_debug("bnode %p is flushing by journal, retry", b);
681 mutex_unlock(&b->write_lock);
682 udelay(1);
683 goto retry;
684 }
685
686 if (btree_node_dirty(b))
687 __bch_btree_node_write(b, &cl);
688 mutex_unlock(&b->write_lock);
689
690 closure_sync(&cl);
691
692 /* wait for any in flight btree write */
693 down(&b->io_mutex);
694 up(&b->io_mutex);
695
696 return 0;
697out_unlock:
698 rw_unlock(true, b);
699 return -ENOMEM;
700}
701
702static unsigned long bch_mca_scan(struct shrinker *shrink,
703 struct shrink_control *sc)
704{
705 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
706 struct btree *b, *t;
707 unsigned long i, nr = sc->nr_to_scan;
708 unsigned long freed = 0;
709 unsigned int btree_cache_used;
710
711 if (c->shrinker_disabled)
712 return SHRINK_STOP;
713
714 if (c->btree_cache_alloc_lock)
715 return SHRINK_STOP;
716
717 /* Return -1 if we can't do anything right now */
718 if (sc->gfp_mask & __GFP_IO)
719 mutex_lock(&c->bucket_lock);
720 else if (!mutex_trylock(&c->bucket_lock))
721 return -1;
722
723 /*
724 * It's _really_ critical that we don't free too many btree nodes - we
725 * have to always leave ourselves a reserve. The reserve is how we
726 * guarantee that allocating memory for a new btree node can always
727 * succeed, so that inserting keys into the btree can always succeed and
728 * IO can always make forward progress:
729 */
730 nr /= c->btree_pages;
731 if (nr == 0)
732 nr = 1;
733 nr = min_t(unsigned long, nr, mca_can_free(c));
734
735 i = 0;
736 btree_cache_used = c->btree_cache_used;
737 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
738 if (nr <= 0)
739 goto out;
740
741 if (++i > 3 &&
742 !mca_reap(b, 0, false)) {
743 mca_data_free(b);
744 rw_unlock(true, b);
745 freed++;
746 }
747 nr--;
748 }
749
750 for (; (nr--) && i < btree_cache_used; i++) {
751 if (list_empty(&c->btree_cache))
752 goto out;
753
754 b = list_first_entry(&c->btree_cache, struct btree, list);
755 list_rotate_left(&c->btree_cache);
756
757 if (!b->accessed &&
758 !mca_reap(b, 0, false)) {
759 mca_bucket_free(b);
760 mca_data_free(b);
761 rw_unlock(true, b);
762 freed++;
763 } else
764 b->accessed = 0;
765 }
766out:
767 mutex_unlock(&c->bucket_lock);
768 return freed * c->btree_pages;
769}
770
771static unsigned long bch_mca_count(struct shrinker *shrink,
772 struct shrink_control *sc)
773{
774 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
775
776 if (c->shrinker_disabled)
777 return 0;
778
779 if (c->btree_cache_alloc_lock)
780 return 0;
781
782 return mca_can_free(c) * c->btree_pages;
783}
784
785void bch_btree_cache_free(struct cache_set *c)
786{
787 struct btree *b;
788 struct closure cl;
789
790 closure_init_stack(&cl);
791
792 if (c->shrink.list.next)
793 unregister_shrinker(&c->shrink);
794
795 mutex_lock(&c->bucket_lock);
796
797#ifdef CONFIG_BCACHE_DEBUG
798 if (c->verify_data)
799 list_move(&c->verify_data->list, &c->btree_cache);
800
801 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
802#endif
803
804 list_splice(&c->btree_cache_freeable,
805 &c->btree_cache);
806
807 while (!list_empty(&c->btree_cache)) {
808 b = list_first_entry(&c->btree_cache, struct btree, list);
809
810 /*
811		 * This function is called by cache_set_free(); there is no
812		 * I/O on the cache any more, so it is unnecessary to acquire
813		 * b->write_lock before clearing BTREE_NODE_dirty here.
814 */
815 if (btree_node_dirty(b)) {
816 btree_complete_write(b, btree_current_write(b));
817 clear_bit(BTREE_NODE_dirty, &b->flags);
818 }
819 mca_data_free(b);
820 }
821
822 while (!list_empty(&c->btree_cache_freed)) {
823 b = list_first_entry(&c->btree_cache_freed,
824 struct btree, list);
825 list_del(&b->list);
826 cancel_delayed_work_sync(&b->work);
827 kfree(b);
828 }
829
830 mutex_unlock(&c->bucket_lock);
831}
832
833int bch_btree_cache_alloc(struct cache_set *c)
834{
835 unsigned int i;
836
837 for (i = 0; i < mca_reserve(c); i++)
838 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
839 return -ENOMEM;
840
841 list_splice_init(&c->btree_cache,
842 &c->btree_cache_freeable);
843
844#ifdef CONFIG_BCACHE_DEBUG
845 mutex_init(&c->verify_lock);
846
847 c->verify_ondisk = (void *)
848 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
849
850 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
851
852 if (c->verify_data &&
853 c->verify_data->keys.set->data)
854 list_del_init(&c->verify_data->list);
855 else
856 c->verify_data = NULL;
857#endif
858
859 c->shrink.count_objects = bch_mca_count;
860 c->shrink.scan_objects = bch_mca_scan;
861 c->shrink.seeks = 4;
862 c->shrink.batch = c->btree_pages * 2;
863
864 if (register_shrinker(&c->shrink))
865 pr_warn("bcache: %s: could not register shrinker",
866 __func__);
867
868 return 0;
869}
870
871/* Btree in memory cache - hash table */
872
873static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
874{
875 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
876}
877
878static struct btree *mca_find(struct cache_set *c, struct bkey *k)
879{
880 struct btree *b;
881
882 rcu_read_lock();
883 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
884 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
885 goto out;
886 b = NULL;
887out:
888 rcu_read_unlock();
889 return b;
890}
891
892static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
893{
894 spin_lock(&c->btree_cannibalize_lock);
895 if (likely(c->btree_cache_alloc_lock == NULL)) {
896 c->btree_cache_alloc_lock = current;
897 } else if (c->btree_cache_alloc_lock != current) {
898 if (op)
899 prepare_to_wait(&c->btree_cache_wait, &op->wait,
900 TASK_UNINTERRUPTIBLE);
901 spin_unlock(&c->btree_cannibalize_lock);
902 return -EINTR;
903 }
904 spin_unlock(&c->btree_cannibalize_lock);
905
906 return 0;
907}
908
909static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
910 struct bkey *k)
911{
912 struct btree *b;
913
914 trace_bcache_btree_cache_cannibalize(c);
915
916 if (mca_cannibalize_lock(c, op))
917 return ERR_PTR(-EINTR);
918
919 list_for_each_entry_reverse(b, &c->btree_cache, list)
920 if (!mca_reap(b, btree_order(k), false))
921 return b;
922
923 list_for_each_entry_reverse(b, &c->btree_cache, list)
924 if (!mca_reap(b, btree_order(k), true))
925 return b;
926
927 WARN(1, "btree cache cannibalize failed\n");
928 return ERR_PTR(-ENOMEM);
929}
930
931/*
932 * We can only have one thread cannibalizing other cached btree nodes at a time,
933 * or we'll deadlock. We use an open coded mutex to ensure that, which
934 * mca_cannibalize_lock() takes. This means every time we unlock the root of
935 * the btree, we need to release this lock if we have it held.
936 */
937static void bch_cannibalize_unlock(struct cache_set *c)
938{
939 spin_lock(&c->btree_cannibalize_lock);
940 if (c->btree_cache_alloc_lock == current) {
941 c->btree_cache_alloc_lock = NULL;
942 wake_up(&c->btree_cache_wait);
943 }
944 spin_unlock(&c->btree_cannibalize_lock);
945}
946
947static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
948 struct bkey *k, int level)
949{
950 struct btree *b;
951
952 BUG_ON(current->bio_list);
953
954 lockdep_assert_held(&c->bucket_lock);
955
956 if (mca_find(c, k))
957 return NULL;
958
959 /* btree_free() doesn't free memory; it sticks the node on the end of
960 * the list. Check if there's any freed nodes there:
961 */
962 list_for_each_entry(b, &c->btree_cache_freeable, list)
963 if (!mca_reap(b, btree_order(k), false))
964 goto out;
965
966 /* We never free struct btree itself, just the memory that holds the on
967 * disk node. Check the freed list before allocating a new one:
968 */
969 list_for_each_entry(b, &c->btree_cache_freed, list)
970 if (!mca_reap(b, 0, false)) {
971 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
972 if (!b->keys.set[0].data)
973 goto err;
974 else
975 goto out;
976 }
977
978 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
979 if (!b)
980 goto err;
981
982 BUG_ON(!down_write_trylock(&b->lock));
983 if (!b->keys.set->data)
984 goto err;
985out:
986 BUG_ON(b->io_mutex.count != 1);
987
988 bkey_copy(&b->key, k);
989 list_move(&b->list, &c->btree_cache);
990 hlist_del_init_rcu(&b->hash);
991 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
992
993 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
994 b->parent = (void *) ~0UL;
995 b->flags = 0;
996 b->written = 0;
997 b->level = level;
998
999 if (!b->level)
1000 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
1001 &b->c->expensive_debug_checks);
1002 else
1003 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
1004 &b->c->expensive_debug_checks);
1005
1006 return b;
1007err:
1008 if (b)
1009 rw_unlock(true, b);
1010
1011 b = mca_cannibalize(c, op, k);
1012 if (!IS_ERR(b))
1013 goto out;
1014
1015 return b;
1016}
1017
1018/*
1019 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
1020 * in from disk if necessary.
1021 *
1022 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
1023 *
1024 * The btree node will have either a read or a write lock held, depending on
1025 * level and op->lock.
1026 */
1027struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
1028 struct bkey *k, int level, bool write,
1029 struct btree *parent)
1030{
1031 int i = 0;
1032 struct btree *b;
1033
1034 BUG_ON(level < 0);
1035retry:
1036 b = mca_find(c, k);
1037
1038 if (!b) {
1039 if (current->bio_list)
1040 return ERR_PTR(-EAGAIN);
1041
1042 mutex_lock(&c->bucket_lock);
1043 b = mca_alloc(c, op, k, level);
1044 mutex_unlock(&c->bucket_lock);
1045
1046 if (!b)
1047 goto retry;
1048 if (IS_ERR(b))
1049 return b;
1050
1051 bch_btree_node_read(b);
1052
1053 if (!write)
1054 downgrade_write(&b->lock);
1055 } else {
1056 rw_lock(write, b, level);
1057 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1058 rw_unlock(write, b);
1059 goto retry;
1060 }
1061 BUG_ON(b->level != level);
1062 }
1063
1064 if (btree_node_io_error(b)) {
1065 rw_unlock(write, b);
1066 return ERR_PTR(-EIO);
1067 }
1068
1069 BUG_ON(!b->written);
1070
1071 b->parent = parent;
1072 b->accessed = 1;
1073
1074 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1075 prefetch(b->keys.set[i].tree);
1076 prefetch(b->keys.set[i].data);
1077 }
1078
1079 for (; i <= b->keys.nsets; i++)
1080 prefetch(b->keys.set[i].data);
1081
1082 return b;
1083}
1084
1085static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1086{
1087 struct btree *b;
1088
1089 mutex_lock(&parent->c->bucket_lock);
1090 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1091 mutex_unlock(&parent->c->bucket_lock);
1092
1093 if (!IS_ERR_OR_NULL(b)) {
1094 b->parent = parent;
1095 bch_btree_node_read(b);
1096 rw_unlock(true, b);
1097 }
1098}
1099
1100/* Btree alloc */
1101
1102static void btree_node_free(struct btree *b)
1103{
1104 trace_bcache_btree_node_free(b);
1105
1106 BUG_ON(b == b->c->root);
1107
1108retry:
1109 mutex_lock(&b->write_lock);
1110 /*
1111	 * If the btree node has been selected for flushing in btree_flush_write(),
1112	 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
1113	 * only then is it safe to free the btree node here. Otherwise freeing
1114	 * it would race with the journal code.
1115 */
1116 if (btree_node_journal_flush(b)) {
1117 mutex_unlock(&b->write_lock);
1118 pr_debug("bnode %p journal_flush set, retry", b);
1119 udelay(1);
1120 goto retry;
1121 }
1122
1123 if (btree_node_dirty(b)) {
1124 btree_complete_write(b, btree_current_write(b));
1125 clear_bit(BTREE_NODE_dirty, &b->flags);
1126 }
1127
1128 mutex_unlock(&b->write_lock);
1129
1130 cancel_delayed_work(&b->work);
1131
1132 mutex_lock(&b->c->bucket_lock);
1133 bch_bucket_free(b->c, &b->key);
1134 mca_bucket_free(b);
1135 mutex_unlock(&b->c->bucket_lock);
1136}
1137
1138struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1139 int level, bool wait,
1140 struct btree *parent)
1141{
1142 BKEY_PADDED(key) k;
1143 struct btree *b = ERR_PTR(-EAGAIN);
1144
1145 mutex_lock(&c->bucket_lock);
1146retry:
1147 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1148 goto err;
1149
1150 bkey_put(c, &k.key);
1151 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1152
1153 b = mca_alloc(c, op, &k.key, level);
1154 if (IS_ERR(b))
1155 goto err_free;
1156
1157 if (!b) {
1158 cache_bug(c,
1159 "Tried to allocate bucket that was in btree cache");
1160 goto retry;
1161 }
1162
1163 b->accessed = 1;
1164 b->parent = parent;
1165 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1166
1167 mutex_unlock(&c->bucket_lock);
1168
1169 trace_bcache_btree_node_alloc(b);
1170 return b;
1171err_free:
1172 bch_bucket_free(c, &k.key);
1173err:
1174 mutex_unlock(&c->bucket_lock);
1175
1176 trace_bcache_btree_node_alloc_fail(c);
1177 return b;
1178}
1179
1180static struct btree *bch_btree_node_alloc(struct cache_set *c,
1181 struct btree_op *op, int level,
1182 struct btree *parent)
1183{
1184 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1185}
1186
1187static struct btree *btree_node_alloc_replacement(struct btree *b,
1188 struct btree_op *op)
1189{
1190 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1191
1192 if (!IS_ERR_OR_NULL(n)) {
1193 mutex_lock(&n->write_lock);
1194 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1195 bkey_copy_key(&n->key, &b->key);
1196 mutex_unlock(&n->write_lock);
1197 }
1198
1199 return n;
1200}
1201
1202static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1203{
1204 unsigned int i;
1205
1206 mutex_lock(&b->c->bucket_lock);
1207
1208 atomic_inc(&b->c->prio_blocked);
1209
1210 bkey_copy(k, &b->key);
1211 bkey_copy_key(k, &ZERO_KEY);
1212
1213 for (i = 0; i < KEY_PTRS(k); i++)
1214 SET_PTR_GEN(k, i,
1215 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1216 PTR_BUCKET(b->c, &b->key, i)));
1217
1218 mutex_unlock(&b->c->bucket_lock);
1219}
1220
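/*
 * Check that every cache has enough free btree buckets reserved to split
 * each node on the path from this level up to the root (roughly two
 * buckets per level between here and the root, plus one). If not, wait on
 * the allocator and return -EINTR so the caller restarts from the top of
 * the btree.
 */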
1221static int btree_check_reserve(struct btree *b, struct btree_op *op)
1222{
1223 struct cache_set *c = b->c;
1224 struct cache *ca;
1225 unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1226
1227 mutex_lock(&c->bucket_lock);
1228
1229 for_each_cache(ca, c, i)
1230 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1231 if (op)
1232 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1233 TASK_UNINTERRUPTIBLE);
1234 mutex_unlock(&c->bucket_lock);
1235 return -EINTR;
1236 }
1237
1238 mutex_unlock(&c->bucket_lock);
1239
1240 return mca_cannibalize_lock(b->c, op);
1241}
1242
1243/* Garbage collection */
1244
1245static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1246 struct bkey *k)
1247{
1248 uint8_t stale = 0;
1249 unsigned int i;
1250 struct bucket *g;
1251
1252 /*
1253 * ptr_invalid() can't return true for the keys that mark btree nodes as
1254 * freed, but since ptr_bad() returns true we'll never actually use them
1255 * for anything and thus we don't want mark their pointers here
1256 */
1257 if (!bkey_cmp(k, &ZERO_KEY))
1258 return stale;
1259
1260 for (i = 0; i < KEY_PTRS(k); i++) {
1261 if (!ptr_available(c, k, i))
1262 continue;
1263
1264 g = PTR_BUCKET(c, k, i);
1265
1266 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1267 g->last_gc = PTR_GEN(k, i);
1268
1269 if (ptr_stale(c, k, i)) {
1270 stale = max(stale, ptr_stale(c, k, i));
1271 continue;
1272 }
1273
1274 cache_bug_on(GC_MARK(g) &&
1275 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1276 c, "inconsistent ptrs: mark = %llu, level = %i",
1277 GC_MARK(g), level);
1278
1279 if (level)
1280 SET_GC_MARK(g, GC_MARK_METADATA);
1281 else if (KEY_DIRTY(k))
1282 SET_GC_MARK(g, GC_MARK_DIRTY);
1283 else if (!GC_MARK(g))
1284 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1285
1286 /* guard against overflow */
1287 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1288 GC_SECTORS_USED(g) + KEY_SIZE(k),
1289 MAX_GC_SECTORS_USED));
1290
1291 BUG_ON(!GC_SECTORS_USED(g));
1292 }
1293
1294 return stale;
1295}
1296
1297#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1298
1299void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1300{
1301 unsigned int i;
1302
1303 for (i = 0; i < KEY_PTRS(k); i++)
1304 if (ptr_available(c, k, i) &&
1305 !ptr_stale(c, k, i)) {
1306 struct bucket *b = PTR_BUCKET(c, k, i);
1307
1308 b->gen = PTR_GEN(k, i);
1309
1310 if (level && bkey_cmp(k, &ZERO_KEY))
1311 b->prio = BTREE_PRIO;
1312 else if (!level && b->prio == BTREE_PRIO)
1313 b->prio = INITIAL_PRIO;
1314 }
1315
1316 __bch_btree_mark_key(c, level, k);
1317}
1318
1319void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1320{
1321 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1322}
1323
1324static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1325{
1326 uint8_t stale = 0;
1327 unsigned int keys = 0, good_keys = 0;
1328 struct bkey *k;
1329 struct btree_iter iter;
1330 struct bset_tree *t;
1331
1332 gc->nodes++;
1333
1334 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1335 stale = max(stale, btree_mark_key(b, k));
1336 keys++;
1337
1338 if (bch_ptr_bad(&b->keys, k))
1339 continue;
1340
1341 gc->key_bytes += bkey_u64s(k);
1342 gc->nkeys++;
1343 good_keys++;
1344
1345 gc->data += KEY_SIZE(k);
1346 }
1347
1348 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1349 btree_bug_on(t->size &&
1350 bset_written(&b->keys, t) &&
1351 bkey_cmp(&b->key, &t->end) < 0,
1352 b, "found short btree key in gc");
1353
1354 if (b->c->gc_always_rewrite)
1355 return true;
1356
1357 if (stale > 10)
1358 return true;
1359
1360 if ((keys - good_keys) * 2 > keys)
1361 return true;
1362
1363 return false;
1364}
1365
1366#define GC_MERGE_NODES 4U
1367
1368struct gc_merge_info {
1369 struct btree *b;
1370 unsigned int keys;
1371};
1372
1373static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1374 struct keylist *insert_keys,
1375 atomic_t *journal_ref,
1376 struct bkey *replace_key);
1377
1378static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1379 struct gc_stat *gc, struct gc_merge_info *r)
1380{
1381 unsigned int i, nodes = 0, keys = 0, blocks;
1382 struct btree *new_nodes[GC_MERGE_NODES];
1383 struct keylist keylist;
1384 struct closure cl;
1385 struct bkey *k;
1386
1387 bch_keylist_init(&keylist);
1388
1389 if (btree_check_reserve(b, NULL))
1390 return 0;
1391
1392 memset(new_nodes, 0, sizeof(new_nodes));
1393 closure_init_stack(&cl);
1394
1395 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1396 keys += r[nodes++].keys;
1397
1398 blocks = btree_default_blocks(b->c) * 2 / 3;
1399
1400 if (nodes < 2 ||
1401 __set_blocks(b->keys.set[0].data, keys,
1402 block_bytes(b->c)) > blocks * (nodes - 1))
1403 return 0;
1404
1405 for (i = 0; i < nodes; i++) {
1406 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1407 if (IS_ERR_OR_NULL(new_nodes[i]))
1408 goto out_nocoalesce;
1409 }
1410
1411 /*
1412 * We have to check the reserve here, after we've allocated our new
1413 * nodes, to make sure the insert below will succeed - we also check
1414 * before as an optimization to potentially avoid a bunch of expensive
1415 * allocs/sorts
1416 */
1417 if (btree_check_reserve(b, NULL))
1418 goto out_nocoalesce;
1419
1420 for (i = 0; i < nodes; i++)
1421 mutex_lock(&new_nodes[i]->write_lock);
1422
1423 for (i = nodes - 1; i > 0; --i) {
1424 struct bset *n1 = btree_bset_first(new_nodes[i]);
1425 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1426 struct bkey *k, *last = NULL;
1427
1428 keys = 0;
1429
1430 if (i > 1) {
1431 for (k = n2->start;
1432 k < bset_bkey_last(n2);
1433 k = bkey_next(k)) {
1434 if (__set_blocks(n1, n1->keys + keys +
1435 bkey_u64s(k),
1436 block_bytes(b->c)) > blocks)
1437 break;
1438
1439 last = k;
1440 keys += bkey_u64s(k);
1441 }
1442 } else {
1443 /*
1444 * Last node we're not getting rid of - we're getting
1445 * rid of the node at r[0]. Have to try and fit all of
1446 * the remaining keys into this node; we can't ensure
1447 * they will always fit due to rounding and variable
1448 * length keys (shouldn't be possible in practice,
1449 * though)
1450 */
1451 if (__set_blocks(n1, n1->keys + n2->keys,
1452 block_bytes(b->c)) >
1453 btree_blocks(new_nodes[i]))
1454 goto out_nocoalesce;
1455
1456 keys = n2->keys;
1457 /* Take the key of the node we're getting rid of */
1458 last = &r->b->key;
1459 }
1460
1461 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1462 btree_blocks(new_nodes[i]));
1463
1464 if (last)
1465 bkey_copy_key(&new_nodes[i]->key, last);
1466
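		/*
		 * Move the first 'keys' u64s worth of keys from n2 onto
		 * the end of n1, then slide whatever remains in n2 down
		 * to its start.
		 */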
1467 memcpy(bset_bkey_last(n1),
1468 n2->start,
1469 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1470
1471 n1->keys += keys;
1472 r[i].keys = n1->keys;
1473
1474 memmove(n2->start,
1475 bset_bkey_idx(n2, keys),
1476 (void *) bset_bkey_last(n2) -
1477 (void *) bset_bkey_idx(n2, keys));
1478
1479 n2->keys -= keys;
1480
1481 if (__bch_keylist_realloc(&keylist,
1482 bkey_u64s(&new_nodes[i]->key)))
1483 goto out_nocoalesce;
1484
1485 bch_btree_node_write(new_nodes[i], &cl);
1486 bch_keylist_add(&keylist, &new_nodes[i]->key);
1487 }
1488
1489 for (i = 0; i < nodes; i++)
1490 mutex_unlock(&new_nodes[i]->write_lock);
1491
1492 closure_sync(&cl);
1493
1494 /* We emptied out this node */
1495 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1496 btree_node_free(new_nodes[0]);
1497 rw_unlock(true, new_nodes[0]);
1498 new_nodes[0] = NULL;
1499
1500 for (i = 0; i < nodes; i++) {
1501 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1502 goto out_nocoalesce;
1503
1504 make_btree_freeing_key(r[i].b, keylist.top);
1505 bch_keylist_push(&keylist);
1506 }
1507
1508 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1509 BUG_ON(!bch_keylist_empty(&keylist));
1510
1511 for (i = 0; i < nodes; i++) {
1512 btree_node_free(r[i].b);
1513 rw_unlock(true, r[i].b);
1514
1515 r[i].b = new_nodes[i];
1516 }
1517
1518 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1519 r[nodes - 1].b = ERR_PTR(-EINTR);
1520
1521 trace_bcache_btree_gc_coalesce(nodes);
1522 gc->nodes--;
1523
1524 bch_keylist_free(&keylist);
1525
1526 /* Invalidated our iterator */
1527 return -EINTR;
1528
1529out_nocoalesce:
1530 closure_sync(&cl);
1531
1532 while ((k = bch_keylist_pop(&keylist)))
1533 if (!bkey_cmp(k, &ZERO_KEY))
1534 atomic_dec(&b->c->prio_blocked);
1535 bch_keylist_free(&keylist);
1536
1537 for (i = 0; i < nodes; i++)
1538 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1539 btree_node_free(new_nodes[i]);
1540 rw_unlock(true, new_nodes[i]);
1541 }
1542 return 0;
1543}
1544
1545static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1546 struct btree *replace)
1547{
1548 struct keylist keys;
1549 struct btree *n;
1550
1551 if (btree_check_reserve(b, NULL))
1552 return 0;
1553
1554 n = btree_node_alloc_replacement(replace, NULL);
1555
1556 /* recheck reserve after allocating replacement node */
1557 if (btree_check_reserve(b, NULL)) {
1558 btree_node_free(n);
1559 rw_unlock(true, n);
1560 return 0;
1561 }
1562
1563 bch_btree_node_write_sync(n);
1564
1565 bch_keylist_init(&keys);
1566 bch_keylist_add(&keys, &n->key);
1567
1568 make_btree_freeing_key(replace, keys.top);
1569 bch_keylist_push(&keys);
1570
1571 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1572 BUG_ON(!bch_keylist_empty(&keys));
1573
1574 btree_node_free(replace);
1575 rw_unlock(true, n);
1576
1577 /* Invalidated our iterator */
1578 return -EINTR;
1579}
1580
1581static unsigned int btree_gc_count_keys(struct btree *b)
1582{
1583 struct bkey *k;
1584 struct btree_iter iter;
1585 unsigned int ret = 0;
1586
1587 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1588 ret += bkey_u64s(k);
1589
1590 return ret;
1591}
1592
1593static size_t btree_gc_min_nodes(struct cache_set *c)
1594{
1595 size_t min_nodes;
1596
1597 /*
1598	 * Incremental GC pauses for 100ms whenever front-side I/O arrives.
1599	 * If GC only ever processed a constant number (100) of nodes per
1600	 * pass, then on a cache set with many btree nodes GC would take a
1601	 * very long time, and the front-side I/Os would run out of buckets
1602	 * (since no new bucket can be allocated during GC) and be blocked
1603	 * again.
1604	 * So GC should not process a constant number of nodes, but a number
1605	 * that scales with the size of the btree: GC is divided into a
1606	 * constant number (MAX_GC_TIMES = 100) of passes, so when there are
1607	 * many btree nodes each pass processes more of them, and when there
1608	 * are few it processes fewer per pass (but never less than
1609	 * MIN_GC_NODES).
1610 */
1611 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1612 if (min_nodes < MIN_GC_NODES)
1613 min_nodes = MIN_GC_NODES;
1614
1615 return min_nodes;
1616}
1617
1618
1619static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1620 struct closure *writes, struct gc_stat *gc)
1621{
1622 int ret = 0;
1623 bool should_rewrite;
1624 struct bkey *k;
1625 struct btree_iter iter;
1626 struct gc_merge_info r[GC_MERGE_NODES];
1627 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1628
1629 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1630
1631 for (i = r; i < r + ARRAY_SIZE(r); i++)
1632 i->b = ERR_PTR(-EINTR);
1633
1634 while (1) {
1635 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1636 if (k) {
1637 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1638 true, b);
1639 if (IS_ERR(r->b)) {
1640 ret = PTR_ERR(r->b);
1641 break;
1642 }
1643
1644 r->keys = btree_gc_count_keys(r->b);
1645
1646 ret = btree_gc_coalesce(b, op, gc, r);
1647 if (ret)
1648 break;
1649 }
1650
1651 if (!last->b)
1652 break;
1653
1654 if (!IS_ERR(last->b)) {
1655 should_rewrite = btree_gc_mark_node(last->b, gc);
1656 if (should_rewrite) {
1657 ret = btree_gc_rewrite_node(b, op, last->b);
1658 if (ret)
1659 break;
1660 }
1661
1662 if (last->b->level) {
1663 ret = btree_gc_recurse(last->b, op, writes, gc);
1664 if (ret)
1665 break;
1666 }
1667
1668 bkey_copy_key(&b->c->gc_done, &last->b->key);
1669
1670 /*
1671 * Must flush leaf nodes before gc ends, since replace
1672 * operations aren't journalled
1673 */
1674 mutex_lock(&last->b->write_lock);
1675 if (btree_node_dirty(last->b))
1676 bch_btree_node_write(last->b, writes);
1677 mutex_unlock(&last->b->write_lock);
1678 rw_unlock(true, last->b);
1679 }
1680
1681 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1682 r->b = NULL;
1683
1684 if (atomic_read(&b->c->search_inflight) &&
1685 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1686 gc->nodes_pre = gc->nodes;
1687 ret = -EAGAIN;
1688 break;
1689 }
1690
1691 if (need_resched()) {
1692 ret = -EAGAIN;
1693 break;
1694 }
1695 }
1696
1697 for (i = r; i < r + ARRAY_SIZE(r); i++)
1698 if (!IS_ERR_OR_NULL(i->b)) {
1699 mutex_lock(&i->b->write_lock);
1700 if (btree_node_dirty(i->b))
1701 bch_btree_node_write(i->b, writes);
1702 mutex_unlock(&i->b->write_lock);
1703 rw_unlock(true, i->b);
1704 }
1705
1706 return ret;
1707}
1708
1709static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1710 struct closure *writes, struct gc_stat *gc)
1711{
1712 struct btree *n = NULL;
1713 int ret = 0;
1714 bool should_rewrite;
1715
1716 should_rewrite = btree_gc_mark_node(b, gc);
1717 if (should_rewrite) {
1718 n = btree_node_alloc_replacement(b, NULL);
1719
1720 if (!IS_ERR_OR_NULL(n)) {
1721 bch_btree_node_write_sync(n);
1722
1723 bch_btree_set_root(n);
1724 btree_node_free(b);
1725 rw_unlock(true, n);
1726
1727 return -EINTR;
1728 }
1729 }
1730
1731 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1732
1733 if (b->level) {
1734 ret = btree_gc_recurse(b, op, writes, gc);
1735 if (ret)
1736 return ret;
1737 }
1738
1739 bkey_copy_key(&b->c->gc_done, &b->key);
1740
1741 return ret;
1742}
1743
1744static void btree_gc_start(struct cache_set *c)
1745{
1746 struct cache *ca;
1747 struct bucket *b;
1748 unsigned int i;
1749
1750 if (!c->gc_mark_valid)
1751 return;
1752
1753 mutex_lock(&c->bucket_lock);
1754
1755 c->gc_mark_valid = 0;
1756 c->gc_done = ZERO_KEY;
1757
1758 for_each_cache(ca, c, i)
1759 for_each_bucket(b, ca) {
1760 b->last_gc = b->gen;
1761 if (!atomic_read(&b->pin)) {
1762 SET_GC_MARK(b, 0);
1763 SET_GC_SECTORS_USED(b, 0);
1764 }
1765 }
1766
1767 mutex_unlock(&c->bucket_lock);
1768}
1769
1770static void bch_btree_gc_finish(struct cache_set *c)
1771{
1772 struct bucket *b;
1773 struct cache *ca;
1774 unsigned int i;
1775
1776 mutex_lock(&c->bucket_lock);
1777
1778 set_gc_sectors(c);
1779 c->gc_mark_valid = 1;
1780 c->need_gc = 0;
1781
1782 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1783 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1784 GC_MARK_METADATA);
1785
1786 /* don't reclaim buckets to which writeback keys point */
1787 rcu_read_lock();
1788 for (i = 0; i < c->devices_max_used; i++) {
1789 struct bcache_device *d = c->devices[i];
1790 struct cached_dev *dc;
1791 struct keybuf_key *w, *n;
1792 unsigned int j;
1793
1794 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1795 continue;
1796 dc = container_of(d, struct cached_dev, disk);
1797
1798 spin_lock(&dc->writeback_keys.lock);
1799 rbtree_postorder_for_each_entry_safe(w, n,
1800 &dc->writeback_keys.keys, node)
1801 for (j = 0; j < KEY_PTRS(&w->key); j++)
1802 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1803 GC_MARK_DIRTY);
1804 spin_unlock(&dc->writeback_keys.lock);
1805 }
1806 rcu_read_unlock();
1807
1808 c->avail_nbuckets = 0;
1809 for_each_cache(ca, c, i) {
1810 uint64_t *i;
1811
1812 ca->invalidate_needs_gc = 0;
1813
1814 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1815 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1816
1817 for (i = ca->prio_buckets;
1818 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1819 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1820
1821 for_each_bucket(b, ca) {
1822 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1823
1824 if (atomic_read(&b->pin))
1825 continue;
1826
1827 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1828
1829 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1830 c->avail_nbuckets++;
1831 }
1832 }
1833
1834 mutex_unlock(&c->bucket_lock);
1835}
1836
1837static void bch_btree_gc(struct cache_set *c)
1838{
1839 int ret;
1840 struct gc_stat stats;
1841 struct closure writes;
1842 struct btree_op op;
1843 uint64_t start_time = local_clock();
1844
1845 trace_bcache_gc_start(c);
1846
1847 memset(&stats, 0, sizeof(struct gc_stat));
1848 closure_init_stack(&writes);
1849 bch_btree_op_init(&op, SHRT_MAX);
1850
1851 btree_gc_start(c);
1852
1853 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1854 do {
1855 ret = btree_root(gc_root, c, &op, &writes, &stats);
1856 closure_sync(&writes);
1857 cond_resched();
1858
1859 if (ret == -EAGAIN)
1860 schedule_timeout_interruptible(msecs_to_jiffies
1861 (GC_SLEEP_MS));
1862 else if (ret)
1863 pr_warn("gc failed!");
1864 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1865
1866 bch_btree_gc_finish(c);
1867 wake_up_allocators(c);
1868
1869 bch_time_stats_update(&c->btree_gc_time, start_time);
1870
1871 stats.key_bytes *= sizeof(uint64_t);
1872 stats.data <<= 9;
1873 bch_update_bucket_in_use(c, &stats);
1874 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1875
1876 trace_bcache_gc_end(c);
1877
1878 bch_moving_gc(c);
1879}
1880
1881static bool gc_should_run(struct cache_set *c)
1882{
1883 struct cache *ca;
1884 unsigned int i;
1885
1886 for_each_cache(ca, c, i)
1887 if (ca->invalidate_needs_gc)
1888 return true;
1889
1890 if (atomic_read(&c->sectors_to_gc) < 0)
1891 return true;
1892
1893 return false;
1894}
1895
1896static int bch_gc_thread(void *arg)
1897{
1898 struct cache_set *c = arg;
1899
1900 while (1) {
1901 wait_event_interruptible(c->gc_wait,
1902 kthread_should_stop() ||
1903 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1904 gc_should_run(c));
1905
1906 if (kthread_should_stop() ||
1907 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1908 break;
1909
1910 set_gc_sectors(c);
1911 bch_btree_gc(c);
1912 }
1913
1914 wait_for_kthread_stop();
1915 return 0;
1916}
1917
1918int bch_gc_thread_start(struct cache_set *c)
1919{
1920 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1921 return PTR_ERR_OR_ZERO(c->gc_thread);
1922}
1923
1924/* Initial partial gc */
1925
1926static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1927{
1928 int ret = 0;
1929 struct bkey *k, *p = NULL;
1930 struct btree_iter iter;
1931
1932 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1933 bch_initial_mark_key(b->c, b->level, k);
1934
1935 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1936
1937 if (b->level) {
1938 bch_btree_iter_init(&b->keys, &iter, NULL);
1939
1940 do {
1941 k = bch_btree_iter_next_filter(&iter, &b->keys,
1942 bch_ptr_bad);
1943 if (k) {
1944 btree_node_prefetch(b, k);
1945 /*
1946				 * initialize c->gc_stats.nodes
1947 * for incremental GC
1948 */
1949 b->c->gc_stats.nodes++;
1950 }
1951
1952 if (p)
1953 ret = btree(check_recurse, p, b, op);
1954
1955 p = k;
1956 } while (p && !ret);
1957 }
1958
1959 return ret;
1960}
1961
1962int bch_btree_check(struct cache_set *c)
1963{
1964 struct btree_op op;
1965
1966 bch_btree_op_init(&op, SHRT_MAX);
1967
1968 return btree_root(check_recurse, c, &op);
1969}
1970
1971void bch_initial_gc_finish(struct cache_set *c)
1972{
1973 struct cache *ca;
1974 struct bucket *b;
1975 unsigned int i;
1976
1977 bch_btree_gc_finish(c);
1978
1979 mutex_lock(&c->bucket_lock);
1980
1981 /*
1982 * We need to put some unused buckets directly on the prio freelist in
1983 * order to get the allocator thread started - it needs freed buckets in
1984 * order to rewrite the prios and gens, and it needs to rewrite prios
1985 * and gens in order to free buckets.
1986 *
1987 * This is only safe for buckets that have no live data in them, which
1988 * there should always be some of.
1989 */
1990 for_each_cache(ca, c, i) {
1991 for_each_bucket(b, ca) {
1992 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1993 fifo_full(&ca->free[RESERVE_BTREE]))
1994 break;
1995
1996 if (bch_can_invalidate_bucket(ca, b) &&
1997 !GC_MARK(b)) {
1998 __bch_invalidate_one_bucket(ca, b);
1999 if (!fifo_push(&ca->free[RESERVE_PRIO],
2000 b - ca->buckets))
2001 fifo_push(&ca->free[RESERVE_BTREE],
2002 b - ca->buckets);
2003 }
2004 }
2005 }
2006
2007 mutex_unlock(&c->bucket_lock);
2008}
2009
2010/* Btree insertion */
2011
2012static bool btree_insert_key(struct btree *b, struct bkey *k,
2013 struct bkey *replace_key)
2014{
2015 unsigned int status;
2016
2017 BUG_ON(bkey_cmp(k, &b->key) > 0);
2018
2019 status = bch_btree_insert_key(&b->keys, k, replace_key);
2020 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2021 bch_check_keys(&b->keys, "%u for %s", status,
2022 replace_key ? "replace" : "insert");
2023
2024 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2025 status);
2026 return true;
2027 } else
2028 return false;
2029}
2030
2031static size_t insert_u64s_remaining(struct btree *b)
2032{
2033 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2034
2035 /*
2036 * Might land in the middle of an existing extent and have to split it
2037 */
2038 if (b->keys.ops->is_extents)
2039 ret -= KEY_MAX_U64S;
2040
2041 return max(ret, 0L);
2042}
2043
2044static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2045 struct keylist *insert_keys,
2046 struct bkey *replace_key)
2047{
2048 bool ret = false;
2049 int oldsize = bch_count_data(&b->keys);
2050
2051 while (!bch_keylist_empty(insert_keys)) {
2052 struct bkey *k = insert_keys->keys;
2053
2054 if (bkey_u64s(k) > insert_u64s_remaining(b))
2055 break;
2056
2057 if (bkey_cmp(k, &b->key) <= 0) {
2058 if (!b->level)
2059 bkey_put(b->c, k);
2060
2061 ret |= btree_insert_key(b, k, replace_key);
2062 bch_keylist_pop_front(insert_keys);
2063 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
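			/*
			 * Key straddles the end of this node: insert the
			 * front part that fits here and leave the rest on
			 * the keylist for the next node.
			 */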
2064 BKEY_PADDED(key) temp;
2065 bkey_copy(&temp.key, insert_keys->keys);
2066
2067 bch_cut_back(&b->key, &temp.key);
2068 bch_cut_front(&b->key, insert_keys->keys);
2069
2070 ret |= btree_insert_key(b, &temp.key, replace_key);
2071 break;
2072 } else {
2073 break;
2074 }
2075 }
2076
2077 if (!ret)
2078 op->insert_collision = true;
2079
2080 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2081
2082 BUG_ON(bch_count_data(&b->keys) < oldsize);
2083 return ret;
2084}
2085
2086static int btree_split(struct btree *b, struct btree_op *op,
2087 struct keylist *insert_keys,
2088 struct bkey *replace_key)
2089{
2090 bool split;
2091 struct btree *n1, *n2 = NULL, *n3 = NULL;
2092 uint64_t start_time = local_clock();
2093 struct closure cl;
2094 struct keylist parent_keys;
2095
2096 closure_init_stack(&cl);
2097 bch_keylist_init(&parent_keys);
2098
2099 if (btree_check_reserve(b, op)) {
2100 if (!b->level)
2101 return -EINTR;
2102 else
2103 WARN(1, "insufficient reserve for split\n");
2104 }
2105
2106 n1 = btree_node_alloc_replacement(b, op);
2107 if (IS_ERR(n1))
2108 goto err;
2109
2110 split = set_blocks(btree_bset_first(n1),
2111 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2112
2113 if (split) {
2114 unsigned int keys = 0;
2115
2116 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2117
2118 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2119 if (IS_ERR(n2))
2120 goto err_free1;
2121
2122 if (!b->parent) {
2123 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2124 if (IS_ERR(n3))
2125 goto err_free2;
2126 }
2127
2128 mutex_lock(&n1->write_lock);
2129 mutex_lock(&n2->write_lock);
2130
2131 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2132
2133 /*
2134 * Has to be a linear search because we don't have an auxiliary
2135 * search tree yet
2136 */
2137
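		/*
		 * Keep roughly the first 3/5 of the keys (by u64 count) in
		 * n1; everything after the split point is copied into n2
		 * below.
		 */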
2138 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2139 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2140 keys));
2141
2142 bkey_copy_key(&n1->key,
2143 bset_bkey_idx(btree_bset_first(n1), keys));
2144 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2145
2146 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2147 btree_bset_first(n1)->keys = keys;
2148
2149 memcpy(btree_bset_first(n2)->start,
2150 bset_bkey_last(btree_bset_first(n1)),
2151 btree_bset_first(n2)->keys * sizeof(uint64_t));
2152
2153 bkey_copy_key(&n2->key, &b->key);
2154
2155 bch_keylist_add(&parent_keys, &n2->key);
2156 bch_btree_node_write(n2, &cl);
2157 mutex_unlock(&n2->write_lock);
2158 rw_unlock(true, n2);
2159 } else {
2160 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2161
2162 mutex_lock(&n1->write_lock);
2163 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2164 }
2165
2166 bch_keylist_add(&parent_keys, &n1->key);
2167 bch_btree_node_write(n1, &cl);
2168 mutex_unlock(&n1->write_lock);
2169
2170 if (n3) {
2171 /* Depth increases, make a new root */
2172 mutex_lock(&n3->write_lock);
2173 bkey_copy_key(&n3->key, &MAX_KEY);
2174 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2175 bch_btree_node_write(n3, &cl);
2176 mutex_unlock(&n3->write_lock);
2177
2178 closure_sync(&cl);
2179 bch_btree_set_root(n3);
2180 rw_unlock(true, n3);
2181 } else if (!b->parent) {
2182 /* Root filled up but didn't need to be split */
2183 closure_sync(&cl);
2184 bch_btree_set_root(n1);
2185 } else {
2186 /* Split a non root node */
2187 closure_sync(&cl);
2188 make_btree_freeing_key(b, parent_keys.top);
2189 bch_keylist_push(&parent_keys);
2190
2191 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2192 BUG_ON(!bch_keylist_empty(&parent_keys));
2193 }
2194
2195 btree_node_free(b);
2196 rw_unlock(true, n1);
2197
2198 bch_time_stats_update(&b->c->btree_split_time, start_time);
2199
2200 return 0;
2201err_free2:
2202 bkey_put(b->c, &n2->key);
2203 btree_node_free(n2);
2204 rw_unlock(true, n2);
2205err_free1:
2206 bkey_put(b->c, &n1->key);
2207 btree_node_free(n1);
2208 rw_unlock(true, n1);
2209err:
2210 WARN(1, "bcache: btree split failed (level %u)", b->level);
2211
2212 if (n3 == ERR_PTR(-EAGAIN) ||
2213 n2 == ERR_PTR(-EAGAIN) ||
2214 n1 == ERR_PTR(-EAGAIN))
2215 return -EAGAIN;
2216
2217 return -ENOMEM;
2218}
2219
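/*
 * Try to insert insert_keys directly into b.  If they don't fit the
 * node has to be split, which requires holding locks higher up the
 * tree and may block on the bucket allocator; when that isn't safe
 * from this context we set op->lock accordingly and return -EAGAIN or
 * -EINTR so the caller can retry from the top of the tree.
 */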
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}

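/*
 * Insert a zero-size "check key" with a random pointer on the special
 * PTR_CHECK_DEV device; the cache miss path uses this as a sentinel so
 * that a racing write to the same range is noticed before the miss is
 * filled.  op->lock == -1 means the caller only holds a read lock on
 * b, so it is upgraded to a write lock here and we bail out with
 * -EINTR if the node changed while the lock was dropped.
 */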
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

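/*
 * Context for bch_btree_insert(): carries the keylist, journal ref and
 * replace_key into the bch_btree_map_leaf_nodes() callback below.
 */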
struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

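/*
 * Top level insert interface: map over the leaf nodes covering the
 * keys and insert them, restarting the traversal until the keylist is
 * drained or an error occurs.  A failed replace is reported as -ESRCH.
 */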
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys = keys;
	op.journal_ref = journal_ref;
	op.replace_key = replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i\n", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

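/*
 * Make b the new root of the cache set's btree and journal the change.
 * b must already have been written, and must live in buckets reserved
 * for btree nodes (BTREE_PRIO).
 */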
void bch_btree_set_root(struct btree *b)
{
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

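/*
 * Depth-first walk over the btree starting from the node containing
 * 'from'.  fn is called on every node with MAP_ALL_NODES, or on leaf
 * nodes only; returning anything other than MAP_CONTINUE from fn stops
 * the walk and propagates that value back to the caller.
 */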
static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

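/*
 * Like the node walk above, but fn is called on each key in the leaf
 * nodes.  With MAP_END_KEY, fn is additionally called with a synthetic
 * zero-size key marking the end of each leaf's range.
 */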
static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

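/*
 * A keybuf is a fixed-size buffer of keys kept in an rb tree, refilled
 * by scanning the btree with a predicate; users such as writeback pull
 * keys out of it to operate on.  buf->last_scanned records where the
 * next refill should resume.
 */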
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned int	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

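/*
 * btree map callback for bch_refill_keybuf(): add keys matching the
 * predicate to the keybuf until we pass refill->end or run out of
 * freelist entries, recording how far we got in buf->last_scanned.
 */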
static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

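/*
 * Refill buf with keys matching pred, scanning forward from
 * buf->last_scanned up to 'end', then update buf->start and buf->end
 * to reflect the range of keys currently held in the buffer.
 */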
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

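/*
 * Return true if any key in buf overlapping [start, end) is currently
 * in use (w->private set); overlapping keys that are not in use are
 * dropped from the buffer as we go.
 */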
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

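/*
 * Return the first key in the buffer that isn't already being worked
 * on, marking it in use via w->private; returns NULL if the buffer is
 * empty or everything in it is busy.
 */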
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

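/*
 * Like bch_keybuf_next(), but refills the buffer from the btree when
 * it runs dry, until a key is found or the scan reaches 'end'.
 */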
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished\n");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}